Bug 1509327 - Use timing and color information from decoded picture in dav1d wrapper. r=jya
authorAlex Chronopoulos <achronop@gmail.com>
Sat, 01 Dec 2018 22:03:29 +0000
changeset 505545 a2d943e9f9aa4f27bb879b4b1fc065c0189bba98
parent 505544 6facccbbd28cafc05f7e8615b7367f0cb3b08449
child 505546 3c08dc2c9eaa6a4892203375fddc160cd9594a52
push id10290
push userffxbld-merge
push dateMon, 03 Dec 2018 16:23:23 +0000
treeherdermozilla-beta@700bed2445e6 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjya
bugs1509327
milestone65.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1509327 - Use timing and color information from decoded picture in dav1d wrapper. r=jya The latest dav1d version supports storing the timing information in the undecoded frame and restoring it later from the decoded picture. This can provide more accurate timing, especially during drain. In addition to that, colorspace information is set according to the size of the image. Finally, this patch addresses some style comments. Differential Revision: https://phabricator.services.mozilla.com/D13428
dom/media/platforms/agnostic/DAV1DDecoder.cpp
dom/media/platforms/agnostic/DAV1DDecoder.h
--- a/dom/media/platforms/agnostic/DAV1DDecoder.cpp
+++ b/dom/media/platforms/agnostic/DAV1DDecoder.cpp
@@ -13,18 +13,16 @@
 
 namespace mozilla {
 
 DAV1DDecoder::DAV1DDecoder(const CreateDecoderParams& aParams)
     : mInfo(aParams.VideoConfig()),
       mTaskQueue(aParams.mTaskQueue),
       mImageContainer(aParams.mImageContainer) {}
 
-DAV1DDecoder::~DAV1DDecoder() {}
-
 RefPtr<MediaDataDecoder::InitPromise> DAV1DDecoder::Init() {
   Dav1dSettings settings;
   dav1d_default_settings(&settings);
   int decoder_threads = 2;
   if (mInfo.mDisplay.width >= 2048) {
     decoder_threads = 8;
   } else if (mInfo.mDisplay.width >= 1024) {
     decoder_threads = 4;
@@ -72,32 +70,32 @@ void DAV1DDecoder::ReleaseDataBuffer(con
   Unused << rv;
 }
 
 RefPtr<MediaDataDecoder::DecodePromise> DAV1DDecoder::InvokeDecode(
     MediaRawData* aSample) {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   MOZ_ASSERT(aSample);
 
-  // Save the last timing values to use in drain.
-  mLastTimecode = aSample->mTimecode;
-  mLastDuration = aSample->mDuration;
-  mLastOffset = aSample->mOffset;
   // Add the buffer to the hashtable in order to increase
   // the ref counter and keep it alive. When dav1d does not
   // need it any more will call it's release callback. Remove
   // the buffer, in there, to reduce the ref counter and eventually
   // free it. We need a hashtable and not an array because the
   // release callback are not coming in the same order that the
   // buffers have been added in the decoder (threading ordering
   // inside decoder)
   mDecodingBuffers.Put(aSample->Data(), aSample);
   Dav1dData data;
   int res = dav1d_data_wrap(&data, aSample->Data(), aSample->Size(),
                             ReleaseDataBuffer_s, this);
+  data.m.timestamp = aSample->mTimecode.ToMicroseconds();
+  data.m.duration = aSample->mDuration.ToMicroseconds();
+  data.m.offset = aSample->mOffset;
+
   if (res < 0) {
     LOG("Create decoder data error.");
     return DecodePromise::CreateAndReject(
         MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
   }
   DecodedData results;
   do {
     res = dav1d_send_data(mContext, &data);
@@ -107,34 +105,33 @@ RefPtr<MediaDataDecoder::DecodePromise> 
           MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__), __func__);
     }
     // Alway consume the whole buffer on success.
     // At this point only -EAGAIN error is expected.
     MOZ_ASSERT((res == 0 && !data.sz) ||
                (res == -EAGAIN && data.sz == aSample->Size()));
 
     MediaResult rs(NS_OK);
-    res = GetPicture(aSample, results, rs);
+    res = GetPicture(results, rs);
     if (res < 0) {
       if (res == -EAGAIN) {
         // No frames ready to return. This is not an
         // error, in some circumstances, we need to
         // feed it with a certain amount of frames
         // before we get a picture.
         continue;
       }
       return DecodePromise::CreateAndReject(rs, __func__);
     }
   } while (data.sz > 0);
 
   return DecodePromise::CreateAndResolve(std::move(results), __func__);
 }
 
-int DAV1DDecoder::GetPicture(const MediaRawData* aSample, DecodedData& aData,
-                             MediaResult& aResult) {
+int DAV1DDecoder::GetPicture(DecodedData& aData, MediaResult& aResult) {
   class Dav1dPictureWrapper {
    public:
     Dav1dPicture* operator&() { return &p; }
     const Dav1dPicture& operator*() const { return p; }
     ~Dav1dPictureWrapper() { dav1d_picture_unref(&p); }
 
    private:
     Dav1dPicture p = Dav1dPicture();
@@ -147,93 +144,102 @@ int DAV1DDecoder::GetPicture(const Media
     aResult = MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__);
     return res;
   }
 
   if ((*picture).p.layout == DAV1D_PIXEL_LAYOUT_I400) {
     return 0;
   }
 
-  RefPtr<VideoData> v = ConstructImage(aSample, *picture);
+  RefPtr<VideoData> v = ConstructImage(*picture);
   if (!v) {
     LOG("Image allocation error: %ux%u"
         " display %ux%u picture %ux%u",
         (*picture).p.w, (*picture).p.h, mInfo.mDisplay.width,
         mInfo.mDisplay.height, mInfo.mImage.width, mInfo.mImage.height);
     aResult = MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
     return -1;
   }
   aData.AppendElement(std::move(v));
   return 0;
 }
 
 already_AddRefed<VideoData> DAV1DDecoder::ConstructImage(
-    const MediaRawData* aSample, const Dav1dPicture& picture) {
+    const Dav1dPicture& aPicture) {
   VideoData::YCbCrBuffer b;
-  if (picture.p.bpc == 10) {
+  if (aPicture.p.bpc == 10) {
     b.mColorDepth = ColorDepth::COLOR_10;
-  } else if (picture.p.bpc == 12) {
+  } else if (aPicture.p.bpc == 12) {
     b.mColorDepth = ColorDepth::COLOR_12;
   }
-  b.mPlanes[0].mData = static_cast<uint8_t*>(picture.data[0]);
-  b.mPlanes[0].mStride = picture.stride[0];
-  b.mPlanes[0].mHeight = picture.p.h;
-  b.mPlanes[0].mWidth = picture.p.w;
+
+  // In every other case, use the default (BT601).
+  if (aPicture.seq_hdr->color_description_present) {
+    if (aPicture.seq_hdr->pri == DAV1D_COLOR_PRI_BT709) {
+      b.mYUVColorSpace = YUVColorSpace::BT709;
+    }
+  } else if (aPicture.p.h >= 720) {
+    b.mYUVColorSpace = YUVColorSpace::BT709;
+  }
+
+  b.mPlanes[0].mData = static_cast<uint8_t*>(aPicture.data[0]);
+  b.mPlanes[0].mStride = aPicture.stride[0];
+  b.mPlanes[0].mHeight = aPicture.p.h;
+  b.mPlanes[0].mWidth = aPicture.p.w;
   b.mPlanes[0].mOffset = 0;
   b.mPlanes[0].mSkip = 0;
 
-  b.mPlanes[1].mData = static_cast<uint8_t*>(picture.data[1]);
-  b.mPlanes[1].mStride = picture.stride[1];
+  b.mPlanes[1].mData = static_cast<uint8_t*>(aPicture.data[1]);
+  b.mPlanes[1].mStride = aPicture.stride[1];
   b.mPlanes[1].mOffset = 0;
   b.mPlanes[1].mSkip = 0;
 
-  b.mPlanes[2].mData = static_cast<uint8_t*>(picture.data[2]);
-  b.mPlanes[2].mStride = picture.stride[1];
+  b.mPlanes[2].mData = static_cast<uint8_t*>(aPicture.data[2]);
+  b.mPlanes[2].mStride = aPicture.stride[1];
   b.mPlanes[2].mOffset = 0;
   b.mPlanes[2].mSkip = 0;
 
   // https://code.videolan.org/videolan/dav1d/blob/master/tools/output/yuv.c#L67
-  const int ss_ver = picture.p.layout == DAV1D_PIXEL_LAYOUT_I420;
-  const int ss_hor = picture.p.layout != DAV1D_PIXEL_LAYOUT_I444;
+  const int ss_ver = aPicture.p.layout == DAV1D_PIXEL_LAYOUT_I420;
+  const int ss_hor = aPicture.p.layout != DAV1D_PIXEL_LAYOUT_I444;
 
-  b.mPlanes[1].mHeight = (picture.p.h + ss_ver) >> ss_ver;
-  b.mPlanes[1].mWidth = (picture.p.w + ss_hor) >> ss_hor;
+  b.mPlanes[1].mHeight = (aPicture.p.h + ss_ver) >> ss_ver;
+  b.mPlanes[1].mWidth = (aPicture.p.w + ss_hor) >> ss_hor;
 
-  b.mPlanes[2].mHeight = (picture.p.h + ss_ver) >> ss_ver;
-  b.mPlanes[2].mWidth = (picture.p.w + ss_hor) >> ss_hor;
+  b.mPlanes[2].mHeight = (aPicture.p.h + ss_ver) >> ss_ver;
+  b.mPlanes[2].mWidth = (aPicture.p.w + ss_hor) >> ss_hor;
 
   // Timestamp, duration and offset used here are wrong.
   // We need to take those values from the decoder. Latest
   // dav1d version allows for that.
+  media::TimeUnit timecode =
+      media::TimeUnit::FromMicroseconds(aPicture.m.timestamp);
+  media::TimeUnit duration =
+      media::TimeUnit::FromMicroseconds(aPicture.m.duration);
+  int64_t offset = aPicture.m.offset;
+  bool keyframe = aPicture.frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
+
   return VideoData::CreateAndCopyData(
-      mInfo, mImageContainer, aSample->mOffset, aSample->mTime,
-      aSample->mDuration, b, aSample->mKeyframe, aSample->mTimecode,
-      mInfo.ScaledImageRect(picture.p.w, picture.p.h));
+      mInfo, mImageContainer, offset, timecode, duration, b, keyframe, timecode,
+      mInfo.ScaledImageRect(aPicture.p.w, aPicture.p.h));
 }
 
 RefPtr<MediaDataDecoder::DecodePromise> DAV1DDecoder::Drain() {
   RefPtr<DAV1DDecoder> self = this;
   return InvokeAsync(mTaskQueue, __func__, [self, this] {
     int res = 0;
     DecodedData results;
     do {
-      RefPtr<MediaRawData> empty(new MediaRawData());
-      // Update last timecode in case we loop over.
-      empty->mTimecode = empty->mTime = mLastTimecode =
-          mLastTimecode + mLastDuration;
-      empty->mDuration = mLastDuration;
-      empty->mOffset = mLastOffset;
-
       MediaResult rs(NS_OK);
-      res = GetPicture(empty, results, rs);
+      res = GetPicture(results, rs);
       if (res < 0 && res != -EAGAIN) {
         return DecodePromise::CreateAndReject(rs, __func__);
       }
     } while (res != -EAGAIN);
-    return DecodePromise::CreateAndResolve(results, __func__);
+    return DecodePromise::CreateAndResolve(std::move(results), __func__);
   });
 }
 
 RefPtr<MediaDataDecoder::FlushPromise> DAV1DDecoder::Flush() {
   RefPtr<DAV1DDecoder> self = this;
   return InvokeAsync(mTaskQueue, __func__, [self]() {
     dav1d_flush(self->mContext);
     return FlushPromise::CreateAndResolve(true, __func__);
--- a/dom/media/platforms/agnostic/DAV1DDecoder.h
+++ b/dom/media/platforms/agnostic/DAV1DDecoder.h
@@ -28,35 +28,27 @@ class DAV1DDecoder : public MediaDataDec
   RefPtr<ShutdownPromise> Shutdown() override;
   nsCString GetDescriptionName() const override {
     return NS_LITERAL_CSTRING("av1 libdav1d video decoder");
   }
 
   void ReleaseDataBuffer(const uint8_t* buf);
 
  private:
-  ~DAV1DDecoder();
+  ~DAV1DDecoder() = default;
   RefPtr<DecodePromise> InvokeDecode(MediaRawData* aSample);
-  int GetPicture(const MediaRawData* aSample, DecodedData& aData,
-                 MediaResult& aResult);
-  already_AddRefed<VideoData> ConstructImage(const MediaRawData* aSample,
-                                             const Dav1dPicture&);
+  int GetPicture(DecodedData& aData, MediaResult& aResult);
+  already_AddRefed<VideoData> ConstructImage(const Dav1dPicture& aPicture);
 
   Dav1dContext* mContext;
 
   const VideoInfo& mInfo;
   const RefPtr<TaskQueue> mTaskQueue;
   const RefPtr<layers::ImageContainer> mImageContainer;
 
   // Keep the buffers alive until dav1d
   // does not need them any more.
   MediaRawDataHashtable mDecodingBuffers;
-
-  // Store the last timing values to use
-  // them during drain.
-  media::TimeUnit mLastTimecode;
-  media::TimeUnit mLastDuration;
-  int64_t mLastOffset = 0;
 };
 
 }  // namespace mozilla
 
 #endif  // DAV1DDecoder_h_