Bug 1143575. Remove Theora-only duplicate frame optimization. r=cpearce
author Robert O'Callahan <robert@ocallahan.org>
Sat, 28 Mar 2015 10:53:37 +1300
changeset 275570 4dbc962455c8285271af26f2a6abbc67bc36c1d0
parent 275569 3e8b2379cefaca160390f400930797249273aed4
child 275571 3fb0da87c65f297cabe286d1932f95d6ef512d15
push id 3189
push user rocallahan@mozilla.com
push date Fri, 03 Jul 2015 11:12:01 +0000
reviewers cpearce
bugs 1143575
milestone 42.0a1
Bug 1143575. Remove Theora-only duplicate frame optimization. r=cpearce
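
Theora marks a frame that is pixel-identical to its predecessor by having th_decode_packetin() return TH_DUPFRAME. OggReader turned such packets into image-less "duplicate" VideoData objects and the state machine skipped compositing them; no other decoder produced duplicates, so the extra code path carried little benefit. Because a TH_DUPFRAME packet leaves the decoder's output buffer untouched, the reader can simply fall through to th_decode_ycbcr_out() and emit an ordinary frame. A minimal standalone sketch of that libtheora pattern (the helper name and error handling are illustrative, not Mozilla code):

    #include <theora/theoradec.h>  // th_decode_packetin, th_decode_ycbcr_out, TH_DUPFRAME

    // Decode one packet and read its pixels back out. TH_DUPFRAME means the
    // decoder's output is unchanged, so the read-out below simply returns the
    // previous frame's data -- no separate "duplicate" frame object is needed.
    static bool DecodePacket(th_dec_ctx* aCtx, ogg_packet* aPacket,
                             th_ycbcr_buffer aOut)
    {
      int ret = th_decode_packetin(aCtx, aPacket, nullptr);
      if (ret != 0 && ret != TH_DUPFRAME) {
        return false;  // malformed packet or decoder error
      }
      return th_decode_ycbcr_out(aCtx, aOut) == 0;
    }

This is the shape DecodeTheora() takes below: decode, read out the YCbCr planes, and build a full VideoData for every packet, duplicates included.
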
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/ogg/OggReader.cpp
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -106,33 +106,21 @@ IsInEmulator()
   return !strncmp(propQemu, "1", 1);
 }
 
 #endif
 
 VideoData::VideoData(int64_t aOffset,
                      int64_t aTime,
                      int64_t aDuration,
-                     int64_t aTimecode)
-  : MediaData(sType, aOffset, aTime, aDuration)
-  , mDuplicate(true)
-{
-  NS_ASSERTION(mDuration >= 0, "Frame must have non-negative duration.");
-  mTimecode = aTimecode;
-}
-
-VideoData::VideoData(int64_t aOffset,
-                     int64_t aTime,
-                     int64_t aDuration,
                      bool aKeyframe,
                      int64_t aTimecode,
                      IntSize aDisplay)
-  : MediaData(sType, aOffset, aTime, aDuration)
+  : MediaData(VIDEO_DATA, aOffset, aTime, aDuration)
   , mDisplay(aDisplay)
-  , mDuplicate(false)
 {
   NS_ASSERTION(mDuration >= 0, "Frame must have non-negative duration.");
   mKeyframe = aKeyframe;
   mTimecode = aTimecode;
 }
 
 VideoData::~VideoData()
 {
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -264,46 +264,26 @@ public:
  // Initialize PlanarYCbCrImage. Only when aCopyData is true,
   // video data is copied to PlanarYCbCrImage.
   static void SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
                                   const VideoInfo& aInfo,
                                   const YCbCrBuffer &aBuffer,
                                   const IntRect& aPicture,
                                   bool aCopyData);
 
-  // Constructs a duplicate VideoData object. This intrinsically tells the
-  // player that it does not need to update the displayed frame when this
-  // frame is played; this frame is identical to the previous.
-  static already_AddRefed<VideoData> CreateDuplicate(int64_t aOffset,
-                                                     int64_t aTime,
-                                                     int64_t aDuration,
-                                                     int64_t aTimecode)
-  {
-    nsRefPtr<VideoData> rv = new VideoData(aOffset, aTime, aDuration, aTimecode);
-    return rv.forget();
-  }
-
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
 
   // Dimensions at which to display the video frame. The picture region
  // will be scaled to this size. This should be the picture region's
   // dimensions scaled with respect to its aspect ratio.
   const IntSize mDisplay;
 
   // This frame's image.
   nsRefPtr<Image> mImage;
 
-  // When true, denotes that this frame is identical to the frame that
-  // came before; it's a duplicate. mBuffer will be empty.
-  const bool mDuplicate;
-
-  VideoData(int64_t aOffset,
-            int64_t aTime,
-            int64_t aDuration,
-            int64_t aTimecode);
 
   VideoData(int64_t aOffset,
             int64_t aTime,
             int64_t aDuration,
             bool aKeyframe,
             int64_t aTimecode,
             IntSize aDisplay);
 
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -2720,20 +2720,16 @@ MediaDecoderStateMachine::Reset()
 }
 
 void MediaDecoderStateMachine::RenderVideoFrame(VideoData* aData,
                                                 TimeStamp aTarget)
 {
   MOZ_ASSERT(OnTaskQueue());
   mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
 
-  if (aData->mDuplicate) {
-    return;
-  }
-
   VERBOSE_LOG("playing video frame %lld (queued=%i, state-machine=%i, decoder-queued=%i)",
               aData->mTime, VideoQueue().GetSize() + mReader->SizeOfVideoQueueInFrames(),
               VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames());
 
   VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
   if (container) {
     if (aData->mImage && !aData->mImage->IsValid()) {
       MediaDecoder::FrameStatistics& frameStats = mDecoder->GetFrameStatistics();
@@ -2966,37 +2962,21 @@ void MediaDecoderStateMachine::AdvanceFr
 }
 
 nsresult
 MediaDecoderStateMachine::DropVideoUpToSeekTarget(VideoData* aSample)
 {
   MOZ_ASSERT(OnTaskQueue());
   nsRefPtr<VideoData> video(aSample);
   MOZ_ASSERT(video);
-  DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld] dup=%d",
-              video->mTime, video->GetEndTime(), video->mDuplicate);
+  DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld]",
+              video->mTime, video->GetEndTime());
   MOZ_ASSERT(mCurrentSeek.Exists());
   const int64_t target = mCurrentSeek.mTarget.mTime;
 
-  // Duplicate handling: if we're dropping frames up the seek target, we must
-  // be wary of Theora duplicate frames. They don't have an image, so if the
-  // target frame is in a run of duplicates, we won't have an image to draw
-  // after the seek. So store the last frame encountered while dropping, and
-  // copy its Image forward onto duplicate frames, so that every frame has
-  // an Image.
-  if (video->mDuplicate &&
-      mFirstVideoFrameAfterSeek &&
-      !mFirstVideoFrameAfterSeek->mDuplicate) {
-    nsRefPtr<VideoData> temp =
-      VideoData::ShallowCopyUpdateTimestampAndDuration(mFirstVideoFrameAfterSeek,
-                                                       video->mTime,
-                                                       video->mDuration);
-    video = temp;
-  }
-
   // If the frame end time is less than the seek target, we won't want
   // to display this frame after the seek, so discard it.
   if (target >= video->GetEndTime()) {
     DECODER_LOG("DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
                 video->mTime, video->GetEndTime(), target);
     mFirstVideoFrameAfterSeek = video;
   } else {
     if (target >= video->mTime && video->GetEndTime() >= target) {
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -849,53 +849,45 @@ nsresult OggReader::DecodeTheora(ogg_pac
 
   int64_t endTime = mTheoraState->Time(aPacket->granulepos);
   if (endTime < aTimeThreshold) {
     // The end time of this frame is already before the current playback
     // position. It will never be displayed, don't bother enqueing it.
     return NS_OK;
   }
 
-  if (ret == TH_DUPFRAME) {
-    nsRefPtr<VideoData> v = VideoData::CreateDuplicate(mDecoder->GetResource()->Tell(),
-                                                       time,
-                                                       endTime - time,
-                                                       aPacket->granulepos);
-    mVideoQueue.Push(v);
-  } else if (ret == 0) {
-    th_ycbcr_buffer buffer;
-    ret = th_decode_ycbcr_out(mTheoraState->mCtx, buffer);
-    NS_ASSERTION(ret == 0, "th_decode_ycbcr_out failed");
-    bool isKeyframe = th_packet_iskeyframe(aPacket) == 1;
-    VideoData::YCbCrBuffer b;
-    for (uint32_t i=0; i < 3; ++i) {
-      b.mPlanes[i].mData = buffer[i].data;
-      b.mPlanes[i].mHeight = buffer[i].height;
-      b.mPlanes[i].mWidth = buffer[i].width;
-      b.mPlanes[i].mStride = buffer[i].stride;
-      b.mPlanes[i].mOffset = b.mPlanes[i].mSkip = 0;
-    }
+  th_ycbcr_buffer buffer;
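+  // A TH_DUPFRAME packet leaves the decoder's output frame untouched, so the
+  // read-out below simply yields the previous frame's pixels; duplicates no
+  // longer get an image-less VideoData of their own.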
+  ret = th_decode_ycbcr_out(mTheoraState->mCtx, buffer);
+  NS_ASSERTION(ret == 0, "th_decode_ycbcr_out failed");
+  bool isKeyframe = th_packet_iskeyframe(aPacket) == 1;
+  VideoData::YCbCrBuffer b;
+  for (uint32_t i=0; i < 3; ++i) {
+    b.mPlanes[i].mData = buffer[i].data;
+    b.mPlanes[i].mHeight = buffer[i].height;
+    b.mPlanes[i].mWidth = buffer[i].width;
+    b.mPlanes[i].mStride = buffer[i].stride;
+    b.mPlanes[i].mOffset = b.mPlanes[i].mSkip = 0;
+  }
 
-    nsRefPtr<VideoData> v = VideoData::Create(mInfo.mVideo,
-                                              mDecoder->GetImageContainer(),
-                                              mDecoder->GetResource()->Tell(),
-                                              time,
-                                              endTime - time,
-                                              b,
-                                              isKeyframe,
-                                              aPacket->granulepos,
-                                              mPicture);
-    if (!v) {
-      // There may be other reasons for this error, but for
-      // simplicity just assume the worst case: out of memory.
-      NS_WARNING("Failed to allocate memory for video frame");
-      return NS_ERROR_OUT_OF_MEMORY;
-    }
-    mVideoQueue.Push(v);
+  nsRefPtr<VideoData> v = VideoData::Create(mInfo.mVideo,
+                                            mDecoder->GetImageContainer(),
+                                            mDecoder->GetResource()->Tell(),
+                                            time,
+                                            endTime - time,
+                                            b,
+                                            isKeyframe,
+                                            aPacket->granulepos,
+                                            mPicture);
+  if (!v) {
+    // There may be other reasons for this error, but for
+    // simplicity just assume the worst case: out of memory.
+    NS_WARNING("Failed to allocate memory for video frame");
+    return NS_ERROR_OUT_OF_MEMORY;
   }
+  mVideoQueue.Push(v);
   return NS_OK;
 }
 
 bool OggReader::DecodeVideoFrame(bool &aKeyframeSkip,
                                  int64_t aTimeThreshold)
 {
   MOZ_ASSERT(OnTaskQueue());