Bug 1330918 - Use timestamps for frames in VideoTrackEncoder. r=bechen,jesup
☠☠ backed out by 951b80d391e6 ☠☠
author Andreas Pehrson <pehrsons@gmail.com>
Wed, 18 Jan 2017 20:08:23 +0100
changeset 329937 b2d8a93a50a81b885abb8880547a0be604bd0be0
parent 329936 05ba56765a31727b97475dcd7644598a99909010
child 329938 aedd9a68f2c013ca2df44e3f331b176a004d2341
push id 36197
push user pehrsons@gmail.com
push date Wed, 18 Jan 2017 21:50:09 +0000
reviewers bechen, jesup
bugs 1330918
milestone 53.0a1
Bug 1330918 - Use timestamps for frames in VideoTrackEncoder. r=bechen,jesup

This makes VideoTrackEncoder use timestamps when it passes frames to
VP8TrackEncoder. It also rewrites the chunks' durations and bases them on said
timestamps. This should mean that VP8TrackEncoder can continue passing
durations to the encoder as it does today.

MozReview-Commit-ID: GaUsF5PR4ZN
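For context, the core of the change is that a chunk's encoded duration is now derived from the wall-clock gap between consecutive frame timestamps rather than from the durations reported by MediaStreamGraph. The following standalone sketch illustrates that conversion only; it uses std::chrono in place of mozilla::TimeStamp, and the RateConvertTicksRoundUp helper, the 90 kHz track rate and the 33 ms gap are assumptions for illustration, not code from this patch.

// Sketch only: duration of the previous frame, in track-rate ticks, derived
// from the timestamp difference between two frames (the idea applied in
// VideoTrackEncoder::AppendVideoSegment below). std::chrono stands in for
// mozilla::TimeStamp; the helper and the 90 kHz rate are assumed.
#include <chrono>
#include <cstdint>
#include <iostream>

// Round-up rate conversion: aTicks at aInRate, expressed at aOutRate.
static int64_t RateConvertTicksRoundUp(int64_t aOutRate, int64_t aInRate,
                                        int64_t aTicks)
{
  return (aTicks * aOutRate + aInRate - 1) / aInRate;
}

int main()
{
  using Clock = std::chrono::steady_clock;
  const int64_t trackRate = 90000;       // track ticks per second (assumed)
  const int64_t usecsPerSec = 1000000;

  Clock::time_point lastFrame = Clock::now();
  Clock::time_point thisFrame = lastFrame + std::chrono::milliseconds(33);

  // Duration of the previous frame = timestamp difference, converted to ticks.
  int64_t diffUsecs = std::chrono::duration_cast<std::chrono::microseconds>(
                          thisFrame - lastFrame).count();
  int64_t durationTicks =
      RateConvertTicksRoundUp(trackRate, usecsPerSec, diffUsecs);

  std::cout << "33 ms gap at 90 kHz -> " << durationTicks << " ticks\n"; // 2970
  return 0;
}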
dom/media/encoder/TrackEncoder.cpp
dom/media/encoder/TrackEncoder.h
--- a/dom/media/encoder/TrackEncoder.cpp
+++ b/dom/media/encoder/TrackEncoder.cpp
@@ -194,16 +194,18 @@ AudioTrackEncoder::SizeOfExcludingThis(m
 
 void
 VideoTrackEncoder::Init(const VideoSegment& aSegment)
 {
   if (mInitialized) {
     return;
   }
 
+  mLastChunk.mDuration = 0;
+
   mInitCounter++;
   TRACK_LOG(LogLevel::Debug, ("Init the video encoder %d times", mInitCounter));
   VideoSegment::ConstChunkIterator iter(aSegment);
   while (!iter.IsEnded()) {
     VideoChunk chunk = *iter;
     if (!chunk.IsNull()) {
       gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
       gfx::IntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
@@ -274,44 +276,83 @@ VideoTrackEncoder::NotifyQueuedTrackChan
 
 nsresult
 VideoTrackEncoder::AppendVideoSegment(const VideoSegment& aSegment)
 {
   ReentrantMonitorAutoEnter mon(mReentrantMonitor);
 
   // Append all video segments from MediaStreamGraph, including null and
   // non-null frames.
-  VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(aSegment));
-  while (!iter.IsEnded()) {
+  VideoSegment::ConstChunkIterator iter(aSegment);
+  for (; !iter.IsEnded(); iter.Next()) {
     VideoChunk chunk = *iter;
-    mLastFrameDuration += chunk.GetDuration();
-    // Send only the unique video frames for encoding.
-    // Or if we got the same video chunks more than 1 seconds,
-    // force to send into encoder.
-    if ((mLastFrame != chunk.mFrame) ||
-        (mLastFrameDuration >= mTrackRate)) {
-      RefPtr<layers::Image> image = chunk.mFrame.GetImage();
+
+    if (mLastChunk.mTimeStamp.IsNull()) {
+      if (chunk.IsNull()) {
+        // The start of this track is frameless. We need to track the time
+        // it takes to get the first frame.
+        mLastChunk.mDuration += chunk.mDuration;
+        continue;
+      }
+
+      // This is the first real chunk in the track. Use its timestamp as the
+      // starting point for this track.
+      MOZ_ASSERT(!chunk.mTimeStamp.IsNull());
+      const StreamTime nullDuration = mLastChunk.mDuration;
+      mLastChunk = chunk;
 
-      // Because we may get chunks with a null image (due to input blocking),
-      // accumulate duration and give it to the next frame that arrives.
-      // Canonically incorrect - the duration should go to the previous frame
-      // - but that would require delaying until the next frame arrives.
-      // Best would be to do like OMXEncoder and pass an effective timestamp
-      // in with each frame.
-      if (image) {
-        mRawSegment.AppendFrame(image.forget(),
-                                mLastFrameDuration,
-                                chunk.mFrame.GetIntrinsicSize(),
-                                PRINCIPAL_HANDLE_NONE,
-                                chunk.mFrame.GetForceBlack());
-        mLastFrameDuration = 0;
+      // Adapt to the time before the first frame. This extends the first frame
+      // from [start, end] to [0, end], but it'll do for now.
+      mLastChunk.mTimeStamp -=
+        TimeDuration::FromMicroseconds(
+          RateConvertTicksRoundUp(PR_USEC_PER_SEC, mTrackRate, nullDuration));
+    }
+
+    MOZ_ASSERT(!mLastChunk.IsNull());
+    if (mLastChunk.CanCombineWithFollowing(chunk) || chunk.IsNull()) {
+      // This is the same frame as before (or null). We extend the last chunk
+      // with its duration.
+      mLastChunk.mDuration += chunk.mDuration;
+
+      if (mLastChunk.mDuration < mTrackRate) {
+        continue;
+      }
+
+      // If we have gotten dupes for over a second, we force send one
+      // to the encoder to make sure there is some output.
+      chunk.mTimeStamp = mLastChunk.mTimeStamp + TimeDuration::FromSeconds(1);
+
+      if (chunk.IsNull()) {
+        // Ensure that we don't pass null to the encoder by making mLastChunk
+        // null later on.
+        chunk.mFrame = mLastChunk.mFrame;
       }
     }
-    mLastFrame.TakeFrom(&chunk.mFrame);
-    iter.Next();
+
+    TimeDuration diff = chunk.mTimeStamp - mLastChunk.mTimeStamp;
+    if (diff <= TimeDuration::FromSeconds(0)) {
+      // The timestamp from mLastChunk is newer than from chunk.
+      // This means the durations reported from MediaStreamGraph for mLastChunk
+      // were larger than the timestamp diff - and durations were used to
+      // trigger the 1-second frame above. This could happen due to drift or
+      // underruns in the graph.
+      chunk.mTimeStamp = mLastChunk.mTimeStamp;
+    } else {
+      RefPtr<layers::Image> lastImage = mLastChunk.mFrame.GetImage();
+      mRawSegment.AppendFrame(lastImage.forget(),
+                              RateConvertTicksRoundUp(
+                                  mTrackRate, PR_USEC_PER_SEC,
+                                  diff.ToMicroseconds()),
+                              mLastChunk.mFrame.GetIntrinsicSize(),
+                              PRINCIPAL_HANDLE_NONE,
+                              mLastChunk.mFrame.GetForceBlack(),
+                              mLastChunk.mTimeStamp);
+    }
+
+    mLastChunk = chunk;
   }
 
   if (mRawSegment.GetDuration() > 0) {
     mReentrantMonitor.NotifyAll();
   }
 
   return NS_OK;
 }
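One detail worth illustrating from the hunk above is the handling of null chunks before the first real frame: their accumulated duration (in track ticks) is converted back to wall-clock time, rounding up, and the first frame's timestamp is moved back by that amount so the frame effectively covers [0, end]. A rough standalone sketch follows, again with std::chrono standing in for mozilla::TimeStamp/TimeDuration; the 90 kHz rate and the 45000-tick null duration are assumed numbers, not values from the tree.

// Sketch only: backdate the first real frame over the leading null (blocked)
// chunks, as the code above does with mLastChunk.mTimeStamp.
#include <chrono>
#include <cstdint>
#include <iostream>

// Round-up rate conversion: aTicks at aInRate, expressed at aOutRate.
static int64_t RateConvertTicksRoundUp(int64_t aOutRate, int64_t aInRate,
                                        int64_t aTicks)
{
  return (aTicks * aOutRate + aInRate - 1) / aInRate;
}

int main()
{
  using namespace std::chrono;
  const int64_t trackRate = 90000;        // track ticks per second (assumed)
  const int64_t usecsPerSec = 1000000;

  // Suppose the track was blocked for 45000 ticks (0.5 s) before the first frame.
  const int64_t nullDurationTicks = 45000;
  int64_t nullUsecs =
      RateConvertTicksRoundUp(usecsPerSec, trackRate, nullDurationTicks);

  steady_clock::time_point firstFrame = steady_clock::now();
  // Shift the first frame's timestamp back so it covers the blocked interval.
  steady_clock::time_point backdated = firstFrame - microseconds(nullUsecs);

  std::cout << "First frame backdated by "
            << duration_cast<milliseconds>(firstFrame - backdated).count()
            << " ms\n";                    // 500 ms
  return 0;
}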
--- a/dom/media/encoder/TrackEncoder.h
+++ b/dom/media/encoder/TrackEncoder.h
@@ -250,18 +250,16 @@ class VideoTrackEncoder : public TrackEn
 public:
   explicit VideoTrackEncoder(TrackRate aTrackRate)
     : TrackEncoder()
     , mFrameWidth(0)
     , mFrameHeight(0)
     , mDisplayWidth(0)
     , mDisplayHeight(0)
     , mTrackRate(aTrackRate)
-    , mTotalFrameDuration(0)
-    , mLastFrameDuration(0)
     , mVideoBitrate(0)
   {}
 
   /**
    * Notified by the same callback of MediaEncoder when it has received a track
    * change from MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
   void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
@@ -334,27 +332,20 @@ protected:
   int mDisplayHeight;
 
   /**
    * The track rate of source video.
    */
   TrackRate mTrackRate;
 
   /**
-   * The total duration of frames in encoded video in StreamTime, kept track of
-   * in subclasses.
-   */
-  StreamTime mTotalFrameDuration;
-
-  /**
    * The last unique frame and duration we've sent to track encoder,
    * kept track of in subclasses.
    */
-  VideoFrame mLastFrame;
-  StreamTime mLastFrameDuration;
+  VideoChunk mLastChunk;
 
   /**
    * A segment queue of audio track data, protected by mReentrantMonitor.
    */
   VideoSegment mRawSegment;
 
   uint32_t mVideoBitrate;
 };