Bug 1080755 - Push video frames into MediaStreamGraph instead of waiting for pulls. r=padenot, a=lmandel
authorRandell Jesup <rjesup@jesup.org>
Fri, 24 Oct 2014 08:57:03 -0400
changeset 233488 746e357374f2022020f1bdfba005316ff20587d4
parent 233487 ca735d24f34ca5e59c04d8b6b4408fe2913b1641
child 233489 349413b124b74c2a0696b372ca7cdd628176337d
push id4187
push userbhearsum@mozilla.com
push dateFri, 28 Nov 2014 15:29:12 +0000
treeherdermozilla-beta@f23cc6a30c11 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerspadenot, lmandel
bugs1080755
milestone35.0a2
Bug 1080755 - Push video frames into MediaStreamGraph instead of waiting for pulls. r=padenot, a=lmandel
content/media/webrtc/MediaEngineCameraVideoSource.cpp
content/media/webrtc/MediaEngineCameraVideoSource.h
content/media/webrtc/MediaEngineGonkVideoSource.cpp
content/media/webrtc/MediaEngineWebRTCAudio.cpp
content/media/webrtc/MediaEngineWebRTCVideo.cpp
--- a/content/media/webrtc/MediaEngineCameraVideoSource.cpp
+++ b/content/media/webrtc/MediaEngineCameraVideoSource.cpp
@@ -1,16 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaEngineCameraVideoSource.h"
 
 namespace mozilla {
 
+using namespace mozilla::gfx;
 using dom::ConstrainLongRange;
 using dom::ConstrainDoubleRange;
 using dom::MediaTrackConstraintSet;
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
 #define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
@@ -42,16 +43,36 @@ MediaEngineCameraVideoSource::AreInterse
 /* static */ bool
 MediaEngineCameraVideoSource::Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
   MOZ_ASSERT(AreIntersecting(aA, aB));
   aA.mMin = std::max(aA.mMin, aB.mMin);
   aA.mMax = std::min(aA.mMax, aB.mMax);
   return true;
 }
 
+// guts for appending data to the MSG track
+bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
+                                                 layers::Image* aImage,
+                                                 TrackID aID,
+                                                 TrackTicks delta)
+{
+  MOZ_ASSERT(aSource);
+
+  VideoSegment segment;
+  nsRefPtr<layers::Image> image = aImage;
+  IntSize size(image ? mWidth : 0, image ? mHeight : 0);
+  segment.AppendFrame(image.forget(), delta, size);
+
+  // This is safe from any thread, and is safe if the track is Finished
+  // or Destroyed.
+  // This can fail if either a) we haven't added the track yet, or b)
+  // we've removed or finished the track.
+  return aSource->AppendToTrack(aID, &(segment));
+}
+
 // A special version of the algorithm for cameras that don't list capabilities.
 void
 MediaEngineCameraVideoSource::GuessCapability(
     const VideoTrackConstraintsN& aConstraints,
     const MediaEnginePrefs& aPrefs)
 {
   LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
        aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
--- a/content/media/webrtc/MediaEngineCameraVideoSource.h
+++ b/content/media/webrtc/MediaEngineCameraVideoSource.h
@@ -23,16 +23,17 @@ public:
                                const char* aMonitorName = "Camera.Monitor")
     : MediaEngineVideoSource(kReleased)
     , mMonitor(aMonitorName)
     , mWidth(0)
     , mHeight(0)
     , mInitDone(false)
     , mHasDirectListeners(false)
     , mCaptureIndex(aIndex)
+    , mTrackID(0)
     , mFps(-1)
   {}
 
 
   virtual void GetName(nsAString& aName) MOZ_OVERRIDE;
   virtual void GetUUID(nsAString& aUUID) MOZ_OVERRIDE;
   virtual void SetDirectListeners(bool aHasListeners) MOZ_OVERRIDE;
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
@@ -55,16 +56,22 @@ public:
   virtual nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE
   {
     return NS_ERROR_NOT_IMPLEMENTED;
   }
 
 protected:
   ~MediaEngineCameraVideoSource() {}
 
+  // guts for appending data to the MSG track
+  virtual bool AppendToTrack(SourceMediaStream* aSource,
+                             layers::Image* aImage,
+                             TrackID aID,
+                             TrackTicks delta);
+
   static bool IsWithin(int32_t n, const dom::ConstrainLongRange& aRange);
   static bool IsWithin(double n, const dom::ConstrainDoubleRange& aRange);
   static int32_t Clamp(int32_t n, const dom::ConstrainLongRange& aRange);
   static bool AreIntersecting(const dom::ConstrainLongRange& aA,
                               const dom::ConstrainLongRange& aB);
   static bool Intersect(dom::ConstrainLongRange& aA, const dom::ConstrainLongRange& aB);
   void GuessCapability(const VideoTrackConstraintsN& aConstraints,
                        const MediaEnginePrefs& aPrefs);
@@ -82,16 +89,17 @@ protected:
   int mWidth, mHeight; // protected with mMonitor on Gonk due to different threading
   // end of data protected by mMonitor
 
   nsTArray<SourceMediaStream*> mSources; // When this goes empty, we shut down HW
 
   bool mInitDone;
   bool mHasDirectListeners;
   int mCaptureIndex;
+  TrackID mTrackID;
   int mFps; // Track rate (30 fps by default)
 
   webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
 
   nsString mDeviceName;
   nsString mUniqueId;
 };
 
--- a/content/media/webrtc/MediaEngineGonkVideoSource.cpp
+++ b/content/media/webrtc/MediaEngineGonkVideoSource.cpp
@@ -149,16 +149,17 @@ MediaEngineGonkVideoSource::Start(Source
   aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 
   ReentrantMonitorAutoEnter sync(mCallbackMonitor);
 
   if (mState == kStarted) {
     return NS_OK;
   }
+  mTrackID = aID;
   mImageContainer = layers::LayerManager::CreateImageContainer();
 
   NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
                                        &MediaEngineGonkVideoSource::StartImpl,
                                        mCapability));
   mCallbackMonitor.Wait();
   if (mState != kStarted) {
     return NS_ERROR_FAILURE;
@@ -613,16 +614,31 @@ MediaEngineGonkVideoSource::RotateImage(
   data.mPicY = 0;
   data.mPicSize = IntSize(dstWidth, dstHeight);
   data.mStereoMode = StereoMode::MONO;
 
   videoImage->SetDataNoCopy(data);
 
   // implicitly releases last image
   mImage = image.forget();
+
+  // Push the frame into the MSG with a minimal duration.  This will likely
+  // mean we'll still get NotifyPull calls which will then return the same
+  // frame again with a longer duration.  However, this means we won't
+  // fail to get the frame in and drop frames.
+
+  // XXX The timestamp for the frame should be based on the Capture time,
+  // not the MSG time, and MSG should never, ever block on a (realtime)
+  // video frame (or even really for streaming - audio yes, video probably no).
+  uint32_t len = mSources.Length();
+  for (uint32_t i = 0; i < len; i++) {
+    if (mSources[i]) {
+      AppendToTrack(mSources[i], mImage, mTrackID, 1); // shortest possible duration
+    }
+  }
 }
 
 bool
 MediaEngineGonkVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
   {
     ReentrantMonitorAutoEnter sync(mCallbackMonitor);
     if (mState == kStopped) {
       return false;
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -34,18 +34,20 @@ namespace mozilla {
 
 #ifdef LOG
 #undef LOG
 #endif
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
+#define LOG_FRAMES(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
 #else
 #define LOG(msg)
+#define LOG_FRAMES(msg)
 #endif
 
 /**
  * Webrtc audio source.
  */
 NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
 
 // XXX temp until MSG supports registration
@@ -396,17 +398,17 @@ MediaEngineWebRTCAudioSource::NotifyPull
                                          TrackID aID,
                                          StreamTime aDesiredTime,
                                          TrackTicks &aLastEndTime)
 {
   // Ignore - we push audio data
 #ifdef DEBUG
   TrackTicks target = aSource->TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
   TrackTicks delta = target - aLastEndTime;
-  LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
+  LOG_FRAMES(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
   aLastEndTime = target;
 #endif
 }
 
 void
 MediaEngineWebRTCAudioSource::Init()
 {
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -93,60 +93,70 @@ MediaEngineWebRTCVideoSource::DeliverFra
 
   // we don't touch anything in 'this' until here (except for snapshot,
   // which has it's own lock)
   MonitorAutoLock lock(mMonitor);
 
   // implicitly releases last image
   mImage = image.forget();
 
+  // Push the frame into the MSG with a minimal duration.  This will likely
+  // mean we'll still get NotifyPull calls which will then return the same
+  // frame again with a longer duration.  However, this means we won't
+  // fail to get the frame in and drop frames.
+
+  // XXX The timestamp for the frame should be based on the Capture time,
+  // not the MSG time, and MSG should never, ever block on a (realtime)
+  // video frame (or even really for streaming - audio yes, video probably no).
+  // Note that MediaPipeline currently ignores the timestamps from MSG
+  uint32_t len = mSources.Length();
+  for (uint32_t i = 0; i < len; i++) {
+    if (mSources[i]) {
+      AppendToTrack(mSources[i], mImage, mTrackID, 1); // shortest possible duration
+    }
+  }
+
   return 0;
 }
 
 // Called if the graph thinks it's running out of buffered video; repeat
 // the last frame for whatever minimum period it think it needs.  Note that
 // this means that no *real* frame can be inserted during this period.
 void
 MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
-                                         SourceMediaStream *aSource,
+                                         SourceMediaStream* aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
                                          TrackTicks &aLastEndTime)
 {
   VideoSegment segment;
 
   MonitorAutoLock lock(mMonitor);
   // B2G does AddTrack, but holds kStarted until the hardware changes state.
   // So mState could be kReleased here.  We really don't care about the state,
   // though.
 
-  // Note: we're not giving up mImage here
-  nsRefPtr<layers::Image> image = mImage;
   TrackTicks target = aSource->TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
   TrackTicks delta = target - aLastEndTime;
   LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
-            (int64_t) target, (int64_t) delta, image ? "" : "<null>"));
+            (int64_t) target, (int64_t) delta, mImage ? "" : "<null>"));
 
   // Bug 846188 We may want to limit incoming frames to the requested frame rate
   // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
   // cause issues.
   // We may want to signal if the actual frame rate is below mMinFPS -
   // cameras often don't return the requested frame rate especially in low
   // light; we should consider surfacing this so that we can switch to a
   // lower resolution (which may up the frame rate)
 
   // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
   // Doing so means a negative delta and thus messes up handling of the graph
   if (delta > 0) {
     // nullptr images are allowed
-    IntSize size(image ? mWidth : 0, image ? mHeight : 0);
-    segment.AppendFrame(image.forget(), delta, size);
-    // This can fail if either a) we haven't added the track yet, or b)
-    // we've removed or finished the track.
-    if (aSource->AppendToTrack(aID, &(segment))) {
+    if (AppendToTrack(aSource, mImage, aID, delta)) {
       aLastEndTime = target;
     }
   }
 }
 
 /*static*/
 bool MediaEngineWebRTCVideoSource::SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
                                                         const webrtc::CaptureCapability& aCandidate) {
@@ -358,16 +368,18 @@ MediaEngineWebRTCVideoSource::Start(Sour
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 
   if (mState == kStarted) {
     return NS_OK;
   }
   mImageContainer = layers::LayerManager::CreateImageContainer();
 
   mState = kStarted;
+  mTrackID = aID;
+
   error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
   if (error == -1) {
     return NS_ERROR_FAILURE;
   }
 
   error = mViERender->StartRender(mCaptureIndex);
   if (error == -1) {
     return NS_ERROR_FAILURE;