Bug 804760: Drop cached image when we stop capturing + debugs r=anant
author Randell Jesup <rjesup@jesup.org>
Sat, 29 Dec 2012 21:55:24 -0500
changeset 126398 d2b38501de2c7471c0a10db1e2adc2f721a6be69
parent 126394 a812ef63de87c9e3b22f125c448f997062f3fb1d
child 126399 fc263eb8a7dbb543dc9ef83fbd8186cfa2894be2
push id 2151
push user lsblakk@mozilla.com
push date Tue, 19 Feb 2013 18:06:57 +0000
treeherder mozilla-beta@4952e88741ec [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers anant
bugs 804760
milestone 20.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 804760: Drop cached image when we stop capturing + debugs r=anant
content/media/webrtc/MediaEngineWebRTCVideo.cpp
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -7,18 +7,20 @@
 #include "ImageTypes.h"
 #include "ImageContainer.h"
 
 namespace mozilla {
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
+#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
 #else
 #define LOG(msg)
+#define LOGFRAME(msg)
 #endif
 
 /**
  * Webrtc video source.
  */
 NS_IMPL_THREADSAFE_ISUPPORTS1(MediaEngineWebRTCVideoSource, nsIRunnable)
 
 // ViEExternalRenderer Callback.
@@ -72,19 +74,19 @@ MediaEngineWebRTCVideoSource::DeliverFra
   data.mCbCrSize = gfxIntSize(mWidth/ 2, mHeight/ 2);
   data.mPicX = 0;
   data.mPicY = 0;
   data.mPicSize = gfxIntSize(mWidth, mHeight);
   data.mStereoMode = STEREO_MODE_MONO;
 
   videoImage->SetData(data);
 
-#ifdef LOG_ALL_FRAMES
+#ifdef DEBUG
   static uint32_t frame_num = 0;
-  LOG(("frame %d; timestamp %u, render_time %lu", frame_num++, time_stamp, render_time));
+  LOGFRAME(("frame %d; timestamp %u, render_time %lu", frame_num++, time_stamp, render_time));
 #endif
 
   // we don't touch anything in 'this' until here (except for snapshot,
   // which has it's own lock)
   ReentrantMonitorAutoEnter enter(mMonitor);
 
   // implicitly releases last image
   mImage = image.forget();
@@ -107,19 +109,18 @@ MediaEngineWebRTCVideoSource::NotifyPull
   ReentrantMonitorAutoEnter enter(mMonitor);
   if (mState != kStarted)
     return;
 
   // Note: we're not giving up mImage here
   nsRefPtr<layers::Image> image = mImage;
   TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
   TrackTicks delta = target - aLastEndTime;
-#ifdef LOG_ALL_FRAMES
-  LOG(("NotifyPull, target = %lu, delta = %lu", (uint64_t) target, (uint64_t) delta));
-#endif
+  LOGFRAME(("NotifyPull, target = %lu, delta = %lu %s", (uint64_t) target, (uint64_t) delta,
+            image ? "" : "<null>"));
   // NULL images are allowed
   segment.AppendFrame(image ? image.forget() : nullptr, delta, gfxIntSize(mWidth, mHeight));
   aSource->AppendToTrack(aID, &(segment));
   aLastEndTime = target;
 }
 
 void
 MediaEngineWebRTCVideoSource::ChooseCapability(uint32_t aWidth, uint32_t aHeight, uint32_t aMinFPS)
@@ -187,16 +188,17 @@ MediaEngineWebRTCVideoSource::GetUUID(ns
 {
   // mUniqueId is UTF8
   CopyUTF8toUTF16(mUniqueId, aUUID);
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Allocate()
 {
+  LOG((__FUNCTION__));
   if (!mCapabilityChosen) {
     // XXX these should come from constraints
     ChooseCapability(mWidth, mHeight, mMinFps);
   }
 
   if (mState == kReleased && mInitDone) {
     if (mViECapture->AllocateCaptureDevice(mUniqueId, KMaxUniqueIdLength, mCaptureIndex)) {
       return NS_ERROR_FAILURE;
@@ -210,16 +212,17 @@ MediaEngineWebRTCVideoSource::Allocate()
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Deallocate()
 {
+  LOG((__FUNCTION__));
   if (mSources.IsEmpty()) {
     if (mState != kStopped && mState != kAllocated) {
       return NS_ERROR_FAILURE;
     }
 
     mViECapture->ReleaseCaptureDevice(mCaptureIndex);
     mState = kReleased;
     LOG(("Video device %d deallocated", mCaptureIndex));
@@ -236,16 +239,17 @@ MediaEngineWebRTCVideoSource::GetOptions
     ChooseCapability(mWidth, mHeight, mMinFps);
   }
   return &mOpts;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
 {
+  LOG((__FUNCTION__));
   int error = 0;
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
   }
 
   mSources.AppendElement(aStream);
 
   aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
@@ -273,32 +277,36 @@ MediaEngineWebRTCVideoSource::Start(Sour
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
+  LOG((__FUNCTION__));
   if (!mSources.RemoveElement(aSource)) {
     // Already stopped - this is allowed
     return NS_OK;
   }
   if (!mSources.IsEmpty()) {
     return NS_OK;
   }
 
   if (mState != kStarted) {
     return NS_ERROR_FAILURE;
   }
 
   {
     ReentrantMonitorAutoEnter enter(mMonitor);
     mState = kStopped;
     aSource->EndTrack(aID);
+    // Drop any cached image so we don't start with a stale image on next
+    // usage
+    mImage = nullptr;
   }
 
   mViERender->StopRender(mCaptureIndex);
   mViERender->RemoveRenderer(mCaptureIndex);
   mViECapture->StopCapture(mCaptureIndex);
 
   return NS_OK;
 }
@@ -402,16 +410,17 @@ MediaEngineWebRTCVideoSource::Snapshot(u
  */
 
 void
 MediaEngineWebRTCVideoSource::Init()
 {
   mDeviceName[0] = '\0'; // paranoia
   mUniqueId[0] = '\0';
 
+  LOG((__FUNCTION__));
   if (mVideoEngine == NULL) {
     return;
   }
 
   mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
   if (mViEBase == NULL) {
     return;
   }
@@ -431,16 +440,17 @@ MediaEngineWebRTCVideoSource::Init()
   }
 
   mInitDone = true;
 }
 
 void
 MediaEngineWebRTCVideoSource::Shutdown()
 {
+  LOG((__FUNCTION__));
   if (!mInitDone) {
     return;
   }
 
   if (mState == kStarted) {
     while (!mSources.IsEmpty()) {
       Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
     }