Bug 1513973 - Enable pulling at the same time as starting audio sources. r=jib, a=RyanVM
author Andreas Pehrson <apehrson@mozilla.com>
Fri, 21 Dec 2018 16:55:17 +0100
changeset 509320 1a2283b8f8c9cc85c8d3ba4e5624b76bb365cb26
parent 509319 59f2b329c3a1a189cb830443c0410e3bf54bec44
child 509321 7fcc850b0e9ee55583626e9a40ea830bfdb72307
push id 1905
push user ffxbld-merge
push date Mon, 21 Jan 2019 12:33:13 +0000
treeherder mozilla-release@c2fca1944d8c [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers jib, RyanVM
bugs 1513973
milestone 65.0
Bug 1513973 - Enable pulling at the same time as starting audio sources. r=jib, a=RyanVM
dom/media/MediaManager.cpp
dom/media/webrtc/MediaEngineDefault.cpp
dom/media/webrtc/MediaEngineSource.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -4043,16 +4043,27 @@ SourceListener::InitializeAsync() {
              [stream = mStream, principal = GetPrincipalHandle(),
               audioDevice =
                   mAudioDeviceState ? mAudioDeviceState->mDevice : nullptr,
               videoDevice =
                   mVideoDeviceState ? mVideoDeviceState->mDevice : nullptr](
                  MozPromiseHolder<SourceListenerPromise>& aHolder) {
                if (audioDevice) {
                  audioDevice->SetTrack(stream, kAudioTrack, principal);
+               }
+
+               if (videoDevice) {
+                 videoDevice->SetTrack(stream, kVideoTrack, principal);
+               }
+
+               // SetTrack() queued the tracks. We add them synchronously here
+               // to avoid races.
+               stream->FinishAddTracks();
+
+               if (audioDevice) {
                  nsresult rv = audioDevice->Start();
                  if (NS_FAILED(rv)) {
                    nsString log;
                    if (rv == NS_ERROR_NOT_AVAILABLE) {
                      log.AssignLiteral("Concurrent mic process limit.");
                      aHolder.Reject(
                          MakeRefPtr<MediaMgrError>(
                              MediaMgrError::Name::NotReadableError, log),
@@ -4063,38 +4074,33 @@ SourceListener::InitializeAsync() {
                    aHolder.Reject(MakeRefPtr<MediaMgrError>(
                                       MediaMgrError::Name::AbortError, log),
                                   __func__);
                    return;
                  }
                }
 
                if (videoDevice) {
-                 videoDevice->SetTrack(stream, kVideoTrack, principal);
                  nsresult rv = videoDevice->Start();
                  if (NS_FAILED(rv)) {
                    if (audioDevice) {
                      if (NS_WARN_IF(NS_FAILED(audioDevice->Stop()))) {
                        MOZ_ASSERT_UNREACHABLE("Stopping audio failed");
                      }
                    }
                    nsString log;
                    log.AssignLiteral("Starting video failed");
                    aHolder.Reject(MakeRefPtr<MediaMgrError>(
                                       MediaMgrError::Name::AbortError, log),
                                   __func__);
                    return;
                  }
                }
 
-               // Start() queued the tracks to be added synchronously to avoid
-               // races
-               stream->FinishAddTracks();
                LOG(("started all sources"));
-
                aHolder.Resolve(true, __func__);
              })
       ->Then(GetMainThreadSerialEventTarget(), __func__,
              [self = RefPtr<SourceListener>(this), this]() {
                if (mStopped) {
                  // We were shut down during the async init
                  return SourceListenerPromise::CreateAndResolve(true, __func__);
                }
@@ -4107,25 +4113,18 @@ SourceListener::InitializeAsync() {
                  MOZ_DIAGNOSTIC_ASSERT(!state->mTrackEnabled);
                  MOZ_DIAGNOSTIC_ASSERT(!state->mDeviceEnabled);
                  MOZ_DIAGNOSTIC_ASSERT(!state->mStopped);
 
                  state->mDeviceEnabled = true;
                  state->mTrackEnabled = true;
                  state->mTrackEnabledTime = TimeStamp::Now();
 
-                 if (state->mDevice->GetMediaSource() !=
-                     MediaSourceEnum::AudioCapture) {
-                   // For AudioCapture mStream is a dummy stream, so we don't
-                   // try to enable pulling - there won't be a track to enable
-                   // it for.
-                   mStream->SetPullingEnabled(state == mAudioDeviceState.get()
-                                                  ? kAudioTrack
-                                                  : kVideoTrack,
-                                              true);
+                 if (state == mVideoDeviceState.get()) {
+                   mStream->SetPullingEnabled(kVideoTrack, true);
                  }
                }
                return SourceListenerPromise::CreateAndResolve(true, __func__);
              },
              [self = RefPtr<SourceListener>(this),
               this](RefPtr<MediaMgrError>&& aResult) {
                if (mStopped) {
                  return SourceListenerPromise::CreateAndReject(
--- a/dom/media/webrtc/MediaEngineDefault.cpp
+++ b/dom/media/webrtc/MediaEngineDefault.cpp
@@ -449,33 +449,48 @@ nsresult MediaEngineDefaultAudioSource::
   MOZ_ASSERT(IsTrackIDExplicit(mTrackID),
              "SetTrack() must happen before Start()");
 
   if (!mSineGenerator) {
     // generate sine wave (default 1KHz)
     mSineGenerator = new SineWaveGenerator(mStream->GraphRate(), mFreq);
   }
 
-  MutexAutoLock lock(mMutex);
-  mState = kStarted;
+  {
+    MutexAutoLock lock(mMutex);
+    mState = kStarted;
+  }
+
+  NS_DispatchToMainThread(
+      NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
+        stream->SetPullingEnabled(track, true);
+      }));
+
   return NS_OK;
 }
 
 nsresult MediaEngineDefaultAudioSource::Stop(
     const RefPtr<const AllocationHandle>& aHandle) {
   AssertIsOnOwningThread();
 
   if (mState == kStopped || mState == kAllocated) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState == kStarted);
 
-  MutexAutoLock lock(mMutex);
-  mState = kStopped;
+  {
+    MutexAutoLock lock(mMutex);
+    mState = kStopped;
+  }
+
+  NS_DispatchToMainThread(
+      NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
+        stream->SetPullingEnabled(track, false);
+      }));
   return NS_OK;
 }
 
 nsresult MediaEngineDefaultAudioSource::Reconfigure(
     const RefPtr<AllocationHandle>& aHandle,
     const dom::MediaTrackConstraints& aConstraints,
     const MediaEnginePrefs& aPrefs, const nsString& aDeviceId,
     const char** aOutBadConstraint) {
--- a/dom/media/webrtc/MediaEngineSource.h
+++ b/dom/media/webrtc/MediaEngineSource.h
@@ -143,16 +143,18 @@ class MediaEngineSourceInterface {
                         const PrincipalHandle& aPrincipal) = 0;
 
   /**
    * Called by MediaEngine to start feeding data to the track associated with
    * the given AllocationHandle.
    *
    * If this is the first AllocationHandle to start, the underlying device
    * will be started.
+   *
+   * NB: Audio sources handle the enabling of pulling themselves.
    */
   virtual nsresult Start(const RefPtr<const AllocationHandle>& aHandle) = 0;
 
   /**
    * This brings focus to the selected source, e.g. to bring a captured window
    * to the front.
    *
    * We return one of the following:
@@ -191,16 +193,18 @@ class MediaEngineSourceInterface {
    * Called by MediaEngine to stop feeding data to the track associated with
    * the given AllocationHandle.
    *
    * If this was the last AllocationHandle that had been started,
    * the underlying device will be stopped.
    *
    * Double-stopping a given allocation handle is allowed and will return NS_OK.
    * This is necessary sometimes during shutdown.
+   *
+   * NB: Audio sources handle the disabling of pulling themselves.
    */
   virtual nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) = 0;
 
   /**
    * Called by MediaEngine to deallocate a handle to this source.
    *
    * If this was the last registered AllocationHandle, the underlying device
    * will be deallocated.
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -574,24 +574,26 @@ nsresult MediaEngineWebRTCMicrophoneSour
     return NS_ERROR_FAILURE;
   }
 
   mInputProcessing = new AudioInputProcessing(mDeviceMaxChannelCount, mStream,
                                               mTrackID, mPrincipal);
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, graph = std::move(gripGraph), deviceID]() mutable {
+  NS_DispatchToMainThread(
+      media::NewRunnableFrom([that, graph = std::move(gripGraph), deviceID,
+                              stream = mStream, track = mTrackID]() mutable {
         if (graph) {
           graph->AppendMessage(MakeUnique<StartStopMessage>(
               that->mInputProcessing, StartStopMessage::Start));
         }
 
-        that->mStream->OpenAudioInput(deviceID, that->mInputProcessing);
+        stream->OpenAudioInput(deviceID, that->mInputProcessing);
+        stream->SetPullingEnabled(track, true);
 
         return NS_OK;
       }));
 
   ApplySettings(mCurrentPrefs);
 
   MOZ_ASSERT(mState != kReleased);
   mState = kStarted;
@@ -609,26 +611,28 @@ nsresult MediaEngineWebRTCMicrophoneSour
 
   if (mState == kStopped) {
     // Already stopped - this is allowed
     return NS_OK;
   }
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, graph = std::move(gripGraph), stream = mStream]() mutable {
+  NS_DispatchToMainThread(
+      media::NewRunnableFrom([that, graph = std::move(gripGraph),
+                              stream = mStream, track = mTrackID]() mutable {
         if (graph) {
           graph->AppendMessage(MakeUnique<StartStopMessage>(
               that->mInputProcessing, StartStopMessage::Stop));
         }
 
         CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
         Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
         stream->CloseAudioInput(id, that->mInputProcessing);
+        stream->SetPullingEnabled(track, false);
 
         return NS_OK;
       }));
 
   MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
   mState = kStopped;
 
   return NS_OK;