Bug 1487057 - Part 9 - Make SourceMediaStream::SetEnded go through the message queue so it's in the right order w.r.t. Stop. r=pehrsons
author Paul Adenot <paul@paul.cx>
Mon, 01 Oct 2018 17:12:14 +0200
changeset 489228 e1a790218e20b97d32957965379423a67196f54b
parent 489227 45d2c462dc92c43eed9582c1b309f8df0c66b37f
child 489229 9cf36402deed806fb359c25f633d6dc9f31dc7ea
push id 247
push user fmarier@mozilla.com
push date Sat, 27 Oct 2018 01:06:44 +0000
reviewers pehrsons
bugs 1487057
milestone 64.0a1
Bug 1487057 - Part 9 - Make SourceMediaStream::SetEnded go through the message queue so it's in the right order w.r.t. Stop. r=pehrsons Differential Revision: https://phabricator.services.mozilla.com/D7601
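A rough, self-contained sketch of the ordering argument (illustrative only, not Gecko code: FakeGraph and its FIFO of std::function stand in for MediaStreamGraphImpl and its control message queue, and the two lambdas stand in for the StartStopMessage and EndTrackMessage used in the patch below). Because Stop() appends its message before Deallocate() appends the end-track message, the track can only end after the input processing has been stopped:

#include <cstdio>
#include <functional>
#include <queue>

// Toy stand-in for the graph's control message queue: strictly FIFO, drained
// in order on one thread.
struct FakeGraph {
  std::queue<std::function<void()>> mMessages;
  void AppendMessage(std::function<void()> aMessage) {
    mMessages.push(std::move(aMessage));
  }
  void RunMessages() {
    while (!mMessages.empty()) {
      mMessages.front()();
      mMessages.pop();
    }
  }
};

int main() {
  FakeGraph graph;
  // Stop() appends its stop message first...
  graph.AppendMessage([] { std::puts("StartStopMessage: stop input processing"); });
  // ...then Deallocate() appends the end-track message, so the track can only
  // end once the processing has been stopped.
  graph.AppendMessage([] { std::puts("EndTrackMessage: EndTrack(trackID)"); });
  graph.RunMessages();
  return 0;
}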
dom/media/MediaStreamGraph.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -3318,17 +3318,17 @@ SourceMediaStream::RemoveDirectTrackList
 StreamTime
 SourceMediaStream::GetEndOfAppendedData(TrackID aID)
 {
   MutexAutoLock lock(mMutex);
   TrackData *track = FindDataForTrack(aID);
   if (track) {
     return track->mEndOfFlushedData + track->mData->GetDuration();
   }
-  NS_ERROR("Track not found");
+  MOZ_CRASH("Track not found");
   return 0;
 }
 
 void
 SourceMediaStream::EndTrack(TrackID aID)
 {
   MutexAutoLock lock(mMutex);
   TrackData *track = FindDataForTrack(aID);
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -530,45 +530,77 @@ MediaEngineWebRTCMicrophoneSource::Alloc
                                             const MediaEnginePrefs& aPrefs,
                                             const nsString& aDeviceId,
                                             const ipc::PrincipalInfo& aPrincipalInfo,
                                             AllocationHandle** aOutHandle,
                                             const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aOutHandle);
+  // This is going away in bug 1497254
   auto handle = MakeRefPtr<AllocationHandle>(aConstraints, aPrincipalInfo,
                                              aDeviceId);
-  LOG(("Mic source %p allocation %p Allocate()", this, handle.get()));
-
   nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId,
                                      aOutBadConstraint);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   MOZ_ASSERT(!mHandle, "Only allocate once.");
   mHandle = handle;
 
   handle.forget(aOutHandle);
   return NS_OK;
 }
 
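+// Ends a track on a SourceMediaStream from the graph thread. Going through
+// the graph's ControlMessage queue keeps EndTrack ordered after the
+// StartStopMessage appended by Stop().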
+class EndTrackMessage : public ControlMessage
+{
+public:
+  EndTrackMessage(MediaStream* aStream,
+                  TrackID aTrackID)
+    : ControlMessage(aStream)
+    , mTrackID(aTrackID)
+  {
+  }
+
+  void Run() override
+  {
+    mStream->AsSourceStream()->EndTrack(mTrackID);
+  }
+
+protected:
+  RefPtr<AudioInputProcessing> mInputProcessing;
+  TrackID mTrackID;
+};
+
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(mState == kStopped);
 
   MOZ_DIAGNOSTIC_ASSERT(!mEnabled,
                         "Source should be stopped for the track before removing");
 
   if (mStream && IsTrackIDExplicit(mTrackID)) {
-    mStream->EndTrack(mTrackID);
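+    // End the track via the graph's message queue rather than synchronously,
+    // so that it runs after the stop message dispatched from Stop().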
+    RefPtr<MediaStream> sourceStream = mStream;
+    RefPtr<MediaStreamGraphImpl> graphImpl = mStream->GraphImpl();
+    NS_DispatchToMainThread(media::NewRunnableFrom(
+      [ graph = std::move(graphImpl),
+        stream = std::move(sourceStream),
+        trackID = mTrackID ]() mutable {
+        if (graph) {
+          graph->AppendMessage(
+              MakeUnique<EndTrackMessage>(stream, trackID));
+        }
+        return NS_OK;
+      }
+    ));
   }
 
   MOZ_ASSERT(mHandle, "Only deallocate once");
 
   // Reset all state. This is not strictly necessary, this instance will get
   // destroyed soon.
   mHandle = nullptr;
   mStream = nullptr;
@@ -687,27 +719,28 @@ MediaEngineWebRTCMicrophoneSource::Start
   AssertIsOnOwningThread();
 
   mInputProcessing = new AudioInputProcessing(
     mDeviceMaxChannelCount, mStream, mTrackID, mPrincipal);
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), enabled = that->mEnabled ]() mutable {
+    [ that, graph = std::move(gripGraph), enabled = mEnabled, deviceID ]() mutable {
 
     if (graph) {
-    graph->AppendMessage(
-        MakeUnique<StartStopMessage>(that->mInputProcessing, enabled));
+      graph->AppendMessage(
+          MakeUnique<StartStopMessage>(that->mInputProcessing, enabled));
     }
 
+    that->mStream->OpenAudioInput(deviceID, that->mInputProcessing);
+
     return NS_OK;
   }));
 
-  mStream->OpenAudioInput(deviceID, mInputProcessing);
-
   MOZ_ASSERT(mState != kReleased);
   mState = kStarted;
 
   ApplySettings(mNetPrefs, mStream->GraphImpl());
 
   return NS_OK;
 }
@@ -716,44 +749,44 @@ nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   LOG(("Mic source %p allocation %p Stop()", this, aHandle.get()));
 
   MOZ_ASSERT(mStream, "SetTrack must have been called before ::Stop");
 
-  // This spans setting both the enabled state and mState.
   if (!mEnabled) {
     // Already stopped - this is allowed
     return NS_OK;
   }
 
   mEnabled = false;
 
 #ifdef MOZ_PULSEAUDIO
     MOZ_ASSERT(sInputStreamsOpen > 0);
     sInputStreamsOpen--;
 #endif
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), enabled = that->mEnabled ]() mutable {
+    [ that, graph = std::move(gripGraph), enabled = mEnabled, stream = mStream ]() mutable {
 
     if (graph) {
       graph->AppendMessage(
           MakeUnique<StartStopMessage>(that->mInputProcessing, enabled));
     }
 
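+    // Close the audio input from this main-thread runnable, after the stop
+    // message has been appended to the graph.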
+    CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
+    Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
+    stream->CloseAudioInput(id, that->mInputProcessing);
+
     return NS_OK;
   }));
 
-  CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
-  Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
-  mStream->CloseAudioInput(id, mInputProcessing);
-
   MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
   mState = kStopped;
 
   return NS_OK;
 }
 
 void
@@ -774,16 +807,17 @@ AudioInputProcessing::AudioInputProcessi
   , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
 #ifdef DEBUG
   , mLastCallbackAppendTime(0)
 #endif
   , mLiveFramesAppended(false)
   , mLiveSilenceAppended(false)
   , mTrackID(aTrackID)
   , mPrincipal(aPrincipalHandle)
+  , mEnabled(false)
 {
 }
 
 void
 AudioInputProcessing::Disconnect(MediaStreamGraphImpl* aGraph)
 {
   // This method is just for asserts.
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
@@ -1071,20 +1105,17 @@ AudioInputProcessing::Pull(const RefPtr<
 void
 AudioInputProcessing::NotifyOutputData(MediaStreamGraphImpl* aGraph,
                                        AudioDataValue* aBuffer,
                                        size_t aFrames,
                                        TrackRate aRate,
                                        uint32_t aChannels)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
-
-  if (!mEnabled) {
-    return;
-  }
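+  // Start and stop now go through the graph's message queue, so this callback
+  // should only fire while the input processing is enabled.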
+  MOZ_ASSERT(mEnabled);
 
   if (!mPacketizerOutput ||
       mPacketizerOutput->PacketSize() != aRate/100u ||
       mPacketizerOutput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here: if this changes,
     // we changed devices or something.
     mPacketizerOutput =
       new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels);