Bug 1487057 - Part 11 - Work around the fact that EndTrack uses mCommands. r=pehrsons
author Paul Adenot <paul@paul.cx>
Fri, 12 Oct 2018 15:57:49 +0200
changeset 489230 0190c5793ffeeb0eec281cf377435c7a592415f3
parent 489229 9cf36402deed806fb359c25f633d6dc9f31dc7ea
child 489231 28e4a41b82890ca85a1bd557ef46d63813e339d5
push id 247
push user fmarier@mozilla.com
push date Sat, 27 Oct 2018 01:06:44 +0000
reviewers pehrsons
bugs 1487057
milestone64.0a1
Bug 1487057 - Part 11 - Work around the fact that EndTrack uses mCommands. r=pehrsons This is temporary until Andreas fixes all this.
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -545,54 +545,59 @@ MediaEngineWebRTCMicrophoneSource::Alloc
 
   MOZ_ASSERT(!mHandle, "Only allocate once.");
   mHandle = handle;
 
   handle.forget(aOutHandle);
   return NS_OK;
 }
 
-class EndTrackMessage : public ControlMessage
-{
-  public:
-    EndTrackMessage(MediaStream* aStream,
-                    TrackID aTrackID)
-    : ControlMessage(aStream)
-    , mTrackID(aTrackID)
-  {
-  }
-
-  void Run() override
-  {
-    mStream->AsSourceStream()->EndTrack(mTrackID);
-  }
-
-protected:
-  RefPtr<AudioInputProcessing> mInputProcessing;
-  TrackID mTrackID;
-};
-
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(mState == kStopped);
 
+  class EndTrackMessage : public ControlMessage
+  {
+    public:
+      EndTrackMessage(MediaStream* aStream,
+                      AudioInputProcessing* aAudioInputProcessing,
+                      TrackID aTrackID)
+      : ControlMessage(aStream)
+      , mInputProcessing(aAudioInputProcessing)
+      , mTrackID(aTrackID)
+    {
+    }
+
+    void Run() override
+    {
+      mInputProcessing->End();
+      mStream->AsSourceStream()->EndTrack(mTrackID);
+    }
+
+  protected:
+    RefPtr<AudioInputProcessing> mInputProcessing;
+    TrackID mTrackID;
+  };
+
   if (mStream && IsTrackIDExplicit(mTrackID)) {
     RefPtr<MediaStream> sourceStream = mStream;
     RefPtr<MediaStreamGraphImpl> graphImpl = mStream->GraphImpl();
+    RefPtr<AudioInputProcessing> inputProcessing = mInputProcessing;
     NS_DispatchToMainThread(media::NewRunnableFrom(
       [ graph = std::move(graphImpl),
         stream = std::move(sourceStream),
+        audioInputProcessing = std::move(inputProcessing),
         trackID = mTrackID]() mutable {
         if (graph) {
-        graph->AppendMessage(
-            MakeUnique<EndTrackMessage>(stream, trackID));
+          graph->AppendMessage(
+              MakeUnique<EndTrackMessage>(stream, audioInputProcessing, trackID));
         }
         return NS_OK;
       }
     ));
   }
 
   MOZ_ASSERT(mHandle, "Only deallocate once");
 
@@ -803,16 +808,17 @@ AudioInputProcessing::AudioInputProcessi
 #ifdef DEBUG
   , mLastCallbackAppendTime(0)
 #endif
   , mLiveFramesAppended(false)
   , mLiveSilenceAppended(false)
   , mTrackID(aTrackID)
   , mPrincipal(aPrincipalHandle)
   , mEnabled(false)
+  , mEnded(false)
 {
 }
 
 void
 AudioInputProcessing::Disconnect(MediaStreamGraphImpl* aGraph)
 {
   // This method is just for asserts.
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
@@ -1028,16 +1034,20 @@ AudioInputProcessing::Pull(const RefPtr<
                            TrackID aTrackID,
                            StreamTime aDesiredTime,
                            const PrincipalHandle& aPrincipalHandle)
 {
   TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i",
                                aStream.get(), aTrackID);
   StreamTime delta;
 
+  if (mEnded) {
+    return;
+  }
+
   delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID);
 
   if (delta < 0) {
     LOG_FRAMES(
       ("Not appending silence; %" PRId64 " frames already buffered", -delta));
     return;
   }
 
@@ -1405,16 +1415,22 @@ AudioInputProcessing::DeviceChanged(Medi
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   // Reset some processing
   ResetProcessingIfNeeded(gain_control);
   ResetProcessingIfNeeded(echo_cancellation);
   ResetProcessingIfNeeded(noise_suppression);
 }
 
+void
+AudioInputProcessing::End()
+{
+  mEnded = true;
+}
+
 nsString
 MediaEngineWebRTCAudioCaptureSource::GetName() const
 {
   return NS_LITERAL_STRING(u"AudioCapture");
 }
 
 nsCString
 MediaEngineWebRTCAudioCaptureSource::GetUUID() const
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -221,16 +221,18 @@ public:
 
   // This allow changing the APM options, enabling or disabling processing
   // steps.
   void UpdateAECSettingsIfNeeded(bool aEnable, webrtc::EcModes aMode);
   void UpdateAGCSettingsIfNeeded(bool aEnable, webrtc::AgcModes aMode);
   void UpdateNSSettingsIfNeeded(bool aEnable, webrtc::NsModes aMode);
   void UpdateAPMExtraOptions(bool aExtendedFilter, bool aDelayAgnostic);
 
+  void End();
+
 private:
   ~AudioInputProcessing() = default;
   RefPtr<SourceMediaStream> mStream;
   // This implements the processing algoritm to apply to the input (e.g. a
   // microphone). If all algorithms are disabled, this class in not used. This
   // class only accepts audio chunks of 10ms. It has two inputs and one output:
   // it is fed the speaker data and the microphone data. It outputs processed
   // input data.
@@ -272,16 +274,18 @@ private:
   // Track ID on which the data is to be appended after processing
   TrackID mTrackID;
   // Principal for the data that flows through this class.
   PrincipalHandle mPrincipal;
   // Whether or not this MediaEngine is enabled. If it's not enabled, it
   // operates in "pull" mode, and we append silence only, releasing the audio
   // input stream.
   bool mEnabled;
+  // Whether or not we've ended and removed the track in the SourceMediaStream
+  bool mEnded;
 };
 
 
 class MediaEngineWebRTCAudioCaptureSource : public MediaEngineSource
 {
 public:
   explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
   {