Bug 1525323 - Remove SourceTrackListener from MediaManager. r=padenot
author: Andreas Pehrson <apehrson@mozilla.com>
date: Wed, 27 Mar 2019 18:05:56 +0000
changeset 466787 1f79ed91a20bbe91ced53c1fe3c70fafd6272fd5
parent 466786 9414b7e93b3570b3e4209992a3adf3c94e388dc1
child 466788 9e7832fee028f5de447c95bc0d88ddea5b09ab37
push id: 35780
push user: opoprus@mozilla.com
push date: Fri, 29 Mar 2019 21:53:01 +0000
treeherder: mozilla-central@414f37afbe07
reviewers: padenot
bugs: 1525323
milestone: 68.0a1
This moves the responsibility of forwarding NotifyPull() from the graph thread to the MediaEngineSources out of MediaManager and into the sources themselves. This is better aligned with how the sources work, since not all sources need pulling. It also clarifies the lifetime of these listeners relative to when pulling is enabled for a track, since the sources already handle enabling pulling themselves.

Differential Revision: https://phabricator.services.mozilla.com/D24896
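In outline, each pulled source now owns a small MediaStreamTrackListener that the graph drives directly. The following is a minimal sketch of that pattern, condensed from the MediaEngineDefaultAudioSource changes in the diff below; MySourcePullListener is an illustrative name, and appending silence stands in for real data production:

// Sketch only, modeled on the AudioSourcePullListener added in this patch.
// The types (MediaStreamTrackListener, SourceMediaStream, TrackID,
// StreamTime, AudioSegment) are the Gecko classes used in the diff; the
// class name and the silence-filling body are illustrative assumptions.
class MySourcePullListener : public MediaStreamTrackListener {
 public:
  MySourcePullListener(RefPtr<SourceMediaStream> aStream, TrackID aTrackID)
      : mStream(std::move(aStream)), mTrackID(aTrackID) {}

  // Graph thread: append data to the track until it reaches aDesiredTime.
  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
                  StreamTime aDesiredTime) override {
    AudioSegment segment;
    segment.AppendNullData(aDesiredTime - aEndOfAppendedData);  // silence
    mStream->AppendToTrack(mTrackID, &segment);
  }

  const RefPtr<SourceMediaStream> mStream;
  const TrackID mTrackID;
};

The source creates such a listener itself, attaches it and enables pulling via a main-thread dispatch from its own Start(), and detaches it again in Stop(), as the MediaEngineDefaultAudioSource hunks show. MediaManager no longer proxies Pull() calls at all.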
dom/media/MediaManager.cpp
dom/media/webrtc/MediaEngineDefault.cpp
dom/media/webrtc/MediaEngineDefault.h
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.h
dom/media/webrtc/MediaEngineSource.h
dom/media/webrtc/MediaEngineTabVideoSource.cpp
dom/media/webrtc/MediaEngineTabVideoSource.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -216,24 +216,19 @@ using dom::OwningStringOrStringSequenceO
 using dom::Promise;
 using dom::Sequence;
 using media::NewRunnableFrom;
 using media::NewTaskFrom;
 using media::Refcountable;
 
 static Atomic<bool> sHasShutdown;
 
-class SourceTrackListener;
-
 struct DeviceState {
-  DeviceState(const RefPtr<MediaDevice>& aDevice, bool aOffWhileDisabled,
-              RefPtr<SourceTrackListener> aListener)
-      : mOffWhileDisabled(aOffWhileDisabled),
-        mDevice(aDevice),
-        mListener(std::move(aListener)) {
+  DeviceState(const RefPtr<MediaDevice>& aDevice, bool aOffWhileDisabled)
+      : mOffWhileDisabled(aOffWhileDisabled), mDevice(aDevice) {
     MOZ_ASSERT(mDevice);
   }
 
   // true if we have stopped mDevice; this is a terminal state.
   // MainThread only.
   bool mStopped = false;
 
   // true if mDevice is currently enabled, i.e., turned on and capturing.
@@ -262,20 +257,16 @@ struct DeviceState {
   // disabled. When the timer fires we initiate Stop()ing mDevice.
   // If set we allow dynamically stopping and starting mDevice.
   // Any thread.
   const RefPtr<MediaTimer> mDisableTimer = new MediaTimer();
 
   // The underlying device we keep state for. Always non-null.
   // Threadsafe access, but see method declarations for individual constraints.
   const RefPtr<MediaDevice> mDevice;
-
-  // The track listener for the track hooked up to mDevice.
-  // Main thread only.
-  RefPtr<SourceTrackListener> mListener;
 };
 
 /**
  * This mimics the capture state from nsIMediaManagerService.
  */
 enum class CaptureState : uint16_t {
   Off = nsIMediaManagerService::STATE_NOCAPTURE,
   Enabled = nsIMediaManagerService::STATE_CAPTURE_ENABLED,
@@ -311,20 +302,16 @@ void MediaManager::CallOnSuccess(GetUser
   aCallback.Call(aStream);
 }
 
 /**
  * SourceListener has threadsafe refcounting for use across the main, media and
  * MSG threads. But it has a non-threadsafe SupportsWeakPtr for WeakPtr usage
  * only from main thread, to ensure that garbage- and cycle-collected objects
  * don't hold a reference to it during late shutdown.
- *
- * There's also a hard reference to the SourceListener through its
- * SourceStreamListener and the MediaStreamGraph. MediaStreamGraph
- * clears this on XPCOM_WILL_SHUTDOWN, before MediaManager enters shutdown.
  */
 class SourceListener : public SupportsWeakPtr<SourceListener> {
  public:
   typedef MozPromise<bool /* aIgnored */, RefPtr<MediaMgrError>, true>
       SourceListenerPromise;
 
   MOZ_DECLARE_WEAKREFERENCE_TYPENAME(SourceListener)
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING_WITH_MAIN_THREAD_DESTRUCTION_AND_RECORDING(
@@ -411,23 +398,16 @@ class SourceListener : public SupportsWe
     return mAudioDeviceState ? mAudioDeviceState->mDevice.get() : nullptr;
   }
 
   MediaDevice* GetVideoDevice() const {
     return mVideoDeviceState ? mVideoDeviceState->mDevice.get() : nullptr;
   }
 
   /**
-   * Called on MediaStreamGraph thread when MSG asks us for more data from
-   * input devices.
-   */
-  void Pull(TrackID aTrackID, StreamTime aEndOfAppendedData,
-            StreamTime aDesiredTime);
-
-  /**
    * Called on main thread after MediaStreamGraph notifies us that one of our
    * track listeners was removed as listener from its track in the graph.
    */
   void NotifyRemoved(TrackID aTrackID);
 
   bool Activated() const { return mStream; }
 
   bool Stopped() const { return mStopped; }
@@ -464,72 +444,30 @@ class SourceListener : public SupportsWe
 
   // true after this listener has been removed from its MediaStream.
   // MainThread only.
   bool mRemoved;
 
   // never ever indirect off this; just for assertions
   PRThread* mMainThreadCheck;
 
-  // For access to mMainThreadCheck
-  friend class SourceTrackListener;
-
   // Set in Register() on main thread, then read from any thread.
   PrincipalHandle mPrincipalHandle;
 
   // Weak pointer to the window listener that owns us. MainThread only.
   GetUserMediaWindowListener* mWindowListener;
 
   // Accessed from MediaStreamGraph thread, MediaManager thread, and MainThread
   // No locking needed as they're set on Activate() and never assigned to again.
   UniquePtr<DeviceState> mAudioDeviceState;
   UniquePtr<DeviceState> mVideoDeviceState;
   RefPtr<SourceMediaStream> mStream;  // threadsafe refcnt
 };
 
 /**
- * Wrapper class for the MediaStreamTrackListener part of SourceListener.
- *
- * This is required since MediaStreamTrackListener and SupportsWeakPtr
- * both implement refcounting.
- */
-class SourceTrackListener : public MediaStreamTrackListener {
- public:
-  SourceTrackListener(SourceListener* aSourceListener, TrackID aTrackID)
-      : mSourceListener(aSourceListener), mTrackID(aTrackID) {}
-
-  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
-                  StreamTime aDesiredTime) override {
-    mSourceListener->Pull(mTrackID, aEndOfAppendedData, aDesiredTime);
-  }
-
-  void NotifyEnded() override { NotifyRemoved(); }
-
-  void NotifyRemoved() override {
-    nsCOMPtr<nsIEventTarget> target = GetMainThreadEventTarget();
-    if (NS_WARN_IF(!target)) {
-      NS_ASSERTION(false,
-                   "Mainthread not available; running on current thread");
-      // Ensure this really *was* MainThread (NS_GetCurrentThread won't work)
-      MOZ_RELEASE_ASSERT(mSourceListener->mMainThreadCheck ==
-                         GetCurrentVirtualThread());
-      mSourceListener->NotifyRemoved(mTrackID);
-      return;
-    }
-    target->Dispatch(NewRunnableMethod<TrackID>(
-        "SourceListener::NotifyRemoved", mSourceListener,
-        &SourceListener::NotifyRemoved, mTrackID));
-  }
-
- private:
-  const RefPtr<SourceListener> mSourceListener;
-  const TrackID mTrackID;
-};
-
-/**
  * This class represents a WindowID and handles all MediaStreamTrackListeners
  * (here subclassed as SourceListeners) used to feed GetUserMedia source
  * streams. It proxies feedback from them into messages for browser chrome.
  * The SourceListeners are used to Start() and Stop() the underlying
  * MediaEngineSource when MediaStreams are assigned and deassigned in content.
  */
 class GetUserMediaWindowListener {
   friend MediaManager;
@@ -1017,28 +955,16 @@ nsresult MediaDevice::Stop() {
 }
 
 nsresult MediaDevice::Deallocate() {
   MOZ_ASSERT(MediaManager::IsInMediaThread());
   MOZ_ASSERT(mSource);
   return mSource->Deallocate(mAllocationHandle);
 }
 
-void MediaDevice::Pull(const RefPtr<SourceMediaStream>& aStream,
-                       TrackID aTrackID, StreamTime aEndOfAppendedData,
-                       StreamTime aDesiredTime,
-                       const PrincipalHandle& aPrincipal) {
-  // This is on the graph thread, but mAllocationHandle is safe since we never
-  // change it after it's been set, which is guaranteed to happen before
-  // registering the listener for pulls.
-  MOZ_ASSERT(mSource);
-  mSource->Pull(mAllocationHandle, aStream, aTrackID, aEndOfAppendedData,
-                aDesiredTime, aPrincipal);
-}
-
 dom::MediaSourceEnum MediaDevice::GetMediaSource() const {
   // Threadsafe because mSource is const. GetMediaSource() might have other
   // requirements.
   MOZ_ASSERT(mSource);
   return mSource->GetMediaSource();
 }
 
 static bool IsOn(const OwningBooleanOrMediaTrackConstraints& aUnion) {
@@ -4200,29 +4126,25 @@ void SourceListener::Activate(SourceMedi
   mMainThreadCheck = GetCurrentVirtualThread();
   mStream = aStream;
   if (aAudioDevice) {
     mAudioDeviceState = MakeUnique<DeviceState>(
         aAudioDevice,
         aAudioDevice->GetMediaSource() == dom::MediaSourceEnum::Microphone &&
             Preferences::GetBool(
                 "media.getusermedia.microphone.off_while_disabled.enabled",
-                true),
-        MakeRefPtr<SourceTrackListener>(this, kAudioTrack));
-    mStream->AddTrackListener(mAudioDeviceState->mListener, kAudioTrack);
+                true));
   }
 
   if (aVideoDevice) {
     mVideoDeviceState = MakeUnique<DeviceState>(
         aVideoDevice,
         aVideoDevice->GetMediaSource() == dom::MediaSourceEnum::Camera &&
             Preferences::GetBool(
-                "media.getusermedia.camera.off_while_disabled.enabled", true),
-        MakeRefPtr<SourceTrackListener>(this, kVideoTrack));
-    mStream->AddTrackListener(mVideoDeviceState->mListener, kVideoTrack);
+                "media.getusermedia.camera.off_while_disabled.enabled", true));
   }
 }
 
 RefPtr<SourceListener::SourceListenerPromise>
 SourceListener::InitializeAsync() {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
   MOZ_DIAGNOSTIC_ASSERT(!mStopped);
 
@@ -4369,38 +4291,16 @@ void SourceListener::Remove() {
 
   if (!mStream || mRemoved) {
     return;
   }
 
   LOG("SourceListener %p removed on purpose", this);
   mRemoved = true;  // RemoveListener is async, avoid races
   mWindowListener = nullptr;
-
-  // If it's destroyed, don't call - listener will be removed and we'll be
-  // notified!
-  if (!mStream->IsDestroyed()) {
-    // We disable pulling before removing so we don't risk having live tracks
-    // without a listener attached - that wouldn't produce data and would be
-    // illegal to the graph.
-    if (mAudioDeviceState) {
-      mStream->SetPullingEnabled(kAudioTrack, false);
-      mStream->RemoveTrackListener(mAudioDeviceState->mListener, kAudioTrack);
-    }
-    if (mVideoDeviceState) {
-      mStream->RemoveTrackListener(mVideoDeviceState->mListener, kVideoTrack);
-    }
-  }
-
-  if (mAudioDeviceState) {
-    mAudioDeviceState->mListener = nullptr;
-  }
-  if (mVideoDeviceState) {
-    mVideoDeviceState->mListener = nullptr;
-  }
 }
 
 void SourceListener::StopTrack(TrackID aTrackID) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
   MOZ_ASSERT(Activated(), "No device to stop");
   MOZ_ASSERT(aTrackID == kAudioTrack || aTrackID == kVideoTrack,
              "Unknown track id");
   DeviceState& state = GetDeviceStateFor(aTrackID);
@@ -4643,24 +4543,16 @@ void SourceListener::StopSharing() {
   }
 }
 
 SourceMediaStream* SourceListener::GetSourceStream() {
   NS_ASSERTION(mStream, "Getting stream from never-activated SourceListener");
   return mStream;
 }
 
-// Proxy Pull() to the right source
-void SourceListener::Pull(TrackID aTrackID, StreamTime aEndOfAppendedData,
-                          StreamTime aDesiredTime) {
-  DeviceState& state = GetDeviceStateFor(aTrackID);
-  state.mDevice->Pull(mStream, aTrackID, aEndOfAppendedData, aDesiredTime,
-                      mPrincipalHandle);
-}
-
 void SourceListener::NotifyRemoved(TrackID aTrackID) {
   MOZ_ASSERT(NS_IsMainThread());
   LOG("Track %d for SourceListener %p removed", aTrackID, this);
 
   StopTrack(aTrackID);
 
   if (!mStopped) {
     // There are more live tracks that need to be stopped before removal.
@@ -4670,18 +4562,16 @@ void SourceListener::NotifyRemoved(Track
   if (!mWindowListener) {
     // Removed explicitly before MSG's notification.
     return;
   }
 
   mWindowListener->Remove(this);
 
   MOZ_ASSERT(!mWindowListener);
-  MOZ_ASSERT_IF(mAudioDeviceState, !mAudioDeviceState->mListener);
-  MOZ_ASSERT_IF(mVideoDeviceState, !mVideoDeviceState->mListener);
 }
 
 bool SourceListener::CapturingVideo() const {
   MOZ_ASSERT(NS_IsMainThread());
   return Activated() && mVideoDeviceState && !mVideoDeviceState->mStopped &&
          (!mVideoDeviceState->mDevice->mSource->IsFake() ||
           Preferences::GetBool("media.navigator.permission.fake"));
 }
--- a/dom/media/webrtc/MediaEngineDefault.cpp
+++ b/dom/media/webrtc/MediaEngineDefault.cpp
@@ -3,25 +3,27 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaEngineDefault.h"
 
 #include "ImageContainer.h"
 #include "ImageTypes.h"
 #include "Layers.h"
 #include "MediaStreamGraph.h"
+#include "MediaStreamListener.h"
 #include "MediaTrackConstraints.h"
 #include "mozilla/dom/File.h"
 #include "mozilla/UniquePtr.h"
 #include "nsCOMPtr.h"
 #include "nsContentUtils.h"
 #include "nsIFile.h"
 #include "nsIFilePicker.h"
 #include "nsIPrefBranch.h"
 #include "nsIPrefService.h"
+#include "SineWaveGenerator.h"
 #include "Tracing.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #  include "nsISupportsUtils.h"
 #endif
 
 #ifdef MOZ_WEBRTC
 #  include "YuvStamper.h"
@@ -321,32 +323,50 @@ void MediaEngineDefaultVideoSource::Gene
   if (!setData) {
     return;
   }
 
   VideoSegment segment;
   segment.AppendFrame(ycbcr_image.forget(),
                       gfx::IntSize(mOpts.mWidth, mOpts.mHeight),
                       mPrincipalHandle);
-  ;
   mStream->AppendToTrack(mTrackID, &segment);
 }
 
-void MediaEngineDefaultVideoSource::Pull(
-    const RefPtr<const AllocationHandle>& aHandle,
-    const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-    StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-    const PrincipalHandle& aPrincipalHandle) {}
+// This class is created on the media thread, as part of Start(), then entirely
+// self-sustained until destruction, producing the track's data in NotifyPull().
+class AudioSourcePullListener : public MediaStreamTrackListener {
+ public:
+  AudioSourcePullListener(RefPtr<SourceMediaStream> aStream, TrackID aTrackID,
+                          const PrincipalHandle& aPrincipalHandle,
+                          uint32_t aFrequency)
+      : mStream(std::move(aStream)),
+        mTrackID(aTrackID),
+        mPrincipalHandle(aPrincipalHandle),
+        mSineGenerator(
+            MakeUnique<SineWaveGenerator>(mStream->GraphRate(), aFrequency)) {
+    MOZ_COUNT_CTOR(AudioSourcePullListener);
+  }
+
+  ~AudioSourcePullListener() { MOZ_COUNT_DTOR(AudioSourcePullListener); }
+
+  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
+                  StreamTime aDesiredTime) override;
+
+  const RefPtr<SourceMediaStream> mStream;
+  const TrackID mTrackID;
+  const PrincipalHandle mPrincipalHandle;
+  const UniquePtr<SineWaveGenerator> mSineGenerator;
+};
 
 /**
  * Default audio source.
  */
 
-MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
-    : mMutex("MediaEngineDefaultAudioSource::mMutex") {}
+MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource() = default;
 
 MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource() = default;
 
 nsString MediaEngineDefaultAudioSource::GetName() const {
   return NS_LITERAL_STRING(u"Default Audio Device");
 }
 
 nsCString MediaEngineDefaultAudioSource::GetUUID() const {
@@ -388,36 +408,35 @@ nsresult MediaEngineDefaultAudioSource::
 
   // Mock failure for automated tests.
   if (aConstraints.mDeviceId.WasPassed() &&
       aConstraints.mDeviceId.Value().IsString() &&
       aConstraints.mDeviceId.Value().GetAsString().EqualsASCII("bad device")) {
     return NS_ERROR_FAILURE;
   }
 
-  mFreq = aPrefs.mFreq ? aPrefs.mFreq : 1000;
+  mFrequency = aPrefs.mFreq ? aPrefs.mFreq : 1000;
   *aOutHandle = nullptr;
 
-  MutexAutoLock lock(mMutex);
   mState = kAllocated;
   return NS_OK;
 }
 
 nsresult MediaEngineDefaultAudioSource::Deallocate(
     const RefPtr<const AllocationHandle>& aHandle) {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(!aHandle);
   MOZ_ASSERT(mState == kStopped || mState == kAllocated);
 
-  MutexAutoLock lock(mMutex);
   if (mStream && IsTrackIDExplicit(mTrackID)) {
     mStream->EndTrack(mTrackID);
     mStream = nullptr;
     mTrackID = TRACK_NONE;
+    mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
   }
   mState = kReleased;
   return NS_OK;
 }
 
 void MediaEngineDefaultAudioSource::SetTrack(
     const RefPtr<const AllocationHandle>& aHandle,
     const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
@@ -426,111 +445,94 @@ void MediaEngineDefaultAudioSource::SetT
 
   MOZ_ASSERT(mState == kAllocated);
   MOZ_ASSERT(!mStream);
   MOZ_ASSERT(mTrackID == TRACK_NONE);
 
   // AddAudioTrack will take ownership of segment
   mStream = aStream;
   mTrackID = aTrackID;
+  mPrincipalHandle = aPrincipal;
   aStream->AddAudioTrack(aTrackID, aStream->GraphRate(), new AudioSegment(),
                          SourceMediaStream::ADDTRACK_QUEUED);
 }
 
 nsresult MediaEngineDefaultAudioSource::Start(
     const RefPtr<const AllocationHandle>& aHandle) {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(mState == kAllocated || mState == kStopped);
   MOZ_ASSERT(mStream, "SetTrack() must happen before Start()");
   MOZ_ASSERT(IsTrackIDExplicit(mTrackID),
              "SetTrack() must happen before Start()");
 
-  if (!mSineGenerator) {
-    // generate sine wave (default 1KHz)
-    mSineGenerator = new SineWaveGenerator(mStream->GraphRate(), mFreq);
+  if (!mPullListener) {
+    mPullListener = MakeAndAddRef<AudioSourcePullListener>(
+        mStream, mTrackID, mPrincipalHandle, mFrequency);
   }
 
-  {
-    MutexAutoLock lock(mMutex);
-    mState = kStarted;
-  }
+  mState = kStarted;
 
-  NS_DispatchToMainThread(
-      NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__,
+      [stream = mStream, track = mTrackID, listener = mPullListener]() {
         if (stream->IsDestroyed()) {
           return;
         }
+        stream->AddTrackListener(listener, track);
         stream->SetPullingEnabled(track, true);
       }));
 
   return NS_OK;
 }
 
 nsresult MediaEngineDefaultAudioSource::Stop(
     const RefPtr<const AllocationHandle>& aHandle) {
   AssertIsOnOwningThread();
 
   if (mState == kStopped || mState == kAllocated) {
     return NS_OK;
   }
-
   MOZ_ASSERT(mState == kStarted);
-
-  {
-    MutexAutoLock lock(mMutex);
-    mState = kStopped;
-  }
+  mState = kStopped;
 
   NS_DispatchToMainThread(
-      NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID]() {
+      NS_NewRunnableFunction(__func__, [stream = mStream, track = mTrackID,
+                                        listener = std::move(mPullListener)]() {
         if (stream->IsDestroyed()) {
           return;
         }
+        stream->RemoveTrackListener(listener, track);
         stream->SetPullingEnabled(track, false);
       }));
   return NS_OK;
 }
 
 nsresult MediaEngineDefaultAudioSource::Reconfigure(
     const RefPtr<AllocationHandle>& aHandle,
     const dom::MediaTrackConstraints& aConstraints,
     const MediaEnginePrefs& aPrefs, const nsString& aDeviceId,
     const char** aOutBadConstraint) {
   return NS_OK;
 }
 
-void MediaEngineDefaultAudioSource::AppendToSegment(
-    AudioSegment& aSegment, TrackTicks aSamples,
-    const PrincipalHandle& aPrincipalHandle) {
-  RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(aSamples * sizeof(int16_t));
+void AudioSourcePullListener::NotifyPull(MediaStreamGraph* aGraph,
+                                         StreamTime aEndOfAppendedData,
+                                         StreamTime aDesiredTime) {
+  TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", mStream.get(),
+                               mTrackID);
+  AudioSegment segment;
+  TrackTicks delta = aDesiredTime - aEndOfAppendedData;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(delta * sizeof(int16_t));
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
-
-  mSineGenerator->generate(dest, aSamples);
+  mSineGenerator->generate(dest, delta);
   AutoTArray<const int16_t*, 1> channels;
   channels.AppendElement(dest);
-  aSegment.AppendFrames(buffer.forget(), channels, aSamples, aPrincipalHandle);
-}
-
-void MediaEngineDefaultAudioSource::Pull(
-    const RefPtr<const AllocationHandle>& aHandle,
-    const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-    StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-    const PrincipalHandle& aPrincipalHandle) {
-  TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),
-                               aTrackID);
-  AudioSegment segment;
-  // avoid accumulating rounding errors
-  TrackTicks desired =
-      aStream->TimeToTicksRoundUp(aStream->GraphRate(), aDesiredTime);
-  TrackTicks delta = desired - mLastNotify;
-  mLastNotify += delta;
-  AppendToSegment(segment, delta, aPrincipalHandle);
-  aStream->AppendToTrack(aTrackID, &segment);
+  segment.AppendFrames(buffer.forget(), channels, delta, mPrincipalHandle);
+  mStream->AppendToTrack(mTrackID, &segment);
 }
 
 void MediaEngineDefault::EnumerateDevices(
     uint64_t aWindowId, dom::MediaSourceEnum aMediaSource,
     MediaSinkEnum aMediaSink, nsTArray<RefPtr<MediaDevice>>* aDevices) {
   AssertIsOnOwningThread();
 
   switch (aMediaSource) {
--- a/dom/media/webrtc/MediaEngineDefault.h
+++ b/dom/media/webrtc/MediaEngineDefault.h
@@ -17,17 +17,16 @@
 #include "VideoUtils.h"
 #include "MediaEngine.h"
 #include "MediaEnginePrefs.h"
 #include "VideoSegment.h"
 #include "AudioSegment.h"
 #include "StreamTracks.h"
 #include "MediaEngineSource.h"
 #include "MediaStreamGraph.h"
-#include "SineWaveGenerator.h"
 
 namespace mozilla {
 
 namespace layers {
 class ImageContainer;
 }  // namespace layers
 
 class MediaEngineDefault;
@@ -54,20 +53,16 @@ class MediaEngineDefaultVideoSource : pu
   nsresult Start(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Deallocate(const RefPtr<const AllocationHandle>& aHandle) override;
-  void Pull(const RefPtr<const AllocationHandle>& aHandle,
-            const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-            StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle) override;
 
   uint32_t GetBestFitnessDistance(
       const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
       const nsString& aDeviceId) const override;
 
   bool IsFake() const override { return true; }
 
   dom::MediaSourceEnum GetMediaSource() const override {
@@ -96,17 +91,17 @@ class MediaEngineDefaultVideoSource : pu
   MediaEnginePrefs mOpts;
   int mCb = 16;
   int mCr = 16;
 
  private:
   const nsString mName;
 };
 
-class SineWaveGenerator;
+class AudioSourcePullListener;
 
 class MediaEngineDefaultAudioSource : public MediaEngineSource {
  public:
   MediaEngineDefaultAudioSource();
 
   nsString GetName() const override;
   nsCString GetUUID() const override;
   nsString GetGroupId() const override;
@@ -122,53 +117,39 @@ class MediaEngineDefaultAudioSource : pu
   nsresult Start(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Deallocate(const RefPtr<const AllocationHandle>& aHandle) override;
-  void inline AppendToSegment(AudioSegment& aSegment, TrackTicks aSamples,
-                              const PrincipalHandle& aPrincipalHandle);
-  void Pull(const RefPtr<const AllocationHandle>& aHandle,
-            const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-            StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle) override;
 
   bool IsFake() const override { return true; }
 
   dom::MediaSourceEnum GetMediaSource() const override {
     return dom::MediaSourceEnum::Microphone;
   }
 
   uint32_t GetBestFitnessDistance(
       const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
       const nsString& aDeviceId) const override;
 
   bool IsAvailable() const;
 
  protected:
   ~MediaEngineDefaultAudioSource();
 
-  // mMutex protects mState, mStream, mTrackID
-  Mutex mMutex;
-
   // Current state of this source.
-  // Set under mMutex on the owning thread. Accessed under one of the two.
   MediaEngineSourceState mState = kReleased;
   RefPtr<SourceMediaStream> mStream;
   TrackID mTrackID = TRACK_NONE;
-
-  // Accessed in Pull (from MSG thread)
-  TrackTicks mLastNotify = 0;
-  uint32_t mFreq = 1000;  // ditto
-
-  // Created on Start, then accessed from Pull (MSG thread)
-  nsAutoPtr<SineWaveGenerator> mSineGenerator;
+  PrincipalHandle mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
+  uint32_t mFrequency = 1000;
+  RefPtr<AudioSourcePullListener> mPullListener;
 };
 
 class MediaEngineDefault : public MediaEngine {
  public:
   MediaEngineDefault() = default;
 
   void EnumerateDevices(uint64_t aWindowId, dom::MediaSourceEnum, MediaSinkEnum,
                         nsTArray<RefPtr<MediaDevice>>*) override;
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -478,22 +478,16 @@ webrtc::CaptureCapability MediaEngineRem
     result = mHardcodedCapabilities.SafeElementAt(aIndex,
                                                   webrtc::CaptureCapability());
   }
   camera::GetChildAndCall(&camera::CamerasChild::GetCaptureCapability,
                           mCapEngine, mUniqueId.get(), aIndex, result);
   return result;
 }
 
-void MediaEngineRemoteVideoSource::Pull(
-    const RefPtr<const AllocationHandle>& aHandle,
-    const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-    StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-    const PrincipalHandle& aPrincipalHandle) {}
-
 int MediaEngineRemoteVideoSource::DeliverFrame(
     uint8_t* aBuffer, const camera::VideoFrameProperties& aProps) {
   // Cameras IPC thread - take great care with accessing members!
 
   int32_t req_max_width;
   int32_t req_max_height;
   int32_t req_ideal_width;
   int32_t req_ideal_height;
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.h
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.h
@@ -130,20 +130,16 @@ class MediaEngineRemoteVideoSource : pub
   nsresult Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
   nsresult FocusOnSelectedSource(
       const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
-  void Pull(const RefPtr<const AllocationHandle>& aHandle,
-            const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-            StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle) override;
 
   void GetSettings(dom::MediaTrackSettings& aOutSettings) const override;
 
   void Refresh(int aIndex);
 
   void Shutdown() override;
 
   nsString GetName() const override;
--- a/dom/media/webrtc/MediaEngineSource.h
+++ b/dom/media/webrtc/MediaEngineSource.h
@@ -256,26 +256,16 @@ class MediaEngineSourceInterface {
    *
    * Note that this might not be the settings of the underlying hardware.
    * In case of a camera where we intervene and scale frames to avoid
    * leaking information from other documents than the current one,
    * GetSettings() will return the scaled resolution. I.e., the
    * device settings as seen by js.
    */
   virtual void GetSettings(dom::MediaTrackSettings& aOutSettings) const = 0;
-
-  /**
-   * Pulls data from the MediaEngineSource into the track.
-   *
-   * Driven by MediaStreamTrackListener::NotifyPull.
-   */
-  virtual void Pull(const RefPtr<const AllocationHandle>& aHandle,
-                    const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-                    StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-                    const PrincipalHandle& aPrincipalHandle) = 0;
 };
 
 /**
  * Abstract base class for MediaEngineSources.
  *
  * Implements defaults for some common MediaEngineSourceInterface methods below.
  * Also implements RefPtr support and an owning-thread model for thread safety
  * checks in subclasses.
--- a/dom/media/webrtc/MediaEngineTabVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -259,22 +259,16 @@ nsresult MediaEngineTabVideoSource::Star
     runnable = new StartRunnable(this, mStream, mTrackID, mPrincipalHandle);
   }
   NS_DispatchToMainThread(runnable);
   mState = kStarted;
 
   return NS_OK;
 }
 
-void MediaEngineTabVideoSource::Pull(
-    const RefPtr<const AllocationHandle>& aHandle,
-    const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-    StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-    const PrincipalHandle& aPrincipalHandle) {}
-
 void MediaEngineTabVideoSource::Draw() {
   MOZ_ASSERT(NS_IsMainThread());
 
   if (!mWindow && !mBlackedoutWindow) {
     return;
   }
 
   if (mWindow) {
--- a/dom/media/webrtc/MediaEngineTabVideoSource.h
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.h
@@ -43,21 +43,16 @@ class MediaEngineTabVideoSource : public
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
   nsresult FocusOnSelectedSource(
       const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
 
-  void Pull(const RefPtr<const AllocationHandle>& aHandle,
-            const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-            StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle) override;
-
   uint32_t GetBestFitnessDistance(
       const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
       const nsString& aDeviceId) const override {
     return 0;
   }
 
   void Draw();
 
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -184,37 +184,26 @@ nsresult MediaEngineWebRTCMicrophoneSour
 
   ApplySettings(outputPrefs);
 
   mCurrentPrefs = outputPrefs;
 
   return NS_OK;
 }
 
-void MediaEngineWebRTCMicrophoneSource::Pull(
-    const RefPtr<const AllocationHandle>&,
-    const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-    StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-    const PrincipalHandle& aPrincipalHandle) {
-  // If pull is enabled, it means that the audio input is not open, and we
-  // should fill it out with silence. This is the only method called on the
-  // MSG thread.
-  mInputProcessing->Pull(aStream, aTrackID, aEndOfAppendedData, aDesiredTime,
-                         aPrincipalHandle);
-}
-
 void MediaEngineWebRTCMicrophoneSource::UpdateAECSettings(
     bool aEnable, bool aUseAecMobile,
     EchoCancellation::SuppressionLevel aLevel) {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, graph = std::move(gripGraph), aEnable, aUseAecMobile, aLevel]() {
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__,
+      [that, graph = std::move(gripGraph), aEnable, aUseAecMobile, aLevel] {
         class Message : public ControlMessage {
          public:
           Message(AudioInputProcessing* aInputProcessing, bool aEnable,
                   bool aUseAecMobile, EchoCancellation::SuppressionLevel aLevel)
               : ControlMessage(nullptr),
                 mInputProcessing(aInputProcessing),
                 mEnable(aEnable),
                 mUseAecMobile(aUseAecMobile),
@@ -230,29 +219,27 @@ void MediaEngineWebRTCMicrophoneSource::
           bool mUseAecMobile;
           EchoCancellation::SuppressionLevel mLevel;
         };
 
         if (graph) {
           graph->AppendMessage(MakeUnique<Message>(
               that->mInputProcessing, aEnable, aUseAecMobile, aLevel));
         }
-
-        return NS_OK;
       }));
 }
 
 void MediaEngineWebRTCMicrophoneSource::UpdateAGCSettings(
     bool aEnable, GainControl::Mode aMode) {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, graph = std::move(gripGraph), aEnable, aMode]() {
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__, [that, graph = std::move(gripGraph), aEnable, aMode] {
         class Message : public ControlMessage {
          public:
           Message(AudioInputProcessing* aInputProcessing, bool aEnable,
                   GainControl::Mode aMode)
               : ControlMessage(nullptr),
                 mInputProcessing(aInputProcessing),
                 mEnable(aEnable),
                 mMode(aMode) {}
@@ -266,29 +253,27 @@ void MediaEngineWebRTCMicrophoneSource::
           bool mEnable;
           GainControl::Mode mMode;
         };
 
         if (graph) {
           graph->AppendMessage(
               MakeUnique<Message>(that->mInputProcessing, aEnable, aMode));
         }
-
-        return NS_OK;
       }));
 }
 
 void MediaEngineWebRTCMicrophoneSource::UpdateNSSettings(
     bool aEnable, webrtc::NoiseSuppression::Level aLevel) {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, graph = std::move(gripGraph), aEnable, aLevel]() {
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__, [that, graph = std::move(gripGraph), aEnable, aLevel] {
         class Message : public ControlMessage {
          public:
           Message(AudioInputProcessing* aInputProcessing, bool aEnable,
                   webrtc::NoiseSuppression::Level aLevel)
               : ControlMessage(nullptr),
                 mInputProcessing(aInputProcessing),
                 mEnable(aEnable),
                 mLevel(aLevel) {}
@@ -302,29 +287,28 @@ void MediaEngineWebRTCMicrophoneSource::
           bool mEnable;
           webrtc::NoiseSuppression::Level mLevel;
         };
 
         if (graph) {
           graph->AppendMessage(
               MakeUnique<Message>(that->mInputProcessing, aEnable, aLevel));
         }
-
-        return NS_OK;
       }));
 }
 
 void MediaEngineWebRTCMicrophoneSource::UpdateAPMExtraOptions(
     bool aExtendedFilter, bool aDelayAgnostic) {
   AssertIsOnOwningThread();
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, graph = std::move(gripGraph), aExtendedFilter, aDelayAgnostic]() {
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__,
+      [that, graph = std::move(gripGraph), aExtendedFilter, aDelayAgnostic] {
         class Message : public ControlMessage {
          public:
           Message(AudioInputProcessing* aInputProcessing, bool aExtendedFilter,
                   bool aDelayAgnostic)
               : ControlMessage(nullptr),
                 mInputProcessing(aInputProcessing),
                 mExtendedFilter(aExtendedFilter),
                 mDelayAgnostic(aDelayAgnostic) {}
@@ -339,18 +323,16 @@ void MediaEngineWebRTCMicrophoneSource::
           bool mExtendedFilter;
           bool mDelayAgnostic;
         };
 
         if (graph) {
           graph->AppendMessage(MakeUnique<Message>(
               that->mInputProcessing, aExtendedFilter, aDelayAgnostic));
         }
-
-        return NS_OK;
       }));
 }
 
 void MediaEngineWebRTCMicrophoneSource::ApplySettings(
     const MediaEnginePrefs& aPrefs) {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(
@@ -367,18 +349,18 @@ void MediaEngineWebRTCMicrophoneSource::
         aPrefs.mAecOn, aPrefs.mUseAecMobile,
         static_cast<webrtc::EchoCancellation::SuppressionLevel>(aPrefs.mAec));
 
     UpdateAPMExtraOptions(mExtendedFilter, mDelayAgnostic);
   }
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> graphImpl = mStream->GraphImpl();
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, graph = std::move(graphImpl), prefs = aPrefs]() {
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__, [that, graph = std::move(graphImpl), prefs = aPrefs] {
         that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
         that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
         that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
         that->mSettings->mChannelCount.Value() = prefs.mChannels;
 
         class Message : public ControlMessage {
          public:
           Message(AudioInputProcessing* aInputProcessing, bool aPassThrough,
@@ -400,18 +382,16 @@ void MediaEngineWebRTCMicrophoneSource::
           uint32_t mRequestedInputChannelCount;
         };
 
         bool passThrough = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
         if (graph) {
           graph->AppendMessage(MakeUnique<Message>(
               that->mInputProcessing, passThrough, prefs.mChannels));
         }
-
-        return NS_OK;
       }));
 }
 
 nsresult MediaEngineWebRTCMicrophoneSource::Allocate(
     const dom::MediaTrackConstraints& aConstraints,
     const MediaEnginePrefs& aPrefs, const nsString& aDeviceId,
     const ipc::PrincipalInfo& aPrincipalInfo, AllocationHandle** aOutHandle,
     const char** aOutBadConstraint) {
@@ -426,23 +406,23 @@ nsresult MediaEngineWebRTCMicrophoneSour
   MediaEnginePrefs outputPrefs;
   nsresult rv =
       EvaluateSettings(normalized, aPrefs, &outputPrefs, aOutBadConstraint);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
-  NS_DispatchToMainThread(media::NewRunnableFrom([that, prefs = outputPrefs]() {
-    that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
-    that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
-    that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
-    that->mSettings->mChannelCount.Value() = prefs.mChannels;
-    return NS_OK;
-  }));
+  NS_DispatchToMainThread(
+      NS_NewRunnableFunction(__func__, [that, prefs = outputPrefs] {
+        that->mSettings->mEchoCancellation.Value() = prefs.mAecOn;
+        that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn;
+        that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn;
+        that->mSettings->mChannelCount.Value() = prefs.mChannels;
+      }));
 
   mCurrentPrefs = outputPrefs;
 
   return rv;
 }
 
 nsresult MediaEngineWebRTCMicrophoneSource::Deallocate(
     const RefPtr<const AllocationHandle>&) {
@@ -467,29 +447,28 @@ nsresult MediaEngineWebRTCMicrophoneSour
    protected:
     RefPtr<AudioInputProcessing> mInputProcessing;
     TrackID mTrackID;
   };
 
   if (mStream && IsTrackIDExplicit(mTrackID)) {
     RefPtr<MediaStream> sourceStream = mStream;
     RefPtr<AudioInputProcessing> inputProcessing = mInputProcessing;
-    NS_DispatchToMainThread(media::NewRunnableFrom(
-        [stream = std::move(sourceStream),
-         audioInputProcessing = std::move(inputProcessing),
-         trackID = mTrackID]() {
+    NS_DispatchToMainThread(NS_NewRunnableFunction(
+        __func__, [stream = std::move(sourceStream),
+                   audioInputProcessing = std::move(inputProcessing),
+                   trackID = mTrackID] {
           if (stream->IsDestroyed()) {
             // This stream has already been destroyed on main thread by its
             // DOMMediaStream. No cleanup left to do.
-            return NS_OK;
+            return;
           }
           MOZ_ASSERT(stream->GraphImpl());
           stream->GraphImpl()->AppendMessage(MakeUnique<EndTrackMessage>(
               stream, audioInputProcessing, trackID));
-          return NS_OK;
         }));
   }
 
   MOZ_ASSERT(mTrackID != TRACK_NONE, "Only deallocate once");
 
   // Reset all state. This is not strictly necessary, this instance will get
   // destroyed soon.
   mStream = nullptr;
@@ -517,22 +496,35 @@ void MediaEngineWebRTCMicrophoneSource::
   MOZ_ASSERT(mTrackID == TRACK_NONE);
   MOZ_ASSERT(mPrincipal == PRINCIPAL_HANDLE_NONE);
   mStream = aStream;
   mTrackID = aTrackID;
   mPrincipal = aPrincipal;
 
   AudioSegment* segment = new AudioSegment();
 
-  aStream->AddAudioTrack(aTrackID, aStream->GraphRate(), segment,
+  mStream->AddAudioTrack(mTrackID, mStream->GraphRate(), segment,
                          SourceMediaStream::ADDTRACK_QUEUED);
 
   mInputProcessing = new AudioInputProcessing(mDeviceMaxChannelCount, mStream,
                                               mTrackID, mPrincipal);
 
+  // We only add the listener once -- AudioInputProcessing wants pull
+  // notifications even when stopped, so that it can append silence.
+  mPullListener = new AudioInputProcessingPullListener(mInputProcessing);
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__, [self = RefPtr<MediaEngineWebRTCMicrophoneSource>(this),
+                 stream = mStream, track = mTrackID, listener = mPullListener] {
+        if (stream->IsDestroyed()) {
+          return;
+        }
+
+        stream->AddTrackListener(listener, track);
+      }));
+
   LOG("Stream %p registered for microphone capture", aStream.get());
 }
 
 class StartStopMessage : public ControlMessage {
  public:
   enum StartStop { Start, Stop };
 
   StartStopMessage(AudioInputProcessing* aInputProcessing, StartStop aAction)
@@ -570,28 +562,26 @@ nsresult MediaEngineWebRTCMicrophoneSour
   if (mStream->GraphImpl()->InputDeviceID() &&
       mStream->GraphImpl()->InputDeviceID() != deviceID) {
     // For now, we only allow opening a single audio input device per document,
     // because we can only have one MSG per document.
     return NS_ERROR_FAILURE;
   }
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
-  NS_DispatchToMainThread(media::NewRunnableFrom(
-      [that, deviceID, stream = mStream, track = mTrackID]() {
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      __func__, [that, deviceID, stream = mStream, track = mTrackID] {
         if (stream->IsDestroyed()) {
-          return NS_OK;
+          return;
         }
 
         stream->GraphImpl()->AppendMessage(MakeUnique<StartStopMessage>(
             that->mInputProcessing, StartStopMessage::Start));
         stream->SetPullingEnabled(track, true);
         stream->OpenAudioInput(deviceID, that->mInputProcessing);
-
-        return NS_OK;
       }));
 
   ApplySettings(mCurrentPrefs);
 
   MOZ_ASSERT(mState != kReleased);
   mState = kStarted;
 
   return NS_OK;
@@ -605,29 +595,28 @@ nsresult MediaEngineWebRTCMicrophoneSour
   MOZ_ASSERT(mStream, "SetTrack must have been called before ::Stop");
 
   if (mState == kStopped) {
     // Already stopped - this is allowed
     return NS_OK;
   }
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
-  NS_DispatchToMainThread(media::NewRunnableFrom([that, stream = mStream]() {
-    if (stream->IsDestroyed()) {
-      return NS_OK;
-    }
+  NS_DispatchToMainThread(
+      NS_NewRunnableFunction(__func__, [that, stream = mStream] {
+        if (stream->IsDestroyed()) {
+          return;
+        }
 
-    stream->GraphImpl()->AppendMessage(MakeUnique<StartStopMessage>(
-        that->mInputProcessing, StartStopMessage::Stop));
-    CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
-    Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
-    stream->CloseAudioInput(id, that->mInputProcessing);
-
-    return NS_OK;
-  }));
+        stream->GraphImpl()->AppendMessage(MakeUnique<StartStopMessage>(
+            that->mInputProcessing, StartStopMessage::Stop));
+        CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
+        Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
+        stream->CloseAudioInput(id, that->mInputProcessing);
+      }));
 
   MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
   mState = kStopped;
 
   return NS_OK;
 }
 
 void MediaEngineWebRTCMicrophoneSource::GetSettings(
@@ -786,22 +775,20 @@ void AudioInputProcessing::Start() {
   mLiveSilenceAppended = false;
 #ifdef DEBUG
   mLastCallbackAppendTime = 0;
 #endif
 }
 
 void AudioInputProcessing::Stop() { mEnabled = false; }
 
-void AudioInputProcessing::Pull(const RefPtr<SourceMediaStream>& aStream,
-                                TrackID aTrackID, StreamTime aEndOfAppendedData,
-                                StreamTime aDesiredTime,
-                                const PrincipalHandle& aPrincipalHandle) {
-  TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),
-                               aTrackID);
+void AudioInputProcessing::Pull(StreamTime aEndOfAppendedData,
+                                StreamTime aDesiredTime) {
+  TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", mStream.get(),
+                               mTrackID);
 
   if (mEnded) {
     return;
   }
 
   StreamTime delta = aDesiredTime - aEndOfAppendedData;
   MOZ_ASSERT(delta > 0);
 
@@ -813,20 +800,20 @@ void AudioInputProcessing::Pull(const Re
     // start appending data immediately and there is no extra data buffered.
     delta += WEBAUDIO_BLOCK_SIZE;
 
     // If we're supposed to be packetizing but there's no packetizer yet,
     // there must not have been any live frames appended yet.
     // If there were live frames appended and we haven't appended the
     // right amount of silence, we'll have to append silence once more,
     // failing the other assert below.
-    MOZ_ASSERT_IF(!PassThrough(aStream->GraphImpl()) && !mPacketizerInput,
+    MOZ_ASSERT_IF(!PassThrough(mStream->GraphImpl()) && !mPacketizerInput,
                   !mLiveFramesAppended);
 
-    if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) {
+    if (!PassThrough(mStream->GraphImpl()) && mPacketizerInput) {
       // Processing is active and is processed in chunks of 10ms through the
       // input packetizer. We allow for 10ms of silence on the track to
+      // accommodate the buffering worst-case.
       delta += mPacketizerInput->PacketSize();
     }
   }
 
   LOG_FRAME("Pulling %" PRId64 " frames of silence.", delta);
@@ -836,25 +823,25 @@ void AudioInputProcessing::Pull(const Re
   // Note that this is exempted until live samples and a subsequent chunk of
   // silence have been appended to the track. This will cover cases like:
   // - After Start(), there is silence (maybe multiple times) appended before
   //   the first audio callback.
   // - After Start(), there is real data (maybe multiple times) appended
   //   before the first graph iteration.
   // And other combinations of order of audio sample sources.
   MOZ_ASSERT_IF(mEnabled && mLiveFramesAppended && mLiveSilenceAppended,
-                aStream->GraphImpl()->IterationEnd() > mLastCallbackAppendTime);
+                mStream->GraphImpl()->IterationEnd() > mLastCallbackAppendTime);
 
   if (mLiveFramesAppended) {
     mLiveSilenceAppended = true;
   }
 
   AudioSegment audio;
   audio.AppendNullData(delta);
-  aStream->AppendToTrack(aTrackID, &audio);
+  mStream->AppendToTrack(mTrackID, &audio);
 }
 
 void AudioInputProcessing::NotifyOutputData(MediaStreamGraphImpl* aGraph,
                                             AudioDataValue* aBuffer,
                                             size_t aFrames, TrackRate aRate,
                                             uint32_t aChannels) {
   MOZ_ASSERT(aGraph->OnGraphThread());
   MOZ_ASSERT(mEnabled);
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -1,25 +1,27 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MediaEngineWebRTCAudio_h
 #define MediaEngineWebRTCAudio_h
 
-#include "MediaEngineWebRTC.h"
 #include "AudioPacketizer.h"
 #include "AudioSegment.h"
 #include "AudioDeviceInfo.h"
+#include "MediaEngineWebRTC.h"
+#include "MediaStreamListener.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 
 namespace mozilla {
 
 class AudioInputProcessing;
+class AudioInputProcessingPullListener;
 
 // This class is created and used exclusively on the Media Manager thread, with
 // exactly two exceptions:
 // - Pull is always called on the MSG thread. It only ever uses
 //   mInputProcessing. mInputProcessing is set, then a message is sent first to
 //   the main thread and then the MSG thread so that it can be used as part of
 //   the graph processing. On destruction, similarly, a message is sent to the
 //   graph so that it stops using it, and then it is deleted.
@@ -52,21 +54,16 @@ class MediaEngineWebRTCMicrophoneSource 
   nsresult Start(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
 
-  void Pull(const RefPtr<const AllocationHandle>& aHandle,
-            const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-            StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle) override;
-
   /**
    * Assigns the current settings of the capture to aOutSettings.
    * Main thread only.
    */
   void GetSettings(dom::MediaTrackSettings& aOutSettings) const override;
 
   dom::MediaSourceEnum GetMediaSource() const override {
     return dom::MediaSourceEnum::Microphone;
@@ -137,30 +134,35 @@ class MediaEngineWebRTCMicrophoneSource 
   MediaEnginePrefs mCurrentPrefs;
 
   // The SourceMediaStream on which to append data for this microphone. Set in
   // SetTrack as part of the initialization, and nulled in ::Deallocate.
   RefPtr<SourceMediaStream> mStream;
 
   // See note at the top of this class.
   RefPtr<AudioInputProcessing> mInputProcessing;
+
+  // The class receiving NotifyPull() from the MediaStreamGraph and forwarding
+  // it on the graph thread. This is separated from AudioInputProcessing since
+  // both AudioDataListener (base class of AudioInputProcessing) and
+  // MediaStreamTrackListener (base class of AudioInputProcessingPullListener)
+  // implement refcounting.
+  RefPtr<AudioInputProcessingPullListener> mPullListener;
 };
 
 // This class is created on the MediaManager thread, and then exclusively used
 // on the MSG thread.
 // All communication is done via message passing using MSG ControlMessages
 class AudioInputProcessing : public AudioDataListener {
  public:
   AudioInputProcessing(uint32_t aMaxChannelCount,
                        RefPtr<SourceMediaStream> aStream, TrackID aTrackID,
                        const PrincipalHandle& aPrincipalHandle);
 
-  void Pull(const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-            StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle);
+  void Pull(StreamTime aEndOfAppendedData, StreamTime aDesiredTime);
 
   void NotifyOutputData(MediaStreamGraphImpl* aGraph, AudioDataValue* aBuffer,
                         size_t aFrames, TrackRate aRate,
                         uint32_t aChannels) override;
   void NotifyInputData(MediaStreamGraphImpl* aGraph,
                        const AudioDataValue* aBuffer, size_t aFrames,
                        TrackRate aRate, uint32_t aChannels) override;
 
@@ -196,17 +198,17 @@ class AudioInputProcessing : public Audi
   void UpdateAGCSettings(bool aEnable, webrtc::GainControl::Mode aMode);
   void UpdateNSSettings(bool aEnable, webrtc::NoiseSuppression::Level aLevel);
   void UpdateAPMExtraOptions(bool aExtendedFilter, bool aDelayAgnostic);
 
   void End();
 
  private:
   ~AudioInputProcessing() = default;
-  RefPtr<SourceMediaStream> mStream;
+  const RefPtr<SourceMediaStream> mStream;
   // This implements the processing algorithm to apply to the input (e.g. a
   // microphone). If all algorithms are disabled, this class is not used. This
   // class only accepts audio chunks of 10ms. It has two inputs and one output:
   // it is fed the speaker data and the microphone data. It outputs processed
   // input data.
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
   // Packetizer to be able to feed 10ms packets to the input side of
   // mAudioProcessing. Not used if the processing is bypassed.
@@ -238,27 +240,49 @@ class AudioInputProcessing : public Audi
 #endif
   // Set to false by Start(). Becomes true after the first time we append real
   // audio frames from the audio callback.
   bool mLiveFramesAppended;
   // Set to false by Start(). Becomes true after the first time we append
   // silence *after* the first audio callback has appended real frames.
   bool mLiveSilenceAppended;
   // Track ID on which the data is to be appended after processing
-  TrackID mTrackID;
+  const TrackID mTrackID;
   // Principal for the data that flows through this class.
-  PrincipalHandle mPrincipal;
+  const PrincipalHandle mPrincipal;
   // Whether or not this MediaEngine is enabled. If it's not enabled, it
   // operates in "pull" mode, and we append silence only, releasing the audio
   // input stream.
   bool mEnabled;
   // Whether or not we've ended and removed the track in the SourceMediaStream
   bool mEnded;
 };
 
+// This class is created on the media thread, as part of SetTrack(), then
+// entirely self-sustained until destruction, just forwarding calls to Pull().
+class AudioInputProcessingPullListener : public MediaStreamTrackListener {
+ public:
+  explicit AudioInputProcessingPullListener(
+      RefPtr<AudioInputProcessing> aInputProcessing)
+      : mInputProcessing(std::move(aInputProcessing)) {
+    MOZ_COUNT_CTOR(AudioInputProcessingPullListener);
+  }
+
+  ~AudioInputProcessingPullListener() {
+    MOZ_COUNT_DTOR(AudioInputProcessingPullListener);
+  }
+
+  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
+                  StreamTime aDesiredTime) override {
+    mInputProcessing->Pull(aEndOfAppendedData, aDesiredTime);
+  }
+
+  const RefPtr<AudioInputProcessing> mInputProcessing;
+};
+
 class MediaEngineWebRTCAudioCaptureSource : public MediaEngineSource {
  public:
   explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid) {}
   nsString GetName() const override;
   nsCString GetUUID() const override;
   nsString GetGroupId() const override;
   nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
                     const MediaEnginePrefs& aPrefs, const nsString& aDeviceId,
@@ -280,23 +304,16 @@ class MediaEngineWebRTCAudioCaptureSourc
   nsresult Start(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
 
-  void Pull(const RefPtr<const AllocationHandle>& aHandle,
-            const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
-            StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle) override {
-    MOZ_ASSERT_UNREACHABLE("Should never have to append silence");
-  }
-
   dom::MediaSourceEnum GetMediaSource() const override {
     return dom::MediaSourceEnum::AudioCapture;
   }
 
   nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override {
     return NS_ERROR_NOT_IMPLEMENTED;
   }