Bug 1221587: change audio listeners for full-duplex audio r=padenot
☠☠ backed out by fd77d9587c81 ☠ ☠
authorRandell Jesup <rjesup@jesup.org>
Thu, 21 Jan 2016 11:51:36 -0500
changeset 281064 765fa97d240749bc3412268659904a7de8dbfbe1
parent 281063 1c3afb2f433b62c1949c45672c828f8e51cfcc5e
child 281065 8af4dd12d47cbf1925ed18a8c84f6f3adb0a2511
push id29930
push usercbook@mozilla.com
push dateFri, 22 Jan 2016 11:05:50 +0000
treeherdermozilla-central@7104d650a97d [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerspadenot
bugs1221587
milestone46.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1221587: change audio listeners for full-duplex audio r=padenot
dom/media/GraphDriver.cpp
dom/media/GraphDriver.h
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/webrtc/MediaEngine.h
dom/media/webrtc/MediaEngineDefault.h
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -43,16 +43,17 @@ struct AutoProfilerUnregisterThread
   }
 };
 
 GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
   : mIterationStart(0),
     mIterationEnd(0),
     mGraphImpl(aGraphImpl),
     mWaitState(WAITSTATE_RUNNING),
+    mAudioInput(nullptr),
     mCurrentTimeStamp(TimeStamp::Now()),
     mPreviousDriver(nullptr),
     mNextDriver(nullptr)
 { }
 
 void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
                                GraphTime aLastSwitchNextIterationStart,
                                GraphTime aLastSwitchNextIterationEnd)
@@ -534,16 +535,17 @@ StreamAndPromiseForOperation::StreamAndP
   // MOZ_ASSERT(aPromise);
 }
 
 AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl)
   : GraphDriver(aGraphImpl)
   , mSampleRate(0)
   , mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS)
   , mStarted(false)
+  , mAudioInput(nullptr)
   , mAudioChannel(aGraphImpl->AudioChannel())
   , mInCallback(false)
   , mMicrophoneActive(false)
 #ifdef XP_MACOSX
   , mCallbackReceivedWhileSwitching(0)
 #endif
 {
   STREAM_LOG(LogLevel::Debug, ("AudioCallbackDriver ctor for graph %p", aGraphImpl));
@@ -895,18 +897,18 @@ AudioCallbackDriver::DataCallback(AudioD
 
   mBuffer.BufferFilled();
 
   // Callback any observers for the AEC speaker data.  Note that one
   // (maybe) of these will be full-duplex, the others will get their input
   // data off separate cubeb callbacks.  Take care with how stuff is
   // removed/added to this list and TSAN issues, but input and output will
   // use separate callback methods.
-  mGraphImpl->NotifySpeakerData(aOutputBuffer, static_cast<size_t>(aFrames),
-                                ChannelCount);
+  mGraphImpl->NotifyOutputData(aOutputBuffer, static_cast<size_t>(aFrames),
+                               ChannelCount);
 
   // Process mic data if any/needed -- after inserting far-end data for AEC!
   if (aInputBuffer) {
     if (mAudioInput) { // for this specific input-only or full-duplex stream
       mAudioInput->NotifyInputData(mGraphImpl, aInputBuffer,
                                    static_cast<size_t>(aFrames),
                                    ChannelCount);
     }
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -187,23 +187,22 @@ public:
   void EnsureNextIterationLocked();
 
   MediaStreamGraphImpl* GraphImpl() {
     return mGraphImpl;
   }
 
   virtual bool OnThread() = 0;
 
-  // XXX Thread-safety! Do these via commands to avoid TSAN issues
-  // and crashes!!!
-  virtual void SetInputListener(MediaStreamListener *aListener) {
+  // These are invoked on the MSG thread (or MainThread in shutdown)
+  virtual void SetInputListener(AudioDataListener *aListener) {
     mAudioInput = aListener;
   }
   // XXX do we need the param?  probably no
-  virtual void RemoveInputListener(MediaStreamListener *aListener) {
+  virtual void RemoveInputListener(AudioDataListener *aListener) {
     mAudioInput = nullptr;
   }
 
 protected:
   GraphTime StateComputedTime() const;
 
   // Time of the start of this graph iteration. This must be accessed while
   // having the monitor.
@@ -228,17 +227,17 @@ protected:
     // Something has signaled RunThread() to wake up immediately,
     // but it hasn't done so yet
     WAITSTATE_WAKING_UP
   };
   // This must be access with the monitor.
   WaitState mWaitState;
 
   // Callback for mic data, if any
-  RefPtr<MediaStreamListener> mAudioInput;
+  AudioDataListener *mAudioInput;
 
   // This is used on the main thread (during initialization), and the graph
   // thread. No monitor needed because we know the graph thread does not run
   // during the initialization.
   TimeStamp mCurrentTimeStamp;
   // This is non-null only when this driver has recently switched from an other
   // driver, and has not cleaned it up yet (for example because the audio stream
   // is currently calling the callback during initialization).
@@ -493,17 +492,17 @@ private:
    * thread (if this driver is the first one).
    * This is read on previous driver's thread (during callbacks from
    * cubeb_stream_init) and the audio thread (when switching away from this
    * driver back to a SystemClockDriver).
    * This is synchronized by the Graph's monitor.
    * */
   bool mStarted;
   /* Listener for mic input, if any. */
-  RefPtr<MediaStreamListener> mAudioInput;
+  RefPtr<AudioDataListener> mAudioInput;
 
   struct AutoInCallback
   {
     explicit AutoInCallback(AudioCallbackDriver* aDriver);
     ~AutoInCallback();
     AudioCallbackDriver* mDriver;
   };
 
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -919,91 +919,91 @@ MediaStreamGraphImpl::PlayVideo(MediaStr
   // If the stream has finished and the timestamps of all frames have expired
   // then no more updates are required.
   if (aStream->mFinished && !haveMultipleImages) {
     aStream->mLastPlayedVideoFrame.SetNull();
   }
 }
 
 void
-MediaStreamGraphImpl::OpenAudioInputImpl(char *aName, MediaStreamListener *aListener)
+MediaStreamGraphImpl::OpenAudioInputImpl(char *aName, AudioDataListener *aListener)
 {
   if (CurrentDriver()->AsAudioCallbackDriver()) {
     CurrentDriver()->SetInputListener(aListener);
   } else {
     // XXX Switch to callback driver
   }
   mAudioInputs.AppendElement(aListener); // always monitor speaker data
 }
 
 nsresult
-MediaStreamGraphImpl::OpenAudioInput(char *aName, MediaStreamListener *aListener)
+MediaStreamGraphImpl::OpenAudioInput(char *aName, AudioDataListener *aListener)
 {
   // XXX So, so, so annoying.  Can't AppendMessage except on Mainthread
   if (!NS_IsMainThread()) {
     NS_DispatchToMainThread(WrapRunnable(this,
                                          &MediaStreamGraphImpl::OpenAudioInput,
                                          aName, aListener)); // XXX Fix! string need to copied
     return NS_OK;
   }
   class Message : public ControlMessage {
   public:
-    Message(MediaStreamGraphImpl *aGraph, char *aName, MediaStreamListener *aListener) :
+    Message(MediaStreamGraphImpl *aGraph, char *aName, AudioDataListener *aListener) :
       ControlMessage(nullptr), mGraph(aGraph), mName(aName), mListener(aListener) {}
     virtual void Run()
     {
       mGraph->OpenAudioInputImpl(mName, mListener);
     }
     MediaStreamGraphImpl *mGraph;
     char *mName; // XXX needs to copy
-    MediaStreamListener *mListener;
+    RefPtr<AudioDataListener> mListener;
   };
   this->AppendMessage(new Message(this, aName, aListener));
   return NS_OK;
 }
 
 void
-MediaStreamGraphImpl::CloseAudioInputImpl(MediaStreamListener *aListener)
+MediaStreamGraphImpl::CloseAudioInputImpl(AudioDataListener *aListener)
 {
   CurrentDriver()->RemoveInputListener(aListener);
   mAudioInputs.RemoveElement(aListener);
 }
 
 void
-MediaStreamGraphImpl::CloseAudioInput(MediaStreamListener *aListener)
+MediaStreamGraphImpl::CloseAudioInput(AudioDataListener *aListener)
 {
   // XXX So, so, so annoying.  Can't AppendMessage except on Mainthread
   if (!NS_IsMainThread()) {
     NS_DispatchToMainThread(WrapRunnable(this,
                                          &MediaStreamGraphImpl::CloseAudioInput,
                                          aListener));
     return;
   }
   class Message : public ControlMessage {
   public:
-    Message(MediaStreamGraphImpl *aGraph, MediaStreamListener *aListener) :
+    Message(MediaStreamGraphImpl *aGraph, AudioDataListener *aListener) :
       ControlMessage(nullptr), mGraph(aGraph), mListener(aListener) {}
     virtual void Run()
     {
       mGraph->CloseAudioInputImpl(mListener);
     }
     MediaStreamGraphImpl *mGraph;
-    MediaStreamListener *mListener;
+    RefPtr<AudioDataListener> mListener;
   };
   this->AppendMessage(new Message(this, aListener));
 }
 
 
 // All AudioInput listeners get the same speaker data (at least for now).
 void
-MediaStreamGraph::NotifySpeakerData(AudioDataValue* aBuffer, size_t aFrames,
-                                    uint32_t aChannels)
+MediaStreamGraph::NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
+                                   uint32_t aChannels)
 {
   for (auto& listener : mAudioInputs) {
-    listener->NotifySpeakerData(this, aBuffer, aFrames, aChannels);
+    listener->NotifyOutputData(this, aBuffer, aFrames, aChannels);
   }
 }
 
 bool
 MediaStreamGraphImpl::ShouldUpdateMainThread()
 {
   if (mRealtime) {
     return true;
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -176,33 +176,49 @@ public:
                                         TrackID aInputTrackID = TRACK_INVALID) {}
 
   /**
    * Notify that all new tracks this iteration have been created.
    * This is to ensure that tracks added atomically to MediaStreamGraph
    * are also notified of atomically to MediaStreamListeners.
    */
   virtual void NotifyFinishedTrackCreation(MediaStreamGraph* aGraph) {}
+};
 
+class AudioDataListenerInterface {
+protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  virtual ~AudioDataListenerInterface() {}
+
+public:
   /* These are for cubeb audio input & output streams: */
   /**
    * Output data to speakers, for use as the "far-end" data for echo
    * cancellation.  This is not guaranteed to be in any particular size
    * chunks.
    */
-  virtual void NotifySpeakerData(MediaStreamGraph* aGraph,
-                                 AudioDataValue* aBuffer, size_t aFrames,
-                                 uint32_t aChannels) {}
+  virtual void NotifyOutputData(MediaStreamGraph* aGraph,
+                                AudioDataValue* aBuffer, size_t aFrames,
+                                uint32_t aChannels) = 0;
   /**
    * Input data from a microphone (or other audio source.  This is not
    * guaranteed to be in any particular size chunks.
    */
   virtual void NotifyInputData(MediaStreamGraph* aGraph,
                                AudioDataValue* aBuffer, size_t aFrames,
-                               uint32_t aChannels) {}
+                               uint32_t aChannels) = 0;
+};
+
+class AudioDataListener : public AudioDataListenerInterface {
+protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  virtual ~AudioDataListener() {}
+
+public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioDataListener)
 };
 
 /**
  * This is a base class for media graph thread listener direct callbacks
  * from within AppendToTrack().  Note that your regular listener will
  * still get NotifyQueuedTrackChanges() callbacks from the MSG thread, so
  * you must be careful to ignore them if AddDirectListener was successful.
  */
@@ -1187,20 +1203,20 @@ public:
   };
   // Main thread only
   static MediaStreamGraph* GetInstance(GraphDriverType aGraphDriverRequested,
                                        dom::AudioChannel aChannel);
   static MediaStreamGraph* CreateNonRealtimeInstance(TrackRate aSampleRate);
   // Idempotent
   static void DestroyNonRealtimeInstance(MediaStreamGraph* aGraph);
 
-  virtual nsresult OpenAudioInput(char *aName, MediaStreamListener *aListener) {
+  virtual nsresult OpenAudioInput(char *aName, AudioDataListener *aListener) {
     return NS_ERROR_FAILURE;
   }
-  virtual void CloseAudioInput(MediaStreamListener *aListener) {}
+  virtual void CloseAudioInput(AudioDataListener *aListener) {}
 
   // Control API.
   /**
    * Create a stream that a media decoder (or some other source of
    * media data, such as a camera) can write to.
    */
   SourceMediaStream* CreateSourceStream(DOMMediaStream* aWrapper);
   /**
@@ -1275,18 +1291,18 @@ public:
   void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
   already_AddRefed<MediaInputPort> ConnectToCaptureStream(
     uint64_t aWindowId, MediaStream* aMediaStream);
 
   /**
    * Data going to the speakers from the GraphDriver's DataCallback
    * to notify any listeners (for echo cancellation).
    */
-  void NotifySpeakerData(AudioDataValue* aBuffer, size_t aFrames,
-                         uint32_t aChannels);
+  void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
+                        uint32_t aChannels);
 
 protected:
   explicit MediaStreamGraph(TrackRate aSampleRate)
     : mSampleRate(aSampleRate)
   {
     MOZ_COUNT_CTOR(MediaStreamGraph);
   }
   virtual ~MediaStreamGraph()
@@ -1299,14 +1315,18 @@ protected:
 
   /**
    * Sample rate at which this graph runs. For real time graphs, this is
    * the rate of the audio mixer. For offline graphs, this is the rate specified
    * at construction.
    */
   TrackRate mSampleRate;
 
-  nsTArray<RefPtr<MediaStreamListener>> mAudioInputs;
+  /**
+   * Lifetime is controlled by OpenAudioInput/CloseAudioInput.  Destroying the listener
+   * without removing it is an error; callers should assert on that.
+   */
+  nsTArray<AudioDataListener *> mAudioInputs;
 };
 
 } // namespace mozilla
 
 #endif /* MOZILLA_MEDIASTREAMGRAPH_H_ */
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -345,20 +345,20 @@ public:
    * Set the correct current video frame for stream aStream.
    */
   void PlayVideo(MediaStream* aStream);
   /**
    * No more data will be forthcoming for aStream. The stream will end
    * at the current buffer end point. The StreamBuffer's tracks must be
    * explicitly set to finished by the caller.
    */
-  void OpenAudioInputImpl(char *aName, MediaStreamListener *aListener);
-  virtual nsresult OpenAudioInput(char *aName, MediaStreamListener *aListener) override;
-  void CloseAudioInputImpl(MediaStreamListener *aListener);
-  virtual void CloseAudioInput(MediaStreamListener *aListener) override;
+  void OpenAudioInputImpl(char *aName, AudioDataListener *aListener);
+  virtual nsresult OpenAudioInput(char *aName, AudioDataListener *aListener) override;
+  void CloseAudioInputImpl(AudioDataListener *aListener);
+  virtual void CloseAudioInput(AudioDataListener *aListener) override;
 
   void FinishStream(MediaStream* aStream);
   /**
    * Compute how much stream data we would like to buffer for aStream.
    */
   StreamTime GetDesiredBufferEnd(MediaStream* aStream);
   /**
    * Returns true when there are no active streams.
--- a/dom/media/webrtc/MediaEngine.h
+++ b/dom/media/webrtc/MediaEngine.h
@@ -269,17 +269,18 @@ protected:
     : MediaEngineSource(aState) {}
   MediaEngineVideoSource()
     : MediaEngineSource(kReleased) {}
 };
 
 /**
  * Audio source and friends.
  */
-class MediaEngineAudioSource : public MediaEngineSource
+class MediaEngineAudioSource : public MediaEngineSource,
+                               public AudioDataListenerInterface
 {
 public:
   virtual ~MediaEngineAudioSource() {}
 
 protected:
   explicit MediaEngineAudioSource(MediaEngineState aState)
     : MediaEngineSource(aState) {}
   MediaEngineAudioSource()
--- a/dom/media/webrtc/MediaEngineDefault.h
+++ b/dom/media/webrtc/MediaEngineDefault.h
@@ -139,16 +139,24 @@ public:
 #ifdef DEBUG
     StreamBuffer::Track* data = aSource->FindTrack(aId);
     NS_WARN_IF_FALSE(!data || data->IsEnded() ||
                      aDesiredTime <= aSource->GetEndOfAppendedData(aId),
                      "MediaEngineDefaultAudioSource data underrun");
 #endif
   }
 
+  void NotifyOutputData(MediaStreamGraph* aGraph,
+                        AudioDataValue* aBuffer, size_t aFrames,
+                        uint32_t aChannels) override
+  {}
+  void NotifyInputData(MediaStreamGraph* aGraph,
+                       AudioDataValue* aBuffer, size_t aFrames,
+                       uint32_t aChannels) override
+  {}
   bool IsFake() override {
     return true;
   }
 
   dom::MediaSourceEnum GetMediaSource() const override {
     return dom::MediaSourceEnum::Microphone;
   }
 
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -24,16 +24,17 @@
 #include "VideoUtils.h"
 #include "MediaEngineCameraVideoSource.h"
 #include "VideoSegment.h"
 #include "AudioSegment.h"
 #include "StreamBuffer.h"
 #include "MediaStreamGraph.h"
 #include "cubeb/cubeb.h"
 #include "CubebUtils.h"
+#include "AudioPacketizer.h"
 
 #include "MediaEngineWrapper.h"
 #include "mozilla/dom/MediaStreamTrackBinding.h"
 // WebRTC library includes follow
 #include "webrtc/common.h"
 // Audio Engine
 #include "webrtc/voice_engine/include/voe_base.h"
 #include "webrtc/voice_engine/include/voe_codec.h"
@@ -93,16 +94,24 @@ public:
   void SetDirectListeners(bool aDirect) override
   {}
   nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn,
                   uint32_t aAGC, bool aNoiseOn, uint32_t aNoise,
                   int32_t aPlayoutDelay) override
   {
     return NS_OK;
   }
+  void NotifyOutputData(MediaStreamGraph* aGraph,
+                        AudioDataValue* aBuffer, size_t aFrames,
+                        uint32_t aChannels) override
+  {}
+  void NotifyInputData(MediaStreamGraph* aGraph,
+                       AudioDataValue* aBuffer, size_t aFrames,
+                       uint32_t aChannels) override
+  {}
   void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
                   TrackID aID, StreamTime aDesiredTime) override
   {}
   dom::MediaSourceEnum GetMediaSource() const override
   {
     return dom::MediaSourceEnum::AudioCapture;
   }
   bool IsFake() override
@@ -122,47 +131,42 @@ protected:
   nsCString mUUID;
 };
 
 // Small subset of VoEHardware
 class AudioInput
 {
 public:
   AudioInput(webrtc::VoiceEngine* aVoiceEngine) : mVoiceEngine(aVoiceEngine) {};
-  virtual ~AudioInput() {}
-
-  NS_INLINE_DECL_REFCOUNTING(AudioInput)
+  // Threadsafe because it's referenced from a MicrophoneSource, which can
+  // hold references to it on other threads.
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
 
   virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
   virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
                                      char aStrGuidUTF8[128]) = 0;
   virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
-  virtual void StartRecording(MediaStreamGraph *aGraph) = 0;
-  virtual void StopRecording(MediaStreamGraph *aGraph) = 0;
+  virtual void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
+  virtual void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) = 0;
   virtual int SetRecordingDevice(int aIndex) = 0;
 
 protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  virtual ~AudioInput() {}
+
   webrtc::VoiceEngine* mVoiceEngine;
 };
 
-class AudioInputCubeb : public AudioInput,
-                        public MediaStreamListener
+class AudioInputCubeb final : public AudioInput
 {
 public:
-  AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine) :
+  explicit AudioInputCubeb(webrtc::VoiceEngine* aVoiceEngine) :
     AudioInput(aVoiceEngine), mDevices(nullptr) {}
-  virtual ~AudioInputCubeb()
-  {
-    if (mDevices) {
-      cubeb_device_collection_destroy(mDevices);
-      mDevices = nullptr;
-    }
-  }
 
-  virtual int GetNumOfRecordingDevices(int& aDevices)
+  int GetNumOfRecordingDevices(int& aDevices)
   {
     // devices = cubeb_get_num_devices(...)
     if (CUBEB_OK != cubeb_enumerate_devices(CubebUtils::GetCubebContext(),
                                             CUBEB_DEVICE_TYPE_INPUT,
                                             &mDevices)) {
       return 0;
     }
     aDevices = 0;
@@ -172,112 +176,153 @@ public:
       {
         aDevices++;
         // XXX to support device changes, we need to identify by name/UUID not index
       }
     }
     return 0;
   }
 
-  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
-                                     char aStrGuidUTF8[128])
+  int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                             char aStrGuidUTF8[128])
   {
     if (!mDevices) {
       return 1;
     }
     int devindex = aIndex == -1 ? 0 : aIndex;
     PR_snprintf(aStrNameUTF8, 128, "%s%s", aIndex == -1 ? "default: " : "",
                 mDevices->device[devindex]->friendly_name);
     aStrGuidUTF8[0] = '\0';
     return 0;
   }
 
-  virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
+  int GetRecordingDeviceStatus(bool& aIsAvailable)
   {
     // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT
     aIsAvailable = true;
     return 0;
   }
 
-  virtual void StartRecording(MediaStreamGraph *aGraph)
+  void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
   {
     ScopedCustomReleasePtr<webrtc::VoEExternalMedia> ptrVoERender;
     ptrVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
     if (ptrVoERender) {
       ptrVoERender->SetExternalRecordingStatus(true);
     }
-    aGraph->OpenAudioInput(nullptr, this);
+    aGraph->OpenAudioInput(nullptr, aListener);
+  }
+
+  void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener)
+  {
+    aGraph->CloseAudioInput(aListener);
   }
 
-  virtual void StopRecording(MediaStreamGraph *aGraph)
+  int SetRecordingDevice(int aIndex)
   {
-    aGraph->CloseAudioInput(this);
+    // Relevant with devid support
+    return 1;
   }
 
-  virtual int SetRecordingDevice(int aIndex)
+protected:
+  ~AudioInputCubeb()
   {
-    // Not relevant to cubeb
-    return 1;
+    if (mDevices) {
+      cubeb_device_collection_destroy(mDevices);
+      mDevices = nullptr;
+    }
   }
 
 private:
   cubeb_device_collection* mDevices;
 };
 
-class AudioInputWebRTC : public AudioInput
+class AudioInputWebRTC final : public AudioInput
 {
 public:
-  AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
-  virtual ~AudioInputWebRTC() {}
+  explicit AudioInputWebRTC(webrtc::VoiceEngine* aVoiceEngine) : AudioInput(aVoiceEngine) {}
 
-  virtual int GetNumOfRecordingDevices(int& aDevices)
+  int GetNumOfRecordingDevices(int& aDevices)
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     return ptrVoEHw->GetNumOfRecordingDevices(aDevices);
   }
 
-  virtual int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
-                                     char aStrGuidUTF8[128])
+  int GetRecordingDeviceName(int aIndex, char aStrNameUTF8[128],
+                             char aStrGuidUTF8[128])
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     return ptrVoEHw->GetRecordingDeviceName(aIndex, aStrNameUTF8,
                                             aStrGuidUTF8);
   }
 
-  virtual int GetRecordingDeviceStatus(bool& aIsAvailable)
+  int GetRecordingDeviceStatus(bool& aIsAvailable)
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     ptrVoEHw->GetRecordingDeviceStatus(aIsAvailable);
     return 0;
   }
 
-  virtual void StartRecording(MediaStreamGraph *aGraph) {}
-  virtual void StopRecording(MediaStreamGraph *aGraph) {}
+  void StartRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
+  void StopRecording(MediaStreamGraph *aGraph, AudioDataListener *aListener) {}
 
-  virtual int SetRecordingDevice(int aIndex)
+  int SetRecordingDevice(int aIndex)
   {
     ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
     ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     if (!ptrVoEHw)  {
       return 1;
     }
     return ptrVoEHw->SetRecordingDevice(aIndex);
   }
+
+protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  ~AudioInputWebRTC() {}
+};
+
+class WebRTCAudioDataListener : public AudioDataListener
+{
+protected:
+  // Protected destructor, to discourage deletion outside of Release():
+  virtual ~WebRTCAudioDataListener() {}
+
+public:
+  explicit WebRTCAudioDataListener(MediaEngineAudioSource* aAudioSource) :
+    mAudioSource(aAudioSource)
+  {}
+
+  // AudioDataListenerInterface methods
+  virtual void NotifyOutputData(MediaStreamGraph* aGraph,
+                                AudioDataValue* aBuffer, size_t aFrames,
+                                uint32_t aChannels) override
+  {
+    mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aChannels);
+  }
+  virtual void NotifyInputData(MediaStreamGraph* aGraph,
+                               AudioDataValue* aBuffer, size_t aFrames,
+                               uint32_t aChannels) override
+  {
+    mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aChannels);
+  }
+
+private:
+  RefPtr<MediaEngineAudioSource> mAudioSource;
 };
 
 class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
                                           public webrtc::VoEMediaProcess,
                                           private MediaConstraintsHelper
 {
 public:
   MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
@@ -302,16 +347,17 @@ public:
     , mAGC(webrtc::kAgcDefault)
     , mNoiseSuppress(webrtc::kNsDefault)
     , mPlayoutDelay(0)
     , mNullTransport(nullptr) {
     MOZ_ASSERT(aVoiceEnginePtr);
     MOZ_ASSERT(aAudioInput);
     mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
     mDeviceUUID.Assign(uuid);
+    mListener = new mozilla::WebRTCAudioDataListener(this);
     Init();
   }
 
   void GetName(nsAString& aName) override;
   void GetUUID(nsACString& aUUID) override;
 
   nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
                     const MediaEnginePrefs& aPrefs,
@@ -328,16 +374,24 @@ public:
                   bool aNoiseOn, uint32_t aNoise,
                   int32_t aPlayoutDelay) override;
 
   void NotifyPull(MediaStreamGraph* aGraph,
                   SourceMediaStream* aSource,
                   TrackID aId,
                   StreamTime aDesiredTime) override;
 
+  // AudioDataListenerInterface methods
+  void NotifyOutputData(MediaStreamGraph* aGraph,
+                        AudioDataValue* aBuffer, size_t aFrames,
+                        uint32_t aChannels) override;
+  void NotifyInputData(MediaStreamGraph* aGraph,
+                       AudioDataValue* aBuffer, size_t aFrames,
+                       uint32_t aChannels) override;
+
   bool IsFake() override {
     return false;
   }
 
   dom::MediaSourceEnum GetMediaSource() const override {
     return dom::MediaSourceEnum::Microphone;
   }
 
@@ -362,22 +416,25 @@ public:
 protected:
   ~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
 
 private:
   void Init();
 
   webrtc::VoiceEngine* mVoiceEngine;
   RefPtr<mozilla::AudioInput> mAudioInput;
+  RefPtr<WebRTCAudioDataListener> mListener;
 
   ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
   ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
 
+  nsAutoPtr<AudioPacketizer<AudioDataValue, int16_t>> mPacketizer;
+
   // mMonitor protects mSources[] access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack()).
   // mSources[] is accessed from webrtc threads.
   Monitor mMonitor;
   nsTArray<RefPtr<SourceMediaStream>> mSources;
   nsCOMPtr<nsIThread> mThread;
   int mCapIndex;
   int mChannel;
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -376,17 +376,17 @@ MediaEngineWebRTCMicrophoneSource::Start
   }
   if (mVoEBase->StartSend(mChannel)) {
     return NS_ERROR_FAILURE;
   }
 
   // Attach external media processor, so this::Process will be called.
   mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);
 
-  mAudioInput->StartRecording(aStream->Graph());
+  mAudioInput->StartRecording(aStream->Graph(), mListener);
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
   AssertIsOnOwningThread();
@@ -408,17 +408,17 @@ MediaEngineWebRTCMicrophoneSource::Stop(
     }
     if (!mVoEBase) {
       return NS_ERROR_FAILURE;
     }
 
     mState = kStopped;
   }
 
-  mAudioInput->StopRecording(aSource->Graph());
+  mAudioInput->StopRecording(aSource->Graph(), mListener);
 
   mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);
 
   if (mVoEBase->StopSend(mChannel)) {
     return NS_ERROR_FAILURE;
   }
   if (mVoEBase->StopReceive(mChannel)) {
     return NS_ERROR_FAILURE;
@@ -440,16 +440,49 @@ MediaEngineWebRTCMicrophoneSource::Notif
                                               TrackID aID,
                                               StreamTime aDesiredTime)
 {
   // Ignore - we push audio data
   LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
 }
 
 void
+MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraph* aGraph,
+                                                    AudioDataValue* aBuffer,
+                                                    size_t aFrames,
+                                                    uint32_t aChannels)
+{
+}
+
+// Called back on GraphDriver thread
+void
+MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraph* aGraph,
+                                                   AudioDataValue* aBuffer,
+                                                   size_t aFrames,
+                                                   uint32_t aChannels)
+{
+  // This will call Process() with data coming out of the AEC/NS/AGC/etc chain
+  if (!mPacketizer ||
+      mPacketizer->PacketSize() != mSampleFrequency/100 ||
+      mPacketizer->Channels() != aChannels) {
+    // It's ok to drop the audio still in the packetizer here.
+    mPacketizer = new AudioPacketizer<AudioDataValue, int16_t>(mSampleFrequency/100, aChannels);
+  }
+
+  mPacketizer->Input(aBuffer, static_cast<uint32_t>(aFrames));
+
+  while (mPacketizer->PacketsAvailable()) {
+    uint32_t samplesPerPacket = mPacketizer->PacketSize() *
+                                mPacketizer->Channels();
+    int16_t* packet = mPacketizer->Output();
+    mVoERender->ExternalRecordingInsertData(packet, samplesPerPacket, mSampleFrequency, 0);
+  }
+}
+
+void
 MediaEngineWebRTCMicrophoneSource::Init()
 {
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
 
   mVoEBase->Init();
 
   mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
   if (!mVoERender) {
@@ -556,16 +589,19 @@ MediaEngineWebRTCMicrophoneSource::Shutd
   delete mNullTransport;
   mNullTransport = nullptr;
 
   mVoEProcessing = nullptr;
   mVoENetwork = nullptr;
   mVoERender = nullptr;
   mVoEBase = nullptr;
 
+  mAudioInput = nullptr;
+  mListener = nullptr; // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us
+
   mState = kReleased;
   mInitDone = false;
 }
 
 typedef int16_t sample;
 
 void
 MediaEngineWebRTCMicrophoneSource::Process(int channel,