Bug 1404977 - functional changes. draft
authorPaul Adenot <paul@paul.cx>
Tue, 17 Apr 2018 17:10:51 +0200
changeset 783643 77a7ebc74c4bfd5a99415301eab7d66d7cbcf779
parent 783642 cc6af7857d4fca721d520e547f4b97c54b757751
child 783644 c3091f4996da8002c45a45fb5332bce6eca91e3e
push id106751
push userpaul@paul.cx
push dateTue, 17 Apr 2018 15:22:51 +0000
bugs1404977
milestone61.0a1
Bug 1404977 - functional changes. MozReview-Commit-ID: 5soDclcIXDd
dom/media/AudioDeviceInfo.cpp
dom/media/AudioDeviceInfo.h
dom/media/CubebUtils.cpp
dom/media/CubebUtils.h
dom/media/GraphDriver.cpp
dom/media/GraphDriver.h
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/webrtc/MediaEngineWebRTC.cpp
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/AudioDeviceInfo.cpp
+++ b/dom/media/AudioDeviceInfo.cpp
@@ -2,31 +2,56 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioDeviceInfo.h"
 
 NS_IMPL_ISUPPORTS(AudioDeviceInfo, nsIAudioDeviceInfo)
 
-AudioDeviceInfo::AudioDeviceInfo(const nsAString& aName,
+using namespace mozilla;
+using namespace mozilla::CubebUtils;
+
+AudioDeviceInfo::AudioDeviceInfo(cubeb_device_info* aInfo)
+  : AudioDeviceInfo(aInfo->devid,
+                    NS_ConvertUTF8toUTF16(aInfo->friendly_name),
+                    NS_ConvertUTF8toUTF16(aInfo->group_id),
+                    NS_ConvertUTF8toUTF16(aInfo->vendor_name),
+                    aInfo->type,
+                    aInfo->state,
+                    aInfo->preferred,
+                    aInfo->format,
+                    aInfo->default_format,
+                    aInfo->max_channels,
+                    aInfo->default_rate,
+                    aInfo->max_rate,
+                    aInfo->min_rate,
+                    aInfo->latency_lo,
+                    aInfo->latency_hi)
+{
+}
+
+
+AudioDeviceInfo::AudioDeviceInfo(AudioDeviceID aID,
+                                 const nsAString& aName,
                                  const nsAString& aGroupId,
                                  const nsAString& aVendor,
                                  uint16_t aType,
                                  uint16_t aState,
                                  uint16_t aPreferred,
                                  uint16_t aSupportedFormat,
                                  uint16_t aDefaultFormat,
                                  uint32_t aMaxChannels,
                                  uint32_t aDefaultRate,
                                  uint32_t aMaxRate,
                                  uint32_t aMinRate,
                                  uint32_t aMaxLatency,
-				 uint32_t aMinLatency)
-  : mName(aName)
+                                 uint32_t aMinLatency)
+  : mDeviceId(aID)
+  , mName(aName)
   , mGroupId(aGroupId)
   , mVendor(aVendor)
   , mType(aType)
   , mState(aState)
   , mPreferred(aPreferred)
   , mSupportedFormat(aSupportedFormat)
   , mDefaultFormat(aDefaultFormat)
   , mMaxChannels(aMaxChannels)
@@ -49,16 +74,75 @@ AudioDeviceInfo::AudioDeviceInfo(const n
   MOZ_ASSERT(mSupportedFormat & (FMT_S16LE | FMT_S16BE | FMT_F32LE | FMT_F32BE),
              "Wrong supported format");
   MOZ_ASSERT(mDefaultFormat == FMT_S16LE ||
              mDefaultFormat == FMT_S16BE ||
              mDefaultFormat == FMT_F32LE ||
              mDefaultFormat == FMT_F32BE, "Wrong default format");
 }
 
+AudioDeviceInfo::AudioDeviceInfo(const nsAString& aName,
+                                 const nsAString& aGroupId,
+                                 const nsAString& aVendor,
+                                 uint16_t aType,
+                                 uint16_t aState,
+                                 uint16_t aPreferred,
+                                 uint16_t aSupportedFormat,
+                                 uint16_t aDefaultFormat,
+                                 uint32_t aMaxChannels,
+                                 uint32_t aDefaultRate,
+                                 uint32_t aMaxRate,
+                                 uint32_t aMinRate,
+                                 uint32_t aMaxLatency,
+                                 uint32_t aMinLatency)
+  : AudioDeviceInfo(nullptr,
+                    aName,
+                    aGroupId,
+                    aVendor,
+                    aType,
+                    aState,
+                    aPreferred,
+                    aSupportedFormat,
+                    aDefaultFormat,
+                    aMaxChannels,
+                    aDefaultRate,
+                    aMaxRate,
+                    aMinRate,
+                    aMaxLatency,
+                    aMinLatency)
+{
+}
+
+Maybe<AudioDeviceID>
+AudioDeviceInfo::GetDeviceID()
+{
+  if (mDeviceId) {
+    return Some(mDeviceId);
+  } else {
+    return Nothing();
+  }
+}
+
+const nsString& AudioDeviceInfo::FriendlyName()
+{
+  return mName;
+}
+uint32_t AudioDeviceInfo::MaxChannels()
+{
+  return mMaxChannels;
+}
+uint32_t AudioDeviceInfo::Type()
+{
+  return mType;
+}
+uint32_t AudioDeviceInfo::State()
+{
+  return mState;
+}
+
 /* readonly attribute DOMString name; */
 NS_IMETHODIMP
 AudioDeviceInfo::GetName(nsAString& aName)
 {
   aName = mName;
   return NS_OK;
 }
 
--- a/dom/media/AudioDeviceInfo.h
+++ b/dom/media/AudioDeviceInfo.h
@@ -2,43 +2,73 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_AudioDeviceInfo_H
 #define MOZILLA_AudioDeviceInfo_H
 
 #include "nsIAudioDeviceInfo.h"
+#include "CubebUtils.h"
 #include "nsString.h"
+#include "mozilla/Maybe.h"
 
 // This is mapped to the cubeb_device_info.
 class AudioDeviceInfo final : public nsIAudioDeviceInfo
 {
 public:
   NS_DECL_ISUPPORTS
   NS_DECL_NSIAUDIODEVICEINFO
 
-  explicit AudioDeviceInfo(const nsAString& aName,
-                           const nsAString& aGroupId,
-                           const nsAString& aVendor,
-                           uint16_t aType,
-                           uint16_t aState,
-                           uint16_t aPreferred,
-                           uint16_t aSupportedFormat,
-                           uint16_t aDefaultFormat,
-                           uint32_t aMaxChannels,
-                           uint32_t aDefaultRate,
-                           uint32_t aMaxRate,
-                           uint32_t aMinRate,
-                           uint32_t aMaxLatency,
-                           uint32_t aMinLatency);
+  using AudioDeviceID = mozilla::CubebUtils::AudioDeviceID;
+
+
+  AudioDeviceInfo(const nsAString& aName,
+                  const nsAString& aGroupId,
+                  const nsAString& aVendor,
+                  uint16_t aType,
+                  uint16_t aState,
+                  uint16_t aPreferred,
+                  uint16_t aSupportedFormat,
+                  uint16_t aDefaultFormat,
+                  uint32_t aMaxChannels,
+                  uint32_t aDefaultRate,
+                  uint32_t aMaxRate,
+                  uint32_t aMinRate,
+                  uint32_t aMaxLatency,
+                  uint32_t aMinLatency);
 
+  AudioDeviceInfo(const AudioDeviceID aID,
+                  const nsAString& aName,
+                  const nsAString& aGroupId,
+                  const nsAString& aVendor,
+                  uint16_t aType,
+                  uint16_t aState,
+                  uint16_t aPreferred,
+                  uint16_t aSupportedFormat,
+                  uint16_t aDefaultFormat,
+                  uint32_t aMaxChannels,
+                  uint32_t aDefaultRate,
+                  uint32_t aMaxRate,
+                  uint32_t aMinRate,
+                  uint32_t aMaxLatency,
+                  uint32_t aMinLatency);
+
+  explicit AudioDeviceInfo(cubeb_device_info* aInfo);
+  // The device identifier may be unknown here; it depends on which ctor this
+  // instance has been constructed with.
+  mozilla::Maybe<AudioDeviceID> GetDeviceID();
+  const nsString& FriendlyName();
+  uint32_t MaxChannels();
+  uint32_t Type();
+  uint32_t State();
 private:
   virtual ~AudioDeviceInfo() = default;
 
+  AudioDeviceID mDeviceId;
   nsString mName;
   nsString mGroupId;
   nsString mVendor;
   uint16_t mType;
   uint16_t mState;
   uint16_t mPreferred;
   uint16_t mSupportedFormat;
   uint16_t mDefaultFormat;
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "CubebUtils.h"
 
 #include "MediaInfo.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/dom/ContentChild.h"
+#include "mozilla/dom/AudioDeviceInfo.h"
 #include "mozilla/ipc/FileDescriptor.h"
 #include "mozilla/Logging.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/Services.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/StaticMutex.h"
 #include "mozilla/StaticPtr.h"
 #include "mozilla/Telemetry.h"
@@ -687,17 +688,18 @@ void GetDeviceCollection(nsTArray<RefPtr
     cubeb_device_collection collection = { nullptr, 0 };
     if (cubeb_enumerate_devices(context,
                                 aSide == Input ? CUBEB_DEVICE_TYPE_INPUT :
                                                  CUBEB_DEVICE_TYPE_OUTPUT,
                                 &collection) == CUBEB_OK) {
       for (unsigned int i = 0; i < collection.count; ++i) {
         auto device = collection.device[i];
         RefPtr<AudioDeviceInfo> info =
-          new AudioDeviceInfo(NS_ConvertUTF8toUTF16(device.friendly_name),
+          new AudioDeviceInfo(device.devid,
+                              NS_ConvertUTF8toUTF16(device.friendly_name),
                               NS_ConvertUTF8toUTF16(device.group_id),
                               NS_ConvertUTF8toUTF16(device.vendor_name),
                               ConvertCubebType(device.type),
                               ConvertCubebState(device.state),
                               ConvertCubebPreferred(device.preferred),
                               ConvertCubebFormat(device.format),
                               ConvertCubebFormat(device.default_format),
                               device.max_channels,
--- a/dom/media/CubebUtils.h
+++ b/dom/media/CubebUtils.h
@@ -3,19 +3,22 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #if !defined(CubebUtils_h_)
 #define CubebUtils_h_
 
 #include "cubeb/cubeb.h"
-#include "mozilla/dom/AudioDeviceInfo.h"
+#include "nsString.h"
+#include "mozilla/RefPtr.h"
 #include "mozilla/Maybe.h"
 
+class AudioDeviceInfo;
+
 namespace mozilla {
 namespace CubebUtils {
 
 typedef cubeb_devid AudioDeviceID;
 
 // Initialize Audio Library. Some Audio backends require initializing the
 // library before using it.
 void InitLibrary();
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -524,27 +524,25 @@ StreamAndPromiseForOperation::StreamAndP
                                           dom::AudioContextOperation aOperation)
   : mStream(aStream)
   , mPromise(aPromise)
   , mOperation(aOperation)
 {
   // MOZ_ASSERT(aPromise);
 }
 
-AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl)
+AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl, uint32_t aInputChannelCount)
   : GraphDriver(aGraphImpl)
   , mOutputChannels(0)
   , mSampleRate(0)
-  , mInputChannels(1)
+  , mInputChannelCount(aInputChannelCount)
   , mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS)
   , mStarted(false)
-  , mAudioInput(nullptr)
   , mAddedMixer(false)
   , mInCallback(false)
-  , mMicrophoneActive(false)
   , mShouldFallbackIfError(false)
   , mFromFallback(false)
 {
   LOG(LogLevel::Debug, ("AudioCallbackDriver ctor for graph %p", aGraphImpl));
 #if defined(XP_WIN)
   if (XRE_IsContentProcess()) {
     audio::AudioNotificationReceiver::Register(this);
   }
@@ -628,17 +626,17 @@ AudioCallbackDriver::Init()
 
   if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
     output.format = CUBEB_SAMPLE_S16NE;
   } else {
     output.format = CUBEB_SAMPLE_FLOAT32NE;
   }
 
   // Query and set the number of channels this AudioCallbackDriver will use.
-  mOutputChannels = mGraphImpl->AudioChannelCount();
+  mOutputChannels = GraphImpl()->AudioOutputChannelCount();
   if (!mOutputChannels) {
     LOG(LogLevel::Warning, ("Output number of channels is 0."));
     MonitorAutoLock lock(GraphImpl()->GetMonitor());
     FallbackToSystemClockDriver();
     return true;
   }
 
   mBuffer = AudioCallbackBufferWrapper<AudioDataValue>(mOutputChannels);
@@ -652,92 +650,78 @@ AudioCallbackDriver::Init()
 
   // Macbook and MacBook air don't have enough CPU to run very low latency
   // MediaStreamGraphs, cap the minimal latency to 512 frames int this case.
   if (IsMacbookOrMacbookAir()) {
     latency_frames = std::max((uint32_t) 512, latency_frames);
   }
 
   input = output;
-  input.channels = mInputChannels;
+  input.channels = mInputChannelCount;
   input.layout = CUBEB_LAYOUT_UNDEFINED;
 
-#ifdef MOZ_WEBRTC
-  if (mGraphImpl->mInputWanted) {
-    StaticMutexAutoLock lock(AudioInputCubeb::Mutex());
-    uint32_t userChannels = 0;
-    AudioInputCubeb::GetUserChannelCount(mGraphImpl->mInputDeviceID, userChannels);
-    input.channels = mInputChannels = std::min<uint32_t>(8, userChannels);
-  }
-#endif
-
   cubeb_stream* stream = nullptr;
-  CubebUtils::AudioDeviceID input_id = nullptr, output_id = nullptr;
-  // We have to translate the deviceID values to cubeb devid's since those can be
-  // freed whenever enumerate is called.
-  {
-#ifdef MOZ_WEBRTC
-    StaticMutexAutoLock lock(AudioInputCubeb::Mutex());
-#endif
-    if ((!mGraphImpl->mInputWanted
-#ifdef MOZ_WEBRTC
-         || AudioInputCubeb::GetDeviceID(mGraphImpl->mInputDeviceID, input_id)
-#endif
-         ) &&
-        (mGraphImpl->mOutputDeviceID == -1 // pass nullptr for ID for default output
-#ifdef MOZ_WEBRTC
-         // XXX we should figure out how we would use a deviceID for output without webrtc.
-         // Currently we don't set this though, so it's ok
-         || AudioInputCubeb::GetDeviceID(mGraphImpl->mOutputDeviceID, output_id)
+  bool inputWanted = !!mInputChannelCount;
+  CubebUtils::AudioDeviceID output_id;
+  if (forcedOutputDeviceId) {
+    output_id = forcedOutputDeviceId;
+  } else {
+    output_id = GraphImpl()->mOutputDeviceID;
+  }
+  CubebUtils::AudioDeviceID input_id = GraphImpl()->mInputDeviceID;
+
+  // XXX Only pass input if we have an input listener.  Always
+  // set up output because it's easier, and it will just get silence.
+  if (cubeb_stream_init(cubebContext,
+                        &stream,
+                        "AudioCallbackDriver",
+                        input_id,
+                        inputWanted ? &input : nullptr,
+                        output_id,
+                        &output,
+                        latency_frames,
+                        DataCallback_s,
+                        StateCallback_s,
+                        this) == CUBEB_OK) {
+    mAudioStream.own(stream);
+    DebugOnly<int> rv =
+      cubeb_stream_set_volume(mAudioStream, CubebUtils::GetVolumeScale());
+    NS_WARNING_ASSERTION(
+      rv == CUBEB_OK,
+      "Could not set the audio stream volume in GraphDriver.cpp");
+    CubebUtils::ReportCubebBackendUsed();
+  } else {
+    NS_WARNING("Could not create a cubeb stream for MediaStreamGraph, falling "
+               "back to a SystemClockDriver");
+    // Only report failures when we're not coming from a driver that was
+    // created itself as a fallback driver because of a previous audio driver
+    // failure.
+    if (!mFromFallback) {
+      CubebUtils::ReportCubebStreamInitFailure(firstStream);
+    }
+    MonitorAutoLock lock(GraphImpl()->GetMonitor());
+    FallbackToSystemClockDriver();
+    return true;
+  }
+
+
+#ifdef XP_MACOSX
+  PanOutputIfNeeded(!!mInputChannelCount);
 #endif
-         ) &&
-        // XXX Only pass input input if we have an input listener.  Always
-        // set up output because it's easier, and it will just get silence.
-        // XXX Add support for adding/removing an input listener later.
-        cubeb_stream_init(cubebContext, &stream,
-                          "AudioCallbackDriver",
-                          input_id,
-                          mGraphImpl->mInputWanted ? &input : nullptr,
-                          output_id,
-                          mGraphImpl->mOutputWanted ? &output : nullptr, latency_frames,
-                          DataCallback_s, StateCallback_s, this) == CUBEB_OK) {
-      mAudioStream.own(stream);
-      DebugOnly<int> rv = cubeb_stream_set_volume(mAudioStream, CubebUtils::GetVolumeScale());
-      NS_WARNING_ASSERTION(
-        rv == CUBEB_OK,
-        "Could not set the audio stream volume in GraphDriver.cpp");
-      CubebUtils::ReportCubebBackendUsed();
-    } else {
-#ifdef MOZ_WEBRTC
-      StaticMutexAutoUnlock unlock(AudioInputCubeb::Mutex());
-#endif
-      NS_WARNING("Could not create a cubeb stream for MediaStreamGraph, falling back to a SystemClockDriver");
-      // Only report failures when we're not coming from a driver that was
-      // created itself as a fallback driver because of a previous audio driver
-      // failure.
-      if (!mFromFallback) {
-        CubebUtils::ReportCubebStreamInitFailure(firstStream);
-      }
-      MonitorAutoLock lock(GraphImpl()->GetMonitor());
-      FallbackToSystemClockDriver();
-      return true;
-    }
-  }
-  SetMicrophoneActive(mGraphImpl->mInputWanted);
+
+  cubeb_stream_register_device_changed_callback(
+    mAudioStream, AudioCallbackDriver::DeviceChangedCallback_s);
 
-  cubeb_stream_register_device_changed_callback(mAudioStream,
-                                                AudioCallbackDriver::DeviceChangedCallback_s);
+  if (!StartStream()) {
+    LOG(LogLevel::Warning, ("%p: AudioCallbackDriver couldn't start a cubeb stream.", GraphImpl()));
+    return false;
+  }
 
-  if (!StartStream()) {
-    LOG(LogLevel::Warning, ("AudioCallbackDriver couldn't start stream."));
-    return false;
-  }
-
-  LOG(LogLevel::Debug, ("AudioCallbackDriver started."));
-  return true;
+  LOG(LogLevel::Debug, ("%p: AudioCallbackDriver started.", GraphImpl()));
+  return true;
 }
 
 void
 AudioCallbackDriver::Start()
 {
   if (mPreviousDriver) {
     if (mPreviousDriver->AsAudioCallbackDriver()) {
       LOG(LogLevel::Debug, ("Releasing audio driver off main thread."));
@@ -974,22 +958,19 @@ AudioCallbackDriver::DataCallback(const 
 
   if (stateComputedTime < mIterationEnd) {
     LOG(LogLevel::Error, ("Media graph global underrun detected"));
     MOZ_ASSERT_UNREACHABLE("We should not underrun in full duplex");
     mIterationEnd = stateComputedTime;
   }
 
   // Process mic data if any/needed
-  if (aInputBuffer) {
-    if (mAudioInput) { // for this specific input-only or full-duplex stream
-      mAudioInput->NotifyInputData(mGraphImpl, aInputBuffer,
-                                   static_cast<size_t>(aFrames),
-                                   mSampleRate, mInputChannels);
-    }
+  if (aInputBuffer && mInputChannelCount) {
+    GraphImpl()->NotifyInputData(aInputBuffer, static_cast<size_t>(aFrames),
+                                mSampleRate, mInputChannelCount);
   }
 
   bool stillProcessing;
   if (mBuffer.Available()) {
     // We totally filled the buffer (and mScratchBuffer isn't empty).
     // We don't need to run an iteration and if we do so we may overflow.
     stillProcessing = mGraphImpl->OneIteration(nextStateComputedTime);
   } else {
@@ -1119,32 +1100,20 @@ void AudioCallbackDriver::PanOutputIfNee
   }
 #endif
 }
 
 void
 AudioCallbackDriver::DeviceChangedCallback() {
   // Tell the audio engine the device has changed, it might want to reset some
   // state.
-  MonitorAutoLock mon(mGraphImpl->GetMonitor());
-  if (mAudioInput) {
-    mAudioInput->DeviceChanged();
-  }
+  MonitorAutoLock mon(GraphImpl()->GetMonitor());
+  GraphImpl()->DeviceChanged();
 #ifdef XP_MACOSX
-  PanOutputIfNeeded(mMicrophoneActive);
-#endif
-}
-
-void
-AudioCallbackDriver::SetMicrophoneActive(bool aActive)
-{
-  mMicrophoneActive = aActive;
-
-#ifdef XP_MACOSX
-  PanOutputIfNeeded(mMicrophoneActive);
+  PanOutputIfNeeded(!!mInputChannelCount);
 #endif
 }
 
 uint32_t
 AudioCallbackDriver::IterationDuration()
 {
   // The real fix would be to have an API in cubeb to give us the number. Short
   // of that, we approximate it here. bug 1019507
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -375,17 +375,18 @@ enum AsyncCubebOperation {
  */
 class AudioCallbackDriver : public GraphDriver,
                             public MixerCallbackReceiver
 #if defined(XP_WIN)
                             , public audio::DeviceChangeListener
 #endif
 {
 public:
-  explicit AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl);
+  /** If aInputChannelCount is zero, then this driver is output-only. */
+  explicit AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl, uint32_t aInputChannelCount);
   virtual ~AudioCallbackDriver();
 
   void Start() override;
   void Revive() override;
   void RemoveCallback() override;
   void WaitForNextIteration() override;
   void WakeUp() override;
   void Shutdown() override;
@@ -394,61 +395,56 @@ public:
 #endif
 
   /* Static wrapper function cubeb calls back. */
   static long DataCallback_s(cubeb_stream * aStream,
                              void * aUser,
                              const void * aInputBuffer,
                              void * aOutputBuffer,
                              long aFrames);
-  static void StateCallback_s(cubeb_stream* aStream, void * aUser,
-                              cubeb_state aState);
+  static void StateCallback_s(cubeb_stream* aStream, void * aUser, cubeb_state aState);
   static void DeviceChangedCallback_s(void * aUser);
   /* This function is called by the underlying audio backend when a refill is
    * needed. This is what drives the whole graph when it is used to output
    * audio. If the return value is exactly aFrames, this function will get
    * called again. If it is less than aFrames, the stream will go in draining
    * mode, and this function will not be called again. */
-  long DataCallback(const AudioDataValue* aInputBuffer, AudioDataValue* aOutputBuffer, long aFrames);
+  long DataCallback(const AudioDataValue* aInputBuffer,
+                    AudioDataValue* aOutputBuffer,
+                    long aFrames);
   /* This function is called by the underlying audio backend, but is only used
    * for informational purposes at the moment. */
   void StateCallback(cubeb_state aState);
   /* This is an approximation of the number of millisecond there are between two
    * iterations of the graph. */
   uint32_t IterationDuration() override;
 
   /* This function gets called when the graph has produced the audio frames for
    * this iteration. */
   void MixerCallback(AudioDataValue* aMixedBuffer,
                      AudioSampleFormat aFormat,
                      uint32_t aChannels,
                      uint32_t aFrames,
                      uint32_t aSampleRate) override;
 
-  // These are invoked on the MSG thread (we don't call this if not LIFECYCLE_RUNNING)
-  virtual void SetInputListener(AudioDataListener *aListener) {
-    MOZ_ASSERT(OnThread());
-    mAudioInput = aListener;
-  }
-  // XXX do we need the param?  probably no
-  virtual void RemoveInputListener(AudioDataListener *aListener) {
-    MOZ_ASSERT(OnThread());
-    mAudioInput = nullptr;
-  }
-
   AudioCallbackDriver* AsAudioCallbackDriver() override {
     return this;
   }
 
   uint32_t OutputChannelCount()
   {
     MOZ_ASSERT(mOutputChannels != 0 && mOutputChannels <= 8);
     return mOutputChannels;
   }
 
+  uint32_t InputChannelCount()
+  {
+    return mInputChannelCount;
+  }
+
   /* Enqueue a promise that is going to be resolved when a specific operation
    * occurs on the cubeb stream. */
   void EnqueueStreamAndPromiseForOperation(MediaStream* aStream,
                                          void* aPromise,
                                          dom::AudioContextOperation aOperation);
 
   /**
    * Whether the audio callback is processing. This is for asserting only.
@@ -456,20 +452,16 @@ public:
   bool InCallback();
 
   bool OnThread() override { return !mStarted || InCallback(); }
 
   /* Whether the underlying cubeb stream has been started. See comment for
    * mStarted for details. */
   bool IsStarted();
 
-  /* Tell the driver whether this process is using a microphone or not. This is
-   * thread safe. */
-  void SetMicrophoneActive(bool aActive);
-
   void CompleteAudioContextOperations(AsyncCubebOperation aOperation);
 
   /* Fetch, or create a shared thread pool with up to one thread for
    * AsyncCubebTask. */
   SharedThreadPool* GetInitShutdownThread();
 
 private:
   /**
@@ -505,17 +497,17 @@ private:
   /* cubeb stream for this graph. This is guaranteed to be non-null after Init()
    * has been called, and is synchronized internaly. */
   nsAutoRef<cubeb_stream> mAudioStream;
   /* The sample rate for the aforementionned cubeb stream. This is set on
    * initialization and can be read safely afterwards. */
   uint32_t mSampleRate;
   /* The number of input channels from cubeb.  Should be set before opening cubeb
    * and then be static. */
-  uint32_t mInputChannels;
+  uint32_t mInputChannelCount;
   /* Approximation of the time between two callbacks. This is used to schedule
    * video frames. This is in milliseconds. Only even used (after
    * inizatialization) on the audio callback thread. */
   uint32_t mIterationDurationMS;
   /* cubeb_stream_init calls the audio callback to prefill the buffers. The
    * previous driver has to be kept alive until the audio stream has been
    * started, because it is responsible to call cubeb_stream_start, so we delay
    * the cleanup of the previous driver until it has started the audio stream.
@@ -525,18 +517,16 @@ private:
    * This is written on the previous driver's thread (if switching) or main
    * thread (if this driver is the first one).
    * This is read on previous driver's thread (during callbacks from
    * cubeb_stream_init) and the audio thread (when switching away from this
    * driver back to a SystemClockDriver).
    * This is synchronized by the Graph's monitor.
    * */
   bool mStarted;
-  /* Listener for mic input, if any. */
-  RefPtr<AudioDataListener> mAudioInput;
 
   struct AutoInCallback
   {
     explicit AutoInCallback(AudioCallbackDriver* aDriver);
     ~AutoInCallback();
     AudioCallbackDriver* mDriver;
   };
 
@@ -546,20 +536,16 @@ private:
   /* This must be accessed with the graph monitor held. */
   AutoTArray<StreamAndPromiseForOperation, 1> mPromisesForOperation;
   /* Used to queue us to add the mixer callback on first run. */
   bool mAddedMixer;
 
   /* This is atomic and is set by the audio callback thread. It can be read by
    * any thread safely. */
   Atomic<bool> mInCallback;
-  /**
-   * True if microphone is being used by this process. This is synchronized by
-   * the graph's monitor. */
-  Atomic<bool> mMicrophoneActive;
   /* Indication of whether a fallback SystemClockDriver should be started if
    * StateCallback() receives an error.  No mutex need be held during access.
    * The transition to true happens before cubeb_stream_start() is called.
    * After transitioning to false on the last DataCallback(), the stream is
    * not accessed from another thread until the graph thread either signals
    * main thread cleanup or dispatches an event to switch to another
    * driver. */
   bool mShouldFallbackIfError;
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -399,17 +399,17 @@ MediaStreamGraphImpl::UpdateStreamOrder(
     switching = CurrentDriver()->Switching();
   }
 
   if (audioTrackPresent && mRealtime &&
       !CurrentDriver()->AsAudioCallbackDriver() &&
       !switching) {
     MonitorAutoLock mon(mMonitor);
     if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
-      AudioCallbackDriver* driver = new AudioCallbackDriver(this);
+      AudioCallbackDriver* driver = new AudioCallbackDriver(this, AudioInputChannelCount());
       CurrentDriver()->SwitchAtNextIteration(driver);
     }
   }
 
   if (!mStreamOrderDirty) {
     return;
   }
 
@@ -654,17 +654,17 @@ MediaStreamGraphImpl::CreateOrDestroyAud
         MonitorAutoLock lock(mMonitor);
         switching = CurrentDriver()->Switching();
       }
 
       if (!CurrentDriver()->AsAudioCallbackDriver() &&
           !switching) {
         MonitorAutoLock mon(mMonitor);
         if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
-          AudioCallbackDriver* driver = new AudioCallbackDriver(this);
+          AudioCallbackDriver* driver = new AudioCallbackDriver(this, AudioInputChannelCount());
           CurrentDriver()->SwitchAtNextIteration(driver);
         }
       }
     }
   }
 
   for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
     if (!audioOutputStreamsFound[i]) {
@@ -784,173 +784,220 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
                                      mMixer,
-                                     AudioChannelCount(),
+                                     AudioOutputChannelCount(),
                                      mSampleRate);
   }
   return ticksWritten;
 }
 
 void
-MediaStreamGraphImpl::OpenAudioInputImpl(int aID,
+MediaStreamGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID,
                                          AudioDataListener *aListener)
 {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
-  // Bug 1238038 Need support for multiple mics at once
-  if (mInputDeviceUsers.Count() > 0 &&
-      !mInputDeviceUsers.Get(aListener, nullptr)) {
-    NS_ASSERTION(false, "Input from multiple mics not yet supported; bug 1238038");
-    // Need to support separate input-only AudioCallback drivers; they'll
-    // call us back on "other" threads.  We will need to echo-cancel them, though.
+  // Only allow one device per MSG (hence, per document), but allow opening a
+  // device multiple times
+  nsTArray<RefPtr<AudioDataListener>>& listeners = mInputDeviceUsers.GetOrInsert(aID);
+  // We don't support opening multiple input device in a graph for now.
+  if (listeners.IsEmpty() && mInputDeviceUsers.Count() > 1) {
+    mInputDeviceUsers.Remove(aID); // drop the empty entry GetOrInsert just added
+    return;
   }
-  mInputWanted = true;
-
-  // Add to count of users for this ID.
-  // XXX Since we can't rely on IDs staying valid (ugh), use the listener as
-  // a stand-in for the ID.  Fix as part of support for multiple-captures
-  // (Bug 1238038)
-  uint32_t count = 0;
-  mInputDeviceUsers.Get(aListener, &count); // ok if this fails
-  count++;
-  mInputDeviceUsers.Put(aListener, count); // creates a new entry in the hash if needed
-
-  if (count == 1) { // first open for this listener
-    // aID is a cubeb_devid, and we assume that opaque ptr is valid until
-    // we close cubeb.
+
+  MOZ_ASSERT(!listeners.Contains(aListener), "Don't add a listener twice.");
+
+  listeners.AppendElement(aListener);
+
+  if (listeners.Length() == 1) { // first open for this device
     mInputDeviceID = aID;
-    mAudioInputs.AppendElement(aListener); // always monitor speaker data
-
     // Switch Drivers since we're adding input (to input-only or full-duplex)
     MonitorAutoLock mon(mMonitor);
     if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
-      AudioCallbackDriver* driver = new AudioCallbackDriver(this);
-      driver->SetMicrophoneActive(true);
+      AudioCallbackDriver* driver = new AudioCallbackDriver(this, AudioInputChannelCount());
       LOG(
         LogLevel::Debug,
         ("OpenAudioInput: starting new AudioCallbackDriver(input) %p", driver));
       LOG(
         LogLevel::Debug,
         ("OpenAudioInput: starting new AudioCallbackDriver(input) %p", driver));
       driver->SetInputListener(aListener);
       CurrentDriver()->SwitchAtNextIteration(driver);
    } else {
      LOG(LogLevel::Error, ("OpenAudioInput in shutdown!"));
-     LOG(LogLevel::Debug, ("OpenAudioInput in shutdown!"));
-     NS_ASSERTION(false, "Can't open cubeb inputs in shutdown");
+     MOZ_ASSERT(false, "Can't open cubeb inputs in shutdown");
     }
   }
 }
 
 nsresult
-MediaStreamGraphImpl::OpenAudioInput(int aID,
+MediaStreamGraphImpl::OpenAudioInput(CubebUtils::AudioDeviceID aID,
                                      AudioDataListener *aListener)
 {
   // So, so, so annoying.  Can't AppendMessage except on Mainthread
   if (!NS_IsMainThread()) {
     RefPtr<nsIRunnable> runnable =
       WrapRunnable(this,
                    &MediaStreamGraphImpl::OpenAudioInput,
                    aID,
                    RefPtr<AudioDataListener>(aListener));
     mAbstractMainThread->Dispatch(runnable.forget());
     return NS_OK;
   }
   class Message : public ControlMessage {
   public:
-    Message(MediaStreamGraphImpl *aGraph, int aID,
+    Message(MediaStreamGraphImpl *aGraph, CubebUtils::AudioDeviceID aID,
             AudioDataListener *aListener) :
       ControlMessage(nullptr), mGraph(aGraph), mID(aID), mListener(aListener) {}
     void Run() override
     {
       mGraph->OpenAudioInputImpl(mID, mListener);
     }
     MediaStreamGraphImpl *mGraph;
-    int mID;
+    CubebUtils::AudioDeviceID mID;
     RefPtr<AudioDataListener> mListener;
   };
   // XXX Check not destroyed!
   this->AppendMessage(MakeUnique<Message>(this, aID, aListener));
   return NS_OK;
 }
 
 void
-MediaStreamGraphImpl::CloseAudioInputImpl(AudioDataListener *aListener)
+MediaStreamGraphImpl::CloseAudioInputImpl(CubebUtils::AudioDeviceID aID, AudioDataListener* aListener)
 {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
-  uint32_t count;
-  DebugOnly<bool> result = mInputDeviceUsers.Get(aListener, &count);
-  MOZ_ASSERT(result);
-  if (--count > 0) {
-    mInputDeviceUsers.Put(aListener, count);
-    return; // still in use
+  // It is possible to not know the ID here, find it first.
+  if (aID == nullptr) {
+    for (auto iter = mInputDeviceUsers.Iter(); !iter.Done() && !aID; iter.Next()) {
+      if (iter.Data().Contains(aListener)) {
+        aID = iter.Key();
+      }
+    }
+    MOZ_ASSERT(aID != nullptr, "Closing an audio input that was not opened.");
   }
-  mInputDeviceUsers.Remove(aListener);
-  mInputDeviceID = -1;
-  mInputWanted = false;
-  AudioCallbackDriver *driver = CurrentDriver()->AsAudioCallbackDriver();
-  if (driver) {
-    driver->RemoveInputListener(aListener);
+
+  nsTArray<RefPtr<AudioDataListener>>* listeners = mInputDeviceUsers.GetValue(aID);
+
+  MOZ_ASSERT(listeners);
+  DebugOnly<bool> wasPresent = listeners->RemoveElement(aListener);
+  MOZ_ASSERT(wasPresent);
+  // Check whether there is still a consumer for this audio input device
+  if (!listeners->IsEmpty()) {
+    return;
   }
-  mAudioInputs.RemoveElement(aListener);
+
+  mInputDeviceID = nullptr; // reset to default
+  mInputDeviceUsers.Remove(aID);
 
   // Switch Drivers since we're adding or removing an input (to nothing/system or output only)
   bool audioTrackPresent = AudioTrackPresent();
 
   MonitorAutoLock mon(mMonitor);
   if (LifecycleStateRef() == LIFECYCLE_RUNNING) {
     GraphDriver* driver;
     if (audioTrackPresent) {
       // We still have audio output
-      LOG(LogLevel::Debug, ("CloseInput: output present (AudioCallback)"));
-
-      driver = new AudioCallbackDriver(this);
+      LOG(LogLevel::Debug, ("%p: CloseInput: output present (AudioCallback)", this));
+
+      driver = new AudioCallbackDriver(this, AudioInputChannelCount());
       CurrentDriver()->SwitchAtNextIteration(driver);
     } else if (CurrentDriver()->AsAudioCallbackDriver()) {
       LOG(LogLevel::Debug,
           ("CloseInput: no output present (SystemClockCallback)"));
 
       driver = new SystemClockDriver(this);
       CurrentDriver()->SwitchAtNextIteration(driver);
     } // else SystemClockDriver->SystemClockDriver, no switch
   }
 }
 
 void
-MediaStreamGraphImpl::CloseAudioInput(AudioDataListener *aListener)
+MediaStreamGraphImpl::CloseAudioInput(CubebUtils::AudioDeviceID aID, AudioDataListener* aListener)
 {
   // So, so, so annoying.  Can't AppendMessage except on Mainthread
   if (!NS_IsMainThread()) {
     RefPtr<nsIRunnable> runnable =
       WrapRunnable(this,
                    &MediaStreamGraphImpl::CloseAudioInput,
+                   aID,
                    RefPtr<AudioDataListener>(aListener));
     mAbstractMainThread->Dispatch(runnable.forget());
     return;
   }
   class Message : public ControlMessage {
   public:
-    Message(MediaStreamGraphImpl *aGraph, AudioDataListener *aListener) :
-      ControlMessage(nullptr), mGraph(aGraph), mListener(aListener) {}
+    Message(MediaStreamGraphImpl *aGraph, CubebUtils::AudioDeviceID aID, AudioDataListener *aListener) :
+      ControlMessage(nullptr), mGraph(aGraph), mID(aID), mListener(aListener) {}
     void Run() override
     {
-      mGraph->CloseAudioInputImpl(mListener);
+      mGraph->CloseAudioInputImpl(mID, mListener);
     }
     MediaStreamGraphImpl *mGraph;
+    CubebUtils::AudioDeviceID mID;
     RefPtr<AudioDataListener> mListener;
   };
-  this->AppendMessage(MakeUnique<Message>(this, aListener));
+  this->AppendMessage(MakeUnique<Message>(this, aID, aListener));
 }
 
 // All AudioInput listeners get the same speaker data (at least for now).
 void
-MediaStreamGraph::NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
-                                   TrackRate aRate, uint32_t aChannels)
+MediaStreamGraphImpl::NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
+                                       TrackRate aRate, uint32_t aChannels)
+{
+  if (!mInputDeviceID) {
+    return;
+  }
+  // When/if we decide to support multiple input device per graph, this needs
+  // loop over them.
+  nsTArray<RefPtr<AudioDataListener>>* listeners = mInputDeviceUsers.GetValue(mInputDeviceID);
+  MOZ_ASSERT(listeners);
+  for (auto& listener : *listeners) {
+    listener->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels);
+  }
+}
+
+void
+MediaStreamGraphImpl::NotifyInputData(const AudioDataValue* aBuffer, size_t aFrames,
+                                      TrackRate aRate, uint32_t aChannels)
 {
-  for (auto& listener : mAudioInputs) {
-    listener->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels);
+#ifdef DEBUG
+  MonitorAutoLock lock(mMonitor);
+  // Either we have an audio input device, or we just removed the audio input
+  // this iteration, and we're switching back to an output-only driver next
+  // iteration.
+  MOZ_ASSERT(mInputDeviceID || CurrentDriver()->Switching());
+#endif
+  nsTArray<RefPtr<AudioDataListener>>* listeners = mInputDeviceUsers.GetValue(mInputDeviceID);
+  MOZ_ASSERT(listeners);
+  for (auto& listener : *listeners) {
+    listener->NotifyInputData(this, aBuffer, aFrames, aRate, aChannels);
+  }
+}
+
+void MediaStreamGraphImpl::DeviceChanged()
+{
+  if (!mInputDeviceID) {
+    return;
+  }
+  nsTArray<RefPtr<AudioDataListener>>* listeners = mInputDeviceUsers.GetValue(mInputDeviceID);
+  for (auto& listener : *listeners) {
+    listener->DeviceChanged();
+  }
+}
+
+void MediaStreamGraphImpl::ReevaluateInputDevice()
+{
+  MOZ_ASSERT(OnGraphThread());
+  AudioCallbackDriver* audioCallbackDriver = CurrentDriver()->AsAudioCallbackDriver();
+  MOZ_ASSERT(audioCallbackDriver);
+  if (audioCallbackDriver->InputChannelCount() != AudioInputChannelCount()) {
+    AudioCallbackDriver* newDriver = new AudioCallbackDriver(this, AudioInputChannelCount());
+    {
+      MonitorAutoLock lock(mMonitor);
+      CurrentDriver()->SwitchAtNextIteration(newDriver);
+    }
   }
 }
 
 bool
 MediaStreamGraph::OnGraphThreadOrNotRunning() const
 {
   // either we're on the right thread (and calling CurrentDriver() is safe),
   // or we're going to fail the assert anyway, so don't cross-check
@@ -2696,40 +2743,42 @@ SourceMediaStream::SourceMediaStream()
   , mUpdateKnownTracksTime(0)
   , mPullEnabled(false)
   , mFinishPending(false)
   , mNeedsMixing(false)
 {
 }
 
 nsresult
-SourceMediaStream::OpenAudioInput(int aID,
+SourceMediaStream::OpenAudioInput(CubebUtils::AudioDeviceID aID,
                                   AudioDataListener *aListener)
 {
   if (GraphImpl()) {
     mInputListener = aListener;
     return GraphImpl()->OpenAudioInput(aID, aListener);
   }
   return NS_ERROR_FAILURE;
 }
 
 void
-SourceMediaStream::CloseAudioInput()
+SourceMediaStream::CloseAudioInput(CubebUtils::AudioDeviceID aID,
+                                   AudioDataListener* aListener)
 {
+  MOZ_ASSERT(mInputListener == aListener);
   // Destroy() may have run already and cleared this
   if (GraphImpl() && mInputListener) {
-    GraphImpl()->CloseAudioInput(mInputListener);
+    GraphImpl()->CloseAudioInput(aID, aListener);
   }
   mInputListener = nullptr;
 }
 
 void
 SourceMediaStream::DestroyImpl()
 {
-  CloseAudioInput();
+  CloseAudioInput(nullptr, mInputListener);
 
   GraphImpl()->AssertOnGraphThreadOrNotRunning();
   for (int32_t i = mConsumers.Length() - 1; i >= 0; --i) {
     // Disconnect before we come under mMutex's lock since it can call back
     // through RemoveDirectTrackListenerImpl() and deadlock.
     mConsumers[i]->Disconnect();
   }
 
@@ -3306,32 +3355,16 @@ SourceMediaStream::HasPendingAudioTrack(
       audioTrackPresent = true;
       break;
     }
   }
 
   return audioTrackPresent;
 }
 
-bool
-SourceMediaStream::OpenNewAudioCallbackDriver(AudioDataListener * aListener)
-{
-  AudioCallbackDriver* nextDriver = new AudioCallbackDriver(GraphImpl());
-  nextDriver->SetInputListener(aListener);
-  {
-    MonitorAutoLock lock(GraphImpl()->GetMonitor());
-    MOZ_ASSERT(GraphImpl()->LifecycleStateRef() ==
-               MediaStreamGraphImpl::LifecycleState::LIFECYCLE_RUNNING);
-    GraphImpl()->CurrentDriver()->SwitchAtNextIteration(nextDriver);
-  }
-
-  return true;
-}
-
-
 void
 MediaInputPort::Init()
 {
   LOG(LogLevel::Debug,
       ("Adding MediaInputPort %p (from %p to %p) to the graph",
        this,
        mSource,
        mDest));
@@ -3577,20 +3610,18 @@ ProcessedMediaStream::DestroyImpl()
   // SetStreamOrderDirty(), for other reasons.
 }
 
 MediaStreamGraphImpl::MediaStreamGraphImpl(GraphDriverType aDriverRequested,
                                            TrackRate aSampleRate,
                                            AbstractThread* aMainThread)
   : MediaStreamGraph(aSampleRate)
   , mPortCount(0)
-  , mInputWanted(false)
-  , mInputDeviceID(-1)
-  , mOutputWanted(true)
-  , mOutputDeviceID(-1)
+  , mInputDeviceID(nullptr)
+  , mOutputDeviceID(nullptr)
   , mNeedAnotherIteration(false)
   , mGraphDriverAsleep(false)
   , mMonitor("MediaStreamGraphImpl")
   , mLifecycleState(LIFECYCLE_THREAD_NOT_STARTED)
   , mEndTime(GRAPH_TIME_MAX)
   , mForceShutDown(false)
   , mPostedRunInStableStateEvent(false)
   , mDetectedNotRunning(false)
@@ -3604,17 +3635,18 @@ MediaStreamGraphImpl::MediaStreamGraphIm
   , mSelfRef(this)
   , mOutputChannels(std::min<uint32_t>(8, CubebUtils::MaxNumberOfChannels()))
 #ifdef DEBUG
   , mCanRunMessagesSynchronously(false)
 #endif
 {
   if (mRealtime) {
     if (aDriverRequested == AUDIO_THREAD_DRIVER) {
-      AudioCallbackDriver* driver = new AudioCallbackDriver(this);
+      // Always start with zero input channels.
+      AudioCallbackDriver* driver = new AudioCallbackDriver(this, 0);
       mDriver = driver;
     } else {
       mDriver = new SystemClockDriver(this);
     }
   } else {
     mDriver = new OfflineClockDriver(this, MEDIA_GRAPH_TARGET_PERIOD_MS);
   }
 
@@ -3668,19 +3700,18 @@ MediaStreamGraph::GetInstanceIfExists(ns
 
 MediaStreamGraph*
 MediaStreamGraph::GetInstance(MediaStreamGraph::GraphDriverType aGraphDriverRequested,
                               nsPIDOMWindowInner* aWindow,
                               TrackRate aSampleRate)
 {
   MOZ_ASSERT(NS_IsMainThread(), "Main thread only");
 
-  TrackRate sampleRate = aSampleRate ? aSampleRate : CubebUtils::PreferredSampleRate();
   MediaStreamGraphImpl* graph =
-    static_cast<MediaStreamGraphImpl*>(GetInstanceIfExists(aWindow, sampleRate));
+    static_cast<MediaStreamGraphImpl*>(GetInstanceIfExists(aWindow, aSampleRate));
 
   if (!graph) {
     if (!gMediaStreamGraphShutdownBlocker) {
 
       class Blocker : public media::ShutdownBlocker
       {
       public:
         Blocker()
@@ -3716,20 +3747,20 @@ MediaStreamGraph::GetInstance(MediaStrea
     AbstractThread* mainThread;
     if (aWindow) {
       mainThread = aWindow->AsGlobal()->AbstractMainThreadFor(TaskCategory::Other);
     } else {
       // Uncommon case, only for some old configuration of webspeech.
       mainThread = AbstractThread::MainThread();
     }
     graph = new MediaStreamGraphImpl(aGraphDriverRequested,
-                                     sampleRate,
+                                     aSampleRate ? aSampleRate : CubebUtils::PreferredSampleRate(),
                                      mainThread);
 
-    uint32_t hashkey = WindowToHash(aWindow, sampleRate);
+    uint32_t hashkey = WindowToHash(aWindow, aSampleRate);
     gGraphs.Put(hashkey, graph);
 
     LOG(LogLevel::Debug,
-       ("Starting up MediaStreamGraph %p for window %p for sample rate %d", graph, aWindow, sampleRate));
+       ("Starting up MediaStreamGraph %p for window %p for sample rate %d", graph, aWindow, graph->GraphRate()));
   }
 
   return graph;
 }
@@ -4127,17 +4158,17 @@ MediaStreamGraphImpl::ApplyAudioContextO
   // anyways, but doing this now save some time.
   if (aOperation == AudioContextOperation::Resume) {
     if (!CurrentDriver()->AsAudioCallbackDriver()) {
       AudioCallbackDriver* driver;
       if (switching) {
         MOZ_ASSERT(nextDriver->AsAudioCallbackDriver());
         driver = nextDriver->AsAudioCallbackDriver();
       } else {
-        driver = new AudioCallbackDriver(this);
+        driver = new AudioCallbackDriver(this, AudioInputChannelCount());
         MonitorAutoLock lock(mMonitor);
         CurrentDriver()->SwitchAtNextIteration(driver);
       }
       driver->EnqueueStreamAndPromiseForOperation(aDestinationStream,
           aPromise, aOperation);
     } else {
       // We are resuming a context, but we are already using an
       // AudioCallbackDriver, we can resolve the promise now.
@@ -4251,18 +4282,18 @@ MediaStreamGraph::StartNonRealtimeProces
 
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   NS_ASSERTION(!graph->mRealtime, "non-realtime only");
 
   if (graph->mNonRealtimeProcessing)
     return;
 
   graph->mEndTime =
-    graph->RoundUpToEndOfAudioBlock(graph->mStateComputedTime +
-                                    aTicksToProcess);
+    graph->RoundUpToNextAudioBlock(graph->mStateComputedTime +
+                                   aTicksToProcess - 1);
   graph->mNonRealtimeProcessing = true;
   graph->EnsureRunInStableState();
 }
 
 void
 ProcessedMediaStream::AddInput(MediaInputPort* aPort)
 {
   MediaStream* s = aPort->GetSource();
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -106,16 +106,21 @@ public:
    * Input data from a microphone (or other audio source.  This is not
    * guaranteed to be in any particular size chunks.
    */
   virtual void NotifyInputData(MediaStreamGraph* aGraph,
                                const AudioDataValue* aBuffer, size_t aFrames,
                                TrackRate aRate, uint32_t aChannels) = 0;
 
   /**
+   * Number of audio input channels.
+   */
+  virtual uint32_t InputChannelCount() = 0;
+
+  /**
    * Called when the underlying audio device has changed.
    */
   virtual void DeviceChanged() = 0;
 };
 
 class AudioDataListener : public AudioDataListenerInterface {
 protected:
   // Protected destructor, to discourage deletion outside of Release():
@@ -692,20 +697,21 @@ public:
    * it is still possible for a NotifyPull to occur.
    */
   void SetPullEnabled(bool aEnabled);
 
   // Users of audio inputs go through the stream so it can track when the
   // last stream referencing an input goes away, so it can close the cubeb
   // input.  Also note: callable on any thread (though it bounces through
   // MainThread to set the command if needed).
-  nsresult OpenAudioInput(int aID,
+  nsresult OpenAudioInput(CubebUtils::AudioDeviceID aID,
                           AudioDataListener *aListener);
   // Note: also implied when Destroy() happens
-  void CloseAudioInput();
+  void CloseAudioInput(CubebUtils::AudioDeviceID aID,
+                       AudioDataListener* aListener);
 
   // MediaStreamGraph thread only
   void DestroyImpl() override;
 
   // Call these on any thread.
   /**
    * Call all MediaStreamListeners to request new data via the NotifyPull API
    * (if enabled).
@@ -825,18 +831,16 @@ public:
   bool HasPendingAudioTrack();
 
   TimeStamp GetStreamTracksStrartTimeStamp()
   {
     MutexAutoLock lock(mMutex);
     return mStreamTracksStartTimeStamp;
   }
 
-  bool OpenNewAudioCallbackDriver(AudioDataListener *aListener);
-
   // XXX need a Reset API
 
   friend class MediaStreamGraphImpl;
 
 protected:
   enum TrackCommands : uint32_t;
 
   virtual ~SourceMediaStream();
@@ -1311,23 +1315,20 @@ public:
 
   // Return the correct main thread for this graph. This always returns
   // something that is valid. Thread safe.
   AbstractThread* AbstractMainThread();
 
   // Idempotent
   static void DestroyNonRealtimeInstance(MediaStreamGraph* aGraph);
 
-  virtual nsresult OpenAudioInput(int aID,
-                                  AudioDataListener *aListener)
-  {
-    return NS_ERROR_FAILURE;
-  }
-  virtual void CloseAudioInput(AudioDataListener *aListener) {}
-
+  virtual nsresult OpenAudioInput(CubebUtils::AudioDeviceID aID,
+                                  AudioDataListener *aListener) = 0;
+  virtual void CloseAudioInput(CubebUtils::AudioDeviceID aID,
+                               AudioDataListener *aListener) = 0;
   // Control API.
   /**
    * Create a stream that a media decoder (or some other source of
    * media data, such as a camera) can write to.
    */
   SourceMediaStream* CreateSourceStream();
   /**
    * Create a stream that will form the union of the tracks of its input
@@ -1395,23 +1396,16 @@ public:
   TrackRate GraphRate() const { return mSampleRate; }
 
   void RegisterCaptureStreamForWindow(uint64_t aWindowId,
                                       ProcessedMediaStream* aCaptureStream);
   void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
   already_AddRefed<MediaInputPort> ConnectToCaptureStream(
     uint64_t aWindowId, MediaStream* aMediaStream);
 
-  /**
-   * Data going to the speakers from the GraphDriver's DataCallback
-   * to notify any listeners (for echo cancellation).
-   */
-  void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
-                        TrackRate aRate, uint32_t aChannels);
-
   void AssertOnGraphThreadOrNotRunning() const
   {
     MOZ_ASSERT(OnGraphThreadOrNotRunning());
   }
 
 protected:
   explicit MediaStreamGraph(TrackRate aSampleRate)
     : mSampleRate(aSampleRate)
@@ -1432,18 +1426,13 @@ protected:
   nsTArray<nsCOMPtr<nsIRunnable> > mPendingUpdateRunnables;
 
   /**
    * Sample rate at which this graph runs. For real time graphs, this is
    * the rate of the audio mixer. For offline graphs, this is the rate specified
    * at construction.
    */
   TrackRate mSampleRate;
-
-  /**
-   * CloseAudioInput is async, so hold a reference here.
-   */
-  nsTArray<RefPtr<AudioDataListener>> mAudioInputs;
 };
 
 } // namespace mozilla
 
 #endif /* MOZILLA_MEDIASTREAMGRAPH_H_ */
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -12,17 +12,17 @@
 #include "GraphDriver.h"
 #include "Latency.h"
 #include "mozilla/Atomics.h"
 #include "mozilla/Monitor.h"
 #include "mozilla/Services.h"
 #include "mozilla/TimeStamp.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/WeakPtr.h"
-#include "nsDataHashtable.h"
+#include "nsDataHashtable.h"
 #include "nsIMemoryReporter.h"
 #include "nsINamed.h"
 #include "nsIRunnable.h"
 #include "nsIThread.h"
 #include "nsITimer.h"
 
 namespace mozilla {
 
@@ -377,22 +377,38 @@ public:
    * to the audio output stream. Returns the number of frames played.
    */
   StreamTime PlayAudio(MediaStream* aStream);
   /**
    * No more data will be forthcoming for aStream. The stream will end
    * at the current buffer end point. The StreamTracks's tracks must be
    * explicitly set to finished by the caller.
    */
-  void OpenAudioInputImpl(int aID,
+  void OpenAudioInputImpl(CubebUtils::AudioDeviceID aID,
                           AudioDataListener *aListener);
-  virtual nsresult OpenAudioInput(int aID,
+  virtual nsresult OpenAudioInput(CubebUtils::AudioDeviceID aID,
                                   AudioDataListener *aListener) override;
-  void CloseAudioInputImpl(AudioDataListener *aListener);
-  virtual void CloseAudioInput(AudioDataListener *aListener) override;
+  void CloseAudioInputImpl(CubebUtils::AudioDeviceID aID, AudioDataListener *aListener);
+  virtual void CloseAudioInput(CubebUtils::AudioDeviceID aID, AudioDataListener *aListener) override;
+
+  /*
+   * Called on the graph thread when the input device settings should be
+   * reevaluated, for example, if the channel count of the input stream should
+   * be changed . */
+  void ReevaluateInputDevice();
+
+  /* Called on the graph thread when there is new output data for listeners.
+   * This is the mixed audio output of this MediaStreamGraph. */
+  void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
+                        TrackRate aRate, uint32_t aChannels);
+  /* Called on the graph thread when there is new input data for listeners. This
+   * is the raw audio input of this MediaStreamGraph. */
+  void NotifyInputData(const AudioDataValue* aBuffer, size_t aFrames,
+                       TrackRate aRate, uint32_t aChannels);
+  void DeviceChanged();
 
   /**
    * Compute how much stream data we would like to buffer for aStream.
    */
   StreamTime GetDesiredBufferEnd(MediaStream* aStream);
   /**
    * Returns true when there are no active streams.
    */
@@ -421,21 +437,47 @@ public:
    * Mark the media stream order as dirty.
    */
   void SetStreamOrderDirty()
   {
     MOZ_ASSERT(OnGraphThreadOrNotRunning());
     mStreamOrderDirty = true;
   }
 
-  uint32_t AudioChannelCount() const
+  uint32_t AudioOutputChannelCount() const
   {
     return mOutputChannels;
   }
 
+  uint32_t AudioInputChannelCount()
+  {
+    MOZ_ASSERT(OnGraphThreadOrNotRunning());
+
+    if (!mInputDeviceID) {
+      MOZ_ASSERT(mInputDeviceUsers.Count() == 0);
+      return 0;
+    }
+    uint32_t maxInputChannels = 0;
+    // When/if we decide to support multiple input device per graph, this needs
+    // loop over them.
+    nsTArray<RefPtr<AudioDataListener>>* listeners =
+      mInputDeviceUsers.GetValue(mInputDeviceID);
+    MOZ_ASSERT(listeners);
+    for (auto& listener : *listeners) {
+      maxInputChannels =
+        std::max(maxInputChannels, listener->InputChannelCount());
+    }
+    return maxInputChannels;
+  }
+
+  CubebUtils::AudioDeviceID InputDeviceID()
+  {
+    return mInputDeviceID;
+  }
+
   double MediaTimeToSeconds(GraphTime aTime) const
   {
     NS_ASSERTION(aTime > -STREAM_TIME_MAX && aTime <= STREAM_TIME_MAX,
                  "Bad time");
     return static_cast<double>(aTime)/GraphRate();
   }
 
   GraphTime SecondsToMediaTime(double aS) const
@@ -619,26 +661,30 @@ public:
    */
   TimeStamp mLastMainThreadUpdate;
   /**
    * Number of active MediaInputPorts
    */
   int32_t mPortCount;
 
   /**
-   * Devices to use for cubeb input & output, or NULL for no input (void*),
-   * and boolean to control if we want input/output
+   * Devices to use for cubeb input & output, or nullptr for the default
+   * device. A MediaStreamGraph always has an output (even if silent).
+   * If `mInputDeviceUsers.Count() != 0`, this MediaStreamGraph wants audio
+   * input. NOTE(review): nullptr is also cubeb's ID for the *default* input
+   * device, so mInputDeviceID alone cannot distinguish "no input" from
+   * "default-device input"; check mInputDeviceUsers instead — TODO confirm.
+   * Channel counts: AudioInputChannelCount() / AudioOutputChannelCount().
    */
-  bool mInputWanted;
-  int mInputDeviceID;
-  bool mOutputWanted;
-  int mOutputDeviceID;
+  CubebUtils::AudioDeviceID mInputDeviceID;
+  CubebUtils::AudioDeviceID mOutputDeviceID;
-  // Maps AudioDataListeners to a usecount of streams using the listener
-  // so we can know when it's no longer in use.
+  // Maps an input device ID (nullptr meaning the default device) to the list
+  // of AudioDataListeners using it, so we know when it's no longer in use.
-  nsDataHashtable<nsPtrHashKey<AudioDataListener>, uint32_t> mInputDeviceUsers;
+  nsDataHashtable<nsVoidPtrHashKey,
+                   nsTArray<RefPtr<AudioDataListener>>> mInputDeviceUsers;
 
   // True if the graph needs another iteration after the current iteration.
   Atomic<bool> mNeedAnotherIteration;
   // GraphDriver may need a WakeUp() if something changes
   Atomic<bool> mGraphDriverAsleep;
 
   // mMonitor guards the data below.
   // MediaStreamGraph normally does its work without holding mMonitor, so it is
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -25,106 +25,100 @@
 #endif
 
 static mozilla::LazyLogModule sGetUserMediaLog("GetUserMedia");
 #undef LOG
 #define LOG(args) MOZ_LOG(sGetUserMediaLog, mozilla::LogLevel::Debug, args)
 
 namespace mozilla {
 
-// statics from AudioInputCubeb
-nsTArray<int>* AudioInputCubeb::mDeviceIndexes;
-int AudioInputCubeb::mDefaultDevice = -1;
-nsTArray<nsCString>* AudioInputCubeb::mDeviceNames;
-cubeb_device_collection AudioInputCubeb::mDevices = { nullptr, 0 };
-bool AudioInputCubeb::mAnyInUse = false;
-StaticMutex AudioInputCubeb::sMutex;
-uint32_t AudioInputCubeb::sUserChannelCount = 0;
-
-// AudioDeviceID is an annoying opaque value that's really a string
-// pointer, and is freed when the cubeb_device_collection is destroyed
-
-void AudioInputCubeb::UpdateDeviceList()
-{
-  // We keep all the device names, but wipe the mappings and rebuild them.
-  // Do this first so that if cubeb has failed we've unmapped our devices
-  // before we early return. Otherwise we'd keep the old list.
-  for (auto& device_index : (*mDeviceIndexes)) {
-    device_index = -1; // unmapped
-  }
-
-  cubeb* cubebContext = CubebUtils::GetCubebContext();
-  if (!cubebContext) {
-    return;
-  }
-
-  cubeb_device_collection devices = { nullptr, 0 };
-
-  if (CUBEB_OK != cubeb_enumerate_devices(cubebContext,
-                                          CUBEB_DEVICE_TYPE_INPUT,
-                                          &devices)) {
-    return;
-  }
-
-  // Calculate translation from existing mDevices to new devices. Note we
-  // never end up with less devices than before, since people have
-  // stashed indexes.
-  // For some reason the "fake" device for automation is marked as DISABLED,
-  // so white-list it.
-  mDefaultDevice = -1;
-  for (uint32_t i = 0; i < devices.count; i++) {
-    LOG(("Cubeb device %u: type 0x%x, state 0x%x, name %s, id %p",
-         i, devices.device[i].type, devices.device[i].state,
-         devices.device[i].friendly_name, devices.device[i].device_id));
-    if (devices.device[i].type == CUBEB_DEVICE_TYPE_INPUT && // paranoia
-        devices.device[i].state == CUBEB_DEVICE_STATE_ENABLED )
-    {
-      auto j = mDeviceNames->IndexOf(devices.device[i].device_id);
-      if (j != nsTArray<nsCString>::NoIndex) {
-        // match! update the mapping
-        (*mDeviceIndexes)[j] = i;
-      } else {
-        // new device, add to the array
-        mDeviceIndexes->AppendElement(i);
-        mDeviceNames->AppendElement(devices.device[i].device_id);
-        j = mDeviceIndexes->Length()-1;
-      }
-      if (devices.device[i].preferred & CUBEB_DEVICE_PREF_VOICE) {
-        // There can be only one... we hope
-        NS_ASSERTION(mDefaultDevice == -1, "multiple default cubeb input devices!");
-        mDefaultDevice = j;
-      }
-    }
-  }
-  LOG(("Cubeb default input device %d", mDefaultDevice));
-  StaticMutexAutoLock lock(sMutex);
-  // swap state
-  cubeb_device_collection_destroy(cubebContext, &mDevices);
-  mDevices = devices;
-}
+using namespace CubebUtils;
 
 MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
-  : mMutex("MediaEngineWebRTC::mMutex"),
-    mAudioInput(nullptr),
-    mFullDuplex(aPrefs.mFullDuplex),
-    mDelayAgnostic(aPrefs.mDelayAgnostic),
-    mExtendedFilter(aPrefs.mExtendedFilter),
-    mHasTabVideoSource(false)
+  : mMutex("mozilla::MediaEngineWebRTC")
+  , mEnumerator()
+  , mHasTabVideoSource(false)
 {
   nsCOMPtr<nsIComponentRegistrar> compMgr;
   NS_GetComponentRegistrar(getter_AddRefs(compMgr));
   if (compMgr) {
     compMgr->IsContractIDRegistered(NS_TABSOURCESERVICE_CONTRACTID, &mHasTabVideoSource);
   }
 
   camera::GetChildAndCall(
     &camera::CamerasChild::AddDeviceChangeCallback,
     this);
 }
 
+CubebDeviceEnumerator::CubebDeviceEnumerator()
+  : mMutex("CubebDeviceListMutex")
+{
+  cubeb_register_device_collection_changed(GetCubebContext(),
+                                           CUBEB_DEVICE_TYPE_INPUT,
+                                           &mozilla::CubebDeviceEnumerator::AudioDeviceListChanged_s,
+                                           this);
+}
+
+CubebDeviceEnumerator::~CubebDeviceEnumerator()
+{
+  cubeb_register_device_collection_changed(GetCubebContext(),
+                                           CUBEB_DEVICE_TYPE_INPUT,
+                                           nullptr,
+                                           this);
+}
+
+void
+CubebDeviceEnumerator::EnumerateAudioInputDevice(nsTArray<RefPtr<AudioDeviceInfo>>& aDevices)
+{
+  cubeb* context = GetCubebContext();
+
+  if (!context) {
+    return;
+  }
+  // We keep all the device names, but wipe the mappings and rebuild them
+
+  MutexAutoLock lock(mMutex);
+
+  if (mDevices.IsEmpty()) {
+    CubebUtils::GetDeviceCollection(mDevices, CubebUtils::Input);
+  }
+
+  aDevices.AppendElements(mDevices);
+}
+
+already_AddRefed<AudioDeviceInfo>
+CubebDeviceEnumerator::DeviceInfoFromID(CubebUtils::AudioDeviceID aID)
+{
+  MutexAutoLock lock(mMutex);
+
+  for (uint32_t i  = 0; i < mDevices.Length(); i++) {
+    if (mDevices[i]->GetDeviceID().isSome() &&
+        mDevices[i]->GetDeviceID().ref() == aID) {
+      RefPtr<AudioDeviceInfo> other = mDevices[i];
+      return other.forget();
+    }
+  }
+  return nullptr;
+}
+
+void
+CubebDeviceEnumerator::AudioDeviceListChanged_s(cubeb* aContext, void* aUser)
+{
+  CubebDeviceEnumerator* self = reinterpret_cast<CubebDeviceEnumerator*>(aUser);
+  self->AudioDeviceListChanged();
+}
+
+void
+CubebDeviceEnumerator::AudioDeviceListChanged()
+{
+  MutexAutoLock lock(mMutex);
+
+  mDevices.Clear();
+}
+
 void
 MediaEngineWebRTC::SetFakeDeviceChangeEvents()
 {
   camera::GetChildAndCall(
     &camera::CamerasChild::SetFakeDeviceChangeEvents);
 }
 
 void
@@ -254,90 +248,71 @@ MediaEngineWebRTC::EnumerateDevices(uint
         aSources->AppendElement(vSource);
       }
     }
 
     if (mHasTabVideoSource || dom::MediaSourceEnum::Browser == aMediaSource) {
       aSources->AppendElement(new MediaEngineTabVideoSource());
     }
   } else {
-    // We spawn threads to handle gUM runnables, so we must protect the member vars
+    // We spawn threads to handle gUM runnables, so we must protect the member
+    // vars
     MutexAutoLock lock(mMutex);
 
     if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
       RefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
         new MediaEngineWebRTCAudioCaptureSource(nullptr);
       aSources->AppendElement(audioCaptureSource);
       return;
     }
 
-    if (!mAudioInput) {
-      if (!SupportsDuplex()) {
-        return;
-      }
-      mAudioInput = new mozilla::AudioInputCubeb();
+    if (!mEnumerator) {
+      mEnumerator = new CubebDeviceEnumerator();
+    }
+
+    nsTArray<RefPtr<AudioDeviceInfo>> devices;
+    mEnumerator->EnumerateAudioInputDevice(devices);
+
+    // Handle enumeration error
+    if (devices.IsEmpty()) {
+      return;
     }
 
-    int nDevices = 0;
-    mAudioInput->GetNumOfRecordingDevices(nDevices);
-    int i;
-#if defined(MOZ_WIDGET_ANDROID)
-    i = 0; // Bug 1037025 - let the OS handle defaulting for now on android/b2g
-#else
-    // -1 is "default communications device" depending on OS in webrtc.org code
-    i = -1;
-#endif
-    for (; i < nDevices; i++) {
-      // We use constants here because GetRecordingDeviceName takes char[128].
-      char deviceName[128];
-      char uniqueId[128];
-      // paranoia; jingle doesn't bother with this
-      deviceName[0] = '\0';
-      uniqueId[0] = '\0';
-
-      int error = mAudioInput->GetRecordingDeviceName(i, deviceName, uniqueId);
-      if (error) {
-        LOG((" AudioInput::GetRecordingDeviceName: Failed %d", error));
-        continue;
-      }
+    // For some reason the "fake" device for automation is marked as DISABLED,
+    // so white-list it.
+    for (uint32_t i = 0; i < devices.Length(); i++) {
+      MOZ_ASSERT(devices[i]->GetDeviceID().isSome());
+      LOG(("Cubeb device %u: type 0x%x, state 0x%x, name %s, id %p",
+           i,
+           devices[i]->Type(),
+           devices[i]->State(),
+           NS_ConvertUTF16toUTF8(devices[i]->FriendlyName()).get(),
+           devices[i]->GetDeviceID().ref()));
 
-      if (uniqueId[0] == '\0') {
-        // Mac and Linux don't set uniqueId!
-        strcpy(uniqueId, deviceName); // safe given assert and initialization/error-check
-      }
-
-
-      RefPtr<MediaEngineSource> aSource;
-      NS_ConvertUTF8toUTF16 uuid(uniqueId);
-
-      nsRefPtrHashtable<nsStringHashKey, MediaEngineSource>*
-        devicesForThisWindow = mAudioSources.LookupOrAdd(aWindowId);
-
-      if (devicesForThisWindow->Get(uuid, getter_AddRefs(aSource)) &&
-          aSource->RequiresSharing()) {
-        // We've already seen this device, just append.
-        aSources->AppendElement(aSource.get());
-      } else {
-        aSource = new MediaEngineWebRTCMicrophoneSource(
-            new mozilla::AudioInputCubeb(i),
-            i, deviceName, uniqueId,
-            mDelayAgnostic, mExtendedFilter);
-        devicesForThisWindow->Put(uuid, aSource);
-        aSources->AppendElement(aSource);
+      if ((devices[i]->State() == CUBEB_DEVICE_STATE_ENABLED ||
+           (devices[i]->State() == CUBEB_DEVICE_STATE_DISABLED &&
+            devices[i]->FriendlyName().Equals(NS_LITERAL_STRING("Sine source at 440 Hz"))))) {
+        MOZ_ASSERT(devices[i]->Type() == CUBEB_DEVICE_TYPE_INPUT);
+        // XXX do something for the default device.
+        RefPtr<MediaEngineSource> source =
+          new MediaEngineWebRTCMicrophoneSource(
+            mEnumerator,
+            devices[i]->GetDeviceID().ref(),
+            devices[i]->FriendlyName(),
+            // Lie and provide the name as UUID
+            NS_LossyConvertUTF16toASCII(devices[i]->FriendlyName()),
+            devices[i]->MaxChannels(),
+            mDelayAgnostic,
+            mExtendedFilter);
+        aSources->AppendElement(source);
       }
     }
   }
 }
 
-bool
-MediaEngineWebRTC::SupportsDuplex()
-{
-  return mFullDuplex;
-}
-
 void
 MediaEngineWebRTC::ReleaseResourcesForWindow(uint64_t aWindowId)
 {
   {
     nsRefPtrHashtable<nsStringHashKey, MediaEngineSource>*
       audioDevicesForThisWindow = mAudioSources.Get(aWindowId);
 
     if (audioDevicesForThisWindow) {
@@ -392,13 +367,14 @@ MediaEngineWebRTC::Shutdown()
   }
 
   LOG(("%s", __FUNCTION__));
   // Shutdown all the sources, since we may have dangling references to the
   // sources in nsDOMUserMediaStreams waiting for GC/CC
   ShutdownSources(mVideoSources);
   ShutdownSources(mAudioSources);
 
+  mEnumerator = nullptr;
+
   mozilla::camera::Shutdown();
-  AudioInputCubeb::CleanupGlobalData();
 }
 
 }
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -2,16 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MEDIAENGINEWEBRTC_H_
 #define MEDIAENGINEWEBRTC_H_
 
 #include "AudioPacketizer.h"
 #include "AudioSegment.h"
+#include "AudioDeviceInfo.h"
 #include "CamerasChild.h"
 #include "cubeb/cubeb.h"
 #include "CubebUtils.h"
 #include "DOMMediaStream.h"
 #include "ipc/IPCMessageUtils.h"
 #include "MediaEngine.h"
 #include "MediaEnginePrefs.h"
 #include "MediaEngineSource.h"
@@ -116,239 +117,51 @@ public:
   uint32_t GetBestFitnessDistance(
     const nsTArray<const NormalizedConstraintSet*>& aConstraintSets,
     const nsString& aDeviceId) const override;
 
 protected:
   virtual ~MediaEngineWebRTCAudioCaptureSource() = default;
 };
 
-// Small subset of VoEHardware
-class AudioInput
+struct CubebDeviceCollectionDestroyFunctor
 {
-public:
-  AudioInput() = default;
-  // Threadsafe because it's referenced from an MicrophoneSource, which can
-  // had references to it on other threads.
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioInput)
-
-  virtual int GetNumOfRecordingDevices(int& aDevices) = 0;
-  virtual int GetRecordingDeviceName(int aIndex, char (&aStrNameUTF8)[128],
-                                     char aStrGuidUTF8[128]) = 0;
-  virtual int GetRecordingDeviceStatus(bool& aIsAvailable) = 0;
-  virtual void GetChannelCount(uint32_t& aChannels) = 0;
-  virtual int GetMaxAvailableChannels(uint32_t& aChannels) = 0;
-  virtual void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener) = 0;
-  virtual void StopRecording(SourceMediaStream *aStream) = 0;
-  virtual int SetRecordingDevice(int aIndex) = 0;
-  virtual void SetUserChannelCount(uint32_t aChannels) = 0;
-
-protected:
-  // Protected destructor, to discourage deletion outside of Release():
-  virtual ~AudioInput() = default;
+  void operator()(cubeb_device_collection* aCollection)
+  {
+    cubeb_device_collection_destroy(CubebUtils::GetCubebContext(), aCollection);
+  }
 };
 
-class AudioInputCubeb final : public AudioInput
+// This class implements a cache for accessing the audio device list. It can
+// be accessed on any thread.
+class CubebDeviceEnumerator final
 {
+NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CubebDeviceEnumerator)
 public:
-  explicit AudioInputCubeb(int aIndex = 0) :
-    AudioInput(), mSelectedDevice(aIndex), mInUseCount(0)
-  {
-    if (!mDeviceIndexes) {
-      mDeviceIndexes = new nsTArray<int>;
-      mDeviceNames = new nsTArray<nsCString>;
-      mDefaultDevice = -1;
-    }
-  }
-
-  static void CleanupGlobalData()
-  {
-    if (mDevices.device) {
-      cubeb_device_collection_destroy(CubebUtils::GetCubebContext(), &mDevices);
-    }
-    delete mDeviceIndexes;
-    mDeviceIndexes = nullptr;
-    delete mDeviceNames;
-    mDeviceNames = nullptr;
-  }
-
-  int GetNumOfRecordingDevices(int& aDevices)
-  {
-#ifdef MOZ_WIDGET_ANDROID
-    // OpenSL ES does not support enumerate device.
-    aDevices = 1;
-#else
-    UpdateDeviceList();
-    aDevices = mDeviceIndexes->Length();
-#endif
-    return 0;
-  }
-
-  static int32_t DeviceIndex(int aIndex)
-  {
-    // -1 = system default if any
-    if (aIndex == -1) {
-      if (mDefaultDevice == -1) {
-        aIndex = 0;
-      } else {
-        aIndex = mDefaultDevice;
-      }
-    }
-    MOZ_ASSERT(mDeviceIndexes);
-    if (aIndex < 0 || aIndex >= (int) mDeviceIndexes->Length()) {
-      return -1;
-    }
-    // Note: if the device is gone, this will be -1
-    return (*mDeviceIndexes)[aIndex]; // translate to mDevices index
-  }
-
-  static StaticMutex& Mutex()
-  {
-    return sMutex;
-  }
-
-  static bool GetDeviceID(int aDeviceIndex, CubebUtils::AudioDeviceID &aID)
-  {
-    // Assert sMutex is held
-    sMutex.AssertCurrentThreadOwns();
-#ifdef MOZ_WIDGET_ANDROID
-    aID = nullptr;
-    return true;
-#else
-    int dev_index = DeviceIndex(aDeviceIndex);
-    if (dev_index != -1) {
-      aID = mDevices.device[dev_index].devid;
-      return true;
-    }
-    return false;
-#endif
-  }
-
-  int GetRecordingDeviceName(int aIndex, char (&aStrNameUTF8)[128],
-                             char aStrGuidUTF8[128])
-  {
-#ifdef MOZ_WIDGET_ANDROID
-    aStrNameUTF8[0] = '\0';
-    aStrGuidUTF8[0] = '\0';
-#else
-    int32_t devindex = DeviceIndex(aIndex);
-    if (mDevices.count == 0 || devindex < 0) {
-      return 1;
-    }
-    SprintfLiteral(aStrNameUTF8, "%s%s", aIndex == -1 ? "default: " : "",
-                   mDevices.device[devindex].friendly_name);
-    aStrGuidUTF8[0] = '\0';
-#endif
-    return 0;
-  }
-
-  int GetRecordingDeviceStatus(bool& aIsAvailable)
-  {
-    // With cubeb, we only expose devices of type CUBEB_DEVICE_TYPE_INPUT,
-    // so unless it was removed, say it's available
-    aIsAvailable = true;
-    return 0;
-  }
-
-  void GetChannelCount(uint32_t& aChannels)
-  {
-    GetUserChannelCount(mSelectedDevice, aChannels);
-  }
-
-  static void GetUserChannelCount(int aDeviceIndex, uint32_t& aChannels)
-  {
-    aChannels = sUserChannelCount;
-  }
-
-  int GetMaxAvailableChannels(uint32_t& aChannels)
-  {
-    return GetDeviceMaxChannels(mSelectedDevice, aChannels);
-  }
-
-  static int GetDeviceMaxChannels(int aDeviceIndex, uint32_t& aChannels)
-  {
-#ifdef MOZ_WIDGET_ANDROID
-    aChannels = 1;
-#else
-    int32_t devindex = DeviceIndex(aDeviceIndex);
-    if (mDevices.count == 0 || devindex < 0) {
-      return 1;
-    }
-    aChannels = mDevices.device[devindex].max_channels;
-#endif
-    return 0;
-  }
-
-  void SetUserChannelCount(uint32_t aChannels)
-  {
-    if (GetDeviceMaxChannels(mSelectedDevice, sUserChannelCount)) {
-      sUserChannelCount = 1; // error capture mono
-      return;
-    }
-
-    if (aChannels && aChannels < sUserChannelCount) {
-      sUserChannelCount = aChannels;
-    }
-  }
-
-  void StartRecording(SourceMediaStream *aStream, AudioDataListener *aListener)
-  {
-#ifdef MOZ_WIDGET_ANDROID
-    // OpenSL ES does not support enumerating devices.
-    MOZ_ASSERT(mDevices.count == 0);
-#else
-    MOZ_ASSERT(mDevices.count > 0);
-#endif
-
-    mAnyInUse = true;
-    mInUseCount++;
-    // Always tell the stream we're using it for input
-    aStream->OpenAudioInput(mSelectedDevice, aListener);
-  }
-
-  void StopRecording(SourceMediaStream *aStream)
-  {
-    aStream->CloseAudioInput();
-    if (--mInUseCount == 0) {
-      mAnyInUse = false;
-    }
-  }
-
-  int SetRecordingDevice(int aIndex)
-  {
-    mSelectedDevice = aIndex;
-    return 0;
-  }
+  CubebDeviceEnumerator();
+  void EnumerateAudioInputDevice(nsTArray<RefPtr<AudioDeviceInfo>>& aDevices);
+  // From a cubeb device id, maybe return the info for this device, if it's
+  // still a valid id.
+  already_AddRefed<AudioDeviceInfo>
+  DeviceInfoFromID(CubebUtils::AudioDeviceID aID);
 
 protected:
-  ~AudioInputCubeb() {
-    MOZ_RELEASE_ASSERT(mInUseCount == 0);
-  }
+  ~CubebDeviceEnumerator();
+
+  // Static function called by cubeb when the audio input device list changes
+  // (i.e. when a new device is made available, or becomes unavailable). This
+  // re-binds to the CubebDeviceEnumerator that was registered, and simply
+  // calls `AudioDeviceListChanged` below.
+  static void AudioDeviceListChanged_s(cubeb* aContext, void* aUser);
+  // With the mutex taken, invalidates the cached audio input device list.
+  void AudioDeviceListChanged();
 
 private:
-  // It would be better to watch for device-change notifications
-  void UpdateDeviceList();
-
-  // We have an array, which consists of indexes to the current mDevices
-  // list.  This is updated on mDevices updates.  Many devices in mDevices
-  // won't be included in the array (wrong type, etc), or if a device is
-  // removed it will map to -1 (and opens of this device will need to check
-  // for this - and be careful of threading access.  The mappings need to
-  // updated on each re-enumeration.
-  int mSelectedDevice;
-  uint32_t mInUseCount;
-
-  // pointers to avoid static constructors
-  static nsTArray<int>* mDeviceIndexes;
-  static int mDefaultDevice; // -1 == not set
-  static nsTArray<nsCString>* mDeviceNames;
-  static cubeb_device_collection mDevices;
-  static bool mAnyInUse;
-  static StaticMutex sMutex;
-  static uint32_t sUserChannelCount;
+  Mutex mMutex;
+  nsTArray<RefPtr<AudioDeviceInfo>> mDevices;
 };
 
 class WebRTCAudioDataListener : public AudioDataListener
 {
 protected:
   // Protected destructor, to discourage deletion outside of Release():
   virtual ~WebRTCAudioDataListener() {}
 
@@ -366,33 +179,36 @@ public:
                         uint32_t aChannels) override;
 
   void NotifyInputData(MediaStreamGraph* aGraph,
                        const AudioDataValue* aBuffer,
                        size_t aFrames,
                        TrackRate aRate,
                        uint32_t aChannels) override;
 
+  uint32_t InputChannelCount() override;
+
   void DeviceChanged() override;
 
   void Shutdown();
 
 private:
   Mutex mMutex;
   RefPtr<MediaEngineWebRTCMicrophoneSource> mAudioSource;
 };
 
 class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource,
                                           public AudioDataListenerInterface
 {
 public:
-  MediaEngineWebRTCMicrophoneSource(mozilla::AudioInput* aAudioInput,
-                                    int aIndex,
-                                    const char* name,
-                                    const char* uuid,
+  MediaEngineWebRTCMicrophoneSource(mozilla::CubebDeviceEnumerator* aEnumerator,
+                                    CubebUtils::AudioDeviceID aID,
+                                    const nsString& name,
+                                    const nsCString& uuid,
+                                    uint32_t maxChannelCount,
                                     bool aDelayAgnostic,
                                     bool aExtendedFilter);
 
   bool RequiresSharing() const override
   {
     return true;
   }
 
@@ -413,38 +229,43 @@ public:
   nsresult Start(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Stop(const RefPtr<const AllocationHandle>& aHandle) override;
   nsresult Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                        const dom::MediaTrackConstraints& aConstraints,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint) override;
 
+  void Pull(const RefPtr<const AllocationHandle>& aHandle,
+            const RefPtr<SourceMediaStream>& aStream,
+            TrackID aTrackID,
+            StreamTime aDesiredTime,
+            const PrincipalHandle& aPrincipalHandle) override;
+
   /**
    * Assigns the current settings of the capture to aOutSettings.
    * Main thread only.
    */
   void GetSettings(dom::MediaTrackSettings& aOutSettings) const override;
 
-  void Pull(const RefPtr<const AllocationHandle>& aHandle,
-            const RefPtr<SourceMediaStream>& aStream,
-            TrackID aTrackID,
-            StreamTime aDesiredTime,
-            const PrincipalHandle& aPrincipalHandle) override;
-
   // AudioDataListenerInterface methods
   void NotifyOutputData(MediaStreamGraph* aGraph,
                         AudioDataValue* aBuffer, size_t aFrames,
                         TrackRate aRate, uint32_t aChannels) override;
   void NotifyInputData(MediaStreamGraph* aGraph,
                        const AudioDataValue* aBuffer, size_t aFrames,
                        TrackRate aRate, uint32_t aChannels) override;
 
   void DeviceChanged() override;
 
+  uint32_t InputChannelCount() override
+  {
+    return GetUserInputChannelCount();
+  }
+
   dom::MediaSourceEnum GetMediaSource() const override
   {
     return dom::MediaSourceEnum::Microphone;
   }
 
   nsresult TakePhoto(MediaEnginePhotoCallback* aCallback) override
   {
     return NS_ERROR_NOT_IMPLEMENTED;
@@ -544,32 +365,34 @@ private:
                      uint32_t aChannels);
 
   void PacketizeAndProcess(MediaStreamGraph* aGraph,
                            const AudioDataValue* aBuffer,
                            size_t aFrames,
                            TrackRate aRate,
                            uint32_t aChannels);
 
+  // Those four getters and setters are only to be called on the MSG thread.
 
   // This is true when all processing is disabled, we can skip
   // packetization, resampling and other processing passes.
   // Graph thread only.
   bool PassThrough() const;
 
   // Graph thread only.
   void SetPassThrough(bool aPassThrough);
+  uint32_t GetUserInputChannelCount();
+  void SetUserInputChannelCount(uint32_t aUserInputChannelCount);
 
   // Owning thread only.
   RefPtr<WebRTCAudioDataListener> mListener;
+  RefPtr<mozilla::CubebDeviceEnumerator> mEnumerator;
+  // Number of times this devices has been opened for this MSG.
+  int mChannelsOpen;
 
-  // Note: shared across all microphone sources. Owning thread only.
-  static int sChannelsOpen;
-
-  const RefPtr<mozilla::AudioInput> mAudioInput;
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
 
   // accessed from the GraphDriver thread except for deletion.
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerOutput;
 
   // mMutex protects some of our members off the owning thread.
   Mutex mMutex;
@@ -578,28 +401,32 @@ private:
   // Both the array and the Allocation members are modified under mMutex on
   // the owning thread. Accessed under one of the two.
   nsTArray<Allocation> mAllocations;
 
   // Current state of the shared resource for this source.
   // Set under mMutex on the owning thread. Accessed under one of the two
   MediaEngineSourceState mState = kReleased;
 
-  int mCapIndex;
+  CubebUtils::AudioDeviceID mDeviceID;
   bool mDelayAgnostic;
   bool mExtendedFilter;
   bool mStarted;
 
   const nsString mDeviceName;
   const nsCString mDeviceUUID;
 
   // The current settings for the underlying device.
   // Member access is main thread only after construction.
   const nsMainThreadPtrHandle<media::Refcountable<dom::MediaTrackSettings>> mSettings;
 
+  // The number of channels asked for by content, after clamping to the range of
+  // legal channel count for this particular device. This is the number of
+  // channels of the input buffer received.
+  uint32_t mUserInputChannelCount;
   uint64_t mTotalFrames;
   uint64_t mLastLogFrames;
 
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MSG, skipping resampling and the whole webrtc.org code.
   // This is read and written to only on the MSG thread.
   bool mSkipProcessing;
@@ -625,32 +452,24 @@ public:
   explicit MediaEngineWebRTC(MediaEnginePrefs& aPrefs);
 
   virtual void SetFakeDeviceChangeEvents() override;
 
   // Clients should ensure to clean-up sources video/audio sources
   // before invoking Shutdown on this class.
   void Shutdown() override;
 
-  // Returns whether the host supports duplex audio stream.
-  bool SupportsDuplex();
-
   void EnumerateDevices(uint64_t aWindowId,
                         dom::MediaSourceEnum,
                         nsTArray<RefPtr<MediaEngineSource>>*) override;
   void ReleaseResourcesForWindow(uint64_t aWindowId) override;
 private:
-  ~MediaEngineWebRTC() = default;
-
-  nsCOMPtr<nsIThread> mThread;
-
   // gUM runnables can e.g. Enumerate from multiple threads
   Mutex mMutex;
-  RefPtr<mozilla::AudioInput> mAudioInput;
-  bool mFullDuplex;
+  RefPtr<mozilla::CubebDeviceEnumerator> mEnumerator;
   bool mDelayAgnostic;
   bool mExtendedFilter;
   bool mHasTabVideoSource;
 
   // Maps WindowID to a map of device uuid to their MediaEngineSource,
   // separately for audio and video.
   nsClassHashtable<nsUint64HashKey,
                     nsRefPtrHashtable<nsStringHashKey,
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -17,17 +17,16 @@
 #include "mozilla/ErrorNames.h"
 #include "mtransport/runnable_utils.h"
 #include "nsAutoPtr.h"
 
 // scoped_ptr.h uses FF
 #ifdef FF
 #undef FF
 #endif
-#include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
 #include "webrtc/voice_engine/voice_engine_defines.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/common_audio/include/audio_util.h"
 
 using namespace webrtc;
 
 // These are restrictions from the webrtc.org code
 #define MAX_CHANNELS 2
@@ -81,64 +80,76 @@ void
 WebRTCAudioDataListener::DeviceChanged()
 {
   MutexAutoLock lock(mMutex);
   if (mAudioSource) {
     mAudioSource->DeviceChanged();
   }
 }
 
+// Forwards to the source's channel count while it is still attached;
+// returns 0 after Shutdown() has cleared mAudioSource.
+uint32_t
+WebRTCAudioDataListener::InputChannelCount()
+{
+  MutexAutoLock lock(mMutex);
+  return mAudioSource ? mAudioSource->InputChannelCount() : 0;
+}
+
 void
 WebRTCAudioDataListener::Shutdown()
 {
   MutexAutoLock lock(mMutex);
   mAudioSource = nullptr;
 }
 
 /**
  * WebRTC Microphone MediaEngineSource.
  */
-int MediaEngineWebRTCMicrophoneSource::sChannelsOpen = 0;
 
 MediaEngineWebRTCMicrophoneSource::Allocation::Allocation(
     const RefPtr<AllocationHandle>& aHandle)
   : mHandle(aHandle)
 {}
 
 MediaEngineWebRTCMicrophoneSource::Allocation::~Allocation() = default;
 
 MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
-    mozilla::AudioInput* aAudioInput,
-    int aIndex,
-    const char* aDeviceName,
-    const char* aDeviceUUID,
+    mozilla::CubebDeviceEnumerator* aEnumerator,
+    CubebUtils::AudioDeviceID aID,
+    const nsString& aDeviceName,
+    const nsCString& aDeviceUUID,
+    uint32_t maxChannelCount,
     bool aDelayAgnostic,
     bool aExtendedFilter)
-  : mAudioInput(aAudioInput)
+  : mEnumerator(aEnumerator)
+  , mChannelsOpen(0)
   , mAudioProcessing(AudioProcessing::Create())
   , mMutex("WebRTCMic::Mutex")
-  , mCapIndex(aIndex)
+  , mDeviceID(aID)
   , mDelayAgnostic(aDelayAgnostic)
   , mExtendedFilter(aExtendedFilter)
   , mStarted(false)
-  , mDeviceName(NS_ConvertUTF8toUTF16(aDeviceName))
+  , mDeviceName(aDeviceName)
   , mDeviceUUID(aDeviceUUID)
   , mSettings(
       new nsMainThreadPtrHolder<media::Refcountable<dom::MediaTrackSettings>>(
         "MediaEngineWebRTCMicrophoneSource::mSettings",
         new media::Refcountable<dom::MediaTrackSettings>(),
         // Non-strict means it won't assert main thread for us.
         // It would be great if it did but we're already on the media thread.
         /* aStrict = */ false))
+  , mUserInputChannelCount(maxChannelCount)
   , mTotalFrames(0)
   , mLastLogFrames(0)
   , mSkipProcessing(false)
   , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100)
 {
-  MOZ_ASSERT(aAudioInput);
+  MOZ_ASSERT(aEnumerator);
   mSettings->mEchoCancellation.Construct(0);
   mSettings->mAutoGainControl.Construct(0);
   mSettings->mNoiseSuppression.Construct(0);
   mSettings->mChannelCount.Construct(0);
   // We'll init lazily as needed
 }
 
 nsString
@@ -411,125 +422,139 @@ MediaEngineWebRTCMicrophoneSource::Updat
   AssertIsOnOwningThread();
 
   FlattenedConstraints c(aNetConstraints);
 
   MediaEnginePrefs prefs = aPrefs;
   prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
   prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn);
   prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn);
-  uint32_t maxChannels = 1;
-  if (mAudioInput->GetMaxAvailableChannels(maxChannels) != 0) {
+
+  RefPtr<AudioDeviceInfo> info = mEnumerator->DeviceInfoFromID(mDeviceID);
+
+  if (!info) {
     return NS_ERROR_FAILURE;
   }
-  // Check channelCount violation
-  if (static_cast<int32_t>(maxChannels) < c.mChannelCount.mMin ||
-      static_cast<int32_t>(maxChannels) > c.mChannelCount.mMax) {
+
+  // Determine an actual channel count to use for this source. Three factors at
+  // play here: the device capabilities, the constraints passed in by content,
+  // and a pref that can force things (for testing)
+
+  // First, check channelCount violation wrt constraints. This throws in case of
+  // error.
+  if (static_cast<int32_t>(info->MaxChannels()) < c.mChannelCount.mMin ||
+      static_cast<int32_t>(info->MaxChannels()) > c.mChannelCount.mMax) {
     *aOutBadConstraint = "channelCount";
     return NS_ERROR_FAILURE;
   }
-  // Clamp channelCount to a valid value
+  // A pref can force the channel count to use. If the pref has a value of zero
+  // or lower, it has no effect.
   if (prefs.mChannels <= 0) {
-    prefs.mChannels = static_cast<int32_t>(maxChannels);
+    prefs.mChannels = static_cast<int32_t>(info->MaxChannels());
   }
+
+  // Get the number of channels asked for by content, and clamp it between the
+  // pref and the maximum number of channels that the device supports.
   prefs.mChannels = c.mChannelCount.Get(std::min(prefs.mChannels,
-                                        static_cast<int32_t>(maxChannels)));
-  // Clamp channelCount to a valid value
-  prefs.mChannels = std::max(1, std::min(prefs.mChannels, static_cast<int32_t>(maxChannels)));
+                                        static_cast<int32_t>(info->MaxChannels())));
 
   LOG(("Audio config: aec: %d, agc: %d, noise: %d, channels: %d",
       prefs.mAecOn ? prefs.mAec : -1,
       prefs.mAgcOn ? prefs.mAgc : -1,
       prefs.mNoiseOn ? prefs.mNoise : -1,
       prefs.mChannels));
 
   switch (mState) {
     case kReleased:
       MOZ_ASSERT(aHandle);
-      if (sChannelsOpen != 0) {
-        // Until we fix (or wallpaper) support for multiple mic input
-        // (Bug 1238038) fail allocation for a second device
-        return NS_ERROR_FAILURE;
-      }
-      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
-         return NS_ERROR_FAILURE;
-      }
-      mAudioInput->SetUserChannelCount(prefs.mChannels);
       {
         MutexAutoLock lock(mMutex);
         mState = kAllocated;
+        mChannelsOpen++;
       }
-      sChannelsOpen++;
-      LOG(("Audio device %d allocated", mCapIndex));
-      {
-        // Update with the actual applied channelCount in order
-        // to store it in settings.
-        uint32_t channelCount = 0;
-        mAudioInput->GetChannelCount(channelCount);
-        MOZ_ASSERT(channelCount > 0);
-        prefs.mChannels = channelCount;
-      }
+      LOG(("Audio device %s allocated", NS_ConvertUTF16toUTF8(info->FriendlyName()).get()));
       break;
 
     case kStarted:
     case kStopped:
-      if (prefs.mChannels != mNetPrefs.mChannels) {
-        // If the channel count changed, tell the MSG to open a new driver with
-        // the correct channel count.
-        MOZ_ASSERT(!mAllocations.IsEmpty());
-        RefPtr<SourceMediaStream> stream;
-        for (const Allocation& allocation : mAllocations) {
-          if (allocation.mStream && allocation.mStream->GraphImpl()) {
-            stream = allocation.mStream;
-            break;
-          }
-        }
-        MOZ_ASSERT(stream);
+      if (prefs == mNetPrefs) {
+        return NS_OK;
+      }
 
-        mAudioInput->SetUserChannelCount(prefs.mChannels);
-        // Get validated number of channel
-        uint32_t channelCount = 0;
-        mAudioInput->GetChannelCount(channelCount);
-        MOZ_ASSERT(channelCount > 0 && mNetPrefs.mChannels > 0);
-        if (!stream->OpenNewAudioCallbackDriver(mListener)) {
-          MOZ_LOG(GetMediaManagerLog(), LogLevel::Error, ("Could not open a new AudioCallbackDriver for input"));
-          return NS_ERROR_FAILURE;
-        }
+      if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
+        LOG(("Audio device %s reallocated", NS_ConvertUTF16toUTF8(info->FriendlyName()).get()));
+      } else {
+        LOG(("Audio device %s allocated shared", NS_ConvertUTF16toUTF8(info->FriendlyName()).get()));
       }
       break;
 
     default:
-      LOG(("Audio device %d in ignored state %d", mCapIndex, mState));
+      LOG(("Audio device %s in ignored state %d", NS_ConvertUTF16toUTF8(info->FriendlyName()).get(), mState));
       break;
   }
 
-  if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
-    if (mAllocations.IsEmpty()) {
-      LOG(("Audio device %d reallocated", mCapIndex));
-    } else {
-      LOG(("Audio device %d allocated shared", mCapIndex));
-    }
-  }
-
-  if (sChannelsOpen > 0) {
+  if (mChannelsOpen > 0) {
     UpdateAGCSettingsIfNeeded(prefs.mAgcOn, static_cast<AgcModes>(prefs.mAgc));
     UpdateNSSettingsIfNeeded(prefs.mNoiseOn, static_cast<NsModes>(prefs.mNoise));
     UpdateAECSettingsIfNeeded(prefs.mAecOn, static_cast<EcModes>(prefs.mAec));
 
     webrtc::Config config;
     config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(mExtendedFilter));
     config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(mDelayAgnostic));
     mAudioProcessing->SetExtraOptions(config);
   }
   mNetPrefs = prefs;
   return NS_OK;
 }
 
 #undef HANDLE_APM_ERROR
 
+// Returns whether all input processing is disabled, i.e. whether audio can be
+// copied straight into the MSG without packetization/resampling. Graph (MSG)
+// thread only, as checked by the assertion.
+bool
+MediaEngineWebRTCMicrophoneSource::PassThrough() const
+{
+  MOZ_ASSERT(!mAllocations.IsEmpty() &&
+             mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  return mSkipProcessing;
+}
+
+// Enables/disables pass-through mode. Graph (MSG) thread only. No-op when no
+// allocation exists yet.
+void
+MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough)
+{
+  if (mAllocations.IsEmpty()) {
+    // No stream attached yet; nothing to configure.
+    return;
+  }
+  // The early return above already guarantees a non-empty array, so only
+  // assert the stream and thread invariants.
+  MOZ_ASSERT(mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  mSkipProcessing = aPassThrough;
+}
+
+// Returns the channel count the input device should be opened with, as asked
+// for by content and clamped to the device's capabilities (see
+// mUserInputChannelCount). Graph (MSG) thread only, as checked by the
+// assertion.
+uint32_t
+MediaEngineWebRTCMicrophoneSource::GetUserInputChannelCount()
+{
+  MOZ_ASSERT(!mAllocations.IsEmpty() &&
+             mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  return mUserInputChannelCount;
+}
+
+// Sets the input channel count and asks the graph to re-evaluate (and
+// possibly re-open) the input device with the new count. Graph (MSG) thread
+// only. No-op when no allocation exists yet.
+void
+MediaEngineWebRTCMicrophoneSource::SetUserInputChannelCount(
+  uint32_t aUserInputChannelCount)
+{
+  if (mAllocations.IsEmpty()) {
+    // No stream attached yet; nothing to re-evaluate.
+    return;
+  }
+  // The early return above already guarantees a non-empty array, so only
+  // assert the stream and thread invariants.
+  MOZ_ASSERT(mAllocations[0].mStream &&
+             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread());
+  mUserInputChannelCount = aUserInputChannelCount;
+  mAllocations[0].mStream->GraphImpl()->ReevaluateInputDevice();
+}
+
 void
 MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs,
                                                  RefPtr<MediaStreamGraphImpl> aGraph)
 {
   AssertIsOnOwningThread();
   MOZ_DIAGNOSTIC_ASSERT(aGraph);
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
@@ -537,35 +562,41 @@ MediaEngineWebRTCMicrophoneSource::Apply
     that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn;
     that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn;
     that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn;
     that->mSettings->mChannelCount.Value() = aPrefs.mChannels;
 
     class Message : public ControlMessage {
     public:
       Message(MediaEngineWebRTCMicrophoneSource* aSource,
-              bool aPassThrough)
+              bool aPassThrough,
+              uint32_t aUserInputChannelCount)
         : ControlMessage(nullptr)
         , mMicrophoneSource(aSource)
         , mPassThrough(aPassThrough)
+        , mUserInputChannelCount(aUserInputChannelCount)
         {}
 
       void Run() override
       {
         mMicrophoneSource->SetPassThrough(mPassThrough);
+        mMicrophoneSource->SetUserInputChannelCount(mUserInputChannelCount);
       }
 
     protected:
       RefPtr<MediaEngineWebRTCMicrophoneSource> mMicrophoneSource;
       bool mPassThrough;
+      uint32_t mUserInputChannelCount;
     };
 
     bool passThrough = !(aPrefs.mAecOn || aPrefs.mAgcOn || aPrefs.mNoiseOn);
     if (graph) {
-      graph->AppendMessage(MakeUnique<Message>(that, passThrough));
+      graph->AppendMessage(MakeUnique<Message>(that,
+                                               passThrough,
+                                               aPrefs.mChannels));
     }
 
     return NS_OK;
   }));
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
@@ -602,59 +633,51 @@ MediaEngineWebRTCMicrophoneSource::Deall
 {
   AssertIsOnOwningThread();
 
   size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
   MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
   MOZ_DIAGNOSTIC_ASSERT(!mAllocations[i].mEnabled,
                         "Source should be stopped for the track before removing");
 
-  LOG(("Mic source %p allocation %p Deallocate()", this, aHandle.get()));
-
   if (mAllocations[i].mStream && IsTrackIDExplicit(mAllocations[i].mTrackID)) {
     mAllocations[i].mStream->EndTrack(mAllocations[i].mTrackID);
   }
 
   {
     MutexAutoLock lock(mMutex);
     mAllocations.RemoveElementAt(i);
   }
 
   if (mAllocations.IsEmpty()) {
     // If empty, no callbacks to deliver data should be occuring
     MOZ_ASSERT(mState != kReleased, "Source not allocated");
     MOZ_ASSERT(mState != kStarted, "Source not stopped");
-    MOZ_ASSERT(sChannelsOpen > 0);
-    --sChannelsOpen;
+    MOZ_ASSERT(mChannelsOpen > 0);
+    --mChannelsOpen;
 
     MutexAutoLock lock(mMutex);
     mState = kReleased;
-    LOG(("Audio device %d deallocated", mCapIndex));
+    LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
   } else {
-    LOG(("Audio device %d deallocated but still in use", mCapIndex));
+    LOG(("Audio device %s deallocated but still in use", NS_ConvertUTF16toUTF8(mDeviceName).get()));
   }
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::SetTrack(const RefPtr<const AllocationHandle>& aHandle,
                                             const RefPtr<SourceMediaStream>& aStream,
                                             TrackID aTrackID,
                                             const PrincipalHandle& aPrincipal)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aStream);
   MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
 
-  LOG(("Mic source %p allocation %p SetTrack() stream=%p, track=%" PRId32,
-       this, aHandle.get(), aStream.get(), aTrackID));
-
-  // Until we fix bug 1400488 we need to block a second tab (OuterWindow)
-  // from opening an already-open device.  If it's the same tab, they
-  // will share a Graph(), and we can allow it.
   if (!mAllocations.IsEmpty() &&
       mAllocations[0].mStream &&
       mAllocations[0].mStream->Graph() != aStream->Graph()) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
   MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
@@ -683,27 +706,32 @@ MediaEngineWebRTCMicrophoneSource::SetTr
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Start(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
-  if (sChannelsOpen == 0) {
+  if (mChannelsOpen == 0) {
     return NS_ERROR_FAILURE;
   }
 
-  LOG(("Mic source %p allocation %p Start()", this, aHandle.get()));
-
   size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
   MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex,
                         "Can't start track that hasn't been added");
   Allocation& allocation = mAllocations[i];
 
+  // For now, we only allow opening a single audio input device per page,
+  // because we can only have one MSG per page.
+  if (allocation.mStream->GraphImpl()->InputDeviceID() &&
+      allocation.mStream->GraphImpl()->InputDeviceID() != mDeviceID) {
+    return NS_ERROR_FAILURE;
+  }
+
   MOZ_ASSERT(!allocation.mEnabled, "Source already started");
   {
     // This spans setting both the enabled state and mState.
     MutexAutoLock lock(mMutex);
     allocation.mEnabled = true;
 
 #ifdef DEBUG
     // Ensure that callback-tracking state is reset when callbacks start coming.
@@ -715,17 +743,17 @@ MediaEngineWebRTCMicrophoneSource::Start
     if (!mListener) {
       mListener = new WebRTCAudioDataListener(this);
     }
 
     // Make sure logger starts before capture
     AsyncLatencyLogger::Get(true);
 
     // Must be *before* StartSend() so it will notice we selected external input (full_duplex)
-    mAudioInput->StartRecording(allocation.mStream, mListener);
+    allocation.mStream->OpenAudioInput(mDeviceID, mListener);
 
     MOZ_ASSERT(mState != kReleased);
     mState = kStarted;
   }
 
   ApplySettings(mNetPrefs, allocation.mStream->GraphImpl());
 
   return NS_OK;
@@ -748,17 +776,17 @@ MediaEngineWebRTCMicrophoneSource::Stop(
     return NS_OK;
   }
 
   {
     // This spans setting both the enabled state and mState.
     MutexAutoLock lock(mMutex);
     allocation.mEnabled = false;
 
-    mAudioInput->StopRecording(allocation.mStream);
+    allocation.mStream->CloseAudioInput(mDeviceID, mListener);
 
     if (HasEnabledTrack()) {
       // Another track is keeping us from stopping
       return NS_OK;
     }
 
     MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
     mState = kStopped;
@@ -783,16 +811,17 @@ MediaEngineWebRTCMicrophoneSource::GetSe
 void
 MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr<const AllocationHandle>& aHandle,
                                         const RefPtr<SourceMediaStream>& aStream,
                                         TrackID aTrackID,
                                         StreamTime aDesiredTime,
                                         const PrincipalHandle& aPrincipalHandle)
 {
   StreamTime delta;
+  LOG_FRAMES(("NotifyPull, desired = %" PRId64, (int64_t) aDesiredTime));
 
   {
     MutexAutoLock lock(mMutex);
     size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
     if (i == mAllocations.NoIndex) {
       // This handle must have been deallocated. That's fine, and its track
       // will already be ended. No need to do anything.
       return;
@@ -813,22 +842,25 @@ MediaEngineWebRTCMicrophoneSource::Pull(
     }
 
     if (delta < 0) {
       LOG_FRAMES(("Not appending silence for allocation %p; %" PRId64 " frames already buffered",
                   mAllocations[i].mHandle.get(), -delta));
       return;
     }
 
-    LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p",
-                delta, mAllocations[i].mHandle.get()));
+    delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID);
+    if (delta <= 0) {
+      return;
+    }
 
     // This assertion fails when we append silence here in the same iteration
     // as there were real audio samples already appended by the audio callback.
-    // Note that this is exempted until live samples and a subsequent chunk of silence have been appended to the track. This will cover cases like:
+    // Note that this is exempted until live samples and a subsequent chunk of
+    // silence have been appended to the track. This will cover cases like:
     // - After Start(), there is silence (maybe multiple times) appended before
     //   the first audio callback.
     // - After Start(), there is real data (maybe multiple times) appended
     //   before the first graph iteration.
     // And other combinations of order of audio sample sources.
     MOZ_ASSERT_IF(
       mAllocations[i].mEnabled &&
       mAllocations[i].mLiveFramesAppended &&
@@ -1063,28 +1095,16 @@ MediaEngineWebRTCMicrophoneSource::Packe
                            processedOutputChannelPointersConst,
                            mPacketizerInput->PacketSize(),
                            allocation.mPrincipal);
       allocation.mStream->AppendToTrack(allocation.mTrackID, &segment);
     }
   }
 }
 
-bool
-MediaEngineWebRTCMicrophoneSource::PassThrough() const
-{
-  return mSkipProcessing;
-}
-
-void
-MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough)
-{
-  mSkipProcessing = aPassThrough;
-}
-
 template<typename T>
 void
 MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                  size_t aFrames,
                                                  uint32_t aChannels)
 {
   MutexAutoLock lock(mMutex);