Bug 1156472 - Part 5 - Add MediaEngineWebRTCAudioCaptureSource as a new audio source, and "audioCapture" as a new MediaSource. r=jesup,bz
author Paul Adenot <paul@paul.cx>
Fri, 24 Jul 2015 14:28:16 +0200
changeset 286419 2964352ce228d724637be19bf213a2c4c4021ca4
parent 286418 244d8d88808e1a363a93a3dc35f7c454ad5c6f47
child 286420 946e320a1776177d827a3e388fa8e84de9c21dba
push id 5067
push user raliiev@mozilla.com
push date Mon, 21 Sep 2015 14:04:52 +0000
treeherder mozilla-beta@14221ffe5b2f
reviewers jesup, bz
bugs 1156472
milestone 42.0a1
dom/media/MediaManager.cpp
dom/media/MediaManager.h
dom/media/webrtc/MediaEngineWebRTC.cpp
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/webidl/Constraints.webidl
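
In outline: when getUserMedia succeeds and the chosen audio source reports MediaSourceEnum::AudioCapture, MediaManager skips the usual TrackUnionStream plumbing. Instead it creates an AudioCaptureStream, registers it with the MediaStreamGraph for the window, and flips a per-window capture flag. A condensed sketch of that branch, using the names from the MediaManager.cpp hunks below (not a standalone program):

    nsRefPtr<DOMLocalMediaStream> domStream;
    if (mAudioSource &&
        mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
      // The MSG mixes the window's audio into this stream; the audio source
      // and the placeholder SourceMediaStream carry no data of their own.
      domStream = DOMLocalMediaStream::CreateAudioCaptureStream(window);
      msg->RegisterCaptureStreamForWindow(
        mWindowID, domStream->GetStream()->AsProcessedStream());
      window->SetAudioCapture(true);
    } else {
      // Ordinary path: a TrackUnionStream fed from a SourceMediaStream.
    }
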
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -295,30 +295,29 @@ protected:
 };
 
 /**
  * nsIMediaDevice implementation.
  */
 NS_IMPL_ISUPPORTS(MediaDevice, nsIMediaDevice)
 
 MediaDevice::MediaDevice(MediaEngineSource* aSource, bool aIsVideo)
-  : mSource(aSource)
+  : mMediaSource(aSource->GetMediaSource())
+  , mSource(aSource)
   , mIsVideo(aIsVideo)
 {
   mSource->GetName(mName);
   nsCString id;
   mSource->GetUUID(id);
   CopyUTF8toUTF16(id, mID);
 }
 
 VideoDevice::VideoDevice(MediaEngineVideoSource* aSource)
   : MediaDevice(aSource, true)
-{
-  mMediaSource = aSource->GetMediaSource();
-}
+{}
 
 /**
  * Helper functions that implement the constraints algorithm from
  * http://dev.w3.org/2011/webrtc/editor/getusermedia.html#methods-5
  */
 
 bool
 MediaDevice::StringsContain(const OwningStringOrStringSequence& aStrings,
@@ -434,16 +433,18 @@ MediaDevice::SetId(const nsAString& aID)
   mID.Assign(aID);
 }
 
 NS_IMETHODIMP
 MediaDevice::GetMediaSource(nsAString& aMediaSource)
 {
   if (mMediaSource == dom::MediaSourceEnum::Microphone) {
     aMediaSource.Assign(NS_LITERAL_STRING("microphone"));
+  } else if (mMediaSource == dom::MediaSourceEnum::AudioCapture) {
+    aMediaSource.Assign(NS_LITERAL_STRING("audioCapture"));
   } else if (mMediaSource == dom::MediaSourceEnum::Window) { // this will go away
     aMediaSource.Assign(NS_LITERAL_STRING("window"));
   } else { // all the rest are shared
     aMediaSource.Assign(NS_ConvertUTF8toUTF16(
       dom::MediaSourceEnumValues::strings[uint32_t(mMediaSource)].value));
   }
   return NS_OK;
 }
@@ -779,89 +780,100 @@ public:
         branch->GetBoolPref("media.getusermedia.agc_enabled", &agc_on);
         branch->GetIntPref("media.getusermedia.agc", &agc);
         branch->GetBoolPref("media.getusermedia.noise_enabled", &noise_on);
         branch->GetIntPref("media.getusermedia.noise", &noise);
         branch->GetIntPref("media.getusermedia.playout_delay", &playout_delay);
       }
     }
 #endif
-    // Create a media stream.
-    nsRefPtr<nsDOMUserMediaStream> trackunion =
-      nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
-                                                   mAudioSource, mVideoSource);
-    if (!trackunion || sInShutdown) {
+
+    MediaStreamGraph* msg = MediaStreamGraph::GetInstance();
+    nsRefPtr<SourceMediaStream> stream = msg->CreateSourceStream(nullptr);
+
+    nsRefPtr<DOMLocalMediaStream> domStream;
+    // AudioCapture is a special case here, in the sense that we're not really
+    // using the audio source and the SourceMediaStream, which act as
+    // placeholders. We re-route a number of streams internally in the MSG and
+    // mix them down instead.
+    if (mAudioSource &&
+        mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
+      domStream = DOMLocalMediaStream::CreateAudioCaptureStream(window);
+      msg->RegisterCaptureStreamForWindow(
+            mWindowID, domStream->GetStream()->AsProcessedStream());
+      window->SetAudioCapture(true);
+    } else {
+      // Normal case, connect the source stream to the track union stream to
+      // avoid us blocking
+      nsRefPtr<nsDOMUserMediaStream> trackunion =
+        nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
+                                                     mAudioSource, mVideoSource);
+      trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
+      nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
+        AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
+      trackunion->mSourceStream = stream;
+      trackunion->mPort = port.forget();
+      // Log the relationship between SourceMediaStream and TrackUnion stream
+      // Make sure logger starts before capture
+      AsyncLatencyLogger::Get(true);
+      LogLatency(AsyncLatencyLogger::MediaStreamCreate,
+          reinterpret_cast<uint64_t>(stream.get()),
+          reinterpret_cast<int64_t>(trackunion->GetStream()));
+
+      nsCOMPtr<nsIPrincipal> principal;
+      if (mPeerIdentity) {
+        principal = nsNullPrincipal::Create();
+        trackunion->SetPeerIdentity(mPeerIdentity.forget());
+      } else {
+        principal = window->GetExtantDoc()->NodePrincipal();
+      }
+      trackunion->CombineWithPrincipal(principal);
+
+      domStream = trackunion.forget();
+    }
+
+    if (!domStream || sInShutdown) {
       nsCOMPtr<nsIDOMGetUserMediaErrorCallback> onFailure = mOnFailure.forget();
       LOG(("Returning error for getUserMedia() - no stream"));
 
       nsGlobalWindow* window = nsGlobalWindow::GetInnerWindowWithId(mWindowID);
       if (window) {
         nsRefPtr<MediaStreamError> error = new MediaStreamError(window,
             NS_LITERAL_STRING("InternalError"),
             sInShutdown ? NS_LITERAL_STRING("In shutdown") :
                           NS_LITERAL_STRING("No stream."));
         onFailure->OnError(error);
       }
       return NS_OK;
     }
-    trackunion->AudioConfig(aec_on, (uint32_t) aec,
-                            agc_on, (uint32_t) agc,
-                            noise_on, (uint32_t) noise,
-                            playout_delay);
-
-
-    MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
-    nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr);
-
-    // connect the source stream to the track union stream to avoid us blocking
-    trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
-    nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
-      AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
-    trackunion->mSourceStream = stream;
-    trackunion->mPort = port.forget();
-    // Log the relationship between SourceMediaStream and TrackUnion stream
-    // Make sure logger starts before capture
-    AsyncLatencyLogger::Get(true);
-    LogLatency(AsyncLatencyLogger::MediaStreamCreate,
-               reinterpret_cast<uint64_t>(stream.get()),
-               reinterpret_cast<int64_t>(trackunion->GetStream()));
-
-    nsCOMPtr<nsIPrincipal> principal;
-    if (mPeerIdentity) {
-      principal = nsNullPrincipal::Create();
-      trackunion->SetPeerIdentity(mPeerIdentity.forget());
-    } else {
-      principal = window->GetExtantDoc()->NodePrincipal();
-    }
-    trackunion->CombineWithPrincipal(principal);
 
     // The listener was added at the beginning in an inactive state.
     // Activate our listener. We'll call Start() on the source when we get a callback
     // that the MediaStream has started consuming. The listener is freed
     // when the page is invalidated (on navigation or close).
     mListener->Activate(stream.forget(), mAudioSource, mVideoSource);
 
     // Note: includes JS callbacks; must be released on MainThread
     TracksAvailableCallback* tracksAvailableCallback =
-      new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, trackunion);
+      new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, domStream);
 
     mListener->AudioConfig(aec_on, (uint32_t) aec,
                            agc_on, (uint32_t) agc,
                            noise_on, (uint32_t) noise,
                            playout_delay);
 
     // Dispatch to the media thread to ask it to start the sources,
     // because that can take a while.
     // Pass ownership of domStream to the MediaOperationTask
     // to ensure it's kept alive until the MediaOperationTask runs (at least).
-    MediaManager::PostTask(FROM_HERE,
-      new MediaOperationTask(MEDIA_START, mListener, trackunion,
-                             tracksAvailableCallback,
-                             mAudioSource, mVideoSource, false, mWindowID,
-                             mOnFailure.forget()));
+    MediaManager::PostTask(
+      FROM_HERE, new MediaOperationTask(MEDIA_START, mListener, domStream,
+                                        tracksAvailableCallback, mAudioSource,
+                                        mVideoSource, false, mWindowID,
+                                        mOnFailure.forget()));
     // We won't need mOnFailure now.
     mOnFailure = nullptr;
 
     if (!MediaManager::IsPrivateBrowsing(window)) {
       // Call GetOriginKey again, this time w/persist = true, to promote
       // deviceIds to persistent, in case they're not already. Fire'n'forget.
       nsRefPtr<Pledge<nsCString>> p = media::GetOriginKey(mOrigin, false, true);
     }
@@ -2070,17 +2082,17 @@ StopSharingCallback(MediaManager *aThis,
     auto length = aListeners->Length();
     for (size_t i = 0; i < length; ++i) {
       GetUserMediaCallbackMediaStreamListener *listener = aListeners->ElementAt(i);
 
       if (listener->Stream()) { // aka HasBeenActivate()ed
         listener->Invalidate();
       }
       listener->Remove();
-      listener->StopScreenWindowSharing();
+      listener->StopSharing();
     }
     aListeners->Clear();
     aThis->RemoveWindowID(aWindowID);
   }
 }
 
 
 void
@@ -2393,17 +2405,17 @@ MediaManager::Observe(nsISupports* aSubj
   } else if (!strcmp(aTopic, "getUserMedia:revoke")) {
     nsresult rv;
     // may be windowid or screen:windowid
     nsDependentString data(aData);
     if (Substring(data, 0, strlen("screen:")).EqualsLiteral("screen:")) {
       uint64_t windowID = PromiseFlatString(Substring(data, strlen("screen:"))).ToInteger64(&rv);
       MOZ_ASSERT(NS_SUCCEEDED(rv));
       if (NS_SUCCEEDED(rv)) {
-        LOG(("Revoking Screeen/windowCapture access for window %llu", windowID));
+        LOG(("Revoking Screen/windowCapture access for window %llu", windowID));
         StopScreensharing(windowID);
       }
     } else {
       uint64_t windowID = nsString(aData).ToInteger64(&rv);
       MOZ_ASSERT(NS_SUCCEEDED(rv));
       if (NS_SUCCEEDED(rv)) {
         LOG(("Revoking MediaCapture access for window %llu", windowID));
         OnNavigation(windowID);
@@ -2574,17 +2586,17 @@ static void
 StopScreensharingCallback(MediaManager *aThis,
                           uint64_t aWindowID,
                           StreamListeners *aListeners,
                           void *aData)
 {
   if (aListeners) {
     auto length = aListeners->Length();
     for (size_t i = 0; i < length; ++i) {
-      aListeners->ElementAt(i)->StopScreenWindowSharing();
+      aListeners->ElementAt(i)->StopSharing();
     }
   }
 }
 
 void
 MediaManager::StopScreensharing(uint64_t aWindowID)
 {
   // We need to stop window/screensharing for all streams in all innerwindows that
@@ -2736,29 +2748,36 @@ GetUserMediaCallbackMediaStreamListener:
                            this, nullptr, nullptr,
                            mAudioSource, mVideoSource,
                            mFinished, mWindowID, nullptr));
 }
 
 // Doesn't kill audio
 // XXX refactor to combine with Invalidate()?
 void
-GetUserMediaCallbackMediaStreamListener::StopScreenWindowSharing()
+GetUserMediaCallbackMediaStreamListener::StopSharing()
 {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   if (mVideoSource && !mStopped &&
       (mVideoSource->GetMediaSource() == dom::MediaSourceEnum::Screen ||
        mVideoSource->GetMediaSource() == dom::MediaSourceEnum::Application ||
        mVideoSource->GetMediaSource() == dom::MediaSourceEnum::Window)) {
     // Stop the whole stream if there's no audio; just the video track if we have both
     MediaManager::PostTask(FROM_HERE,
       new MediaOperationTask(mAudioSource ? MEDIA_STOP_TRACK : MEDIA_STOP,
                              this, nullptr, nullptr,
                              nullptr, mVideoSource,
                              mFinished, mWindowID, nullptr));
+  } else if (mAudioSource &&
+             mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
+    nsCOMPtr<nsPIDOMWindow> window = nsGlobalWindow::GetInnerWindowWithId(mWindowID);
+    MOZ_ASSERT(window);
+    window->SetAudioCapture(false);
+    MediaStreamGraph::GetInstance()->UnregisterCaptureStreamForWindow(mWindowID);
+    mStream->Destroy();
   }
 }
 
 // Stop backend for track
 
 void
 GetUserMediaCallbackMediaStreamListener::StopTrack(TrackID aID, bool aIsAudio)
 {
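
StopSharing (formerly StopScreenWindowSharing) now covers audio capture as well as screen and window sharing. For the capture case the teardown mirrors the setup done on getUserMedia success; a minimal sketch of the pairing, assuming mWindowID identifies the capturing inner window:

    // Setup, on getUserMedia success (see the hunk above):
    msg->RegisterCaptureStreamForWindow(mWindowID, capturedStream);
    window->SetAudioCapture(true);

    // Teardown, reached from StopSharing on revoke or navigation:
    window->SetAudioCapture(false);
    MediaStreamGraph::GetInstance()->UnregisterCaptureStreamForWindow(mWindowID);
    mStream->Destroy(); // drop the placeholder SourceMediaStream
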
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -98,17 +98,17 @@ public:
   {
     NS_ASSERTION(mStream,"Getting stream from never-activated GUMCMSListener");
     if (!mStream) {
       return nullptr;
     }
     return mStream->AsSourceStream();
   }
 
-  void StopScreenWindowSharing();
+  void StopSharing();
 
   void StopTrack(TrackID aID, bool aIsAudio);
 
   // mVideo/AudioSource are set by Activate(), so we assume they're capturing
   // if set and represent a real capture device.
   bool CapturingVideo()
   {
     NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -286,16 +286,23 @@ void
 MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
                                          nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources)
 {
   ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
   // We spawn threads to handle gUM runnables, so we must protect the member vars
   MutexAutoLock lock(mMutex);
 
+  if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
+    nsRefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
+      new MediaEngineWebRTCAudioCaptureSource(nullptr);
+    aASources->AppendElement(audioCaptureSource);
+    return;
+  }
+
 #ifdef MOZ_WIDGET_ANDROID
   jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
 
   // get the JVM
   JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM();
   JNIEnv *env = GetJNIForThread();
 
   if (webrtc::VoiceEngine::SetAndroidObjects(jvm, env, (void*)context) != 0) {
@@ -353,17 +360,17 @@ MediaEngineWebRTC::EnumerateAudioDevices
     }
 
     if (uniqueId[0] == '\0') {
       // Mac and Linux don't set uniqueId!
       MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia
       strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
     }
 
-    nsRefPtr<MediaEngineWebRTCMicrophoneSource> aSource;
+    nsRefPtr<MediaEngineAudioSource> aSource;
     NS_ConvertUTF8toUTF16 uuid(uniqueId);
     if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
       // We've already seen this device, just append.
       aASources->AppendElement(aSource.get());
     } else {
       aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
                                                       deviceName, uniqueId);
       mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
@@ -379,19 +386,18 @@ ClearVideoSource (const nsAString&, // u
 {
   if (aData) {
     aData->Shutdown();
   }
   return PL_DHASH_NEXT;
 }
 
 static PLDHashOperator
-ClearAudioSource (const nsAString&, // unused
-                  MediaEngineWebRTCAudioSource* aData,
-                  void *userArg)
+ClearAudioSource(const nsAString &, // unused
+                 MediaEngineAudioSource *aData, void *userArg)
 {
   if (aData) {
     aData->Shutdown();
   }
   return PL_DHASH_NEXT;
 }
 
 void
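
The audioCapture early return above bypasses the VoiceEngine device walk entirely, and unlike microphone sources the capture source is not cached in mAudioSources, so each enumeration hands back a fresh instance. A hedged caller-side sketch, assuming `engine` is a MediaEngineWebRTC instance:

    nsTArray<nsRefPtr<MediaEngineAudioSource>> sources;
    engine->EnumerateAudioDevices(dom::MediaSourceEnum::AudioCapture, &sources);
    MOZ_ASSERT(sources.Length() == 1); // one synthetic source, no hardware walk
    nsString name;
    sources[0]->GetName(name); // "AudioCapture"
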
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -128,16 +128,77 @@ private:
 
   int mMinFps; // Min rate we want to accept
   dom::MediaSourceEnum mMediaSource; // source of media (camera | application | screen)
 
   size_t NumCapabilities() override;
   void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) override;
 };
 
+class MediaEngineWebRTCAudioCaptureSource : public MediaEngineAudioSource
+{
+public:
+  NS_DECL_THREADSAFE_ISUPPORTS
+
+  explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
+    : MediaEngineAudioSource(kReleased)
+  {
+  }
+  void GetName(nsAString& aName) override;
+  void GetUUID(nsACString& aUUID) override;
+  nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
+                    const MediaEnginePrefs& aPrefs,
+                    const nsString& aDeviceId) override
+  {
+    // Nothing to do here, everything is managed in MediaManager.cpp
+    return NS_OK;
+  }
+  nsresult Deallocate() override
+  {
+    // Nothing to do here, everything is managed in MediaManager.cpp
+    return NS_OK;
+  }
+  void Shutdown() override
+  {
+    // Nothing to do here, everything is managed in MediaManager.cpp
+  }
+  nsresult Start(SourceMediaStream* aMediaStream, TrackID aId) override;
+  nsresult Stop(SourceMediaStream* aMediaStream, TrackID aId) override;
+  void SetDirectListeners(bool aDirect) override
+  {}
+  nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn,
+                  uint32_t aAGC, bool aNoiseOn, uint32_t aNoise,
+                  int32_t aPlayoutDelay) override
+  {
+    return NS_OK;
+  }
+  void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
+                  TrackID aID, StreamTime aDesiredTime) override
+  {}
+  const dom::MediaSourceEnum GetMediaSource() override
+  {
+    return dom::MediaSourceEnum::AudioCapture;
+  }
+  bool IsFake() override
+  {
+    return false;
+  }
+  nsresult TakePhoto(PhotoCallback* aCallback) override
+  {
+    return NS_ERROR_NOT_IMPLEMENTED;
+  }
+  uint32_t GetBestFitnessDistance(
+    const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
+    const nsString& aDeviceId) override;
+
+protected:
+  virtual ~MediaEngineWebRTCAudioCaptureSource() { Shutdown(); }
+  nsCString mUUID;
+};
+
 class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
                                           public webrtc::VoEMediaProcess,
                                           private MediaConstraintsHelper
 {
 public:
   MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
                                     webrtc::VoiceEngine* aVoiceEnginePtr,
                                     int aIndex,
@@ -292,15 +353,14 @@ private:
   bool mBrowserEngineInit;
   bool mWinEngineInit;
   bool mAppEngineInit;
   bool mHasTabVideoSource;
 
   // Store devices we've already seen in a hashtable for quick return.
   // Maps UUID to MediaEngineSource (one set for audio, one for video).
   nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCMicrophoneSource>
-  mAudioSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineAudioSource> mAudioSources;
 };
 
 }
 
 #endif /* NSMEDIAENGINEWEBRTC_H_ */
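
Most of the new class is deliberately inert: Allocate, Deallocate, Shutdown, Config, and NotifyPull are no-ops because the MediaStreamGraph does the actual mixing and MediaManager owns the setup. The only work left is track bookkeeping in Start and Stop. A sketch of the lifecycle a caller drives, where `stream` is a caller-owned SourceMediaStream and the track id is hypothetical:

    const TrackID kTrackId = 1; // hypothetical id, for the sketch only
    nsRefPtr<MediaEngineWebRTCAudioCaptureSource> source =
      new MediaEngineWebRTCAudioCaptureSource(nullptr); // uuid arg is unused
    source->Start(stream, kTrackId); // adds an empty AudioSegment track
    // Audio flows through the MSG capture stream, never through `source`.
    source->Stop(stream, kTrackId);  // ends all tracks, finishes the stream
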
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -39,16 +39,17 @@ namespace mozilla {
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
 #define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 /**
  * Webrtc microphone source.
  */
 NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)
 
 // XXX temp until MSG supports registration
 StaticRefPtr<AudioOutputObserver> gFarendObserver;
 
 AudioOutputObserver::AudioOutputObserver()
   : mPlayoutFreq(0)
   , mPlayoutChannels(0)
   , mChunkSize(0)
@@ -615,9 +616,60 @@ MediaEngineWebRTCMicrophoneSource::Proce
                                           mTrackID, segment, (AudioSegment *) nullptr),
                     NS_DISPATCH_NORMAL);
     }
   }
 
   return;
 }
 
+void
+MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName)
+{
+  aName.AssignLiteral("AudioCapture");
 }
+void
+MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID)
+{
+  nsID uuid;
+  char uuidBuffer[NSID_LENGTH];
+  nsCString asciiString;
+  ErrorResult rv;
+
+  rv = nsContentUtils::GenerateUUIDInPlace(uuid);
+  if (rv.Failed()) {
+    aUUID.AssignLiteral("");
+    return;
+  }
+
+
+  uuid.ToProvidedString(uuidBuffer);
+  asciiString.AssignASCII(uuidBuffer);
+
+  // Remove {} and the null terminator
+  aUUID.Assign(Substring(asciiString, 1, NSID_LENGTH - 3));
+}
+
+nsresult
+MediaEngineWebRTCAudioCaptureSource::Start(SourceMediaStream *aMediaStream,
+                                           TrackID aId)
+{
+  aMediaStream->AddTrack(aId, 0, new AudioSegment());
+  return NS_OK;
+}
+
+nsresult
+MediaEngineWebRTCAudioCaptureSource::Stop(SourceMediaStream *aMediaStream,
+                                          TrackID aId)
+{
+  aMediaStream->EndAllTrackAndFinish();
+  return NS_OK;
+}
+
+uint32_t
+MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
+    const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
+    const nsString& aDeviceId)
+{
+  // There is only one way of capturing audio for now, and it's always adequate.
+  return 0;
+}
+}
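
Two notes on GetUUID above. First, nsID::ToProvidedString() produces a braced, NUL-terminated string of NSID_LENGTH (39) bytes, so Substring(asciiString, 1, NSID_LENGTH - 3) keeps exactly the 36 hex-and-dash characters, dropping '{', '}', and the terminator. Second, a fresh UUID is generated on every call (the mUUID member declared in the header is never assigned), so the reported id is not stable across enumerations. A worked check of the arithmetic:

    // "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" is 38 visible chars + NUL = 39.
    char buf[NSID_LENGTH];
    uuid.ToProvidedString(buf);
    nsCString ascii;
    ascii.AssignASCII(buf);                               // length 38
    nsCString bare(Substring(ascii, 1, NSID_LENGTH - 3)); // 36 chars, no braces
    MOZ_ASSERT(bare.Length() == 36);
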
--- a/dom/webidl/Constraints.webidl
+++ b/dom/webidl/Constraints.webidl
@@ -20,16 +20,17 @@ enum VideoFacingModeEnum {
 
 enum MediaSourceEnum {
     "camera",
     "screen",
     "application",
     "window",
     "browser",
     "microphone",
+    "audioCapture",
     "other"
 };
 
 dictionary ConstrainLongRange {
     long min;
     long max;
     long exact;
     long ideal;
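
On the bindings side, adding "audioCapture" to MediaSourceEnum gives it a slot in the generated MediaSourceEnumValues::strings table, so the enum value maps back to the exact string content scripts pass in their constraints. A small round-trip sketch, assuming the usual WebIDL codegen:

    dom::MediaSourceEnum source = dom::MediaSourceEnum::AudioCapture;
    nsCString name(
      dom::MediaSourceEnumValues::strings[uint32_t(source)].value);
    // name == "audioCapture"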