Backed out 14 changesets (bug 1156472) for test_getUserMedia_audioCapture.html failures on b2g emulator
author Wes Kocher <wkocher@mozilla.com>
Fri, 24 Jul 2015 13:15:57 -0700
changeset 254580 6a28170cf9a2440e1d84cf7ca134094ba3a42b01
parent 254579 2603fb3e8f71329a5380b4d313848a7f28d328b2
child 254581 fffd41a27a92c473dfc7d22bb70b2757ac2bbae2
push id 29108
push user ryanvm@gmail.com
push date Mon, 27 Jul 2015 14:12:01 +0000
treeherder mozilla-central@27ae736ef960
bugs 1156472
milestone 42.0a1
backs out deec8eb18346f28dae27e12a0a25c5077791006e
0f5bec4c05baf3142ce2e2420de33d9d2f78d1c2
2dd83ac00bf9cba62afb212468181e435a1fb823
abd4e47887f732286f45f370fc71deed66fa1b3f
4824d987466351de4268c09b030329da9ad275ab
12805598e6fa6eabc8a25b7db9eb5a47985f806b
e2f0062a1f67ab2f9a428256dcbd4de71d147244
99ef8e436a7fa272b86f5fee3ce7b9fff4879758
65bbfc1546af4e617d9e70d9108b897a5df6ef36
2ab4f16eaf0af89a53d017ddc61045953d7d0db0
7f565685e20a85448d673fc37557c57b7c9045f3
28c03c98cb2b1679800eccebcfd6a2ad78dcdbb4
d477cfba6e1d35bfad4259d89e843b7f7807ed94
9819fa56caa1e491f0a25750f7666b84ebd6903a
Backed out 14 changesets (bug 1156472) for test_getUserMedia_audioCapture.html failures on b2g emulator

Backed out changeset deec8eb18346 (bug 1156472)
Backed out changeset 0f5bec4c05ba (bug 1156472)
Backed out changeset 2dd83ac00bf9 (bug 1156472)
Backed out changeset abd4e47887f7 (bug 1156472)
Backed out changeset 4824d9874663 (bug 1156472)
Backed out changeset 12805598e6fa (bug 1156472)
Backed out changeset e2f0062a1f67 (bug 1156472)
Backed out changeset 99ef8e436a7f (bug 1156472)
Backed out changeset 65bbfc1546af (bug 1156472)
Backed out changeset 2ab4f16eaf0a (bug 1156472)
Backed out changeset 7f565685e20a (bug 1156472)
Backed out changeset 28c03c98cb2b (bug 1156472)
Backed out changeset d477cfba6e1d (bug 1156472)
Backed out changeset 9819fa56caa1 (bug 1156472)
browser/locales/en-US/chrome/browser/browser.dtd
browser/locales/en-US/chrome/browser/browser.properties
browser/modules/ContentWebRTC.jsm
browser/modules/webrtcUI.jsm
dom/audiochannel/AudioChannelAgent.cpp
dom/audiochannel/AudioChannelAgent.h
dom/audiochannel/AudioChannelService.cpp
dom/audiochannel/AudioChannelService.h
dom/audiochannel/nsIAudioChannelAgent.idl
dom/base/nsGlobalWindow.cpp
dom/base/nsPIDOMWindow.h
dom/fmradio/FMRadio.cpp
dom/html/HTMLMediaElement.cpp
dom/html/HTMLMediaElement.h
dom/media/AudioCaptureStream.cpp
dom/media/AudioCaptureStream.h
dom/media/AudioChannelFormat.cpp
dom/media/AudioChannelFormat.h
dom/media/AudioMixer.h
dom/media/AudioSegment.cpp
dom/media/AudioSegment.h
dom/media/DOMMediaStream.cpp
dom/media/DOMMediaStream.h
dom/media/DecodedStream.cpp
dom/media/DecodedStream.h
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaManager.cpp
dom/media/MediaManager.h
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/moz.build
dom/media/tests/mochitest/head.js
dom/media/tests/mochitest/mochitest.ini
dom/media/tests/mochitest/pc.js
dom/media/tests/mochitest/test_getUserMedia_audioCapture.html
dom/media/tests/mochitest/test_peerConnection_replaceTrack.html
dom/media/tests/mochitest/test_peerConnection_webAudio.html
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioDestinationNode.h
dom/media/webrtc/MediaEngineWebRTC.cpp
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/webidl/Constraints.webidl
modules/libpref/init/all.js
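
For context: the feature being backed out (bug 1156472) let a page capture its own tab's audio through getUserMedia by passing an audio constraint whose mediaSource is not "microphone" (see the Constraints.webidl and ContentWebRTC.jsm hunks below). The following is a minimal, hedged sketch of how content exercised it; the exact constraint value "audioCapture" and the pref name are inferred from the test name and the modules/libpref/init/all.js entry, neither of which is spelled out in this diff:

  // Hedged sketch (2015-era callback form). The "audioCapture" value and the
  // "media.getusermedia.audiocapture.enabled" pref name are assumptions.
  navigator.mozGetUserMedia(
    { audio: { mediaSource: "audioCapture" } },  // anything != "microphone"
    function onSuccess(stream) {
      // The stream carries a mix of everything the tab is playing.
      var ctx = new AudioContext();
      ctx.createMediaStreamSource(stream).connect(ctx.destination);
    },
    function onError(err) { console.error("audio capture failed:", err); });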
--- a/browser/locales/en-US/chrome/browser/browser.dtd
+++ b/browser/locales/en-US/chrome/browser/browser.dtd
@@ -753,17 +753,16 @@ you can use these alternative items. Oth
 <!ENTITY social.markpageMenu.label "Save Page To…">
 <!ENTITY social.marklinkMenu.accesskey "L">
 <!ENTITY social.marklinkMenu.label "Save Link To…">
 
 <!ENTITY getUserMedia.selectCamera.label "Camera to share:">
 <!ENTITY getUserMedia.selectCamera.accesskey "C">
 <!ENTITY getUserMedia.selectMicrophone.label "Microphone to share:">
 <!ENTITY getUserMedia.selectMicrophone.accesskey "M">
-<!ENTITY getUserMedia.audioCapture.label "Audio from the tab will be shared.">
 <!ENTITY getUserMedia.allWindowsShared.message "All visible windows on your screen will be shared.">
 
 <!-- Bad Content Blocker Doorhanger Notification -->
 <!ENTITY badContentBlocked.moreinfo "Most websites will work properly even if content is blocked.">
 
 <!ENTITY mixedContentBlocked2.message "Insecure content">
 <!ENTITY mixedContentBlocked2.moreinfo "Some unencrypted elements on this website have been blocked.">
 <!ENTITY mixedContentBlocked2.learnMore "Learn More">
--- a/browser/locales/en-US/chrome/browser/browser.properties
+++ b/browser/locales/en-US/chrome/browser/browser.properties
@@ -548,27 +548,23 @@ identity.next.accessKey = n
 # LOCALIZATION NOTE: shown in the popup notification when a user successfully logs into a website
 # LOCALIZATION NOTE (identity.loggedIn.description): %S is the user's identity (e.g. user@example.com)
 identity.loggedIn.description = Signed in as: %S
 identity.loggedIn.signOut.label = Sign Out
 identity.loggedIn.signOut.accessKey = O
 
 # LOCALIZATION NOTE (getUserMedia.shareCamera.message, getUserMedia.shareMicrophone.message,
 #                    getUserMedia.shareScreen.message, getUserMedia.shareCameraAndMicrophone.message,
-#                    getUserMedia.shareScreenAndMicrophone.message, getUserMedia.shareCameraAndAudioCapture.message,
-#                    getUserMedia.shareAudioCapture.message, getUserMedia.shareScreenAndAudioCapture.message):
+#                    getUserMedia.shareScreenAndMicrophone.message):
 #  %S is the website origin (e.g. www.mozilla.org)
 getUserMedia.shareCamera.message = Would you like to share your camera with %S?
 getUserMedia.shareMicrophone.message = Would you like to share your microphone with %S?
 getUserMedia.shareScreen.message = Would you like to share your screen with %S?
 getUserMedia.shareCameraAndMicrophone.message = Would you like to share your camera and microphone with %S?
-getUserMedia.shareCameraAndAudioCapture.message = Would you like to share your camera and this tab's audio with %S?
 getUserMedia.shareScreenAndMicrophone.message = Would you like to share your microphone and screen with %S?
-getUserMedia.shareScreenAndAudioCapture.message = Would you like to share this tab's audio and your screen with %S?
-getUserMedia.shareAudioCapture.message = Would you like to share this tab's audio with %S?
 getUserMedia.selectWindow.label=Window to share:
 getUserMedia.selectWindow.accesskey=W
 getUserMedia.selectScreen.label=Screen to share:
 getUserMedia.selectScreen.accesskey=S
 getUserMedia.selectApplication.label=Application to share:
 getUserMedia.selectApplication.accesskey=A
 getUserMedia.noVideo.label = No Video
 getUserMedia.noApplication.label = No Application
@@ -600,79 +596,62 @@ getUserMedia.never.label = Never Share
 getUserMedia.never.accesskey = N
 getUserMedia.sharingCamera.message2 = You are currently sharing your camera with this page.
 getUserMedia.sharingMicrophone.message2 = You are currently sharing your microphone with this page.
 getUserMedia.sharingCameraAndMicrophone.message2 = You are currently sharing your camera and microphone with this page.
 getUserMedia.sharingApplication.message = You are currently sharing an application with this page.
 getUserMedia.sharingScreen.message = You are currently sharing your screen with this page.
 getUserMedia.sharingWindow.message = You are currently sharing a window with this page.
 getUserMedia.sharingBrowser.message = You are currently sharing a tab with this page.
-getUserMedia.sharingAudioCapture.message = You are currently sharing a tab's audio with this page.
 getUserMedia.continueSharing.label = Continue Sharing
 getUserMedia.continueSharing.accesskey = C
 getUserMedia.stopSharing.label = Stop Sharing
 getUserMedia.stopSharing.accesskey = S
 
 getUserMedia.sharingMenu.label = Tabs sharing devices
 getUserMedia.sharingMenu.accesskey = d
 # LOCALIZATION NOTE (getUserMedia.sharingMenuCamera
 #                    getUserMedia.sharingMenuMicrophone,
-#                    getUserMedia.sharingMenuAudioCapture,
 #                    getUserMedia.sharingMenuApplication,
 #                    getUserMedia.sharingMenuScreen,
 #                    getUserMedia.sharingMenuWindow,
 #                    getUserMedia.sharingMenuBrowser,
 #                    getUserMedia.sharingMenuCameraMicrophone,
 #                    getUserMedia.sharingMenuCameraMicrophoneApplication,
 #                    getUserMedia.sharingMenuCameraMicrophoneScreen,
 #                    getUserMedia.sharingMenuCameraMicrophoneWindow,
 #                    getUserMedia.sharingMenuCameraMicrophoneBrowser,
-#                    getUserMedia.sharingMenuCameraAudioCapture,
-#                    getUserMedia.sharingMenuCameraAudioCaptureApplication,
-#                    getUserMedia.sharingMenuCameraAudioCaptureScreen,
-#                    getUserMedia.sharingMenuCameraAudioCaptureWindow,
-#                    getUserMedia.sharingMenuCameraAudioCaptureBrowser,
 #                    getUserMedia.sharingMenuCameraApplication,
 #                    getUserMedia.sharingMenuCameraScreen,
 #                    getUserMedia.sharingMenuCameraWindow,
 #                    getUserMedia.sharingMenuCameraBrowser,
 #                    getUserMedia.sharingMenuMicrophoneApplication,
 #                    getUserMedia.sharingMenuMicrophoneScreen,
 #                    getUserMedia.sharingMenuMicrophoneWindow,
 #                    getUserMedia.sharingMenuMicrophoneBrowser):
 # %S is the website origin (e.g. www.mozilla.org)
 getUserMedia.sharingMenuCamera = %S (camera)
 getUserMedia.sharingMenuMicrophone = %S (microphone)
-getUserMedia.sharingMenuAudioCapture = %S (tab audio)
 getUserMedia.sharingMenuApplication = %S (application)
 getUserMedia.sharingMenuScreen = %S (screen)
 getUserMedia.sharingMenuWindow = %S (window)
 getUserMedia.sharingMenuBrowser = %S (tab)
 getUserMedia.sharingMenuCameraMicrophone = %S (camera and microphone)
 getUserMedia.sharingMenuCameraMicrophoneApplication = %S (camera, microphone and application)
 getUserMedia.sharingMenuCameraMicrophoneScreen = %S (camera, microphone and screen)
 getUserMedia.sharingMenuCameraMicrophoneWindow = %S (camera, microphone and window)
 getUserMedia.sharingMenuCameraMicrophoneBrowser = %S (camera, microphone and tab)
-getUserMedia.sharingMenuCameraAudioCapture = %S (camera and tab audio)
-getUserMedia.sharingMenuCameraAudioCaptureApplication = %S (camera, tab audio and application)
-getUserMedia.sharingMenuCameraAudioCaptureScreen = %S (camera, tab audio and screen)
-getUserMedia.sharingMenuCameraAudioCaptureWindow = %S (camera, tab audio and window)
-getUserMedia.sharingMenuCameraAudioCaptureBrowser = %S (camera, tab audio and tab)
 getUserMedia.sharingMenuCameraApplication = %S (camera and application)
 getUserMedia.sharingMenuCameraScreen = %S (camera and screen)
 getUserMedia.sharingMenuCameraWindow = %S (camera and window)
 getUserMedia.sharingMenuCameraBrowser = %S (camera and tab)
 getUserMedia.sharingMenuMicrophoneApplication = %S (microphone and application)
 getUserMedia.sharingMenuMicrophoneScreen = %S (microphone and screen)
 getUserMedia.sharingMenuMicrophoneWindow = %S (microphone and window)
 getUserMedia.sharingMenuMicrophoneBrowser = %S (microphone and tab)
-getUserMedia.sharingMenuMicrophoneApplication = %S (tab audio and application)
-getUserMedia.sharingMenuMicrophoneScreen = %S (tab audio and screen)
-getUserMedia.sharingMenuMicrophoneWindow = %S (tab audio and window)
-getUserMedia.sharingMenuMicrophoneBrowser = %S (tab audio and tab)
 # LOCALIZATION NOTE(getUserMedia.sharingMenuUnknownHost): this is used for the website
 # origin for the sharing menu if no readable origin could be deduced from the URL.
 getUserMedia.sharingMenuUnknownHost = Unknown origin
 
 # LOCALIZATION NOTE(emeNotifications.drmContentPlaying.message2): %S is brandShortName.
 emeNotifications.drmContentPlaying.message2 = Some audio or video on this site uses DRM software, which may limit what %S can let you do with it.
 emeNotifications.drmContentPlaying.button.label = Configure…
 emeNotifications.drmContentPlaying.button.accesskey = C
--- a/browser/modules/ContentWebRTC.jsm
+++ b/browser/modules/ContentWebRTC.jsm
@@ -81,31 +81,24 @@ function handleRequest(aSubject, aTopic,
 
 function prompt(aContentWindow, aWindowID, aCallID, aConstraints, aDevices, aSecure) {
   let audioDevices = [];
   let videoDevices = [];
   let devices = [];
 
   // MediaStreamConstraints defines video as 'boolean or MediaTrackConstraints'.
   let video = aConstraints.video || aConstraints.picture;
-  let audio = aConstraints.audio;
   let sharingScreen = video && typeof(video) != "boolean" &&
                       video.mediaSource != "camera";
-  let sharingAudio = audio && typeof(audio) != "boolean" &&
-                     audio.mediaSource != "microphone";
   for (let device of aDevices) {
     device = device.QueryInterface(Ci.nsIMediaDevice);
     switch (device.type) {
       case "audio":
-        // Check that if we got a microphone, we have not requested an audio
-        // capture, and if we have requested an audio capture, we are not
-        // getting a microphone instead.
-        if (audio && (device.mediaSource == "microphone") != sharingAudio) {
-          audioDevices.push({name: device.name, deviceIndex: devices.length,
-                             mediaSource: device.mediaSource});
+        if (aConstraints.audio) {
+          audioDevices.push({name: device.name, deviceIndex: devices.length});
           devices.push(device);
         }
         break;
       case "video":
         // Verify that if we got a camera, we haven't requested a screen share,
         // or that if we requested a screen share we aren't getting a camera.
         if (video && (device.mediaSource == "camera") != sharingScreen) {
           videoDevices.push({name: device.name, deviceIndex: devices.length,
@@ -115,17 +108,17 @@ function prompt(aContentWindow, aWindowI
         break;
     }
   }
 
   let requestTypes = [];
   if (videoDevices.length)
     requestTypes.push(sharingScreen ? "Screen" : "Camera");
   if (audioDevices.length)
-    requestTypes.push(sharingAudio ? "AudioCapture" : "Microphone");
+    requestTypes.push("Microphone");
 
   if (!requestTypes.length) {
     denyRequest({callID: aCallID}, "NotFoundError");
     return;
   }
 
   if (!aContentWindow.pendingGetUserMediaRequests) {
     aContentWindow.pendingGetUserMediaRequests = new Map();
@@ -135,17 +128,16 @@ function prompt(aContentWindow, aWindowI
 
   let request = {
     callID: aCallID,
     windowID: aWindowID,
     documentURI: aContentWindow.document.documentURI,
     secure: aSecure,
     requestTypes: requestTypes,
     sharingScreen: sharingScreen,
-    sharingAudio: sharingAudio,
     audioDevices: audioDevices,
     videoDevices: videoDevices
   };
 
   let mm = getMessageManagerForWindow(aContentWindow);
   mm.sendAsyncMessage("webrtc:Request", request);
 }
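
The check removed here is the crux of the ContentWebRTC.jsm change: an audio device was listed only when "is this a microphone" agreed with "did the page ask for a microphone rather than a capture". A small sketch of that predicate, taken straight from the removed lines:

  // Predicate from the removed code above; sharingAudio is true when the
  // audio constraint had a mediaSource other than "microphone".
  function audioDeviceMatchesRequest(device, sharingAudio) {
    return (device.mediaSource == "microphone") != sharingAudio;
  }
  // microphone device,   plain audio request -> true  (listed)
  // audioCapture device, plain audio request -> false (filtered out)
  // audioCapture device, capture request     -> true  (listed)
  // microphone device,   capture request     -> false (filtered out)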
 
--- a/browser/modules/webrtcUI.jsm
+++ b/browser/modules/webrtcUI.jsm
@@ -183,30 +183,30 @@ function getHost(uri, href) {
       host = bundle.GetStringFromName("getUserMedia.sharingMenuUnknownHost");
     }
   }
   return host;
 }
 
 function prompt(aBrowser, aRequest) {
   let {audioDevices: audioDevices, videoDevices: videoDevices,
-       sharingScreen: sharingScreen, sharingAudio: sharingAudio,
-       requestTypes: requestTypes} = aRequest;
+       sharingScreen: sharingScreen, requestTypes: requestTypes} = aRequest;
   let uri = Services.io.newURI(aRequest.documentURI, null, null);
   let host = getHost(uri);
   let chromeDoc = aBrowser.ownerDocument;
   let chromeWin = chromeDoc.defaultView;
   let stringBundle = chromeWin.gNavigatorBundle;
   let stringId = "getUserMedia.share" + requestTypes.join("And") + ".message";
   let message = stringBundle.getFormattedString(stringId, [host]);
 
   let mainLabel;
-  if (sharingScreen || sharingAudio) {
+  if (sharingScreen) {
     mainLabel = stringBundle.getString("getUserMedia.shareSelectedItems.label");
-  } else {
+  }
+  else {
     let string = stringBundle.getString("getUserMedia.shareSelectedDevices.label");
     mainLabel = PluralForm.get(requestTypes.length, string);
   }
 
   let notification; // Used by action callbacks.
   let mainAction = {
     label: mainLabel,
     accessKey: stringBundle.getString("getUserMedia.shareSelectedDevices.accesskey"),
@@ -220,38 +220,38 @@ function prompt(aBrowser, aRequest) {
     {
       label: stringBundle.getString("getUserMedia.denyRequest.label"),
       accessKey: stringBundle.getString("getUserMedia.denyRequest.accesskey"),
       callback: function () {
         denyRequest(notification.browser, aRequest);
       }
     }
   ];
-  // Bug 1037438: implement 'never' for screen sharing.
-  if (!sharingScreen && !sharingAudio) {
+
+  if (!sharingScreen) { // Bug 1037438: implement 'never' for screen sharing.
     secondaryActions.push({
       label: stringBundle.getString("getUserMedia.never.label"),
       accessKey: stringBundle.getString("getUserMedia.never.accesskey"),
       callback: function () {
         denyRequest(notification.browser, aRequest);
         // Let someone save "Never" for http sites so that they can be stopped from
         // bothering you with doorhangers.
         let perms = Services.perms;
         if (audioDevices.length)
           perms.add(uri, "microphone", perms.DENY_ACTION);
         if (videoDevices.length)
           perms.add(uri, "camera", perms.DENY_ACTION);
       }
     });
   }
 
-  if (aRequest.secure && !sharingScreen && !sharingAudio) {
+  if (aRequest.secure && !sharingScreen) {
     // Don't show the 'Always' action if the connection isn't secure, or for
-    // screen/audio sharing (because we can't guess which window the user wants
-    // to share without prompting).
+    // screen sharing (because we can't guess which window the user wants to
+    // share without prompting).
     secondaryActions.unshift({
       label: stringBundle.getString("getUserMedia.always.label"),
       accessKey: stringBundle.getString("getUserMedia.always.accesskey"),
       callback: function () {
         mainAction.callback(true);
       }
     });
   }
@@ -261,18 +261,17 @@ function prompt(aBrowser, aRequest) {
       if (aTopic == "swapping")
         return true;
 
       let chromeDoc = this.browser.ownerDocument;
 
       if (aTopic == "shown") {
         let PopupNotifications = chromeDoc.defaultView.PopupNotifications;
         let popupId = "Devices";
-        if (requestTypes.length == 1 && (requestTypes[0] == "Microphone" ||
-                                         requestTypes[0] == "AudioCapture"))
+        if (requestTypes.length == 1 && requestTypes[0] == "Microphone")
           popupId = "Microphone";
         if (requestTypes.indexOf("Screen") != -1)
           popupId = "Screen";
         PopupNotifications.panel.firstChild.setAttribute("popupid", "webRTC-share" + popupId);
       }
 
       if (aTopic != "showing")
         return false;
@@ -380,35 +379,31 @@ function prompt(aBrowser, aRequest) {
         menuitem.setAttribute("tooltiptext", deviceName);
         if (type)
           menuitem.setAttribute("devicetype", type);
         menupopup.appendChild(menuitem);
       }
 
       chromeDoc.getElementById("webRTC-selectCamera").hidden = !videoDevices.length || sharingScreen;
       chromeDoc.getElementById("webRTC-selectWindowOrScreen").hidden = !sharingScreen || !videoDevices.length;
-      chromeDoc.getElementById("webRTC-selectMicrophone").hidden = !audioDevices.length || sharingAudio;
+      chromeDoc.getElementById("webRTC-selectMicrophone").hidden = !audioDevices.length;
 
       let camMenupopup = chromeDoc.getElementById("webRTC-selectCamera-menupopup");
       let windowMenupopup = chromeDoc.getElementById("webRTC-selectWindow-menupopup");
       let micMenupopup = chromeDoc.getElementById("webRTC-selectMicrophone-menupopup");
       if (sharingScreen)
         listScreenShareDevices(windowMenupopup, videoDevices);
       else
         listDevices(camMenupopup, videoDevices);
-
-      if (!sharingAudio)
-        listDevices(micMenupopup, audioDevices);
-
+      listDevices(micMenupopup, audioDevices);
       if (requestTypes.length == 2) {
         let stringBundle = chromeDoc.defaultView.gNavigatorBundle;
         if (!sharingScreen)
           addDeviceToList(camMenupopup, stringBundle.getString("getUserMedia.noVideo.label"), "-1");
-        if (!sharingAudio)
-          addDeviceToList(micMenupopup, stringBundle.getString("getUserMedia.noAudio.label"), "-1");
+        addDeviceToList(micMenupopup, stringBundle.getString("getUserMedia.noAudio.label"), "-1");
       }
 
       this.mainAction.callback = function(aRemember) {
         let allowedDevices = [];
         let perms = Services.perms;
         if (videoDevices.length) {
           let listId = "webRTC-select" + (sharingScreen ? "Window" : "Camera") + "-menulist";
           let videoDeviceIndex = chromeDoc.getElementById(listId).value;
@@ -416,28 +411,23 @@ function prompt(aBrowser, aRequest) {
           if (allowCamera)
             allowedDevices.push(videoDeviceIndex);
           if (aRemember) {
             perms.add(uri, "camera",
                       allowCamera ? perms.ALLOW_ACTION : perms.DENY_ACTION);
           }
         }
         if (audioDevices.length) {
-          if (!sharingAudio) {
-            let audioDeviceIndex = chromeDoc.getElementById("webRTC-selectMicrophone-menulist").value;
-            let allowMic = audioDeviceIndex != "-1";
-            if (allowMic)
-              allowedDevices.push(audioDeviceIndex);
-            if (aRemember) {
-              perms.add(uri, "microphone",
-                        allowMic ? perms.ALLOW_ACTION : perms.DENY_ACTION);
-            }
-          } else {
-            // Only one device possible for audio capture.
-            allowedDevices.push(0);
+          let audioDeviceIndex = chromeDoc.getElementById("webRTC-selectMicrophone-menulist").value;
+          let allowMic = audioDeviceIndex != "-1";
+          if (allowMic)
+            allowedDevices.push(audioDeviceIndex);
+          if (aRemember) {
+            perms.add(uri, "microphone",
+                      allowMic ? perms.ALLOW_ACTION : perms.DENY_ACTION);
           }
         }
 
         if (!allowedDevices.length) {
           denyRequest(notification.browser, aRequest);
           return;
         }
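
These removals hang together: prompt() in webrtcUI.jsm derives the doorhanger string key directly from the request types, so once the AudioCapture entries leave browser.properties, ContentWebRTC.jsm must stop emitting the "AudioCapture" request type or the lookup fails. An illustration of the key construction from the code above:

  // stringId derivation in prompt() above, with a pre-backout request shape:
  var requestTypes = ["Screen", "AudioCapture"];
  var stringId = "getUserMedia.share" + requestTypes.join("And") + ".message";
  // -> "getUserMedia.shareScreenAndAudioCapture.message"; that key is deleted
  // from browser.properties by this backout, so getFormattedString(stringId,
  // [host]) would throw on the missing property.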
 
--- a/dom/audiochannel/AudioChannelAgent.cpp
+++ b/dom/audiochannel/AudioChannelAgent.cpp
@@ -30,17 +30,16 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
   NS_INTERFACE_MAP_ENTRY(nsISupports)
 NS_INTERFACE_MAP_END
 
 NS_IMPL_CYCLE_COLLECTING_ADDREF(AudioChannelAgent)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(AudioChannelAgent)
 
 AudioChannelAgent::AudioChannelAgent()
   : mAudioChannelType(AUDIO_AGENT_CHANNEL_ERROR)
-  , mInnerWindowID(0)
   , mIsRegToService(false)
 {
 }
 
 AudioChannelAgent::~AudioChannelAgent()
 {
   Shutdown();
 }
@@ -100,20 +99,16 @@ AudioChannelAgent::InitInternal(nsIDOMWi
 
   if (mAudioChannelType != AUDIO_AGENT_CHANNEL_ERROR ||
       aChannelType > AUDIO_AGENT_CHANNEL_PUBLICNOTIFICATION ||
       aChannelType < AUDIO_AGENT_CHANNEL_NORMAL) {
     return NS_ERROR_FAILURE;
   }
 
   if (aWindow) {
-    nsCOMPtr<nsPIDOMWindow> pInnerWindow = do_QueryInterface(aWindow);
-    MOZ_ASSERT(pInnerWindow->IsInnerWindow());
-    mInnerWindowID = pInnerWindow->WindowID();
-
     nsCOMPtr<nsIDOMWindow> topWindow;
     aWindow->GetScriptableTop(getter_AddRefs(topWindow));
     mWindow = do_QueryInterface(topWindow);
     if (mWindow) {
       mWindow = mWindow->GetOuterWindow();
     }
   }
 
@@ -191,23 +186,8 @@ AudioChannelAgent::WindowVolumeChanged()
   callback->WindowVolumeChanged(volume, muted);
 }
 
 uint64_t
 AudioChannelAgent::WindowID() const
 {
   return mWindow ? mWindow->WindowID() : 0;
 }
-
-void
-AudioChannelAgent::WindowAudioCaptureChanged(uint64_t aInnerWindowID)
-{
-  if (aInnerWindowID != mInnerWindowID) {
-    return;
-  }
-
-  nsCOMPtr<nsIAudioChannelAgentCallback> callback = GetCallback();
-  if (!callback) {
-    return;
-  }
-
-  callback->WindowAudioCaptureChanged();
-}
--- a/dom/audiochannel/AudioChannelAgent.h
+++ b/dom/audiochannel/AudioChannelAgent.h
@@ -29,17 +29,16 @@ public:
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
   NS_DECL_NSIAUDIOCHANNELAGENT
 
   NS_DECL_CYCLE_COLLECTION_CLASS(AudioChannelAgent)
 
   AudioChannelAgent();
 
   void WindowVolumeChanged();
-  void WindowAudioCaptureChanged(uint64_t aInnerWindowID);
 
   nsPIDOMWindow* Window() const
   {
     return mWindow;
   }
 
   uint64_t WindowID() const;
 
@@ -57,17 +56,16 @@ private:
   void Shutdown();
 
   nsCOMPtr<nsPIDOMWindow> mWindow;
   nsCOMPtr<nsIAudioChannelAgentCallback> mCallback;
 
   nsWeakPtr mWeakCallback;
 
   int32_t mAudioChannelType;
-  uint64_t mInnerWindowID;
   bool mIsRegToService;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 
 #endif
--- a/dom/audiochannel/AudioChannelService.cpp
+++ b/dom/audiochannel/AudioChannelService.cpp
@@ -541,48 +541,16 @@ AudioChannelService::RefreshAgentsVolume
 
   nsTObserverArray<AudioChannelAgent*>::ForwardIterator
     iter(winData->mAgents);
   while (iter.HasMore()) {
     iter.GetNext()->WindowVolumeChanged();
   }
 }
 
-void
-AudioChannelService::RefreshAgentsCapture(nsPIDOMWindow* aWindow,
-                                          uint64_t aInnerWindowID)
-{
-  MOZ_ASSERT(aWindow);
-  MOZ_ASSERT(aWindow->IsOuterWindow());
-
-  nsCOMPtr<nsIDOMWindow> topWindow;
-  aWindow->GetScriptableTop(getter_AddRefs(topWindow));
-  nsCOMPtr<nsPIDOMWindow> pTopWindow = do_QueryInterface(topWindow);
-  if (!pTopWindow) {
-    return;
-  }
-
-  AudioChannelWindow* winData = GetWindowData(pTopWindow->WindowID());
-
-  // This can happen, but only during shutdown, because the the outer window
-  // changes ScriptableTop, so that its ID is different.
-  // In this case either we are capturing, and it's too late because the window
-  // has been closed anyways, or we are un-capturing, and everything has already
-  // been cleaned up by the HTMLMediaElements or the AudioContexts.
-  if (!winData) {
-    return;
-  }
-
-  nsTObserverArray<AudioChannelAgent*>::ForwardIterator
-    iter(winData->mAgents);
-  while (iter.HasMore()) {
-    iter.GetNext()->WindowAudioCaptureChanged(aInnerWindowID);
-  }
-}
-
 /* static */ const nsAttrValue::EnumTable*
 AudioChannelService::GetAudioChannelTable()
 {
   return kMozAudioChannelAttributeTable;
 }
 
 /* static */ AudioChannel
 AudioChannelService::GetAudioChannel(const nsAString& aChannel)
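
The deleted RefreshAgentsCapture is the service half of the AudioChannelAgent change above: agents are registered per top outer window, but capture is toggled per inner window, so the service fans the notification out to every agent under the top window and each agent drops it unless the inner-window ID matches. A rough JS model of that fan-out (names are illustrative, not a real API):

  // Rough model of the deleted fan-out; see the removed comment in
  // AudioChannelService.h below for why agents are grouped per top window.
  function refreshAgentsCapture(topWindowAgents, innerWindowID) {
    for (var agent of topWindowAgents) {
      if (agent.innerWindowID !== innerWindowID) {
        continue;  // agent lives in a different inner window (e.g. subframe)
      }
      agent.callback.windowAudioCaptureChanged();
    }
  }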
--- a/dom/audiochannel/AudioChannelService.h
+++ b/dom/audiochannel/AudioChannelService.h
@@ -97,24 +97,16 @@ public:
    */
   virtual void SetDefaultVolumeControlChannel(int32_t aChannel,
                                               bool aVisible);
 
   bool AnyAudioChannelIsActive();
 
   void RefreshAgentsVolume(nsPIDOMWindow* aWindow);
 
-  // This method needs to know the inner window that wants to capture audio. We
-  // group agents per top outer window, but we can have multiple innerWindow per
-  // top outerWindow (subiframes, etc.) and we have to identify all the agents
-  // just for a particular innerWindow.
-  void RefreshAgentsCapture(nsPIDOMWindow* aWindow,
-                            uint64_t aInnerWindowID);
-
-
 #ifdef MOZ_WIDGET_GONK
   void RegisterSpeakerManager(SpeakerManagerService* aSpeakerManager)
   {
     if (!mSpeakerManager.Contains(aSpeakerManager)) {
       mSpeakerManager.AppendElement(aSpeakerManager);
     }
   }
 
--- a/dom/audiochannel/nsIAudioChannelAgent.idl
+++ b/dom/audiochannel/nsIAudioChannelAgent.idl
@@ -1,28 +1,23 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsISupports.idl"
 
 interface nsIDOMWindow;
 
-[uuid(5fe83b24-38b9-4901-a4a1-d1bd57d3fe18)]
+[uuid(4f537c88-3722-4946-9a09-ce559fa0591d)]
 interface nsIAudioChannelAgentCallback : nsISupports
 {
   /**
    * Notified when the window volume/mute is changed
    */
   void windowVolumeChanged(in float aVolume, in bool aMuted);
-
-  /**
-   * Notified when the capture state is changed.
-   */
-  void windowAudioCaptureChanged();
 };
 
 /**
  * This interface provides an agent for gecko components to participate
  * in the audio channel service. Gecko components are responsible for
  *   1. Indicating what channel type they are using (via the init() member
  *      function).
  *   2. Before playing, checking the playable status of the channel.
--- a/dom/base/nsGlobalWindow.cpp
+++ b/dom/base/nsGlobalWindow.cpp
@@ -559,17 +559,17 @@ nsPIDOMWindow::nsPIDOMWindow(nsPIDOMWind
 : mFrameElement(nullptr), mDocShell(nullptr), mModalStateDepth(0),
   mRunningTimeout(nullptr), mMutationBits(0), mIsDocumentLoaded(false),
   mIsHandlingResizeEvent(false), mIsInnerWindow(aOuterWindow != nullptr),
   mMayHavePaintEventListener(false), mMayHaveTouchEventListener(false),
   mMayHaveMouseEnterLeaveEventListener(false),
   mMayHavePointerEnterLeaveEventListener(false),
   mIsModalContentWindow(false),
   mIsActive(false), mIsBackground(false),
-  mAudioMuted(false), mAudioVolume(1.0), mAudioCaptured(false),
+  mAudioMuted(false), mAudioVolume(1.0),
   mDesktopModeViewport(false), mInnerWindow(nullptr),
   mOuterWindow(aOuterWindow),
   // Make sure no actual window ends up with mWindowID == 0
   mWindowID(NextWindowID()), mHasNotifiedGlobalCreated(false),
   mMarkedCCGeneration(0), mServiceWorkersTestingEnabled(false)
  {}
 
 nsPIDOMWindow::~nsPIDOMWindow() {}
@@ -3740,36 +3740,16 @@ nsPIDOMWindow::SetAudioVolume(float aVol
 
 void
 nsPIDOMWindow::RefreshMediaElements()
 {
   nsRefPtr<AudioChannelService> service = AudioChannelService::GetOrCreate();
   service->RefreshAgentsVolume(GetOuterWindow());
 }
 
-bool
-nsPIDOMWindow::GetAudioCaptured() const
-{
-  MOZ_ASSERT(IsInnerWindow());
-  return mAudioCaptured;
-}
-
-nsresult
-nsPIDOMWindow::SetAudioCapture(bool aCapture)
-{
-  MOZ_ASSERT(IsInnerWindow());
-
-  mAudioCaptured = aCapture;
-
-  nsRefPtr<AudioChannelService> service = AudioChannelService::GetOrCreate();
-  service->RefreshAgentsCapture(GetOuterWindow(), mWindowID);
-
-  return NS_OK;
-}
-
 // nsISpeechSynthesisGetter
 
 #ifdef MOZ_WEBSPEECH
 SpeechSynthesis*
 nsGlobalWindow::GetSpeechSynthesis(ErrorResult& aError)
 {
   MOZ_RELEASE_ASSERT(IsInnerWindow());
 
--- a/dom/base/nsPIDOMWindow.h
+++ b/dom/base/nsPIDOMWindow.h
@@ -180,19 +180,16 @@ public:
 
   // Audio API
   bool GetAudioMuted() const;
   void SetAudioMuted(bool aMuted);
 
   float GetAudioVolume() const;
   nsresult SetAudioVolume(float aVolume);
 
-  bool GetAudioCaptured() const;
-  nsresult SetAudioCapture(bool aCapture);
-
   virtual void SetServiceWorkersTestingEnabled(bool aEnabled)
   {
     MOZ_ASSERT(IsOuterWindow());
     mServiceWorkersTestingEnabled = aEnabled;
   }
 
   bool GetServiceWorkersTestingEnabled()
   {
@@ -820,18 +817,16 @@ protected:
   // Tracks whether our docshell is active.  If it is, mIsBackground
   // is false.  Too bad we have so many different concepts of
   // "active".  Only used on outer windows.
   bool                   mIsBackground;
 
   bool                   mAudioMuted;
   float                  mAudioVolume;
 
-  bool                   mAudioCaptured;
-
   // current desktop mode flag.
   bool                   mDesktopModeViewport;
 
   // And these are the references between inner and outer windows.
   nsPIDOMWindow* MOZ_NON_OWNING_REF mInnerWindow;
   nsCOMPtr<nsPIDOMWindow> mOuterWindow;
 
   // the element within the document that is currently focused when this
--- a/dom/fmradio/FMRadio.cpp
+++ b/dom/fmradio/FMRadio.cpp
@@ -466,22 +466,16 @@ FMRadio::EnableAudioChannelAgent()
 NS_IMETHODIMP
 FMRadio::WindowVolumeChanged(float aVolume, bool aMuted)
 {
   IFMRadioService::Singleton()->EnableAudio(!aMuted);
   // TODO: what about the volume?
   return NS_OK;
 }
 
-NS_IMETHODIMP
-FMRadio::WindowAudioCaptureChanged()
-{
-  return NS_OK;
-}
-
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(FMRadio)
   NS_INTERFACE_MAP_ENTRY(nsISupportsWeakReference)
   NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)
 NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
 
 NS_IMPL_ADDREF_INHERITED(FMRadio, DOMEventTargetHelper)
 NS_IMPL_RELEASE_INHERITED(FMRadio, DOMEventTargetHelper)
 
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -2025,17 +2025,16 @@ HTMLMediaElement::HTMLMediaElement(alrea
     mAutoplaying(true),
     mAutoplayEnabled(true),
     mPaused(true),
     mMuted(0),
     mStatsShowing(false),
     mAllowCasting(false),
     mIsCasting(false),
     mAudioCaptured(false),
-    mAudioCapturedByWindow(false),
     mPlayingBeforeSeek(false),
     mPlayingThroughTheAudioChannelBeforeSeek(false),
     mPausedForInactiveDocumentOrChannel(false),
     mEventDeliveryPaused(false),
     mWaitingFired(false),
     mIsRunningLoadMethod(false),
     mIsDoingExplicitLoad(false),
     mIsLoadingFromSourceChildren(false),
@@ -2093,21 +2092,16 @@ HTMLMediaElement::~HTMLMediaElement()
   }
   if (mProgressTimer) {
     StopProgress();
   }
   if (mSrcStream) {
     EndSrcMediaStreamPlayback();
   }
 
-  if (mCaptureStreamPort) {
-    mCaptureStreamPort->Destroy();
-    mCaptureStreamPort = nullptr;
-  }
-
   NS_ASSERTION(MediaElementTableCount(this, mLoadingSrc) == 0,
     "Destroyed media element should no longer be in element table");
 
   if (mChannel) {
     mChannel->Cancel(NS_BINDING_ABORTED);
   }
 
   WakeLockRelease();
@@ -4476,48 +4470,45 @@ void HTMLMediaElement::UpdateAudioChanne
   if (!UseAudioChannelService()) {
     return;
   }
 
   bool playingThroughTheAudioChannel =
      (!mPaused &&
       (HasAttr(kNameSpaceID_None, nsGkAtoms::loop) ||
        (mReadyState >= nsIDOMHTMLMediaElement::HAVE_CURRENT_DATA &&
-        !IsPlaybackEnded()) ||
+        !IsPlaybackEnded() &&
+        (!mSrcStream || HasAudio())) ||
        mPlayingThroughTheAudioChannelBeforeSeek));
   if (playingThroughTheAudioChannel != mPlayingThroughTheAudioChannel) {
     mPlayingThroughTheAudioChannel = playingThroughTheAudioChannel;
 
     // If we are not playing, we don't need to create a new audioChannelAgent.
     if (!mAudioChannelAgent && !mPlayingThroughTheAudioChannel) {
        return;
     }
 
     if (!mAudioChannelAgent) {
       nsresult rv;
       mAudioChannelAgent = do_CreateInstance("@mozilla.org/audiochannelagent;1", &rv);
       if (!mAudioChannelAgent) {
         return;
       }
-      mAudioChannelAgent->InitWithWeakCallback(OwnerDoc()->GetInnerWindow(),
+      mAudioChannelAgent->InitWithWeakCallback(OwnerDoc()->GetWindow(),
                                                static_cast<int32_t>(mAudioChannel),
                                                this);
     }
 
     NotifyAudioChannelAgent(mPlayingThroughTheAudioChannel);
   }
 }
 
 void
 HTMLMediaElement::NotifyAudioChannelAgent(bool aPlaying)
 {
-  // Immediately check if this should go to the MSG instead of the normal
-  // media playback route.
-  WindowAudioCaptureChanged();
-
   // This is needed to pass nsContentUtils::IsCallerChrome().
   // AudioChannel API should not called from content but it can happen that
   // this method has some content JS in its stack.
   AutoNoJSAPI nojsapi;
 
   if (aPlaying) {
     float volume = 0.0;
     bool muted = true;
@@ -4678,63 +4669,16 @@ HTMLMediaElement::GetTopLevelPrincipal()
   if (!doc) {
     return nullptr;
   }
   principal = doc->NodePrincipal();
   return principal.forget();
 }
 #endif // MOZ_EME
 
-NS_IMETHODIMP HTMLMediaElement::WindowAudioCaptureChanged()
-{
-   MOZ_ASSERT(mAudioChannelAgent);
-
-  if (!OwnerDoc()->GetInnerWindow()) {
-    return NS_OK;
-  }
-  bool captured = OwnerDoc()->GetInnerWindow()->GetAudioCaptured();
-
-  if (captured != mAudioCapturedByWindow) {
-    if (captured) {
-      mAudioCapturedByWindow = true;
-      nsCOMPtr<nsPIDOMWindow> window =
-        do_QueryInterface(OwnerDoc()->GetParentObject());
-      uint64_t id = window->WindowID();
-      MediaStreamGraph* msg = MediaStreamGraph::GetInstance();
-
-      if (!mPlaybackStream) {
-        nsRefPtr<DOMMediaStream> stream = CaptureStreamInternal(false, msg);
-        mCaptureStreamPort = msg->ConnectToCaptureStream(id, stream->GetStream());
-      } else {
-        mCaptureStreamPort = msg->ConnectToCaptureStream(id, mPlaybackStream->GetStream());
-      }
-    } else {
-      mAudioCapturedByWindow = false;
-      if (mDecoder) {
-        ProcessedMediaStream* ps =
-          mCaptureStreamPort->GetSource()->AsProcessedStream();
-        MOZ_ASSERT(ps);
-
-        for (uint32_t i = 0; i < mOutputStreams.Length(); i++) {
-          if (mOutputStreams[i].mStream->GetStream() == ps) {
-            mOutputStreams.RemoveElementAt(i);
-            break;
-          }
-        }
-
-        mDecoder->RemoveOutputStream(ps);
-      }
-      mCaptureStreamPort->Destroy();
-      mCaptureStreamPort = nullptr;
-    }
-  }
-
-   return NS_OK;
-}
-
 AudioTrackList*
 HTMLMediaElement::AudioTracks()
 {
   if (!mAudioTrackList) {
     nsCOMPtr<nsPIDOMWindow> window = do_QueryInterface(OwnerDoc()->GetParentObject());
     mAudioTrackList = new AudioTrackList(window, this);
   }
   return mAudioTrackList;
--- a/dom/html/HTMLMediaElement.h
+++ b/dom/html/HTMLMediaElement.h
@@ -1069,19 +1069,16 @@ protected:
   // Holds a reference to the DOM wrapper for the MediaStream that we're
   // actually playing.
   // At most one of mDecoder and mSrcStream can be non-null.
   nsRefPtr<DOMMediaStream> mSrcStream;
 
   // Holds a reference to a MediaInputPort connecting mSrcStream to mPlaybackStream.
   nsRefPtr<MediaInputPort> mPlaybackStreamInputPort;
 
-  // Holds a reference to the stream connecting this stream to the capture sink.
-  nsRefPtr<MediaInputPort> mCaptureStreamPort;
-
   // Holds a reference to a stream with mSrcStream as input but intended for
   // playback. Used so we don't block playback of other video elements
   // playing the same mSrcStream.
   nsRefPtr<DOMMediaStream> mPlaybackStream;
 
   // Holds references to the DOM wrappers for the MediaStreams that we're
   // writing to.
   struct OutputMediaStream {
@@ -1281,19 +1278,16 @@ protected:
   // True if casting is currently allowed
   bool mAllowCasting;
   // True if currently casting this video
   bool mIsCasting;
 
   // True if the sound is being captured.
   bool mAudioCaptured;
 
-  // True if the sound is being captured by the window.
-  bool mAudioCapturedByWindow;
-
   // If TRUE then the media element was actively playing before the currently
   // in progress seeking. If FALSE then the media element is either not seeking
   // or was not actively playing before the current seek. Used to decide whether
   // to raise the 'waiting' event as per 4.7.1.8 in HTML 5 specification.
   bool mPlayingBeforeSeek;
 
   // if TRUE then the seek started while content was in active playing state
   // if FALSE then the seek started while the content was not playing.
deleted file mode 100644
--- a/dom/media/AudioCaptureStream.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "MediaStreamGraphImpl.h"
-#include "mozilla/MathAlgorithms.h"
-#include "mozilla/unused.h"
-
-#include "AudioSegment.h"
-#include "mozilla/Logging.h"
-#include "mozilla/Attributes.h"
-#include "AudioCaptureStream.h"
-#include "ImageContainer.h"
-#include "AudioNodeEngine.h"
-#include "AudioNodeStream.h"
-#include "AudioNodeExternalInputStream.h"
-#include "webaudio/MediaStreamAudioDestinationNode.h"
-#include <algorithm>
-#include "DOMMediaStream.h"
-
-using namespace mozilla::layers;
-using namespace mozilla::dom;
-using namespace mozilla::gfx;
-
-namespace mozilla
-{
-
-// We are mixing to mono until PeerConnection can accept stereo
-static const uint32_t MONO = 1;
-
-AudioCaptureStream::AudioCaptureStream(DOMMediaStream* aWrapper)
-  : ProcessedMediaStream(aWrapper), mTrackCreated(false)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_COUNT_CTOR(AudioCaptureStream);
-  mMixer.AddCallback(this);
-}
-
-AudioCaptureStream::~AudioCaptureStream()
-{
-  MOZ_COUNT_DTOR(AudioCaptureStream);
-  mMixer.RemoveCallback(this);
-}
-
-void
-AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
-                                 uint32_t aFlags)
-{
-  uint32_t inputCount = mInputs.Length();
-  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
-  // Notify the DOM everything is in order.
-  if (!mTrackCreated) {
-    for (uint32_t i = 0; i < mListeners.Length(); i++) {
-      MediaStreamListener* l = mListeners[i];
-      AudioSegment tmp;
-      l->NotifyQueuedTrackChanges(
-        Graph(), AUDIO_TRACK, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
-      l->NotifyFinishedTrackCreation(Graph());
-    }
-    mTrackCreated = true;
-  }
-
-  // If the captured stream is connected back to a object on the page (be it an
-  // HTMLMediaElement with a stream as source, or an AudioContext), a cycle
-  // situation occur. This can work if it's an AudioContext with at least one
-  // DelayNode, but the MSG will mute the whole cycle otherwise.
-  bool blocked = mFinished || mBlocked.GetAt(aFrom);
-  if (blocked || InMutedCycle() || inputCount == 0) {
-    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
-  } else {
-    // We mix down all the tracks of all inputs, to a stereo track. Everything
-    // is {up,down}-mixed to stereo.
-    mMixer.StartMixing();
-    AudioSegment output;
-    for (uint32_t i = 0; i < inputCount; i++) {
-      MediaStream* s = mInputs[i]->GetSource();
-      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
-      while (!tracks.IsEnded()) {
-        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
-        StreamTime inputStart = s->GraphTimeToStreamTime(aFrom);
-        StreamTime inputEnd = s->GraphTimeToStreamTime(aTo);
-        AudioSegment toMix;
-        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
-        // Care for streams blocked in the [aTo, aFrom] range.
-        if (inputEnd - inputStart < aTo - aFrom) {
-          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
-        }
-        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
-        tracks.Next();
-      }
-    }
-    // This calls MixerCallback below
-    mMixer.FinishMixing();
-  }
-
-  // Regardless of the status of the input tracks, we go foward.
-  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime((aTo)));
-}
-
-void
-AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
-                                  AudioSampleFormat aFormat, uint32_t aChannels,
-                                  uint32_t aFrames, uint32_t aSampleRate)
-{
-  nsAutoTArray<nsTArray<AudioDataValue>, MONO> output;
-  nsAutoTArray<const AudioDataValue*, MONO> bufferPtrs;
-  output.SetLength(MONO);
-  bufferPtrs.SetLength(MONO);
-
-  uint32_t written = 0;
-  // We need to copy here, because the mixer will reuse the storage, we should
-  // not hold onto it. Buffers are in planar format.
-  for (uint32_t channel = 0; channel < aChannels; channel++) {
-    AudioDataValue* out = output[channel].AppendElements(aFrames);
-    PodCopy(out, aMixedBuffer + written, aFrames);
-    bufferPtrs[channel] = out;
-    written += aFrames;
-  }
-  AudioChunk chunk;
-  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
-  chunk.mDuration = aFrames;
-  chunk.mBufferFormat = aFormat;
-  chunk.mVolume = 1.0f;
-  chunk.mChannelData.SetLength(MONO);
-  for (uint32_t channel = 0; channel < aChannels; channel++) {
-    chunk.mChannelData[channel] = bufferPtrs[channel];
-  }
-
-  // Now we have mixed data, simply append it to out track.
-  EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
-}
-}
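
In outline, the deleted AudioCaptureStream mixed every audio track of every input stream down to a single mono track per iteration, padding blocked or short inputs with silence. A toy JS model of that per-iteration step (the real code routes through AudioMixer and {up,down}-mixes via AudioSegment::Mix):

  // Toy model only; ignores blocking, muted cycles and channel layouts.
  function processInput(inputTracks /* Array<Float32Array> */, frames) {
    var out = new Float32Array(frames);        // stays silent with no inputs
    for (var track of inputTracks) {
      for (var i = 0; i < frames; i++) {
        out[i] += (i < track.length ? track[i] : 0);  // pad short inputs
      }
    }
    return out;                                // appended to AUDIO_TRACK
  }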
deleted file mode 100644
--- a/dom/media/AudioCaptureStream.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MOZILLA_AUDIOCAPTURESTREAM_H_
-#define MOZILLA_AUDIOCAPTURESTREAM_H_
-
-#include "MediaStreamGraph.h"
-#include "AudioMixer.h"
-#include <algorithm>
-
-namespace mozilla
-{
-
-class DOMMediaStream;
-
-/**
- * See MediaStreamGraph::CreateAudioCaptureStream.
- */
-class AudioCaptureStream : public ProcessedMediaStream,
-                           public MixerCallbackReceiver
-{
-public:
-  explicit AudioCaptureStream(DOMMediaStream* aWrapper);
-  virtual ~AudioCaptureStream();
-
-  void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
-
-protected:
-  enum { AUDIO_TRACK = 1 };
-  void MixerCallback(AudioDataValue* aMixedBuffer, AudioSampleFormat aFormat,
-                     uint32_t aChannels, uint32_t aFrames,
-                     uint32_t aSampleRate) override;
-  AudioMixer mMixer;
-  bool mTrackCreated;
-};
-}
-
-#endif /* MOZILLA_AUDIOCAPTURESTREAM_H_ */
--- a/dom/media/AudioChannelFormat.cpp
+++ b/dom/media/AudioChannelFormat.cpp
@@ -1,19 +1,34 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioChannelFormat.h"
+#include "nsTArray.h"
 
 #include <algorithm>
 
 namespace mozilla {
 
+enum {
+  SURROUND_L,
+  SURROUND_R,
+  SURROUND_C,
+  SURROUND_LFE,
+  SURROUND_SL,
+  SURROUND_SR
+};
+
+static const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6;
+
+static const int IGNORE = CUSTOM_CHANNEL_LAYOUTS;
+static const float IGNORE_F = 0.0f;
+
 uint32_t
 GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2)
 {
   return std::max(aChannels1, aChannels2);
 }
 
 /**
  * UpMixMatrix represents a conversion matrix by exploiting the fact that
@@ -43,28 +58,31 @@ gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(C
   { { 0, 1, 2, IGNORE, IGNORE, IGNORE } },
   // Upmixes from quad
   { { 0, 1, 2, 3, IGNORE } },
   { { 0, 1, IGNORE, IGNORE, 2, 3 } },
   // Upmixes from 5-channel
   { { 0, 1, 2, 3, 4, IGNORE } }
 };
 
+static const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
+  { 0, 5, 9, 12, 14 };
+
 void
 AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
                    uint32_t aOutputChannelCount,
                    const void* aZeroChannel)
 {
   uint32_t inputChannelCount = aChannelArray->Length();
   uint32_t outputChannelCount =
     GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
   NS_ASSERTION(outputChannelCount > inputChannelCount,
                "No up-mix needed");
-  MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels");
-  MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels");
+  NS_ASSERTION(inputChannelCount > 0, "Bad number of channels");
+  NS_ASSERTION(outputChannelCount > 0, "Bad number of channels");
 
   aChannelArray->SetLength(outputChannelCount);
 
   if (inputChannelCount < CUSTOM_CHANNEL_LAYOUTS &&
       outputChannelCount <= CUSTOM_CHANNEL_LAYOUTS) {
     const UpMixMatrix& m = gUpMixMatrices[
       gMixingMatrixIndexByChannels[inputChannelCount - 1] +
       outputChannelCount - inputChannelCount - 1];
@@ -85,9 +103,99 @@ AudioChannelsUpMix(nsTArray<const void*>
     return;
   }
 
   for (uint32_t i = inputChannelCount; i < outputChannelCount; ++i) {
     aChannelArray->ElementAt(i) = aZeroChannel;
   }
 }
 
+/**
+ * DownMixMatrix represents a conversion matrix efficiently by exploiting the
+ * fact that each input channel contributes to at most one output channel,
+ * except possibly for the C input channel in layouts that have one. Also,
+ * every input channel is multiplied by the same coefficient for every output
+ * channel it contributes to.
+ */
+struct DownMixMatrix {
+  // Every input channel c is copied to output channel mInputDestination[c]
+  // after multiplying by mInputCoefficient[c].
+  uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
+  // If not IGNORE, then the C channel is copied to this output channel after
+  // multiplying by its coefficient.
+  uint8_t mCExtraDestination;
+  float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS];
+};
+
+static const DownMixMatrix
+gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
+{
+  // Downmixes to mono
+  { { 0, 0 }, IGNORE, { 0.5f, 0.5f } },
+  { { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } },
+  { { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } },
+  { { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } },
+  { { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { 0.7071f, 0.7071f, 1.0f, IGNORE_F, 0.5f, 0.5f } },
+  // Downmixes to stereo
+  { { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } },
+  { { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } },
+  { { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
+  { { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 0.7071f, 0.7071f } },
+  // Downmixes to 3-channel
+  { { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } },
+  { { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } },
+  { { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
+  // Downmixes to quad
+  { { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } },
+  { { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 1.0f, 1.0f } },
+  // Downmixes to 5-channel
+  { { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }
+};
+
+void
+AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
+                     float** aOutputChannels,
+                     uint32_t aOutputChannelCount,
+                     uint32_t aDuration)
+{
+  uint32_t inputChannelCount = aChannelArray.Length();
+  const void* const* inputChannels = aChannelArray.Elements();
+  NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");
+
+  if (inputChannelCount > 6) {
+    // Just drop the unknown channels.
+    for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
+      memcpy(aOutputChannels[o], inputChannels[o], aDuration*sizeof(float));
+    }
+    return;
+  }
+
+  // Ignore unknown channels, they're just dropped.
+  inputChannelCount = std::min<uint32_t>(6, inputChannelCount);
+
+  const DownMixMatrix& m = gDownMixMatrices[
+    gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
+    inputChannelCount - aOutputChannelCount - 1];
+
+  // This is slow, but general. We can define custom code for special
+  // cases later.
+  for (uint32_t s = 0; s < aDuration; ++s) {
+    // Reserve an extra junk channel at the end for the cases where we
+    // want an input channel to contribute to nothing
+    float outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1];
+    memset(outputChannels, 0, sizeof(float)*(CUSTOM_CHANNEL_LAYOUTS));
+    for (uint32_t c = 0; c < inputChannelCount; ++c) {
+      outputChannels[m.mInputDestination[c]] +=
+        m.mInputCoefficient[c]*(static_cast<const float*>(inputChannels[c]))[s];
+    }
+    // Utilize the fact that in every layout, C is the third channel.
+    if (m.mCExtraDestination != IGNORE) {
+      outputChannels[m.mCExtraDestination] +=
+        m.mInputCoefficient[SURROUND_C]*(static_cast<const float*>(inputChannels[SURROUND_C]))[s];
+    }
+
+    for (uint32_t c = 0; c < aOutputChannelCount; ++c) {
+      aOutputChannels[c][s] = outputChannels[c];
+    }
+  }
+}
+
 } // namespace mozilla
--- a/dom/media/AudioChannelFormat.h
+++ b/dom/media/AudioChannelFormat.h
@@ -4,18 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #ifndef MOZILLA_AUDIOCHANNELFORMAT_H_
 #define MOZILLA_AUDIOCHANNELFORMAT_H_
 
 #include <stdint.h>
 
 #include "nsTArrayForwardDeclare.h"
-#include "AudioSampleFormat.h"
-#include "nsTArray.h"
 
 namespace mozilla {
 
 /*
  * This file provides utilities for upmixing and downmixing channels.
  *
  * The channel layouts, upmixing and downmixing are consistent with the
  * Web Audio spec.
@@ -26,36 +24,16 @@ namespace mozilla {
  *          { L, R, C }
  *   quad   { L, R, SL, SR }
  *          { L, R, C, SL, SR }
  *   5.1    { L, R, C, LFE, SL, SR }
  *
  * Only 1, 2, 4 and 6 are currently defined in Web Audio.
  */
 
-enum {
-  SURROUND_L,
-  SURROUND_R,
-  SURROUND_C,
-  SURROUND_LFE,
-  SURROUND_SL,
-  SURROUND_SR
-};
-
-const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6;
-
-// This is defined by some Windows SDK header.
-#undef IGNORE
-
-const int IGNORE = CUSTOM_CHANNEL_LAYOUTS;
-const float IGNORE_F = 0.0f;
-
-const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
-  { 0, 5, 9, 12, 14 };
-
 /**
  * Return a channel count whose channel layout includes all the channels from
  * aChannels1 and aChannels2.
  */
 uint32_t
 GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2);
 
 /**
@@ -70,108 +48,25 @@ GetAudioChannelsSuperset(uint32_t aChann
  * GetAudioChannelsSuperset calls resulting in aOutputChannelCount,
  * no downmixing will be required.
  */
 void
 AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
                    uint32_t aOutputChannelCount,
                    const void* aZeroChannel);
 
-
 /**
- * DownMixMatrix represents a conversion matrix efficiently by exploiting the
- * fact that each input channel contributes to at most one output channel,
- * except possibly for the C input channel in layouts that have one. Also,
- * every input channel is multiplied by the same coefficient for every output
- * channel it contributes to.
- */
-struct DownMixMatrix {
-  // Every input channel c is copied to output channel mInputDestination[c]
-  // after multiplying by mInputCoefficient[c].
-  uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
-  // If not IGNORE, then the C channel is copied to this output channel after
-  // multiplying by its coefficient.
-  uint8_t mCExtraDestination;
-  float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS];
-};
-
-static const DownMixMatrix
-gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
-{
-  // Downmixes to mono
-  { { 0, 0 }, IGNORE, { 0.5f, 0.5f } },
-  { { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } },
-  { { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } },
-  { { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } },
-  { { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { 0.7071f, 0.7071f, 1.0f, IGNORE_F, 0.5f, 0.5f } },
-  // Downmixes to stereo
-  { { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } },
-  { { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } },
-  { { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
-  { { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 0.7071f, 0.7071f } },
-  // Downmixes to 3-channel
-  { { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } },
-  { { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } },
-  { { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
-  // Downmixes to quad
-  { { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } },
-  { { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 1.0f, 1.0f } },
-  // Downmixes to 5-channel
-  { { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }
-};
-
-/**
- * Given an array of input channels, downmix to aOutputChannelCount, and copy
- * the results to the channel buffers in aOutputChannels.  Don't call this with
- * input count <= output count.
+ * Given an array of input channels (which must be float format!),
+ * downmix to aOutputChannelCount, and copy the results to the
+ * channel buffers in aOutputChannels.
+ * Don't call this with input count <= output count.
  */
-template<typename T>
-void AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
-                     T** aOutputChannels,
+void
+AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
+                     float** aOutputChannels,
                      uint32_t aOutputChannelCount,
-                     uint32_t aDuration)
-{
-  uint32_t inputChannelCount = aChannelArray.Length();
-  const void* const* inputChannels = aChannelArray.Elements();
-  NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");
-
-  if (inputChannelCount > 6) {
-    // Just drop the unknown channels.
-    for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
-      memcpy(aOutputChannels[o], inputChannels[o], aDuration*sizeof(T));
-    }
-    return;
-  }
-
-  // Ignore unknown channels, they're just dropped.
-  inputChannelCount = std::min<uint32_t>(6, inputChannelCount);
+                     uint32_t aDuration);
 
-  const DownMixMatrix& m = gDownMixMatrices[
-    gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
-    inputChannelCount - aOutputChannelCount - 1];
-
-  // This is slow, but general. We can define custom code for special
-  // cases later.
-  for (uint32_t s = 0; s < aDuration; ++s) {
-    // Reserve an extra junk channel at the end for the cases where we
-    // want an input channel to contribute to nothing
-    T outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1];
-    memset(outputChannels, 0, sizeof(T)*(CUSTOM_CHANNEL_LAYOUTS));
-    for (uint32_t c = 0; c < inputChannelCount; ++c) {
-      outputChannels[m.mInputDestination[c]] +=
-        m.mInputCoefficient[c]*(static_cast<const T*>(inputChannels[c]))[s];
-    }
-    // Utilize the fact that in every layout, C is the third channel.
-    if (m.mCExtraDestination != IGNORE) {
-      outputChannels[m.mCExtraDestination] +=
-        m.mInputCoefficient[SURROUND_C]*(static_cast<const T*>(inputChannels[SURROUND_C]))[s];
-    }
-
-    for (uint32_t c = 0; c < aOutputChannelCount; ++c) {
-      aOutputChannels[c][s] = outputChannels[c];
-    }
-  }
-}
-
+// A version of AudioChannelsDownMix that downmixes int16_ts may be required.
 
 } // namespace mozilla
 
 #endif /* MOZILLA_AUDIOCHANNELFORMAT_H_ */
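
To make the DownMixMatrix encoding removed above concrete, here is a minimal
standalone sketch (plain C++, independent of the Gecko types; the junk-slot
index is adapted from IGNORE for brevity) that applies the 5.1-to-stereo row
of gDownMixMatrices to one frame: each input channel is accumulated into
output mInputDestination[c] scaled by mInputCoefficient[c], and the C channel
additionally contributes to mCExtraDestination.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // One frame of 5.1 input, ordered { L, R, C, LFE, SL, SR }.
      const float in[6] = { 0.2f, 0.4f, 1.0f, 0.8f, 0.1f, 0.3f };
      // From the "Downmixes to stereo" 6->2 row: C, SL and SR fold into L/R
      // at 0.7071 (-3 dB); LFE routes to a junk slot (index 2 here) so it
      // contributes nothing, mirroring the IGNORE mechanism.
      const uint8_t dest[6] = { 0, 1, 0, 2, 0, 1 };
      const float coef[6]   = { 1.0f, 1.0f, 0.7071f, 0.0f, 0.7071f, 0.7071f };
      float out[3] = { 0.0f, 0.0f, 0.0f };  // L, R, junk
      for (int c = 0; c < 6; ++c) {
        out[dest[c]] += coef[c] * in[c];
      }
      // mCExtraDestination == 1: the centre channel also feeds the right output.
      out[1] += 0.7071f * in[2];
      std::printf("L=%f R=%f\n", out[0], out[1]);  // L~0.9778 R~1.3192
      return 0;
    }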
--- a/dom/media/AudioMixer.h
+++ b/dom/media/AudioMixer.h
@@ -21,19 +21,17 @@ struct MixerCallbackReceiver {
                              uint32_t aFrames,
                              uint32_t aSampleRate) = 0;
 };
 /**
  * This class mixes multiple streams of audio together to output a single audio
  * stream.
  *
  * AudioMixer::Mix is to be called repeatedly with buffers that have the same
- * length, sample rate, sample format and channel count. This class works with
- * interleaved and planar buffers, but the buffers mixed during a single
- * mixing cycle must all be of the same type.
+ * length, sample rate, sample format and channel count.
  *
  * When all the tracks have been mixed, calling FinishMixing will call back with
  * a buffer containing the mixed audio data.
  *
  * This class is not thread safe.
  */
 class AudioMixer
 {
@@ -68,17 +66,17 @@ public:
                                    mChannels,
                                    mFrames,
                                    mSampleRate);
     }
     PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
     mSampleRate = mChannels = mFrames = 0;
   }
 
-  /* Add a buffer to the mix. */
+  /* Add a buffer to the mix. aSamples is interleaved. */
   void Mix(AudioDataValue* aSamples,
            uint32_t aChannels,
            uint32_t aFrames,
            uint32_t aSampleRate) {
     if (!mFrames && !mChannels) {
       mFrames = aFrames;
       mChannels = aChannels;
       mSampleRate = aSampleRate;
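
The accumulate-then-flush cycle that Mix and FinishMixing implement can be
sketched without the Gecko types. This toy version (standard C++; float
samples assumed, names invented for illustration) sums same-shaped
interleaved buffers and hands the mix to a callback before resetting, as
FinishMixing does:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct MiniMixer {
      std::vector<float> mixed;
      uint32_t channels = 0, frames = 0, rate = 0;

      // First buffer of a cycle fixes the shape; later buffers are summed in.
      void Mix(const float* samples, uint32_t aChannels, uint32_t aFrames,
               uint32_t aRate) {
        if (!frames && !channels) {
          channels = aChannels; frames = aFrames; rate = aRate;
          mixed.assign(size_t(aChannels) * aFrames, 0.0f);
        }
        for (size_t i = 0; i < mixed.size(); ++i) {
          mixed[i] += samples[i];
        }
      }

      // Hand out the mix, then zero the buffer and reset for the next cycle.
      template <typename Callback>
      void FinishMixing(Callback&& cb) {
        cb(mixed.data(), channels, frames, rate);
        mixed.assign(mixed.size(), 0.0f);
        channels = frames = rate = 0;
      }
    };

    int main() {
      MiniMixer mixer;
      float a[4] = { 0.1f, 0.2f, 0.3f, 0.4f };  // 2 frames, stereo, interleaved
      float b[4] = { 0.4f, 0.3f, 0.2f, 0.1f };
      mixer.Mix(a, 2, 2, 44100);
      mixer.Mix(b, 2, 2, 44100);
      mixer.FinishMixing([](const float* buf, uint32_t ch, uint32_t fr,
                            uint32_t) {
        for (uint32_t i = 0; i < ch * fr; ++i) std::printf("%f ", buf[i]);
      });  // prints 0.5 four times
      return 0;
    }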
--- a/dom/media/AudioSegment.cpp
+++ b/dom/media/AudioSegment.cpp
@@ -141,113 +141,16 @@ void AudioSegment::ResampleChunks(SpeexR
       Resample<int16_t>(aResampler, aInRate, aOutRate);
     break;
     default:
       MOZ_ASSERT(false);
     break;
   }
 }
 
-// This helps to safely get a pointer to the position we want to start
-// writing a planar audio buffer, depending on the channel and the offset in the
-// buffer.
-static AudioDataValue*
-PointerForOffsetInChannel(AudioDataValue* aData, size_t aLengthSamples,
-                          uint32_t aChannelCount, uint32_t aChannel,
-                          uint32_t aOffsetSamples)
-{
-  size_t samplesPerChannel = aLengthSamples / aChannelCount;
-  size_t beginningOfChannel = samplesPerChannel * aChannel;
-  MOZ_ASSERT(aChannel * samplesPerChannel + aOffsetSamples < aLengthSamples,
-             "Offset request out of bounds.");
-  return aData + beginningOfChannel + aOffsetSamples;
-}
-
-void
-AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels,
-                  uint32_t aSampleRate)
-{
-  nsAutoTArray<AudioDataValue, AUDIO_PROCESSING_FRAMES* GUESS_AUDIO_CHANNELS>
-  buf;
-  nsAutoTArray<const void*, GUESS_AUDIO_CHANNELS> channelData;
-  uint32_t offsetSamples = 0;
-  uint32_t duration = GetDuration();
-
-  if (duration <= 0) {
-    MOZ_ASSERT(duration == 0);
-    return;
-  }
-
-  uint32_t outBufferLength = duration * aOutputChannels;
-  buf.SetLength(outBufferLength);
-
-  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
-    AudioChunk& c = *ci;
-    uint32_t frames = c.mDuration;
-
-    // If the chunk is silent, simply write the right amount of silence into
-    // the buffers.
-    if (c.mBufferFormat == AUDIO_FORMAT_SILENCE) {
-      for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
-        AudioDataValue* ptr =
-          PointerForOffsetInChannel(buf.Elements(), outBufferLength,
-                                    aOutputChannels, channel, offsetSamples);
-        PodZero(ptr, frames);
-      }
-    } else {
-      // Otherwise, we need to upmix or downmix appropriately, depending on the
-      // desired input and output channels.
-      channelData.SetLength(c.mChannelData.Length());
-      for (uint32_t i = 0; i < channelData.Length(); ++i) {
-        channelData[i] = c.mChannelData[i];
-      }
-      if (channelData.Length() < aOutputChannels) {
-        // Up-mix.
-        AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
-        for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
-          AudioDataValue* ptr =
-            PointerForOffsetInChannel(buf.Elements(), outBufferLength,
-                                      aOutputChannels, channel, offsetSamples);
-          PodCopy(ptr, reinterpret_cast<const AudioDataValue*>(channelData[channel]),
-                  frames);
-        }
-        MOZ_ASSERT(channelData.Length() == aOutputChannels);
-      } else if (channelData.Length() > aOutputChannels) {
-        // Down mix.
-        nsAutoTArray<AudioDataValue*, GUESS_AUDIO_CHANNELS> outChannelPtrs;
-        outChannelPtrs.SetLength(aOutputChannels);
-        uint32_t offsetSamples = 0;
-        for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
-          outChannelPtrs[channel] =
-            PointerForOffsetInChannel(buf.Elements(), outBufferLength,
-                                      aOutputChannels, channel, offsetSamples);
-        }
-        AudioChannelsDownMix(channelData, outChannelPtrs.Elements(),
-                             aOutputChannels, frames);
-      } else {
-        // The channel count is already what we want, just copy it over.
-        for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
-          AudioDataValue* ptr =
-            PointerForOffsetInChannel(buf.Elements(), outBufferLength,
-                                      aOutputChannels, channel, offsetSamples);
-          PodCopy(ptr, reinterpret_cast<const AudioDataValue*>(channelData[channel]),
-                  frames);
-        }
-      }
-    }
-    offsetSamples += frames;
-  }
-
-  if (offsetSamples) {
-    MOZ_ASSERT(offsetSamples == outBufferLength / aOutputChannels,
-               "We forgot to write some samples?");
-    aMixer.Mix(buf.Elements(), aOutputChannels, offsetSamples, aSampleRate);
-  }
-}
-
 void
 AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate)
 {
   nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
   nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
   // Offset in the buffer that will end up sent to the AudioStream, in samples.
   uint32_t offset = 0;
 
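
The index arithmetic in the removed PointerForOffsetInChannel is easy to
verify in isolation: in a planar buffer of L total samples and N channels,
channel c starts at (L / N) * c, and the write position adds an offset within
that channel. A self-contained check (plain C++, hypothetical names):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    static float* PlanarPointer(float* data, size_t totalSamples,
                                size_t channels, size_t channel, size_t offset) {
      size_t perChannel = totalSamples / channels;
      // Same bounds check as the MOZ_ASSERT in the removed helper.
      assert(channel * perChannel + offset < totalSamples);
      return data + perChannel * channel + offset;
    }

    int main() {
      float buf[12] = {};  // 3 channels x 4 samples per channel, planar
      // Channel 2 at offset 1 lands at index (12 / 3) * 2 + 1 == 9.
      assert(PlanarPointer(buf, 12, 3, 2, 1) == buf + 9);
      std::puts("planar offset arithmetic checks out");
      return 0;
    }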
--- a/dom/media/AudioSegment.h
+++ b/dom/media/AudioSegment.h
@@ -294,24 +294,17 @@ public:
     chunk->mVolume = aChunk->mVolume;
     chunk->mBufferFormat = aChunk->mBufferFormat;
 #ifdef MOZILLA_INTERNAL_API
     chunk->mTimeStamp = TimeStamp::Now();
 #endif
     return chunk;
   }
   void ApplyVolume(float aVolume);
-  // Mix the segment into a mixer, interleaved. This is useful to output a
-  // segment to a system audio callback. It up or down mixes to aChannelCount
-  // channels.
-  void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount,
-               uint32_t aSampleRate);
-  // Mix the segment into a mixer, keeping it planar, up or down mixing to
-  // aChannelCount channels.
-  void Mix(AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate);
+  void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate);
 
   int ChannelCount() {
     NS_WARN_IF_FALSE(!mChunks.IsEmpty(),
         "Cannot query channel count on a AudioSegment with no chunks.");
     // Find the first chunk that has non-zero channels. A chunk that hs zero
     // channels is just silence and we can simply discard it.
     for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
       if (ci->ChannelCount()) {
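
The comments removed from AudioSegment.h distinguish the two output layouts:
WriteTo produces interleaved data (frame-major: L0 R0 L1 R1 ...) suitable for
a system audio callback, while Mix kept the data planar (channel-major:
L0 L1 ... R0 R1 ...). A minimal conversion between the two layouts, for
reference (plain C++):

    #include <cstdio>

    int main() {
      const int channels = 2, frames = 4;
      // Planar (channel-major): all of channel 0, then all of channel 1.
      const float planar[8] = { 0, 1, 2, 3, 10, 11, 12, 13 };
      float interleaved[8];
      for (int f = 0; f < frames; ++f) {
        for (int c = 0; c < channels; ++c) {
          interleaved[f * channels + c] = planar[c * frames + f];
        }
      }
      for (float s : interleaved) std::printf("%g ", s);  // 0 10 1 11 2 12 3 13
      std::printf("\n");
      return 0;
    }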
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -297,28 +297,16 @@ DOMMediaStream::InitTrackUnionStream(nsI
 
   if (!aGraph) {
     aGraph = MediaStreamGraph::GetInstance();
   }
   InitStreamCommon(aGraph->CreateTrackUnionStream(this));
 }
 
 void
-DOMMediaStream::InitAudioCaptureStream(nsIDOMWindow* aWindow,
-                                       MediaStreamGraph* aGraph)
-{
-  mWindow = aWindow;
-
-  if (!aGraph) {
-    aGraph = MediaStreamGraph::GetInstance();
-  }
-  InitStreamCommon(aGraph->CreateAudioCaptureStream(this));
-}
-
-void
 DOMMediaStream::InitStreamCommon(MediaStream* aStream)
 {
   mStream = aStream;
 
   // Setup track listener
   mListener = new StreamListener(this);
   aStream->AddListener(mListener);
 }
@@ -336,25 +324,16 @@ already_AddRefed<DOMMediaStream>
 DOMMediaStream::CreateTrackUnionStream(nsIDOMWindow* aWindow,
                                        MediaStreamGraph* aGraph)
 {
   nsRefPtr<DOMMediaStream> stream = new DOMMediaStream();
   stream->InitTrackUnionStream(aWindow, aGraph);
   return stream.forget();
 }
 
-already_AddRefed<DOMMediaStream>
-DOMMediaStream::CreateAudioCaptureStream(nsIDOMWindow* aWindow,
-                                         MediaStreamGraph* aGraph)
-{
-  nsRefPtr<DOMMediaStream> stream = new DOMMediaStream();
-  stream->InitAudioCaptureStream(aWindow, aGraph);
-  return stream.forget();
-}
-
 void
 DOMMediaStream::SetTrackEnabled(TrackID aTrackID, bool aEnabled)
 {
   if (mStream) {
     mStream->SetTrackEnabled(aTrackID, aEnabled);
   }
 }
 
@@ -669,25 +648,16 @@ already_AddRefed<DOMLocalMediaStream>
 DOMLocalMediaStream::CreateTrackUnionStream(nsIDOMWindow* aWindow,
                                             MediaStreamGraph* aGraph)
 {
   nsRefPtr<DOMLocalMediaStream> stream = new DOMLocalMediaStream();
   stream->InitTrackUnionStream(aWindow, aGraph);
   return stream.forget();
 }
 
-already_AddRefed<DOMLocalMediaStream>
-DOMLocalMediaStream::CreateAudioCaptureStream(nsIDOMWindow* aWindow,
-                                              MediaStreamGraph* aGraph)
-{
-  nsRefPtr<DOMLocalMediaStream> stream = new DOMLocalMediaStream();
-  stream->InitAudioCaptureStream(aWindow, aGraph);
-  return stream.forget();
-}
-
 DOMAudioNodeMediaStream::DOMAudioNodeMediaStream(AudioNode* aNode)
 : mStreamNode(aNode)
 {
 }
 
 DOMAudioNodeMediaStream::~DOMAudioNodeMediaStream()
 {
 }
--- a/dom/media/DOMMediaStream.h
+++ b/dom/media/DOMMediaStream.h
@@ -193,23 +193,16 @@ public:
                                                              MediaStreamGraph* aGraph = nullptr);
 
   /**
    * Create an nsDOMMediaStream whose underlying stream is a TrackUnionStream.
    */
   static already_AddRefed<DOMMediaStream> CreateTrackUnionStream(nsIDOMWindow* aWindow,
                                                                  MediaStreamGraph* aGraph = nullptr);
 
-  /**
-   * Create an nsDOMMediaStream whose underlying stream is an
-   * AudioCaptureStream
-   */
-  static already_AddRefed<DOMMediaStream> CreateAudioCaptureStream(
-    nsIDOMWindow* aWindow, MediaStreamGraph* aGraph = nullptr);
-
   void SetLogicalStreamStartTime(StreamTime aTime)
   {
     mLogicalStreamStartTime = aTime;
   }
 
   // Notifications from StreamListener.
   // BindDOMTrack should only be called when it's safe to run script.
   MediaStreamTrack* BindDOMTrack(TrackID aTrackID, MediaSegment::Type aType);
@@ -263,18 +256,16 @@ public:
 protected:
   virtual ~DOMMediaStream();
 
   void Destroy();
   void InitSourceStream(nsIDOMWindow* aWindow,
                         MediaStreamGraph* aGraph = nullptr);
   void InitTrackUnionStream(nsIDOMWindow* aWindow,
                             MediaStreamGraph* aGraph = nullptr);
-  void InitAudioCaptureStream(nsIDOMWindow* aWindow,
-                              MediaStreamGraph* aGraph = nullptr);
   void InitStreamCommon(MediaStream* aStream);
   already_AddRefed<AudioTrack> CreateAudioTrack(AudioStreamTrack* aStreamTrack);
   already_AddRefed<VideoTrack> CreateVideoTrack(VideoStreamTrack* aStreamTrack);
 
   // Called when MediaStreamGraph has finished an iteration where tracks were
   // created.
   void TracksCreated();
 
@@ -355,22 +346,16 @@ public:
 
   /**
    * Create an nsDOMLocalMediaStream whose underlying stream is a TrackUnionStream.
    */
   static already_AddRefed<DOMLocalMediaStream>
   CreateTrackUnionStream(nsIDOMWindow* aWindow,
                          MediaStreamGraph* aGraph = nullptr);
 
-  /**
-   * Create an nsDOMLocalMediaStream whose underlying stream is an
-   * AudioCaptureStream. */
-  static already_AddRefed<DOMLocalMediaStream> CreateAudioCaptureStream(
-    nsIDOMWindow* aWindow, MediaStreamGraph* aGraph = nullptr);
-
 protected:
   virtual ~DOMLocalMediaStream();
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(DOMLocalMediaStream,
                               NS_DOMLOCALMEDIASTREAM_IID)
 
 class DOMAudioNodeMediaStream : public DOMMediaStream
--- a/dom/media/DecodedStream.cpp
+++ b/dom/media/DecodedStream.cpp
@@ -284,24 +284,16 @@ DecodedStream::RecreateData(MediaStreamG
 nsTArray<OutputStreamData>&
 DecodedStream::OutputStreams()
 {
   MOZ_ASSERT(NS_IsMainThread());
   GetReentrantMonitor().AssertCurrentThreadIn();
   return mOutputStreams;
 }
 
-bool
-DecodedStream::HasConsumers() const
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-  return mOutputStreams.IsEmpty();
-}
-
 ReentrantMonitor&
 DecodedStream::GetReentrantMonitor() const
 {
   return mMonitor;
 }
 
 void
 DecodedStream::Connect(OutputStreamData* aStream)
--- a/dom/media/DecodedStream.h
+++ b/dom/media/DecodedStream.h
@@ -109,17 +109,16 @@ public:
   void DestroyData();
   void RecreateData();
   void Connect(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
   void Remove(MediaStream* aStream);
   void SetPlaying(bool aPlaying);
   int64_t AudioEndTime() const;
   int64_t GetPosition() const;
   bool IsFinished() const;
-  bool HasConsumers() const;
 
   // Return true if stream is finished.
   bool SendData(double aVolume, bool aIsSameOrigin);
 
 protected:
   virtual ~DecodedStream() {}
 
 private:
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -321,23 +321,16 @@ void MediaDecoder::SetVolume(double aVol
 void MediaDecoder::AddOutputStream(ProcessedMediaStream* aStream,
                                    bool aFinishWhenEnded)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   mDecoderStateMachine->AddOutputStream(aStream, aFinishWhenEnded);
 }
 
-void MediaDecoder::RemoveOutputStream(MediaStream* aStream)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
-  mDecoderStateMachine->RemoveOutputStream(aStream);
-}
-
 double MediaDecoder::GetDuration()
 {
   MOZ_ASSERT(NS_IsMainThread());
   return mDuration;
 }
 
 AbstractCanonical<media::NullableTimeUnit>*
 MediaDecoder::CanonicalDurationOrNull()
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -394,18 +394,16 @@ public:
   // captureStream(UntilEnded). Seeking creates a new source stream, as does
   // replaying after the input as ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
   // Add an output stream. All decoder output will be sent to the stream.
   // The stream is initially blocked. The decoder is responsible for unblocking
   // it while it is playing back.
   virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
-  // Remove an output stream added with AddOutputStream.
-  virtual void RemoveOutputStream(MediaStream* aStream);
 
   // Return the duration of the video in seconds.
   virtual double GetDuration();
 
   // A media stream is assumed to be infinite if the metadata doesn't
   // contain the duration, and range requests are not supported, and
   // no headers give a hint of a possible duration (Content-Length,
   // Content-Duration, and variants), and we cannot seek in the media
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -3172,54 +3172,25 @@ void MediaDecoderStateMachine::DispatchA
         self->mDecodedStream->StartPlayback(self->GetMediaTime(), self->mInfo);
       }
       self->ScheduleStateMachine();
     }
   });
   OwnerThread()->Dispatch(r.forget());
 }
 
-void MediaDecoderStateMachine::DispatchAudioUncaptured()
-{
-  nsRefPtr<MediaDecoderStateMachine> self = this;
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self] () -> void
-  {
-    MOZ_ASSERT(self->OnTaskQueue());
-    ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
-    if (self->mAudioCaptured) {
-      // Start again the audio sink
-      self->mAudioCaptured = false;
-      if (self->IsPlaying()) {
-        self->StartAudioThread();
-      }
-      self->ScheduleStateMachine();
-    }
-  });
-  OwnerThread()->Dispatch(r.forget());
-}
-
 void MediaDecoderStateMachine::AddOutputStream(ProcessedMediaStream* aStream,
                                                bool aFinishWhenEnded)
 {
   MOZ_ASSERT(NS_IsMainThread());
   DECODER_LOG("AddOutputStream aStream=%p!", aStream);
   mDecodedStream->Connect(aStream, aFinishWhenEnded);
   DispatchAudioCaptured();
 }
 
-void MediaDecoderStateMachine::RemoveOutputStream(MediaStream* aStream)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  DECODER_LOG("RemoveOutputStream=%p!", aStream);
-  mDecodedStream->Remove(aStream);
-  if (!mDecodedStream->HasConsumers()) {
-    DispatchAudioUncaptured();
-  }
-}
-
 } // namespace mozilla
 
 // avoid redefined macro in unified build
 #undef LOG
 #undef DECODER_LOG
 #undef VERBOSE_LOG
 #undef DECODER_WARN
 #undef DECODER_WARN_HELPER
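
DispatchAudioUncaptured, removed above, follows the usual Gecko dispatch
shape: capture a strong self-reference in a lambda, wrap it in a runnable,
and run it on the owning task queue so the state flip happens on the right
thread. A toy single-threaded rendition in standard C++ (std::shared_ptr and
a plain queue stand in for nsRefPtr and OwnerThread(); this is a sketch of
the pattern, not the real types):

    #include <cstdio>
    #include <functional>
    #include <memory>
    #include <queue>

    struct StateMachine : std::enable_shared_from_this<StateMachine> {
      bool audioCaptured = true;
      std::queue<std::function<void()>> taskQueue;  // stands in for OwnerThread()

      void DispatchAudioUncaptured() {
        auto self = shared_from_this();  // keep the object alive until the task runs
        taskQueue.push([self] {
          if (self->audioCaptured) {
            self->audioCaptured = false;  // the real code restarts the audio sink here
            std::puts("audio uncaptured");
          }
        });
      }
    };

    int main() {
      auto sm = std::make_shared<StateMachine>();
      sm->DispatchAudioUncaptured();
      while (!sm->taskQueue.empty()) {  // drain the "task queue"
        sm->taskQueue.front()();
        sm->taskQueue.pop();
      }
      return 0;
    }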
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -144,30 +144,27 @@ public:
     DECODER_STATE_SEEKING,
     DECODER_STATE_BUFFERING,
     DECODER_STATE_COMPLETED,
     DECODER_STATE_SHUTDOWN,
     DECODER_STATE_ERROR
   };
 
   void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
-  // Remove an output stream added with AddOutputStream.
-  void RemoveOutputStream(MediaStream* aStream);
 
   // Set/Unset dormant state.
   void SetDormant(bool aDormant);
 
 private:
   // Initialization that needs to happen on the task queue. This is the first
   // task that gets run on the task queue, and is dispatched from the MDSM
   // constructor immediately after the task queue is created.
   void InitializationTask();
 
   void DispatchAudioCaptured();
-  void DispatchAudioUncaptured();
 
   void Shutdown();
 public:
 
   void DispatchShutdown()
   {
     nsCOMPtr<nsIRunnable> runnable =
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::Shutdown);
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -295,29 +295,30 @@ protected:
 };
 
 /**
  * nsIMediaDevice implementation.
  */
 NS_IMPL_ISUPPORTS(MediaDevice, nsIMediaDevice)
 
 MediaDevice::MediaDevice(MediaEngineSource* aSource, bool aIsVideo)
-  : mMediaSource(aSource->GetMediaSource())
-  , mSource(aSource)
+  : mSource(aSource)
   , mIsVideo(aIsVideo)
 {
   mSource->GetName(mName);
   nsCString id;
   mSource->GetUUID(id);
   CopyUTF8toUTF16(id, mID);
 }
 
 VideoDevice::VideoDevice(MediaEngineVideoSource* aSource)
   : MediaDevice(aSource, true)
-{}
+{
+  mMediaSource = aSource->GetMediaSource();
+}
 
 /**
  * Helper functions that implement the constraints algorithm from
  * http://dev.w3.org/2011/webrtc/editor/getusermedia.html#methods-5
  */
 
 bool
 MediaDevice::StringsContain(const OwningStringOrStringSequence& aStrings,
@@ -433,18 +434,16 @@ MediaDevice::SetId(const nsAString& aID)
   mID.Assign(aID);
 }
 
 NS_IMETHODIMP
 MediaDevice::GetMediaSource(nsAString& aMediaSource)
 {
   if (mMediaSource == dom::MediaSourceEnum::Microphone) {
     aMediaSource.Assign(NS_LITERAL_STRING("microphone"));
-  } else if (mMediaSource == dom::MediaSourceEnum::AudioCapture) {
-    aMediaSource.Assign(NS_LITERAL_STRING("audioCapture"));
   } else if (mMediaSource == dom::MediaSourceEnum::Window) { // this will go away
     aMediaSource.Assign(NS_LITERAL_STRING("window"));
   } else { // all the rest are shared
     aMediaSource.Assign(NS_ConvertUTF8toUTF16(
       dom::MediaSourceEnumValues::strings[uint32_t(mMediaSource)].value));
   }
   return NS_OK;
 }
@@ -780,103 +779,89 @@ public:
         branch->GetBoolPref("media.getusermedia.agc_enabled", &agc_on);
         branch->GetIntPref("media.getusermedia.agc", &agc);
         branch->GetBoolPref("media.getusermedia.noise_enabled", &noise_on);
         branch->GetIntPref("media.getusermedia.noise", &noise);
         branch->GetIntPref("media.getusermedia.playout_delay", &playout_delay);
       }
     }
 #endif
-
-    MediaStreamGraph* msg = MediaStreamGraph::GetInstance();
-    nsRefPtr<SourceMediaStream> stream = msg->CreateSourceStream(nullptr);
-
-    nsRefPtr<DOMLocalMediaStream> domStream;
-    // AudioCapture is a special case here, in the sense that we're not really
-    // using the audio source and the SourceMediaStream, which act as
-    // placeholders. We re-route a number of streams internally in the MSG and
-    // mix them down instead.
-    if (mAudioSource &&
-        mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
-      domStream = DOMLocalMediaStream::CreateAudioCaptureStream(window);
-      // It should be possible to pipe the capture stream to anything. CORS is
-      // not a problem here, as the content comes explicitly from the user.
-      domStream->SetPrincipal(window->GetExtantDoc()->NodePrincipal());
-      msg->RegisterCaptureStreamForWindow(
-            mWindowID, domStream->GetStream()->AsProcessedStream());
-      window->SetAudioCapture(true);
-    } else {
-      // Normal case, connect the source stream to the track union stream to
-      // avoid us blocking
-      nsRefPtr<nsDOMUserMediaStream> trackunion =
-        nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
-                                                     mAudioSource, mVideoSource);
-      trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
-      nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
-        AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
-      trackunion->mSourceStream = stream;
-      trackunion->mPort = port.forget();
-      // Log the relationship between SourceMediaStream and TrackUnion stream
-      // Make sure logger starts before capture
-      AsyncLatencyLogger::Get(true);
-      LogLatency(AsyncLatencyLogger::MediaStreamCreate,
-          reinterpret_cast<uint64_t>(stream.get()),
-          reinterpret_cast<int64_t>(trackunion->GetStream()));
-
-      nsCOMPtr<nsIPrincipal> principal;
-      if (mPeerIdentity) {
-        principal = nsNullPrincipal::Create();
-        trackunion->SetPeerIdentity(mPeerIdentity.forget());
-      } else {
-        principal = window->GetExtantDoc()->NodePrincipal();
-      }
-      trackunion->CombineWithPrincipal(principal);
-
-      domStream = trackunion.forget();
-    }
-
-    if (!domStream || sInShutdown) {
+    // Create a media stream.
+    nsRefPtr<nsDOMUserMediaStream> trackunion =
+      nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
+                                                   mAudioSource, mVideoSource);
+    if (!trackunion || sInShutdown) {
       nsCOMPtr<nsIDOMGetUserMediaErrorCallback> onFailure = mOnFailure.forget();
       LOG(("Returning error for getUserMedia() - no stream"));
 
       nsGlobalWindow* window = nsGlobalWindow::GetInnerWindowWithId(mWindowID);
       if (window) {
         nsRefPtr<MediaStreamError> error = new MediaStreamError(window,
             NS_LITERAL_STRING("InternalError"),
             sInShutdown ? NS_LITERAL_STRING("In shutdown") :
                           NS_LITERAL_STRING("No stream."));
         onFailure->OnError(error);
       }
       return NS_OK;
     }
+    trackunion->AudioConfig(aec_on, (uint32_t) aec,
+                            agc_on, (uint32_t) agc,
+                            noise_on, (uint32_t) noise,
+                            playout_delay);
+
+
+    MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
+    nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr);
+
+    // connect the source stream to the track union stream to avoid us blocking
+    trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
+    nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
+      AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
+    trackunion->mSourceStream = stream;
+    trackunion->mPort = port.forget();
+    // Log the relationship between SourceMediaStream and TrackUnion stream
+    // Make sure logger starts before capture
+    AsyncLatencyLogger::Get(true);
+    LogLatency(AsyncLatencyLogger::MediaStreamCreate,
+               reinterpret_cast<uint64_t>(stream.get()),
+               reinterpret_cast<int64_t>(trackunion->GetStream()));
+
+    nsCOMPtr<nsIPrincipal> principal;
+    if (mPeerIdentity) {
+      principal = nsNullPrincipal::Create();
+      trackunion->SetPeerIdentity(mPeerIdentity.forget());
+    } else {
+      principal = window->GetExtantDoc()->NodePrincipal();
+    }
+    trackunion->CombineWithPrincipal(principal);
 
     // The listener was added at the beginning in an inactive state.
     // Activate our listener. We'll call Start() on the source when we get a callback
     // that the MediaStream has started consuming. The listener is freed
     // when the page is invalidated (on navigation or close).
     mListener->Activate(stream.forget(), mAudioSource, mVideoSource);
 
     // Note: includes JS callbacks; must be released on MainThread
     TracksAvailableCallback* tracksAvailableCallback =
-      new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, domStream);
+      new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, trackunion);
 
     mListener->AudioConfig(aec_on, (uint32_t) aec,
                            agc_on, (uint32_t) agc,
                            noise_on, (uint32_t) noise,
                            playout_delay);
 
     // Dispatch to the media thread to ask it to start the sources,
     // because that can take a while.
     // Pass ownership of trackunion to the MediaOperationTask
     // to ensure it's kept alive until the MediaOperationTask runs (at least).
-    MediaManager::PostTask(
-      FROM_HERE, new MediaOperationTask(MEDIA_START, mListener, domStream,
-                                        tracksAvailableCallback, mAudioSource,
-                                        mVideoSource, false, mWindowID,
-                                        mOnFailure.forget()));
+    MediaManager::PostTask(FROM_HERE,
+      new MediaOperationTask(MEDIA_START, mListener, trackunion,
+                             tracksAvailableCallback,
+                             mAudioSource, mVideoSource, false, mWindowID,
+                             mOnFailure.forget()));
     // We won't need mOnFailure now.
     mOnFailure = nullptr;
 
     if (!MediaManager::IsPrivateBrowsing(window)) {
       // Call GetOriginKey again, this time w/persist = true, to promote
       // deviceIds to persistent, in case they're not already. Fire'n'forget.
       nsRefPtr<Pledge<nsCString>> p = media::GetOriginKey(mOrigin, false, true);
     }
@@ -1255,19 +1240,17 @@ static auto& MediaManager_ToJSArray = Me
 static auto& MediaManager_AnonymizeDevices = MediaManager::AnonymizeDevices;
 
 /**
  * EnumerateRawDevices - Enumerate a list of audio & video devices that
  * satisfy passed-in constraints. List contains raw id's.
  */
 
 already_AddRefed<MediaManager::PledgeSourceSet>
-MediaManager::EnumerateRawDevices(uint64_t aWindowId,
-                                  MediaSourceEnum aVideoType,
-                                  MediaSourceEnum aAudioType,
+MediaManager::EnumerateRawDevices(uint64_t aWindowId, MediaSourceEnum aVideoType,
                                   bool aFake, bool aFakeTracks)
 {
   MOZ_ASSERT(NS_IsMainThread());
   nsRefPtr<PledgeSourceSet> p = new PledgeSourceSet();
   uint32_t id = mOutstandingPledges.Append(*p);
 
   // Check if the preference for using audio/video loopback devices is
   // enabled. This is currently used for automated media tests only.
@@ -1287,18 +1270,17 @@ MediaManager::EnumerateRawDevices(uint64
       }
     } else {
       aFake = false;
     }
   }
 
   MediaManager::PostTask(FROM_HERE, NewTaskFrom([id, aWindowId, audioLoopDev,
                                                  videoLoopDev, aVideoType,
-                                                 aAudioType, aFake,
-                                                 aFakeTracks]() mutable {
+                                                 aFake, aFakeTracks]() mutable {
     nsRefPtr<MediaEngine> backend;
     if (aFake) {
       backend = new MediaEngineDefault(aFakeTracks);
     } else {
       nsRefPtr<MediaManager> manager = MediaManager_GetInstance();
       backend = manager->GetBackend(aWindowId);
     }
 
@@ -1307,17 +1289,17 @@ MediaManager::EnumerateRawDevices(uint64
     nsTArray<nsRefPtr<VideoDevice>> videos;
     GetSources(backend, aVideoType, &MediaEngine::EnumerateVideoDevices, videos,
                videoLoopDev);
     for (auto& source : videos) {
       result->AppendElement(source);
     }
 
     nsTArray<nsRefPtr<AudioDevice>> audios;
-    GetSources(backend, aAudioType,
+    GetSources(backend, dom::MediaSourceEnum::Microphone,
                &MediaEngine::EnumerateAudioDevices, audios, audioLoopDev);
     for (auto& source : audios) {
       result->AppendElement(source);
     }
 
     SourceSet* handoff = result.forget();
     NS_DispatchToMainThread(do_AddRef(NewRunnableFrom([id, handoff]() mutable {
       ScopedDeletePtr<SourceSet> result(handoff); // grab result
@@ -1629,17 +1611,16 @@ MediaManager::GetUserMedia(nsPIDOMWindow
     return rv;
   }
 
   if (!Preferences::GetBool("media.navigator.video.enabled", true)) {
     c.mVideo.SetAsBoolean() = false;
   }
 
   MediaSourceEnum videoType = dom::MediaSourceEnum::Camera;
-  MediaSourceEnum audioType = dom::MediaSourceEnum::Microphone;
 
   if (c.mVideo.IsMediaTrackConstraints()) {
     auto& vc = c.mVideo.GetAsMediaTrackConstraints();
     videoType = StringToEnum(dom::MediaSourceEnumValues::strings,
                              vc.mMediaSource,
                              videoType);
     switch (videoType) {
       case dom::MediaSourceEnum::Camera:
@@ -1718,33 +1699,16 @@ MediaManager::GetUserMedia(nsPIDOMWindow
     // Loop has implicit permissions within Firefox, as it is built-in,
     // and will manage the active tab and provide appropriate UI.
     if (loop && (videoType == dom::MediaSourceEnum::Window ||
                  videoType == dom::MediaSourceEnum::Application ||
                  videoType == dom::MediaSourceEnum::Screen)) {
        privileged = false;
     }
   }
-
-  if (c.mAudio.IsMediaTrackConstraints()) {
-    auto& ac = c.mAudio.GetAsMediaTrackConstraints();
-    audioType = StringToEnum(dom::MediaSourceEnumValues::strings,
-                             ac.mMediaSource,
-                             audioType);
-    // Only enable AudioCapture if the pref is enabled. If it's not, we can deny
-    // right away.
-    if (audioType == dom::MediaSourceEnum::AudioCapture &&
-        !Preferences::GetBool("media.getusermedia.audiocapture.enabled")) {
-      nsRefPtr<MediaStreamError> error =
-        new MediaStreamError(aWindow,
-            NS_LITERAL_STRING("PermissionDeniedError"));
-      onFailure->OnError(error);
-      return NS_OK;
-    }
-  }
   StreamListeners* listeners = AddWindowID(windowID);
 
   // Create a disabled listener to act as a placeholder
   nsRefPtr<GetUserMediaCallbackMediaStreamListener> listener =
     new GetUserMediaCallbackMediaStreamListener(mMediaThread, windowID);
 
   // No need for locking because we always do this in the main thread.
   listeners->AppendElement(listener);
@@ -1797,18 +1761,17 @@ MediaManager::GetUserMedia(nsPIDOMWindow
       Preferences::GetBool("media.navigator.streams.fake");
 
   bool fakeTracks = c.mFakeTracks.WasPassed()? c.mFakeTracks.Value() : false;
 
   bool askPermission = !privileged &&
       (!fake || Preferences::GetBool("media.navigator.permission.fake"));
 
   nsRefPtr<PledgeSourceSet> p = EnumerateDevicesImpl(windowID, videoType,
-                                                     audioType, fake,
-                                                     fakeTracks);
+                                                     fake, fakeTracks);
   p->Then([this, onSuccess, onFailure, windowID, c, listener, askPermission,
            prefs, isHTTPS, callID, origin](SourceSet*& aDevices) mutable {
     ScopedDeletePtr<SourceSet> devices(aDevices); // grab result
 
     // Ensure this pointer is still valid, and window is still alive.
     nsRefPtr<MediaManager> mgr = MediaManager::GetInstance();
     nsRefPtr<nsPIDOMWindow> window = static_cast<nsPIDOMWindow*>
         (nsGlobalWindow::GetInnerWindowWithId(windowID));
@@ -1954,19 +1917,17 @@ MediaManager::ToJSArray(SourceSet& aDevi
     }
   } else {
     var->SetAsEmptyArray(); // because SetAsArray() fails on zero length arrays.
   }
   return var.forget();
 }
 
 already_AddRefed<MediaManager::PledgeSourceSet>
-MediaManager::EnumerateDevicesImpl(uint64_t aWindowId,
-                                   MediaSourceEnum aVideoType,
-                                   MediaSourceEnum aAudioType,
+MediaManager::EnumerateDevicesImpl(uint64_t aWindowId, MediaSourceEnum aVideoType,
                                    bool aFake, bool aFakeTracks)
 {
   MOZ_ASSERT(NS_IsMainThread());
   nsPIDOMWindow *window = static_cast<nsPIDOMWindow*>
       (nsGlobalWindow::GetInnerWindowWithId(aWindowId));
 
   // This function returns a pledge, a promise-like object with the future result
   nsRefPtr<PledgeSourceSet> pledge = new PledgeSourceSet();
@@ -1985,23 +1946,22 @@ MediaManager::EnumerateDevicesImpl(uint6
 
   // GetOriginKey is an async API that returns a pledge (a promise-like
   // pattern). We use .Then() to pass in a lambda to run back on this same
   // thread later once GetOriginKey resolves. Needed variables are "captured"
   // (passed by value) safely into the lambda.
 
   nsRefPtr<Pledge<nsCString>> p = media::GetOriginKey(origin, privateBrowsing,
                                                       persist);
-  p->Then([id, aWindowId, aVideoType, aAudioType,
+  p->Then([id, aWindowId, aVideoType,
            aFake, aFakeTracks](const nsCString& aOriginKey) mutable {
     MOZ_ASSERT(NS_IsMainThread());
     nsRefPtr<MediaManager> mgr = MediaManager_GetInstance();
 
-    nsRefPtr<PledgeSourceSet> p = mgr->EnumerateRawDevices(aWindowId,
-                                                           aVideoType, aAudioType,
+    nsRefPtr<PledgeSourceSet> p = mgr->EnumerateRawDevices(aWindowId, aVideoType,
                                                            aFake, aFakeTracks);
     p->Then([id, aWindowId, aOriginKey](SourceSet*& aDevices) mutable {
       ScopedDeletePtr<SourceSet> devices(aDevices); // secondary result
 
       // Only run if window is still on our active list.
       nsRefPtr<MediaManager> mgr = MediaManager_GetInstance();
       if (!mgr) {
         return NS_OK;
@@ -2030,17 +1990,16 @@ MediaManager::EnumerateDevices(nsPIDOMWi
   uint64_t windowId = aWindow->WindowID();
 
   AddWindowID(windowId);
 
   bool fake = Preferences::GetBool("media.navigator.streams.fake");
 
   nsRefPtr<PledgeSourceSet> p = EnumerateDevicesImpl(windowId,
                                                      dom::MediaSourceEnum::Camera,
-                                                     dom::MediaSourceEnum::Microphone,
                                                      fake);
   p->Then([onSuccess](SourceSet*& aDevices) mutable {
     ScopedDeletePtr<SourceSet> devices(aDevices); // grab result
     nsCOMPtr<nsIWritableVariant> array = MediaManager_ToJSArray(*devices);
     onSuccess->OnSuccess(array);
   }, [onFailure](MediaStreamError& reason) mutable {
     onFailure->OnError(&reason);
   });
@@ -2111,17 +2070,17 @@ StopSharingCallback(MediaManager *aThis,
     auto length = aListeners->Length();
     for (size_t i = 0; i < length; ++i) {
       GetUserMediaCallbackMediaStreamListener *listener = aListeners->ElementAt(i);
 
       if (listener->Stream()) { // aka HasBeenActivate()ed
         listener->Invalidate();
       }
       listener->Remove();
-      listener->StopSharing();
+      listener->StopScreenWindowSharing();
     }
     aListeners->Clear();
     aThis->RemoveWindowID(aWindowID);
   }
 }
 
 
 void
@@ -2434,17 +2393,17 @@ MediaManager::Observe(nsISupports* aSubj
   } else if (!strcmp(aTopic, "getUserMedia:revoke")) {
     nsresult rv;
     // may be windowid or screen:windowid
     nsDependentString data(aData);
     if (Substring(data, 0, strlen("screen:")).EqualsLiteral("screen:")) {
       uint64_t windowID = PromiseFlatString(Substring(data, strlen("screen:"))).ToInteger64(&rv);
       MOZ_ASSERT(NS_SUCCEEDED(rv));
       if (NS_SUCCEEDED(rv)) {
-        LOG(("Revoking Screen/windowCapture access for window %llu", windowID));
+        LOG(("Revoking Screeen/windowCapture access for window %llu", windowID));
         StopScreensharing(windowID);
       }
     } else {
       uint64_t windowID = nsString(aData).ToInteger64(&rv);
       MOZ_ASSERT(NS_SUCCEEDED(rv));
       if (NS_SUCCEEDED(rv)) {
         LOG(("Revoking MediaCapture access for window %llu", windowID));
         OnNavigation(windowID);
@@ -2615,17 +2574,17 @@ static void
 StopScreensharingCallback(MediaManager *aThis,
                           uint64_t aWindowID,
                           StreamListeners *aListeners,
                           void *aData)
 {
   if (aListeners) {
     auto length = aListeners->Length();
     for (size_t i = 0; i < length; ++i) {
-      aListeners->ElementAt(i)->StopSharing();
+      aListeners->ElementAt(i)->StopScreenWindowSharing();
     }
   }
 }
 
 void
 MediaManager::StopScreensharing(uint64_t aWindowID)
 {
   // We need to stop window/screensharing for all streams in all innerwindows that
@@ -2777,36 +2736,29 @@ GetUserMediaCallbackMediaStreamListener:
                            this, nullptr, nullptr,
                            mAudioSource, mVideoSource,
                            mFinished, mWindowID, nullptr));
 }
 
 // Doesn't kill audio
 // XXX refactor to combine with Invalidate()?
 void
-GetUserMediaCallbackMediaStreamListener::StopSharing()
+GetUserMediaCallbackMediaStreamListener::StopScreenWindowSharing()
 {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   if (mVideoSource && !mStopped &&
       (mVideoSource->GetMediaSource() == dom::MediaSourceEnum::Screen ||
        mVideoSource->GetMediaSource() == dom::MediaSourceEnum::Application ||
        mVideoSource->GetMediaSource() == dom::MediaSourceEnum::Window)) {
     // Stop the whole stream if there's no audio; just the video track if we have both
     MediaManager::PostTask(FROM_HERE,
       new MediaOperationTask(mAudioSource ? MEDIA_STOP_TRACK : MEDIA_STOP,
                              this, nullptr, nullptr,
                              nullptr, mVideoSource,
                              mFinished, mWindowID, nullptr));
-  } else if (mAudioSource &&
-             mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
-    nsCOMPtr<nsPIDOMWindow> window = nsGlobalWindow::GetInnerWindowWithId(mWindowID);
-    MOZ_ASSERT(window);
-    window->SetAudioCapture(false);
-    MediaStreamGraph::GetInstance()->UnregisterCaptureStreamForWindow(mWindowID);
-    mStream->Destroy();
   }
 }
 
 // Stop backend for track
 
 void
 GetUserMediaCallbackMediaStreamListener::StopTrack(TrackID aID, bool aIsAudio)
 {
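
Several hunks above revolve around the pledge pattern the comments describe:
an async producer such as media::GetOriginKey returns a promise-like object,
and .Then() registers a lambda to run once it resolves. A reduced,
single-threaded sketch in standard C++ (the real Pledge is a Gecko-internal
main-thread type; everything here is illustrative):

    #include <cstdio>
    #include <functional>
    #include <string>

    template <typename T>
    struct Pledge {
      std::function<void(const T&)> then;
      void Then(std::function<void(const T&)> f) { then = std::move(f); }
      void Resolve(const T& value) { if (then) then(value); }  // fire the continuation
    };

    int main() {
      Pledge<std::string> originKey;
      originKey.Then([](const std::string& key) {  // runs once resolved
        std::printf("got origin key: %s\n", key.c_str());
      });
      originKey.Resolve("example-key");  // later, from the async source
      return 0;
    }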
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -98,17 +98,17 @@ public:
   {
     NS_ASSERTION(mStream,"Getting stream from never-activated GUMCMSListener");
     if (!mStream) {
       return nullptr;
     }
     return mStream->AsSourceStream();
   }
 
-  void StopSharing();
+  void StopScreenWindowSharing();
 
   void StopTrack(TrackID aID, bool aIsAudio);
 
   // mVideo/AudioSource are set by Activate(), so we assume they're capturing
   // if set and represent a real capture device.
   bool CapturingVideo()
   {
     NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
@@ -592,24 +592,20 @@ private:
   static bool IsLoop(nsIURI* aDocURI);
   static nsresult GenerateUUID(nsAString& aResult);
   static nsresult AnonymizeId(nsAString& aId, const nsACString& aOriginKey);
 public: // TODO: make private once we upgrade to GCC 4.8+ on linux.
   static void AnonymizeDevices(SourceSet& aDevices, const nsACString& aOriginKey);
   static already_AddRefed<nsIWritableVariant> ToJSArray(SourceSet& aDevices);
 private:
   already_AddRefed<PledgeSourceSet>
-  EnumerateRawDevices(uint64_t aWindowId,
-                      dom::MediaSourceEnum aVideoType,
-                      dom::MediaSourceEnum aAudioType,
+  EnumerateRawDevices(uint64_t aWindowId, dom::MediaSourceEnum aSrcType,
                       bool aFake, bool aFakeTracks);
   already_AddRefed<PledgeSourceSet>
-  EnumerateDevicesImpl(uint64_t aWindowId,
-                       dom::MediaSourceEnum aVideoSrcType,
-                       dom::MediaSourceEnum aAudioSrcType,
+  EnumerateDevicesImpl(uint64_t aWindowId, dom::MediaSourceEnum aSrcType,
                        bool aFake = false, bool aFakeTracks = false);
 
   StreamListeners* AddWindowID(uint64_t aWindowId);
   WindowTable *GetActiveWindows() {
     NS_ASSERTION(NS_IsMainThread(), "Only access windowlist on main thread");
     return &mActiveWindows;
   }
 
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -13,17 +13,16 @@
 #include "nsIObserver.h"
 #include "nsPrintfCString.h"
 #include "nsServiceManagerUtils.h"
 #include "prerror.h"
 #include "mozilla/Logging.h"
 #include "mozilla/Attributes.h"
 #include "TrackUnionStream.h"
 #include "ImageContainer.h"
-#include "AudioCaptureStream.h"
 #include "AudioChannelService.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "AudioNodeExternalInputStream.h"
 #include "mozilla/dom/AudioContextBinding.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
@@ -3188,27 +3187,16 @@ MediaStreamGraph::CreateTrackUnionStream
   TrackUnionStream* stream = new TrackUnionStream(aWrapper);
   NS_ADDREF(stream);
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
   graph->AppendMessage(new CreateMessage(stream));
   return stream;
 }
 
-ProcessedMediaStream*
-MediaStreamGraph::CreateAudioCaptureStream(DOMMediaStream* aWrapper)
-{
-  AudioCaptureStream* stream = new AudioCaptureStream(aWrapper);
-  NS_ADDREF(stream);
-  MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
-  stream->SetGraphImpl(graph);
-  graph->AppendMessage(new CreateMessage(stream));
-  return stream;
-}
-
 AudioNodeExternalInputStream*
 MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (!aSampleRate) {
     aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
   }
   AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(
@@ -3563,70 +3551,9 @@ MediaStreamGraph::StartNonRealtimeProces
 
 void
 ProcessedMediaStream::AddInput(MediaInputPort* aPort)
 {
   mInputs.AppendElement(aPort);
   GraphImpl()->SetStreamOrderDirty();
 }
 
-void
-MediaStreamGraph::RegisterCaptureStreamForWindow(
-    uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
-  graphImpl->RegisterCaptureStreamForWindow(aWindowId, aCaptureStream);
-}
-
-void
-MediaStreamGraphImpl::RegisterCaptureStreamForWindow(
-  uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  WindowAndStream winAndStream;
-  winAndStream.mWindowId = aWindowId;
-  winAndStream.mCaptureStreamSink = aCaptureStream;
-  mWindowCaptureStreams.AppendElement(winAndStream);
-}
-
-void
-MediaStreamGraph::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
-  graphImpl->UnregisterCaptureStreamForWindow(aWindowId);
-}
-
-void
-MediaStreamGraphImpl::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  for (uint32_t i = 0; i < mWindowCaptureStreams.Length(); i++) {
-    if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
-      mWindowCaptureStreams.RemoveElementAt(i);
-    }
-  }
-}
-
-already_AddRefed<MediaInputPort>
-MediaStreamGraph::ConnectToCaptureStream(uint64_t aWindowId,
-                                         MediaStream* aMediaStream)
-{
-  return aMediaStream->GraphImpl()->ConnectToCaptureStream(aWindowId,
-                                                           aMediaStream);
-}
-
-already_AddRefed<MediaInputPort>
-MediaStreamGraphImpl::ConnectToCaptureStream(uint64_t aWindowId,
-                                             MediaStream* aMediaStream)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  for (uint32_t i = 0; i < mWindowCaptureStreams.Length(); i++) {
-    if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
-      ProcessedMediaStream* sink = mWindowCaptureStreams[i].mCaptureStreamSink;
-      return sink->AllocateInputPort(aMediaStream, 0);
-    }
-  }
-  return nullptr;
-}
-
 } // namespace mozilla
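
The removed Register/Unregister/ConnectToCaptureStream trio amounts to a
small registry keyed by window id and searched linearly. A standalone
miniature (std containers in place of nsTArray/nsRefPtr, a bare struct in
place of ProcessedMediaStream):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Sink { uint64_t window; };  // stands in for the capture stream sink

    struct Registry {
      std::vector<Sink> sinks;
      void Register(uint64_t w) { sinks.push_back({ w }); }
      void Unregister(uint64_t w) {
        for (size_t i = 0; i < sinks.size(); ++i) {
          if (sinks[i].window == w) { sinks.erase(sinks.begin() + i); return; }
        }
      }
      Sink* Connect(uint64_t w) {  // nullptr when no sink is registered
        for (auto& s : sinks) {
          if (s.window == w) return &s;
        }
        return nullptr;
      }
    };

    int main() {
      Registry r;
      r.Register(42);
      std::printf("connect 42 -> %s\n", r.Connect(42) ? "found" : "null");
      r.Unregister(42);
      std::printf("connect 42 -> %s\n", r.Connect(42) ? "found" : "null");
      return 0;
    }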
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -1257,20 +1257,16 @@ public:
    * removed tracks immediately end.
    * For each added track, the track ID of the output track is the track ID
    * of the input track or one plus the maximum ID of all previously added
    * tracks, whichever is greater.
    * TODO at some point we will probably need to add API to select
    * particular tracks of each input stream.
    */
   ProcessedMediaStream* CreateTrackUnionStream(DOMMediaStream* aWrapper);
-  /**
-   * Create a stream that will mix all its audio input.
-   */
-  ProcessedMediaStream* CreateAudioCaptureStream(DOMMediaStream* aWrapper);
   // Internal AudioNodeStreams can only pass their output to another
   // AudioNode, whereas external AudioNodeStreams can pass their output
   // to an nsAudioStream for playback.
   enum AudioNodeStreamKind { SOURCE_STREAM, INTERNAL_STREAM, EXTERNAL_STREAM };
   /**
    * Create a stream that will process audio for an AudioNode.
    * Takes ownership of aEngine.  aSampleRate is the sampling rate used
    * for the stream.  If 0 is passed, the sampling rate of the engine's
@@ -1317,22 +1313,16 @@ public:
     *mPendingUpdateRunnables.AppendElement() = aRunnable;
   }
 
   /**
    * Returns graph sample rate in Hz.
    */
   TrackRate GraphRate() const { return mSampleRate; }
 
-  void RegisterCaptureStreamForWindow(uint64_t aWindowId,
-                                      ProcessedMediaStream* aCaptureStream);
-  void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
-  already_AddRefed<MediaInputPort> ConnectToCaptureStream(
-    uint64_t aWindowId, MediaStream* aMediaStream);
-
 protected:
   explicit MediaStreamGraph(TrackRate aSampleRate)
     : mNextGraphUpdateIndex(1)
     , mSampleRate(aSampleRate)
   {
     MOZ_COUNT_CTOR(MediaStreamGraph);
   }
   virtual ~MediaStreamGraph()
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -527,23 +527,16 @@ public:
   void EnsureNextIterationLocked()
   {
     mNeedAnotherIteration = true; // atomic
     if (mGraphDriverAsleep) { // atomic
       CurrentDriver()->WakeUp(); // Might not be the same driver; might have woken already
     }
   }
 
-  // Capture Stream API. This allows getting a mixed-down output for a window.
-  void RegisterCaptureStreamForWindow(uint64_t aWindowId,
-                                      ProcessedMediaStream* aCaptureStream);
-  void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
-  already_AddRefed<MediaInputPort>
-  ConnectToCaptureStream(uint64_t aWindowId, MediaStream* aMediaStream);
-
   // Data members
   //
   /**
    * Graphs hold owning references to their driver until shutdown. When a
    * driver switch occurs, the previous driver is either deleted, or its
    * ownership is passed to an event that takes care of the asynchronous
    * cleanup, as audio streams can take some time to shut down.
    */
@@ -757,26 +750,16 @@ private:
    * nsRefPtr to itself, giving it a ref-count of 1 during its entire lifetime,
    * and Destroy() nulls this self-reference in order to trigger self-deletion.
    */
   nsRefPtr<MediaStreamGraphImpl> mSelfRef;
   /**
    * Used to pass memory report information across threads.
    */
   nsTArray<AudioNodeSizes> mAudioStreamSizes;
-
-  struct WindowAndStream
-  {
-    uint64_t mWindowId;
-    nsRefPtr<ProcessedMediaStream> mCaptureStreamSink;
-  };
-  /**
-   * Stream for window audio capture.
-   */
-  nsTArray<WindowAndStream> mWindowCaptureStreams;
   /**
    * Indicates that the MSG thread should gather data for a memory report.
    */
   bool mNeedsMemoryReport;
 
 #ifdef DEBUG
   /**
    * Used to assert when AppendMessage() runs ControlMessages synchronously.
--- a/dom/media/moz.build
+++ b/dom/media/moz.build
@@ -191,17 +191,16 @@ EXPORTS.mozilla.dom += [
     'VideoPlaybackQuality.h',
     'VideoStreamTrack.h',
     'VideoTrack.h',
     'VideoTrackList.h',
 ]
 
 UNIFIED_SOURCES += [
     'AbstractThread.cpp',
-    'AudioCaptureStream.cpp',
     'AudioChannelFormat.cpp',
     'AudioCompactor.cpp',
     'AudioSegment.cpp',
     'AudioSink.cpp',
     'AudioStream.cpp',
     'AudioStreamTrack.cpp',
     'AudioTrack.cpp',
     'AudioTrackList.cpp',
--- a/dom/media/tests/mochitest/head.js
+++ b/dom/media/tests/mochitest/head.js
@@ -15,124 +15,16 @@ try {
   dump('TEST DEVICES: Using media devices:\n');
   dump('audio: ' + audioDevice + '\nvideo: ' + videoDevice + '\n');
   FAKE_ENABLED = false;
 } catch (e) {
  dump('TEST DEVICES: No test devices found (in media.{audio,video}_loopback_dev), using fake streams.\n');
   FAKE_ENABLED = true;
 }
 
-/**
- * This class provides helpers around analysing the audio content in a stream
- * using WebAudio AnalyserNodes.
- *
- * @constructor
- * @param {AudioContext} ac
- *                 The AudioContext used to analyse the stream.
- * @param {object} stream
- *                 A MediaStream object whose audio track we shall analyse.
- */
-function AudioStreamAnalyser(ac, stream) {
-  if (stream.getAudioTracks().length === 0) {
-    throw new Error("No audio track in stream");
-  }
-  this.audioContext = ac;
-  this.stream = stream;
-  this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
-  this.analyser = this.audioContext.createAnalyser();
-  this.sourceNode.connect(this.analyser);
-  this.data = new Uint8Array(this.analyser.frequencyBinCount);
-}
-
-AudioStreamAnalyser.prototype = {
-  /**
-   * Get an array of frequency domain data for our stream's audio track.
-   *
-   * @returns {array} A Uint8Array containing the frequency domain data.
-   */
-  getByteFrequencyData: function() {
-    this.analyser.getByteFrequencyData(this.data);
-    return this.data;
-  },
-
-  /**
-   * Append a canvas to the DOM where the frequency data are drawn.
-   * Useful to debug tests.
-   */
-  enableDebugCanvas: function() {
-    var cvs = document.createElement("canvas");
-    document.getElementById("content").appendChild(cvs);
-
-    // Easy: 1px per bin
-    cvs.width = this.analyser.frequencyBinCount;
-    cvs.height = 256;
-    cvs.style.border = "1px solid red";
-
-    var c = cvs.getContext('2d');
-
-    var self = this;
-    function render() {
-      c.clearRect(0, 0, cvs.width, cvs.height);
-      var array = self.getByteFrequencyData();
-      for (var i = 0; i < array.length; i++) {
-        c.fillRect(i, (256 - (array[i])), 1, 256);
-      }
-      requestAnimationFrame(render);
-    }
-    requestAnimationFrame(render);
-  },
-
-  /**
-   * Return a Promise that will be resolved when the function passed as an
-   * argument returns true (meaning the analysis was a success).
-   *
-   * @param {function} analysisFunction
-   *        A function that performs an analysis and returns true if the
-   *        analysis was a success (i.e. it found what it was looking for).
-   */
-  waitForAnalysisSuccess: function(analysisFunction) {
-    var self = this;
-    return new Promise((resolve, reject) => {
-      function analysisLoop() {
-        var success = analysisFunction(self.getByteFrequencyData());
-        if (success) {
-          resolve();
-          return;
-        }
-        // else, we need more time
-        requestAnimationFrame(analysisLoop);
-      }
-      analysisLoop();
-    });
-  },
-
-  /**
-   * Return the FFT bin index for a given frequency.
-   *
-   * @param {double} frequency
-   *        The frequency for which to return the bin number.
-   * @returns {integer} the index of the bin in the FFT array.
-   */
-  binIndexForFrequency: function(frequency) {
-    return 1 + Math.round(frequency *
-                          this.analyser.fftSize /
-                          this.audioContext.sampleRate);
-  },
-
-  /**
-   * Reverse operation, get the frequency for a bin index.
-   *
-   * @param {integer} index an index in an FFT array
-   * @returns {double} the frequency for this bin
-   */
-  frequencyForBinIndex: function(index) {
-    return (index - 1) *
-           this.audioContext.sampleRate /
-           this.analyser.fftSize;
-  }
-};
 
 /**
  * Create the necessary HTML elements for head and body as used by Mochitests
  *
  * @param {object} meta
  *        Meta information of the test
  * @param {string} meta.title
  *        Description of the test
@@ -239,20 +131,17 @@ function setupEnvironment() {
       ['media.peerconnection.enabled', true],
       ['media.peerconnection.identity.enabled', true],
       ['media.peerconnection.identity.timeout', 120000],
       ['media.peerconnection.ice.stun_client_maximum_transmits', 14],
       ['media.peerconnection.ice.trickle_grace_period', 30000],
       ['media.navigator.permission.disabled', true],
       ['media.navigator.streams.fake', FAKE_ENABLED],
       ['media.getusermedia.screensharing.enabled', true],
-      ['media.getusermedia.screensharing.allowed_domains', "mochi.test"],
-      ['media.getusermedia.audiocapture.enabled', true],
-      ['media.useAudioChannelService', true],
-      ['media.recorder.audio_node.enabled', true]
+      ['media.getusermedia.screensharing.allowed_domains', "mochi.test"]
     ]
   }, setTestOptions);
 
  // We don't care about waiting for this to complete; we just want to ensure
  // that we don't build up a huge backlog of GC work.
   SpecialPowers.exactGC(window);
 }
 
--- a/dom/media/tests/mochitest/mochitest.ini
+++ b/dom/media/tests/mochitest/mochitest.ini
@@ -25,17 +25,16 @@ skip-if = toolkit == 'gonk' || buildapp 
 [test_dataChannel_basicDataOnly.html]
 [test_dataChannel_basicVideo.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
 [test_dataChannel_bug1013809.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g emulator seems to be too slow (Bug 1016498 and 1008080)
 [test_dataChannel_noOffer.html]
 [test_enumerateDevices.html]
 skip-if = buildapp == 'mulet'
-[test_getUserMedia_audioCapture.html]
 [test_getUserMedia_basicAudio.html]
 skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
 [test_getUserMedia_basicVideo.html]
 skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
 [test_getUserMedia_basicVideo_playAfterLoadedmetadata.html]
 skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
 [test_getUserMedia_basicScreenshare.html]
 skip-if = buildapp == 'mulet' || buildapp == 'b2g' || toolkit == 'android' # no screenshare on b2g/android # Bug 1141029 Mulet parity with B2G Desktop for TC
--- a/dom/media/tests/mochitest/pc.js
+++ b/dom/media/tests/mochitest/pc.js
@@ -638,16 +638,49 @@ DataChannelWrapper.prototype = {
    */
   toString: function() {
     return "DataChannelWrapper (" + this._pc.label + '_' + this._channel.label + ")";
   }
 };
 
 
 /**
+ * This class provides helpers around analysing the audio content in a stream
+ * using WebAudio AnalyserNodes.
+ *
+ * @constructor
+ * @param {object} stream
+ *                 A MediaStream object whose audio track we shall analyse.
+ */
+function AudioStreamAnalyser(stream) {
+  if (stream.getAudioTracks().length === 0) {
+    throw new Error("No audio track in stream");
+  }
+  this.stream = stream;
+  this.audioContext = new AudioContext();
+  this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
+  this.analyser = this.audioContext.createAnalyser();
+  this.sourceNode.connect(this.analyser);
+  this.data = new Uint8Array(this.analyser.frequencyBinCount);
+}
+
+AudioStreamAnalyser.prototype = {
+  /**
+   * Get an array of frequency domain data for our stream's audio track.
+   *
+   * @returns {array} A Uint8Array containing the frequency domain data.
+   */
+  getByteFrequencyData: function() {
+    this.analyser.getByteFrequencyData(this.data);
+    return this.data;
+  }
+};
+
+
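For orientation, a usage sketch of the restored helper; note this version creates its own AudioContext, unlike the backed-out variant that took one as a constructor argument (the stream variable below is assumed to exist):

// Hypothetical usage from a test holding a MediaStream with an audio track.
var analyser = new AudioStreamAnalyser(stream); // throws if no audio track
var data = analyser.getByteFrequencyData();     // Uint8Array, one value per bin
// Crude liveness check: did any bin reach a high magnitude?
var loud = Array.prototype.some.call(data, function(v) { return v > 200; });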
+/**
  * This class acts as a wrapper around a PeerConnection instance.
  *
  * @constructor
  * @param {string} label
  *        Description for the peer connection instance
  * @param {object} configuration
  *        Configuration for the peer connection instance
  */
@@ -1521,30 +1554,30 @@ PeerConnectionWrapper.prototype = {
    * audio data in the frequency domain.
    *
    * @param {object} from
    *        A PeerConnectionWrapper whose audio RTPSender we use as source for
    *        the audio flow check.
    * @returns {Promise}
    *        A promise that resolves when we're receiving the tone from |from|.
    */
-  checkReceivingToneFrom : function(audiocontext, from) {
+  checkReceivingToneFrom : function(from) {
     var inputElem = from.localMediaElements[0];
 
     // As input we use the stream of |from|'s first available audio sender.
     var inputSenderTracks = from._pc.getSenders().map(sn => sn.track);
     var inputAudioStream = from._pc.getLocalStreams()
       .find(s => s.getAudioTracks().some(t => inputSenderTracks.some(t2 => t == t2)));
-    var inputAnalyser = new AudioStreamAnalyser(audiocontext, inputAudioStream);
+    var inputAnalyser = new AudioStreamAnalyser(inputAudioStream);
 
     // It would have been nice to have a working getReceivers() here, but until
     // we do, let's use what remote streams we have.
     var outputAudioStream = this._pc.getRemoteStreams()
       .find(s => s.getAudioTracks().length > 0);
-    var outputAnalyser = new AudioStreamAnalyser(audiocontext, outputAudioStream);
+    var outputAnalyser = new AudioStreamAnalyser(outputAudioStream);
 
     var maxWithIndex = (a, b, i) => (b >= a.value) ? { value: b, index: i } : a;
     var initial = { value: -1, index: -1 };
 
     return new Promise((resolve, reject) => inputElem.ontimeupdate = () => {
       var inputData = inputAnalyser.getByteFrequencyData();
       var outputData = outputAnalyser.getByteFrequencyData();
 
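The hunk above is cut off before the promise settles. Purely to illustrate how the visible maxWithIndex reduce gets used, a hedged reconstruction (the comparison and threshold are assumptions, not this patch's code):

// Sketch only: find the peak bin of each spectrum and compare them.
var inputPeak = inputData.reduce(maxWithIndex, initial);
var outputPeak = outputData.reduce(maxWithIndex, initial);
// If the received spectrum peaks in roughly the same bin as the sent one,
// the tone is flowing end to end.
if (Math.abs(outputPeak.index - inputPeak.index) <= 1 && outputPeak.value > 200) {
  resolve();
}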
deleted file mode 100644
--- a/dom/media/tests/mochitest/test_getUserMedia_audioCapture.html
+++ /dev/null
@@ -1,110 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
-  <title>Test AudioCapture </title>
-  <script type="application/javascript" src="mediaStreamPlayback.js"></script>
-</head>
-<body>
-<pre id="test">
-<script>
-
-createHTML({
-  bug: "1156472",
-  title: "Test AudioCapture with regular HTMLMediaElement, AudioContext, and HTMLMediaElement playing a MediaStream",
-  visible: true
-});
-
-scriptsReady
-.then(() => FAKE_ENABLED = false)
-.then(() => {
-  runTestWhenReady(function() {
-    // Get an opus file containing a sine wave at maximum amplitude, of duration
-    // `lengthSeconds`, and of frequency `frequency`.
-    function getSineWaveFile(frequency, lengthSeconds, callback) {
-      var chunks = [];
-      var off = new OfflineAudioContext(1, lengthSeconds * 48000, 48000);
-      var osc = off.createOscillator();
-      var rec = new MediaRecorder(osc);
-      rec.ondataavailable = function(e) {
-        chunks.push(e.data);
-      };
-      rec.onstop = function(e) {
-        var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
-        callback(blob);
-      }
-      osc.frequency.value = frequency;
-      osc.start();
-      rec.start();
-      off.startRendering().then(function(buffer) {
-        rec.stop();
-      });
-    }
-    /**
-     * Get two HTMLMediaElements:
-     * - One playing a sine tone from a blob (of an opus file created on the fly)
-     * - One being the output for an AudioContext's OscillatorNode, connected to
-     *   a MediaSourceDestinationNode.
-     *
-     * Also, use the AudioContext playing through its AudioDestinationNode another
-     * tone, using another OscillatorNode.
-     *
-     * Capture the output of the document, feed that back into the AudioContext,
-     * with an AnalyserNode, and check the frequency content to make sure we
-     * have recorded the three sources.
-     *
-     * The three sine tones have frequencies far apart from each other, so that we
-     * can check that the spectrum of the capture stream contains three
-     * components with a high magnitude.
-     */
-    var wavtone = createMediaElement("audio", "WaveTone");
-    var acTone = createMediaElement("audio", "audioContextTone");
-    var ac = new AudioContext();
-
-    var oscThroughMediaElement = ac.createOscillator();
-    oscThroughMediaElement.frequency.value = 1000;
-    var oscThroughAudioDestinationNode = ac.createOscillator();
-    oscThroughAudioDestinationNode.frequency.value = 5000;
-    var msDest = ac.createMediaStreamDestination();
-
-    oscThroughMediaElement.connect(msDest);
-    oscThroughAudioDestinationNode.connect(ac.destination);
-
-    acTone.mozSrcObject = msDest.stream;
-
-    getSineWaveFile(10000, 10, function(blob) {
-      wavtone.src = URL.createObjectURL(blob);
-      oscThroughMediaElement.start();
-      oscThroughAudioDestinationNode.start();
-      wavtone.loop = true;
-      wavtone.play();
-      acTone.play();
-    });
-
-    var constraints = {audio: {mediaSource: "audioCapture"}};
-
-    return getUserMedia(constraints).then((stream) => {
-      checkMediaStreamTracks(constraints, stream);
-      window.grip = stream;
-      var analyser = new AudioStreamAnalyser(ac, stream);
-      analyser.enableDebugCanvas();
-      return analyser.waitForAnalysisSuccess(function(array) {
-        // We want to find three frequency components here, around 1000, 5000
-        // and 10000 Hz. Frequencies are logarithmic. Also make sure we have
-        // low energy in between, not just flat white noise.
-        return (array[analyser.binIndexForFrequency(50)]    < 50 &&
-                array[analyser.binIndexForFrequency(1000)]  > 200 &&
-                array[analyser.binIndexForFrequency(2500)]  < 50 &&
-                array[analyser.binIndexForFrequency(5000)]  > 200 &&
-                array[analyser.binIndexForFrequency(7500)]  < 50 &&
-                array[analyser.binIndexForFrequency(10000)] > 200);
-      }).then(finish);
-    }).catch(finish);
-  });
-});
-
-
-
-</script>
-</pre>
-</body>
-</html>
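One note on the deleted test: new MediaRecorder(osc) records straight from an AudioNode, a Gecko-specific path behind the media.recorder.audio_node.enabled pref that this backout also removes from the harness prefs. The standards-track shape, sketched here for reference (all names are local to the sketch), goes through a MediaStreamAudioDestinationNode:

// Standard-API equivalent of recording an oscillator, for reference.
var ctx = new AudioContext();
var osc = ctx.createOscillator();
var dest = ctx.createMediaStreamDestination();
osc.connect(dest);
var rec = new MediaRecorder(dest.stream);
rec.ondataavailable = function(e) { console.log("chunk", e.data.size); };
osc.start();
rec.start();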
--- a/dom/media/tests/mochitest/test_peerConnection_replaceTrack.html
+++ b/dom/media/tests/mochitest/test_peerConnection_replaceTrack.html
@@ -131,17 +131,17 @@
             ok(pc.getLocalStreams().some(s => s.getTracks()
                                                .some(t => t == sender.track)),
                "track exists among pc's local streams");
           });
       }
     ]);
     test.chain.append([
       function PC_LOCAL_CHECK_WEBAUDIO_FLOW_PRESENT(test) {
-        return test.pcRemote.checkReceivingToneFrom(test.audioCtx, test.pcLocal);
+        return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
       }
     ]);
     test.chain.append([
       function PC_LOCAL_INVALID_ADD_VIDEOTRACKS(test) {
         var stream = test.pcLocal._pc.getLocalStreams()[0];
         var track = stream.getVideoTracks()[0];
         try {
           test.pcLocal._pc.addTrack(track, stream);
--- a/dom/media/tests/mochitest/test_peerConnection_webAudio.html
+++ b/dom/media/tests/mochitest/test_peerConnection_webAudio.html
@@ -27,17 +27,17 @@ runNetworkTest(function() {
       oscillator.start();
       var dest = test.audioContext.createMediaStreamDestination();
       oscillator.connect(dest);
       test.pcLocal.attachMedia(dest.stream, 'audio', 'local');
     }
   ]);
   test.chain.append([
     function CHECK_AUDIO_FLOW(test) {
-      return test.pcRemote.checkReceivingToneFrom(test.audioContext, test.pcLocal);
+      return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
     }
   ]);
   test.run();
 });
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -308,27 +308,29 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNod
 
 NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)
 
 AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                            bool aIsOffline,
                                            AudioChannel aChannel,
                                            uint32_t aNumberOfChannels,
-                                           uint32_t aLength, float aSampleRate)
-  : AudioNode(aContext, aIsOffline ? aNumberOfChannels : 2,
-              ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
+                                           uint32_t aLength,
+                                           float aSampleRate)
+  : AudioNode(aContext,
+              aIsOffline ? aNumberOfChannels : 2,
+              ChannelCountMode::Explicit,
+              ChannelInterpretation::Speakers)
   , mFramesToProduce(aLength)
   , mAudioChannel(AudioChannel::Normal)
   , mIsOffline(aIsOffline)
   , mAudioChannelAgentPlaying(false)
   , mExtraCurrentTime(0)
   , mExtraCurrentTimeSinceLastStartedBlocking(0)
   , mExtraCurrentTimeUpdatedSinceLastStableState(false)
-  , mCaptured(false)
 {
   bool startWithAudioDriver = true;
   MediaStreamGraph* graph = aIsOffline ?
                             MediaStreamGraph::CreateNonRealtimeInstance(aSampleRate) :
                             MediaStreamGraph::GetInstance(startWithAudioDriver, aChannel);
   AudioNodeEngine* engine = aIsOffline ?
                             new OfflineDestinationNodeEngine(this, aNumberOfChannels,
                                                              aLength, aSampleRate) :
@@ -498,43 +500,16 @@ AudioDestinationNode::WindowVolumeChange
                 : NS_LITERAL_STRING("mozinterruptbegin"));
     }
   }
 
   SetCanPlay(aVolume, aMuted);
   return NS_OK;
 }
 
-NS_IMETHODIMP
-AudioDestinationNode::WindowAudioCaptureChanged()
-{
-  MOZ_ASSERT(mAudioChannelAgent);
-
-  if (!mStream || Context()->IsOffline()) {
-    return NS_OK;
-  }
-
-  bool captured = GetOwner()->GetAudioCaptured();
-
-  if (captured != mCaptured) {
-    if (captured) {
-      nsCOMPtr<nsPIDOMWindow> window = Context()->GetParentObject();
-      uint64_t id = window->WindowID();
-      mCaptureStreamPort =
-        mStream->Graph()->ConnectToCaptureStream(id, mStream);
-    } else {
-      mCaptureStreamPort->Disconnect();
-      mCaptureStreamPort->Destroy();
-    }
-    mCaptured = captured;
-  }
-
-  return NS_OK;
-}
-
 AudioChannel
 AudioDestinationNode::MozAudioChannelType() const
 {
   return mAudioChannel;
 }
 
 void
 AudioDestinationNode::SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv)
@@ -611,18 +586,16 @@ AudioDestinationNode::CreateAudioChannel
   mAudioChannelAgent = new AudioChannelAgent();
   mAudioChannelAgent->InitWithWeakCallback(GetOwner(),
                                            static_cast<int32_t>(mAudioChannel),
                                            this);
 
   // The AudioChannelAgent must start playing immediately in order to avoid
   // race conditions with mozinterruptbegin/end events.
   InputMuted(false);
-
-  WindowAudioCaptureChanged();
 }
 
 void
 AudioDestinationNode::NotifyStableState()
 {
   mExtraCurrentTimeUpdatedSinceLastStableState = false;
 }
 
@@ -704,14 +677,13 @@ AudioDestinationNode::InputMuted(bool aM
 
   float volume = 0.0;
   bool muted = true;
   nsresult rv = mAudioChannelAgent->NotifyStartedPlaying(&volume, &muted);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return;
   }
 
-  WindowAudioCaptureChanged();
   WindowVolumeChanged(volume, muted);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/AudioDestinationNode.h
+++ b/dom/media/webaudio/AudioDestinationNode.h
@@ -94,29 +94,27 @@ private:
 
   void NotifyStableState();
   void ScheduleStableStateNotification();
 
   SelfReference<AudioDestinationNode> mOfflineRenderingRef;
   uint32_t mFramesToProduce;
 
   nsCOMPtr<nsIAudioChannelAgent> mAudioChannelAgent;
-  nsRefPtr<MediaInputPort> mCaptureStreamPort;
 
   nsRefPtr<Promise> mOfflineRenderingPromise;
 
   // Audio Channel Type.
   AudioChannel mAudioChannel;
   bool mIsOffline;
   bool mAudioChannelAgentPlaying;
 
   TimeStamp mStartedBlockingDueToBeingOnlyNode;
   double mExtraCurrentTime;
   double mExtraCurrentTimeSinceLastStartedBlocking;
   bool mExtraCurrentTimeUpdatedSinceLastStableState;
-  bool mCaptured;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
 
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -286,23 +286,16 @@ void
 MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
                                          nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources)
 {
   ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
   // We spawn threads to handle gUM runnables, so we must protect the member vars
   MutexAutoLock lock(mMutex);
 
-  if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
-    nsRefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
-      new MediaEngineWebRTCAudioCaptureSource(nullptr);
-    aASources->AppendElement(audioCaptureSource);
-    return;
-  }
-
 #ifdef MOZ_WIDGET_ANDROID
   jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
 
   // get the JVM
   JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM();
   JNIEnv *env = GetJNIForThread();
 
   if (webrtc::VoiceEngine::SetAndroidObjects(jvm, env, (void*)context) != 0) {
@@ -360,24 +353,25 @@ MediaEngineWebRTC::EnumerateAudioDevices
     }
 
     if (uniqueId[0] == '\0') {
       // Mac and Linux don't set uniqueId!
       MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia
       strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
     }
 
-    nsRefPtr<MediaEngineAudioSource> aSource;
+    nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
     NS_ConvertUTF8toUTF16 uuid(uniqueId);
     if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
       // We've already seen this device, just append.
       aASources->AppendElement(aSource.get());
     } else {
-      aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
-                                                      deviceName, uniqueId);
+      aSource = new MediaEngineWebRTCAudioSource(
+        mThread, mVoiceEngine, i, deviceName, uniqueId
+      );
       mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
       aASources->AppendElement(aSource);
     }
   }
 }
 
 static PLDHashOperator
 ClearVideoSource (const nsAString&, // unused
@@ -386,18 +380,19 @@ ClearVideoSource (const nsAString&, // u
 {
   if (aData) {
     aData->Shutdown();
   }
   return PL_DHASH_NEXT;
 }
 
 static PLDHashOperator
-ClearAudioSource(const nsAString &, // unused
-                 MediaEngineAudioSource *aData, void *userArg)
+ClearAudioSource (const nsAString&, // unused
+                  MediaEngineWebRTCAudioSource* aData,
+                  void *userArg)
 {
   if (aData) {
     aData->Shutdown();
   }
   return PL_DHASH_NEXT;
 }
 
 void
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -128,87 +128,23 @@ private:
 
   int mMinFps; // Min rate we want to accept
   dom::MediaSourceEnum mMediaSource; // source of media (camera | application | screen)
 
   size_t NumCapabilities() override;
   void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) override;
 };
 
-class MediaEngineWebRTCAudioCaptureSource : public MediaEngineAudioSource
+class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
+                                     public webrtc::VoEMediaProcess,
+                                     private MediaConstraintsHelper
 {
 public:
-  NS_DECL_THREADSAFE_ISUPPORTS
-
-  explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
-    : MediaEngineAudioSource(kReleased)
-  {
-  }
-  void GetName(nsAString& aName) override;
-  void GetUUID(nsACString& aUUID) override;
-  nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
-                    const MediaEnginePrefs& aPrefs,
-                    const nsString& aDeviceId) override
-  {
-    // Nothing to do here, everything is managed in MediaManager.cpp
-    return NS_OK;
-  }
-  nsresult Deallocate() override
-  {
-    // Nothing to do here, everything is managed in MediaManager.cpp
-    return NS_OK;
-  }
-  void Shutdown() override
-  {
-    // Nothing to do here, everything is managed in MediaManager.cpp
-  }
-  nsresult Start(SourceMediaStream* aMediaStream, TrackID aId) override;
-  nsresult Stop(SourceMediaStream* aMediaStream, TrackID aId) override;
-  void SetDirectListeners(bool aDirect) override
-  {}
-  nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn,
-                  uint32_t aAGC, bool aNoiseOn, uint32_t aNoise,
-                  int32_t aPlayoutDelay) override
-  {
-    return NS_OK;
-  }
-  void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
-                  TrackID aID, StreamTime aDesiredTime) override
-  {}
-  const dom::MediaSourceEnum GetMediaSource() override
-  {
-    return dom::MediaSourceEnum::AudioCapture;
-  }
-  bool IsFake() override
-  {
-    return false;
-  }
-  nsresult TakePhoto(PhotoCallback* aCallback) override
-  {
-    return NS_ERROR_NOT_IMPLEMENTED;
-  }
-  uint32_t GetBestFitnessDistance(
-    const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
-    const nsString& aDeviceId) override;
-
-protected:
-  virtual ~MediaEngineWebRTCAudioCaptureSource() { Shutdown(); }
-  nsCString mUUID;
-};
-
-class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
-                                          public webrtc::VoEMediaProcess,
-                                          private MediaConstraintsHelper
-{
-public:
-  MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
-                                    webrtc::VoiceEngine* aVoiceEnginePtr,
-                                    int aIndex,
-                                    const char* name,
-                                    const char* uuid)
+  MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
+                               int aIndex, const char* name, const char* uuid)
     : MediaEngineAudioSource(kReleased)
     , mVoiceEngine(aVoiceEnginePtr)
     , mMonitor("WebRTCMic.Monitor")
     , mThread(aThread)
     , mCapIndex(aIndex)
     , mChannel(-1)
     , mInitDone(false)
     , mStarted(false)
@@ -266,17 +202,17 @@ public:
                int16_t audio10ms[], int length,
                int samplingFreq, bool isStereo) override;
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
   virtual void Shutdown() override;
 
 protected:
-  ~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
+  ~MediaEngineWebRTCAudioSource() { Shutdown(); }
 
 private:
   void Init();
 
   webrtc::VoiceEngine* mVoiceEngine;
   ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
@@ -353,14 +289,14 @@ private:
   bool mBrowserEngineInit;
   bool mWinEngineInit;
   bool mAppEngineInit;
   bool mHasTabVideoSource;
 
   // Store devices we've already seen in a hashtable for quick return.
   // Maps UUID to MediaEngineSource (one set for audio, one for video).
   nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineAudioSource> mAudioSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
 };
 
 }
 
 #endif /* NSMEDIAENGINEWEBRTC_H_ */
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -36,20 +36,19 @@ namespace mozilla {
 #undef LOG
 #endif
 
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
 #define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 /**
- * Webrtc microphone source.
+ * Webrtc audio source.
  */
-NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
-NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
 
 // XXX temp until MSG supports registration
 StaticRefPtr<AudioOutputObserver> gFarendObserver;
 
 AudioOutputObserver::AudioOutputObserver()
   : mPlayoutFreq(0)
   , mPlayoutChannels(0)
   , mChunkSize(0)
@@ -173,40 +172,40 @@ AudioOutputObserver::InsertFarEnd(const 
         mSaved = nullptr;
         mSamplesSaved = 0;
       }
     }
   }
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
+MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
 {
   if (mInitDone) {
     aName.Assign(mDeviceName);
   }
 
   return;
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
+MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
 {
   if (mInitDone) {
     aUUID.Assign(mDeviceUUID);
   }
 
   return;
 }
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::Config(bool aEchoOn, uint32_t aEcho,
-                                          bool aAgcOn, uint32_t aAGC,
-                                          bool aNoiseOn, uint32_t aNoise,
-                                          int32_t aPlayoutDelay)
+MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
+                                     bool aAgcOn, uint32_t aAGC,
+                                     bool aNoiseOn, uint32_t aNoise,
+                                     int32_t aPlayoutDelay)
 {
   LOG(("Audio config: aec: %d, agc: %d, noise: %d",
        aEchoOn ? aEcho : -1,
        aAgcOn ? aAGC : -1,
        aNoiseOn ? aNoise : -1));
 
   bool update_echo = (mEchoOn != aEchoOn);
   bool update_agc = (mAgcOn != aAgcOn);
@@ -263,33 +262,33 @@ MediaEngineWebRTCMicrophoneSource::Confi
 // GetBestFitnessDistance returns the best distance the capture device can offer
 // as a whole, given an accumulated number of ConstraintSets.
 // Ideal values are considered in the first ConstraintSet only.
 // Plain values are treated as Ideal in the first ConstraintSet.
 // Plain values are treated as Exact in subsequent ConstraintSets.
 // Infinity = UINT32_MAX e.g. device cannot satisfy accumulated ConstraintSets.
 // A finite result may be used to calculate this device's ranking as a choice.
 
-uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
+uint32_t MediaEngineWebRTCAudioSource::GetBestFitnessDistance(
     const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
     const nsString& aDeviceId)
 {
   uint32_t distance = 0;
 
   for (const MediaTrackConstraintSet* cs : aConstraintSets) {
     distance = GetMinimumFitnessDistance(*cs, false, aDeviceId);
     break; // distance is read from first entry only
   }
   return distance;
 }
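The rules in the comment above correspond, on the content-JS side, to how plain and advanced constraints are written. An illustrative sketch (the deviceId value is a placeholder):

// First ConstraintSet: plain values act as ideal (soft preference).
// Sets in "advanced": plain values act as exact (hard requirement).
navigator.mediaDevices.getUserMedia({
  audio: {
    deviceId: "1234",                // treated as ideal here
    advanced: [{ deviceId: "1234" }] // treated as exact here
  }
}).then(function(stream) { /* devices were ranked by fitness distance */ });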
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
-                                            const MediaEnginePrefs &aPrefs,
-                                            const nsString& aDeviceId)
+MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
+                                       const MediaEnginePrefs &aPrefs,
+                                       const nsString& aDeviceId)
 {
   if (mState == kReleased) {
     if (mInitDone) {
       ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
       if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
         return NS_ERROR_FAILURE;
       }
       mState = kAllocated;
@@ -305,17 +304,17 @@ MediaEngineWebRTCMicrophoneSource::Alloc
     } else {
       LOG(("Audio device %d allocated shared", mCapIndex));
     }
   }
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::Deallocate()
+MediaEngineWebRTCAudioSource::Deallocate()
 {
   bool empty;
   {
     MonitorAutoLock lock(mMonitor);
     empty = mSources.IsEmpty();
   }
   if (empty) {
    // If empty, no callbacks to deliver data should be occurring
@@ -327,18 +326,17 @@ MediaEngineWebRTCMicrophoneSource::Deall
     LOG(("Audio device %d deallocated", mCapIndex));
   } else {
     LOG(("Audio device %d deallocated but still in use", mCapIndex));
   }
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
-                                         TrackID aID)
+MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
 {
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
   }
 
   {
     MonitorAutoLock lock(mMonitor);
     mSources.AppendElement(aStream);
@@ -381,17 +379,17 @@ MediaEngineWebRTCMicrophoneSource::Start
 
   // Attach external media processor, so this::Process will be called.
   mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);
 
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
+MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
   {
     MonitorAutoLock lock(mMonitor);
 
     if (!mSources.RemoveElement(aSource)) {
       // Already stopped - this is allowed
       return NS_OK;
     }
@@ -418,27 +416,27 @@ MediaEngineWebRTCMicrophoneSource::Stop(
   }
   if (mVoEBase->StopReceive(mChannel)) {
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
-                                              SourceMediaStream *aSource,
-                                              TrackID aID,
-                                              StreamTime aDesiredTime)
+MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
+                                         SourceMediaStream *aSource,
+                                         TrackID aID,
+                                         StreamTime aDesiredTime)
 {
   // Ignore - we push audio data
   LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::Init()
+MediaEngineWebRTCAudioSource::Init()
 {
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
 
   mVoEBase->Init();
 
   mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
   if (!mVoERender) {
     return;
@@ -493,17 +491,17 @@ MediaEngineWebRTCMicrophoneSource::Init(
   codec.pltype = 0; // Default payload type
 
   if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
     mInitDone = true;
   }
 }
 
 void
-MediaEngineWebRTCMicrophoneSource::Shutdown()
+MediaEngineWebRTCAudioSource::Shutdown()
 {
   if (!mInitDone) {
     // duplicate these here in case we failed during Init()
     if (mChannel != -1 && mVoENetwork) {
       mVoENetwork->DeRegisterExternalTransport(mChannel);
     }
 
     delete mNullTransport;
@@ -548,20 +546,19 @@ MediaEngineWebRTCMicrophoneSource::Shutd
 
   mState = kReleased;
   mInitDone = false;
 }
 
 typedef int16_t sample;
 
 void
-MediaEngineWebRTCMicrophoneSource::Process(int channel,
-                                           webrtc::ProcessingTypes type,
-                                           sample *audio10ms, int length,
-                                           int samplingFreq, bool isStereo)
+MediaEngineWebRTCAudioSource::Process(int channel,
+  webrtc::ProcessingTypes type, sample* audio10ms,
+  int length, int samplingFreq, bool isStereo)
 {
   // On initial capture, throw away all far-end data except the most recent sample
  // since it's already irrelevant and we want to avoid confusing the AEC far-end
   // input code with "old" audio.
   if (!mStarted) {
     mStarted  = true;
     while (gFarendObserver->Size() > 1) {
       free(gFarendObserver->Pop()); // only call if size() > 0
@@ -616,60 +613,9 @@ MediaEngineWebRTCMicrophoneSource::Proce
                                           mTrackID, segment, (AudioSegment *) nullptr),
                     NS_DISPATCH_NORMAL);
     }
   }
 
   return;
 }
 
-void
-MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName)
-{
-  aName.AssignLiteral("AudioCapture");
 }
-void
-MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID)
-{
-  nsID uuid;
-  char uuidBuffer[NSID_LENGTH];
-  nsCString asciiString;
-  ErrorResult rv;
-
-  rv = nsContentUtils::GenerateUUIDInPlace(uuid);
-  if (rv.Failed()) {
-    aUUID.AssignLiteral("");
-    return;
-  }
-
-
-  uuid.ToProvidedString(uuidBuffer);
-  asciiString.AssignASCII(uuidBuffer);
-
-  // Remove {} and the null terminator
-  aUUID.Assign(Substring(asciiString, 1, NSID_LENGTH - 3));
-}
-
-nsresult
-MediaEngineWebRTCAudioCaptureSource::Start(SourceMediaStream *aMediaStream,
-                                           TrackID aId)
-{
-  aMediaStream->AddTrack(aId, 0, new AudioSegment());
-  return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCAudioCaptureSource::Stop(SourceMediaStream *aMediaStream,
-                                          TrackID aId)
-{
-  aMediaStream->EndAllTrackAndFinish();
-  return NS_OK;
-}
-
-uint32_t
-MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
-    const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
-    const nsString& aDeviceId)
-{
-  // There is only one way of capturing audio for now, and it's always adequate.
-  return 0;
-}
-}
--- a/dom/webidl/Constraints.webidl
+++ b/dom/webidl/Constraints.webidl
@@ -20,17 +20,16 @@ enum VideoFacingModeEnum {
 
 enum MediaSourceEnum {
     "camera",
     "screen",
     "application",
     "window",
     "browser",
     "microphone",
-    "audioCapture",
     "other"
 };
 
 dictionary ConstrainLongRange {
     long min;
     long max;
     long exact;
     long ideal;
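For context, the enum value dropped here is exactly what the deleted mochitest requested. A sketch of that call shape using the standard promise API rather than the harness wrapper; once the value is gone from MediaSourceEnum, constraint conversion rejects it:

// Pre-backout request shape, for reference; after this backout the
// "audioCapture" value no longer parses as a MediaSourceEnum.
var constraints = { audio: { mediaSource: "audioCapture" } };
navigator.mediaDevices.getUserMedia(constraints)
  .then(function(stream) { console.log("capturing tab output", stream); })
  .catch(function(err) { console.error("rejected:", err); });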
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -440,18 +440,16 @@ pref("media.getusermedia.screensharing.e
 pref("media.getusermedia.screensharing.allowed_domains", "webex.com,*.webex.com,ciscospark.com,*.ciscospark.com,projectsquared.com,*.projectsquared.com,*.room.co,room.co,beta.talky.io,talky.io,*.clearslide.com,appear.in,*.appear.in,tokbox.com,*.tokbox.com,*.sso.francetelecom.fr,*.si.francetelecom.fr,*.sso.infra.ftgroup,*.multimedia-conference.orange-business.com,*.espacecollaboration.orange-business.com,free.gotomeeting.com,g2m.me,*.g2m.me,example.com,*.mypurecloud.com,*.mypurecloud.com.au,spreed.me,*.spreed.me,*.spreed.com");
 #else
  // temporary value, not intended for release - bug 1049087
 pref("media.getusermedia.screensharing.allowed_domains", "mozilla.github.io,webex.com,*.webex.com,ciscospark.com,*.ciscospark.com,projectsquared.com,*.projectsquared.com,*.room.co,room.co,beta.talky.io,talky.io,*.clearslide.com,appear.in,*.appear.in,tokbox.com,*.tokbox.com,*.sso.francetelecom.fr,*.si.francetelecom.fr,*.sso.infra.ftgroup,*.multimedia-conference.orange-business.com,*.espacecollaboration.orange-business.com,free.gotomeeting.com,g2m.me,*.g2m.me,example.com,*.mypurecloud.com,*.mypurecloud.com.au,spreed.me,*.spreed.me,*.spreed.com");
 #endif
 // OS/X 10.6 and XP have screen/window sharing off by default due to various issues - Caveat emptor
 pref("media.getusermedia.screensharing.allow_on_old_platforms", false);
 
-pref("media.getusermedia.audiocapture.enabled", false);
-
 // TextTrack support
 pref("media.webvtt.enabled", true);
 pref("media.webvtt.regions.enabled", false);
 
 // AudioTrack and VideoTrack support
 pref("media.track.enabled", false);
 
 // Whether to enable MediaSource support.