Backed out 14 changesets (bug 1652884) for VideoFrameConverter related failures CLOSED TREE
author Bogdan Tara <btara@mozilla.com>
date Wed, 19 Aug 2020 22:21:31 +0300
changeset 545361 e8054b77177ab72fe86d6f2038f1c23cc44af0b8
parent 545360 2cf48c80e9436f154f899f5f472df80015bc9370
child 545362 b803fd88181eaf33ed1932e959cbf538f7d95e58
push id 37713
push user abutkovits@mozilla.com
push date Thu, 20 Aug 2020 09:32:09 +0000
treeherder mozilla-central@8cb700c12bd3
bugs 1652884
milestone 81.0a1
backs out 28c4e8c373f08bceba73d69cdefa24193a79b05b
658ba8f39abe4553e0c06706b867fc86f9239625
8e67fe040e4a789fb23f8f7fd1efbff781b49b4a
6f5833203763300a865aa15deae199bdf79ae489
569ff85dfc2e1b433c6fa22294f74ac39636db21
eaa17164344727e662a3752bc9feecfe03cbb84e
6b37b60b66620295194ea2ff3eddd09a7fe9ab50
438cce7456fb7add5a59a0f871bdd3f671c4b2fa
e6ed13952b67a958665a91db581e9dba0bb9b605
e0b1266231bf472595395b33aed343780d199065
32f4aae2b5fe9f9de0b4d061fe6dcb0923aaa338
76b4abccd61b0bdcc4c8ed3440735a4bf990f602
9010365ffa661be4ec5272342f3ffc1e794baf62
763f39eb5c137f27425b2922a79899145dfe98b6
Backed out 14 changesets (bug 1652884) for VideoFrameConverter related failures CLOSED TREE

Backed out changeset 28c4e8c373f0 (bug 1652884)
Backed out changeset 658ba8f39abe (bug 1652884)
Backed out changeset 8e67fe040e4a (bug 1652884)
Backed out changeset 6f5833203763 (bug 1652884)
Backed out changeset 569ff85dfc2e (bug 1652884)
Backed out changeset eaa171643447 (bug 1652884)
Backed out changeset 6b37b60b6662 (bug 1652884)
Backed out changeset 438cce7456fb (bug 1652884)
Backed out changeset e6ed13952b67 (bug 1652884)
Backed out changeset e0b1266231bf (bug 1652884)
Backed out changeset 32f4aae2b5fe (bug 1652884)
Backed out changeset 76b4abccd61b (bug 1652884)
Backed out changeset 9010365ffa66 (bug 1652884)
Backed out changeset 763f39eb5c13 (bug 1652884)
browser/actors/WebRTCChild.jsm
browser/base/content/test/webrtc/browser_devices_get_user_media_paused.js
browser/base/content/test/webrtc/get_user_media.html
dom/html/HTMLMediaElement.cpp
dom/media/ForwardedInputTrack.cpp
dom/media/ForwardedInputTrack.h
dom/media/MediaManager.cpp
dom/media/MediaManager.h
dom/media/MediaStreamTrack.cpp
dom/media/MediaStreamTrack.h
dom/media/MediaTrackGraph.cpp
dom/media/MediaTrackGraph.h
dom/media/VideoFrameConverter.h
dom/media/VideoOutput.h
dom/media/gtest/TestVideoTrackEncoder.cpp
dom/media/webaudio/AudioDestinationNode.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
modules/libpref/init/StaticPrefList.yaml
--- a/browser/actors/WebRTCChild.jsm
+++ b/browser/actors/WebRTCChild.jsm
@@ -117,30 +117,16 @@ class WebRTCChild extends JSWindowActorC
         break;
       case "webrtc:StopSharing":
         Services.obs.notifyObservers(
           null,
           "getUserMedia:revoke",
           aMessage.data
         );
         break;
-      case "webrtc:MuteCamera":
-        Services.obs.notifyObservers(
-          null,
-          "getUserMedia:muteVideo",
-          aMessage.data
-        );
-        break;
-      case "webrtc:UnmuteCamera":
-        Services.obs.notifyObservers(
-          null,
-          "getUserMedia:unmuteVideo",
-          aMessage.data
-        );
-        break;
     }
   }
 }
 
 function getActorForWindow(window) {
   let windowGlobal = window.windowGlobalChild;
   try {
     if (windowGlobal) {
--- a/browser/base/content/test/webrtc/browser_devices_get_user_media_paused.js
+++ b/browser/base/content/test/webrtc/browser_devices_get_user_media_paused.js
@@ -1,58 +1,27 @@
 /* Any copyright is dedicated to the Public Domain.
  * http://creativecommons.org/publicdomain/zero/1.0/ */
 
-async function setCameraMuted(mute) {
-  const windowId = gBrowser.selectedBrowser.innerWindowID;
-  return SpecialPowers.spawn(
-    gBrowser.selectedBrowser,
-    [{ mute, windowId }],
-    function(args) {
-      Services.obs.notifyObservers(
-        content.window,
-        args.mute ? "getUserMedia:muteVideo" : "getUserMedia:unmuteVideo",
-        JSON.stringify(args.windowId)
-      );
-    }
-  );
-}
-
 function setTrackEnabled(audio, video) {
   return SpecialPowers.spawn(
     gBrowser.selectedBrowser,
     [{ audio, video }],
     function(args) {
       let stream = content.wrappedJSObject.gStreams[0];
       if (args.audio != null) {
         stream.getAudioTracks()[0].enabled = args.audio;
       }
       if (args.video != null) {
         stream.getVideoTracks()[0].enabled = args.video;
       }
     }
   );
 }
 
-async function getVideoTrackMuted() {
-  return SpecialPowers.spawn(
-    gBrowser.selectedBrowser,
-    [],
-    () => content.wrappedJSObject.gStreams[0].getVideoTracks()[0].muted
-  );
-}
-
-async function getVideoTrackEvents() {
-  return SpecialPowers.spawn(
-    gBrowser.selectedBrowser,
-    [],
-    () => content.wrappedJSObject.gVideoEvents
-  );
-}
-
 function cloneTracks(audio, video) {
   return SpecialPowers.spawn(
     gBrowser.selectedBrowser,
     [{ audio, video }],
     function(args) {
       if (!content.wrappedJSObject.gClones) {
         content.wrappedJSObject.gClones = [];
       }
@@ -89,17 +58,17 @@ function stopClonedTracks(audio, video) 
     }
   );
 }
 
 var gTests = [
   {
     desc:
       "getUserMedia audio+video: disabling the stream shows the paused indicator",
-    run: async function checkDisabled() {
+    run: async function checkPaused() {
       let observerPromise = expectObserverCalled("getUserMedia:request");
       let promise = promisePopupNotificationShown("webRTC-shareDevices");
       await promiseRequestDevice(true, true);
       await promise;
       await observerPromise;
       checkDeviceSelectors(true, true);
 
       let indicator = promiseIndicatorWindow();
@@ -122,17 +91,18 @@ var gTests = [
         video: STATE_CAPTURE_ENABLED,
         audio: STATE_CAPTURE_ENABLED,
       });
 
       // Disable both audio and video.
       observerPromise = expectObserverCalled("recording-device-events", 2);
       await setTrackEnabled(false, false);
 
-      // Wait for capture state to propagate to the UI asynchronously.
+      // It sometimes takes a bit longer before the change propagates to the UI,
+      // so wait for it to avoid intermittents.
       await BrowserTestUtils.waitForCondition(
         () =>
           window.gIdentityHandler._sharingState.webRTC.camera ==
           STATE_CAPTURE_DISABLED,
         "video should be disabled"
       );
 
       await observerPromise;
@@ -182,17 +152,17 @@ var gTests = [
       });
       await closeStream();
     },
   },
 
   {
     desc:
       "getUserMedia audio+video: disabling the original tracks and stopping enabled clones shows the paused indicator",
-    run: async function checkDisabledAfterCloneStop() {
+    run: async function checkPausedAfterCloneStop() {
       let observerPromise = expectObserverCalled("getUserMedia:request");
       let promise = promisePopupNotificationShown("webRTC-shareDevices");
       await promiseRequestDevice(true, true);
       await promise;
       await observerPromise;
       checkDeviceSelectors(true, true);
 
       let indicator = promiseIndicatorWindow();
@@ -222,17 +192,18 @@ var gTests = [
       // Disable both audio and video.
       await setTrackEnabled(false, false);
 
       observerPromise = expectObserverCalled("recording-device-events", 2);
 
       // Stop the clones. This should disable the sharing indicators.
       await stopClonedTracks(true, true);
 
-      // Wait for capture state to propagate to the UI asynchronously.
+      // It sometimes takes a bit longer before the change propagates to the UI,
+      // so wait for it to avoid intermittents.
       await BrowserTestUtils.waitForCondition(
         () =>
           window.gIdentityHandler._sharingState.webRTC.camera ==
             STATE_CAPTURE_DISABLED &&
           window.gIdentityHandler._sharingState.webRTC.microphone ==
             STATE_CAPTURE_DISABLED,
         "video and audio should be disabled"
       );
@@ -284,17 +255,17 @@ var gTests = [
       });
       await closeStream();
     },
   },
 
   {
     desc:
       "getUserMedia screen: disabling the stream shows the paused indicator",
-    run: async function checkScreenDisabled() {
+    run: async function checkScreenPaused() {
       let observerPromise = expectObserverCalled("getUserMedia:request");
       let promise = promisePopupNotificationShown("webRTC-shareDevices");
       await promiseRequestDevice(false, true, null, "screen");
       await promise;
       await observerPromise;
 
       is(
         PopupNotifications.getNotification("webRTC-shareDevices").anchorID,
@@ -326,17 +297,18 @@ var gTests = [
       );
 
       await indicator;
       await checkSharingUI({ screen: "Screen" });
 
       observerPromise = expectObserverCalled("recording-device-events");
       await setTrackEnabled(null, false);
 
-      // Wait for capture state to propagate to the UI asynchronously.
+      // It sometimes takes a bit longer before the change propagates to the UI,
+      // so wait for it to avoid intermittents.
       await BrowserTestUtils.waitForCondition(
         () =>
           window.gIdentityHandler._sharingState.webRTC.screen == "ScreenPaused",
         "screen should be disabled"
       );
       await observerPromise;
       await checkSharingUI({ screen: "ScreenPaused" }, window, {
         screen: "Screen",
@@ -349,321 +321,16 @@ var gTests = [
         () => window.gIdentityHandler._sharingState.webRTC.screen == "Screen",
         "screen should be enabled"
       );
       await observerPromise;
       await checkSharingUI({ screen: "Screen" });
       await closeStream();
     },
   },
-
-  {
-    desc:
-      "getUserMedia audio+video: muting the camera shows the muted indicator",
-    run: async function checkMuted() {
-      let observerPromise = expectObserverCalled("getUserMedia:request");
-      let promise = promisePopupNotificationShown("webRTC-shareDevices");
-      await promiseRequestDevice(true, true);
-      await promise;
-      await observerPromise;
-      checkDeviceSelectors(true, true);
-
-      let indicator = promiseIndicatorWindow();
-      let observerPromise1 = expectObserverCalled(
-        "getUserMedia:response:allow"
-      );
-      let observerPromise2 = expectObserverCalled("recording-device-events");
-      await promiseMessage("ok", () => {
-        PopupNotifications.panel.firstElementChild.button.click();
-      });
-      await observerPromise1;
-      await observerPromise2;
-      Assert.deepEqual(
-        await getMediaCaptureState(),
-        { audio: true, video: true },
-        "expected camera and microphone to be shared"
-      );
-      await indicator;
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track starts unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        [],
-        "no video track events fired yet"
-      );
-
-      // Mute camera.
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setCameraMuted(true);
-
-      // Wait for capture state to propagate to the UI asynchronously.
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_DISABLED,
-        "video should be muted"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show only camera as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is muted");
-      Assert.deepEqual(await getVideoTrackEvents(), ["mute"], "mute fired");
-
-      // Unmute video again.
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setCameraMuted(false);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_ENABLED,
-        "video should be enabled"
-      );
-
-      await observerPromise;
-
-      // Both streams should show as running.
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track is unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute"],
-        "unmute fired"
-      );
-      await closeStream();
-    },
-  },
-
-  {
-    desc: "getUserMedia audio+video: disabling & muting camera in combination",
-    // Test the following combinations of disabling and muting camera:
-    // 1. Disable video track only.
-    // 2. Mute camera & disable audio (to have a condition to wait for)
-    // 3. Enable both audio and video tracks (only audio should flow).
-    // 4. Unmute camera again (video should flow).
-    // 5. Mute camera & disable both tracks.
-    // 6. Unmute camera & enable audio (only audio should flow)
-    // 7. Enable video track again (video should flow).
-    run: async function checkDisabledMutedCombination() {
-      let observerPromise = expectObserverCalled("getUserMedia:request");
-      let promise = promisePopupNotificationShown("webRTC-shareDevices");
-      await promiseRequestDevice(true, true);
-      await promise;
-      await observerPromise;
-      checkDeviceSelectors(true, true);
-
-      let indicator = promiseIndicatorWindow();
-      let observerPromise1 = expectObserverCalled(
-        "getUserMedia:response:allow"
-      );
-      let observerPromise2 = expectObserverCalled("recording-device-events");
-      await promiseMessage("ok", () => {
-        PopupNotifications.panel.firstElementChild.button.click();
-      });
-      await observerPromise1;
-      await observerPromise2;
-      Assert.deepEqual(
-        await getMediaCaptureState(),
-        { audio: true, video: true },
-        "expected camera and microphone to be shared"
-      );
-      await indicator;
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-
-      // 1. Disable video track only.
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setTrackEnabled(null, false);
-
-      // Wait for capture state to propagate to the UI asynchronously.
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_DISABLED,
-        "video should be disabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show only video as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track still unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        [],
-        "no video track events fired yet"
-      );
-
-      // 2. Mute camera & disable audio (to have a condition to wait for)
-      observerPromise = expectObserverCalled("recording-device-events", 2);
-      await setCameraMuted(true);
-      await setTrackEnabled(false, null);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.microphone ==
-          STATE_CAPTURE_DISABLED,
-        "audio should be disabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show both as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_DISABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is muted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute"],
-        "mute is still fired even though track was disabled"
-      );
-
-      // 3. Enable both audio and video tracks (only audio should flow).
-      observerPromise = expectObserverCalled("recording-device-events", 2);
-      await setTrackEnabled(true, true);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.microphone ==
-          STATE_CAPTURE_ENABLED,
-        "audio should be enabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show only audio as enabled, as video is muted.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is still muted");
-      Assert.deepEqual(await getVideoTrackEvents(), ["mute"], "no new events");
-
-      // 4. Unmute camera again (video should flow).
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setCameraMuted(false);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_ENABLED,
-        "video should be enabled"
-      );
-
-      await observerPromise;
-
-      // Both streams should show as running.
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track is unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute"],
-        "unmute fired"
-      );
-
-      // 5. Mute camera & disable both tracks.
-      observerPromise = expectObserverCalled("recording-device-events", 3);
-      await setCameraMuted(true);
-      await setTrackEnabled(false, false);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_DISABLED,
-        "video should be disabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show both as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_DISABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is muted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute", "mute"],
-        "mute fired afain"
-      );
-
-      // 6. Unmute camera & enable audio (only audio should flow)
-      observerPromise = expectObserverCalled("recording-device-events", 2);
-      await setCameraMuted(false);
-      await setTrackEnabled(true, null);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.microphone ==
-          STATE_CAPTURE_ENABLED,
-        "audio should be enabled"
-      );
-
-      await observerPromise;
-
-      // Only audio should show as running, as video track is still disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track is unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute", "mute", "unmute"],
-        "unmute fired even though track is disabled"
-      );
-
-      // 7. Enable video track again (video should flow).
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setTrackEnabled(null, true);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_ENABLED,
-        "video should be enabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show both as running again.
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track remains unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute", "mute", "unmute"],
-        "no new events fired"
-      );
-      await closeStream();
-    },
-  },
 ];
 
 add_task(async function test() {
   await SpecialPowers.pushPrefEnv({
     set: [
       ["media.getusermedia.camera.off_while_disabled.delay_ms", 0],
       ["media.getusermedia.microphone.off_while_disabled.delay_ms", 0],
     ],
--- a/browser/base/content/test/webrtc/get_user_media.html
+++ b/browser/base/content/test/webrtc/get_user_media.html
@@ -19,22 +19,24 @@ try {
 
 function message(m) {
   // eslint-disable-next-line no-unsanitized/property
   document.getElementById("message").innerHTML = m;
   parent.postMessage(m, "*");
 }
 
 var gStreams = [];
-var gVideoEvents = [];
 
-async function requestDevice(aAudio, aVideo, aShare, aBadDevice = false) {
-  const opts = {video: aVideo, audio: aAudio};
+function requestDevice(aAudio, aVideo, aShare, aBadDevice = false) {
+  var opts = {video: aVideo, audio: aAudio};
   if (aShare) {
-    opts.video = { mediaSource: aShare };
+    opts.video = {
+      mozMediaSource: aShare,
+      mediaSource: aShare,
+    };
   }
   if (useFakeStreams) {
     opts.fake = true;
   }
 
   if (aVideo && aBadDevice) {
     opts.video = {
       deviceId: "bad device",
@@ -44,38 +46,29 @@ async function requestDevice(aAudio, aVi
 
   if (aAudio && aBadDevice) {
     opts.audio = {
       deviceId: "bad device",
     };
     opts.fake = true;
   }
 
-  try {
-    const stream = await navigator.mediaDevices.getUserMedia(opts);
-    gStreams.push(stream);
-    const track = stream.getVideoTracks()[0];
-    if (track) {
-      for (const name of ["mute", "unmute", "ended"]) {
-        track.addEventListener(name, () => gVideoEvents.push(name));
-      }
-    }
-    message("ok");
-  } catch (err) {
-    message("error: " + err);
-  }
+  navigator.mediaDevices.getUserMedia(opts)
+    .then(stream => {
+      gStreams.push(stream);
+      message("ok");
+    }, err => message("error: " + err));
 }
 message("pending");
 
 function closeStream() {
   for (let stream of gStreams) {
     if (stream) {
       stream.getTracks().forEach(t => t.stop());
       stream = null;
     }
   }
   gStreams = [];
-  gVideoEvents = [];
   message("closed");
 }
 </script>
 </body>
 </html>
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -1125,18 +1125,18 @@ class HTMLMediaElement::MediaElementTrac
     mCapturedTrack->AddConsumer(this);
     mCapturedTrackSource->RegisterSink(this);
   }
 
   void SetEnabled(bool aEnabled) {
     if (!mTrack) {
       return;
     }
-    mTrack->SetDisabledTrackMode(aEnabled ? DisabledTrackMode::ENABLED
-                                          : DisabledTrackMode::SILENCE_FREEZE);
+    mTrack->SetEnabled(aEnabled ? DisabledTrackMode::ENABLED
+                                : DisabledTrackMode::SILENCE_FREEZE);
   }
 
   void SetPrincipal(RefPtr<nsIPrincipal> aPrincipal) {
     mPrincipal = std::move(aPrincipal);
     MediaStreamTrackSource::PrincipalChanged();
   }
 
   void SetMutedByElement(OutputMuteState aMuteState) {
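Note on the media hunks here and below: both sides of this patch steer tracks through Gecko's DisabledTrackMode states, so the rename above (SetDisabledTrackMode back to SetEnabled) only changes the entry point, not the modes themselves. A minimal sketch of the enum as its members are used in this diff (reconstructed from usage; the defining header is not part of the patch):

    // Sketch only, not the verbatim Gecko header.
    enum class DisabledTrackMode {
      ENABLED,         // media flows normally
      SILENCE_FREEZE,  // audio silenced, video frozen on the last frame
                       // (used for page-initiated track disabling)
      SILENCE_BLACK,   // audio silenced, video replaced by black frames
                       // (used by the backed-out user-agent mute path)
    };

The backed-out feature used SILENCE_BLACK to distinguish user-agent muting from page-initiated disabling (SILENCE_FREEZE); with the backout only the SILENCE_FREEZE path remains.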
--- a/dom/media/ForwardedInputTrack.cpp
+++ b/dom/media/ForwardedInputTrack.cpp
@@ -61,45 +61,34 @@ void ForwardedInputTrack::RemoveInput(Me
   for (const auto& listener : mOwnedDirectListeners) {
     MediaTrack* source = mInputPort->GetSource();
     TRACK_LOG(LogLevel::Debug,
               ("ForwardedInputTrack %p removing direct listener "
                "%p. Forwarding to input track %p.",
                this, listener.get(), aPort->GetSource()));
     source->RemoveDirectListenerImpl(listener);
   }
-
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  mInputDisabledMode = DisabledTrackMode::ENABLED;
-  NotifyIfDisabledModeChangedFrom(oldMode);
-
   mInputPort = nullptr;
   ProcessedMediaTrack::RemoveInput(aPort);
 }
 
 void ForwardedInputTrack::SetInput(MediaInputPort* aPort) {
   MOZ_ASSERT(aPort);
   MOZ_ASSERT(aPort->GetSource());
   MOZ_ASSERT(aPort->GetSource()->GetData());
   MOZ_ASSERT(!mInputPort);
-  MOZ_ASSERT(mInputDisabledMode == DisabledTrackMode::ENABLED);
-
   mInputPort = aPort;
 
   for (const auto& listener : mOwnedDirectListeners) {
     MediaTrack* source = mInputPort->GetSource();
     TRACK_LOG(LogLevel::Debug, ("ForwardedInputTrack %p adding direct listener "
                                 "%p. Forwarding to input track %p.",
                                 this, listener.get(), aPort->GetSource()));
     source->AddDirectListenerImpl(do_AddRef(listener));
   }
-
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  mInputDisabledMode = mInputPort->GetSource()->CombinedDisabledMode();
-  NotifyIfDisabledModeChangedFrom(oldMode);
 }
 
 void ForwardedInputTrack::ProcessInputImpl(MediaTrack* aSource,
                                            MediaSegment* aSegment,
                                            GraphTime aFrom, GraphTime aTo,
                                            uint32_t aFlags) {
   GraphTime next;
   for (GraphTime t = aFrom; t < aTo; t = next) {
@@ -176,29 +165,17 @@ void ForwardedInputTrack::ProcessInput(G
     MOZ_CRASH("Unknown segment type");
   }
 
   if (mEnded) {
     RemoveAllDirectListenersImpl();
   }
 }
 
-DisabledTrackMode ForwardedInputTrack::CombinedDisabledMode() const {
-  if (mDisabledMode == DisabledTrackMode::SILENCE_BLACK ||
-      mInputDisabledMode == DisabledTrackMode::SILENCE_BLACK) {
-    return DisabledTrackMode::SILENCE_BLACK;
-  }
-  if (mDisabledMode == DisabledTrackMode::SILENCE_FREEZE ||
-      mInputDisabledMode == DisabledTrackMode::SILENCE_FREEZE) {
-    return DisabledTrackMode::SILENCE_FREEZE;
-  }
-  return DisabledTrackMode::ENABLED;
-}
-
-void ForwardedInputTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
+void ForwardedInputTrack::SetEnabledImpl(DisabledTrackMode aMode) {
   bool enabled = aMode == DisabledTrackMode::ENABLED;
   TRACK_LOG(LogLevel::Info, ("ForwardedInputTrack %p was explicitly %s", this,
                              enabled ? "enabled" : "disabled"));
   for (DirectMediaTrackListener* listener : mOwnedDirectListeners) {
     DisabledTrackMode oldMode = mDisabledMode;
     bool oldEnabled = oldMode == DisabledTrackMode::ENABLED;
     if (!oldEnabled && enabled) {
       TRACK_LOG(LogLevel::Debug, ("ForwardedInputTrack %p setting "
@@ -207,32 +184,17 @@ void ForwardedInputTrack::SetDisabledTra
       listener->DecreaseDisabled(oldMode);
     } else if (oldEnabled && !enabled) {
       TRACK_LOG(LogLevel::Debug, ("ForwardedInputTrack %p setting "
                                   "direct listener disabled",
                                   this));
       listener->IncreaseDisabled(aMode);
     }
   }
-  MediaTrack::SetDisabledTrackModeImpl(aMode);
-}
-
-void ForwardedInputTrack::OnInputDisabledModeChanged(
-    DisabledTrackMode aInputMode) {
-  MOZ_ASSERT(mInputs.Length() == 1);
-  MOZ_ASSERT(mInputs[0]->GetSource());
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  if (mInputDisabledMode == DisabledTrackMode::SILENCE_BLACK &&
-      aInputMode == DisabledTrackMode::SILENCE_FREEZE) {
-    // Don't allow demoting from SILENCE_BLACK to SILENCE_FREEZE. Frames will
-    // remain black so we shouldn't notify that the track got enabled.
-    aInputMode = DisabledTrackMode::SILENCE_BLACK;
-  }
-  mInputDisabledMode = aInputMode;
-  NotifyIfDisabledModeChangedFrom(oldMode);
+  MediaTrack::SetEnabledImpl(aMode);
 }
 
 void ForwardedInputTrack::AddDirectListenerImpl(
     already_AddRefed<DirectMediaTrackListener> aListener) {
   RefPtr<DirectMediaTrackListener> listener = aListener;
   mOwnedDirectListeners.AppendElement(listener);
 
   DisabledTrackMode currentMode = mDisabledMode;
--- a/dom/media/ForwardedInputTrack.h
+++ b/dom/media/ForwardedInputTrack.h
@@ -21,19 +21,17 @@ class ForwardedInputTrack : public Proce
 
   virtual ForwardedInputTrack* AsForwardedInputTrack() override { return this; }
   friend class DOMMediaStream;
 
   void AddInput(MediaInputPort* aPort) override;
   void RemoveInput(MediaInputPort* aPort) override;
   void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
 
-  DisabledTrackMode CombinedDisabledMode() const override;
-  void SetDisabledTrackModeImpl(DisabledTrackMode aMode) override;
-  void OnInputDisabledModeChanged(DisabledTrackMode aInputMode) override;
+  void SetEnabledImpl(DisabledTrackMode aMode) override;
 
   friend class MediaTrackGraphImpl;
 
  protected:
   // Set up this track from a specific input.
   void SetInput(MediaInputPort* aPort);
 
   // MediaSegment-agnostic ProcessInput.
@@ -49,18 +47,13 @@ class ForwardedInputTrack : public Proce
   // ForwardedInputTrack-track. While an input is set, these are forwarded to
   // the input track. We will update these when this track's disabled status
   // changes.
   nsTArray<RefPtr<DirectMediaTrackListener>> mOwnedDirectListeners;
 
   // Set if an input has been added, nullptr otherwise. Adding more than one
   // input is an error.
   MediaInputPort* mInputPort = nullptr;
-
-  // This track's input's associated disabled mode. ENABLED if there is no
-  // input. This is used with MediaTrackListener::NotifyEnabledStateChanged(),
-  // which affects only video tracks. This is set only on ForwardedInputTracks.
-  DisabledTrackMode mInputDisabledMode = DisabledTrackMode::ENABLED;
 };
 
 }  // namespace mozilla
 
 #endif /* MOZILLA_FORWARDEDINPUTTRACK_H_ */
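The two ForwardedInputTrack hunks above remove the combination of a track's own disabled mode with its input's mode (mInputDisabledMode). Per the removed CombinedDisabledMode(), the rule was a simple precedence: SILENCE_BLACK beats SILENCE_FREEZE, which beats ENABLED. A condensed, self-contained sketch of that rule (the Combine helper name is hypothetical):

    #include <algorithm>

    // Enumerator order chosen so that "more disabled" compares greater;
    // sketch only, mirroring the removed CombinedDisabledMode() logic.
    enum class DisabledTrackMode { ENABLED, SILENCE_FREEZE, SILENCE_BLACK };

    static DisabledTrackMode Combine(DisabledTrackMode aOwn,
                                     DisabledTrackMode aInput) {
      return std::max(aOwn, aInput);
    }

    // Combine(SILENCE_FREEZE, SILENCE_BLACK) == SILENCE_BLACK
    // Combine(ENABLED, ENABLED)              == ENABLED

The removed OnInputDisabledModeChanged() additionally refused to demote SILENCE_BLACK to SILENCE_FREEZE, since frames would stay black and listeners should not be told the track got enabled.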
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -205,41 +205,35 @@ struct DeviceState {
     MOZ_ASSERT(mDevice);
     MOZ_ASSERT(mTrackSource);
   }
 
   // true if we have stopped mDevice, this is a terminal state.
   // MainThread only.
   bool mStopped = false;
 
-  // true if mDevice is currently enabled.
-  // A device must be both enabled and unmuted to be turned on and capturing.
+  // true if mDevice is currently enabled, i.e., turned on and capturing.
   // MainThread only.
   bool mDeviceEnabled = false;
 
-  // true if mDevice is currently muted.
-  // A device that is either muted or disabled is turned off and not capturing.
-  // MainThread only.
-  bool mDeviceMuted;
-
   // true if the application has currently enabled mDevice.
   // MainThread only.
   bool mTrackEnabled = false;
 
   // Time when the application last enabled mDevice.
   // MainThread only.
   TimeStamp mTrackEnabledTime;
 
   // true if an operation to Start() or Stop() mDevice has been dispatched to
   // the media thread and is not finished yet.
   // MainThread only.
   bool mOperationInProgress = false;
 
   // true if we are allowed to turn off the underlying source while all tracks
-  // are disabled. Only affects disabling; always turns off on user-agent mute.
+  // are disabled.
   // MainThread only.
   bool mOffWhileDisabled = false;
 
   // Timer triggered by a MediaStreamTrackSource signaling that all tracks got
   // disabled. When the timer fires we initiate Stop()ing mDevice.
   // If set we allow dynamically stopping and starting mDevice.
   // Any thread.
   const RefPtr<MediaTimer> mDisableTimer = new MediaTimer();
@@ -314,18 +308,17 @@ class SourceListener : public SupportsWe
   void Register(GetUserMediaWindowListener* aListener);
 
   /**
    * Marks this listener as active and creates internal device states.
    */
   void Activate(RefPtr<MediaDevice> aAudioDevice,
                 RefPtr<LocalTrackSource> aAudioTrackSource,
                 RefPtr<MediaDevice> aVideoDevice,
-                RefPtr<LocalTrackSource> aVideoTrackSource,
-                bool aStartVideoMuted, bool aStartAudioMuted);
+                RefPtr<LocalTrackSource> aVideoTrackSource);
 
   /**
    * Posts a task to initialize and start all associated devices.
    */
   RefPtr<SourceListenerPromise> InitializeAsync();
 
   /**
    * Stops all live tracks, ends the associated MediaTrack and cleans up the
@@ -381,40 +374,21 @@ class SourceListener : public SupportsWe
    *
    * The delay is in place to prevent misuse by malicious sites. If a track is
    * re-enabled before the delay has passed, the device will not be touched
    * until another disable followed by the full delay happens.
    */
   void SetEnabledFor(MediaTrack* aTrack, bool aEnabled);
 
   /**
-   * Posts a task to set the muted state of the device associated with
-   * aTrackSource to aMuted and notifies the associated window listener that a
-   * track's state has changed.
-   *
-   * Turning the hardware off while the device is muted is supported for:
-   * - Camera (enabled by default, controlled by pref
-   *   "media.getusermedia.camera.off_while_disabled.enabled")
-   * - Microphone (disabled by default, controlled by pref
-   *   "media.getusermedia.microphone.off_while_disabled.enabled")
-   * Screen-, app-, or windowsharing is not supported at this time.
-   */
-  void SetMutedFor(LocalTrackSource* aTrackSource, bool aMuted);
-
-  /**
    * Stops all screen/app/window/audioCapture sharing, but not camera or
    * microphone.
    */
   void StopSharing();
 
-  /**
-   * Mutes or unmutes the associated video device if it is a camera.
-   */
-  void MuteOrUnmuteCamera(bool aMute);
-
   MediaDevice* GetAudioDevice() const {
     return mAudioDeviceState ? mAudioDeviceState->mDevice.get() : nullptr;
   }
 
   MediaDevice* GetVideoDevice() const {
     return mVideoDeviceState ? mVideoDeviceState->mDevice.get() : nullptr;
   }
 
@@ -432,25 +406,16 @@ class SourceListener : public SupportsWe
       MediaTrack* aTrack, const MediaTrackConstraints& aConstraints,
       CallerType aCallerType);
 
   PrincipalHandle GetPrincipalHandle() const;
 
  private:
   virtual ~SourceListener() = default;
 
-  using DeviceOperationPromise =
-      MozPromise<nsresult, bool, /* IsExclusive = */ true>;
-
-  /**
-   * Posts a task to start or stop the device associated with aTrack, based on
-   * a passed-in boolean. Private method used by SetEnabledFor and SetMutedFor.
-   */
-  RefPtr<DeviceOperationPromise> UpdateDevice(MediaTrack* aTrack, bool aOn);
-
   /**
    * Returns a pointer to the device state for aTrack.
    *
    * This is intended for internal use where we need to figure out which state
    * corresponds to aTrack, not for availability checks. As such, we assert
    * that the device does indeed exist.
    *
    * Since this is a raw pointer and the state lifetime depends on the
@@ -523,18 +488,17 @@ class GetUserMediaWindowListener {
     MOZ_ASSERT(aListener);
     MOZ_ASSERT(!aListener->Activated());
     MOZ_ASSERT(mInactiveListeners.Contains(aListener),
                "Must be registered to activate");
     MOZ_ASSERT(!mActiveListeners.Contains(aListener), "Already activated");
 
     mInactiveListeners.RemoveElement(aListener);
     aListener->Activate(std::move(aAudioDevice), std::move(aAudioTrackSource),
-                        std::move(aVideoDevice), std::move(aVideoTrackSource),
-                        mCamerasAreMuted, /* aStartAudioMuted */ false);
+                        std::move(aVideoDevice), std::move(aVideoTrackSource));
     mActiveListeners.AppendElement(std::move(aListener));
   }
 
   /**
    * Removes all SourceListeners from this window listener.
    * Removes this window listener from the list of active windows, so callers
    * need to make sure to hold a strong reference.
    */
@@ -669,18 +633,16 @@ class GetUserMediaWindowListener {
     NS_ProxyRelease(__func__, mainTarget, aListener.forget(), true);
     return true;
   }
 
   void StopSharing();
 
   void StopRawID(const nsString& removedDeviceID);
 
-  void MuteOrUnmuteCameras(bool aMute);
-
   /**
    * Called by one of our SourceListeners when one of its tracks has changed so
    * that chrome state is affected.
    * Schedules an event for the next stable state to update chrome.
    */
   void ChromeAffectingStateChanged();
 
   /**
@@ -744,22 +706,16 @@ class GetUserMediaWindowListener {
   const PrincipalHandle mPrincipalHandle;
 
   // true if we have scheduled a task to notify chrome in the next stable state.
   // The task will reset this to false. MainThread only.
   bool mChromeNotificationTaskPosted;
 
   nsTArray<RefPtr<SourceListener>> mInactiveListeners;
   nsTArray<RefPtr<SourceListener>> mActiveListeners;
-
-  // Whether camera access in this window is currently User Agent (UA) muted.
-  // When true, new camera tracks must start out muted, to avoid JS
-  // circumventing UA mute by calling getUserMedia again.
-  // Per-camera UA muting is not supported.
-  bool mCamerasAreMuted = false;
 };
 
 class LocalTrackSource : public MediaStreamTrackSource {
  public:
   LocalTrackSource(nsIPrincipal* aPrincipal, const nsString& aLabel,
                    const RefPtr<SourceListener>& aListener,
                    MediaSourceEnum aSource, MediaTrack* aTrack,
                    RefPtr<PeerIdentity> aPeerIdentity)
@@ -810,26 +766,16 @@ class LocalTrackSource : public MediaStr
   }
 
   void Enable() override {
     if (mListener) {
       mListener->SetEnabledFor(mTrack, true);
     }
   }
 
-  void Mute() {
-    MutedChanged(true);
-    mTrack->SetDisabledTrackMode(DisabledTrackMode::SILENCE_BLACK);
-  }
-
-  void Unmute() {
-    MutedChanged(false);
-    mTrack->SetDisabledTrackMode(DisabledTrackMode::ENABLED);
-  }
-
   const MediaSourceEnum mSource;
   const RefPtr<MediaTrack> mTrack;
   const RefPtr<const PeerIdentity> mPeerIdentity;
 
  protected:
   ~LocalTrackSource() {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_ASSERT(mTrack->IsDestroyed());
@@ -2044,20 +1990,16 @@ MediaManager* MediaManager::Get() {
       obs->AddObserver(sSingleton, "last-pb-context-exited", false);
       obs->AddObserver(sSingleton, "getUserMedia:got-device-permission", false);
       obs->AddObserver(sSingleton, "getUserMedia:privileged:allow", false);
       obs->AddObserver(sSingleton, "getUserMedia:response:allow", false);
       obs->AddObserver(sSingleton, "getUserMedia:response:deny", false);
       obs->AddObserver(sSingleton, "getUserMedia:response:noOSPermission",
                        false);
       obs->AddObserver(sSingleton, "getUserMedia:revoke", false);
-      obs->AddObserver(sSingleton, "getUserMedia:muteVideo", false);
-      obs->AddObserver(sSingleton, "getUserMedia:unmuteVideo", false);
-      obs->AddObserver(sSingleton, "application-background", false);
-      obs->AddObserver(sSingleton, "application-foreground", false);
     }
     // else MediaManager won't work properly and will leak (see bug 837874)
     nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
     if (prefs) {
       prefs->AddObserver("media.navigator.video.default_width", sSingleton,
                          false);
       prefs->AddObserver("media.navigator.video.default_height", sSingleton,
                          false);
@@ -3445,40 +3387,28 @@ void MediaManager::OnNavigation(uint64_t
           MOZ_ASSERT(!self->GetWindowListener(windowID));
         });
   } else {
     RemoveWindowID(aWindowID);
   }
   MOZ_ASSERT(!GetWindowListener(aWindowID));
 }
 
-void MediaManager::OnCameraMute(bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG("OnCameraMute for all windows");
-  mCamerasMuted = aMute;
-  // This is safe since we're on main-thread, and the windowlist can only
-  // be added to from the main-thread
-  for (auto iter = mActiveWindows.Iter(); !iter.Done(); iter.Next()) {
-    iter.UserData()->MuteOrUnmuteCameras(aMute);
-  }
-}
-
 void MediaManager::AddWindowID(uint64_t aWindowId,
                                RefPtr<GetUserMediaWindowListener> aListener) {
   MOZ_ASSERT(NS_IsMainThread());
   // Store the WindowID in a hash table and mark as active. The entry is removed
   // when this window is closed or navigated away from.
   // This is safe since we're on main-thread, and the windowlist can only
   // be invalidated from the main-thread (see OnNavigation)
   if (IsWindowStillActive(aWindowId)) {
     MOZ_ASSERT(false, "Window already added");
     return;
   }
 
-  aListener->MuteOrUnmuteCameras(mCamerasMuted);
   GetActiveWindows()->Put(aWindowId, std::move(aListener));
 }
 
 void MediaManager::RemoveWindowID(uint64_t aWindowId) {
   mActiveWindows.Remove(aWindowId);
 
   // get outer windowID
   auto* window = nsGlobalWindowInner::GetInnerWindowWithId(aWindowId);
@@ -3584,20 +3514,16 @@ void MediaManager::Shutdown() {
   nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
 
   obs->RemoveObserver(this, "last-pb-context-exited");
   obs->RemoveObserver(this, "getUserMedia:privileged:allow");
   obs->RemoveObserver(this, "getUserMedia:response:allow");
   obs->RemoveObserver(this, "getUserMedia:response:deny");
   obs->RemoveObserver(this, "getUserMedia:response:noOSPermission");
   obs->RemoveObserver(this, "getUserMedia:revoke");
-  obs->RemoveObserver(this, "getUserMedia:muteVideo");
-  obs->RemoveObserver(this, "getUserMedia:unmuteVideo");
-  obs->RemoveObserver(this, "application-background");
-  obs->RemoveObserver(this, "application-foreground");
 
   nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
   if (prefs) {
     prefs->RemoveObserver("media.navigator.video.default_width", this);
     prefs->RemoveObserver("media.navigator.video.default_height", this);
     prefs->RemoveObserver("media.navigator.video.default_fps", this);
     prefs->RemoveObserver("media.navigator.audio.fake_frequency", this);
 #ifdef MOZ_WEBRTC
@@ -3739,33 +3665,16 @@ bool IsGUMResponseNoAccess(const char* a
   if (!strcmp(aTopic, "getUserMedia:response:noOSPermission")) {
     aErrorName = MediaMgrError::Name::NotFoundError;
     return true;
   }
 
   return false;
 }
 
-static MediaSourceEnum ParseScreenColonWindowID(const char16_t* aData,
-                                                uint64_t* aWindowIDOut) {
-  MOZ_ASSERT(aWindowIDOut);
-  // may be windowid or screen:windowid
-  const nsDependentString data(aData);
-  if (Substring(data, 0, strlen("screen:")).EqualsLiteral("screen:")) {
-    nsresult rv;
-    *aWindowIDOut = Substring(data, strlen("screen:")).ToInteger64(&rv);
-    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
-    return MediaSourceEnum::Screen;
-  }
-  nsresult rv;
-  *aWindowIDOut = data.ToInteger64(&rv);
-  MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
-  return MediaSourceEnum::Camera;
-}
-
 nsresult MediaManager::Observe(nsISupports* aSubject, const char* aTopic,
                                const char16_t* aData) {
   MOZ_ASSERT(NS_IsMainThread());
 
   MediaMgrError::Name gumNoAccessError = MediaMgrError::Name::NotAllowedError;
 
   if (!strcmp(aTopic, NS_PREFBRANCH_PREFCHANGE_TOPIC_ID)) {
     nsCOMPtr<nsIPrefBranch> branch(do_QueryInterface(aSubject));
@@ -3860,42 +3769,38 @@ nsresult MediaManager::Observe(nsISuppor
         return NS_OK;
       }
       array->RemoveElement(key);
       SendPendingGUMRequest();
     }
     return NS_OK;
 
   } else if (!strcmp(aTopic, "getUserMedia:revoke")) {
-    uint64_t windowID;
-    if (ParseScreenColonWindowID(aData, &windowID) == MediaSourceEnum::Screen) {
-      LOG("Revoking ScreenCapture access for window %" PRIu64, windowID);
-      StopScreensharing(windowID);
+    nsresult rv;
+    // may be windowid or screen:windowid
+    const nsDependentString data(aData);
+    if (Substring(data, 0, strlen("screen:")).EqualsLiteral("screen:")) {
+      uint64_t windowID = Substring(data, strlen("screen:")).ToInteger64(&rv);
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+      if (NS_SUCCEEDED(rv)) {
+        LOG("Revoking Screen/windowCapture access for window %" PRIu64,
+            windowID);
+        StopScreensharing(windowID);
+      }
     } else {
-      LOG("Revoking MediaCapture access for window %" PRIu64, windowID);
-      OnNavigation(windowID);
+      uint64_t windowID = data.ToInteger64(&rv);
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+      if (NS_SUCCEEDED(rv)) {
+        LOG("Revoking MediaCapture access for window %" PRIu64, windowID);
+        OnNavigation(windowID);
+      }
     }
     return NS_OK;
-  } else if (!strcmp(aTopic, "getUserMedia:muteVideo") ||
-             !strcmp(aTopic, "getUserMedia:unmuteVideo")) {
-    OnCameraMute(!strcmp(aTopic, "getUserMedia:muteVideo"));
-    return NS_OK;
-  } else if ((!strcmp(aTopic, "application-background") ||
-              !strcmp(aTopic, "application-foreground")) &&
-             StaticPrefs::media_getusermedia_camera_background_mute_enabled()) {
-    // TODO: These don't fire in the content process yet (see bug 1660049).
-    //
-    // On mobile we turn off any cameras (but not mics) while in the background.
-    // Keeping things simple for now by duplicating test-covered code above.
-    //
-    // NOTE: If a mobile device ever wants to implement "getUserMedia:muteVideo"
-    // as well, it'd need to update this code to handle & test the combinations.
-    OnCameraMute(!strcmp(aTopic, "application-background"));
-    return NS_OK;
   }
+
   return NS_OK;
 }
 
 nsresult MediaManager::GetActiveMediaCaptureWindows(nsIArray** aArray) {
   MOZ_ASSERT(aArray);
 
   nsCOMPtr<nsIMutableArray> array = nsArray::Create();
 
@@ -4133,18 +4038,17 @@ void SourceListener::Register(GetUserMed
 
   mPrincipalHandle = aListener->GetPrincipalHandle();
   mWindowListener = aListener;
 }
 
 void SourceListener::Activate(RefPtr<MediaDevice> aAudioDevice,
                               RefPtr<LocalTrackSource> aAudioTrackSource,
                               RefPtr<MediaDevice> aVideoDevice,
-                              RefPtr<LocalTrackSource> aVideoTrackSource,
-                              bool aStartVideoMuted, bool aStartAudioMuted) {
+                              RefPtr<LocalTrackSource> aVideoTrackSource) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
 
   LOG("SourceListener %p activating audio=%p video=%p", this,
       aAudioDevice.get(), aVideoDevice.get());
 
   MOZ_ASSERT(!mStopped, "Cannot activate stopped source listener");
   MOZ_ASSERT(!Activated(), "Already activated");
 
@@ -4152,70 +4056,58 @@ void SourceListener::Activate(RefPtr<Med
   if (aAudioDevice) {
     bool offWhileDisabled =
         aAudioDevice->GetMediaSource() == MediaSourceEnum::Microphone &&
         Preferences::GetBool(
             "media.getusermedia.microphone.off_while_disabled.enabled", true);
     mAudioDeviceState =
         MakeUnique<DeviceState>(std::move(aAudioDevice),
                                 std::move(aAudioTrackSource), offWhileDisabled);
-    mAudioDeviceState->mDeviceMuted = aStartAudioMuted;
-    if (aStartAudioMuted) {
-      mAudioDeviceState->mTrackSource->Mute();
-    }
   }
 
   if (aVideoDevice) {
     bool offWhileDisabled =
         aVideoDevice->GetMediaSource() == MediaSourceEnum::Camera &&
         Preferences::GetBool(
             "media.getusermedia.camera.off_while_disabled.enabled", true);
     mVideoDeviceState =
         MakeUnique<DeviceState>(std::move(aVideoDevice),
                                 std::move(aVideoTrackSource), offWhileDisabled);
-    mVideoDeviceState->mDeviceMuted = aStartVideoMuted;
-    if (aStartVideoMuted) {
-      mVideoDeviceState->mTrackSource->Mute();
-    }
   }
 }
 
 RefPtr<SourceListener::SourceListenerPromise>
 SourceListener::InitializeAsync() {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
   MOZ_DIAGNOSTIC_ASSERT(!mStopped);
 
   return MediaManager::Dispatch<SourceListenerPromise>(
              __func__,
              [principal = GetPrincipalHandle(),
               audioDevice =
                   mAudioDeviceState ? mAudioDeviceState->mDevice : nullptr,
               audioStream = mAudioDeviceState
                                 ? mAudioDeviceState->mTrackSource->mTrack
                                 : nullptr,
-              audioDeviceMuted =
-                  mAudioDeviceState ? mAudioDeviceState->mDeviceMuted : false,
               videoDevice =
                   mVideoDeviceState ? mVideoDeviceState->mDevice : nullptr,
               videoStream = mVideoDeviceState
                                 ? mVideoDeviceState->mTrackSource->mTrack
-                                : nullptr,
-              videoDeviceMuted =
-                  mVideoDeviceState ? mVideoDeviceState->mDeviceMuted : false](
+                                : nullptr](
                  MozPromiseHolder<SourceListenerPromise>& aHolder) {
                if (audioDevice) {
                  audioDevice->SetTrack(audioStream->AsSourceTrack(), principal);
                }
 
                if (videoDevice) {
                  videoDevice->SetTrack(videoStream->AsSourceTrack(), principal);
                }
 
                if (audioDevice) {
-                 nsresult rv = audioDeviceMuted ? NS_OK : audioDevice->Start();
+                 nsresult rv = audioDevice->Start();
                  if (rv == NS_ERROR_NOT_AVAILABLE) {
                    PR_Sleep(200);
                    rv = audioDevice->Start();
                  }
                  if (NS_FAILED(rv)) {
                    nsString log;
                    if (rv == NS_ERROR_NOT_AVAILABLE) {
                      log.AssignLiteral("Concurrent mic process limit.");
@@ -4229,17 +4121,17 @@ SourceListener::InitializeAsync() {
                    aHolder.Reject(MakeRefPtr<MediaMgrError>(
                                       MediaMgrError::Name::AbortError, log),
                                   __func__);
                    return;
                  }
                }
 
                if (videoDevice) {
-                 nsresult rv = videoDeviceMuted ? NS_OK : videoDevice->Start();
+                 nsresult rv = videoDevice->Start();
                  if (NS_FAILED(rv)) {
                    if (audioDevice) {
                      if (NS_WARN_IF(NS_FAILED(audioDevice->Stop()))) {
                        MOZ_ASSERT_UNREACHABLE("Stopping audio failed");
                      }
                    }
                    nsString log;
                    log.AssignLiteral("Starting video failed");
@@ -4378,90 +4270,16 @@ void SourceListener::GetSettingsFor(Medi
   MediaSourceEnum mediaSource = state.mDevice->GetMediaSource();
   if (mediaSource == MediaSourceEnum::Camera ||
       mediaSource == MediaSourceEnum::Microphone) {
     aOutSettings.mDeviceId.Construct(state.mDevice->mID);
     aOutSettings.mGroupId.Construct(state.mDevice->mGroupID);
   }
 }
 
-static bool SameGroupAsCurrentAudioOutput(const nsString& aGroupId) {
-  CubebDeviceEnumerator* enumerator = CubebDeviceEnumerator::GetInstance();
-  // Get the current graph's device info. This is always the
-  // default audio output device for now.
-  RefPtr<AudioDeviceInfo> outputDevice =
-      enumerator->DefaultDevice(CubebDeviceEnumerator::Side::OUTPUT);
-  return outputDevice && outputDevice->GroupID().Equals(aGroupId);
-}
-
-auto SourceListener::UpdateDevice(MediaTrack* aTrack, bool aOn)
-    -> RefPtr<DeviceOperationPromise> {
-  MOZ_ASSERT(NS_IsMainThread());
-  RefPtr<SourceListener> self = this;
-  DeviceState& state = GetDeviceStateFor(aTrack);
-  nsString groupId;
-  state.mDevice->GetRawGroupId(groupId);
-
-  return MediaManager::Dispatch<DeviceOperationPromise>(
-             __func__,
-             [self, device = state.mDevice, aOn,
-              groupId](MozPromiseHolder<DeviceOperationPromise>& h) {
-               if (device->mKind == dom::MediaDeviceKind::Audioinput && !aOn &&
-                   SameGroupAsCurrentAudioOutput(groupId)) {
-                 // Don't turn off the microphone of a device that is on the
-                 // same physical device as the output.
-                 //
-                 // Also don't take this branch when turning on, in case the
-                 // default audio output device has changed. The AudioInput
-                 // source start/stop are idempotent, so this works.
-                 LOG("Not turning device off, as it matches audio output (%s)",
-                     NS_ConvertUTF16toUTF8(groupId).get());
-                 h.Resolve(NS_OK, __func__);
-                 return;
-               }
-               LOG("Turning %s device (%s)", aOn ? "on" : "off",
-                   NS_ConvertUTF16toUTF8(groupId).get());
-               h.Resolve(aOn ? device->Start() : device->Stop(), __func__);
-             })
-      ->Then(
-          GetMainThreadSerialEventTarget(), __func__,
-          [self, this, &state, track = RefPtr<MediaTrack>(aTrack),
-           aOn](nsresult aResult) {
-            if (state.mStopped) {
-              // Device was stopped on main thread during the operation. Done.
-              return DeviceOperationPromise::CreateAndResolve(aResult,
-                                                              __func__);
-            }
-            LOG("SourceListener %p turning %s %s input device for track %p %s",
-                this, aOn ? "on" : "off",
-                &state == mAudioDeviceState.get() ? "audio" : "video",
-                track.get(), NS_SUCCEEDED(aResult) ? "succeeded" : "failed");
-
-            if (NS_FAILED(aResult) && aResult != NS_ERROR_ABORT) {
-              // This path handles errors from starting or stopping the device.
-              // NS_ERROR_ABORT are for cases where *we* aborted. They need
-              // graceful handling.
-              if (aOn) {
-                // Starting the device failed. Stopping the track here will make
-                // the MediaStreamTrack end after a pass through the
-                // MediaTrackGraph.
-                StopTrack(track);
-              } else {
-                // Stopping the device failed. This is odd, but not fatal.
-                MOZ_ASSERT_UNREACHABLE("The device should be stoppable");
-              }
-            }
-            return DeviceOperationPromise::CreateAndResolve(aResult, __func__);
-          },
-          []() {
-            MOZ_ASSERT_UNREACHABLE("Unexpected and unhandled reject");
-            return DeviceOperationPromise::CreateAndReject(false, __func__);
-          });
-}
-
 void SourceListener::SetEnabledFor(MediaTrack* aTrack, bool aEnable) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
   MOZ_ASSERT(Activated(), "No device to set enabled state for");
 
   DeviceState& state = GetDeviceStateFor(aTrack);
 
   LOG("SourceListener %p %s %s track for track %p", this,
       aEnable ? "enabling" : "disabling",
@@ -4504,16 +4322,18 @@ void SourceListener::SetEnabledFor(Media
             3000));
     const TimeDuration durationEnabled =
         TimeStamp::Now() - state.mTrackEnabledTime;
     const TimeDuration delay = TimeDuration::Max(
         TimeDuration::FromMilliseconds(0), maxDelay - durationEnabled);
     timerPromise = state.mDisableTimer->WaitFor(delay, __func__);
   }
 
+  typedef MozPromise<nsresult, bool, /* IsExclusive = */ true>
+      DeviceOperationPromise;
   RefPtr<SourceListener> self = this;
   timerPromise
       ->Then(
           GetMainThreadSerialEventTarget(), __func__,
           [self, this, &state, track = RefPtr<MediaTrack>(aTrack),
            aEnable]() mutable {
             MOZ_ASSERT(state.mDeviceEnabled != aEnable,
                        "Device operation hasn't started");
@@ -4532,24 +4352,63 @@ void SourceListener::SetEnabledFor(Media
                                                               __func__);
             }
 
             state.mDeviceEnabled = aEnable;
 
             if (mWindowListener) {
               mWindowListener->ChromeAffectingStateChanged();
             }
-            if (!state.mOffWhileDisabled || state.mDeviceMuted) {
+            if (!state.mOffWhileDisabled) {
               // If the feature to turn a device off while disabled is itself
-              // disabled, or the device is currently user agent muted, then
-              // we shortcut the device operation and tell the
+              // disabled we shortcut the device operation and tell the
               // ux-updating code that everything went fine.
               return DeviceOperationPromise::CreateAndResolve(NS_OK, __func__);
             }
-            return UpdateDevice(track, aEnable);
+
+            nsString inputDeviceGroupId;
+            state.mDevice->GetRawGroupId(inputDeviceGroupId);
+
+            return MediaManager::Dispatch<DeviceOperationPromise>(
+                __func__,
+                [self, device = state.mDevice, aEnable, inputDeviceGroupId](
+                    MozPromiseHolder<DeviceOperationPromise>& h) {
+                  // Only take this branch when muting; when unmuting we must
+                  // really call `Start` on the source, in case the default
+                  // audio output device has changed. The AudioInput source
+                  // start/stop are idempotent, so this works.
+                  if (device->mKind == dom::MediaDeviceKind::Audioinput &&
+                      !aEnable) {
+                    // Don't turn off a microphone that is part of the same
+                    // physical device as the output.
+                    CubebDeviceEnumerator* enumerator =
+                        CubebDeviceEnumerator::GetInstance();
+                    // Get the current graph's device info. This is always the
+                    // default audio output device for now.
+                    RefPtr<AudioDeviceInfo> outputDevice =
+                        enumerator->DefaultDevice(
+                            CubebDeviceEnumerator::Side::OUTPUT);
+                    if (outputDevice &&
+                        outputDevice->GroupID().Equals(inputDeviceGroupId)) {
+                      LOG("Device group id match when %s, "
+                          "not turning the input device off (%s)",
+                          aEnable ? "unmuting" : "muting",
+                          NS_ConvertUTF16toUTF8(outputDevice->GroupID()).get());
+                      h.Resolve(NS_OK, __func__);
+                      return;
+                    }
+                  }
+
+                  LOG("Device group ids don't match when %s; "
+                      "turning the input device %s (%s)",
+                      aEnable ? "unmuting" : "muting", aEnable ? "on" : "off",
+                      NS_ConvertUTF16toUTF8(inputDeviceGroupId).get());
+                  h.Resolve(aEnable ? device->Start() : device->Stop(),
+                            __func__);
+                });
           },
           []() {
             // Timer was canceled by us. We signal this with NS_ERROR_ABORT.
             return DeviceOperationPromise::CreateAndResolve(NS_ERROR_ABORT,
                                                             __func__);
           })
       ->Then(
           GetMainThreadSerialEventTarget(), __func__,
@@ -4561,81 +4420,63 @@ void SourceListener::SetEnabledFor(Media
             state.mOperationInProgress = false;
 
             if (state.mStopped) {
               // Device was stopped on main thread during the operation. Nothing
               // to do.
               return;
             }
 
-            if (NS_FAILED(aResult) && aResult != NS_ERROR_ABORT && !aEnable) {
-              // To keep our internal state sane in this case, we disallow
-              // future stops due to disable.
-              state.mOffWhileDisabled = false;
+            LOG("SourceListener %p %s %s track for track %p %s", this,
+                aEnable ? "enabling" : "disabling",
+                &state == mAudioDeviceState.get() ? "audio" : "video",
+                track.get(), NS_SUCCEEDED(aResult) ? "succeeded" : "failed");
+
+            if (NS_FAILED(aResult) && aResult != NS_ERROR_ABORT) {
+              // This path handles errors from starting or stopping the device.
+              // NS_ERROR_ABORT is for cases where *we* aborted; those need
+              // graceful handling.
+              if (aEnable) {
+                // Starting the device failed. Stopping the track here will make
+                // the MediaStreamTrack end after a pass through the
+                // MediaTrackGraph.
+                StopTrack(track);
+              } else {
+                // Stopping the device failed. This is odd, but not fatal.
+                MOZ_ASSERT_UNREACHABLE("The device should be stoppable");
+
+                // To keep our internal state sane in this case, we disallow
+                // future stops due to disable.
+                state.mOffWhileDisabled = false;
+              }
               return;
             }
 
             // This path is for a device operation aResult that was success or
             // NS_ERROR_ABORT (*we* canceled the operation).
             // At this point we have to follow up on the intended state, i.e.,
             // update the device state if the track state changed in the
             // meantime.
 
-            if (state.mTrackEnabled != state.mDeviceEnabled) {
-              // Track state changed during this operation. We'll start over.
-              SetEnabledFor(track, state.mTrackEnabled);
+            if (state.mTrackEnabled == state.mDeviceEnabled) {
+              // Intended state is same as device's current state.
+              // Nothing more to do.
+              return;
+            }
+
+            // Track state changed during this operation. We'll start over.
+            if (state.mTrackEnabled) {
+              SetEnabledFor(track, true);
+            } else {
+              SetEnabledFor(track, false);
             }
           },
           []() { MOZ_ASSERT_UNREACHABLE("Unexpected and unhandled reject"); });
 }
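
A hedged sketch of the reconcile step at the end of the chain above. `Reconcile` is a hypothetical helper; the real code inlines this in the second Then() lambda using the same DeviceState fields.

void Reconcile(DeviceState& aState, MediaTrack* aTrack) {
  if (aState.mTrackEnabled == aState.mDeviceEnabled) {
    return;  // the device already matches the intended track state
  }
  // Track state changed while the async device operation ran: start over
  // with the latest intent. Each pass snapshots mTrackEnabled, so this only
  // recurses while the track keeps flipping.
  SetEnabledFor(aTrack, aState.mTrackEnabled);
}
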
 
-void SourceListener::SetMutedFor(LocalTrackSource* aTrackSource, bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
-  MOZ_ASSERT(Activated(), "No device to set muted state for");
-
-  MediaTrack* track = aTrackSource->mTrack;
-  DeviceState& state = GetDeviceStateFor(track);
-
-  LOG("SourceListener %p %s %s track for track %p", this,
-      aMute ? "muting" : "unmuting",
-      &state == mAudioDeviceState.get() ? "audio" : "video", track);
-
-  if (state.mStopped) {
-    // Device terminally stopped. Updating device state is pointless.
-    return;
-  }
-
-  if (state.mDeviceMuted == aMute) {
-    // Device is already in the desired state.
-    return;
-  }
-
-  LOG("SourceListener %p %s %s track for track %p - starting device operation",
-      this, aMute ? "muting" : "unmuting",
-      &state == mAudioDeviceState.get() ? "audio" : "video", track);
-
-  state.mDeviceMuted = aMute;
-
-  if (mWindowListener) {
-    mWindowListener->ChromeAffectingStateChanged();
-  }
-  // Update trackSource to fire mute/unmute events on all its tracks
-  if (aMute) {
-    aTrackSource->Mute();
-  } else {
-    aTrackSource->Unmute();
-  }
-  if (state.mOffWhileDisabled && !state.mDeviceEnabled &&
-      state.mDevice->mKind == dom::MediaDeviceKind::Videoinput) {
-    // Camera is already off. TODO: Revisit once we support UA-muting mics.
-    return;
-  }
-  UpdateDevice(track, !aMute);
-}
-
 void SourceListener::StopSharing() {
   MOZ_ASSERT(NS_IsMainThread());
 
   if (mStopped) {
     return;
   }
 
   MOZ_RELEASE_ASSERT(mWindowListener);
@@ -4653,32 +4494,16 @@ void SourceListener::StopSharing() {
   }
   if (mAudioDeviceState && mAudioDeviceState->mDevice->GetMediaSource() ==
                                MediaSourceEnum::AudioCapture) {
     static_cast<AudioCaptureTrackSource*>(mAudioDeviceState->mTrackSource.get())
         ->Stop();
   }
 }
 
-void SourceListener::MuteOrUnmuteCamera(bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  if (mStopped) {
-    return;
-  }
-
-  MOZ_RELEASE_ASSERT(mWindowListener);
-  LOG("SourceListener %p MuteOrUnmuteCamera", this);
-
-  if (mVideoDeviceState && (mVideoDeviceState->mDevice->GetMediaSource() ==
-                            MediaSourceEnum::Camera)) {
-    SetMutedFor(mVideoDeviceState->mTrackSource, aMute);
-  }
-}
-
 bool SourceListener::CapturingVideo() const {
   MOZ_ASSERT(NS_IsMainThread());
   return Activated() && mVideoDeviceState && !mVideoDeviceState->mStopped &&
          (!mVideoDeviceState->mDevice->mSource->IsFake() ||
           Preferences::GetBool("media.navigator.permission.fake"));
 }
 
 bool SourceListener::CapturingAudio() const {
@@ -4710,19 +4535,19 @@ CaptureState SourceListener::CapturingSo
   if ((aSource == MediaSourceEnum::Camera ||
        aSource == MediaSourceEnum::Microphone) &&
       state.mDevice->mSource->IsFake() &&
       !Preferences::GetBool("media.navigator.permission.fake")) {
     // Fake Camera and Microphone only count if there is no fake permission
     return CaptureState::Off;
   }
 
-  // Source is a match and is active and unmuted
-
-  if (state.mDeviceEnabled && !state.mDeviceMuted) {
+  // Source is a match and is active
+
+  if (state.mDeviceEnabled) {
     return CaptureState::Enabled;
   }
 
   return CaptureState::Disabled;
 }
 
 RefPtr<SourceListener::SourceListenerPromise>
 SourceListener::ApplyConstraintsToTrack(
@@ -4818,31 +4643,16 @@ void GetUserMediaWindowListener::StopRaw
       source->GetVideoDevice()->GetRawId(id);
       if (removedDeviceID.Equals(id)) {
         source->StopVideoTrack();
       }
     }
   }
 }
 
-void GetUserMediaWindowListener::MuteOrUnmuteCameras(bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
-
-  if (mCamerasAreMuted == aMute) {
-    return;
-  }
-  mCamerasAreMuted = aMute;
-
-  for (auto& source : mActiveListeners) {
-    if (source->GetVideoDevice()) {
-      source->MuteOrUnmuteCamera(aMute);
-    }
-  }
-}
-
 void GetUserMediaWindowListener::ChromeAffectingStateChanged() {
   MOZ_ASSERT(NS_IsMainThread());
 
   // We wait until stable state before notifying chrome so chrome only does one
   // update if more updates happen in this event loop.
 
   if (mChromeNotificationTaskPosted) {
     return;
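
A hedged sketch of the same-group mute shortcut restored in SetEnabledFor above; `SharesGroupWithDefaultOutput` is a hypothetical helper assembled from the CubebDeviceEnumerator calls shown in the hunk, not an API in the tree.

static bool SharesGroupWithDefaultOutput(const nsString& aInputGroupId) {
  CubebDeviceEnumerator* enumerator = CubebDeviceEnumerator::GetInstance();
  RefPtr<AudioDeviceInfo> output =
      enumerator->DefaultDevice(CubebDeviceEnumerator::Side::OUTPUT);
  // No default output, or a different hardware group, means it is safe to
  // physically stop the input device.
  return output && output->GroupID().Equals(aInputGroupId);
}

// In the dispatched lambda, muting (aEnable == false) skips the hardware
// stop when input and default output share a group:
//   if (!aEnable && SharesGroupWithDefaultOutput(inputDeviceGroupId)) {
//     h.Resolve(NS_OK, __func__);  // report success, leave the device on
//   } else {
//     h.Resolve(aEnable ? device->Start() : device->Stop(), __func__);
//   }
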
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -249,17 +249,16 @@ class MediaManager final : public nsIMed
   //   Currently, this happens only on non-default devices and non-https
   //   connections. TODO: authorization model to allow
   //   an application to play audio through the device (Bug 1493982).
   // NS_ERROR_ABORT: General error.
   RefPtr<SinkInfoPromise> GetSinkDevice(nsPIDOMWindowInner* aWindow,
                                         const nsString& aDeviceId);
 
   void OnNavigation(uint64_t aWindowID);
-  void OnCameraMute(bool aMute);
   bool IsActivelyCapturingOrHasAPermission(uint64_t aWindowId);
 
   MediaEventSource<void>& DeviceListChangeEvent() {
     return mDeviceListChangeEvent;
   }
 
   MediaEnginePrefs mPrefs;
 
@@ -337,17 +336,16 @@ class MediaManager final : public nsIMed
   void RemoveMediaDevicesCallback(uint64_t aWindowID);
   void DeviceListChanged();
 
   // ONLY access from MainThread so we don't need to lock
   WindowTable mActiveWindows;
   nsRefPtrHashtable<nsStringHashKey, GetUserMediaTask> mActiveCallbacks;
   nsClassHashtable<nsUint64HashKey, nsTArray<nsString>> mCallIds;
   nsTArray<RefPtr<dom::GetUserMediaRequest>> mPendingGUMRequest;
-  bool mCamerasMuted = false;
 
   // Always exists
   const RefPtr<TaskQueue> mMediaThread;
   nsCOMPtr<nsIAsyncShutdownBlocker> mShutdownBlocker;
 
   // ONLY accessed from MediaManagerThread
   RefPtr<MediaEngine> mBackend;
 
--- a/dom/media/MediaStreamTrack.cpp
+++ b/dom/media/MediaStreamTrack.cpp
@@ -290,18 +290,18 @@ void MediaStreamTrack::SetEnabled(bool a
   }
 
   mEnabled = aEnabled;
 
   if (Ended()) {
     return;
   }
 
-  mTrack->SetDisabledTrackMode(mEnabled ? DisabledTrackMode::ENABLED
-                                        : DisabledTrackMode::SILENCE_BLACK);
+  mTrack->SetEnabled(mEnabled ? DisabledTrackMode::ENABLED
+                              : DisabledTrackMode::SILENCE_BLACK);
   NotifyEnabledChanged();
 }
 
 void MediaStreamTrack::Stop() {
   LOG(LogLevel::Info, ("MediaStreamTrack %p Stop()", this));
 
   if (Ended()) {
     LOG(LogLevel::Warning, ("MediaStreamTrack %p Already ended", this));
@@ -454,21 +454,16 @@ void MediaStreamTrack::MutedChanged(bool
   if (mMuted == aNewState) {
     return;
   }
 
   LOG(LogLevel::Info,
       ("MediaStreamTrack %p became %s", this, aNewState ? "muted" : "unmuted"));
 
   mMuted = aNewState;
-
-  if (Ended()) {
-    return;
-  }
-
   nsString eventName = aNewState ? u"mute"_ns : u"unmute"_ns;
   DispatchTrustedEvent(eventName);
 }
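
The two MediaStreamTrack hunks above keep `enabled` and `muted` orthogonal: SetEnabled drives rendering through a DisabledTrackMode, while MutedChanged only flips a flag and fires a DOM event. A minimal sketch, where `track` is a hypothetical MediaStreamTrack*:

// Content-controlled: the graph renders black video / silent audio.
track->SetEnabled(false);
// Internally: mTrack->SetEnabled(DisabledTrackMode::SILENCE_BLACK);

// Source-controlled: only state plus a trusted DOM event, no media change.
// MutedChanged(true) dispatches "mute"; MutedChanged(false) dispatches
// "unmute". Duplicate states are ignored by the early return above.
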
 
 void MediaStreamTrack::NotifyEnded() {
   MOZ_ASSERT(mReadyState == MediaStreamTrackState::Ended);
 
   for (const auto& consumer : mConsumers.Clone()) {
--- a/dom/media/MediaStreamTrack.h
+++ b/dom/media/MediaStreamTrack.h
@@ -584,17 +584,16 @@ class MediaStreamTrack : public DOMEvent
 
   /**
    * Called when mSource's muted state has changed.
    */
   void MutedChanged(bool aNewState);
 
   /**
    * Sets this track's muted state without raising any events.
-   * Only really set by cloning. See MutedChanged for runtime changes.
    */
   void SetMuted(bool aMuted) { mMuted = aMuted; }
 
   virtual void Destroy();
 
   /**
    * Sets the principal and notifies PrincipalChangeObservers if it changes.
    */
--- a/dom/media/MediaTrackGraph.cpp
+++ b/dom/media/MediaTrackGraph.cpp
@@ -859,18 +859,17 @@ void MediaTrackGraphImpl::ProcessInputDa
   if (!mInputData) {
     return;
   }
 
   nsTArray<RefPtr<AudioDataListener>>* listeners =
       mInputDeviceUsers.GetValue(mInputDeviceID);
   MOZ_ASSERT(listeners);
   for (auto& listener : *listeners) {
-    listener->NotifyInputData(this, mInputData, mInputFrames, GraphRate(),
-                              mInputChannelCount);
+    listener->NotifyInputData(this, mInputData, mInputFrames, GraphRate(),
+                              mInputChannelCount);
   }
 
   mInputData = nullptr;
   mInputFrames = 0;
   mInputChannelCount = 0;
 }
 
 void MediaTrackGraphImpl::DeviceChangedImpl() {
@@ -2176,17 +2175,17 @@ void MediaTrack::AddListenerImpl(
   mTrackListeners.AppendElement(std::move(l));
 
   PrincipalHandle lastPrincipalHandle = mSegment->GetLastPrincipalHandle();
   mTrackListeners.LastElement()->NotifyPrincipalHandleChanged(
       Graph(), lastPrincipalHandle);
   if (mNotifiedEnded) {
     mTrackListeners.LastElement()->NotifyEnded(Graph());
   }
-  if (CombinedDisabledMode() == DisabledTrackMode::SILENCE_BLACK) {
+  if (mDisabledMode == DisabledTrackMode::SILENCE_BLACK) {
     mTrackListeners.LastElement()->NotifyEnabledStateChanged(Graph(), false);
   }
 }
 
 void MediaTrack::AddListener(MediaTrackListener* aListener) {
   class Message : public ControlMessage {
    public:
     Message(MediaTrack* aTrack, MediaTrackListener* aListener)
@@ -2311,32 +2310,41 @@ void MediaTrack::RunAfterPendingUpdates(
   };
 
   if (mMainThreadDestroyed) {
     return;
   }
   graph->AppendMessage(MakeUnique<Message>(this, runnable.forget()));
 }
 
-void MediaTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
-  MOZ_DIAGNOSTIC_ASSERT(
-      aMode == DisabledTrackMode::ENABLED ||
-          mDisabledMode == DisabledTrackMode::ENABLED,
-      "Changing disabled track mode for a track is not allowed");
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  mDisabledMode = aMode;
-  NotifyIfDisabledModeChangedFrom(oldMode);
+void MediaTrack::SetEnabledImpl(DisabledTrackMode aMode) {
+  if (aMode == DisabledTrackMode::ENABLED) {
+    mDisabledMode = DisabledTrackMode::ENABLED;
+    for (const auto& l : mTrackListeners) {
+      l->NotifyEnabledStateChanged(Graph(), true);
+    }
+  } else {
+    MOZ_DIAGNOSTIC_ASSERT(
+        mDisabledMode == DisabledTrackMode::ENABLED,
+        "Changing disabled track mode for a track is not allowed");
+    mDisabledMode = aMode;
+    if (aMode == DisabledTrackMode::SILENCE_BLACK) {
+      for (const auto& l : mTrackListeners) {
+        l->NotifyEnabledStateChanged(Graph(), false);
+      }
+    }
+  }
 }
 
-void MediaTrack::SetDisabledTrackMode(DisabledTrackMode aMode) {
+void MediaTrack::SetEnabled(DisabledTrackMode aMode) {
   class Message : public ControlMessage {
    public:
     Message(MediaTrack* aTrack, DisabledTrackMode aMode)
         : ControlMessage(aTrack), mMode(aMode) {}
-    void Run() override { mTrack->SetDisabledTrackModeImpl(mMode); }
+    void Run() override { mTrack->SetEnabledImpl(mMode); }
     DisabledTrackMode mMode;
   };
   if (mMainThreadDestroyed) {
     return;
   }
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aMode));
 }
 
@@ -2410,34 +2418,16 @@ void MediaTrack::AdvanceTimeVaryingValue
   if (time < mForgottenTime + minChunkSize) {
     return;
   }
 
   mForgottenTime = std::min(GetEnd() - 1, time);
   mSegment->ForgetUpTo(mForgottenTime);
 }
 
-void MediaTrack::NotifyIfDisabledModeChangedFrom(DisabledTrackMode aOldMode) {
-  DisabledTrackMode mode = CombinedDisabledMode();
-  if (aOldMode == mode) {
-    return;
-  }
-
-  for (const auto& listener : mTrackListeners) {
-    listener->NotifyEnabledStateChanged(
-        Graph(), mode != DisabledTrackMode::SILENCE_BLACK);
-  }
-
-  for (const auto& c : mConsumers) {
-    if (c->GetDestination()) {
-      c->GetDestination()->OnInputDisabledModeChanged(mode);
-    }
-  }
-}
-
 SourceMediaTrack::SourceMediaTrack(MediaSegment::Type aType,
                                    TrackRate aSampleRate)
     : MediaTrack(aSampleRate, aType,
                  aType == MediaSegment::AUDIO
                      ? static_cast<MediaSegment*>(new AudioSegment())
                      : static_cast<MediaSegment*>(new VideoSegment())),
       mMutex("mozilla::media::SourceMediaTrack") {
   mUpdateTrack = MakeUnique<TrackData>();
@@ -2761,20 +2751,16 @@ void SourceMediaTrack::AddDirectListener
 
   mDirectTrackListeners.AppendElement(listener);
 
   LOG(LogLevel::Debug,
       ("%p: Added direct track listener %p", GraphImpl(), listener.get()));
   listener->NotifyDirectListenerInstalled(
       DirectMediaTrackListener::InstallationResult::SUCCESS);
 
-  if (mDisabledMode != DisabledTrackMode::ENABLED) {
-    listener->IncreaseDisabled(mDisabledMode);
-  }
-
   if (mEnded) {
     return;
   }
 
   // Pass buffered data to the listener
   VideoSegment bufferedData;
   size_t videoFrames = 0;
   VideoSegment& segment = *GetData<VideoSegment>();
@@ -2811,19 +2797,16 @@ void SourceMediaTrack::AddDirectListener
 }
 
 void SourceMediaTrack::RemoveDirectListenerImpl(
     DirectMediaTrackListener* aListener) {
   MutexAutoLock lock(mMutex);
   for (int32_t i = mDirectTrackListeners.Length() - 1; i >= 0; --i) {
     const RefPtr<DirectMediaTrackListener>& l = mDirectTrackListeners[i];
     if (l == aListener) {
-      if (mDisabledMode != DisabledTrackMode::ENABLED) {
-        aListener->DecreaseDisabled(mDisabledMode);
-      }
       aListener->NotifyDirectListenerUninstalled();
       mDirectTrackListeners.RemoveElementAt(i);
     }
   }
 }
 
 void SourceMediaTrack::End() {
   MutexAutoLock lock(mMutex);
@@ -2835,17 +2818,17 @@ void SourceMediaTrack::End() {
   if (auto graph = GraphImpl()) {
     MonitorAutoLock lock(graph->GetMonitor());
     if (graph->CurrentDriver()) {  // graph has not completed forced shutdown
       graph->EnsureNextIteration();
     }
   }
 }
 
-void SourceMediaTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
+void SourceMediaTrack::SetEnabledImpl(DisabledTrackMode aMode) {
   {
     MutexAutoLock lock(mMutex);
     for (const auto& l : mDirectTrackListeners) {
       DisabledTrackMode oldMode = mDisabledMode;
       bool oldEnabled = oldMode == DisabledTrackMode::ENABLED;
       if (!oldEnabled && aMode == DisabledTrackMode::ENABLED) {
         LOG(LogLevel::Debug, ("%p: SourceMediaTrack %p setting "
                               "direct listener enabled",
@@ -2854,17 +2837,17 @@ void SourceMediaTrack::SetDisabledTrackM
       } else if (oldEnabled && aMode != DisabledTrackMode::ENABLED) {
         LOG(LogLevel::Debug, ("%p: SourceMediaTrack %p setting "
                               "direct listener disabled",
                               GraphImpl(), this));
         l->IncreaseDisabled(aMode);
       }
     }
   }
-  MediaTrack::SetDisabledTrackModeImpl(aMode);
+  MediaTrack::SetEnabledImpl(aMode);
 }
 
 void SourceMediaTrack::RemoveAllDirectListenersImpl() {
   GraphImpl()->AssertOnGraphThreadOrNotRunning();
   MutexAutoLock lock(mMutex);
 
   for (auto& l : mDirectTrackListeners.Clone()) {
     l->NotifyDirectListenerUninstalled();
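
SetEnabled()/SetEnabledImpl() above follow the graph's standard cross-thread idiom. A hedged sketch of the same pattern for a generic mutation; `SetVolume`/`SetVolumeImpl` are hypothetical, not part of this patch:

void MediaTrack::SetVolume(float aVolume) {
  class Message : public ControlMessage {
   public:
    Message(MediaTrack* aTrack, float aVolume)
        : ControlMessage(aTrack), mVolume(aVolume) {}
    // Runs on the graph thread, the only place graph state may be touched.
    void Run() override { mTrack->SetVolumeImpl(mVolume); }
    const float mVolume;
  };
  if (mMainThreadDestroyed) {
    return;  // the graph is shutting down; dropping the message is safe
  }
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aVolume));
}
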
--- a/dom/media/MediaTrackGraph.h
+++ b/dom/media/MediaTrackGraph.h
@@ -324,17 +324,17 @@ class MediaTrack : public mozilla::Linke
    * source and this track has been broken. The caller doesn't have to care
    * about this, removing when the source cannot be found, or when the listener
    * had already been removed does nothing.
    */
   virtual void RemoveDirectListener(DirectMediaTrackListener* aListener);
 
   // A disabled track has video replaced by black, and audio replaced by
   // silence.
-  void SetDisabledTrackMode(DisabledTrackMode aMode);
+  void SetEnabled(DisabledTrackMode aMode);
 
   // End event will be notified by calling methods of aListener. It is the
   // responsibility of the caller to remove aListener before it is destroyed.
   void AddMainThreadListener(MainThreadMediaTrackListener* aListener);
   // It's safe to call this even if aListener is not currently a listener;
   // the call will be ignored.
   void RemoveMainThreadListener(MainThreadMediaTrackListener* aListener) {
     MOZ_ASSERT(NS_IsMainThread());
@@ -406,31 +406,25 @@ class MediaTrack : public mozilla::Linke
   virtual void RemoveAllDirectListenersImpl() {}
   void RemoveAllResourcesAndListenersImpl();
 
   virtual void AddListenerImpl(already_AddRefed<MediaTrackListener> aListener);
   virtual void RemoveListenerImpl(MediaTrackListener* aListener);
   virtual void AddDirectListenerImpl(
       already_AddRefed<DirectMediaTrackListener> aListener);
   virtual void RemoveDirectListenerImpl(DirectMediaTrackListener* aListener);
-  virtual void SetDisabledTrackModeImpl(DisabledTrackMode aMode);
+  virtual void SetEnabledImpl(DisabledTrackMode aMode);
 
   void AddConsumer(MediaInputPort* aPort) { mConsumers.AppendElement(aPort); }
   void RemoveConsumer(MediaInputPort* aPort) {
     mConsumers.RemoveElement(aPort);
   }
   GraphTime StartTime() const { return mStartTime; }
   bool Ended() const { return mEnded; }
 
-  // The DisabledTrackMode after combining the explicit mode and that of the
-  // input, if any.
-  virtual DisabledTrackMode CombinedDisabledMode() const {
-    return mDisabledMode;
-  }
-
   template <class SegmentType>
   SegmentType* GetData() const {
     if (!mSegment) {
       return nullptr;
     }
     if (mSegment->GetType() != SegmentType::StaticType()) {
       return nullptr;
     }
@@ -527,20 +521,16 @@ class MediaTrack : public mozilla::Linke
     if (!mMainThreadEnded || mEndedNotificationSent) {
       return false;
     }
 
     mEndedNotificationSent = true;
     return true;
   }
 
-  // Notifies listeners and consumers of the change in disabled mode when the
-  // current combined mode is different from aMode.
-  void NotifyIfDisabledModeChangedFrom(DisabledTrackMode aOldMode);
-
   // This state is all initialized on the main thread but
   // otherwise modified only on the media graph thread.
 
   // Buffered data. The start of the buffer corresponds to mStartTime.
   // Conceptually the buffer contains everything this track has ever played,
   // but we forget some prefix of the buffered data to bound the space usage.
   // Note that this may be null for tracks that never contain data, like
   // non-external AudioNodeTracks.
@@ -676,17 +666,17 @@ class SourceMediaTrack : public MediaTra
   /**
    * Indicate that this track has ended. Do not do any more API calls affecting
    * this track.
    */
   void End();
 
   // Overriding allows us to hold the mMutex lock while changing the track
   // enable status
-  void SetDisabledTrackModeImpl(DisabledTrackMode aMode) override;
+  void SetEnabledImpl(DisabledTrackMode aMode) override;
 
   // Overriding allows us to ensure mMutex is locked while changing the track
   // enable status
   void ApplyTrackDisabling(MediaSegment* aSegment,
                            MediaSegment* aRawSegment = nullptr) override {
     mMutex.AssertCurrentThreadOwns();
     MediaTrack::ApplyTrackDisabling(aSegment, aRawSegment);
   }
@@ -966,19 +956,16 @@ class ProcessedMediaTrack : public Media
                             uint32_t aFlags) = 0;
   void SetAutoendImpl(bool aAutoend) { mAutoend = aAutoend; }
 
   // Only valid after MediaTrackGraphImpl::UpdateTrackOrder() has run.
   // A DelayNode is considered to break a cycle and so this will not return
   // true for echo loops, only for muted cycles.
   bool InMutedCycle() const { return mCycleMarker; }
 
-  // Used by ForwardedInputTrack to propagate the disabled mode along the graph.
-  virtual void OnInputDisabledModeChanged(DisabledTrackMode aMode) {}
-
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
     size_t amount = MediaTrack::SizeOfExcludingThis(aMallocSizeOf);
     // Not owned:
     // - mInputs elements
     // - mSuspendedInputs elements
     amount += mInputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
     amount += mSuspendedInputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
     return amount;
--- a/dom/media/VideoFrameConverter.h
+++ b/dom/media/VideoFrameConverter.h
@@ -134,32 +134,24 @@ class VideoFrameConverter {
         [self = RefPtr<VideoFrameConverter>(this), this, aTrackEnabled] {
           if (mTrackEnabled == aTrackEnabled) {
             return;
           }
           MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
                   ("VideoFrameConverter Track is now %s",
                    aTrackEnabled ? "enabled" : "disabled"));
           mTrackEnabled = aTrackEnabled;
-          if (!aTrackEnabled) {
-            // After disabling we immediately send a frame as black, so it can
-            // be seen quickly, even if no frames are flowing.
-            if (mLastFrameConverted) {
-              // This track has already seen frames so we re-send the last one
-              // as black.
-              ProcessVideoFrame(
-                  FrameToProcess{nullptr, TimeStamp::Now(),
-                                 gfx::IntSize(mLastFrameConverted->width(),
-                                              mLastFrameConverted->height()),
-                                 true});
-            } else {
-              // This track has not yet seen any frame. We make one up.
-              ProcessVideoFrame(FrameToProcess{nullptr, TimeStamp::Now(),
-                                               gfx::IntSize(640, 480), true});
-            }
+          if (!aTrackEnabled && mLastFrameConverted) {
+            // After disabling, we re-send the last frame as black in case the
+            // source had already stopped and no frame is coming soon.
+            ProcessVideoFrame(
+                FrameToProcess{nullptr, TimeStamp::Now(),
+                               gfx::IntSize(mLastFrameConverted->width(),
+                                            mLastFrameConverted->height()),
+                               true});
           }
         }));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
     Unused << rv;
   }
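
All VideoFrameConverter state above is confined to mTaskQueue; public setters only dispatch. The dispatch-and-assert idiom, sketched with a hypothetical member:

void SetSomeState(bool aValue) {
  nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
      "VideoFrameConverter::SetSomeState",
      [self = RefPtr<VideoFrameConverter>(this), aValue] {
        self->mSomeState = aValue;  // hypothetical task-queue-only member
      }));
  // Dispatch normally only fails during shutdown; surface that in debug.
  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
  Unused << rv;
}
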
 
   void AddListener(const RefPtr<VideoConverterListener>& aListener) {
     nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
--- a/dom/media/VideoOutput.h
+++ b/dom/media/VideoOutput.h
@@ -180,39 +180,22 @@ class VideoOutput : public DirectMediaTr
     mFrames.RemoveElementsAt(0, mFrames.Length() - 1);
     SendFrames();
     mFrames.ClearAndRetainStorage();
   }
   void NotifyEnabledStateChanged(MediaTrackGraph* aGraph,
                                  bool aEnabled) override {
     MutexAutoLock lock(mMutex);
     mEnabled = aEnabled;
-    DropPastFrames();
-    if (!mEnabled || mFrames.Length() > 1) {
-      // Re-send frames when disabling, as new frames may not arrive. When
-      // enabling we keep them black until new frames arrive, or re-send if we
-      // already have frames in the future. If we're disabling and there are no
-      // frames available yet, we invent one. Unfortunately with a hardcoded
-      // size.
-      //
-      // Since mEnabled will affect whether
-      // frames are real, or black, we assign new FrameIDs whenever we re-send
-      // frames after an mEnabled change.
-      for (auto& idChunkPair : mFrames) {
-        idChunkPair.first = mVideoFrameContainer->NewFrameID();
-      }
-      if (mFrames.IsEmpty()) {
-        VideoSegment v;
-        v.AppendFrame(nullptr, gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE,
-                      true, TimeStamp::Now());
-        mFrames.AppendElement(std::make_pair(mVideoFrameContainer->NewFrameID(),
-                                             *v.GetLastChunk()));
-      }
-      SendFramesEnsureLocked();
+    // Since mEnabled will affect whether frames are real or black, we assign
+    // new FrameIDs whenever this changes.
+    for (auto& idChunkPair : mFrames) {
+      idChunkPair.first = mVideoFrameContainer->NewFrameID();
     }
+    SendFramesEnsureLocked();
   }
 
   Mutex mMutex;
   TimeStamp mLastFrameTime;
   // Once the frame is forced to black, we initialize mBlackImage for use in any
   // following forced-black frames.
   RefPtr<Image> mBlackImage;
   bool mEnabled = true;
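
Why reassign FrameIDs above? A plausible reading (an assumption, not verified against VideoFrameContainer internals) is that consumers dedupe on FrameID, so re-sent chunks need fresh IDs to be repainted once mEnabled flips between real and black:

// Re-stamp queued frames so a re-send is not deduped after mEnabled flips.
void ReStampQueuedFrames() {
  for (auto& idChunkPair : mFrames) {
    idChunkPair.first = mVideoFrameContainer->NewFrameID();
  }
}
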
--- a/dom/media/gtest/TestVideoTrackEncoder.cpp
+++ b/dom/media/gtest/TestVideoTrackEncoder.cpp
@@ -1274,56 +1274,16 @@ TEST(VP8VideoTrackEncoder, DisableBetwee
 
   // [50ms, 100ms)
   EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->mDuration);
 
   // [100ms, 200ms)
   EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration);
 }
 
-// Test that an encoding which is disabled before the first frame becomes black
-// immediately.
-TEST(VP8VideoTrackEncoder, DisableBeforeFirstFrame)
-{
-  TestVP8TrackEncoder encoder;
-  YUVBufferGenerator generator;
-  generator.Init(mozilla::gfx::IntSize(640, 480));
-  nsTArray<RefPtr<EncodedFrame>> frames;
-  TimeStamp now = TimeStamp::Now();
-
-  // Disable the track at t=0.
-  // Pass a frame in at t=50ms.
-  // Enable the track at t=100ms.
-  // Stop encoding at t=200ms.
-  // Should yield 2 frames, 1 black [0, 100); 1 real [100, 200).
-
-  VideoSegment segment;
-  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
-                      PRINCIPAL_HANDLE_NONE, false,
-                      now + TimeDuration::FromMilliseconds(50));
-
-  encoder.SetStartOffset(now);
-  encoder.Disable(now);
-  encoder.AppendVideoSegment(std::move(segment));
-
-  encoder.Enable(now + TimeDuration::FromMilliseconds(100));
-  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
-  encoder.NotifyEndOfStream();
-  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
-  EXPECT_TRUE(encoder.IsEncodingComplete());
-
-  ASSERT_EQ(2UL, frames.Length());
-
-  // [0, 100ms)
-  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration);
-
-  // [100ms, 200ms)
-  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->mDuration);
-}
-
 // Test that an encoding which is enabled on a frame timestamp encodes
 // frames as expected.
 TEST(VP8VideoTrackEncoder, EnableOnFrameTime)
 {
   TestVP8TrackEncoder encoder;
   YUVBufferGenerator generator;
   generator.Init(mozilla::gfx::IntSize(640, 480));
   nsTArray<RefPtr<EncodedFrame>> frames;
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -542,17 +542,17 @@ AudioDestinationNode::WindowSuspendChang
       "AudioDestinationNode %p WindowSuspendChanged, "
       "aSuspend = %s\n",
       this, SuspendTypeToStr(aSuspend));
 
   mAudioChannelSuspended = suspended;
 
   DisabledTrackMode disabledMode =
       suspended ? DisabledTrackMode::SILENCE_BLACK : DisabledTrackMode::ENABLED;
-  mTrack->SetDisabledTrackMode(disabledMode);
+  mTrack->SetEnabled(disabledMode);
 
   AudioChannelService::AudibleState audible =
       aSuspend == nsISuspendedTypes::NONE_SUSPENDED
           ? AudioChannelService::AudibleState::eAudible
           : AudioChannelService::AudibleState::eNotAudible;
   if (mAudible != audible) {
     mAudible = audible;
     mAudioChannelAgent->NotifyStartedAudible(
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -719,21 +719,21 @@ class MediaPipelineTransmit::PipelineLis
   }
 
   void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) {
     MOZ_RELEASE_ASSERT(mConduit->type() == MediaSessionConduit::VIDEO);
     static_cast<VideoSessionConduit*>(mConduit.get())
         ->SendVideoFrame(aVideoFrame);
   }
 
+  void SetTrackEnabled(MediaStreamTrack* aTrack, bool aEnabled);
+
   // Implement MediaTrackListener
   void NotifyQueuedChanges(MediaTrackGraph* aGraph, TrackTime aOffset,
                            const MediaSegment& aQueuedMedia) override;
-  void NotifyEnabledStateChanged(MediaTrackGraph* aGraph,
-                                 bool aEnabled) override;
 
   // Implement DirectMediaTrackListener
   void NotifyRealtimeTrackData(MediaTrackGraph* aGraph, TrackTime aOffset,
                                const MediaSegment& aMedia) override;
   void NotifyDirectListenerInstalled(InstallationResult aResult) override;
   void NotifyDirectListenerUninstalled() override;
 
  private:
@@ -748,16 +748,39 @@ class MediaPipelineTransmit::PipelineLis
   // enabled is true if the media access control permits sending
   // actual content; when false you get black/silence
   mozilla::Atomic<bool> mEnabled;
 
   // Written and read on the MediaTrackGraph thread
   bool mDirectConnect;
 };
 
+// MediaStreamTrackConsumer inherits from SupportsWeakPtr, which is
+// main-thread-only.
+class MediaPipelineTransmit::PipelineListenerTrackConsumer
+    : public MediaStreamTrackConsumer {
+  virtual ~PipelineListenerTrackConsumer() { MOZ_ASSERT(NS_IsMainThread()); }
+
+  const RefPtr<PipelineListener> mListener;
+
+ public:
+  NS_INLINE_DECL_REFCOUNTING(PipelineListenerTrackConsumer)
+
+  explicit PipelineListenerTrackConsumer(RefPtr<PipelineListener> aListener)
+      : mListener(std::move(aListener)) {
+    MOZ_ASSERT(NS_IsMainThread());
+  }
+
+  // Implement MediaStreamTrackConsumer
+  void NotifyEnabledChanged(MediaStreamTrack* aTrack, bool aEnabled) override {
+    MOZ_ASSERT(NS_IsMainThread());
+    mListener->SetTrackEnabled(aTrack, aEnabled);
+  }
+};
+
 // Implements VideoConverterListener for MediaPipeline.
 //
 // We pass converted frames on to MediaPipelineTransmit::PipelineListener
 // where they are further forwarded to VideoConduit.
 // MediaPipelineTransmit calls Detach() during shutdown to ensure there is
 // no cyclic dependencies between us and PipelineListener.
 class MediaPipelineTransmit::VideoFrameFeeder : public VideoConverterListener {
  public:
@@ -794,16 +817,20 @@ MediaPipelineTransmit::MediaPipelineTran
     RefPtr<nsISerialEventTarget> aMainThread,
     RefPtr<nsISerialEventTarget> aStsThread, bool aIsVideo,
     RefPtr<MediaSessionConduit> aConduit)
     : MediaPipeline(aPc, std::move(aTransportHandler), DirectionType::TRANSMIT,
                     std::move(aMainThread), std::move(aStsThread),
                     std::move(aConduit)),
       mIsVideo(aIsVideo),
       mListener(new PipelineListener(mConduit)),
+      mTrackConsumer(
+          MakeAndAddRef<nsMainThreadPtrHolder<PipelineListenerTrackConsumer>>(
+              "MediaPipelineTransmit::mTrackConsumer",
+              MakeAndAddRef<PipelineListenerTrackConsumer>(mListener))),
       mFeeder(aIsVideo ? MakeAndAddRef<VideoFrameFeeder>(mListener)
                        : nullptr),  // For video we send frames to an
                                     // async VideoFrameConverter that
                                     // calls back to a VideoFrameFeeder
                                     // that feeds I420 frames to
                                     // VideoConduit.
       mTransmitting(false) {
   if (!IsVideo()) {
@@ -903,20 +930,20 @@ void MediaPipelineTransmit::Start() {
   // TODO(ekr@rtfm.com): Check for errors
   MOZ_LOG(
       gMediaPipelineLog, LogLevel::Debug,
       ("Attaching pipeline to track %p conduit type=%s", this,
        (mConduit->type() == MediaSessionConduit::AUDIO ? "audio" : "video")));
 
   mSendTrack->Resume();
 
-  mSendTrack->AddListener(mListener);
   if (mSendTrack->mType == MediaSegment::VIDEO) {
     mSendTrack->AddDirectListener(mListener);
   }
+  mSendTrack->AddListener(mListener);
 }
 
 bool MediaPipelineTransmit::IsVideo() const { return mIsVideo; }
 
 void MediaPipelineTransmit::UpdateSinkIdentity_m(
     const MediaStreamTrack* aTrack, nsIPrincipal* aPrincipal,
     const PeerIdentity* aSinkIdentity) {
   ASSERT_ON_THREAD(mMainThread);
@@ -946,17 +973,20 @@ void MediaPipelineTransmit::UpdateSinkId
   }
 
   mListener->SetEnabled(enableTrack);
 }
 
 void MediaPipelineTransmit::DetachMedia() {
   ASSERT_ON_THREAD(mMainThread);
   MOZ_ASSERT(!mTransmitting);
-  mDomTrack = nullptr;
+  if (mDomTrack) {
+    mDomTrack->RemoveConsumer(mTrackConsumer);
+    mDomTrack = nullptr;
+  }
   if (mSendPort) {
     mSendPort->Destroy();
     mSendPort = nullptr;
   }
   if (mSendTrack) {
     mSendTrack->Destroy();
     mSendTrack = nullptr;
   }
@@ -1001,16 +1031,19 @@ nsresult MediaPipelineTransmit::SetTrack
     std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
     MOZ_LOG(
         gMediaPipelineLog, LogLevel::Debug,
         ("Reattaching pipeline to track %p track %s conduit type: %s",
          &aDomTrack, track_id.c_str(),
          (mConduit->type() == MediaSessionConduit::AUDIO ? "audio" : "video")));
   }
 
+  if (mDomTrack) {
+    mDomTrack->RemoveConsumer(mTrackConsumer);
+  }
   if (mSendPort) {
     mSendPort->Destroy();
     mSendPort = nullptr;
   }
 
   if (aDomTrack && mDomTrack && !aDomTrack->Ended() && !mDomTrack->Ended() &&
       aDomTrack->Graph() != mDomTrack->Graph() && mSendTrack) {
     // Recreate the send track if the new stream resides in a different MTG.
@@ -1038,16 +1071,20 @@ nsresult MediaPipelineTransmit::SetTrack
       if (!mSendTrack) {
         // Create the send track when the first live track is set or when the
         // new track resides in a different MTG.
         SetSendTrack(mDomTrack->Graph()->CreateForwardedInputTrack(
             mDomTrack->GetTrack()->mType));
       }
       mSendPort = mSendTrack->AllocateInputPort(mDomTrack->GetTrack());
     }
+    mDomTrack->AddConsumer(mTrackConsumer);
+    if (mConverter) {
+      mConverter->SetTrackEnabled(mDomTrack->Enabled());
+    }
   }
 
   return NS_OK;
 }
 
 void MediaPipelineTransmit::SetSendTrack(
     RefPtr<ProcessedMediaTrack> aSendTrack) {
   MOZ_ASSERT(NS_IsMainThread());
@@ -1168,21 +1205,23 @@ void MediaPipelineTransmit::PipelineList
     rate = aGraph->GraphRate();
   } else {
     // When running tests, graph may be null. In that case use a default.
     rate = 16000;
   }
   NewData(aQueuedMedia, rate);
 }
 
-void MediaPipelineTransmit::PipelineListener::NotifyEnabledStateChanged(
-    MediaTrackGraph* aGraph, bool aEnabled) {
+void MediaPipelineTransmit::PipelineListener::SetTrackEnabled(
+    MediaStreamTrack* aTrack, bool aEnabled) {
+  MOZ_ASSERT(NS_IsMainThread());
   if (mConduit->type() != MediaSessionConduit::VIDEO) {
     return;
   }
+
   MOZ_ASSERT(mConverter);
   mConverter->SetTrackEnabled(aEnabled);
 }
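
With this backout, enabled state reaches the converter from the main thread via a MediaStreamTrackConsumer rather than from a graph-thread listener. A hedged sketch of an equivalent consumer; `EnabledConsumer` is illustrative, the real one being PipelineListenerTrackConsumer above:

class EnabledConsumer final : public MediaStreamTrackConsumer {
 public:
  NS_INLINE_DECL_REFCOUNTING(EnabledConsumer)
  explicit EnabledConsumer(RefPtr<VideoFrameConverter> aConverter)
      : mConverter(std::move(aConverter)) {}

  // MediaStreamTrackConsumer is SupportsWeakPtr-based: main thread only.
  void NotifyEnabledChanged(MediaStreamTrack* aTrack, bool aEnabled) override {
    MOZ_ASSERT(NS_IsMainThread());
    mConverter->SetTrackEnabled(aEnabled);
  }

 private:
  ~EnabledConsumer() = default;
  const RefPtr<VideoFrameConverter> mConverter;
};

// Attach mirrors SetTrack(); detach mirrors DetachMedia() above:
//   aDomTrack->AddConsumer(consumer);
//   mDomTrack->RemoveConsumer(consumer);
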
 
 void MediaPipelineTransmit::PipelineListener::NotifyDirectListenerInstalled(
     InstallationResult aResult) {
   MOZ_LOG(gMediaPipelineLog, LogLevel::Info,
           ("MediaPipeline::NotifyDirectListenerInstalled() listener=%p,"
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -305,28 +305,32 @@ class MediaPipelineTransmit : public Med
   // Replace a track with a different one.
   nsresult SetTrack(RefPtr<dom::MediaStreamTrack> aDomTrack);
 
   // Set the track whose data we will transmit. For internal and test use.
   void SetSendTrack(RefPtr<ProcessedMediaTrack> aSendTrack);
 
   // Separate classes to allow ref counting
   class PipelineListener;
+  class PipelineListenerTrackConsumer;
   class VideoFrameFeeder;
 
  protected:
   ~MediaPipelineTransmit();
 
   void SetDescription();
 
  private:
   void AsyncStart(const RefPtr<GenericPromise>& aPromise);
 
   const bool mIsVideo;
   const RefPtr<PipelineListener> mListener;
+  // Listens for changes in enabled state on the attached MediaStreamTrack, and
+  // notifies mListener.
+  const nsMainThreadPtrHandle<PipelineListenerTrackConsumer> mTrackConsumer;
   const RefPtr<VideoFrameFeeder> mFeeder;
   RefPtr<AudioProxyThread> mAudioProcessing;
   RefPtr<VideoFrameConverter> mConverter;
   RefPtr<dom::MediaStreamTrack> mDomTrack;
   // Input port connecting mDomTrack's MediaTrack to mSendTrack.
   RefPtr<MediaInputPort> mSendPort;
   // MediaTrack that we send over the network. This allows changing mDomTrack.
   RefPtr<ProcessedMediaTrack> mSendTrack;
--- a/modules/libpref/init/StaticPrefList.yaml
+++ b/modules/libpref/init/StaticPrefList.yaml
@@ -7400,23 +7400,16 @@
 
 # The getDisplayMedia method is always SecureContext regardless of the above two
 # prefs. But it is not implemented on Android, and can be turned off elsewhere.
 - name: media.getdisplaymedia.enabled
   type: bool
   value: @IS_NOT_ANDROID@
   mirror: always
 
-# Turn off any cameras (but not mics) while in the background. This is desirable
-# on mobile.
-- name: media.getusermedia.camera.background.mute.enabled
-  type: bool
-  value: @IS_ANDROID@
-  mirror: always
-
 # WebRTC prefs follow
 
 # Enables RTCPeerConnection support. Note that, when true, this pref enables
 # navigator.mediaDevices and getUserMedia() support as well.
 # See also media.navigator.enabled
 - name: media.peerconnection.enabled
   type: bool
   value: true
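
For reference, prefs declared here with `mirror: always` get generated C++ accessors (dots become underscores). A hedged sketch for the pref above:

#include "mozilla/StaticPrefs_media.h"

// Reads the mirrored value; no main-thread pref-service round trip.
static bool PeerConnectionPrefEnabled() {
  return mozilla::StaticPrefs::media_peerconnection_enabled();
}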