Backed out 2 changesets (bug 1553215) for mda failures on test_audioContextSuspendResumeClose.html.
author: Cosmin Sabou <csabou@mozilla.com>
date: Mon, 10 Jun 2019 20:48:26 +0300
changeset: 478111 bb2560cbaecfd954eb0f2c14948c95a03ff12f09
parent: 478110 3b0058b93c76c6050e40c3d2cea73b0cb633e430
child: 478112 3b2489b9f469cc76a60ca025fb289cfadd7250cd
push id: 113405
push user: nerli@mozilla.com
push date: Tue, 11 Jun 2019 03:22:35 +0000
treeherder: mozilla-inbound@e3bbbcf873c2
bugs: 1553215
milestone: 69.0a1
backs out: 71154c8ef814a3e6b10c9789325d4ed5529cf641, 80181b642f8195e0788009b4e1aa7af23e11009c
Backed out 2 changesets (bug 1553215) for mda failures on test_audioContextSuspendResumeClose.html.

Backed out changeset 71154c8ef814 (bug 1553215)
Backed out changeset 80181b642f81 (bug 1553215)
dom/media/webaudio/MediaElementAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.h
dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-ctor.html
testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-routing.html
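
For context, the two backed-out patches made MediaStreamAudioSourceNode lock onto a single track chosen by lexicographic track-id order (throwing InvalidStateError for a stream with no audio tracks), while MediaElementAudioSourceNode kept following track changes; the backout below restores the shared "attach to the first live track and follow changes" behavior for both node types. A minimal standalone C++ sketch of the reverted selection logic, reconstructed from the hunks below and using simplified stand-in types (Track, PickTrack, PickFirstLiveTrack) rather than the real Gecko classes:

    #include <algorithm>
    #include <optional>
    #include <string>
    #include <vector>

    // Simplified stand-in for an audio MediaStreamTrack.
    struct Track {
      std::string id;
      bool ended = false;
    };

    // Behavior introduced by the backed-out patches (see the enum removed from
    // MediaStreamAudioSourceNode.h below).
    enum class TrackChangeBehavior {
      LockOnTrackPicked,  // MediaStreamAudioSourceNode: pick once, never change
      FollowChanges       // MediaElementAudioSourceNode: re-pick as tracks change
    };

    // Rough equivalent of the removed AttachToRightTrack(): sort by id for a
    // stable choice; only FollowChanges skips ended tracks.
    std::optional<Track> PickTrack(std::vector<Track> tracks,
                                   TrackChangeBehavior behavior) {
      if (tracks.empty() &&
          behavior == TrackChangeBehavior::LockOnTrackPicked) {
        return std::nullopt;  // the real code throws InvalidStateError here
      }
      std::sort(tracks.begin(), tracks.end(),
                [](const Track& a, const Track& b) { return a.id < b.id; });
      for (const Track& t : tracks) {
        if (behavior == TrackChangeBehavior::FollowChanges && t.ended) {
          continue;
        }
        return t;
      }
      return std::nullopt;  // no usable track; the node is marked inactive
    }

    // Rough equivalent of the restored AttachToFirstTrack(): take the first
    // non-ended track in stream order, for both node types.
    std::optional<Track> PickFirstLiveTrack(const std::vector<Track>& tracks) {
      for (const Track& t : tracks) {
        if (t.ended) {
          continue;
        }
        return t;
      }
      return std::nullopt;
    }
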
--- a/dom/media/webaudio/MediaElementAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaElementAudioSourceNode.cpp
@@ -9,18 +9,17 @@
 #include "AudioDestinationNode.h"
 #include "nsIScriptError.h"
 #include "AudioNodeStream.h"
 
 namespace mozilla {
 namespace dom {
 
 MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* aContext)
-    : MediaStreamAudioSourceNode(aContext, TrackChangeBehavior::FollowChanges) {
-}
+    : MediaStreamAudioSourceNode(aContext) {}
 
 /* static */
 already_AddRefed<MediaElementAudioSourceNode>
 MediaElementAudioSourceNode::Create(
     AudioContext& aAudioContext, const MediaElementAudioSourceOptions& aOptions,
     ErrorResult& aRv) {
   if (aAudioContext.IsOffline()) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
@@ -8,17 +8,16 @@
 #include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeExternalInputStream.h"
 #include "AudioStreamTrack.h"
 #include "mozilla/dom/Document.h"
 #include "mozilla/CORSMode.h"
 #include "nsContentUtils.h"
 #include "nsIScriptError.h"
-#include "nsID.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(MediaStreamAudioSourceNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaStreamAudioSourceNode)
   tmp->Destroy();
@@ -33,21 +32,19 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaStreamAudioSourceNode)
 NS_INTERFACE_MAP_END_INHERITING(AudioNode)
 
 NS_IMPL_ADDREF_INHERITED(MediaStreamAudioSourceNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(MediaStreamAudioSourceNode, AudioNode)
 
-MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(
-    AudioContext* aContext, TrackChangeBehavior aBehavior)
+MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* aContext)
     : AudioNode(aContext, 2, ChannelCountMode::Max,
-                ChannelInterpretation::Speakers),
-      mBehavior(aBehavior) {}
+                ChannelInterpretation::Speakers) {}
 
 /* static */
 already_AddRefed<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::Create(
     AudioContext& aAudioContext, const MediaStreamAudioSourceOptions& aOptions,
     ErrorResult& aRv) {
   if (aAudioContext.IsOffline()) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
@@ -61,17 +58,17 @@ already_AddRefed<MediaStreamAudioSourceN
                                     NS_LITERAL_CSTRING("Web Audio"), document,
                                     nsContentUtils::eDOM_PROPERTIES,
                                     "MediaStreamAudioSourceNodeDifferentRate");
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
   }
 
   RefPtr<MediaStreamAudioSourceNode> node =
-      new MediaStreamAudioSourceNode(&aAudioContext, LockOnTrackPicked);
+      new MediaStreamAudioSourceNode(&aAudioContext);
 
   node->Init(aOptions.mMediaStream, aRv);
   if (aRv.Failed()) {
     return nullptr;
   }
 
   return node.forget();
 }
@@ -94,17 +91,17 @@ void MediaStreamAudioSourceNode::Init(DO
   AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this);
   mStream = AudioNodeExternalInputStream::Create(graph, engine);
   mInputStream->AddConsumerToKeepAlive(ToSupports(this));
 
   mInputStream->RegisterTrackListener(this);
   if (mInputStream->Active()) {
     NotifyActive();
   }
-  AttachToRightTrack(mInputStream, aRv);
+  AttachToFirstTrack(mInputStream);
 }
 
 void MediaStreamAudioSourceNode::Destroy() {
   if (mInputStream) {
     mInputStream->UnregisterTrackListener(this);
     mInputStream = nullptr;
   }
   DetachFromTrack();
@@ -135,82 +132,56 @@ void MediaStreamAudioSourceNode::DetachF
     mInputTrack = nullptr;
   }
   if (mInputPort) {
     mInputPort->Destroy();
     mInputPort = nullptr;
   }
 }
 
-static int AudioTrackCompare(const RefPtr<AudioStreamTrack>& aLhs,
-                             const RefPtr<AudioStreamTrack>& aRhs) {
-  nsAutoStringN<NSID_LENGTH> IDLhs;
-  nsAutoStringN<NSID_LENGTH> IDRhs;
-  aLhs->GetId(IDLhs);
-  aRhs->GetId(IDRhs);
-  return NS_ConvertUTF16toUTF8(IDLhs).Compare(
-      NS_ConvertUTF16toUTF8(IDRhs).get());
-}
-
-void MediaStreamAudioSourceNode::AttachToRightTrack(
-    const RefPtr<DOMMediaStream>& aMediaStream, ErrorResult& aRv) {
+void MediaStreamAudioSourceNode::AttachToFirstTrack(
+    const RefPtr<DOMMediaStream>& aMediaStream) {
   nsTArray<RefPtr<AudioStreamTrack>> tracks;
   aMediaStream->GetAudioTracks(tracks);
 
-  if (tracks.IsEmpty() && mBehavior == LockOnTrackPicked) {
-    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return;
-  }
-
-  // Sort the track to have a stable order, on their ID by lexicographic
-  // ordering on sequences of code unit values.
-  tracks.Sort(AudioTrackCompare);
-
   for (const RefPtr<AudioStreamTrack>& track : tracks) {
-    if (mBehavior == FollowChanges) {
-      if (track->Ended()) {
-        continue;
-      }
+    if (track->Ended()) {
+      continue;
     }
 
     AttachToTrack(track);
     MarkActive();
     return;
   }
 
   // There was no track available. We'll allow the node to be garbage collected.
   MarkInactive();
 }
 
 void MediaStreamAudioSourceNode::NotifyTrackAdded(
     const RefPtr<MediaStreamTrack>& aTrack) {
-  if (mBehavior != FollowChanges) {
-    return;
-  }
   if (mInputTrack) {
     return;
   }
 
   if (!aTrack->AsAudioStreamTrack()) {
     return;
   }
 
   AttachToTrack(aTrack);
 }
 
 void MediaStreamAudioSourceNode::NotifyTrackRemoved(
     const RefPtr<MediaStreamTrack>& aTrack) {
-  if (mBehavior == FollowChanges) {
-    if (aTrack != mInputTrack) {
-      return;
-    }
+  if (aTrack != mInputTrack) {
+    return;
+  }
 
-    DetachFromTrack();
-    AttachToRightTrack(mInputStream, IgnoreErrors());
-  }
+  DetachFromTrack();
+  AttachToFirstTrack(mInputStream);
 }
 
 void MediaStreamAudioSourceNode::NotifyActive() {
   MOZ_ASSERT(mInputStream);
   Context()->StartBlockedAudioContextIfAllowed();
 }
 
 /**
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.h
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.h
@@ -75,50 +75,34 @@ class MediaStreamAudioSourceNode
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
   // Attaches to aTrack so that its audio content will be used as input.
   void AttachToTrack(const RefPtr<MediaStreamTrack>& aTrack);
 
   // Detaches from the currently attached track if there is one.
   void DetachFromTrack();
 
-  // Attaches to the first audio track in the MediaStream, when the tracks are
-  // ordered by id.
-  void AttachToRightTrack(const RefPtr<DOMMediaStream>& aMediaStream,
-                          ErrorResult& aRv);
+  // Attaches to the first available audio track in aMediaStream.
+  void AttachToFirstTrack(const RefPtr<DOMMediaStream>& aMediaStream);
 
   // From DOMMediaStream::TrackListener.
   void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
   void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override;
   void NotifyActive() override;
 
   // From PrincipalChangeObserver<MediaStreamTrack>.
   void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;
 
-  // This allows implementing the correct behaviour for both
-  // MediaElementAudioSourceNode and MediaStreamAudioSourceNode, that have most
-  // of their behaviour shared.
-  enum TrackChangeBehavior {
-    // MediaStreamAudioSourceNode locks on the track it picked, and never
-    // changes.
-    LockOnTrackPicked,
-    // MediaElementAudioSourceNode can change track, depending on what the
-    // HTMLMediaElement does.
-    FollowChanges
-  };
-
  protected:
-  MediaStreamAudioSourceNode(AudioContext* aContext,
-                             TrackChangeBehavior aBehavior);
+  explicit MediaStreamAudioSourceNode(AudioContext* aContext);
   void Init(DOMMediaStream* aMediaStream, ErrorResult& aRv);
   virtual void Destroy();
   virtual ~MediaStreamAudioSourceNode();
 
  private:
-  const TrackChangeBehavior mBehavior;
   RefPtr<MediaInputPort> mInputPort;
   RefPtr<DOMMediaStream> mInputStream;
 
   // On construction we set this to the first audio track of mInputStream.
   RefPtr<MediaStreamTrack> mInputTrack;
 };
 
 }  // namespace dom
--- a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
@@ -7,54 +7,51 @@
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 SimpleTest.waitForExplicitFinish();
 
 var audio = new Audio("http://example.org:80/tests/dom/media/webaudio/test/small-shot.ogg");
-audio.load();
 var context = new AudioContext();
-audio.onloadedmetadata = function() {
-  var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
-  var sp = context.createScriptProcessor(2048, 1);
-  node.connect(sp);
-  var nonzeroSampleCount = 0;
-  var complete = false;
-  var iterationCount = 0;
-
-  // This test ensures we receive at least expectedSampleCount nonzero samples
-  function processSamples(e) {
-    if (complete) {
-      return;
-    }
+var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
+var sp = context.createScriptProcessor(2048, 1);
+node.connect(sp);
+var nonzeroSampleCount = 0;
+var complete = false;
+var iterationCount = 0;
 
-    if (iterationCount == 0) {
-      // Don't start playing the audio until the AudioContext stuff is connected
-      // and running.
-      audio.play();
-    }
-    ++iterationCount;
+// This test ensures we receive at least expectedSampleCount nonzero samples
+function processSamples(e) {
+  if (complete) {
+    return;
+  }
 
-    var buf = e.inputBuffer.getChannelData(0);
-    var nonzeroSamplesThisBuffer = 0;
-    for (var i = 0; i < buf.length; ++i) {
-      if (buf[i] != 0) {
-        ++nonzeroSamplesThisBuffer;
-      }
-    }
-    is(nonzeroSamplesThisBuffer, 0,
-       "Checking all samples are zero");
-    if (iterationCount >= 20) {
-      SimpleTest.finish();
-      complete = true;
+  if (iterationCount == 0) {
+    // Don't start playing the audio until the AudioContext stuff is connected
+    // and running.
+    audio.play();
+  }
+  ++iterationCount;
+
+  var buf = e.inputBuffer.getChannelData(0);
+  var nonzeroSamplesThisBuffer = 0;
+  for (var i = 0; i < buf.length; ++i) {
+    if (buf[i] != 0) {
+      ++nonzeroSamplesThisBuffer;
     }
   }
+  is(nonzeroSamplesThisBuffer, 0,
+     "Checking all samples are zero");
+  if (iterationCount >= 20) {
+    SimpleTest.finish();
+    complete = true;
+  }
+}
 
-  audio.oncanplaythrough = function() {
-    sp.onaudioprocess = processSamples;
-  };
-}
+audio.oncanplaythrough = function() {
+  sp.onaudioprocess = processSamples;
+};
 </script>
 </pre>
 </body>
 </html>
deleted file mode 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-ctor.html
+++ /dev/null
@@ -1,73 +0,0 @@
-<!DOCTYPE html>
-
-<html class="a">
-  <head>
-    <title>MediaStreamAudioSourceNode</title>
-    <script src="/resources/testharness.js"></script>
-    <script src="/resources/testharnessreport.js"></script>
-  </head>
-  <body class="a">
-    <div id="log"></div>
-    <script>
-      setup({explicit_done: true});
-      // Wait until the DOM is ready to be able to get a reference to the canvas
-      // element.
-      window.addEventListener("load", function() {
-        const ac = new AudioContext();
-        const emptyStream = new MediaStream();
-
-        test(function() {
-          assert_throws(
-            "InvalidStateError",
-            function() {
-              ac.createMediaStreamSource(emptyStream);
-            },
-            `A MediaStreamAudioSourceNode can only be constructed via the factory
-    method with a MediaStream that has at least one track of kind "audio"`
-          );
-        }, "MediaStreamAudioSourceNode created with factory method and MediaStream with no tracks");
-
-        test(function() {
-          assert_throws(
-            "InvalidStateError",
-            function() {
-              new MediaStreamAudioSourceNode(ac, { mediaStream: emptyStream });
-            },
-            `A MediaStreamAudioSourceNode can only be constructed via the constructor
-          with a MediaStream that has at least one track of kind "audio"`
-          );
-        }, "MediaStreamAudioSourceNode created with constructor and MediaStream with no tracks");
-
-        const canvas = document.querySelector("canvas");
-        const ctx = canvas.getContext("2d");
-        const videoOnlyStream = canvas.captureStream();
-
-        test(function() {
-          assert_throws(
-            "InvalidStateError",
-            function() {
-              ac.createMediaStreamSource(videoOnlyStream);
-            },
-            `A MediaStreamAudioSourceNode can only be constructed via the factory with a
-          MediaStream that has at least one track of kind "audio"`
-          );
-        }, `MediaStreamAudioSourceNode created with the factory method and MediaStream with only a video track`);
-
-        test(function() {
-          assert_throws(
-            "InvalidStateError",
-            function() {
-              new MediaStreamAudioSourceNode(ac, {
-                mediaStream: videoOnlyStream,
-              });
-            },
-            `A MediaStreamAudioSourceNode can only be constructed via the factory with a
-          MediaStream that has at least one track of kind "audio"`
-          );
-        }, `MediaStreamAudioSourceNode created with constructor and MediaStream with only a video track`);
-        done();
-      });
-    </script>
-  </body>
-  <canvas></canvas>
-</html>
deleted file mode 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-routing.html
+++ /dev/null
@@ -1,134 +0,0 @@
-<!DOCTYPE html>
-
-<html class="a">
-  <head>
-    <title>MediaStreamAudioSourceNode</title>
-    <script src="/resources/testharness.js"></script>
-    <script src="/resources/testharnessreport.js"></script>
-  </head>
-  <body class="a">
-    <div id="log"></div>
-    <script>
-      function binIndexForFrequency(frequency, analyser) {
-        return (
-          1 +
-          Math.round(
-            (frequency * analyser.fftSize) / analyser.context.sampleRate
-          )
-        );
-      }
-
-      const t = async_test(
-        "MediaStreamAudioSourceNode captures the right track."
-      );
-      const ac = new AudioContext();
-      // Test that the right track is captured. Set up a MediaStream that has two
-      // tracks, one with a tone at 100Hz and one with a tone at 1000Hz.
-      const dest0 = ac.createMediaStreamDestination();
-      const dest1 = ac.createMediaStreamDestination();
-      const osc0 = ac.createOscillator();
-      const osc1 = ac.createOscillator();
-      osc0.frequency.value = 100;
-      osc1.frequency.value = 1000;
-      osc0.connect(dest0);
-      osc1.connect(dest1);
-      osc0.start(0);
-      osc1.start(0);
-      const track0 = dest0.stream.getAudioTracks()[0];
-      const track0id = track0.id;
-      const track1 = dest1.stream.getAudioTracks()[0];
-      const track1id = track1.id;
-
-      let ids = [track0id, track1id];
-      ids.sort();
-      let targetFrequency;
-      let otherFrequency;
-      if (ids[0] == track0id) {
-        targetFrequency = 100;
-        otherFrequency = 1000;
-      } else {
-        targetFrequency = 1000;
-        otherFrequency = 100;
-      }
-
-      let twoTrackMediaStream = new MediaStream();
-      twoTrackMediaStream.addTrack(track0);
-      twoTrackMediaStream.addTrack(track1);
-
-      const twoTrackSource = ac.createMediaStreamSource(twoTrackMediaStream);
-      const analyser = ac.createAnalyser();
-
-      twoTrackSource.connect(analyser);
-
-      const indexToCheckForHighEnergy = binIndexForFrequency(
-        targetFrequency,
-        analyser
-      );
-      const indexToCheckForLowEnergy = binIndexForFrequency(
-        otherFrequency,
-        analyser
-      );
-      let frequencyData = new Float32Array(1024);
-      let checkCount = 0;
-      let numberOfRemovals = 0;
-      let stopped = false;
-      function analyse() {
-        analyser.getFloatFrequencyData(frequencyData);
-        // there should be high energy in the right bin, higher than 40dbfs because
-        // it's supposed to be a sine wave at 0dbfs
-        if (frequencyData[indexToCheckForHighEnergy] > -40 && !stopped) {
-          assert_true(true, "Correct track routed to the AudioContext.");
-          checkCount++;
-        }
-        if (stopped && frequencyData[indexToCheckForHighEnergy] < -40) {
-          assert_true(
-            true,
-            `After stopping the track, low energy is found in the
-              same bin`
-          );
-          checkCount++;
-        }
-        // Don't assert(false) immediately here if the bin is still higher than
-        // -40db the analyzer node has a window and it's expecte that it takes some
-        // time for the volume of this bin to decrease.
-        if (frequencyData[indexToCheckForLowEnergy] < -80) {
-          assert_true(true, "Correct track routed to the AudioContext.");
-        } else {
-          assert_true(
-            false,
-            "Other track seem to be routed to the AudioContext?"
-          );
-        }
-        if (checkCount > 5 && checkCount < 20) {
-          twoTrackMediaStream.getAudioTracks().forEach(track => {
-            if (track.id == ids[0]) {
-              numberOfRemovals++;
-              window.removedTrack = track;
-              twoTrackMediaStream.removeTrack(track);
-            }
-          });
-          assert_true(
-            numberOfRemovals == 1,
-            `The mediastreamtrack can only be
-        removed once from the mediastream`
-          );
-        } else if (checkCount >= 20 && checkCount < 30) {
-          window.removedTrack.stop();
-          stopped = true;
-        } else if (checkCount >= 30) {
-          assert_true(
-            numberOfRemovals == 1,
-            `After removing the track from the
-        mediastream, it's still routed to the graph.`
-          );
-          // After some time, consider that it worked.
-          t.done();
-          return;
-        }
-
-        t.step_timeout(analyse, 100);
-      }
-      t.step_timeout(analyse, 100);
-    </script>
-  </body>
-</html>