Bug 1553215 - Implement the correct behaviour for MediaStreamAudioSource. r=karlt,pehrsons
authorPaul Adenot <paul@paul.cx>
Wed, 19 Jun 2019 23:06:33 +0000
changeset 479269 85228e5172e3c45a2e4a7397e922d79dc2a94221
parent 479268 d5a6a97abef4df56fc7ed27fc60395d6d62c1a6f
child 479270 de9be674a6db6949aae3fa523a9999282c086055
push id36177
push userrmaries@mozilla.com
push dateThu, 20 Jun 2019 09:46:31 +0000
treeherdermozilla-central@a440f0629814 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerskarlt, pehrsons
bugs1553215
milestone69.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1553215 - Implement the correct behaviour for MediaStreamAudioSource. r=karlt,pehrsons It should pick and lock to the right track, regardless of whether it's still in the MediaStream. The test fix is because we don't expose tracks until the HTMLMediaElement has started loading, I think. This aligns with a couple of spec changes: - https://github.com/WebAudio/web-audio-api/issues/264 - https://github.com/WebAudio/web-audio-api/pull/1829/files and also throws when no valid tracks are found in the MediaStream, either because they are all video tracks or because it has no tracks. Differential Revision: https://phabricator.services.mozilla.com/D32176
dom/media/webaudio/MediaElementAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.h
dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
--- a/dom/media/webaudio/MediaElementAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaElementAudioSourceNode.cpp
@@ -9,17 +9,18 @@
 #include "AudioDestinationNode.h"
 #include "nsIScriptError.h"
 #include "AudioNodeStream.h"
 
 namespace mozilla {
 namespace dom {
 
 MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* aContext)
-    : MediaStreamAudioSourceNode(aContext) {}
+    : MediaStreamAudioSourceNode(aContext, TrackChangeBehavior::FollowChanges) {
+}
 
 /* static */
 already_AddRefed<MediaElementAudioSourceNode>
 MediaElementAudioSourceNode::Create(
     AudioContext& aAudioContext, const MediaElementAudioSourceOptions& aOptions,
     ErrorResult& aRv) {
   if (aAudioContext.IsOffline()) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
@@ -8,16 +8,17 @@
 #include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeExternalInputStream.h"
 #include "AudioStreamTrack.h"
 #include "mozilla/dom/Document.h"
 #include "mozilla/CORSMode.h"
 #include "nsContentUtils.h"
 #include "nsIScriptError.h"
+#include "nsID.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(MediaStreamAudioSourceNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaStreamAudioSourceNode)
   tmp->Destroy();
@@ -32,19 +33,21 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaStreamAudioSourceNode)
 NS_INTERFACE_MAP_END_INHERITING(AudioNode)
 
 NS_IMPL_ADDREF_INHERITED(MediaStreamAudioSourceNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(MediaStreamAudioSourceNode, AudioNode)
 
-MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* aContext)
+MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(
+    AudioContext* aContext, TrackChangeBehavior aBehavior)
     : AudioNode(aContext, 2, ChannelCountMode::Max,
-                ChannelInterpretation::Speakers) {}
+                ChannelInterpretation::Speakers),
+      mBehavior(aBehavior) {}
 
 /* static */
 already_AddRefed<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::Create(
     AudioContext& aAudioContext, const MediaStreamAudioSourceOptions& aOptions,
     ErrorResult& aRv) {
   if (aAudioContext.IsOffline()) {
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
@@ -58,17 +61,17 @@ already_AddRefed<MediaStreamAudioSourceN
                                     NS_LITERAL_CSTRING("Web Audio"), document,
                                     nsContentUtils::eDOM_PROPERTIES,
                                     "MediaStreamAudioSourceNodeDifferentRate");
     aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
     return nullptr;
   }
 
   RefPtr<MediaStreamAudioSourceNode> node =
-      new MediaStreamAudioSourceNode(&aAudioContext);
+      new MediaStreamAudioSourceNode(&aAudioContext, LockOnTrackPicked);
 
   node->Init(aOptions.mMediaStream, aRv);
   if (aRv.Failed()) {
     return nullptr;
   }
 
   return node.forget();
 }
@@ -91,17 +94,17 @@ void MediaStreamAudioSourceNode::Init(DO
   AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this);
   mStream = AudioNodeExternalInputStream::Create(graph, engine);
   mInputStream->AddConsumerToKeepAlive(ToSupports(this));
 
   mInputStream->RegisterTrackListener(this);
   if (mInputStream->Active()) {
     NotifyActive();
   }
-  AttachToFirstTrack(mInputStream);
+  AttachToRightTrack(mInputStream, aRv);
 }
 
 void MediaStreamAudioSourceNode::Destroy() {
   if (mInputStream) {
     mInputStream->UnregisterTrackListener(this);
     mInputStream = nullptr;
   }
   DetachFromTrack();
@@ -132,56 +135,82 @@ void MediaStreamAudioSourceNode::DetachF
     mInputTrack = nullptr;
   }
   if (mInputPort) {
     mInputPort->Destroy();
     mInputPort = nullptr;
   }
 }
 
-void MediaStreamAudioSourceNode::AttachToFirstTrack(
-    const RefPtr<DOMMediaStream>& aMediaStream) {
+static int AudioTrackCompare(const RefPtr<AudioStreamTrack>& aLhs,
+                             const RefPtr<AudioStreamTrack>& aRhs) {
+  nsAutoStringN<NSID_LENGTH> IDLhs;
+  nsAutoStringN<NSID_LENGTH> IDRhs;
+  aLhs->GetId(IDLhs);
+  aRhs->GetId(IDRhs);
+  return NS_ConvertUTF16toUTF8(IDLhs).Compare(
+      NS_ConvertUTF16toUTF8(IDRhs).get());
+}
+
+void MediaStreamAudioSourceNode::AttachToRightTrack(
+    const RefPtr<DOMMediaStream>& aMediaStream, ErrorResult& aRv) {
   nsTArray<RefPtr<AudioStreamTrack>> tracks;
   aMediaStream->GetAudioTracks(tracks);
 
+  if (tracks.IsEmpty() && mBehavior == LockOnTrackPicked) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return;
+  }
+
+  // Sort the track to have a stable order, on their ID by lexicographic
+  // ordering on sequences of code unit values.
+  tracks.Sort(AudioTrackCompare);
+
   for (const RefPtr<AudioStreamTrack>& track : tracks) {
-    if (track->Ended()) {
-      continue;
+    if (mBehavior == FollowChanges) {
+      if (track->Ended()) {
+        continue;
+      }
     }
 
     AttachToTrack(track);
     MarkActive();
     return;
   }
 
   // There was no track available. We'll allow the node to be garbage collected.
   MarkInactive();
 }
 
 void MediaStreamAudioSourceNode::NotifyTrackAdded(
     const RefPtr<MediaStreamTrack>& aTrack) {
+  if (mBehavior != FollowChanges) {
+    return;
+  }
   if (mInputTrack) {
     return;
   }
 
   if (!aTrack->AsAudioStreamTrack()) {
     return;
   }
 
   AttachToTrack(aTrack);
 }
 
 void MediaStreamAudioSourceNode::NotifyTrackRemoved(
     const RefPtr<MediaStreamTrack>& aTrack) {
-  if (aTrack != mInputTrack) {
-    return;
+  if (mBehavior == FollowChanges) {
+    if (aTrack != mInputTrack) {
+      return;
+    }
+
+    DetachFromTrack();
+    AttachToRightTrack(mInputStream, IgnoreErrors());
   }
-
-  DetachFromTrack();
-  AttachToFirstTrack(mInputStream);
 }
 
 void MediaStreamAudioSourceNode::NotifyActive() {
   MOZ_ASSERT(mInputStream);
   Context()->StartBlockedAudioContextIfAllowed();
 }
 
 /**
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.h
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.h
@@ -75,34 +75,50 @@ class MediaStreamAudioSourceNode
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
   // Attaches to aTrack so that its audio content will be used as input.
   void AttachToTrack(const RefPtr<MediaStreamTrack>& aTrack);
 
   // Detaches from the currently attached track if there is one.
   void DetachFromTrack();
 
-  // Attaches to the first available audio track in aMediaStream.
-  void AttachToFirstTrack(const RefPtr<DOMMediaStream>& aMediaStream);
+  // Attaches to the first audio track in the MediaStream, when the tracks are
+  // ordered by id.
+  void AttachToRightTrack(const RefPtr<DOMMediaStream>& aMediaStream,
+                          ErrorResult& aRv);
 
   // From DOMMediaStream::TrackListener.
   void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
   void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override;
   void NotifyActive() override;
 
   // From PrincipalChangeObserver<MediaStreamTrack>.
   void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;
 
+  // This allows implementing the correct behaviour for both
+  // MediaElementAudioSourceNode and MediaStreamAudioSourceNode, that have most
+  // of their behaviour shared.
+  enum TrackChangeBehavior {
+    // MediaStreamAudioSourceNode locks on the track it picked, and never
+    // changes.
+    LockOnTrackPicked,
+    // MediaElementAudioSourceNode can change track, depending on what the
+    // HTMLMediaElement does.
+    FollowChanges
+  };
+
  protected:
-  explicit MediaStreamAudioSourceNode(AudioContext* aContext);
+  MediaStreamAudioSourceNode(AudioContext* aContext,
+                             TrackChangeBehavior aBehavior);
   void Init(DOMMediaStream* aMediaStream, ErrorResult& aRv);
   virtual void Destroy();
   virtual ~MediaStreamAudioSourceNode();
 
  private:
+  const TrackChangeBehavior mBehavior;
   RefPtr<MediaInputPort> mInputPort;
   RefPtr<DOMMediaStream> mInputStream;
 
   // On construction we set this to the first audio track of mInputStream.
   RefPtr<MediaStreamTrack> mInputTrack;
 };
 
 }  // namespace dom
--- a/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
+++ b/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
@@ -28,17 +28,17 @@ function tryToCreateNodeOnClosedContext(
     { name: "createChannelSplitter" },
     { name: "createChannelMerger" },
     { name: "createDynamicsCompressor" },
     { name: "createOscillator" },
     { name: "createMediaElementSource",
       args: [new Audio()],
       onOfflineAudioContext: false },
     { name: "createMediaStreamSource",
-      args: [new Audio().mozCaptureStream()],
+      args: [(new AudioContext()).createMediaStreamDestination().stream],
       onOfflineAudioContext: false } ].forEach(function(e) {
 
       if (e.onOfflineAudioContext == false &&
           ctx instanceof OfflineAudioContext) {
         return;
       }
 
       expectNoException(function() {
@@ -69,17 +69,16 @@ function tryLegalOpeerationsOnClosedCont
   ].forEach(function(e) {
     expectNoException(function() {
       ctx[e.name].apply(ctx, e.args);
     });
   });
   loadFile("ting-44.1k-1ch.ogg", function(buf) {
     ctx.decodeAudioData(buf).then(function(decodedBuf) {
       ok(true, "decodeAudioData on a closed context should work, it did.")
-      todo(false, "0 " + (ctx instanceof OfflineAudioContext ? "Offline" : "Realtime"));
       finish();
     }).catch(function(e){
       ok(false, "decodeAudioData on a closed context should work, it did not");
       finish();
     });
   });
 }
 
@@ -116,27 +115,24 @@ function testMultiContextOutput() {
       var input = e.inputBuffer.getChannelData(0);
       var silent = true;
       for (var i = 0; i < input.length; i++) {
         if (input[i] != 0.0) {
           silent = false;
         }
       }
 
-      todo(false, "input buffer is " + (silent ? "silent" : "noisy"));
-
       if (silent) {
         silentBuffersInARow++;
         if (silentBuffersInARow == 10) {
           ok(true,
               "MediaStreams produce silence when their input is blocked.");
           sp2.onaudioprocess = null;
           ac1.close();
           ac2.close();
-          todo(false,"1");
           finish();
         }
       } else {
         is(silentBuffersInARow, 0,
             "No non silent buffer inbetween silent buffers.");
       }
     }
 
@@ -177,17 +173,16 @@ function testMultiContextInput() {
         var delta = Math.abs(inputBuffer[1] - sp2.value),
             theoreticalIncrement = 2048 * 3 * Math.PI * 2 * osc1.frequency.value / ac1.sampleRate;
         ok(delta >= theoreticalIncrement,
             "Buffering did not occur when the context was suspended (delta:" + delta + " increment: " + theoreticalIncrement+")");
         ac1.close();
         ac2.close();
         sp1.onaudioprocess = null;
         sp2.onaudioprocess = null;
-        todo(false, "2");
         finish();
       }
     }
 
     sp2.onaudioprocess = function(e) {
       var inputBuffer = e.inputBuffer.getChannelData(0);
       sp2.value = inputBuffer[inputBuffer.length - 1];
       ac2.suspend().then(function() {
@@ -226,17 +221,16 @@ function testScriptProcessNodeSuspended(
             ac.resume().then(function() {
               remainingIterations = 30;
               afterResume = true;
             });
           });
         }
       } else {
         sp.onaudioprocess = null;
-        todo(false,"3");
         finish();
       }
     }
   }
   sp.connect(ac.destination);
 }
 
 // Take an AudioContext, make sure it switches to running when the audio starts
--- a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
@@ -7,51 +7,54 @@
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 SimpleTest.waitForExplicitFinish();
 
 var audio = new Audio("http://example.org:80/tests/dom/media/webaudio/test/small-shot.ogg");
+audio.load();
 var context = new AudioContext();
-var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
-var sp = context.createScriptProcessor(2048, 1);
-node.connect(sp);
-var nonzeroSampleCount = 0;
-var complete = false;
-var iterationCount = 0;
+audio.onloadedmetadata = function() {
+  var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
+  var sp = context.createScriptProcessor(2048, 1);
+  node.connect(sp);
+  var nonzeroSampleCount = 0;
+  var complete = false;
+  var iterationCount = 0;
+
+  // This test ensures we receive at least expectedSampleCount nonzero samples
+  function processSamples(e) {
+    if (complete) {
+      return;
+    }
 
-// This test ensures we receive at least expectedSampleCount nonzero samples
-function processSamples(e) {
-  if (complete) {
-    return;
+    if (iterationCount == 0) {
+      // Don't start playing the audio until the AudioContext stuff is connected
+      // and running.
+      audio.play();
+    }
+    ++iterationCount;
+
+    var buf = e.inputBuffer.getChannelData(0);
+    var nonzeroSamplesThisBuffer = 0;
+    for (var i = 0; i < buf.length; ++i) {
+      if (buf[i] != 0) {
+        ++nonzeroSamplesThisBuffer;
+      }
+    }
+    is(nonzeroSamplesThisBuffer, 0,
+       "Checking all samples are zero");
+    if (iterationCount >= 20) {
+      SimpleTest.finish();
+      complete = true;
+    }
   }
 
-  if (iterationCount == 0) {
-    // Don't start playing the audio until the AudioContext stuff is connected
-    // and running.
-    audio.play();
-  }
-  ++iterationCount;
-
-  var buf = e.inputBuffer.getChannelData(0);
-  var nonzeroSamplesThisBuffer = 0;
-  for (var i = 0; i < buf.length; ++i) {
-    if (buf[i] != 0) {
-      ++nonzeroSamplesThisBuffer;
-    }
-  }
-  is(nonzeroSamplesThisBuffer, 0,
-     "Checking all samples are zero");
-  if (iterationCount >= 20) {
-    SimpleTest.finish();
-    complete = true;
-  }
+  audio.oncanplaythrough = function() {
+    sp.onaudioprocess = processSamples;
+  };
 }
-
-audio.oncanplaythrough = function() {
-  sp.onaudioprocess = processSamples;
-};
 </script>
 </pre>
 </body>
 </html>