Bug 1156472 - Part 10 - Test AudioCaptureStream. r=pehrsons
authorPaul Adenot <paul@paul.cx>
Fri, 24 Jul 2015 14:28:17 +0200
changeset 254777 092dcf4d57f208d1703f8afd91c7154acb43b6d7
parent 254776 235175205cdecfd3d56f485a4a8784a658c024e9
child 254778 a34909cbfb37fa90f9fec51650a5865ce920a291
push id16720
push userryanvm@gmail.com
push dateMon, 27 Jul 2015 19:45:38 +0000
treeherderb2g-inbound@13354b414396 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerspehrsons
bugs1156472
milestone42.0a1
Bug 1156472 - Part 10 - Test AudioCaptureStream. r=pehrsons
dom/media/tests/mochitest/head.js
dom/media/tests/mochitest/mochitest.ini
dom/media/tests/mochitest/pc.js
dom/media/tests/mochitest/test_getUserMedia_audioCapture.html
dom/media/tests/mochitest/test_peerConnection_replaceTrack.html
dom/media/tests/mochitest/test_peerConnection_webAudio.html
--- a/dom/media/tests/mochitest/head.js
+++ b/dom/media/tests/mochitest/head.js
@@ -15,16 +15,124 @@ try {
   dump('TEST DEVICES: Using media devices:\n');
   dump('audio: ' + audioDevice + '\nvideo: ' + videoDevice + '\n');
   FAKE_ENABLED = false;
 } catch (e) {
   dump('TEST DEVICES: No test devices found (in media.{audio,video}_loopback_dev, using fake streams.\n');
   FAKE_ENABLED = true;
 }
 
+/**
+ * This class provides helpers around analysing the audio content in a stream
+ * using WebAudio AnalyserNodes.
+ *
+ * @constructor
+ * @param {object} ac an AudioContext from which to create the analysis nodes.
+ * @param {object} stream a MediaStream object whose audio track we shall analyse.
+ */
+function AudioStreamAnalyser(ac, stream) {
+  if (stream.getAudioTracks().length === 0) {
+    throw new Error("No audio track in stream");
+  }
+  this.audioContext = ac;
+  this.stream = stream;
+  this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
+  this.analyser = this.audioContext.createAnalyser();
+  this.sourceNode.connect(this.analyser);
+  this.data = new Uint8Array(this.analyser.frequencyBinCount);
+}
+
+AudioStreamAnalyser.prototype = {
+  /**
+   * Get an array of frequency domain data for our stream's audio track.
+   *
+   * @returns {Uint8Array} A Uint8Array containing the frequency domain data.
+   */
+  getByteFrequencyData: function() {
+    this.analyser.getByteFrequencyData(this.data);
+    return this.data;
+  },
+
+  /**
+   * Append a canvas to the DOM where the frequency data are drawn.
+   * Useful to debug tests.
+   */
+  enableDebugCanvas: function() {
+    var cvs = document.createElement("canvas");
+    document.getElementById("content").appendChild(cvs);
+
+    // Easy: 1px per bin
+    cvs.width = this.analyser.frequencyBinCount;
+    cvs.height = 256;
+    cvs.style.border = "1px solid red";
+
+    var c = cvs.getContext('2d');
+
+    var self = this;
+    function render() {
+      c.clearRect(0, 0, cvs.width, cvs.height);
+      var array = self.getByteFrequencyData();
+      for (var i = 0; i < array.length; i++) {
+        c.fillRect(i, (256 - (array[i])), 1, 256);
+      }
+      requestAnimationFrame(render);
+    }
+    requestAnimationFrame(render);
+  },
+
+  /**
+   * Return a Promise, that will be resolved when the function passed as
+   * argument, when called, returns true (meaning the analysis was a
+   * success).
+   *
+   * @param {function} analysisFunction
+   *        A function that performs an analysis, and returns true if the
+   *        analysis was a success (i.e. it found what it was looking for)
+   */
+  waitForAnalysisSuccess: function(analysisFunction) {
+    var self = this;
+    return new Promise((resolve, reject) => {
+      function analysisLoop() {
+        var success = analysisFunction(self.getByteFrequencyData());
+        if (success) {
+          resolve();
+          return;
+        }
+        // else, we need more time
+        requestAnimationFrame(analysisLoop);
+      }
+      analysisLoop();
+    });
+  },
+
+  /**
+   * Return the FFT bin index for a given frequency.
+   *
+   * @param {double} frequency
+   *        The frequency for which to return the bin number.
+   * @returns {integer} the index of the bin in the FFT array.
+   */
+  binIndexForFrequency: function(frequency) {
+    return 1 + Math.round(frequency *
+                          this.analyser.fftSize /
+                          this.audioContext.sampleRate);
+  },
+
+  /**
+   * Reverse operation, get the frequency for a bin index.
+   *
+   * @param {integer} index an index in an FFT array
+   * @returns {double} the frequency for this bin
+   */
+  frequencyForBinIndex: function(index) {
+    return (index - 1) *
+           this.audioContext.sampleRate /
+           this.analyser.fftSize;
+  }
+};
 
 /**
  * Create the necessary HTML elements for head and body as used by Mochitests
  *
  * @param {object} meta
  *        Meta information of the test
  * @param {string} meta.title
  *        Description of the test
@@ -131,17 +239,20 @@ function setupEnvironment() {
       ['media.peerconnection.enabled', true],
       ['media.peerconnection.identity.enabled', true],
       ['media.peerconnection.identity.timeout', 120000],
       ['media.peerconnection.ice.stun_client_maximum_transmits', 14],
       ['media.peerconnection.ice.trickle_grace_period', 30000],
       ['media.navigator.permission.disabled', true],
       ['media.navigator.streams.fake', FAKE_ENABLED],
       ['media.getusermedia.screensharing.enabled', true],
-      ['media.getusermedia.screensharing.allowed_domains', "mochi.test"]
+      ['media.getusermedia.screensharing.allowed_domains', "mochi.test"],
+      ['media.getusermedia.audiocapture.enabled', true],
+      ['media.useAudioChannelService', true],
+      ['media.recorder.audio_node.enabled', true]
     ]
   }, setTestOptions);
 
   // We don't care about waiting for this to complete, we just want to ensure
   // that we don't build up a huge backlog of GC work.
   SpecialPowers.exactGC(window);
 }
 
--- a/dom/media/tests/mochitest/mochitest.ini
+++ b/dom/media/tests/mochitest/mochitest.ini
@@ -25,16 +25,17 @@ skip-if = toolkit == 'gonk' || buildapp 
 [test_dataChannel_basicDataOnly.html]
 [test_dataChannel_basicVideo.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
 [test_dataChannel_bug1013809.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g emulator seems to be too slow (Bug 1016498 and 1008080)
 [test_dataChannel_noOffer.html]
 [test_enumerateDevices.html]
 skip-if = buildapp == 'mulet'
+[test_getUserMedia_audioCapture.html]
 [test_getUserMedia_basicAudio.html]
 skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
 [test_getUserMedia_basicVideo.html]
 skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
 [test_getUserMedia_basicVideo_playAfterLoadedmetadata.html]
 skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
 [test_getUserMedia_basicScreenshare.html]
 skip-if = buildapp == 'mulet' || buildapp == 'b2g' || toolkit == 'android' # no screenshare on b2g/android # Bug 1141029 Mulet parity with B2G Desktop for TC
--- a/dom/media/tests/mochitest/pc.js
+++ b/dom/media/tests/mochitest/pc.js
@@ -638,49 +638,16 @@ DataChannelWrapper.prototype = {
    */
   toString: function() {
     return "DataChannelWrapper (" + this._pc.label + '_' + this._channel.label + ")";
   }
 };
 
 
 /**
- * This class provides helpers around analysing the audio content in a stream
- * using WebAudio AnalyserNodes.
- *
- * @constructor
- * @param {object} stream
- *                 A MediaStream object whose audio track we shall analyse.
- */
-function AudioStreamAnalyser(stream) {
-  if (stream.getAudioTracks().length === 0) {
-    throw new Error("No audio track in stream");
-  }
-  this.stream = stream;
-  this.audioContext = new AudioContext();
-  this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
-  this.analyser = this.audioContext.createAnalyser();
-  this.sourceNode.connect(this.analyser);
-  this.data = new Uint8Array(this.analyser.frequencyBinCount);
-}
-
-AudioStreamAnalyser.prototype = {
-  /**
-   * Get an array of frequency domain data for our stream's audio track.
-   *
-   * @returns {array} A Uint8Array containing the frequency domain data.
-   */
-  getByteFrequencyData: function() {
-    this.analyser.getByteFrequencyData(this.data);
-    return this.data;
-  }
-};
-
-
-/**
  * This class acts as a wrapper around a PeerConnection instance.
  *
  * @constructor
  * @param {string} label
  *        Description for the peer connection instance
  * @param {object} configuration
  *        Configuration for the peer connection instance
  */
@@ -1554,30 +1521,30 @@ PeerConnectionWrapper.prototype = {
    * audio data in the frequency domain.
    *
    * @param {object} from
    *        A PeerConnectionWrapper whose audio RTPSender we use as source for
    *        the audio flow check.
    * @returns {Promise}
    *        A promise that resolves when we're receiving the tone from |from|.
    */
-  checkReceivingToneFrom : function(from) {
+  checkReceivingToneFrom : function(audiocontext, from) {
     var inputElem = from.localMediaElements[0];
 
     // As input we use the stream of |from|'s first available audio sender.
     var inputSenderTracks = from._pc.getSenders().map(sn => sn.track);
     var inputAudioStream = from._pc.getLocalStreams()
       .find(s => s.getAudioTracks().some(t => inputSenderTracks.some(t2 => t == t2)));
-    var inputAnalyser = new AudioStreamAnalyser(inputAudioStream);
+    var inputAnalyser = new AudioStreamAnalyser(audiocontext, inputAudioStream);
 
     // It would have been nice to have a working getReceivers() here, but until
     // we do, let's use what remote streams we have.
     var outputAudioStream = this._pc.getRemoteStreams()
       .find(s => s.getAudioTracks().length > 0);
-    var outputAnalyser = new AudioStreamAnalyser(outputAudioStream);
+    var outputAnalyser = new AudioStreamAnalyser(audiocontext, outputAudioStream);
 
     var maxWithIndex = (a, b, i) => (b >= a.value) ? { value: b, index: i } : a;
     var initial = { value: -1, index: -1 };
 
     return new Promise((resolve, reject) => inputElem.ontimeupdate = () => {
       var inputData = inputAnalyser.getByteFrequencyData();
       var outputData = outputAnalyser.getByteFrequencyData();
 
new file mode 100644
--- /dev/null
+++ b/dom/media/tests/mochitest/test_getUserMedia_audioCapture.html
@@ -0,0 +1,110 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test AudioCapture </title>
+  <script type="application/javascript" src="mediaStreamPlayback.js"></script>
+</head>
+<body>
+<pre id="test">
+<script>
+
+createHTML({
+  bug: "1156472",
+  title: "Test AudioCapture with regular HTMLMediaElement, AudioContext, and HTMLMediaElement playing a MediaStream",
+  visible: true
+});
+
+scriptsReady
+.then(() => FAKE_ENABLED = false)
+.then(() => {
+  runTestWhenReady(function() {
+    // Get an opus file containing a sine wave at maximum amplitude, of duration
+    // `lengthSeconds`, and of frequency `frequency`.
+    function getSineWaveFile(frequency, lengthSeconds, callback) {
+      var chunks = [];
+      var off = new OfflineAudioContext(1, lengthSeconds * 48000, 48000);
+      var osc = off.createOscillator();
+      var rec = new MediaRecorder(osc);
+      rec.ondataavailable = function(e) {
+        chunks.push(e.data);
+      };
+      rec.onstop = function(e) {
+        var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
+        callback(blob);
+      }
+      osc.frequency.value = frequency;
+      osc.start();
+      rec.start();
+      off.startRendering().then(function(buffer) {
+        rec.stop();
+      });
+    }
+    /**
+     * Get two HTMLMediaElements:
+     * - One playing a sine tone from a blob (of an opus file created on the fly)
+     * - One being the output for an AudioContext's OscillatorNode, connected to
+     *   a MediaSourceDestinationNode.
+     *
+     * Also, use the AudioContext playing through its AudioDestinationNode another
+     * tone, using another OscillatorNode.
+     *
+     * Capture the output of the document, feed that back into the AudioContext,
+     * with an AnalyserNode, and check the frequency content to make sure we
+     * have recorded the three sources.
+     *
+     * The three sine tones have frequencies far apart from each other, so that we
+     * can check that the spectrum of the capture stream contains three
+     * components with a high magnitude.
+     */
+    var wavtone = createMediaElement("audio", "WaveTone");
+    var acTone = createMediaElement("audio", "audioContextTone");
+    var ac = new AudioContext();
+
+    var oscThroughMediaElement = ac.createOscillator();
+    oscThroughMediaElement.frequency.value = 1000;
+    var oscThroughAudioDestinationNode = ac.createOscillator();
+    oscThroughAudioDestinationNode.frequency.value = 5000;
+    var msDest = ac.createMediaStreamDestination();
+
+    oscThroughMediaElement.connect(msDest);
+    oscThroughAudioDestinationNode.connect(ac.destination);
+
+    acTone.mozSrcObject = msDest.stream;
+
+    getSineWaveFile(10000, 10, function(blob) {
+      wavtone.src = URL.createObjectURL(blob);
+      oscThroughMediaElement.start();
+      oscThroughAudioDestinationNode.start();
+      wavtone.loop = true;
+      wavtone.play();
+      acTone.play();
+    });
+
+    var constraints = {audio: {mediaSource: "audioCapture"}};
+
+    return getUserMedia(constraints).then((stream) => {
+      checkMediaStreamTracks(constraints, stream);
+      window.grip = stream;
+      var analyser = new AudioStreamAnalyser(ac, stream);
+      analyser.enableDebugCanvas();
+      return analyser.waitForAnalysisSuccess(function(array) {
+        // We want to find three frequency components here, around 1000, 5000
+        // and 10000Hz. Frequency are logarithmic. Also make sure we have low
+        // energy in between, not just a flat white noise.
+        return (array[analyser.binIndexForFrequency(50)]    < 50 &&
+                array[analyser.binIndexForFrequency(1000)]  > 200 &&
+                array[analyser.binIndexForFrequency(2500)]  < 50 &&
+                array[analyser.binIndexForFrequency(5000)]  > 200 &&
+                array[analyser.binIndexForFrequency(7500)]  < 50 &&
+                array[analyser.binIndexForFrequency(10000)] > 200);
+      }).then(finish);
+    }).catch(finish);
+  });
+});
+
+
+
+</script>
+</pre>
+</body>
+</html>
--- a/dom/media/tests/mochitest/test_peerConnection_replaceTrack.html
+++ b/dom/media/tests/mochitest/test_peerConnection_replaceTrack.html
@@ -131,17 +131,17 @@
             ok(pc.getLocalStreams().some(s => s.getTracks()
                                                .some(t => t == sender.track)),
                "track exists among pc's local streams");
           });
       }
     ]);
     test.chain.append([
       function PC_LOCAL_CHECK_WEBAUDIO_FLOW_PRESENT(test) {
-        return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
+        return test.pcRemote.checkReceivingToneFrom(test.audioCtx, test.pcLocal);
       }
     ]);
     test.chain.append([
       function PC_LOCAL_INVALID_ADD_VIDEOTRACKS(test) {
         var stream = test.pcLocal._pc.getLocalStreams()[0];
         var track = stream.getVideoTracks()[0];
         try {
           test.pcLocal._pc.addTrack(track, stream);
--- a/dom/media/tests/mochitest/test_peerConnection_webAudio.html
+++ b/dom/media/tests/mochitest/test_peerConnection_webAudio.html
@@ -27,17 +27,17 @@ runNetworkTest(function() {
       oscillator.start();
       var dest = test.audioContext.createMediaStreamDestination();
       oscillator.connect(dest);
       test.pcLocal.attachMedia(dest.stream, 'audio', 'local');
     }
   ]);
   test.chain.append([
     function CHECK_AUDIO_FLOW(test) {
-      return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
+      return test.pcRemote.checkReceivingToneFrom(test.audioContext, test.pcLocal);
     }
   ]);
   test.run();
 });
 </script>
 </pre>
 </body>
 </html>