Bug 1492316 [wpt PR 13061] - Sub-sample accurate start for ABSN, a=testonly
authorRaymond Toy <rtoy@chromium.org>
Fri, 01 Feb 2019 13:41:11 +0000
changeset 458190 5dd641d1e2caf98da9efc757dc2122129929a628
parent 458189 8e798a6a25cbf180a60e48615cdfe7c7f5ac2564
child 458191 8da31a6aa79d2f3eaf040f60dbaecb02a93f604b
push id35518
push useropoprus@mozilla.com
push dateFri, 08 Feb 2019 09:55:14 +0000
treeherdermozilla-central@3a3e393396f4 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerstestonly
bugs1492316, 13061, 876917, 1212270, 626753
milestone67.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1492316 [wpt PR 13061] - Sub-sample accurate start for ABSN, a=testonly Automatic update from web-platform-tests Sub-sample accurate start for ABSN Implement sub-sample accurate start for AudioBufferSourceNode. Previously, if the start time was between sample boundaries, we would start the output at the frame before the start time. This is actually incorrect because we haven't actually started yet. We should start the output at the next boundary, and interpolating the value based on the true start time and the sample boundary. Many tests needed to be updated. Basically for each test that needed to be changed, the sample rate is set to a power of two and all ABSN sources are updated to make sure the source starts exactly on a frame boundary. We also took the opportunity to adjust the error thresholds for the tests in case lower values could be used. Some additional notes for the tests: audiobuffersource-playbackrate-zero.html: Add new test to make sure sub-sample accurate start handles a zero playback rate. audiobuffersource-loop-points.html: add some code to save the actual output. This is needed because a new reference file is needed since the sample rate has changed. Also manually tested all of the modified tests with Firefox nightly. They all pass still (except for the new sub-sample test because Firefox doesn't do sub-sample accurate start/stop). Bug: 876917 Test: the-audiobuffersourcenode-interface/sub-sample-scheduling.html Change-Id: Ib13ba30eaa160cfd10739feabac961bf074ee309 Reviewed-on: https://chromium-review.googlesource.com/c/1212270 Commit-Queue: Raymond Toy <rtoy@chromium.org> Reviewed-by: Hongchan Choi <hongchan@chromium.org> Cr-Commit-Position: refs/heads/master@{#626753} -- wpt-commits: 00fa50687cab43b660296389acad6cc48717f1d1 wpt-pr: 13061
testing/web-platform/tests/webaudio/resources/biquad-testing.js
testing/web-platform/tests/webaudio/resources/distance-model-testing.js
testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js
testing/web-platform/tests/webaudio/resources/panner-model-testing.js
testing/web-platform/tests/webaudio/resources/stereopanner-testing.js
testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html
testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/sub-sample-scheduling.html
testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html
testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowpass.html
testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain.html
--- a/testing/web-platform/tests/webaudio/resources/biquad-testing.js
+++ b/testing/web-platform/tests/webaudio/resources/biquad-testing.js
@@ -1,24 +1,25 @@
 // Globals, to make testing and debugging easier.
 let context;
 let filter;
 let signal;
 let renderedBuffer;
 let renderedData;
 
-let sampleRate = 44100.0;
+// Use a power of two to eliminate round-off in converting frame to time
+let sampleRate = 32768;
 let pulseLengthFrames = .1 * sampleRate;
 
 // Maximum allowed error for the test to succeed.  Experimentally determined.
 let maxAllowedError = 5.9e-8;
 
-// This must be large enough so that the filtered result is
-// essentially zero.  See comments for createTestAndRun.
-let timeStep = .1;
+// This must be large enough so that the filtered result is essentially zero.
+// See comments for createTestAndRun.  This must be a whole number of frames.
+let timeStep = Math.ceil(.1 * sampleRate) / sampleRate;
 
 // Maximum number of filters we can process (mostly for setting the
 // render length correctly.)
 let maxFilters = 5;
 
 // How long to render.  Must be long enough for all of the filters we
 // want to test.
 let renderLengthSeconds = timeStep * (maxFilters + 1);
--- a/testing/web-platform/tests/webaudio/resources/distance-model-testing.js
+++ b/testing/web-platform/tests/webaudio/resources/distance-model-testing.js
@@ -1,15 +1,18 @@
-let sampleRate = 44100.0;
+// Use a power of two to eliminate round-off when converting frames to time and
+// vice versa.
+let sampleRate = 32768;
 
 // How many panner nodes to create for the test.
 let nodesToCreate = 100;
 
-// Time step when each panner node starts.
-let timeStep = 0.001;
+// Time step when each panner node starts.  Make sure it starts on a frame
+// boundary.
+let timeStep = Math.floor(0.001 * sampleRate) / sampleRate;
 
 // Make sure we render long enough to get all of our nodes.
 let renderLengthSeconds = timeStep * (nodesToCreate + 1);
 
 // Length of an impulse signal.
 let pulseLengthFrames = Math.round(timeStep * sampleRate);
 
 // Globals to make debugging a little easier.
@@ -129,17 +132,17 @@ function equalPowerGain() {
 }
 
 function checkDistanceResult(renderedBuffer, model, should) {
   renderedData = renderedBuffer.getChannelData(0);
 
   // The max allowed error between the actual gain and the expected
   // value.  This is determined experimentally.  Set to 0 to see
   // what the actual errors are.
-  let maxAllowedError = 3.3e-6;
+  let maxAllowedError = 2.2720e-6;
 
   let success = true;
 
   // Number of impulses we found in the rendered result.
   let impulseCount = 0;
 
   // Maximum relative error in the gain of the impulses.
   let maxError = 0;
--- a/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js
+++ b/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js
@@ -1,22 +1,28 @@
-let sampleRate = 44100.0;
+// Use a power of two to eliminate round-off converting from frames to time.
+let sampleRate = 32768;
 
 // How many grains to play.
 let numberOfTests = 100;
 
-// Duration of each grain to be played
-let duration = 0.01;
+// Duration of each grain to be played.  Make a whole number of frames
+let duration = Math.floor(0.01 * sampleRate) / sampleRate;
+
+// A little extra bit of silence between grain boundaries.  Must be a whole
+// number of frames.
+let grainGap = Math.floor(0.005 * sampleRate) / sampleRate;
 
 // Time step between the start of each grain.  We need to add a little
 // bit of silence so we can detect grain boundaries
-let timeStep = duration + .005;
+let timeStep = duration + grainGap;
 
-// Time step between the start for each grain.
-let grainOffsetStep = 0.001;
+// Time step between the start for each grain.  Must be a whole number of
+// frames.
+let grainOffsetStep = Math.floor(0.001 * sampleRate) / sampleRate;
 
 // How long to render to cover all of the grains.
 let renderTime = (numberOfTests + 1) * timeStep;
 
 let context;
 let renderedData;
 
 // Create a buffer containing the data that we want.  The function f
--- a/testing/web-platform/tests/webaudio/resources/panner-model-testing.js
+++ b/testing/web-platform/tests/webaudio/resources/panner-model-testing.js
@@ -1,14 +1,17 @@
-let sampleRate = 44100.0;
+// Use a power of two to eliminate round-off when converting frames to time and
+// vice versa.
+let sampleRate = 32768;
 
 let numberOfChannels = 1;
 
-// Time step when each panner node starts.
-let timeStep = 0.001;
+// Time step when each panner node starts.  Make sure it starts on a frame
+// boundary.
+let timeStep = Math.floor(0.001 * sampleRate) / sampleRate;
 
 // Length of the impulse signal.
 let pulseLengthFrames = Math.round(timeStep * sampleRate);
 
 // How many panner nodes to create for the test
 let nodesToCreate = 100;
 
 // Be sure we render long enough for all of our nodes.
@@ -109,17 +112,17 @@ function equalPowerGain(angle) {
 
 function checkResult(renderedBuffer, should) {
   renderedLeft = renderedBuffer.getChannelData(0);
   renderedRight = renderedBuffer.getChannelData(1);
 
   // The max error we allow between the rendered impulse and the
   // expected value.  This value is experimentally determined.  Set
   // to 0 to make the test fail to see what the actual error is.
-  let maxAllowedError = 1.3e-6;
+  let maxAllowedError = 1.1597e-6;
 
   let success = true;
 
   // Number of impulses found in the rendered result.
   let impulseCount = 0;
 
   // Max (relative) error and the index of the maxima for the left
   // and right channels.
--- a/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js
+++ b/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js
@@ -1,17 +1,19 @@
 let StereoPannerTest = (function() {
 
   // Constants
   let PI_OVER_TWO = Math.PI * 0.5;
 
-  let gSampleRate = 44100;
+  // Use a power of two to eliminate any round-off when converting frames to
+  // time.
+  let gSampleRate = 32768;
 
-  // Time step when each panner node starts.
-  let gTimeStep = 0.001;
+  // Time step when each panner node starts.  Make sure this is on a frame boundary.
+  let gTimeStep = Math.floor(0.001 * gSampleRate) / gSampleRate;
 
   // How many panner nodes to create for the test
   let gNodesToCreate = 100;
 
   // Total render length for all of our nodes.
   let gRenderLength = gTimeStep * (gNodesToCreate + 1) + gSampleRate;
 
   // Calculates channel gains based on equal power panning model.
@@ -72,17 +74,17 @@ let StereoPannerTest = (function() {
     this.errors = [];
 
     // The index of the current impulse being verified.
     this.impulseIndex = 0;
 
     // The max error we allow between the rendered impulse and the
     // expected value.  This value is experimentally determined.  Set
     // to 0 to make the test fail to see what the actual error is.
-    this.maxAllowedError = 1.3e-6;
+    this.maxAllowedError = 9.8015e-8;
 
     // Max (absolute) error and the index of the maxima for the left
     // and right channels.
     this.maxErrorL = 0;
     this.maxErrorR = 0;
     this.maxErrorIndexL = 0;
     this.maxErrorIndexR = 0;
 
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html
@@ -69,12 +69,48 @@
                       'held the sample value correctly',
                       'should hold the sample value. ' +
                           'Expected ' + rampData[half] + ' but got ' + data[i] +
                           ' at the index ' + i);
             })
             .then(() => task.done());
       });
 
+      audit.define('subsample start with playback rate 0', (task, should) => {
+        let context = new OfflineAudioContext(1, renderLength, sampleRate);
+        let rampBuffer = new AudioBuffer(
+            {length: renderLength, sampleRate: context.sampleRate});
+        let data = new Float32Array(renderLength);
+        let startValue = 5;
+        for (let k = 0; k < data.length; ++k) {
+          data[k] = k + startValue;
+        }
+        rampBuffer.copyToChannel(data, 0);
+
+        let src = new AudioBufferSourceNode(
+            context, {buffer: rampBuffer, playbackRate: 0});
+
+        src.connect(context.destination);
+
+        // Purposely start the source between frame boundaries
+        let startFrame = 27.3;
+        src.start(startFrame / context.sampleRate);
+
+        context.startRendering()
+            .then(audioBuffer => {
+              let actualStartFrame = Math.ceil(startFrame);
+              let audio = audioBuffer.getChannelData(0);
+
+              should(
+                  audio.slice(0, actualStartFrame),
+                  `output[0:${actualStartFrame - 1}]`)
+                  .beConstantValueOf(0);
+              should(
+                  audio.slice(actualStartFrame), `output[${actualStartFrame}:]`)
+                  .beConstantValueOf(startValue);
+            })
+            .then(() => task.done());
+      });
+
       audit.run();
     </script>
   </body>
 </html>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/sub-sample-scheduling.html
@@ -0,0 +1,423 @@
+<!doctype html>
+<html>
+  <head>
+    <title>
+      Test Sub-Sample Accurate Scheduling for ABSN
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script>
+      // Power of two so there's no roundoff converting from integer frames to
+      // time.
+      let sampleRate = 32768;
+
+      let audit = Audit.createTaskRunner();
+
+      audit.define('sub-sample accurate start', (task, should) => {
+        // There are two channels, one for each source.  Only need to render
+        // quanta for this test.
+        let context = new OfflineAudioContext(
+            {numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
+        let merger = new ChannelMergerNode(
+            context, {numberOfInputs: context.destination.channelCount});
+
+        merger.connect(context.destination);
+
+        // Use a simple linear ramp for the sources with integer steps starting
+        // at 1 to make it easy to verify and test that we have sub-sample
+        // start.  Ramp starts at 1 so we can easily tell when the source
+        // starts.
+        let rampBuffer = new AudioBuffer(
+            {length: context.length, sampleRate: context.sampleRate});
+        let r = rampBuffer.getChannelData(0);
+        for (let k = 0; k < r.length; ++k) {
+          r[k] = k + 1;
+        }
+
+        const src0 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
+        const src1 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
+
+        // Frame where sources should start. This is pretty arbitrary, but one
+        // should be close to an integer and the other should be close to the
+        // next integer.  We do this to catch the case where rounding of the
+        // start frame is being done.  Rounding is incorrect.
+        const startFrame = 33;
+        const startFrame0 = startFrame + 0.1;
+        const startFrame1 = startFrame + 0.9;
+
+        src0.connect(merger, 0, 0);
+        src1.connect(merger, 0, 1);
+
+        src0.start(startFrame0 / context.sampleRate);
+        src1.start(startFrame1 / context.sampleRate);
+
+        context.startRendering()
+            .then(audioBuffer => {
+              const output0 = audioBuffer.getChannelData(0);
+              const output1 = audioBuffer.getChannelData(1);
+
+              // Compute the expected output by interpolating the ramp buffer of
+              // the sources if they started at the given frame.
+              const ramp = rampBuffer.getChannelData(0);
+              const expected0 = interpolateRamp(ramp, startFrame0);
+              const expected1 = interpolateRamp(ramp, startFrame1);
+
+              // Verify output0 has the correct values
+
+              // For information only
+              should(startFrame0, 'src0 start frame').beEqualTo(startFrame0);
+
+              // Output must be zero before the source start frame, and it must
+              // be interpolated correctly after the start frame.  The
+              // absoluteThreshold below is currently set for Chrome which does
+              // linear interpolation.  This needs to be updated eventually if
+              // other browsers do not use interpolation.
+              should(
+                  output0.slice(0, startFrame + 1), `output0[0:${startFrame}]`)
+                  .beConstantValueOf(0);
+              should(
+                  output0.slice(startFrame + 1, expected0.length),
+                  `output0[${startFrame + 1}:${expected0.length - 1}]`)
+                  .beCloseToArray(
+                      expected0.slice(startFrame + 1), {absoluteThreshold: 0});
+
+              // Verify output1 has the correct values.  Same approach as for
+              // output0.
+              should(startFrame1, 'src1 start frame').beEqualTo(startFrame1);
+
+              should(
+                  output1.slice(0, startFrame + 1), `output1[0:${startFrame}]`)
+                  .beConstantValueOf(0);
+              should(
+                  output1.slice(startFrame + 1, expected1.length),
+                  `output1[${startFrame + 1}:${expected1.length - 1}]`)
+                  .beCloseToArray(
+                      expected1.slice(startFrame + 1), {absoluteThreshold: 0});
+            })
+            .then(() => task.done());
+      });
+
+      audit.define('sub-sample accurate stop', (task, should) => {
+        // There are three channels, one for each source.  Only need to render
+        // quanta for this test.
+        let context = new OfflineAudioContext(
+            {numberOfChannels: 3, length: 128, sampleRate: sampleRate});
+        let merger = new ChannelMergerNode(
+            context, {numberOfInputs: context.destination.channelCount});
+
+        merger.connect(context.destination);
+
+        // The source can be a simple constant for this test.
+        let buffer = new AudioBuffer(
+            {length: context.length, sampleRate: context.sampleRate});
+        buffer.getChannelData(0).fill(1);
+
+        const src0 = new AudioBufferSourceNode(context, {buffer: buffer});
+        const src1 = new AudioBufferSourceNode(context, {buffer: buffer});
+        const src2 = new AudioBufferSourceNode(context, {buffer: buffer});
+
+        // Frame where sources should stop. This is pretty arbitrary, but one
+        // should be an integer, one should be close to an integer and the other
+        // should be close to the next integer.  This is to catch the case where
+        // rounding is used for the end frame.  Rounding is incorrect.
+        const endFrame = 33;
+        const endFrame1 = endFrame + 0.1;
+        const endFrame2 = endFrame + 0.9;
+
+        src0.connect(merger, 0, 0);
+        src1.connect(merger, 0, 1);
+        src2.connect(merger, 0, 2);
+
+        src0.start(0);
+        src1.start(0);
+        src2.start(0);
+        src0.stop(endFrame / context.sampleRate);
+        src1.stop(endFrame1 / context.sampleRate);
+        src2.stop(endFrame2 / context.sampleRate);
+
+        context.startRendering()
+          .then(audioBuffer => {
+            let actual0 = audioBuffer.getChannelData(0);
+            let actual1 = audioBuffer.getChannelData(1);
+            let actual2 = audioBuffer.getChannelData(2);
+
+            // Just verify that we stopped at the right time.
+
+            // This is case where the end frame is an integer.  Since the first
+            // output ends on an exact frame, the output must be zero at that
+            // frame number.  We print the end frame for information only; it
+            // makes interpretation of the rest easier.
+            should(endFrame - 1, 'src0 end frame')
+              .beEqualTo(endFrame - 1);
+            should(actual0[endFrame - 1], `output0[${endFrame - 1}]`)
+              .notBeEqualTo(0);
+            should(actual0.slice(endFrame),
+                   `output0[${endFrame}:]`)
+              .beConstantValueOf(0);
+
+            // The case where the end frame is just a little above an integer.
+            // The output must not be zero just before the end and must be zero
+            // after.
+            should(endFrame1, 'src1 end frame')
+              .beEqualTo(endFrame1);
+            should(actual1[endFrame], `output1[${endFrame}]`)
+              .notBeEqualTo(0);
+            should(actual1.slice(endFrame + 1),
+                   `output1[${endFrame + 1}:]`)
+              .beConstantValueOf(0);
+
+            // The case where the end frame is just a little below an integer.
+            // The output must not be zero just before the end and must be zero
+            // after.
+            should(endFrame2, 'src2 end frame')
+              .beEqualTo(endFrame2);
+            should(actual2[endFrame], `output2[${endFrame}]`)
+              .notBeEqualTo(0);
+            should(actual2.slice(endFrame + 1),
+                   `output2[${endFrame + 1}:]`)
+              .beConstantValueOf(0);
+          })
+          .then(() => task.done());
+      });
+
+      audit.define('sub-sample-grain', (task, should) => {
+        let context = new OfflineAudioContext(
+            {numberOfChannels: 2, length: 128, sampleRate: sampleRate});
+
+        let merger = new ChannelMergerNode(
+            context, {numberOfInputs: context.destination.channelCount});
+
+        merger.connect(context.destination);
+
+        // The source can be a simple constant for this test.
+        let buffer = new AudioBuffer(
+            {length: context.length, sampleRate: context.sampleRate});
+        buffer.getChannelData(0).fill(1);
+
+        let src0 = new AudioBufferSourceNode(context, {buffer: buffer});
+        let src1 = new AudioBufferSourceNode(context, {buffer: buffer});
+
+        src0.connect(merger, 0, 0);
+        src1.connect(merger, 0, 1);
+
+        // Start a short grain.
+        const src0StartGrain = 3.1;
+        const src0EndGrain = 37.2;
+        src0.start(
+            src0StartGrain / context.sampleRate, 0,
+            (src0EndGrain - src0StartGrain) / context.sampleRate);
+
+        const src1StartGrain = 5.8;
+        const src1EndGrain = 43.9;
+        src1.start(
+            src1StartGrain / context.sampleRate, 0,
+            (src1EndGrain - src1StartGrain) / context.sampleRate);
+
+        context.startRendering()
+            .then(audioBuffer => {
+              let output0 = audioBuffer.getChannelData(0);
+              let output1 = audioBuffer.getChannelData(1);
+
+              let expected = new Float32Array(context.length);
+
+              // Compute the expected output for output0 and verify the actual
+              // output matches.
+              expected.fill(1);
+              for (let k = 0; k <= Math.floor(src0StartGrain); ++k) {
+                expected[k] = 0;
+              }
+              for (let k = Math.ceil(src0EndGrain); k < expected.length; ++k) {
+                expected[k] = 0;
+              }
+
+              verifyGrain(should, output0, {
+                startGrain: src0StartGrain,
+                endGrain: src0EndGrain,
+                sourceName: 'src0',
+                outputName: 'output0'
+              });
+
+              verifyGrain(should, output1, {
+                startGrain: src1StartGrain,
+                endGrain: src1EndGrain,
+                sourceName: 'src1',
+                outputName: 'output1'
+              });
+            })
+            .then(() => task.done());
+      });
+
+      audit.define(
+          'sub-sample accurate start with playbackRate', (task, should) => {
+            // There are two channels, one for each source.  Only need to render
+            // quanta for this test.
+            let context = new OfflineAudioContext(
+                {numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
+            let merger = new ChannelMergerNode(
+                context, {numberOfInputs: context.destination.channelCount});
+
+            merger.connect(context.destination);
+
+            // Use a simple linear ramp for the sources with integer steps
+            // starting at 1 to make it easy to verify and test that we have
+            // sub-sample accurate start.  Ramp starts at 1 so we can easily
+            // tell when the source starts.
+            let buffer = new AudioBuffer(
+                {length: context.length, sampleRate: context.sampleRate});
+            let r = buffer.getChannelData(0);
+            for (let k = 0; k < r.length; ++k) {
+              r[k] = k + 1;
+            }
+
+            // Two sources with different playback rates
+            const src0 = new AudioBufferSourceNode(
+                context, {buffer: buffer, playbackRate: .25});
+            const src1 = new AudioBufferSourceNode(
+                context, {buffer: buffer, playbackRate: 4});
+
+            // Frame where sources start.  Pretty arbitrary but should not be an
+            // integer.
+            const startFrame = 17.8;
+
+            src0.connect(merger, 0, 0);
+            src1.connect(merger, 0, 1);
+
+            src0.start(startFrame / context.sampleRate);
+            src1.start(startFrame / context.sampleRate);
+
+            context.startRendering()
+                .then(audioBuffer => {
+                  const output0 = audioBuffer.getChannelData(0);
+                  const output1 = audioBuffer.getChannelData(1);
+
+                  const frameBefore = Math.floor(startFrame);
+                  const frameAfter = frameBefore + 1;
+
+                  // Informative message so we know what the following output
+                  // indices really mean.
+                  should(startFrame, 'Source start frame')
+                      .beEqualTo(startFrame);
+
+                  // Verify the output
+
+                  // With a startFrame of 17.8, the first output is at frame 18,
+                  // but the actual start is at 17.8.  So we would interpolate
+                  // the output 0.2 fraction of the way between 17.8 and 18, for
+                  // an output of 1.2 for our ramp.  But the playback rate is
+                  // 0.25, so we're really only 1/4 as far along as we think so
+                  // the output is .2*0.25 of the way between 1 and 2 or 1.05.
+
+                  const ramp0 = buffer.getChannelData(0)[0];
+                  const ramp1 = buffer.getChannelData(0)[1];
+
+                  const src0Output = ramp0 +
+                      (ramp1 - ramp0) * (frameAfter - startFrame) *
+                          src0.playbackRate.value;
+
+                  let playbackMessage =
+                      `With playbackRate ${src0.playbackRate.value}:`;
+
+                  should(
+                      output0[frameBefore],
+                      `${playbackMessage} output0[${frameBefore}]`)
+                      .beEqualTo(0);
+                  should(
+                      output0[frameAfter],
+                      `${playbackMessage} output0[${frameAfter}]`)
+                      .beCloseTo(src0Output, {threshold: 4.542e-8});
+
+                  const src1Output = ramp0 +
+                      (ramp1 - ramp0) * (frameAfter - startFrame) *
+                          src1.playbackRate.value;
+
+                  playbackMessage =
+                      `With playbackRate ${src1.playbackRate.value}:`;
+
+                  should(
+                      output1[frameBefore],
+                      `${playbackMessage} output1[${frameBefore}]`)
+                      .beEqualTo(0);
+                  should(
+                      output1[frameAfter],
+                      `${playbackMessage} output1[${frameAfter}]`)
+                      .beCloseTo(src1Output, {threshold: 4.542e-8});
+                })
+                .then(() => task.done());
+          });
+
+      audit.run();
+
+      // Given an input ramp in |rampBuffer|, interpolate the signal assuming
+      // this ramp is used for an ABSN that starts at frame |startFrame|, which
+      // is not necessarily an integer.  For simplicity we just use linear
+      // interpolation here.  The interpolation is not part of the spec but
+      // this should be pretty close to whatever interpolation is being done.
+      function interpolateRamp(rampBuffer, startFrame) {
+        // |start| is the last zero sample before the ABSN actually starts.
+        const start = Math.floor(startFrame);
+        // One less than the rampBuffer because we can't linearly interpolate
+        // the last frame.
+        let result = new Float32Array(rampBuffer.length - 1);
+
+        for (let k = 0; k <= start; ++k) {
+          result[k] = 0;
+        }
+
+        // Now start linear interpolation.
+        let frame = startFrame;
+        let index = 1;
+        for (let k = start + 1; k < result.length; ++k) {
+          let s0 = rampBuffer[index];
+          let s1 = rampBuffer[index - 1];
+          let delta = frame - k;
+          let s = s1 - delta * (s0 - s1);
+          result[k] = s;
+          ++frame;
+          ++index;
+        }
+
+        return result;
+      }
+
+      function verifyGrain(should, output, options) {
+        let {startGrain, endGrain, sourceName, outputName} = options;
+        let expected = new Float32Array(output.length);
+        // Compute the expected output for output and verify the actual
+        // output matches.
+        expected.fill(1);
+        for (let k = 0; k <= Math.floor(startGrain); ++k) {
+          expected[k] = 0;
+        }
+        for (let k = Math.ceil(endGrain); k < expected.length; ++k) {
+          expected[k] = 0;
+        }
+
+        should(startGrain, `${sourceName} grain start`).beEqualTo(startGrain);
+        should(endGrain - startGrain, `${sourceName} grain duration`)
+            .beEqualTo(endGrain - startGrain);
+        should(endGrain, `${sourceName} grain end`).beEqualTo(endGrain);
+        should(output, outputName).beEqualToArray(expected);
+        should(
+            output[Math.floor(startGrain)],
+            `${outputName}[${Math.floor(startGrain)}]`)
+            .beEqualTo(0);
+        should(
+            output[1 + Math.floor(startGrain)],
+            `${outputName}[${1 + Math.floor(startGrain)}]`)
+            .notBeEqualTo(0);
+        should(
+            output[Math.floor(endGrain)],
+            `${outputName}[${Math.floor(endGrain)}]`)
+            .notBeEqualTo(0);
+        should(
+            output[1 + Math.floor(endGrain)],
+            `${outputName}[${1 + Math.floor(endGrain)}]`)
+            .beEqualTo(0);
+      }
+    </script>
+  </body>
+</html>
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html
@@ -9,17 +9,18 @@
     <script src="/webaudio/resources/audit-util.js"></script>
     <script src="/webaudio/resources/audit.js"></script>
     <script src="/webaudio/resources/mixing-rules.js"></script>
   </head>
   <body>
     <script id="layout-test-code">
       let audit = Audit.createTaskRunner();
       let context = 0;
-      let sampleRate = 44100;
+      // Use a power of two to eliminate round-off converting frames to time.
+      let sampleRate = 32768;
       let renderNumberOfChannels = 8;
       let singleTestFrameLength = 8;
       let testBuffers;
 
       // A list of connections to an AudioNode input, each of which is to be
       // used in one or more specific test cases.  Each element in the list is a
       // string, with the number of connections corresponding to the length of
       // the string, and each character in the string is from '1' to '8'
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowpass.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowpass.html
@@ -29,17 +29,17 @@
               {cutoff: 1, q: 1, gain: 1},
               {cutoff: 0.25, q: 1, gain: 1},
               {cutoff: 0.25, q: 1, gain: 1, detune: 100},
               {cutoff: 0.01, q: 1, gain: 1, detune: -200},
             ];
 
             createTestAndRun(context, 'lowpass', {
               should: should,
-              threshold: 9.7869e-8,
+              threshold: 4.6943e-8,
               filterParameters: filterParameters
             }).then(task.done.bind(task));
           });
 
       audit.run();
     </script>
   </body>
 </html>
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain.html
@@ -13,21 +13,29 @@
     <script id="layout-test-code">
       // Tests that GainNode is properly scaling the gain.  We'll render 11
       // notes, starting at a gain of 1.0, decreasing in gain by 0.1.  The 11th
       // note will be of gain 0.0, so it should be silent (at the end in the
       // rendered output).
 
       let audit = Audit.createTaskRunner();
 
-      let sampleRate = 44100.0;
-      let bufferDurationSeconds = 0.125;
+      // Use a power of two to eliminate any round-off when converting frame to
+      // time.
+      let sampleRate = 32768;
+      // Make sure the buffer duration and spacing are all exact frame lengths
+      // so that the note spacing is also on frame boundaries to eliminate
+      // sub-sample accurate start of a ABSN.
+      let bufferDurationSeconds = Math.floor(0.125 * sampleRate) / sampleRate;
       let numberOfNotes = 11;
-      let noteSpacing = bufferDurationSeconds +
-          0.020;  // leave 20ms of silence between each "note"
+      // Leave about 20ms of silence, being sure this is an exact frame
+      // duration.
+      let noteSilence = Math.floor(0.020 * sampleRate) / sampleRate;
+      let noteSpacing = bufferDurationSeconds + noteSilence;
+          
       let lengthInSeconds = numberOfNotes * noteSpacing;
 
       let context = 0;
       let sinWaveBuffer = 0;
 
       // Create a stereo AudioBuffer of duration |lengthInSeconds| consisting of
       // a pure sine wave with the given |frequency|.  Both channels contain the
       // same data.
@@ -126,28 +134,28 @@
                       reference0[startFrame + n] *= gain;
                       reference1[startFrame + n] *= gain;
                     }
                   }
 
                   // Verify the channels are clsoe to the reference.
                   should(actual0, 'Left output from gain node')
                       .beCloseToArray(
-                          reference0, {relativeThreshold: 1.1908e-7});
+                          reference0, {relativeThreshold: 1.1877e-7});
                   should(actual1, 'Right output from gain node')
                       .beCloseToArray(
-                          reference1, {relativeThreshold: 1.1908e-7});
+                          reference1, {relativeThreshold: 1.1877e-7});
 
                   // Test the SNR too for both channels.
                   let snr0 = 10 * Math.log10(computeSNR(actual0, reference0));
                   let snr1 = 10 * Math.log10(computeSNR(actual1, reference1));
                   should(snr0, 'Left SNR (in dB)')
-                      .beGreaterThanOrEqualTo(148.69);
+                      .beGreaterThanOrEqualTo(148.71);
                   should(snr1, 'Right SNR (in dB)')
-                      .beGreaterThanOrEqualTo(148.69);
+                      .beGreaterThanOrEqualTo(148.71);
                 })
                 .then(() => task.done());
             ;
           });
 
       audit.run();
     </script>
   </body>