--- a/content/media/webaudio/AudioBufferSourceNode.cpp
+++ b/content/media/webaudio/AudioBufferSourceNode.cpp
@@ -3,64 +3,62 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioBufferSourceNode.h"
#include "mozilla/dom/AudioBufferSourceNodeBinding.h"
#include "nsMathUtils.h"
#include "AudioNodeEngine.h"
+#include "AudioNodeStream.h"
+#include "AudioDestinationNode.h"
#include "PannerNode.h"
-#include "GainProcessor.h"
#include "speex/speex_resampler.h"
#include <limits>
namespace mozilla {
namespace dom {
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioBufferSourceNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mBuffer)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mPlaybackRate)
- NS_IMPL_CYCLE_COLLECTION_UNLINK(mGain)
if (tmp->Context()) {
// AudioNode's Unlink implementation disconnects us from the graph
// too, but we need to do this right here to make sure that
// UnregisterAudioBufferSourceNode can properly untangle us from
// the possibly connected PannerNodes.
tmp->DisconnectFromGraph();
tmp->Context()->UnregisterAudioBufferSourceNode(tmp);
}
NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mBuffer)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPlaybackRate)
- NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mGain)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioNode)
-class AudioBufferSourceNodeEngine : public AudioNodeEngine,
- public GainProcessor
+class AudioBufferSourceNodeEngine : public AudioNodeEngine
{
public:
explicit AudioBufferSourceNodeEngine(AudioNode* aNode,
AudioDestinationNode* aDestination) :
AudioNodeEngine(aNode),
- GainProcessor(aDestination),
mStart(0), mStop(TRACK_TICKS_MAX),
mResampler(nullptr),
mOffset(0), mDuration(0),
mLoopStart(0), mLoopEnd(0),
mBufferSampleRate(0), mPosition(0), mChannels(0), mPlaybackRate(1.0f),
mDopplerShift(1.0f),
+ mDestination(static_cast<AudioNodeStream*>(aDestination->Stream())),
mPlaybackRateTimeline(1.0f), mLoop(false)
{}
~AudioBufferSourceNodeEngine()
{
if (mResampler) {
speex_resampler_destroy(mResampler);
}
@@ -79,19 +77,16 @@ public:
if (mResampler && mPlaybackRateTimeline.HasSimpleValue() &&
mPlaybackRateTimeline.GetValue() == 1.0 &&
mBufferSampleRate == aSampleRate) {
speex_resampler_destroy(mResampler);
mResampler = nullptr;
}
WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, nullptr, mDestination);
break;
- case AudioBufferSourceNode::GAIN:
- SetGainParameter(aValue);
- break;
default:
NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
}
}
virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
{
switch (aIndex) {
case AudioBufferSourceNode::START: mStart = aParam; break;
@@ -396,31 +391,16 @@ public:
if (mOffset + t < mDuration) {
CopyFromBuffer(aStream, aOutput, channels, &written, &currentPosition, mOffset + t, mDuration);
} else {
FillWithZeroes(aOutput, channels, &written, &currentPosition, TRACK_TICKS_MAX);
}
}
}
- // Process the gain on the AudioBufferSourceNode
- if (!aOutput->IsNull()) {
- if (!mGain.HasSimpleValue() &&
- aOutput->mBuffer == mBuffer) {
- // If we have borrowed out buffer, make sure to allocate a new one in case
- // the gain value is not a simple value.
- nsTArray<const void*> oldChannels;
- oldChannels.AppendElements(aOutput->mChannelData);
- AllocateAudioBlock(channels, aOutput);
- ProcessGain(aStream, 1.0f, oldChannels, aOutput);
- } else {
- ProcessGain(aStream, 1.0f, aOutput->mChannelData, aOutput);
- }
- }
-
// We've finished if we've gone past mStop, or if we're past mDuration when
// looping is disabled.
if (currentPosition >= mStop ||
(!mLoop && currentPosition - mStart + mOffset > mDuration)) {
*aFinished = true;
}
}
@@ -432,39 +412,38 @@ public:
int32_t mDuration;
int32_t mLoopStart;
int32_t mLoopEnd;
int32_t mBufferSampleRate;
uint32_t mPosition;
uint32_t mChannels;
float mPlaybackRate;
float mDopplerShift;
+ AudioNodeStream* mDestination;
AudioParamTimeline mPlaybackRateTimeline;
bool mLoop;
};
AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
: AudioNode(aContext,
2,
ChannelCountMode::Max,
ChannelInterpretation::Speakers)
, mLoopStart(0.0)
, mLoopEnd(0.0)
, mOffset(0.0)
, mDuration(std::numeric_limits<double>::min())
, mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f))
- , mGain(new AudioParam(this, SendGainToStream, 1.0f))
, mLoop(false)
, mStartCalled(false)
, mStopped(false)
{
- AudioBufferSourceNodeEngine* engine =
- new AudioBufferSourceNodeEngine(this, aContext->Destination());
- mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
- engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
+ mStream = aContext->Graph()->CreateAudioNodeStream(
+ new AudioBufferSourceNodeEngine(this, aContext->Destination()),
+ MediaStreamGraph::INTERNAL_STREAM);
mStream->AddMainThreadListener(this);
}
AudioBufferSourceNode::~AudioBufferSourceNode()
{
if (Context()) {
Context()->UnregisterAudioBufferSourceNode(this);
}
@@ -638,23 +617,16 @@ AudioBufferSourceNode::NotifyMainThreadS
void
AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode)
{
AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
SendTimelineParameterToStream(This, PLAYBACKRATE, *This->mPlaybackRate);
}
void
-AudioBufferSourceNode::SendGainToStream(AudioNode* aNode)
-{
- AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
- SendTimelineParameterToStream(This, GAIN, *This->mGain);
-}
-
-void
AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift)
{
SendDoubleParameterToStream(DOPPLERSHIFT, aDopplerShift);
}
void
AudioBufferSourceNode::SendLoopParametersToStream()
{
--- a/content/media/webaudio/AudioBufferSourceNode.h
+++ b/content/media/webaudio/AudioBufferSourceNode.h
@@ -71,20 +71,16 @@ public:
mBuffer = aBuffer;
SendBufferParameterToStream(aCx);
SendLoopParametersToStream();
}
AudioParam* PlaybackRate() const
{
return mPlaybackRate;
}
- AudioParam* Gain() const
- {
- return mGain;
- }
bool Loop() const
{
return mLoop;
}
void SetLoop(bool aLoop)
{
mLoop = aLoop;
SendLoopParametersToStream();
@@ -123,36 +119,33 @@ private:
START,
STOP,
OFFSET,
DURATION,
LOOP,
LOOPSTART,
LOOPEND,
PLAYBACKRATE,
- GAIN,
DOPPLERSHIFT
};
void SendLoopParametersToStream();
void SendBufferParameterToStream(JSContext* aCx);
void SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream,
double aOffset,
double aDuration);
static void SendPlaybackRateToStream(AudioNode* aNode);
- static void SendGainToStream(AudioNode* aNode);
private:
double mLoopStart;
double mLoopEnd;
double mOffset;
double mDuration;
nsRefPtr<AudioBuffer> mBuffer;
nsRefPtr<AudioParam> mPlaybackRate;
- nsRefPtr<AudioParam> mGain;
SelfReference<AudioBufferSourceNode> mPlayingRef; // a reference to self while playing
bool mLoop;
bool mStartCalled;
bool mStopped;
};
}
}
--- a/content/media/webaudio/GainNode.cpp
+++ b/content/media/webaudio/GainNode.cpp
@@ -2,78 +2,108 @@
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "GainNode.h"
#include "mozilla/dom/GainNodeBinding.h"
#include "AudioNodeEngine.h"
-#include "GainProcessor.h"
+#include "AudioNodeStream.h"
+#include "AudioDestinationNode.h"
+#include "WebAudioUtils.h"
namespace mozilla {
namespace dom {
NS_IMPL_CYCLE_COLLECTION_INHERITED_1(GainNode, AudioNode,
mGain)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(GainNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(GainNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(GainNode, AudioNode)
-class GainNodeEngine : public AudioNodeEngine,
- public GainProcessor
+class GainNodeEngine : public AudioNodeEngine
{
public:
GainNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination)
: AudioNodeEngine(aNode)
- , GainProcessor(aDestination)
+ , mSource(nullptr)
+ , mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
+ // Keep the default value in sync with the default value in GainNode::GainNode.
+ , mGain(1.f)
{
}
+ void SetSourceStream(AudioNodeStream* aSource)
+ {
+ mSource = aSource;
+ }
+
enum Parameters {
GAIN
};
void SetTimelineParameter(uint32_t aIndex,
const AudioParamTimeline& aValue,
TrackRate aSampleRate) MOZ_OVERRIDE
{
switch (aIndex) {
case GAIN:
- SetGainParameter(aValue);
+ MOZ_ASSERT(mSource && mDestination);
+ mGain = aValue;
+ WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
break;
default:
NS_ERROR("Bad GainNodeEngine TimelineParameter");
}
}
virtual void ProduceAudioBlock(AudioNodeStream* aStream,
const AudioChunk& aInput,
AudioChunk* aOutput,
bool* aFinished)
{
MOZ_ASSERT(mSource == aStream, "Invalid source stream");
if (aInput.IsNull()) {
// If input is silent, so is the output
aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
+ } else if (mGain.HasSimpleValue()) {
+ // Optimize the case where we only have a single value set as the volume
+ *aOutput = aInput;
+ aOutput->mVolume *= mGain.GetValue();
} else {
- if (mGain.HasSimpleValue()) {
- // Copy the input chunk to the output chunk, since we will only be
- // changing the mVolume member.
- *aOutput = aInput;
- } else {
- // Create a new output chunk to avoid modifying the input chunk.
- AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
+ // First, compute a vector of gains for each track tick based on the
+ // timeline at hand, and then for each channel, multiply the values
+ // in the buffer with the gain vector.
+ AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
+
+ // Compute the gain values for the duration of the input AudioChunk
+ // XXX we need to add a method to AudioEventTimeline to compute this buffer directly.
+ float computedGain[WEBAUDIO_BLOCK_SIZE];
+ for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
+ TrackTicks tick = aStream->GetCurrentPosition();
+ computedGain[counter] = mGain.GetValueAtTime(tick, counter) * aInput.mVolume;
}
- ProcessGain(aStream, aInput.mVolume, aInput.mChannelData, aOutput);
+
+ // Apply the gain to the output buffer
+ for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
+ const float* inputBuffer = static_cast<const float*> (aInput.mChannelData[channel]);
+ float* buffer = static_cast<float*> (const_cast<void*>
+ (aOutput->mChannelData[channel]));
+ AudioBlockCopyChannelWithScale(inputBuffer, computedGain, buffer);
+ }
}
}
+
+ AudioNodeStream* mSource;
+ AudioNodeStream* mDestination;
+ AudioParamTimeline mGain;
};
GainNode::GainNode(AudioContext* aContext)
: AudioNode(aContext,
2,
ChannelCountMode::Max,
ChannelInterpretation::Speakers)
, mGain(new AudioParam(this, SendGainToStream, 1.0f))
deleted file mode 100644
--- a/content/media/webaudio/GainProcessor.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef GainProcessor_h_
-#define GainProcessor_h_
-
-#include "AudioNodeStream.h"
-#include "AudioDestinationNode.h"
-#include "WebAudioUtils.h"
-
-namespace mozilla {
-namespace dom {
-
-// This class implements the gain processing logic used by GainNodeEngine
-// and AudioBufferSourceNodeEngine.
-class GainProcessor
-{
-public:
- explicit GainProcessor(AudioDestinationNode* aDestination)
- : mSource(nullptr)
- , mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
- , mGain(1.f)
- {
- }
-
- void SetSourceStream(AudioNodeStream* aSource)
- {
- mSource = aSource;
- }
-
- void SetGainParameter(const AudioParamTimeline& aValue)
- {
- MOZ_ASSERT(mSource && mDestination);
- mGain = aValue;
- WebAudioUtils::ConvertAudioParamToTicks(mGain, mSource, mDestination);
- }
-
- void ProcessGain(AudioNodeStream* aStream,
- float aInputVolume,
- const nsTArray<const void*>& aInputChannelData,
- AudioChunk* aOutput)
- {
- MOZ_ASSERT(mSource == aStream, "Invalid source stream");
-
- if (mGain.HasSimpleValue()) {
- // Optimize the case where we only have a single value set as the volume
- aOutput->mVolume *= mGain.GetValue();
- } else {
- // First, compute a vector of gains for each track tick based on the
- // timeline at hand, and then for each channel, multiply the values
- // in the buffer with the gain vector.
-
- // Compute the gain values for the duration of the input AudioChunk
- // XXX we need to add a method to AudioEventTimeline to compute this buffer directly.
- float computedGain[WEBAUDIO_BLOCK_SIZE];
- for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
- TrackTicks tick = aStream->GetCurrentPosition();
- computedGain[counter] = mGain.GetValueAtTime(tick, counter) * aInputVolume;
- }
-
- // Apply the gain to the output buffer
- MOZ_ASSERT(aInputChannelData.Length() == aOutput->mChannelData.Length());
- for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
- const float* inputBuffer = static_cast<const float*> (aInputChannelData[channel]);
- float* buffer = static_cast<float*> (const_cast<void*>
- (aOutput->mChannelData[channel]));
- AudioBlockCopyChannelWithScale(inputBuffer, computedGain, buffer);
- }
- }
- }
-
-protected:
- AudioNodeStream* mSource;
- AudioNodeStream* mDestination;
- AudioParamTimeline mGain;
-};
-
-}
-}
-
-#endif
-
--- a/content/media/webaudio/test/Makefile.in
+++ b/content/media/webaudio/test/Makefile.in
@@ -35,34 +35,31 @@ MOCHITEST_FILES := \
test_audioParamLinearRamp.html \
test_audioParamSetCurveAtTime.html \
test_audioParamSetCurveAtTimeZeroDuration.html \
test_audioParamSetTargetAtTime.html \
test_audioParamSetValueAtTime.html \
test_audioParamTimelineDestinationOffset.html \
test_audioBufferSourceNode.html \
test_audioBufferSourceNodeEnded.html \
- test_audioBufferSourceNodeGain.html \
- test_audioBufferSourceNodeGainInLoop.html \
test_audioBufferSourceNodeLazyLoopParam.html \
test_audioBufferSourceNodeLoop.html \
test_audioBufferSourceNodeLoopStartEnd.html \
test_audioBufferSourceNodeLoopStartEndSame.html \
test_audioBufferSourceNodeNullBuffer.html \
test_badConnect.html \
test_biquadFilterNode.html \
test_channelMergerNode.html \
test_channelMergerNodeWithVolume.html \
test_channelSplitterNode.html \
test_channelSplitterNodeWithVolume.html \
test_currentTime.html \
test_delayNode.html \
test_delayNodeSmallMaxDelay.html \
test_delayNodeWithGain.html \
- test_delayNodeWithGainAlternate.html \
test_dynamicsCompressorNode.html \
test_gainNode.html \
test_gainNodeInLoop.html \
test_mediaDecoding.html \
test_mixingRules.html \
test_nodeToParamConnection.html \
test_OfflineAudioContext.html \
test_offlineDestinationChannelCountLess.html \
--- a/content/media/webaudio/test/test_audioBufferSourceNode.html
+++ b/content/media/webaudio/test/test_audioBufferSourceNode.html
@@ -14,18 +14,16 @@ var gTest = {
length: 4096,
createGraph: function(context) {
var buffer = context.createBuffer(1, 2048, context.sampleRate);
for (var i = 0; i < 2048; ++i) {
buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
}
var source = context.createBufferSource();
- ok("gain" in source, "AudioBufferSourceNode.gain must exist");
- is(source.gain.value, 1, "AudioBufferSourceNode.gain's default value must be 1");
var sp = context.createScriptProcessor(2048);
source.start(0);
source.buffer = buffer;
return source;
},
createExpectedBuffers: function(context) {
var buffers = [];
deleted file mode 100644
--- a/content/media/webaudio/test/test_audioBufferSourceNodeGain.html
+++ /dev/null
@@ -1,45 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
- <title>Test AudioBufferSourceNode.gain</title>
- <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
- <script type="text/javascript" src="webaudio.js"></script>
- <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-
-var gTest = {
- length: 2048,
- numberOfChannels: 1,
- createGraph: function(context) {
- var buffer = context.createBuffer(1, 2048, context.sampleRate);
- for (var i = 0; i < 2048; ++i) {
- buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
- }
-
- var source = context.createBufferSource();
-
- source.buffer = buffer;
-
- source.gain.value = 0.5;
-
- source.start(0);
- return source;
- },
- createExpectedBuffers: function(context) {
- var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
- for (var i = 0; i < 2048; ++i) {
- expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate) / 2;
- }
- return expectedBuffer;
- },
-};
-
-runTest();
-
-</script>
-</pre>
-</body>
-</html>
deleted file mode 100644
--- a/content/media/webaudio/test/test_audioBufferSourceNodeGainInLoop.html
+++ /dev/null
@@ -1,46 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
- <title>Test AudioBufferSourceNode.gain in presence of loops</title>
- <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
- <script type="text/javascript" src="webaudio.js"></script>
- <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-
-var gTest = {
- length: 4096,
- numberOfChannels: 1,
- createGraph: function(context) {
- var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
- for (var i = 0; i < 2048; ++i) {
- sourceBuffer.getChannelData(0)[i] = 1;
- }
-
- var source = context.createBufferSource();
- source.buffer = sourceBuffer;
- source.loop = true;
- source.start(0);
- source.stop(sourceBuffer.duration * 2);
-
- // Adjust the gain in a way that we don't just end up modifying AudioChunk::mVolume
- source.gain.setValueAtTime(0.5, 0);
- return source;
- },
- createExpectedBuffers: function(context) {
- var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate);
- for (var i = 0; i < 4096; ++i) {
- expectedBuffer.getChannelData(0)[i] = 0.5;
- }
- return expectedBuffer;
- },
-};
-
-runTest();
-
-</script>
-</pre>
-</body>
-</html>
deleted file mode 100644
--- a/content/media/webaudio/test/test_delayNodeWithGainAlternate.html
+++ /dev/null
@@ -1,51 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
- <title>Test DelayNode with an AudioBufferSourceNode.gain value</title>
- <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
- <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script src="webaudio.js" type="text/javascript"></script>
-<script class="testbody" type="text/javascript">
-
-var gTest = {
- length: 4096,
- numberOfChannels: 1,
- createGraph: function(context) {
- var buffer = context.createBuffer(1, 2048, context.sampleRate);
- for (var i = 0; i < 2048; ++i) {
- buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
- }
-
- var source = context.createBufferSource();
-
- var delay = context.createDelay();
-
- source.buffer = buffer;
- source.gain.value = 0.5;
-
- source.connect(delay);
-
- // Delay the source stream by 2048 frames
- delay.delayTime.value = 2048 / context.sampleRate;
-
- source.start(0);
- return delay;
- },
- createExpectedBuffers: function(context) {
- var expectedBuffer = context.createBuffer(1, 2048 * 2, context.sampleRate);
- for (var i = 2048; i < 2048 * 2; ++i) {
- expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i - 2048) / context.sampleRate) / 2;
- }
- return expectedBuffer;
- },
-};
-
-runTest();
-
-</script>
-</pre>
-</body>
-</html>
--- a/dom/bindings/Bindings.conf
+++ b/dom/bindings/Bindings.conf
@@ -102,17 +102,17 @@ DOMInterfaces = {
'AudioContext': {
'implicitJSContext': [ 'createBuffer' ],
'nativeOwnership': 'refcounted',
'resultNotAddRefed': [ 'destination', 'listener' ],
},
'AudioBufferSourceNode': {
'implicitJSContext': [ 'buffer' ],
- 'resultNotAddRefed': [ 'gain', 'playbackRate' ],
+ 'resultNotAddRefed': [ 'playbackRate' ],
},
'AudioListener' : {
'nativeOwnership': 'refcounted'
},
'AudioNode' : {
'concrete': False,
--- a/dom/webidl/AudioBufferSourceNode.webidl
+++ b/dom/webidl/AudioBufferSourceNode.webidl
@@ -11,17 +11,16 @@
*/
[PrefControlled]
interface AudioBufferSourceNode : AudioNode {
attribute AudioBuffer? buffer;
readonly attribute AudioParam playbackRate;
- readonly attribute AudioParam gain;
attribute boolean loop;
attribute double loopStart;
attribute double loopEnd;
[Throws]
void start(optional double when = 0, optional double grainOffset = 0,
optional double grainDuration);