author | Paul Adenot <paul@paul.cx> |
date | Thu, 16 Aug 2012 18:10:36 -0700 |
changeset 102927 | 79e9fb28b8e17a0efcffd3153c50a0b5870e52eb |
parent 102926 | 94f6e5a00d8b387296defb979ee03955c2a244b7 |
child 102928 | 6cfd16fe1cb3052056e9be8e99a44b70d1453bc9 |
push id | 23317 |
push user | ryanvm@gmail.com |
push date | Wed, 22 Aug 2012 02:05:02 +0000 |
treeherder | mozilla-central@abc17059522b |
reviewers | kinetik |
bugs | 775319 |
milestone | 17.0a1 |
first release with | nightly linux32 / linux64 / mac / win32 / win64 |
last release without | nightly linux32 / linux64 / mac / win32 / win64 |
--- a/configure.in
+++ b/configure.in
@@ -4139,16 +4139,18 @@ MOZ_AUTH_EXTENSION=1
 MOZ_OGG=1
 MOZ_RAW=
 MOZ_SYDNEYAUDIO=
 MOZ_SPEEX_RESAMPLER=1
 MOZ_CUBEB=
 MOZ_VORBIS=
 MOZ_TREMOR=
 MOZ_WAVE=1
+MOZ_SAMPLE_TYPE_FLOAT32=
+MOZ_SAMPLE_TYPE_S16LE=
 MOZ_MEDIA=
 MOZ_OPUS=1
 MOZ_WEBM=1
 MOZ_WEBRTC=1
 MOZ_WEBRTC_SIGNALING=
 MOZ_MEDIA_PLUGINS=
 MOZ_MEDIA_NAVIGATOR=
 MOZ_OMX_PLUGIN=
@@ -5187,16 +5189,29 @@ if test -n "$MOZ_WEBRTC"; then
     MOZ_RAW=1
     MOZ_VP8=1
     MOZ_VP8_ENCODER=1
     MOZ_VP8_ERROR_CONCEALMENT=1
 fi

 AC_SUBST(MOZ_WEBRTC)

+case "$target_cpu" in
+arm*)
+  MOZ_SAMPLE_TYPE_S16LE=1
+  AC_DEFINE(MOZ_SAMPLE_TYPE_S16LE)
+  AC_SUBST(MOZ_SAMPLE_TYPE_S16LE)
+;;
+*)
+  MOZ_SAMPLE_TYPE_FLOAT32=1
+  AC_DEFINE(MOZ_SAMPLE_TYPE_FLOAT32)
+  AC_SUBST(MOZ_SAMPLE_TYPE_FLOAT32)
+;;
+esac
+
 dnl ========================================================
 dnl = Enable Raw Codecs
 dnl ========================================================
 MOZ_ARG_ENABLE_BOOL(raw,
 [  --enable-raw           Enable support for RAW media],
     MOZ_RAW=1,
     MOZ_RAW=)

@@ -5215,24 +5230,16 @@ MOZ_ARG_DISABLE_BOOL(ogg,
     MOZ_OGG=,
     MOZ_OGG=1)

 if test -n "$MOZ_OGG"; then
     AC_DEFINE(MOZ_OGG)
     MOZ_SYDNEYAUDIO=1
     MOZ_CUBEB=1
     MOZ_MEDIA=1
-    case "$target_cpu" in
-    arm*)
-      MOZ_TREMOR=1
-      ;;
-    *)
-      MOZ_VORBIS=1
-      ;;
-    esac

     dnl Checks for __attribute__(aligned()) directive
     AC_CACHE_CHECK([__attribute__ ((aligned ())) support],
         [ac_cv_c_attribute_aligned],
         [ac_cv_c_attribute_aligned=0
         CFLAGS_save="${CFLAGS}"
         CFLAGS="${CFLAGS} -Werror"
         for ac_cv_c_attr_align_try in 64 32 16 8; do
@@ -5362,24 +5369,21 @@ fi
 AC_SUBST(MOZ_NATIVE_LIBVPX)
 AC_SUBST(MOZ_LIBVPX_CFLAGS)
 AC_SUBST(MOZ_LIBVPX_LIBS)

 if test "$MOZ_WEBM"; then
     MOZ_SYDNEYAUDIO=1
     MOZ_CUBEB=1
     MOZ_MEDIA=1
-    case "$target_cpu" in
-    arm*)
+    if test "$MOZ_SAMPLE_TYPE_FLOAT32"; then
+        MOZ_VORBIS=1
+    else
         MOZ_TREMOR=1
-        ;;
-    *)
-        MOZ_VORBIS=1
-        ;;
-    esac
+    fi
 fi

 if test -n "$MOZ_VP8" -a -z "$MOZ_NATIVE_LIBVPX"; then
     dnl Detect if we can use an assembler to compile optimized assembly for libvpx.
     dnl We currently require yasm on all x86 platforms and require yasm 1.1.0 on Win32.
     dnl We currently require gcc on all arm platforms.
     VPX_AS=$YASM
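With this configure change the sample format is fixed once per build: ARM targets get MOZ_SAMPLE_TYPE_S16LE, all other targets get MOZ_SAMPLE_TYPE_FLOAT32, and the Vorbis/Tremor choice for WebM now follows the sample type instead of being keyed directly on the CPU. A hypothetical compile-time guard (not part of the patch) illustrates the invariant the C++ changes below rely on:

// Hypothetical sanity check: configure defines exactly one sample-type macro.
#if defined(MOZ_SAMPLE_TYPE_S16LE) && defined(MOZ_SAMPLE_TYPE_FLOAT32)
#error "MOZ_SAMPLE_TYPE_S16LE and MOZ_SAMPLE_TYPE_FLOAT32 are mutually exclusive"
#elif !defined(MOZ_SAMPLE_TYPE_S16LE) && !defined(MOZ_SAMPLE_TYPE_FLOAT32)
#error "configure is expected to define exactly one MOZ_SAMPLE_TYPE_* macro"
#endif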
--- a/content/html/content/src/nsHTMLAudioElement.cpp
+++ b/content/html/content/src/nsHTMLAudioElement.cpp
@@ -111,18 +111,17 @@ nsHTMLAudioElement::MozSetup(PRUint32 aC
     return NS_ERROR_FAILURE;
   }

   if (mAudioStream) {
     mAudioStream->Shutdown();
   }

   mAudioStream = nsAudioStream::AllocateStream();
-  nsresult rv = mAudioStream->Init(aChannels, aRate,
-                                   nsAudioStream::FORMAT_FLOAT32);
+  nsresult rv = mAudioStream->Init(aChannels, aRate);
   if (NS_FAILED(rv)) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
     return rv;
   }

   MetadataLoaded(aChannels, aRate, true, nullptr);
   mAudioStream->SetVolume(mVolume);
@@ -164,17 +163,40 @@ nsHTMLAudioElement::MozWriteAudio(const
   // on number of channels.
   if (dataLength % mChannels != 0) {
     return NS_ERROR_DOM_INDEX_SIZE_ERR;
   }

   // Don't write more than can be written without blocking.
   PRUint32 writeLen = NS_MIN(mAudioStream->Available(), dataLength / mChannels);

-  nsresult rv = mAudioStream->Write(JS_GetFloat32ArrayData(tsrc, aCx), writeLen);
+  float* frames = JS_GetFloat32ArrayData(tsrc, aCx);
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+  // Convert the samples back to integers as we are using fixed point audio in
+  // the nsAudioStream.
+  nsAutoArrayPtr<short> shortsArray(new short[writeLen * mChannels]);
+  // Hard clip the samples.
+  for (PRUint32 i = 0; i < writeLen * mChannels; ++i) {
+    float scaled_value = floorf(0.5 + 32768 * frames[i]);
+    if (frames[i] < 0.0) {
+      shortsArray[i] = (scaled_value < -32768.0) ?
+        -32768 :
+        short(scaled_value);
+    } else {
+      shortsArray[i] = (scaled_value > 32767.0) ?
+        32767 :
+        short(scaled_value);
+    }
+  }
+  nsresult rv = mAudioStream->Write(shortsArray, writeLen);
+#else
+  nsresult rv = mAudioStream->Write(frames, writeLen);
+#endif
+
+
   if (NS_FAILED(rv)) {
     return rv;
   }

   // Return the actual amount written.
   *aRetVal = writeLen * mChannels;
   return rv;
 }
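For reference, the per-sample conversion added to MozWriteAudio reduces to the following helper (a standalone sketch, not code from the patch): scale by 32768, round, and hard-clip to the signed 16-bit range.

#include <math.h>

// Convert one float sample in [-1.0, 1.0] to a 16-bit signed sample,
// rounding and hard-clipping exactly as the loop above does.
static short FloatToS16(float aSample)
{
  float scaled = floorf(0.5f + 32768.0f * aSample);
  if (aSample < 0.0f) {
    return scaled < -32768.0f ? -32768 : short(scaled);
  }
  return scaled > 32767.0f ? 32767 : short(scaled);
}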
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -1145,18 +1145,17 @@ MediaStreamGraphImpl::CreateOrDestroyAud
         // stream ...
         AudioSegment* audio = tracks->Get<AudioSegment>();
         MediaStream::AudioOutputStream* audioOutputStream =
           aStream->mAudioOutputStreams.AppendElement();
         audioOutputStream->mAudioPlaybackStartTime = aAudioOutputStartTime;
         audioOutputStream->mBlockedAudioTime = 0;
         audioOutputStream->mStream = nsAudioStream::AllocateStream();
         audioOutputStream->mStream->Init(audio->GetChannels(),
-                                         tracks->GetRate(),
-                                         audio->GetFirstFrameFormat());
+                                         tracks->GetRate());
         audioOutputStream->mTrackID = tracks->GetID();
       }
     }
   }

   for (PRInt32 i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
     if (!audioOutputStreamsFound[i]) {
       aStream->mAudioOutputStreams[i].mStream->Shutdown();
--- a/content/media/nsAudioStream.cpp
+++ b/content/media/nsAudioStream.cpp
@@ -55,17 +55,17 @@ static const PRInt64 MS_PER_S = 1000;
 class nsNativeAudioStream : public nsAudioStream
 {
  public:
   NS_DECL_ISUPPORTS

   ~nsNativeAudioStream();
   nsNativeAudioStream();

-  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
+  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate);
   void Shutdown();
   nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
   void SetVolume(double aVolume);
   void Drain();
   void Pause();
   void Resume();
   PRInt64 GetPosition();
@@ -90,17 +90,17 @@ class nsNativeAudioStream : public nsAud
 class nsRemotedAudioStream : public nsAudioStream
 {
  public:
   NS_DECL_ISUPPORTS

   nsRemotedAudioStream();
   ~nsRemotedAudioStream();

-  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
+  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate);
   void Shutdown();
   nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
   void SetVolume(double aVolume);
   void Drain();
   void Pause();
   void Resume();
   PRInt64 GetPosition();
@@ -417,21 +417,21 @@ nsNativeAudioStream::nsNativeAudioStream

 nsNativeAudioStream::~nsNativeAudioStream()
 {
   Shutdown();
 }

 NS_IMPL_THREADSAFE_ISUPPORTS0(nsNativeAudioStream)

-nsresult nsNativeAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
+nsresult nsNativeAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate)
 {
   mRate = aRate;
   mChannels = aNumChannels;
-  mFormat = aFormat;
+  mFormat = MOZ_AUDIO_DATA_FORMAT;

   if (sa_stream_create_pcm(reinterpret_cast<sa_stream_t**>(&mAudioHandle),
                            NULL,
                            SA_MODE_WRONLY,
                            SA_PCM_FORMAT_S16_NE,
                            aRate,
                            aNumChannels) != SA_SUCCESS) {
     mAudioHandle = nullptr;
@@ -469,63 +469,50 @@ nsresult nsNativeAudioStream::Write(cons
   if (mInError)
     return NS_ERROR_FAILURE;

   PRUint32 samples = aFrames * mChannels;
   nsAutoArrayPtr<short> s_data(new short[samples]);

   if (s_data) {
     double scaled_volume = GetVolumeScale() * mVolume;
-    switch (mFormat) {
-      case FORMAT_U8: {
-        const PRUint8* buf = static_cast<const PRUint8*>(aBuf);
-        PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
-        for (PRUint32 i = 0; i < samples; ++i) {
-          s_data[i] = short(((PRInt32(buf[i]) - 128) * volume) >> 8);
-        }
-        break;
-      }
-      case FORMAT_S16_LE: {
-        const short* buf = static_cast<const short*>(aBuf);
-        PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
-        for (PRUint32 i = 0; i < samples; ++i) {
-          short s = buf[i];
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+    const short* buf = static_cast<const short*>(aBuf);
+    PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
+    for (PRUint32 i = 0; i < samples; ++i) {
+      short s = buf[i];
 #if defined(IS_BIG_ENDIAN)
-          s = ((s & 0x00ff) << 8) | ((s & 0xff00) >> 8);
+      s = ((s & 0x00ff) << 8) | ((s & 0xff00) >> 8);
 #endif
-          s_data[i] = short((PRInt32(s) * volume) >> 16);
-        }
-        break;
-      }
-      case FORMAT_FLOAT32: {
-        const float* buf = static_cast<const float*>(aBuf);
-        for (PRUint32 i = 0; i < samples; ++i) {
-          float scaled_value = floorf(0.5 + 32768 * buf[i] * scaled_volume);
-          if (buf[i] < 0.0) {
-            s_data[i] = (scaled_value < -32768.0) ?
-                        -32768 :
-                        short(scaled_value);
-          } else {
-            s_data[i] = (scaled_value > 32767.0) ?
-                        32767 :
-                        short(scaled_value);
-          }
-        }
-        break;
+      s_data[i] = short((PRInt32(s) * volume) >> 16);
+    }
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
+    const SampleType* buf = static_cast<const SampleType*>(aBuf);
+    for (PRUint32 i = 0; i < samples; ++i) {
+      float scaled_value = floorf(0.5 + 32768 * buf[i] * scaled_volume);
+      if (buf[i] < 0.0) {
+        s_data[i] = (scaled_value < -32768.0) ?
+                    -32768 :
+                    short(scaled_value);
+      } else {
+        s_data[i] = (scaled_value > 32767.0) ?
+                    32767 :
+                    short(scaled_value);
       }
     }
+#endif
+  }

-    if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle),
-                        s_data.get(),
-                        samples * sizeof(short)) != SA_SUCCESS)
-    {
-      PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error"));
-      mInError = true;
-      return NS_ERROR_FAILURE;
-    }
+  if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle),
+                      s_data.get(),
+                      samples * sizeof(short)) != SA_SUCCESS)
+  {
+    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error"));
+    mInError = true;
+    return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }

 PRUint32 nsNativeAudioStream::Available()
 {
   // If the audio backend failed to open, lie and say we'll accept some
   // data.
@@ -639,36 +626,22 @@ nsRemotedAudioStream::~nsRemotedAudioStr
 {
   Shutdown();
 }

 NS_IMPL_THREADSAFE_ISUPPORTS0(nsRemotedAudioStream)

 nsresult
 nsRemotedAudioStream::Init(PRInt32 aNumChannels,
-                           PRInt32 aRate,
-                           SampleFormat aFormat)
+                           PRInt32 aRate)
 {
   mRate = aRate;
   mChannels = aNumChannels;
-  mFormat = aFormat;
-
-  switch (mFormat) {
-    case FORMAT_U8: {
-      mBytesPerFrame = sizeof(PRUint8) * mChannels;
-      break;
-    }
-    case FORMAT_S16_LE: {
-      mBytesPerFrame = sizeof(short) * mChannels;
-      break;
-    }
-    case FORMAT_FLOAT32: {
-      mBytesPerFrame = sizeof(float) * mChannels;
-    }
-  }
+  mFormat = MOZ_AUDIO_DATA_FORMAT;
+  mBytesPerFrame = sizeof(SampleType) * mChannels;

   nsCOMPtr<nsIRunnable> event = new AudioInitEvent(this);
   NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);

   return NS_OK;
 }

 void
 nsRemotedAudioStream::Shutdown()
@@ -856,17 +829,17 @@ private:
 class nsBufferedAudioStream : public nsAudioStream
 {
  public:
   NS_DECL_ISUPPORTS

   nsBufferedAudioStream();
   ~nsBufferedAudioStream();

-  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
+  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate);
   void Shutdown();
   nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
   void SetVolume(double aVolume);
   void Drain();
   void Pause();
   void Resume();
   PRInt64 GetPosition();
@@ -958,43 +931,37 @@ nsBufferedAudioStream::nsBufferedAudioSt

 nsBufferedAudioStream::~nsBufferedAudioStream()
 {
   Shutdown();
 }

 NS_IMPL_THREADSAFE_ISUPPORTS0(nsBufferedAudioStream)

 nsresult
-nsBufferedAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
+nsBufferedAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate)
 {
   cubeb* cubebContext = GetCubebContext();

   if (!cubebContext || aNumChannels < 0 || aRate < 0) {
     return NS_ERROR_FAILURE;
   }

   mRate = aRate;
   mChannels = aNumChannels;
-  mFormat = aFormat;
+  mFormat = MOZ_AUDIO_DATA_FORMAT;

   cubeb_stream_params params;
   params.rate = aRate;
   params.channels = aNumChannels;
-  switch (aFormat) {
-  case FORMAT_S16_LE:
-    params.format = CUBEB_SAMPLE_S16LE;
-    mBytesPerFrame = sizeof(short) * aNumChannels;
-    break;
-  case FORMAT_FLOAT32:
-    params.format = CUBEB_SAMPLE_FLOAT32NE;
-    mBytesPerFrame = sizeof(float) * aNumChannels;
-    break;
-  default:
-    return NS_ERROR_FAILURE;
-  }
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+  params.format = CUBEB_SAMPLE_S16NE;
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
+  params.format = CUBEB_SAMPLE_FLOAT32NE;
+#endif
+  mBytesPerFrame = sizeof(float) * aNumChannels;

   {
     cubeb_stream* stream;
     if (cubeb_stream_init(cubebContext, &stream, "nsBufferedAudioStream", params,
                           GetCubebLatency(), DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
       mCubebStream.own(stream);
     }
   }
@@ -1216,40 +1183,32 @@ nsBufferedAudioStream::DataCallback(void

   PRUint8* output = reinterpret_cast<PRUint8*>(aBuffer);
   for (int i = 0; i < 2; ++i) {
     // Fast path for unity volume case.
     if (scaled_volume == 1.0) {
       memcpy(output, input[i], input_size[i]);
       output += input_size[i];
     } else {
       // Adjust volume as each sample is copied out.
-      switch (mFormat) {
-      case FORMAT_S16_LE: {
-        PRInt32 volume = PRInt32(1 << 16) * scaled_volume;
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+      PRInt32 volume = PRInt32(1 << 16) * scaled_volume;

-        const short* src = static_cast<const short*>(input[i]);
-        short* dst = reinterpret_cast<short*>(output);
-        for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
-          dst[j] = short((PRInt32(src[j]) * volume) >> 16);
-        }
-        output += input_size[i];
-        break;
+      const short* src = static_cast<const short*>(input[i]);
+      short* dst = reinterpret_cast<short*>(output);
+      for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
+        dst[j] = short((PRInt32(src[j]) * volume) >> 16);
       }
-      case FORMAT_FLOAT32: {
-        const float* src = static_cast<const float*>(input[i]);
-        float* dst = reinterpret_cast<float*>(output);
-        for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
-          dst[j] = src[j] * scaled_volume;
-        }
-        output += input_size[i];
-        break;
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
+      const float* src = static_cast<const float*>(input[i]);
+      float* dst = reinterpret_cast<float*>(output);
+      for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
+        dst[j] = src[j] * scaled_volume;
       }
-      default:
-        return -1;
-      }
+#endif
+      output += input_size[i];
     }
   }

   NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

   // Notify any blocked Write() call that more space is available in mBuffer.
   mon.NotifyAll();
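In the S16 branches above, volume is applied in fixed point rather than with a per-sample float multiply; the pattern boils down to this sketch (assuming samples are already in native-endian 16-bit form):

#include <stdint.h>

// Promote the volume to 16.16 fixed point once, then scale each sample with
// an integer multiply and a shift.
static short ApplyVolumeS16(short aSample, double aVolume)
{
  int32_t volume = int32_t((1 << 16) * aVolume);
  return short((int32_t(aSample) * volume) >> 16);
}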
--- a/content/media/nsAudioStream.h
+++ b/content/media/nsAudioStream.h
@@ -6,16 +6,24 @@
 #if !defined(nsAudioStream_h_)
 #define nsAudioStream_h_

 #include "nscore.h"
 #include "nsISupportsImpl.h"
 #include "nsIThread.h"
 #include "nsAutoPtr.h"

+#ifdef MOZ_SAMPLE_TYPE_S16LE
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
+typedef short SampleType;
+#else
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
+typedef float SampleType;
+#endif
+
 // Access to a single instance of this class must be synchronized by
 // callers, or made from a single thread.  One exception is that access to
 // GetPosition, GetPositionInFrames, SetVolume, and Get{Rate,Channels,Format}
 // is thread-safe without external synchronization.
 class nsAudioStream : public nsISupports
 {
  public:
@@ -23,18 +31,17 @@ public:
   {
     FORMAT_U8,
     FORMAT_S16_LE,
     FORMAT_FLOAT32
   };

   nsAudioStream()
     : mRate(0),
-      mChannels(0),
-      mFormat(FORMAT_S16_LE)
+      mChannels(0)
   {}

   virtual ~nsAudioStream();

   // Initialize Audio Library. Some Audio backends require initializing the
   // library before using it.
   static void InitLibrary();

@@ -51,17 +58,17 @@ public:
   // you may receive an implementation which forwards to a compositing process.
   static nsAudioStream* AllocateStream();

   // Initialize the audio stream. aNumChannels is the number of audio
   // channels (1 for mono, 2 for stereo, etc) and aRate is the sample rate
   // (22050Hz, 44100Hz, etc).
   // Unsafe to call with a monitor held due to synchronous event execution
   // on the main thread, which may attempt to acquire any held monitor.
-  virtual nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat) = 0;
+  virtual nsresult Init(PRInt32 aNumChannels, PRInt32 aRate) = 0;

   // Closes the stream. All future use of the stream is an error.
   // Unsafe to call with a monitor held due to synchronous event execution
   // on the main thread, which may attempt to acquire any held monitor.
   virtual void Shutdown() = 0;

   // Write audio data to the audio hardware.  aBuf is an array of frames in
   // the format specified by mFormat of length aCount.  If aFrames is larger
@@ -101,17 +108,17 @@ public:
   // Returns the minimum number of audio frames which must be written before
   // you can be sure that something will be played.
   // Unsafe to call with a monitor held due to synchronous event execution
   // on the main thread, which may attempt to acquire any held monitor.
   virtual PRInt32 GetMinWriteSize() = 0;

   int GetRate() { return mRate; }
   int GetChannels() { return mChannels; }
-  SampleFormat GetFormat() { return mFormat; }
+  SampleFormat GetFormat() { return MOZ_AUDIO_DATA_FORMAT; }

 protected:
   nsCOMPtr<nsIThread> mAudioPlaybackThread;
   int mRate;
   int mChannels;
   SampleFormat mFormat;
 };
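After this header change, callers no longer pass a SampleFormat: they allocate a stream, Init() it with channels and rate, and write interleaved SampleType frames in the single build-time format. A minimal sketch of that call pattern follows (PlayFrames is a hypothetical helper, error handling elided; assumes nsAudioStream.h is included):

// Hypothetical helper showing the simplified Init()/Write() call pattern.
static void PlayFrames(const SampleType* aFrames, PRUint32 aFrameCount,
                       PRInt32 aChannels, PRInt32 aRate)
{
  nsRefPtr<nsAudioStream> stream = nsAudioStream::AllocateStream();
  if (NS_SUCCEEDED(stream->Init(aChannels, aRate))) {
    stream->Write(aFrames, aFrameCount);  // one build-time sample format
    stream->Drain();
    stream->Shutdown();
  }
}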
--- a/content/media/nsBuiltinDecoderReader.h
+++ b/content/media/nsBuiltinDecoderReader.h
@@ -48,39 +48,35 @@ public:
   // True if we have an active audio bitstream.
   bool mHasAudio;

   // True if we have an active video bitstream.
   bool mHasVideo;
 };

-#ifdef MOZ_TREMOR
+#ifdef MOZ_SAMPLE_TYPE_S16LE
 #include <ogg/os_types.h>
 typedef ogg_int32_t VorbisPCMValue;
 typedef short AudioDataValue;

-#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
 #define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
 // Convert the output of vorbis_synthesis_pcmout to a AudioDataValue
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) \
  (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
 // Convert a AudioDataValue to a float for the Audio API
 #define MOZ_CONVERT_AUDIO_SAMPLE(x) ((x)*(1.F/32768))
-#define MOZ_SAMPLE_TYPE_S16LE 1

-#else /*MOZ_VORBIS*/
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */

 typedef float VorbisPCMValue;
 typedef float AudioDataValue;

-#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
 #define MOZ_CONVERT_AUDIO_SAMPLE(x) (x)
-#define MOZ_SAMPLE_TYPE_FLOAT32 1

 #endif

 // Holds chunk a decoded audio frames.
 class AudioData {
 public:
   typedef mozilla::SharedBuffer SharedBuffer;
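In the S16LE build, MOZ_CONVERT_VORBIS_SAMPLE turns Tremor's fixed-point output into 16-bit samples by shifting right 9 bits and clipping. Written out as a function, the macro is equivalent to this sketch (the shift amount is taken from the macro and assumes Tremor's fixed-point scaling):

#include <stdint.h>

// Equivalent of MOZ_CONVERT_VORBIS_SAMPLE / MOZ_CLIP_TO_15 in the S16LE build.
static short ConvertVorbisSample(int32_t aFixedPointSample)
{
  int32_t shifted = aFixedPointSample >> 9;
  if (shifted < -32768) return -32768;
  if (shifted > 32767)  return 32767;
  return short(shifted);
}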
--- a/content/media/nsBuiltinDecoderStateMachine.cpp
+++ b/content/media/nsBuiltinDecoderStateMachine.cpp
@@ -1003,17 +1003,17 @@ void nsBuiltinDecoderStateMachine::Audio
   // monitor held, as on Android those methods do a synchronous dispatch to
   // the main thread. If the audio thread holds the decoder monitor while
   // it does a synchronous dispatch to the main thread, we can get deadlocks
   // if the main thread tries to acquire the decoder monitor before the
   // dispatched event has finished (or even started!) running. Methods which
   // are unsafe to call with the decoder monitor held are documented as such
   // in nsAudioStream.h.
   nsRefPtr<nsAudioStream> audioStream = nsAudioStream::AllocateStream();
-  audioStream->Init(channels, rate, MOZ_AUDIO_DATA_FORMAT);
+  audioStream->Init(channels, rate);

   {
     // We must hold the monitor while setting mAudioStream or whenever we query
     // the playback position off the audio thread. This ensures the audio stream
     // is always alive when we use it off the audio thread. Note that querying
     // the playback position does not do a synchronous dispatch to the main
     // thread, so it's safe to call with the decoder monitor held.
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
--- a/dom/ipc/AudioParent.cpp
+++ b/dom/ipc/AudioParent.cpp
@@ -292,19 +292,17 @@ AudioParent::SendWriteDone()
   return true;
 }

 AudioParent::AudioParent(PRInt32 aNumChannels, PRInt32 aRate, PRInt32 aFormat)
   : mIPCOpen(true)
 {
   mStream = nsAudioStream::AllocateStream();
   NS_ASSERTION(mStream, "AudioStream allocation failed.");
-  if (NS_FAILED(mStream->Init(aNumChannels,
-                              aRate,
-                              (nsAudioStream::SampleFormat) aFormat))) {
+  if (NS_FAILED(mStream->Init(aNumChannels, aRate))) {
     NS_WARNING("AudioStream initialization failed.");
     mStream = nullptr;
     return;
   }

   mTimer = do_CreateInstance("@mozilla.org/timer;1");
   mTimer->InitWithCallback(this, 1000, nsITimer::TYPE_REPEATING_SLACK);
 }