author | Paul Adenot <paul@paul.cx>
date | Wed, 28 Nov 2012 20:40:07 +0100
changeset 114394 | d8fb7a8bdae1b6f5110944eabff75634a85e2769
parent 114393 | 6289f763c7aa93870d0920c314fc3c34214a37ff
child 114395 | 87d836ed8e2a3d0417e3cd6acd6574a66011119f
push id | 18740
push user | paul@paul.cx
push date | Wed, 28 Nov 2012 19:41:55 +0000
treeherder | mozilla-inbound@d8fb7a8bdae1
reviewers | cpearce
bugs | 815194
milestone | 20.0a1
--- a/content/media/AudioSampleFormat.h +++ b/content/media/AudioSampleFormat.h @@ -1,154 +1,154 @@ -/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ -/* vim:set ts=2 sw=2 sts=2 et cindent: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -#ifndef MOZILLA_AUDIOSAMPLEFORMAT_H_ -#define MOZILLA_AUDIOSAMPLEFORMAT_H_ - -#include "nsAlgorithm.h" - -namespace mozilla { - -/** - * Audio formats supported in MediaStreams and media elements. - * - * Only one of these is supported by AudioStream, and that is determined - * at compile time (roughly, FLOAT32 on desktops, S16 on mobile). Media decoders - * produce that format only; queued AudioData always uses that format. - */ -enum AudioSampleFormat -{ - // Native-endian signed 16-bit audio samples - AUDIO_FORMAT_S16, - // Signed 32-bit float samples - AUDIO_FORMAT_FLOAT32, - // The format used for output by AudioStream. -#ifdef MOZ_SAMPLE_TYPE_S16 - AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_S16 -#else - AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_FLOAT32 -#endif -}; - -template <AudioSampleFormat Format> class AudioSampleTraits; - -template <> class AudioSampleTraits<AUDIO_FORMAT_FLOAT32> { -public: - typedef float Type; -}; -template <> class AudioSampleTraits<AUDIO_FORMAT_S16> { -public: - typedef int16_t Type; -}; - -typedef AudioSampleTraits<AUDIO_OUTPUT_FORMAT>::Type AudioDataValue; - -// Single-sample conversion - -/* - * Use "2^N" conversion since it's simple, fast, "bit transparent", used by - * many other libraries and apparently behaves reasonably. - * http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html - * http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html - */ -inline float -AudioSampleToFloat(float aValue) -{ - return aValue; -} -inline float -AudioSampleToFloat(int16_t aValue) -{ - return aValue/32768.0f; -} - -template <typename T> T FloatToAudioSample(float aValue); - -template <> inline float -FloatToAudioSample<float>(float aValue) -{ - return aValue; -} -template <> inline int16_t -FloatToAudioSample<int16_t>(float aValue) -{ - float v = aValue*32768.0f; - float clamped = NS_MAX(-32768.0f, NS_MIN(32767.0f, v)); - return int16_t(clamped); -} - -// Sample buffer conversion - -template <typename From, typename To> inline void -ConvertAudioSamples(const From* aFrom, To* aTo, int aCount) -{ - for (int i = 0; i < aCount; ++i) { - aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i])); - } -} -inline void -ConvertAudioSamples(const int16_t* aFrom, int16_t* aTo, int aCount) -{ - memcpy(aTo, aFrom, sizeof(*aTo)*aCount); -} -inline void -ConvertAudioSamples(const float* aFrom, float* aTo, int aCount) -{ - memcpy(aTo, aFrom, sizeof(*aTo)*aCount); -} - -// Sample buffer conversion with scale - -template <typename From, typename To> inline void -ConvertAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount, float aScale) -{ - if (aScale == 1.0f) { - ConvertAudioSamples(aFrom, aTo, aCount); - return; - } - for (int i = 0; i < aCount; ++i) { - aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i])*aScale); - } -} -inline void -ConvertAudioSamplesWithScale(const int16_t* aFrom, int16_t* aTo, int aCount, float aScale) -{ - if (aScale == 1.0f) { - ConvertAudioSamples(aFrom, aTo, aCount); - return; - } - if (0.0f <= aScale && aScale < 1.0f) { - int32_t scale = int32_t((1 << 16) * aScale); - for (int i = 0; i < 
aCount; ++i) { - aTo[i] = int16_t((int32_t(aFrom[i]) * scale) >> 16); - } - return; - } - for (int i = 0; i < aCount; ++i) { - aTo[i] = FloatToAudioSample<int16_t>(AudioSampleToFloat(aFrom[i])*aScale); - } -} - -// In place audio sample scaling. -inline void -ScaleAudioSamples(float* aBuffer, int aCount, float aScale) -{ - for (int32_t i = 0; i < aCount; ++i) { - aBuffer[i] *= aScale; - } -} - - -inline void -ScaleAudioSamples(short* aBuffer, int aCount, float aScale) -{ - int32_t volume = int32_t(1 << 16) * aScale; - for (int32_t i = 0; i < aCount; ++i) { - aBuffer[i] = short((int32_t(aBuffer[i]) * volume) >> 16); - } -} - -} // namespace mozilla - -#endif /* MOZILLA_AUDIOSAMPLEFORMAT_H_ */ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MOZILLA_AUDIOSAMPLEFORMAT_H_ +#define MOZILLA_AUDIOSAMPLEFORMAT_H_ + +#include "nsAlgorithm.h" + +namespace mozilla { + +/** + * Audio formats supported in MediaStreams and media elements. + * + * Only one of these is supported by AudioStream, and that is determined + * at compile time (roughly, FLOAT32 on desktops, S16 on mobile). Media decoders + * produce that format only; queued AudioData always uses that format. + */ +enum AudioSampleFormat +{ + // Native-endian signed 16-bit audio samples + AUDIO_FORMAT_S16, + // Signed 32-bit float samples + AUDIO_FORMAT_FLOAT32, + // The format used for output by AudioStream. +#ifdef MOZ_SAMPLE_TYPE_S16 + AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_S16 +#else + AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_FLOAT32 +#endif +}; + +template <AudioSampleFormat Format> class AudioSampleTraits; + +template <> class AudioSampleTraits<AUDIO_FORMAT_FLOAT32> { +public: + typedef float Type; +}; +template <> class AudioSampleTraits<AUDIO_FORMAT_S16> { +public: + typedef int16_t Type; +}; + +typedef AudioSampleTraits<AUDIO_OUTPUT_FORMAT>::Type AudioDataValue; + +// Single-sample conversion + +/* + * Use "2^N" conversion since it's simple, fast, "bit transparent", used by + * many other libraries and apparently behaves reasonably. 
+ * http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html + * http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html + */ +inline float +AudioSampleToFloat(float aValue) +{ + return aValue; +} +inline float +AudioSampleToFloat(int16_t aValue) +{ + return aValue/32768.0f; +} + +template <typename T> T FloatToAudioSample(float aValue); + +template <> inline float +FloatToAudioSample<float>(float aValue) +{ + return aValue; +} +template <> inline int16_t +FloatToAudioSample<int16_t>(float aValue) +{ + float v = aValue*32768.0f; + float clamped = NS_MAX(-32768.0f, NS_MIN(32767.0f, v)); + return int16_t(clamped); +} + +// Sample buffer conversion + +template <typename From, typename To> inline void +ConvertAudioSamples(const From* aFrom, To* aTo, int aCount) +{ + for (int i = 0; i < aCount; ++i) { + aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i])); + } +} +inline void +ConvertAudioSamples(const int16_t* aFrom, int16_t* aTo, int aCount) +{ + memcpy(aTo, aFrom, sizeof(*aTo)*aCount); +} +inline void +ConvertAudioSamples(const float* aFrom, float* aTo, int aCount) +{ + memcpy(aTo, aFrom, sizeof(*aTo)*aCount); +} + +// Sample buffer conversion with scale + +template <typename From, typename To> inline void +ConvertAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount, float aScale) +{ + if (aScale == 1.0f) { + ConvertAudioSamples(aFrom, aTo, aCount); + return; + } + for (int i = 0; i < aCount; ++i) { + aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i])*aScale); + } +} +inline void +ConvertAudioSamplesWithScale(const int16_t* aFrom, int16_t* aTo, int aCount, float aScale) +{ + if (aScale == 1.0f) { + ConvertAudioSamples(aFrom, aTo, aCount); + return; + } + if (0.0f <= aScale && aScale < 1.0f) { + int32_t scale = int32_t((1 << 16) * aScale); + for (int i = 0; i < aCount; ++i) { + aTo[i] = int16_t((int32_t(aFrom[i]) * scale) >> 16); + } + return; + } + for (int i = 0; i < aCount; ++i) { + aTo[i] = FloatToAudioSample<int16_t>(AudioSampleToFloat(aFrom[i])*aScale); + } +} + +// In place audio sample scaling. +inline void +ScaleAudioSamples(float* aBuffer, int aCount, float aScale) +{ + for (int32_t i = 0; i < aCount; ++i) { + aBuffer[i] *= aScale; + } +} + + +inline void +ScaleAudioSamples(short* aBuffer, int aCount, float aScale) +{ + int32_t volume = int32_t(1 << 16) * aScale; + for (int32_t i = 0; i < aCount; ++i) { + aBuffer[i] = short((int32_t(aBuffer[i]) * volume) >> 16); + } +} + +} // namespace mozilla + +#endif /* MOZILLA_AUDIOSAMPLEFORMAT_H_ */
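The comment above names the "2^N" scheme, but the arithmetic is easy to miss in the middle of the diff. Below is a minimal standalone sketch of the same conversion, with std::max/std::min standing in for NS_MAX/NS_MIN; the constants are the ones used in the header, and main() is just a usage check.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Map int16 samples into [-1.0, 1.0) by dividing by 2^15.
static float AudioSampleToFloat(int16_t aValue)
{
  return aValue / 32768.0f;
}

// Scale by 2^15 and clamp, so out-of-range floats saturate instead of
// wrapping when truncated to 16 bits.
static int16_t FloatToAudioSample(float aValue)
{
  float v = aValue * 32768.0f;
  float clamped = std::max(-32768.0f, std::min(32767.0f, v));
  return int16_t(clamped);
}

int main()
{
  int16_t s = FloatToAudioSample(0.5f);                 // 16384
  std::printf("%d -> %f\n", s, AudioSampleToFloat(s));  // round-trips to 0.5
  std::printf("%d\n", FloatToAudioSample(1.5f));        // clips to 32767
  return 0;
}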
--- a/content/media/AudioSegment.cpp +++ b/content/media/AudioSegment.cpp @@ -1,123 +1,123 @@ -/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this file, - * You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#include "AudioSegment.h" - -#include "AudioStream.h" - -namespace mozilla { - -template <class SrcT, class DestT> -static void -InterleaveAndConvertBuffer(const SrcT* aSource, int32_t aSourceLength, - int32_t aLength, - float aVolume, - int32_t aChannels, - DestT* aOutput) -{ - DestT* output = aOutput; - for (int32_t i = 0; i < aLength; ++i) { - for (int32_t channel = 0; channel < aChannels; ++channel) { - float v = AudioSampleToFloat(aSource[channel*aSourceLength + i])*aVolume; - *output = FloatToAudioSample<DestT>(v); - ++output; - } - } -} - -static inline void -InterleaveAndConvertBuffer(const int16_t* aSource, int32_t aSourceLength, - int32_t aLength, - float aVolume, - int32_t aChannels, - int16_t* aOutput) -{ - int16_t* output = aOutput; - if (0.0f <= aVolume && aVolume <= 1.0f) { - int32_t scale = int32_t((1 << 16) * aVolume); - for (int32_t i = 0; i < aLength; ++i) { - for (int32_t channel = 0; channel < aChannels; ++channel) { - int16_t s = aSource[channel*aSourceLength + i]; - *output = int16_t((int32_t(s) * scale) >> 16); - ++output; - } - } - return; - } - - for (int32_t i = 0; i < aLength; ++i) { - for (int32_t channel = 0; channel < aChannels; ++channel) { - float v = AudioSampleToFloat(aSource[channel*aSourceLength + i])*aVolume; - *output = FloatToAudioSample<int16_t>(v); - ++output; - } - } -} - -static void -InterleaveAndConvertBuffer(const void* aSource, AudioSampleFormat aSourceFormat, - int32_t aSourceLength, - int32_t aOffset, int32_t aLength, - float aVolume, - int32_t aChannels, - AudioDataValue* aOutput) -{ - switch (aSourceFormat) { - case AUDIO_FORMAT_FLOAT32: - InterleaveAndConvertBuffer(static_cast<const float*>(aSource) + aOffset, - aSourceLength, - aLength, - aVolume, - aChannels, - aOutput); - break; - case AUDIO_FORMAT_S16: - InterleaveAndConvertBuffer(static_cast<const int16_t*>(aSource) + aOffset, - aSourceLength, - aLength, - aVolume, - aChannels, - aOutput); - break; - } -} - -void -AudioSegment::ApplyVolume(float aVolume) -{ - for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { - ci->mVolume *= aVolume; - } -} - -static const int STATIC_AUDIO_SAMPLES = 10000; - -void -AudioSegment::WriteTo(AudioStream* aOutput) -{ - NS_ASSERTION(mChannels == aOutput->GetChannels(), "Wrong number of channels"); - nsAutoTArray<AudioDataValue,STATIC_AUDIO_SAMPLES> buf; - for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { - AudioChunk& c = *ci; - if (uint64_t(mChannels)*c.mDuration > INT32_MAX) { - NS_ERROR("Buffer overflow"); - return; - } - buf.SetLength(int32_t(mChannels*c.mDuration)); - if (c.mBuffer) { - InterleaveAndConvertBuffer(c.mBuffer->Data(), c.mBufferFormat, c.mBufferLength, - c.mOffset, int32_t(c.mDuration), - c.mVolume, - aOutput->GetChannels(), - buf.Elements()); - } else { - // Assumes that a bit pattern of zeroes == 0.0f - memset(buf.Elements(), 0, buf.Length()*sizeof(AudioDataValue)); - } - aOutput->Write(buf.Elements(), int32_t(c.mDuration)); - } -} - -} +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioSegment.h" + +#include "AudioStream.h" + +namespace mozilla { + +template <class SrcT, class DestT> +static void +InterleaveAndConvertBuffer(const SrcT* aSource, int32_t aSourceLength, + int32_t aLength, + float aVolume, + int32_t aChannels, + DestT* aOutput) +{ + DestT* output = aOutput; + for (int32_t i = 0; i < aLength; ++i) { + for (int32_t channel = 0; channel < aChannels; ++channel) { + float v = AudioSampleToFloat(aSource[channel*aSourceLength + i])*aVolume; + *output = FloatToAudioSample<DestT>(v); + ++output; + } + } +} + +static inline void +InterleaveAndConvertBuffer(const int16_t* aSource, int32_t aSourceLength, + int32_t aLength, + float aVolume, + int32_t aChannels, + int16_t* aOutput) +{ + int16_t* output = aOutput; + if (0.0f <= aVolume && aVolume <= 1.0f) { + int32_t scale = int32_t((1 << 16) * aVolume); + for (int32_t i = 0; i < aLength; ++i) { + for (int32_t channel = 0; channel < aChannels; ++channel) { + int16_t s = aSource[channel*aSourceLength + i]; + *output = int16_t((int32_t(s) * scale) >> 16); + ++output; + } + } + return; + } + + for (int32_t i = 0; i < aLength; ++i) { + for (int32_t channel = 0; channel < aChannels; ++channel) { + float v = AudioSampleToFloat(aSource[channel*aSourceLength + i])*aVolume; + *output = FloatToAudioSample<int16_t>(v); + ++output; + } + } +} + +static void +InterleaveAndConvertBuffer(const void* aSource, AudioSampleFormat aSourceFormat, + int32_t aSourceLength, + int32_t aOffset, int32_t aLength, + float aVolume, + int32_t aChannels, + AudioDataValue* aOutput) +{ + switch (aSourceFormat) { + case AUDIO_FORMAT_FLOAT32: + InterleaveAndConvertBuffer(static_cast<const float*>(aSource) + aOffset, + aSourceLength, + aLength, + aVolume, + aChannels, + aOutput); + break; + case AUDIO_FORMAT_S16: + InterleaveAndConvertBuffer(static_cast<const int16_t*>(aSource) + aOffset, + aSourceLength, + aLength, + aVolume, + aChannels, + aOutput); + break; + } +} + +void +AudioSegment::ApplyVolume(float aVolume) +{ + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + ci->mVolume *= aVolume; + } +} + +static const int STATIC_AUDIO_SAMPLES = 10000; + +void +AudioSegment::WriteTo(AudioStream* aOutput) +{ + NS_ASSERTION(mChannels == aOutput->GetChannels(), "Wrong number of channels"); + nsAutoTArray<AudioDataValue,STATIC_AUDIO_SAMPLES> buf; + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + AudioChunk& c = *ci; + if (uint64_t(mChannels)*c.mDuration > INT32_MAX) { + NS_ERROR("Buffer overflow"); + return; + } + buf.SetLength(int32_t(mChannels*c.mDuration)); + if (c.mBuffer) { + InterleaveAndConvertBuffer(c.mBuffer->Data(), c.mBufferFormat, c.mBufferLength, + c.mOffset, int32_t(c.mDuration), + c.mVolume, + aOutput->GetChannels(), + buf.Elements()); + } else { + // Assumes that a bit pattern of zeroes == 0.0f + memset(buf.Elements(), 0, buf.Length()*sizeof(AudioDataValue)); + } + aOutput->Write(buf.Elements(), int32_t(c.mDuration)); + } +} + +}
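The int16_t fast path of InterleaveAndConvertBuffer above combines two ideas: a planar (channel-major) source layout and a volume factor held as a 16.16 fixed-point integer, which keeps the inner loop in integer arithmetic. A self-contained sketch of that path; the names are illustrative rather than the Gecko API, and the fixed-point trick is only valid for 0.0 <= aVolume <= 1.0, as in the original.

#include <cstdint>
#include <vector>

static void InterleaveWithVolume(const int16_t* aSource, int32_t aSourceLength,
                                 int32_t aFrames, int32_t aChannels,
                                 float aVolume, int16_t* aOutput)
{
  // Volume as a 16.16 fixed-point integer: multiply, then shift back down.
  int32_t scale = int32_t((1 << 16) * aVolume);
  for (int32_t i = 0; i < aFrames; ++i) {
    for (int32_t c = 0; c < aChannels; ++c) {
      // Planar layout: each channel is a contiguous run of aSourceLength samples.
      int16_t s = aSource[c * aSourceLength + i];
      *aOutput++ = int16_t((int32_t(s) * scale) >> 16);
    }
  }
}

int main()
{
  // Two planar channels of three frames each.
  std::vector<int16_t> planar = {1000, 2000, 3000, -1000, -2000, -3000};
  std::vector<int16_t> interleaved(6);
  InterleaveWithVolume(planar.data(), 3, 3, 2, 0.5f, interleaved.data());
  // interleaved == {500, -500, 1000, -1000, 1500, -1500}
  return 0;
}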
--- a/content/media/AudioStream.cpp +++ b/content/media/AudioStream.cpp @@ -41,21 +41,21 @@ namespace mozilla { PRLogModuleInfo* gAudioStreamLog = nullptr; #endif static const uint32_t FAKE_BUFFER_SIZE = 176400; // Number of milliseconds per second. static const int64_t MS_PER_S = 1000; -class nsNativeAudioStream : public AudioStream +class NativeAudioStream : public AudioStream { public: - ~nsNativeAudioStream(); - nsNativeAudioStream(); + ~NativeAudioStream(); + NativeAudioStream(); nsresult Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannelType aAudioChannelType); void Shutdown(); nsresult Write(const AudioDataValue* aBuf, uint32_t aFrames); uint32_t Available(); void SetVolume(double aVolume); void Drain(); @@ -273,97 +273,97 @@ nsresult AudioStream::SetPreservesPitch( mTimeStretcher->setRate(mAudioClock.GetPlaybackRate()); } mAudioClock.SetPreservesPitch(aPreservesPitch); return NS_OK; } -nsNativeAudioStream::nsNativeAudioStream() : +NativeAudioStream::NativeAudioStream() : mVolume(1.0), mAudioHandle(0), mPaused(false), mInError(false) { } -nsNativeAudioStream::~nsNativeAudioStream() +NativeAudioStream::~NativeAudioStream() { Shutdown(); } -nsresult nsNativeAudioStream::Init(int32_t aNumChannels, int32_t aRate, +nsresult NativeAudioStream::Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannelType aAudioChannelType) { mInRate = mOutRate = aRate; mChannels = aNumChannels; if (sa_stream_create_pcm(reinterpret_cast<sa_stream_t**>(&mAudioHandle), NULL, SA_MODE_WRONLY, SA_PCM_FORMAT_S16_NE, aRate, aNumChannels) != SA_SUCCESS) { mAudioHandle = nullptr; mInError = true; - PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_create_pcm error")); + PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("NativeAudioStream: sa_stream_create_pcm error")); return NS_ERROR_FAILURE; } int saError = sa_stream_set_stream_type(static_cast<sa_stream_t*>(mAudioHandle), ConvertChannelToSAType(aAudioChannelType)); if (saError != SA_SUCCESS && saError != SA_ERROR_NOT_SUPPORTED) { mAudioHandle = nullptr; mInError = true; - PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_set_stream_type error")); + PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("NativeAudioStream: sa_stream_set_stream_type error")); return NS_ERROR_FAILURE; } if (sa_stream_open(static_cast<sa_stream_t*>(mAudioHandle)) != SA_SUCCESS) { sa_stream_destroy(static_cast<sa_stream_t*>(mAudioHandle)); mAudioHandle = nullptr; mInError = true; - PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_open error")); + PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("NativeAudioStream: sa_stream_open error")); return NS_ERROR_FAILURE; } mInError = false; mAudioClock.Init(); return NS_OK; } -void nsNativeAudioStream::Shutdown() +void NativeAudioStream::Shutdown() { if (!mAudioHandle) return; sa_stream_destroy(static_cast<sa_stream_t*>(mAudioHandle)); mAudioHandle = nullptr; mInError = true; } -int32_t nsNativeAudioStream::WriteToBackend(const AudioDataValue* aBuffer, uint32_t aSamples) +int32_t NativeAudioStream::WriteToBackend(const AudioDataValue* aBuffer, uint32_t aSamples) { double scaledVolume = GetVolumeScale() * mVolume; nsAutoArrayPtr<short> outputBuffer(new short[aSamples]); ConvertAudioSamplesWithScale(aBuffer, outputBuffer.get(), aSamples, scaledVolume); if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle), outputBuffer, aSamples * sizeof(short)) != SA_SUCCESS) { return -1; } mAudioClock.UpdateWritePosition(aSamples / mChannels); return aSamples; } -nsresult 
nsNativeAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames) +nsresult NativeAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames) { NS_ASSERTION(!mPaused, "Don't write audio when paused, you'll block"); if (mInError) return NS_ERROR_FAILURE; uint32_t samples = aFrames * mChannels; int32_t written = -1; @@ -386,111 +386,111 @@ nsresult nsNativeAudioStream::Write(cons } else { written = 0; } } else { written = WriteToBackend(aBuf, samples); } if (written == -1) { - PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error")); + PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("NativeAudioStream: sa_stream_write error")); mInError = true; return NS_ERROR_FAILURE; } return NS_OK; } -uint32_t nsNativeAudioStream::Available() +uint32_t NativeAudioStream::Available() { // If the audio backend failed to open, lie and say we'll accept some // data. if (mInError) return FAKE_BUFFER_SIZE; size_t s = 0; if (sa_stream_get_write_size(static_cast<sa_stream_t*>(mAudioHandle), &s) != SA_SUCCESS) return 0; return s / mChannels / sizeof(short); } -void nsNativeAudioStream::SetVolume(double aVolume) +void NativeAudioStream::SetVolume(double aVolume) { NS_ASSERTION(aVolume >= 0.0 && aVolume <= 1.0, "Invalid volume"); #if defined(SA_PER_STREAM_VOLUME) if (sa_stream_set_volume_abs(static_cast<sa_stream_t*>(mAudioHandle), aVolume) != SA_SUCCESS) { - PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_set_volume_abs error")); + PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("NativeAudioStream: sa_stream_set_volume_abs error")); mInError = true; } #else mVolume = aVolume; #endif } -void nsNativeAudioStream::Drain() +void NativeAudioStream::Drain() { NS_ASSERTION(!mPaused, "Don't drain audio when paused, it won't finish!"); // Write all the frames still in the time stretcher pipeline. 
if (mTimeStretcher) { uint32_t numFrames = mTimeStretcher->numSamples(); uint32_t arraySize = numFrames * mChannels * sizeof(AudioDataValue); nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[arraySize]); uint32_t framesAvailable = mTimeStretcher->receiveSamples(data, numFrames); int32_t written = 0; if (framesAvailable) { written = WriteToBackend(data, framesAvailable * mChannels); } if (written == -1) { - PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error")); + PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("NativeAudioStream: sa_stream_write error")); mInError = true; } NS_ASSERTION(mTimeStretcher->numSamples() == 0, "We did not get all the data from the SoundTouch pipeline."); } if (mInError) return; int r = sa_stream_drain(static_cast<sa_stream_t*>(mAudioHandle)); if (r != SA_SUCCESS && r != SA_ERROR_INVALID) { - PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_drain error")); + PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("NativeAudioStream: sa_stream_drain error")); mInError = true; } } -void nsNativeAudioStream::Pause() +void NativeAudioStream::Pause() { if (mInError) return; mPaused = true; sa_stream_pause(static_cast<sa_stream_t*>(mAudioHandle)); } -void nsNativeAudioStream::Resume() +void NativeAudioStream::Resume() { if (mInError) return; mPaused = false; sa_stream_resume(static_cast<sa_stream_t*>(mAudioHandle)); } -int64_t nsNativeAudioStream::GetPosition() +int64_t NativeAudioStream::GetPosition() { return mAudioClock.GetPosition(); } -int64_t nsNativeAudioStream::GetPositionInFrames() +int64_t NativeAudioStream::GetPositionInFrames() { return mAudioClock.GetPositionInFrames(); } -int64_t nsNativeAudioStream::GetPositionInFramesInternal() +int64_t NativeAudioStream::GetPositionInFramesInternal() { if (mInError) { return -1; } sa_position_t positionType = SA_POSITION_WRITE_SOFTWARE; #if defined(XP_WIN) positionType = SA_POSITION_WRITE_HARDWARE; @@ -499,22 +499,22 @@ int64_t nsNativeAudioStream::GetPosition if (sa_stream_get_position(static_cast<sa_stream_t*>(mAudioHandle), positionType, &position) == SA_SUCCESS) { return position / mChannels / sizeof(short); } return -1; } -bool nsNativeAudioStream::IsPaused() +bool NativeAudioStream::IsPaused() { return mPaused; } -int32_t nsNativeAudioStream::GetMinWriteSize() +int32_t NativeAudioStream::GetMinWriteSize() { size_t size; int r = sa_stream_get_min_write(static_cast<sa_stream_t*>(mAudioHandle), &size); if (r == SA_ERROR_NOT_SUPPORTED) return 1; else if (r != SA_SUCCESS || size > INT32_MAX) return -1; @@ -583,21 +583,21 @@ public: private: nsAutoArrayPtr<uint8_t> mBuffer; uint32_t mCapacity; uint32_t mStart; uint32_t mCount; }; -class nsBufferedAudioStream : public AudioStream +class BufferedAudioStream : public AudioStream { public: - nsBufferedAudioStream(); - ~nsBufferedAudioStream(); + BufferedAudioStream(); + ~BufferedAudioStream(); nsresult Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannelType aAudioChannelType); void Shutdown(); nsresult Write(const AudioDataValue* aBuf, uint32_t aFrames); uint32_t Available(); void SetVolume(double aVolume); void Drain(); @@ -607,22 +607,22 @@ class nsBufferedAudioStream : public Aud int64_t GetPositionInFrames(); int64_t GetPositionInFramesInternal(); bool IsPaused(); int32_t GetMinWriteSize(); private: static long DataCallback_S(cubeb_stream*, void* aThis, void* aBuffer, long aFrames) { - return static_cast<nsBufferedAudioStream*>(aThis)->DataCallback(aBuffer, aFrames); + return 
static_cast<BufferedAudioStream*>(aThis)->DataCallback(aBuffer, aFrames); } static void StateCallback_S(cubeb_stream*, void* aThis, cubeb_state aState) { - static_cast<nsBufferedAudioStream*>(aThis)->StateCallback(aState); + static_cast<BufferedAudioStream*>(aThis)->StateCallback(aState); } long DataCallback(void* aBuffer, long aFrames); void StateCallback(cubeb_state aState); long GetUnprocessed(void* aBuffer, long aFrames); long GetTimeStretched(void* aBuffer, long aFrames); @@ -683,36 +683,36 @@ private: StreamState mState; }; #endif AudioStream* AudioStream::AllocateStream() { #if defined(MOZ_CUBEB) if (GetUseCubeb()) { - return new nsBufferedAudioStream(); + return new BufferedAudioStream(); } #endif - return new nsNativeAudioStream(); + return new NativeAudioStream(); } #if defined(MOZ_CUBEB) -nsBufferedAudioStream::nsBufferedAudioStream() - : mMonitor("nsBufferedAudioStream"), mLostFrames(0), mVolume(1.0), +BufferedAudioStream::BufferedAudioStream() + : mMonitor("BufferedAudioStream"), mLostFrames(0), mVolume(1.0), mBytesPerFrame(0), mState(INITIALIZED) { } -nsBufferedAudioStream::~nsBufferedAudioStream() +BufferedAudioStream::~BufferedAudioStream() { Shutdown(); } nsresult -nsBufferedAudioStream::Init(int32_t aNumChannels, int32_t aRate, +BufferedAudioStream::Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannelType aAudioChannelType) { cubeb* cubebContext = GetCubebContext(); if (!cubebContext || aNumChannels < 0 || aRate < 0) { return NS_ERROR_FAILURE; } @@ -728,17 +728,17 @@ nsBufferedAudioStream::Init(int32_t aNum params.format = CUBEB_SAMPLE_FLOAT32NE; } mBytesPerFrame = sizeof(AudioDataValue) * aNumChannels; mAudioClock.Init(); { cubeb_stream* stream; - if (cubeb_stream_init(cubebContext, &stream, "nsBufferedAudioStream", params, + if (cubeb_stream_init(cubebContext, &stream, "BufferedAudioStream", params, GetCubebLatency(), DataCallback_S, StateCallback_S, this) == CUBEB_OK) { mCubebStream.own(stream); } } if (!mCubebStream) { return NS_ERROR_FAILURE; } @@ -749,28 +749,28 @@ nsBufferedAudioStream::Init(int32_t aNum uint32_t bufferLimit = FramesToBytes(aRate); NS_ABORT_IF_FALSE(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames"); mBuffer.SetCapacity(bufferLimit); return NS_OK; } void -nsBufferedAudioStream::Shutdown() +BufferedAudioStream::Shutdown() { if (mState == STARTED) { Pause(); } if (mCubebStream) { mCubebStream.reset(); } } nsresult -nsBufferedAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames) +BufferedAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames) { MonitorAutoLock mon(mMonitor); if (!mCubebStream || mState == ERRORED) { return NS_ERROR_FAILURE; } NS_ASSERTION(mState == INITIALIZED || mState == STARTED, "Stream write in unexpected state."); @@ -803,52 +803,52 @@ nsBufferedAudioStream::Write(const Audio mon.Wait(); } } return NS_OK; } uint32_t -nsBufferedAudioStream::Available() +BufferedAudioStream::Available() { MonitorAutoLock mon(mMonitor); NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Buffer invariant violated."); return BytesToFrames(mBuffer.Available()); } int32_t -nsBufferedAudioStream::GetMinWriteSize() +BufferedAudioStream::GetMinWriteSize() { return 1; } void -nsBufferedAudioStream::SetVolume(double aVolume) +BufferedAudioStream::SetVolume(double aVolume) { MonitorAutoLock mon(mMonitor); NS_ABORT_IF_FALSE(aVolume >= 0.0 && aVolume <= 1.0, "Invalid volume"); mVolume = aVolume; } void -nsBufferedAudioStream::Drain() +BufferedAudioStream::Drain() { MonitorAutoLock mon(mMonitor); if (mState 
!= STARTED) { return; } mState = DRAINING; while (mState == DRAINING) { mon.Wait(); } } void -nsBufferedAudioStream::Pause() +BufferedAudioStream::Pause() { MonitorAutoLock mon(mMonitor); if (!mCubebStream || mState != STARTED) { return; } int r; { @@ -856,17 +856,17 @@ nsBufferedAudioStream::Pause() r = cubeb_stream_stop(mCubebStream); } if (mState != ERRORED && r == CUBEB_OK) { mState = STOPPED; } } void -nsBufferedAudioStream::Resume() +BufferedAudioStream::Resume() { MonitorAutoLock mon(mMonitor); if (!mCubebStream || mState != STOPPED) { return; } int r; { @@ -874,43 +874,43 @@ nsBufferedAudioStream::Resume() r = cubeb_stream_start(mCubebStream); } if (mState != ERRORED && r == CUBEB_OK) { mState = STARTED; } } int64_t -nsBufferedAudioStream::GetPosition() +BufferedAudioStream::GetPosition() { return mAudioClock.GetPosition(); } // This function is miscompiled by PGO with MSVC 2010. See bug 768333. #ifdef _MSC_VER #pragma optimize("", off) #endif int64_t -nsBufferedAudioStream::GetPositionInFrames() +BufferedAudioStream::GetPositionInFrames() { return mAudioClock.GetPositionInFrames(); } #ifdef _MSC_VER #pragma optimize("", on) #endif int64_t -nsBufferedAudioStream::GetPositionInFramesInternal() +BufferedAudioStream::GetPositionInFramesInternal() { MonitorAutoLock mon(mMonitor); return GetPositionInFramesUnlocked(); } int64_t -nsBufferedAudioStream::GetPositionInFramesUnlocked() +BufferedAudioStream::GetPositionInFramesUnlocked() { mMonitor.AssertCurrentThreadOwns(); if (!mCubebStream || mState == ERRORED) { return -1; } uint64_t position = 0; @@ -926,24 +926,24 @@ nsBufferedAudioStream::GetPositionInFram uint64_t adjustedPosition = 0; if (position >= mLostFrames) { adjustedPosition = position - mLostFrames; } return NS_MIN<uint64_t>(adjustedPosition, INT64_MAX); } bool -nsBufferedAudioStream::IsPaused() +BufferedAudioStream::IsPaused() { MonitorAutoLock mon(mMonitor); return mState == STOPPED; } long -nsBufferedAudioStream::GetUnprocessed(void* aBuffer, long aFrames) +BufferedAudioStream::GetUnprocessed(void* aBuffer, long aFrames) { uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer); // Flush the timestretcher pipeline, if we were playing using a playback rate // other than 1.0. 
uint32_t flushedFrames = 0; if (mTimeStretcher && mTimeStretcher->numSamples()) { flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames); @@ -957,17 +957,17 @@ nsBufferedAudioStream::GetUnprocessed(vo mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]); memcpy(wpos, input[0], input_size[0]); wpos += input_size[0]; memcpy(wpos, input[1], input_size[1]); return BytesToFrames(available) + flushedFrames; } long -nsBufferedAudioStream::GetTimeStretched(void* aBuffer, long aFrames) +BufferedAudioStream::GetTimeStretched(void* aBuffer, long aFrames) { long processedFrames = 0; if (!EnsureTimeStretcherInitialized()) { return -1; } uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer); double playbackRate = static_cast<double>(mInRate) / mOutRate; uint32_t toPopBytes = FramesToBytes(ceil(aFrames / playbackRate)); @@ -992,17 +992,17 @@ nsBufferedAudioStream::GetTimeStretched( wpos += FramesToBytes(receivedFrames); processedFrames += receivedFrames; } while (processedFrames < aFrames && !lowOnBufferedData); return processedFrames; } long -nsBufferedAudioStream::DataCallback(void* aBuffer, long aFrames) +BufferedAudioStream::DataCallback(void* aBuffer, long aFrames) { MonitorAutoLock mon(mMonitor); uint32_t available = NS_MIN(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length()); NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames"); uint32_t underrunFrames = 0; uint32_t servicedFrames = 0; if (available) { @@ -1031,17 +1031,17 @@ nsBufferedAudioStream::DataCallback(void servicedFrames += underrunFrames; } mAudioClock.UpdateWritePosition(servicedFrames); return servicedFrames; } void -nsBufferedAudioStream::StateCallback(cubeb_state aState) +BufferedAudioStream::StateCallback(cubeb_state aState) { MonitorAutoLock mon(mMonitor); if (aState == CUBEB_STATE_DRAINED) { mState = DRAINED; } else if (aState == CUBEB_STATE_ERROR) { mState = ERRORED; } mon.NotifyAll();
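The Drain() path in BufferedAudioStream above is a monitor handshake: the caller flips the stream state to DRAINING and blocks on the monitor until the cubeb state callback reports the stream drained and notifies. A reduced sketch of just that handshake, with std::mutex/std::condition_variable standing in for Mozilla's Monitor and the state set trimmed to what the handshake needs:

#include <condition_variable>
#include <mutex>
#include <thread>

enum StreamState { STARTED, DRAINING, DRAINED };

struct Stream {
  std::mutex mMonitor;
  std::condition_variable mCond;
  StreamState mState = STARTED;

  void Drain()
  {
    std::unique_lock<std::mutex> lock(mMonitor);
    if (mState != STARTED) {
      return;
    }
    mState = DRAINING;
    // Block until the callback thread reports the last frame played.
    mCond.wait(lock, [this] { return mState != DRAINING; });
  }

  // Runs on the audio callback thread, like StateCallback(CUBEB_STATE_DRAINED).
  void OnDrained()
  {
    std::lock_guard<std::mutex> lock(mMonitor);
    mState = DRAINED;
    mCond.notify_all();
  }
};

int main()
{
  Stream s;
  std::thread callback([&] { s.OnDrained(); });
  s.Drain();
  callback.join();
  return 0;
}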
--- a/content/media/MediaDecoderReader.cpp +++ b/content/media/MediaDecoderReader.cpp @@ -75,17 +75,17 @@ IsYV12Format(const VideoData::YCbCrBuffe aYPlane.mHeight % 2 == 0 && aYPlane.mWidth / 2 == aCbPlane.mWidth && aYPlane.mHeight / 2 == aCbPlane.mHeight && aCbPlane.mWidth == aCrPlane.mWidth && aCbPlane.mHeight == aCrPlane.mHeight; } bool -nsVideoInfo::ValidateVideoRegion(const nsIntSize& aFrame, +VideoInfo::ValidateVideoRegion(const nsIntSize& aFrame, const nsIntRect& aPicture, const nsIntSize& aDisplay) { return aFrame.width <= PlanarYCbCrImage::MAX_DIMENSION && aFrame.height <= PlanarYCbCrImage::MAX_DIMENSION && aFrame.width * aFrame.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT && aFrame.width * aFrame.height != 0 && @@ -134,17 +134,17 @@ VideoData::VideoData(int64_t aOffset, } VideoData::~VideoData() { MOZ_COUNT_DTOR(VideoData); } -VideoData* VideoData::Create(nsVideoInfo& aInfo, +VideoData* VideoData::Create(VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, int64_t aTime, int64_t aEndTime, const YCbCrBuffer& aBuffer, bool aKeyframe, int64_t aTimecode, nsIntRect aPicture) @@ -236,17 +236,17 @@ VideoData* VideoData::Create(nsVideoInfo data.mStereoMode = aInfo.mStereoMode; videoImage->SetDelayedConversion(true); videoImage->SetData(data); return v.forget(); } #ifdef MOZ_WIDGET_GONK -VideoData* VideoData::Create(nsVideoInfo& aInfo, +VideoData* VideoData::Create(VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, int64_t aTime, int64_t aEndTime, mozilla::layers::GraphicBufferLocked *aBuffer, bool aKeyframe, int64_t aTimecode, nsIntRect aPicture)
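ValidateVideoRegion, renamed at the top of this diff, guards container-reported sizes against allocation overflow before any frame buffer is created. A sketch of the check for one region (the real function applies it to the frame, picture, and display sizes in turn); the MAX_DIMENSION value is an assumption here, standing in for PlanarYCbCrImage::MAX_DIMENSION, while the width/height caps are the constants from VideoUtils.h:

#include <cstdint>

static const int32_t MAX_DIMENSION = 16384; // assumed stand-in for PlanarYCbCrImage::MAX_DIMENSION
static const int32_t MAX_VIDEO_WIDTH = 4000;
static const int32_t MAX_VIDEO_HEIGHT = 3000;

struct IntSize { int32_t width; int32_t height; };

static bool ValidateRegion(const IntSize& aSize)
{
  // Checking the per-axis caps first keeps the product below 2^31,
  // so the multiplication itself cannot overflow int32_t.
  return aSize.width <= MAX_DIMENSION && aSize.height <= MAX_DIMENSION &&
         aSize.width * aSize.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
         aSize.width * aSize.height != 0;
}

int main()
{
  IntSize ok{1920, 1080};
  IntSize empty{0, 1080};
  return (ValidateRegion(ok) && !ValidateRegion(empty)) ? 0 : 1;
}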
--- a/content/media/MediaDecoderReader.h +++ b/content/media/MediaDecoderReader.h @@ -16,19 +16,19 @@ #include "MediaResource.h" #include "nsHTMLMediaElement.h" namespace mozilla { class AbstractMediaDecoder; // Stores info relevant to presenting media frames. -class nsVideoInfo { +class VideoInfo { public: - nsVideoInfo() + VideoInfo() : mAudioRate(44100), mAudioChannels(2), mDisplay(0,0), mStereoMode(STEREO_MODE_MONO), mHasAudio(false), mHasVideo(false) {} @@ -133,27 +133,27 @@ public: }; // Constructs a VideoData object. Makes a copy of YCbCr data in aBuffer. // aTimecode is a codec specific number representing the timestamp of // the frame of video data. Returns nullptr if an error occurs. This may // indicate that memory couldn't be allocated to create the VideoData // object, or it may indicate some problem with the input data (e.g. // negative stride). - static VideoData* Create(nsVideoInfo& aInfo, + static VideoData* Create(VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, int64_t aTime, int64_t aEndTime, const YCbCrBuffer &aBuffer, bool aKeyframe, int64_t aTimecode, nsIntRect aPicture); - static VideoData* Create(nsVideoInfo& aInfo, + static VideoData* Create(VideoInfo& aInfo, ImageContainer* aContainer, int64_t aOffset, int64_t aTime, int64_t aEndTime, layers::GraphicBufferLocked *aBuffer, bool aKeyframe, int64_t aTimecode, nsIntRect aPicture); @@ -381,17 +381,17 @@ public: virtual bool HasAudio() = 0; virtual bool HasVideo() = 0; // Read header data for all bitstreams in the file. Fills aInfo with // the data required to present the media, and optionally fills *aTags // with tag metadata from the file. // Returns NS_OK on success, or NS_ERROR_FAILURE on failure. - virtual nsresult ReadMetadata(nsVideoInfo* aInfo, + virtual nsresult ReadMetadata(VideoInfo* aInfo, MetadataTags** aTags) = 0; // Stores the presentation time of the first frame we'd be able to play if // we started playback at the current position. Returns the first video // frame, if we have video. virtual VideoData* FindStartTime(int64_t& aOutStartTime); // Moves the decode head to aTime microseconds. aStartTime and aEndTime @@ -486,14 +486,14 @@ protected: // Pumps the decode until we reach frames required to play at time aTarget // (usecs). nsresult DecodeToTarget(int64_t aTarget); // Reference to the owning decoder object. AbstractMediaDecoder* mDecoder; // Stores presentation info required for playback. - nsVideoInfo mInfo; + VideoInfo mInfo; }; } // namespace mozilla #endif
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -1732,17 +1732,17 @@ nsresult MediaDecoderStateMachine::Decod
 {
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
   NS_ASSERTION(mState == DECODER_STATE_DECODING_METADATA,
                "Only call when in metadata decoding state");

   LOG(PR_LOG_DEBUG, ("%p Decoding Media Headers", mDecoder.get()));
   nsresult res;
-  nsVideoInfo info;
+  VideoInfo info;
   MetadataTags* tags;
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     res = mReader->ReadMetadata(&info, &tags);
   }
   mInfo = info;

   if (NS_FAILED(res) || (!info.mHasVideo && !info.mHasAudio)) {
--- a/content/media/MediaDecoderStateMachine.h
+++ b/content/media/MediaDecoderStateMachine.h
@@ -779,13 +779,13 @@ private:
   // Manager for queuing and dispatching MozAudioAvailable events. The
   // event manager is accessed from the state machine and audio threads,
   // and takes care of synchronizing access to its internal queue.
   AudioAvailableEventManager mEventManager;

   // Stores presentation info required for playback. The decoder monitor
   // must be held when accessing this.
-  nsVideoInfo mInfo;
+  VideoInfo mInfo;
 };

 } // namespace mozilla;

 #endif
--- a/content/media/VideoUtils.h
+++ b/content/media/VideoUtils.h
@@ -121,17 +121,17 @@ static const int64_t USECS_PER_MS = 1000
 // The maximum height and width of the video. Used for
 // sanitizing the memory allocation of the RGB buffer.
 // The maximum resolution we anticipate encountering in the
 // wild is 2160p - 3840x2160 pixels.
 static const int32_t MAX_VIDEO_WIDTH = 4000;
 static const int32_t MAX_VIDEO_HEIGHT = 3000;

 // Scales the display rect aDisplay by aspect ratio aAspectRatio.
-// Note that aDisplay must be validated by nsVideoInfo::ValidateVideoRegion()
+// Note that aDisplay must be validated by VideoInfo::ValidateVideoRegion()
 // before being used!
 void ScaleDisplayByAspectRatio(nsIntSize& aDisplay, float aAspectRatio);

 // The amount of virtual memory reserved for thread stacks.
 #if (defined(XP_WIN) || defined(XP_MACOSX) || defined(LINUX)) && \
     !defined(MOZ_ASAN)
 #define MEDIA_THREAD_STACK_SIZE (128 * 1024)
 #else
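ScaleDisplayByAspectRatio is only declared in this header, and its body is not part of this changeset. A hypothetical sketch of what such a helper plausibly does with a pixel aspect ratio; the branch structure and round-to-nearest policy are assumptions, not the Gecko implementation:

#include <cmath>
#include <cstdint>

struct IntSize { int32_t width; int32_t height; };

static void ScaleDisplayByAspectRatio(IntSize& aDisplay, float aAspectRatio)
{
  if (aAspectRatio > 1.0f) {
    // Wide (anamorphic) pixels: stretch the display width.
    aDisplay.width = int32_t(std::floor(aDisplay.width * aAspectRatio + 0.5f));
  } else if (aAspectRatio < 1.0f) {
    // Tall pixels: stretch the display height instead.
    aDisplay.height = int32_t(std::floor(aDisplay.height / aAspectRatio + 0.5f));
  }
}

int main()
{
  IntSize display{720, 480};
  ScaleDisplayByAspectRatio(display, 40.0f / 33.0f); // a common NTSC anamorphic PAR
  // display.width is now ~873; height is unchanged.
  return 0;
}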
--- a/content/media/VorbisUtils.h
+++ b/content/media/VorbisUtils.h
@@ -1,27 +1,27 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef VORBISUTILS_H_
-#define VORBISUTILS_H_
-
-#ifdef MOZ_SAMPLE_TYPE_S16
-#include <ogg/os_types.h>
-typedef ogg_int32_t VorbisPCMValue;
-
-#define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
-// Convert the output of vorbis_synthesis_pcmout to a AudioDataValue
-#define MOZ_CONVERT_VORBIS_SAMPLE(x) \
-  (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
-
-#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
-
-typedef float VorbisPCMValue;
-
-#define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
-
-#endif
-
-#endif /* VORBISUTILS_H_ */
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef VORBISUTILS_H_
+#define VORBISUTILS_H_
+
+#ifdef MOZ_SAMPLE_TYPE_S16
+#include <ogg/os_types.h>
+typedef ogg_int32_t VorbisPCMValue;
+
+#define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
+// Convert the output of vorbis_synthesis_pcmout to a AudioDataValue
+#define MOZ_CONVERT_VORBIS_SAMPLE(x) \
+  (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
+
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
+
+typedef float VorbisPCMValue;
+
+#define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
+
+#endif
+
+#endif /* VORBISUTILS_H_ */
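In the S16 build above, MOZ_CONVERT_VORBIS_SAMPLE shifts the decoder's fixed-point output right by 9 bits and clips the result to the int16_t range. The same logic as a standalone function; the premise implied by the >>9 (an assumption about the decoder, not stated in this header) is that fixed-point Vorbis output represents +/-1.0 as roughly +/-2^24:

#include <cstdint>

static int16_t ConvertVorbisSample(int32_t aFixedPoint)
{
  int32_t shifted = aFixedPoint >> 9; // 2^24 full scale -> 2^15
  if (shifted < -32768) {
    return -32768; // MOZ_CLIP_TO_15, lower bound
  }
  if (shifted > 32767) {
    return 32767;  // MOZ_CLIP_TO_15, upper bound
  }
  return int16_t(shifted);
}

int main()
{
  // Full-scale positive input clips to the int16_t maximum.
  return ConvertVorbisSample(1 << 24) == 32767 ? 0 : 1;
}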
--- a/content/media/dash/DASHReader.cpp
+++ b/content/media/dash/DASHReader.cpp
@@ -110,33 +110,33 @@ DASHReader::DecodeVideoFrame(bool &aKeyf
 bool
 DASHReader::DecodeAudioData()
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   return (mAudioReader ? mAudioReader->DecodeAudioData() : false);
 }

 nsresult
-DASHReader::ReadMetadata(nsVideoInfo* aInfo,
+DASHReader::ReadMetadata(VideoInfo* aInfo,
                          MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

   // Wait for MPD to be parsed and child readers created.
   LOG1("Waiting for metadata download.");
   nsresult rv = WaitForMetadata();
   // If we get an abort, return silently; the decoder is shutting down.
   if (NS_ERROR_ABORT == rv) {
     return NS_OK;
   }
   // Verify no other errors before continuing.
   NS_ENSURE_SUCCESS(rv, rv);

   // Get metadata from child readers.
-  nsVideoInfo audioInfo, videoInfo;
+  VideoInfo audioInfo, videoInfo;
   if (mVideoReader) {
     rv = mVideoReader->ReadMetadata(&videoInfo, aTags);
     NS_ENSURE_SUCCESS(rv, rv);
     mInfo.mHasVideo = videoInfo.mHasVideo;
     mInfo.mDisplay = videoInfo.mDisplay;
   }
   if (mAudioReader) {
--- a/content/media/dash/DASHReader.h
+++ b/content/media/dash/DASHReader.h
@@ -42,17 +42,17 @@ public:
   // Adds a pointer to a audio/video reader for a media |Representation|.
   // Called on the main thread only.
   void AddAudioReader(MediaDecoderReader* aAudioReader);
   void AddVideoReader(MediaDecoderReader* aVideoReader);

   // Waits for metadata bytes to be downloaded, then reads and parses them.
   // Called on the decode thread only.
-  nsresult ReadMetadata(nsVideoInfo* aInfo,
+  nsresult ReadMetadata(VideoInfo* aInfo,
                         MetadataTags** aTags);

   // Waits for |ReadyToReadMetadata| or |NotifyDecoderShuttingDown|
   // notification, whichever comes first. Ensures no attempt to read metadata
   // during |DASHDecoder|::|Shutdown|. Called on decode thread only.
   nsresult WaitForMetadata() {
     NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
     ReentrantMonitorAutoEnter mon(mReadMetadataMonitor);
--- a/content/media/gstreamer/GStreamerReader.cpp
+++ b/content/media/gstreamer/GStreamerReader.cpp
@@ -190,17 +190,17 @@ void GStreamerReader::PlayBinSourceSetup
     gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_RANDOM_ACCESS);
   } else {
     /* make the demuxer work in push mode so that seeking is kept to a
      * minimum */
     gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_SEEKABLE);
   }
 }

-nsresult GStreamerReader::ReadMetadata(nsVideoInfo* aInfo,
+nsresult GStreamerReader::ReadMetadata(VideoInfo* aInfo,
                                        MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   nsresult ret = NS_OK;

   /* We do 3 attempts here: decoding audio and video, decoding video only,
    * decoding audio only. This allows us to play streams that have one broken
    * stream but that are otherwise decodeable.
--- a/content/media/gstreamer/GStreamerReader.h
+++ b/content/media/gstreamer/GStreamerReader.h
@@ -23,17 +23,17 @@ public:
   GStreamerReader(AbstractMediaDecoder* aDecoder);
   virtual ~GStreamerReader();

   virtual nsresult Init(MediaDecoderReader* aCloneDonor);
   virtual nsresult ResetDecode();
   virtual bool DecodeAudioData();
   virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
                                 int64_t aTimeThreshold);
-  virtual nsresult ReadMetadata(nsVideoInfo* aInfo,
+  virtual nsresult ReadMetadata(VideoInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime,
                         int64_t aStartTime,
                         int64_t aEndTime,
                         int64_t aCurrentTime);
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime);

   virtual bool IsSeekableInBufferedRanges() {
--- a/content/media/ogg/OggCodecState.cpp
+++ b/content/media/ogg/OggCodecState.cpp
@@ -284,17 +284,17 @@ bool TheoraState::Init() {
   mPixelAspectRatio = (n == 0 || d == 0) ?
     1.0f : static_cast<float>(n) / static_cast<float>(d);

   // Ensure the frame and picture regions aren't larger than our prescribed
   // maximum, or zero sized.
   nsIntSize frame(mInfo.frame_width, mInfo.frame_height);
   nsIntRect picture(mInfo.pic_x, mInfo.pic_y, mInfo.pic_width, mInfo.pic_height);
-  if (!nsVideoInfo::ValidateVideoRegion(frame, picture, frame)) {
+  if (!VideoInfo::ValidateVideoRegion(frame, picture, frame)) {
     return mActive = false;
   }

   mCtx = th_decode_alloc(&mInfo, mSetup);
   if (mCtx == NULL) {
     return mActive = false;
   }
--- a/content/media/ogg/OggReader.cpp
+++ b/content/media/ogg/OggReader.cpp
@@ -156,17 +156,17 @@ void OggReader::BuildSerialList(nsTArray
     if (mVorbisState) {
       aTracks.AppendElement(mVorbisState->mSerial);
     } else if(mOpusState) {
       aTracks.AppendElement(mOpusState->mSerial);
     }
   }
 }

-nsresult OggReader::ReadMetadata(nsVideoInfo* aInfo,
+nsresult OggReader::ReadMetadata(VideoInfo* aInfo,
                                  MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

   // We read packets until all bitstreams have read all their header packets.
   // We record the offset of the first non-header page so that we know
   // what page to seek to when seeking to the media start.
@@ -265,17 +265,17 @@ nsresult OggReader::ReadMetadata(nsVideo
                           mTheoraState->mInfo.pic_height);

     // Apply the aspect ratio to produce the intrinsic display size we report
     // to the element.
     ScaleDisplayByAspectRatio(displaySize, mTheoraState->mPixelAspectRatio);

     nsIntSize frameSize(mTheoraState->mInfo.frame_width,
                         mTheoraState->mInfo.frame_height);
-    if (nsVideoInfo::ValidateVideoRegion(frameSize, picture, displaySize)) {
+    if (VideoInfo::ValidateVideoRegion(frameSize, picture, displaySize)) {
       // Video track's frame sizes will not overflow. Activate the video track.
       mInfo.mHasVideo = true;
       mInfo.mDisplay = displaySize;
       mPicture = picture;

       VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
       if (container) {
         container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
--- a/content/media/ogg/OggReader.h
+++ b/content/media/ogg/OggReader.h
@@ -41,17 +41,17 @@ public:
     return (mVorbisState != 0 && mVorbisState->mActive) ||
            (mOpusState != 0 && mOpusState->mActive);
   }

   virtual bool HasVideo() {
     return mTheoraState != 0 && mTheoraState->mActive;
   }

-  virtual nsresult ReadMetadata(nsVideoInfo* aInfo,
+  virtual nsresult ReadMetadata(VideoInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                         int64_t aCurrentTime);
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime);

   // We use bisection to seek in buffered range.
   virtual bool IsSeekableInBufferedRanges() {
     return true;
   }
--- a/content/media/omx/MediaOmxReader.cpp
+++ b/content/media/omx/MediaOmxReader.cpp
@@ -37,17 +37,17 @@ MediaOmxReader::~MediaOmxReader()
   ResetDecode();
 }

 nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor)
 {
   return NS_OK;
 }

-nsresult MediaOmxReader::ReadMetadata(nsVideoInfo* aInfo,
+nsresult MediaOmxReader::ReadMetadata(VideoInfo* aInfo,
                                       MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

   *aTags = nullptr;

   if (!mOmxDecoder) {
     mOmxDecoder = new OmxDecoder(mDecoder->GetResource(), mDecoder);
@@ -66,17 +66,17 @@ nsresult MediaOmxReader::ReadMetadata(ns
     int32_t width, height;
     mOmxDecoder->GetVideoParameters(&width, &height);
     nsIntRect pictureRect(0, 0, width, height);

     // Validate the container-reported frame and pictureRect sizes. This ensures
     // that our video frame creation code doesn't overflow.
     nsIntSize displaySize(width, height);
     nsIntSize frameSize(width, height);
-    if (!nsVideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
+    if (!VideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
       return NS_ERROR_FAILURE;
     }

     // Video track's frame sizes will not overflow. Activate the video track.
     mHasVideo = mInfo.mHasVideo = true;
     mInfo.mDisplay = displaySize;
     mPicture = pictureRect;
     mInitialFrame = frameSize;
--- a/content/media/omx/MediaOmxReader.h
+++ b/content/media/omx/MediaOmxReader.h
@@ -45,17 +45,17 @@ public:
     return mHasAudio;
   }

   virtual bool HasVideo()
   {
     return mHasVideo;
   }

-  virtual nsresult ReadMetadata(nsVideoInfo* aInfo,
+  virtual nsresult ReadMetadata(VideoInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                         int64_t aCurrentTime);
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime);
   virtual bool IsSeekableInBufferedRanges() {
     return true;
   }
 };
--- a/content/media/plugins/MediaPluginReader.cpp
+++ b/content/media/plugins/MediaPluginReader.cpp
@@ -33,17 +33,17 @@ MediaPluginReader::~MediaPluginReader()
   ResetDecode();
 }

 nsresult MediaPluginReader::Init(MediaDecoderReader* aCloneDonor)
 {
   return NS_OK;
 }

-nsresult MediaPluginReader::ReadMetadata(nsVideoInfo* aInfo,
+nsresult MediaPluginReader::ReadMetadata(VideoInfo* aInfo,
                                          MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

   if (!mPlugin) {
     mPlugin = GetMediaPluginHost()->CreateDecoder(mDecoder->GetResource(), mType);
     if (!mPlugin) {
       return NS_ERROR_FAILURE;
@@ -62,17 +62,17 @@ nsresult MediaPluginReader::ReadMetadata
     int32_t width, height;
     mPlugin->GetVideoParameters(mPlugin, &width, &height);
     nsIntRect pictureRect(0, 0, width, height);

     // Validate the container-reported frame and pictureRect sizes. This ensures
     // that our video frame creation code doesn't overflow.
     nsIntSize displaySize(width, height);
     nsIntSize frameSize(width, height);
-    if (!nsVideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
+    if (!VideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
       return NS_ERROR_FAILURE;
     }

     // Video track's frame sizes will not overflow. Activate the video track.
     mHasVideo = mInfo.mHasVideo = true;
     mInfo.mDisplay = displaySize;
     mPicture = pictureRect;
     mInitialFrame = frameSize;
--- a/content/media/plugins/MediaPluginReader.h
+++ b/content/media/plugins/MediaPluginReader.h
@@ -45,17 +45,17 @@ public:
     return mHasAudio;
   }

   virtual bool HasVideo()
   {
     return mHasVideo;
   }

-  virtual nsresult ReadMetadata(nsVideoInfo* aInfo,
+  virtual nsresult ReadMetadata(VideoInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                         int64_t aCurrentTime);
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime);
   virtual bool IsSeekableInBufferedRanges() {
     return true;
   }
 };
--- a/content/media/raw/RawReader.cpp
+++ b/content/media/raw/RawReader.cpp
@@ -29,17 +29,17 @@ nsresult RawReader::Init(MediaDecoderRea
 }

 nsresult RawReader::ResetDecode()
 {
   mCurrentFrame = 0;
   return MediaDecoderReader::ResetDecode();
 }

-nsresult RawReader::ReadMetadata(nsVideoInfo* aInfo,
+nsresult RawReader::ReadMetadata(VideoInfo* aInfo,
                                  MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

   MediaResource* resource = mDecoder->GetResource();
   NS_ASSERTION(resource, "Decoder has no media resource");
@@ -64,17 +64,17 @@ nsresult RawReader::ReadMetadata(nsVideo
   // Determine and verify frame display size.
   float pixelAspectRatio = static_cast<float>(mMetadata.aspectNumerator) /
                            mMetadata.aspectDenominator;
   nsIntSize display(mMetadata.frameWidth, mMetadata.frameHeight);
   ScaleDisplayByAspectRatio(display, pixelAspectRatio);
   mPicture = nsIntRect(0, 0, mMetadata.frameWidth, mMetadata.frameHeight);
   nsIntSize frameSize(mMetadata.frameWidth, mMetadata.frameHeight);
-  if (!nsVideoInfo::ValidateVideoRegion(frameSize, mPicture, display)) {
+  if (!VideoInfo::ValidateVideoRegion(frameSize, mPicture, display)) {
     // Video track's frame sizes will overflow. Fail.
     return NS_ERROR_FAILURE;
   }

   mInfo.mHasVideo = true;
   mInfo.mHasAudio = false;
   mInfo.mDisplay = display;
--- a/content/media/raw/RawReader.h
+++ b/content/media/raw/RawReader.h
@@ -29,17 +29,17 @@ public:
     return false;
   }

   virtual bool HasVideo()
   {
     return true;
   }

-  virtual nsresult ReadMetadata(nsVideoInfo* aInfo,
+  virtual nsresult ReadMetadata(VideoInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                         int64_t aCurrentTime);
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime);

   // By seeking in the media resource, it is possible to seek.
   bool IsSeekableInBufferedRanges() {
     return true;
   }
--- a/content/media/test/test_defaultMuted.html +++ b/content/media/test/test_defaultMuted.html @@ -1,54 +1,54 @@ -<!DOCTYPE HTML> -<html> -<head> - <title>Media test: defaultMuted</title> - <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> - <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> - <script type="text/javascript" src="manifest.js"></script> - <script type="text/javascript" src="../../html/content/test/reflect.js"></script> -</head> -<body> - <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=706731">Mozilla Bug 706731</a> - <p id="display"></p> - <div id="content" style="display: none"></div> - <video id='v1'></video><audio id='a1'></audio> - <video id='v2' muted></video><audio id='a2' muted></audio> -<pre id="test"> -<script class="testbody" type="text/javascript"> - reflectBoolean({ - element: document.createElement("video"), - attribute: { content: "muted", idl: "defaultMuted" }, - }); - - reflectBoolean({ - element: document.createElement("audio"), - attribute: { content: "muted", idl: "defaultMuted" }, - }); - - var v1 = document.getElementById('v1'); - var a1 = document.getElementById('a1'); - var v2 = document.getElementById('v2'); - var a2 = document.getElementById('a2'); - - // Check that muted state correspond to the default value. - is(v1.muted, false, "v1.muted should be false by default"); - is(a1.muted, false, "a1.muted should be false by default"); - is(v2.muted, true, "v2.muted should be true by default"); - is(a2.muted, true, "a2.muted should be true by default"); - - // Changing defaultMuted value should not change current muted state. - v1.defaultMuted = true; - a1.defaultMuted = true; - v2.defaultMuted = false; - a2.defaultMuted = false; - - is(v1.muted, false, "v1.muted should not have changed"); - is(a1.muted, false, "a1.muted should not have changed"); - is(v2.muted, true, "v2.muted should not have changed"); - is(a2.muted, true, "a2.muted should not have changed"); - - mediaTestCleanup(); -</script> -</pre> -</body> -</html> +<!DOCTYPE HTML> +<html> +<head> + <title>Media test: defaultMuted</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + <script type="text/javascript" src="manifest.js"></script> + <script type="text/javascript" src="../../html/content/test/reflect.js"></script> +</head> +<body> + <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=706731">Mozilla Bug 706731</a> + <p id="display"></p> + <div id="content" style="display: none"></div> + <video id='v1'></video><audio id='a1'></audio> + <video id='v2' muted></video><audio id='a2' muted></audio> +<pre id="test"> +<script class="testbody" type="text/javascript"> + reflectBoolean({ + element: document.createElement("video"), + attribute: { content: "muted", idl: "defaultMuted" }, + }); + + reflectBoolean({ + element: document.createElement("audio"), + attribute: { content: "muted", idl: "defaultMuted" }, + }); + + var v1 = document.getElementById('v1'); + var a1 = document.getElementById('a1'); + var v2 = document.getElementById('v2'); + var a2 = document.getElementById('a2'); + + // Check that muted state correspond to the default value. 
+ is(v1.muted, false, "v1.muted should be false by default"); + is(a1.muted, false, "a1.muted should be false by default"); + is(v2.muted, true, "v2.muted should be true by default"); + is(a2.muted, true, "a2.muted should be true by default"); + + // Changing defaultMuted value should not change current muted state. + v1.defaultMuted = true; + a1.defaultMuted = true; + v2.defaultMuted = false; + a2.defaultMuted = false; + + is(v1.muted, false, "v1.muted should not have changed"); + is(a1.muted, false, "a1.muted should not have changed"); + is(v2.muted, true, "v2.muted should not have changed"); + is(a2.muted, true, "a2.muted should not have changed"); + + mediaTestCleanup(); +</script> +</pre> +</body> +</html>
--- a/content/media/wave/WaveReader.cpp
+++ b/content/media/wave/WaveReader.cpp
@@ -115,17 +115,17 @@ WaveReader::~WaveReader()
   MOZ_COUNT_DTOR(WaveReader);
 }

 nsresult WaveReader::Init(MediaDecoderReader* aCloneDonor)
 {
   return NS_OK;
 }

-nsresult WaveReader::ReadMetadata(nsVideoInfo* aInfo,
+nsresult WaveReader::ReadMetadata(VideoInfo* aInfo,
                                   MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

   bool loaded = LoadRIFFChunk() && LoadFormatChunk() && FindDataOffset();
   if (!loaded) {
     return NS_ERROR_FAILURE;
   }
--- a/content/media/wave/WaveReader.h
+++ b/content/media/wave/WaveReader.h
@@ -28,17 +28,17 @@ public:
     return true;
   }

   virtual bool HasVideo()
   {
     return false;
   }

-  virtual nsresult ReadMetadata(nsVideoInfo* aInfo,
+  virtual nsresult ReadMetadata(VideoInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                         int64_t aCurrentTime);
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime);

   // To seek in a buffered range, we just have to seek the stream.
   virtual bool IsSeekableInBufferedRanges() {
     return true;
   }
--- a/content/media/webm/WebMReader.cpp
+++ b/content/media/webm/WebMReader.cpp
@@ -177,17 +177,17 @@ nsresult WebMReader::ResetDecode()
 void WebMReader::Cleanup()
 {
   if (mContext) {
     nestegg_destroy(mContext);
     mContext = nullptr;
   }
 }

-nsresult WebMReader::ReadMetadata(nsVideoInfo* aInfo,
+nsresult WebMReader::ReadMetadata(VideoInfo* aInfo,
                                   MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

   LOG(PR_LOG_DEBUG, ("Reader [%p] for Decoder [%p]: Reading WebM Metadata: "
                      "init bytes [%d - %d] cues bytes [%d - %d]",
                      this, mDecoder,
                      mInitByteRange.mStart, mInitByteRange.mEnd,
@@ -252,17 +252,17 @@ nsresult WebMReader::ReadMetadata(nsVide
       pictureRect.width = params.width;
       pictureRect.height = params.height;
     }

     // Validate the container-reported frame and pictureRect sizes. This ensures
     // that our video frame creation code doesn't overflow.
     nsIntSize displaySize(params.display_width, params.display_height);
     nsIntSize frameSize(params.width, params.height);
-    if (!nsVideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
+    if (!VideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
       // Video track's frame sizes will overflow. Ignore the video track.
       continue;
     }

     mVideoTrack = track;
     mHasVideo = true;
     mInfo.mHasVideo = true;
--- a/content/media/webm/WebMReader.h
+++ b/content/media/webm/WebMReader.h
@@ -125,17 +125,17 @@ public:
     return mHasVideo;
   }

   // Bug 575140, cannot seek in webm if no cue is present.
   bool IsSeekableInBufferedRanges() {
     return false;
   }

-  virtual nsresult ReadMetadata(nsVideoInfo* aInfo,
+  virtual nsresult ReadMetadata(VideoInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                         int64_t aCurrentTime);
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime);
   virtual void NotifyDataArrived(const char* aBuffer, uint32_t aLength,
                                  int64_t aOffset);

   // Sets byte range for initialization (EBML); used by DASH.
   void SetInitByteRange(MediaByteRange &aByteRange) {
     mInitByteRange = aByteRange;