author | Randell Jesup <rjesup@jesup.org> |
date | Wed, 02 Apr 2014 17:11:12 -0400 (2014-04-02) |
changeset 176798 | ac6cbaa47f343b9b3055b335a9fc5d2eee725676 |
parent 176797 | 16cf810656c7406fda903426aca909546e000ba0 |
child 176846 | 57b4d6a3a4015eaad9c315d1ffb6124641bc2d40 |
child 176862 | 21bfbc433bceba0bd0af8483c69b051d1cd0f7b1 |
child 176894 | 90ed8a0beaf7fc2ffd9d5c31f020881d80cbd78e |
push id | 26532 |
push user | rjesup@wgate.com |
push date | Thu, 03 Apr 2014 04:20:48 +0000 (2014-04-03) |
treeherder | mozilla-central@ac6cbaa47f34 |
perfherder | talos, build metrics, platform microbench (compared to previous push) |
reviewers | backout |
milestone | 31.0a1 |
backs out | 965c622894278e1c07b30b56fdc71a1d2c294c1a |
first release with | nightly 31.0a1 / 20140403030201 (ac6cbaa47f34) on linux32, linux64, mac, win32, win64 |
deleted file mode 100644
--- a/content/media/AudioMixer.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MOZILLA_AUDIOMIXER_H_
-#define MOZILLA_AUDIOMIXER_H_
-
-#include "AudioSampleFormat.h"
-#include "nsTArray.h"
-#include "mozilla/PodOperations.h"
-
-namespace mozilla {
-typedef void(*MixerFunc)(AudioDataValue* aMixedBuffer,
-                         AudioSampleFormat aFormat,
-                         uint32_t aChannels,
-                         uint32_t aFrames);
-
-/**
- * This class mixes multiple streams of audio together to output a single audio
- * stream.
- *
- * AudioMixer::Mix is to be called repeatedly with buffers that have the same
- * length, sample rate, sample format and channel count.
- *
- * When all the tracks have been mixed, calling FinishMixing will call back with
- * a buffer containing the mixed audio data.
- *
- * This class is not thread safe.
- */
-class AudioMixer
-{
-public:
-  AudioMixer(MixerFunc aCallback)
-    : mCallback(aCallback),
-      mFrames(0),
-      mChannels(0)
-  { }
-
-  /* Get the data from the mixer. This is supposed to be called when all the
-   * tracks have been mixed in. The caller should not hold onto the data. */
-  void FinishMixing() {
-    mCallback(mMixedAudio.Elements(),
-              AudioSampleTypeToFormat<AudioDataValue>::Format,
-              mChannels,
-              mFrames);
-    PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
-    mChannels = mFrames = 0;
-  }
-
-  /* Add a buffer to the mix. aSamples is interleaved. */
-  void Mix(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames) {
-    if (!mFrames && !mChannels) {
-      mFrames = aFrames;
-      mChannels = aChannels;
-      EnsureCapacityAndSilence();
-    }
-
-    MOZ_ASSERT(aFrames == mFrames);
-    MOZ_ASSERT(aChannels == mChannels);
-
-    for (uint32_t i = 0; i < aFrames * aChannels; i++) {
-      mMixedAudio[i] += aSamples[i];
-    }
-  }
-private:
-  void EnsureCapacityAndSilence() {
-    if (mFrames * mChannels > mMixedAudio.Length()) {
-      mMixedAudio.SetLength(mFrames * mChannels);
-    }
-    PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
-  }
-
-  /* Function that is called when the mixing is done. */
-  MixerFunc mCallback;
-  /* Number of frames for this mixing block. */
-  uint32_t mFrames;
-  /* Number of channels for this mixing block. */
-  uint32_t mChannels;
-  /* Buffer containing the mixed audio data. */
-  nsTArray<AudioDataValue> mMixedAudio;
-};
-}
-
-#endif // MOZILLA_AUDIOMIXER_H_
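For reference, the class comment above fully specifies the removed mixer's contract: every Mix() call within one block must use the same length, sample rate, format and channel count, and FinishMixing() then fires the callback with the sum. A hypothetical usage sketch — names other than AudioMixer/Mix/FinishMixing are invented for illustration, not code from this tree:

#include "AudioMixer.h"

// Receives the summed, interleaved block once all tracks are mixed in.
static void MixedAudioCallback(mozilla::AudioDataValue* aMixed,
                               mozilla::AudioSampleFormat aFormat,
                               uint32_t aChannels, uint32_t aFrames)
{
  // Hand the mixed block to a consumer, e.g. an echo-cancellation far end.
}

static void MixOneBlock(mozilla::AudioDataValue* aTrackA,
                        mozilla::AudioDataValue* aTrackB,
                        uint32_t aChannels, uint32_t aFrames)
{
  mozilla::AudioMixer mixer(MixedAudioCallback);
  // Each Mix() call sums one track's interleaved buffer into the
  // internal accumulation buffer.
  mixer.Mix(aTrackA, aChannels, aFrames);
  mixer.Mix(aTrackB, aChannels, aFrames);
  // Invokes MixedAudioCallback with the sum, then resets for the next block.
  mixer.FinishMixing();
}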
--- a/content/media/AudioNodeExternalInputStream.cpp
+++ b/content/media/AudioNodeExternalInputStream.cpp
@@ -102,16 +102,25 @@ ResampleChannelBuffer(SpeexResamplerStat
     WebAudioUtils::SpeexResamplerProcess(aResampler, aChannel,
                                          aInput + processed, &in,
                                          output, &out);
     processed += in;
     aOutput->SetLength(prevLength + out);
   }
 }
 
+class SharedChannelArrayBuffer : public ThreadSharedObject {
+public:
+  SharedChannelArrayBuffer(nsTArray<nsTArray<float> >* aBuffers)
+  {
+    mBuffers.SwapElements(*aBuffers);
+  }
+  nsTArray<nsTArray<float> > mBuffers;
+};
+
 void
 AudioNodeExternalInputStream::TrackMapEntry::ResampleChannels(const nsTArray<const void*>& aBuffers,
                                                               uint32_t aInputDuration,
                                                               AudioSampleFormat aFormat,
                                                               float aVolume)
 {
   NS_ASSERTION(aBuffers.Length() == mResamplerChannelCount,
                "Channel count must be correct here");
@@ -164,17 +173,17 @@ AudioNodeExternalInputStream::TrackMapEn
     }
     bufferPtrs[i] = resampledBuffers[i].Elements();
     NS_ASSERTION(i == 0 ||
                  resampledBuffers[i].Length() == resampledBuffers[0].Length(),
                  "Resampler made different decisions for different channels!");
   }
 
   uint32_t length = resampledBuffers[0].Length();
-  nsRefPtr<ThreadSharedObject> buf = new SharedChannelArrayBuffer<float>(&resampledBuffers);
+  nsRefPtr<ThreadSharedObject> buf = new SharedChannelArrayBuffer(&resampledBuffers);
   mResampledData.AppendFrames(buf.forget(), bufferPtrs, length);
 }
 
 void
 AudioNodeExternalInputStream::TrackMapEntry::ResampleInputData(AudioSegment* aSegment)
 {
   AudioSegment::ChunkIterator ci(*aSegment);
   while (!ci.IsEnded()) {
--- a/content/media/AudioSampleFormat.h
+++ b/content/media/AudioSampleFormat.h
@@ -44,29 +44,18 @@ public:
 };
 template <> class AudioSampleTraits<AUDIO_FORMAT_S16> {
 public:
   typedef int16_t Type;
 };
 
 typedef AudioSampleTraits<AUDIO_OUTPUT_FORMAT>::Type AudioDataValue;
 
-template<typename T> class AudioSampleTypeToFormat;
-
-template <> class AudioSampleTypeToFormat<float> {
-public:
-  static const AudioSampleFormat Format = AUDIO_FORMAT_FLOAT32;
-};
+// Single-sample conversion
 
-template <> class AudioSampleTypeToFormat<short> {
-public:
-  static const AudioSampleFormat Format = AUDIO_FORMAT_S16;
-};
-
-// Single-sample conversion
 /*
  * Use "2^N" conversion since it's simple, fast, "bit transparent", used by
  * many other libraries and apparently behaves reasonably.
  * http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html
  * http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html
 */
 inline float
 AudioSampleToFloat(float aValue)
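The "2^N" scheme referenced in the surviving comment scales between integer and floating-point samples by a power of two (2^15 for 16-bit audio), which keeps the int-to-float-to-int round trip bit transparent. A minimal standalone illustration — not the tree's actual helpers, which also handle clipping:

#include <cstdint>

// int16 -> float in roughly [-1.0, 1.0), scaling by 2^15.
inline float SampleToFloat(int16_t aValue)
{
  return aValue / 32768.0f;
}

// Inverse scaling; production code would clamp to [-32768, 32767] first.
inline int16_t SampleToInt16(float aValue)
{
  return static_cast<int16_t>(aValue * 32768.0f);
}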
--- a/content/media/AudioSegment.cpp
+++ b/content/media/AudioSegment.cpp
@@ -1,20 +1,18 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioSegment.h"
 
 #include "AudioStream.h"
-#include "AudioMixer.h"
 #include "AudioChannelFormat.h"
 #include "Latency.h"
-#include "speex/speex_resampler.h"
 
 namespace mozilla {
 
 template <class SrcT, class DestT>
 static void
 InterleaveAndConvertBuffer(const SrcT** aSourceChannels,
                            int32_t aLength, float aVolume,
                            int32_t aChannels,
@@ -106,105 +104,77 @@ DownmixAndInterleave(const nsTArray<cons
   if (channelData.Length() > aOutputChannels) {
     AudioChannelsDownMix(channelData, outputChannelBuffers.Elements(),
                          aOutputChannels, aDuration);
   }
   InterleaveAndConvertBuffer(outputChannelData.Elements(), AUDIO_FORMAT_FLOAT32,
                              aDuration, aVolume, aOutputChannels, aOutput);
 }
 
-void
-AudioSegment::ResampleChunks(SpeexResamplerState* aResampler)
-{
-  uint32_t inRate, outRate;
-
-  if (mChunks.IsEmpty()) {
-    return;
-  }
-
-  speex_resampler_get_rate(aResampler, &inRate, &outRate);
-
-  switch (mChunks[0].mBufferFormat) {
-    case AUDIO_FORMAT_FLOAT32:
-      Resample<float>(aResampler, inRate, outRate);
-      break;
-    case AUDIO_FORMAT_S16:
-      Resample<int16_t>(aResampler, inRate, outRate);
-      break;
-    default:
-      MOZ_ASSERT(false);
-      break;
-  }
-}
-
 void
-AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
+AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput)
 {
   uint32_t outputChannels = aOutput->GetChannels();
   nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
   nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
 
-  if (!GetDuration()) {
-    return;
-  }
-
-  uint32_t outBufferLength = GetDuration() * outputChannels;
-  buf.SetLength(outBufferLength);
-
-  // Offset in the buffer that will end up sent to the AudioStream.
-  uint32_t offset = 0;
-
   for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
     AudioChunk& c = *ci;
-    uint32_t frames = c.mDuration;
+    TrackTicks offset = 0;
+    while (offset < c.mDuration) {
+      TrackTicks durationTicks =
+        std::min<TrackTicks>(c.mDuration - offset, AUDIO_PROCESSING_FRAMES);
+      if (uint64_t(outputChannels)*durationTicks > INT32_MAX || offset > INT32_MAX) {
+        NS_ERROR("Buffer overflow");
+        return;
+      }
+
+      uint32_t duration = uint32_t(durationTicks);
 
-    // If we have written data in the past, or we have real (non-silent) data
-    // to write, we can proceed. Otherwise, it means we just started the
-    // AudioStream, and we don't have real data to write to it (just silence).
-    // To avoid overbuffering in the AudioStream, we simply drop the silence,
-    // here. The stream will underrun and output silence anyways.
-    if (c.mBuffer || aOutput->GetWritten()) {
-      if (c.mBuffer) {
-        channelData.SetLength(c.mChannelData.Length());
-        for (uint32_t i = 0; i < channelData.Length(); ++i) {
-          channelData[i] = c.mChannelData[i];
-        }
+      // If we have written data in the past, or we have real (non-silent) data
+      // to write, we can proceed. Otherwise, it means we just started the
+      // AudioStream, and we don't have real data to write to it (just silence).
+      // To avoid overbuffering in the AudioStream, we simply drop the silence,
+      // here. The stream will underrun and output silence anyways.
+      if (c.mBuffer || aOutput->GetWritten()) {
+        buf.SetLength(outputChannels*duration);
+        if (c.mBuffer) {
+          channelData.SetLength(c.mChannelData.Length());
+          for (uint32_t i = 0; i < channelData.Length(); ++i) {
+            channelData[i] =
+              AddAudioSampleOffset(c.mChannelData[i], c.mBufferFormat, int32_t(offset));
+          }
 
-        if (channelData.Length() < outputChannels) {
-          // Up-mix. Note that this might actually make channelData have more
-          // than outputChannels temporarily.
-          AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
-        }
+          if (channelData.Length() < outputChannels) {
+            // Up-mix. Note that this might actually make channelData have more
+            // than outputChannels temporarily.
+            AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
+          }
 
-        if (channelData.Length() > outputChannels) {
-          // Down-mix.
-          DownmixAndInterleave(channelData, c.mBufferFormat, frames,
-                               c.mVolume, outputChannels, buf.Elements() + offset);
+          if (channelData.Length() > outputChannels) {
+            // Down-mix.
+            DownmixAndInterleave(channelData, c.mBufferFormat, duration,
+                                 c.mVolume, outputChannels, buf.Elements());
+          } else {
+            InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
+                                       duration, c.mVolume,
+                                       outputChannels,
+                                       buf.Elements());
+          }
         } else {
-          InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
-                                     frames, c.mVolume,
-                                     outputChannels,
-                                     buf.Elements() + offset);
+          // Assumes that a bit pattern of zeroes == 0.0f
+          memset(buf.Elements(), 0, buf.Length()*sizeof(AudioDataValue));
        }
-      } else {
-        // Assumes that a bit pattern of zeroes == 0.0f
-        memset(buf.Elements() + offset, 0, outputChannels * frames * sizeof(AudioDataValue));
+        aOutput->Write(buf.Elements(), int32_t(duration), &(c.mTimeStamp));
       }
+      if(!c.mTimeStamp.IsNull()) {
+        TimeStamp now = TimeStamp::Now();
+        // would be more efficient to c.mTimeStamp to ms on create time then pass here
+        LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
+                (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
+      }
+      offset += duration;
     }
-
-    offset += frames * outputChannels;
-
-    if (!c.mTimeStamp.IsNull()) {
-      TimeStamp now = TimeStamp::Now();
-      // would be more efficient to c.mTimeStamp to ms on create time then pass here
-      LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
-              (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
-    }
   }
-
-  aOutput->Write(buf.Elements(), GetDuration(), &(mChunks[mChunks.Length() - 1].mTimeStamp));
-
-  if (aMixer) {
-    aMixer->Mix(buf.Elements(), outputChannels, GetDuration());
-  }
   aOutput->Start();
 }
 
 }
--- a/content/media/AudioSegment.h
+++ b/content/media/AudioSegment.h
@@ -4,35 +4,23 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_AUDIOSEGMENT_H_
 #define MOZILLA_AUDIOSEGMENT_H_
 
 #include "MediaSegment.h"
 #include "AudioSampleFormat.h"
 #include "SharedBuffer.h"
-#include "WebAudioUtils.h"
 #ifdef MOZILLA_INTERNAL_API
 #include "mozilla/TimeStamp.h"
 #endif
 
 namespace mozilla {
 
-template<typename T>
-class SharedChannelArrayBuffer : public ThreadSharedObject {
-public:
-  SharedChannelArrayBuffer(nsTArray<nsTArray<T>>* aBuffers)
-  {
-    mBuffers.SwapElements(*aBuffers);
-  }
-  nsTArray<nsTArray<T>> mBuffers;
-};
-
 class AudioStream;
-class AudioMixer;
 
 /**
  * For auto-arrays etc, guess this as the common number of channels.
  */
 const int GUESS_AUDIO_CHANNELS = 2;
 
 // We ensure that the graph advances in steps that are multiples of the Web
 // Audio block size
@@ -118,64 +106,26 @@ struct AudioChunk {
   nsTArray<const void*> mChannelData; // one pointer per channel; empty if and only if mBuffer is null
   float mVolume; // volume multiplier to apply (1.0f if mBuffer is nonnull)
   SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull)
 #ifdef MOZILLA_INTERNAL_API
   mozilla::TimeStamp mTimeStamp; // time at which this has been fetched from the MediaEngine
 #endif
 };
 
 /**
  * A list of audio samples consisting of a sequence of slices of SharedBuffers.
  * The audio rate is determined by the track, not stored in this class.
  */
 class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
 public:
   typedef mozilla::AudioSampleFormat SampleFormat;
 
   AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO) {}
 
-  // Resample the whole segment in place.
-  template<typename T>
-  void Resample(SpeexResamplerState* aResampler, uint32_t aInRate, uint32_t aOutRate)
-  {
-    mDuration = 0;
-
-    for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
-      nsAutoTArray<nsTArray<T>, GUESS_AUDIO_CHANNELS> output;
-      nsAutoTArray<const T*, GUESS_AUDIO_CHANNELS> bufferPtrs;
-      AudioChunk& c = *ci;
-      uint32_t channels = c.mChannelData.Length();
-      output.SetLength(channels);
-      bufferPtrs.SetLength(channels);
-      uint32_t inFrames = c.mDuration,
-               outFrames = c.mDuration * aOutRate / aInRate;
-      for (uint32_t i = 0; i < channels; i++) {
-        const T* in = static_cast<const T*>(c.mChannelData[i]);
-        T* out = output[i].AppendElements(outFrames);
-
-        dom::WebAudioUtils::SpeexResamplerProcess(aResampler, i,
-                                                  in, &inFrames,
-                                                  out, &outFrames);
-
-        bufferPtrs[i] = out;
-        output[i].SetLength(outFrames);
-      }
-      c.mBuffer = new mozilla::SharedChannelArrayBuffer<T>(&output);
-      for (uint32_t i = 0; i < channels; i++) {
-        c.mChannelData[i] = bufferPtrs[i];
-      }
-      c.mDuration = outFrames;
-      mDuration += c.mDuration;
-    }
-  }
-
-  void ResampleChunks(SpeexResamplerState* aResampler);
-
   void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                     const nsTArray<const float*>& aChannelData,
                     int32_t aDuration)
   {
     AudioChunk* chunk = AppendChunk(aDuration);
     chunk->mBuffer = aBuffer;
     for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
       chunk->mChannelData.AppendElement(aChannelData[channel]);
@@ -211,22 +161,16 @@ public:
     chunk->mVolume = aChunk->mVolume;
     chunk->mBufferFormat = aChunk->mBufferFormat;
 #ifdef MOZILLA_INTERNAL_API
     chunk->mTimeStamp = TimeStamp::Now();
 #endif
     return chunk;
   }
   void ApplyVolume(float aVolume);
-  void WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer = nullptr);
-
-  int ChannelCount() {
-    NS_WARN_IF_FALSE(!mChunks.IsEmpty(),
-                     "Cannot query channel count on a AudioSegment with no chunks.");
-    return mChunks.IsEmpty() ? 0 : mChunks[0].mChannelData.Length();
-  }
+  void WriteTo(uint64_t aID, AudioStream* aOutput);
 
   static Type StaticType() { return AUDIO; }
 };
 
 }
 
 #endif /* MOZILLA_AUDIOSEGMENT_H_ */
--- a/content/media/MediaSegment.h
+++ b/content/media/MediaSegment.h
@@ -262,18 +262,19 @@ protected:
       aSource->mChunks.RemoveElementAt(0);
     }
     mChunks.MoveElementsFrom(aSource->mChunks);
   }
 
   void AppendSliceInternal(const MediaSegmentBase<C, Chunk>& aSource,
                            TrackTicks aStart, TrackTicks aEnd)
   {
-    MOZ_ASSERT(aStart <= aEnd, "Endpoints inverted");
-    MOZ_ASSERT(aStart >= 0 && aEnd <= aSource.mDuration, "Slice out of range");
+    NS_ASSERTION(aStart <= aEnd, "Endpoints inverted");
+    NS_WARN_IF_FALSE(aStart >= 0 && aEnd <= aSource.mDuration,
+                     "Slice out of range");
     mDuration += aEnd - aStart;
     TrackTicks offset = 0;
     for (uint32_t i = 0; i < aSource.mChunks.Length() && offset < aEnd; ++i) {
       const Chunk& c = aSource.mChunks[i];
       TrackTicks start = std::max(aStart, offset);
       TrackTicks nextOffset = offset + c.GetDuration();
       TrackTicks end = std::min(aEnd, nextOffset);
       if (start < end) {
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -21,18 +21,16 @@
 #include "AudioChannelCommon.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "AudioNodeExternalInputStream.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 #include "mozilla/unused.h"
-#include "speex/speex_resampler.h"
-#include "AudioOutputObserver.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 
 namespace mozilla {
 
 #ifdef PR_LOGGING
@@ -169,26 +167,25 @@ MediaStreamGraphImpl::ExtractPendingInpu
     finished = aStream->mUpdateFinished;
     for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
       SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
       aStream->ApplyTrackDisabling(data->mID, data->mData);
       for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
         MediaStreamListener* l = aStream->mListeners[j];
         TrackTicks offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
             ? data->mStart : aStream->mBuffer.FindTrack(data->mID)->GetSegment()->GetDuration();
-        l->NotifyQueuedTrackChanges(this, data->mID, data->mOutputRate,
+        l->NotifyQueuedTrackChanges(this, data->mID, data->mRate,
                                     offset, data->mCommands, *data->mData);
       }
       if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
         MediaSegment* segment = data->mData.forget();
         STREAM_LOG(PR_LOG_DEBUG, ("SourceMediaStream %p creating track %d, rate %d, start %lld, initial end %lld",
-                                  aStream, data->mID, data->mOutputRate, int64_t(data->mStart),
+                                  aStream, data->mID, data->mRate, int64_t(data->mStart),
                                   int64_t(segment->GetDuration())));
-
-        aStream->mBuffer.AddTrack(data->mID, data->mOutputRate, data->mStart, segment);
+        aStream->mBuffer.AddTrack(data->mID, data->mRate, data->mStart, segment);
         // The track has taken ownership of data->mData, so let's replace
         // data->mData with an empty clone.
         data->mData = segment->CreateEmptyClone();
         data->mCommands &= ~SourceMediaStream::TRACK_CREATE;
       } else if (data->mData->GetDuration() > 0) {
         MediaSegment* dest = aStream->mBuffer.FindTrack(data->mID)->GetSegment();
         STREAM_LOG(PR_LOG_DEBUG+1, ("SourceMediaStream %p track %d, advancing end from %lld to %lld",
                                     aStream, data->mID,
@@ -330,17 +327,17 @@ MediaStreamGraphImpl::GetAudioPosition(M
   if (aStream->mAudioOutputStreams.IsEmpty()) {
     return mCurrentTime;
   }
   int64_t positionInFrames = aStream->mAudioOutputStreams[0].mStream->GetPositionInFrames();
   if (positionInFrames < 0) {
     return mCurrentTime;
   }
   return aStream->mAudioOutputStreams[0].mAudioPlaybackStartTime +
-      TicksToTimeRoundDown(IdealAudioRate(),
+      TicksToTimeRoundDown(aStream->mAudioOutputStreams[0].mStream->GetRate(),
                           positionInFrames);
 }
 
 void
 MediaStreamGraphImpl::UpdateCurrentTime()
 {
   GraphTime prevCurrentTime, nextCurrentTime;
   if (mRealtime) {
@@ -573,63 +570,37 @@ MediaStreamGraphImpl::UpdateStreamOrderF
     aStack->popLast();
     stream->mIsOnOrderingStack = false;
   }
 
   stream->mHasBeenOrdered = true;
   *mStreams.AppendElement() = stream.forget();
 }
 
-static void AudioMixerCallback(AudioDataValue* aMixedBuffer,
-                               AudioSampleFormat aFormat,
-                               uint32_t aChannels,
-                               uint32_t aFrames)
-{
-  // Need an api to register mixer callbacks, bug 989921
-  if (aFrames > 0 && aChannels > 0) {
-    // XXX need Observer base class and registration API
-    if (gFarendObserver) {
-      gFarendObserver->InsertFarEnd(aMixedBuffer, aFrames, false,
-                                    IdealAudioRate(), aChannels, aFormat);
-    }
-  }
-}
-
 void
 MediaStreamGraphImpl::UpdateStreamOrder()
 {
   mOldStreams.SwapElements(mStreams);
   mStreams.ClearAndRetainStorage();
-  bool shouldMix = false;
   for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
     MediaStream* stream = mOldStreams[i];
     stream->mHasBeenOrdered = false;
     stream->mIsConsumed = false;
     stream->mIsOnOrderingStack = false;
     stream->mInBlockingSet = false;
-    if (stream->AsSourceStream() &&
-        stream->AsSourceStream()->NeedsMixing()) {
-      shouldMix = true;
-    }
     ProcessedMediaStream* ps = stream->AsProcessedStream();
     if (ps) {
       ps->mInCycle = false;
       AudioNodeStream* ns = ps->AsAudioNodeStream();
       if (ns) {
         ns->Unmute();
       }
     }
   }
 
-  if (!mMixer && shouldMix) {
-    mMixer = new AudioMixer(AudioMixerCallback);
-  } else if (mMixer && !shouldMix) {
-    mMixer = nullptr;
-  }
-
   mozilla::LinkedList<MediaStream> stack;
   for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
     nsRefPtr<MediaStream>& s = mOldStreams[i];
     if (s->IsIntrinsicallyConsumed()) {
       MarkConsumed(s);
     }
     if (!s->mHasBeenOrdered) {
       UpdateStreamOrderForStream(&stack, s.forget());
@@ -832,21 +803,20 @@ MediaStreamGraphImpl::CreateOrDestroyAud
         // XXX allocating a AudioStream could be slow so we're going to have to do
         // something here ... preallocation, async allocation, multiplexing onto a single
         // stream ...
         MediaStream::AudioOutputStream* audioOutputStream =
           aStream->mAudioOutputStreams.AppendElement();
         audioOutputStream->mAudioPlaybackStartTime = aAudioOutputStartTime;
         audioOutputStream->mBlockedAudioTime = 0;
-        audioOutputStream->mLastTickWritten = 0;
         audioOutputStream->mStream = new AudioStream();
         // XXX for now, allocate stereo output. But we need to fix this to
         // match the system's ideal channel configuration.
-        audioOutputStream->mStream->Init(2, IdealAudioRate(), AUDIO_CHANNEL_NORMAL, AudioStream::LowLatency);
+        audioOutputStream->mStream->Init(2, tracks->GetRate(), AUDIO_CHANNEL_NORMAL, AudioStream::LowLatency);
         audioOutputStream->mTrackID = tracks->GetID();
 
         LogLatency(AsyncLatencyLogger::AudioStreamCreate,
                    reinterpret_cast<uint64_t>(aStream),
                    reinterpret_cast<int64_t>(audioOutputStream->mStream.get()));
       }
     }
   }
@@ -854,124 +824,92 @@ MediaStreamGraphImpl::CreateOrDestroyAud
 
   for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
     if (!audioOutputStreamsFound[i]) {
       aStream->mAudioOutputStreams[i].mStream->Shutdown();
       aStream->mAudioOutputStreams.RemoveElementAt(i);
     }
   }
 }
 
-TrackTicks
+void
 MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
                                 GraphTime aFrom, GraphTime aTo)
 {
   MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode");
 
-  TrackTicks ticksWritten = 0;
-  // We compute the number of needed ticks by converting a difference of graph
-  // time rather than by substracting two converted stream time to ensure that
-  // the rounding between {Graph,Stream}Time and track ticks is not dependant
-  // on the absolute value of the {Graph,Stream}Time, and so that number of
-  // ticks to play is the same for each cycle.
-  TrackTicks ticksNeeded = TimeToTicksRoundDown(IdealAudioRate(), aTo) - TimeToTicksRoundDown(IdealAudioRate(), aFrom);
-
   if (aStream->mAudioOutputStreams.IsEmpty()) {
-    return 0;
+    return;
   }
 
   // When we're playing multiple copies of this stream at the same time, they're
   // perfectly correlated so adding volumes is the right thing to do.
   float volume = 0.0f;
   for (uint32_t i = 0; i < aStream->mAudioOutputs.Length(); ++i) {
     volume += aStream->mAudioOutputs[i].mVolume;
   }
 
   for (uint32_t i = 0; i < aStream->mAudioOutputStreams.Length(); ++i) {
     MediaStream::AudioOutputStream& audioOutput = aStream->mAudioOutputStreams[i];
     StreamBuffer::Track* track = aStream->mBuffer.FindTrack(audioOutput.mTrackID);
     AudioSegment* audio = track->Get<AudioSegment>();
 
-    AudioSegment output;
-    MOZ_ASSERT(track->GetRate() == IdealAudioRate());
-
-    // offset and audioOutput.mLastTickWritten can differ by at most one sample,
-    // because of the rounding issue. We track that to ensure we don't skip a
-    // sample, or play a sample twice.
-    TrackTicks offset = track->TimeToTicksRoundDown(GraphTimeToStreamTime(aStream, aFrom));
-    if (!audioOutput.mLastTickWritten) {
-      audioOutput.mLastTickWritten = offset;
-    }
-    if (audioOutput.mLastTickWritten != offset) {
-      // If there is a global underrun of the MSG, this property won't hold, and
-      // we reset the sample count tracking.
-      if (std::abs(audioOutput.mLastTickWritten - offset) != 1) {
-        audioOutput.mLastTickWritten = offset;
-      } else {
-        offset = audioOutput.mLastTickWritten;
-      }
-    }
-
     // We don't update aStream->mBufferStartTime here to account for
     // time spent blocked. Instead, we'll update it in UpdateCurrentTime after the
     // blocked period has completed. But we do need to make sure we play from the
     // right offsets in the stream buffer, even if we've already written silence for
     // some amount of blocked time after the current time.
     GraphTime t = aFrom;
-    while (ticksNeeded) {
+    while (t < aTo) {
       GraphTime end;
       bool blocked = aStream->mBlocked.GetAt(t, &end);
       end = std::min(end, aTo);
 
-      // Check how many ticks of sound we can provide if we are blocked some
-      // time in the middle of this cycle.
-      TrackTicks toWrite = 0;
-      if (end >= aTo) {
-        toWrite = ticksNeeded;
-      } else {
-        toWrite = TimeToTicksRoundDown(IdealAudioRate(), end - aFrom);
-      }
+      AudioSegment output;
+      if (blocked) {
+        // Track total blocked time in aStream->mBlockedAudioTime so that
+        // the amount of silent samples we've inserted for blocking never gets
+        // more than one sample away from the ideal amount.
+        TrackTicks startTicks =
+          TimeToTicksRoundDown(track->GetRate(), audioOutput.mBlockedAudioTime);
+        audioOutput.mBlockedAudioTime += end - t;
+        TrackTicks endTicks =
+          TimeToTicksRoundDown(track->GetRate(), audioOutput.mBlockedAudioTime);
 
-      if (blocked) {
-        output.InsertNullDataAtStart(toWrite);
-        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld blocking-silence samples for %f to %f (%ld to %ld)\n",
-                                    aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
-                                    offset, offset + toWrite));
-        ticksNeeded -= toWrite;
+        output.InsertNullDataAtStart(endTicks - startTicks);
+        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing blocking-silence samples for %f to %f",
+                                    aStream, MediaTimeToSeconds(t), MediaTimeToSeconds(end)));
       } else {
-        TrackTicks endTicksNeeded = offset + toWrite;
-        TrackTicks endTicksAvailable = audio->GetDuration();
-        if (endTicksNeeded <= endTicksAvailable) {
-          output.AppendSlice(*audio, offset, endTicksNeeded);
-        } else {
-          MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not ended.");
-          // If we are at the end of the track, maybe write the remaining
-          // samples, and pad with/output silence.
-          if (endTicksNeeded > endTicksAvailable &&
-              offset < endTicksAvailable) {
-            output.AppendSlice(*audio, offset, endTicksAvailable);
-            ticksNeeded -= endTicksAvailable - offset;
-            toWrite -= endTicksAvailable - offset;
-          }
-          output.AppendNullData(toWrite);
+        TrackTicks startTicks =
+          track->TimeToTicksRoundDown(GraphTimeToStreamTime(aStream, t));
+        TrackTicks endTicks =
+          track->TimeToTicksRoundDown(GraphTimeToStreamTime(aStream, end));
+
+        // If startTicks is before the track start, then that part of 'audio'
+        // will just be silence, which is fine here. But if endTicks is after
+        // the track end, then 'audio' won't be long enough, so we'll need
+        // to explicitly play silence.
+        TrackTicks sliceEnd = std::min(endTicks, audio->GetDuration());
+        if (sliceEnd > startTicks) {
+          output.AppendSlice(*audio, startTicks, sliceEnd);
         }
+        // Play silence where the track has ended
+        output.AppendNullData(endTicks - sliceEnd);
+        NS_ASSERTION(endTicks == sliceEnd || track->IsEnded(),
+                     "Ran out of data but track not ended?");
         output.ApplyVolume(volume);
-        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld samples for %f to %f (samples %ld to %ld)\n",
-                                    aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
-                                    offset, endTicksNeeded));
-        ticksNeeded -= toWrite;
+        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing samples for %f to %f (samples %lld to %lld)",
                                    aStream, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
                                    startTicks, endTicks));
       }
+      // Need unique id for stream & track - and we want it to match the inserter
+      output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
+                     audioOutput.mStream);
       t = end;
-      offset += toWrite;
-      audioOutput.mLastTickWritten += toWrite;
     }
-
-    // Need unique id for stream & track - and we want it to match the inserter
-    output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
-                   audioOutput.mStream, mMixer);
   }
-  return ticksWritten;
 }
 
 static void
 SetImageToBlackPixel(PlanarYCbCrImage* aImage)
 {
   uint8_t blackPixel[] = { 0x10, 0x80, 0x80 };
   PlanarYCbCrData data;
@@ -1296,19 +1234,16 @@ MediaStreamGraphImpl::RunThread()
 
     // Figure out which streams are blocked and when.
     GraphTime prevComputedTime = mStateComputedTime;
     RecomputeBlocking(endBlockingDecisions);
 
    // Play stream contents.
     bool allBlockedForever = true;
     // True when we've done ProcessInput for all processed streams.
     bool doneAllProducing = false;
-    // This is the number of frame that are written to the AudioStreams, for
-    // this cycle.
-    TrackTicks ticksPlayed = 0;
 
     // Figure out what each stream wants to do
     for (uint32_t i = 0; i < mStreams.Length(); ++i) {
       MediaStream* stream = mStreams[i];
       if (!doneAllProducing) {
         ProcessedMediaStream* ps = stream->AsProcessedStream();
         if (ps) {
          AudioNodeStream* n = stream->AsAudioNodeStream();
           if (n) {
@@ -1335,39 +1270,28 @@ MediaStreamGraphImpl::RunThread()
                        "Stream did not produce enough data");
         }
       }
       NotifyHasCurrentData(stream);
       if (mRealtime) {
         // Only playback audio and video in real-time mode
         CreateOrDestroyAudioStreams(prevComputedTime, stream);
-        TrackTicks ticksPlayedForThisStream = PlayAudio(stream, prevComputedTime, mStateComputedTime);
-        if (!ticksPlayed) {
-          ticksPlayed = ticksPlayedForThisStream;
-        } else {
-          MOZ_ASSERT(!ticksPlayedForThisStream || ticksPlayedForThisStream == ticksPlayed,
-              "Each stream should have the same number of frame.");
-        }
+        PlayAudio(stream, prevComputedTime, mStateComputedTime);
         PlayVideo(stream);
       }
       SourceMediaStream* is = stream->AsSourceStream();
       if (is) {
         UpdateBufferSufficiencyState(is);
       }
       GraphTime end;
       if (!stream->mBlocked.GetAt(mCurrentTime, &end) || end < GRAPH_TIME_MAX) {
         allBlockedForever = false;
       }
     }
-
-    if (mMixer) {
-      mMixer->FinishMixing();
-    }
-
     if (ensureNextIteration || !allBlockedForever) {
       EnsureNextIteration();
     }
 
     // Send updates to the main thread and wait for the next control loop
     // iteration.
     {
       MonitorAutoLock lock(mMonitor);
@@ -1463,28 +1387,35 @@ MediaStreamGraphImpl::ForceShutDown()
   STREAM_LOG(PR_LOG_DEBUG, ("MediaStreamGraph %p ForceShutdown", this));
   {
     MonitorAutoLock lock(mMonitor);
     mForceShutDown = true;
     EnsureImmediateWakeUpLocked(lock);
   }
 }
 
+void
+MediaStreamGraphImpl::Init()
+{
+  AudioStream::InitPreferredSampleRate();
+}
+
 namespace {
 
 class MediaStreamGraphInitThreadRunnable : public nsRunnable {
 public:
   explicit MediaStreamGraphInitThreadRunnable(MediaStreamGraphImpl* aGraph)
     : mGraph(aGraph)
   {
   }
   NS_IMETHOD Run()
   {
     char aLocal;
     profiler_register_thread("MediaStreamGraph", &aLocal);
+    mGraph->Init();
     mGraph->RunThread();
     return NS_OK;
   }
 private:
   MediaStreamGraphImpl* mGraph;
 };
 
 class MediaStreamGraphThreadRunnable : public nsRunnable {
@@ -1846,17 +1777,17 @@ MediaStream::GetProcessingGraphUpdateInd
 StreamBuffer::Track*
 MediaStream::EnsureTrack(TrackID aTrackId, TrackRate aSampleRate)
 {
   StreamBuffer::Track* track = mBuffer.FindTrack(aTrackId);
   if (!track) {
     nsAutoPtr<MediaSegment> segment(new AudioSegment());
     for (uint32_t j = 0; j < mListeners.Length(); ++j) {
       MediaStreamListener* l = mListeners[j];
-      l->NotifyQueuedTrackChanges(Graph(), aTrackId, IdealAudioRate(), 0,
+      l->NotifyQueuedTrackChanges(Graph(), aTrackId, aSampleRate, 0,
                                   MediaStreamListener::TRACK_EVENT_CREATED,
                                   *segment);
     }
     track = &mBuffer.AddTrack(aTrackId, aSampleRate, 0, segment.forget());
   }
   return track;
 }
 
@@ -2193,51 +2124,26 @@ SourceMediaStream::SetPullEnabled(bool a
 
 void
 SourceMediaStream::AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart,
                             MediaSegment* aSegment)
 {
   MutexAutoLock lock(mMutex);
   TrackData* data = mUpdateTracks.AppendElement();
   data->mID = aID;
-  data->mInputRate = aRate;
-  // We resample all audio input tracks to the sample rate of the audio mixer.
-  data->mOutputRate = aSegment->GetType() == MediaSegment::AUDIO ?
-                      IdealAudioRate() : aRate;
+  data->mRate = aRate;
   data->mStart = aStart;
   data->mCommands = TRACK_CREATE;
   data->mData = aSegment;
   data->mHaveEnough = false;
   if (!mDestroyed) {
     GraphImpl()->EnsureNextIteration();
   }
 }
 
-void
-SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment)
-{
-  if (aSegment->GetType() != MediaSegment::AUDIO ||
-      aTrackData->mInputRate == IdealAudioRate()) {
-    return;
-  }
-  AudioSegment* segment = static_cast<AudioSegment*>(aSegment);
-  if (!aTrackData->mResampler) {
-    int channels = segment->ChannelCount();
-    SpeexResamplerState* state = speex_resampler_init(channels,
-                                                      aTrackData->mInputRate,
-                                                      IdealAudioRate(),
-                                                      SPEEX_RESAMPLER_QUALITY_DEFAULT,
-                                                      nullptr);
-    if (state) {
-      aTrackData->mResampler.own(state);
-    }
-  }
-  segment->ResampleChunks(aTrackData->mResampler);
-}
-
 bool
 SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment)
 {
   MutexAutoLock lock(mMutex);
   // ::EndAllTrackAndFinished() can end these before the sources notice
   bool appended = false;
   if (!mFinished) {
     TrackData *track = FindDataForTrack(aID);
@@ -2247,18 +2153,16 @@ SourceMediaStream::AppendToTrack(TrackID
       // 0-10ms of delay before data gets to direct listeners.
      // Indirect listeners (via subsequent TrackUnion nodes) are synced to
       // playout time, and so can be delayed by buffering.
 
      // Apply track disabling before notifying any consumers directly
       // or inserting into the graph
       ApplyTrackDisabling(aID, aSegment, aRawSegment);
 
-      ResampleAudioToGraphSampleRate(track, aSegment);
-
       // Must notify first, since AppendFrom() will empty out aSegment
       NotifyDirectConsumers(track, aRawSegment ? aRawSegment : aSegment);
       track->mData->AppendFrom(aSegment); // note: aSegment is now dead
       appended = true;
     } else {
       aSegment->Clear();
     }
   }
@@ -2273,17 +2177,17 @@ SourceMediaStream::NotifyDirectConsumers
                                           MediaSegment *aSegment)
 {
   // Call with mMutex locked
   MOZ_ASSERT(aTrack);
 
   for (uint32_t j = 0; j < mDirectListeners.Length(); ++j) {
     MediaStreamDirectListener* l = mDirectListeners[j];
     TrackTicks offset = 0; // FIX! need a separate TrackTicks.... or the end of the internal buffer
-    l->NotifyRealtimeData(static_cast<MediaStreamGraph*>(GraphImpl()), aTrack->mID, aTrack->mOutputRate,
+    l->NotifyRealtimeData(static_cast<MediaStreamGraph*>(GraphImpl()), aTrack->mID, aTrack->mRate,
                           offset, aTrack->mCommands, *aSegment);
   }
 }
 
 void
 SourceMediaStream::AddDirectListener(MediaStreamDirectListener* aListener)
 {
   MutexAutoLock lock(mMutex);
@@ -2387,30 +2291,16 @@ SourceMediaStream::GetBufferedTicks(Trac
         track->TimeToTicksRoundDown(
           GraphTimeToStreamTime(GraphImpl()->mStateComputedTime));
     }
   }
   return 0;
 }
 
 void
-SourceMediaStream::RegisterForAudioMixing()
-{
-  MutexAutoLock lock(mMutex);
-  mNeedsMixing = true;
-}
-
-bool
-SourceMediaStream::NeedsMixing()
-{
-  MutexAutoLock lock(mMutex);
-  return mNeedsMixing;
-}
-
-void
 MediaInputPort::Init()
 {
   STREAM_LOG(PR_LOG_DEBUG, ("Adding MediaInputPort %p (from %p to %p) to the graph",
              this, mSource, mDest));
   mSource->AddConsumer(this);
   mDest->AddInput(this);
   // mPortCount decremented via MediaInputPort::Destroy's message
   ++mDest->GraphImpl()->mPortCount;
@@ -2584,17 +2474,16 @@ MediaStreamGraphImpl::MediaStreamGraphIm
   , mForceShutDown(false)
   , mPostedRunInStableStateEvent(false)
   , mDetectedNotRunning(false)
   , mPostedRunInStableState(false)
   , mRealtime(aRealtime)
   , mNonRealtimeProcessing(false)
   , mStreamOrderDirty(false)
   , mLatencyLog(AsyncLatencyLogger::Get())
-  , mMixer(nullptr)
 {
 #ifdef PR_LOGGING
   if (!gMediaStreamGraphLog) {
     gMediaStreamGraphLog = PR_NewLogModule("MediaStreamGraph");
   }
 #endif
 
   mCurrentTimeStamp = mInitialTimeStamp = mLastMainThreadUpdate = TimeStamp::Now();
@@ -2627,18 +2516,16 @@ MediaStreamGraph::GetInstance()
     if (!gGraph) {
       if (!gShutdownObserverRegistered) {
         gShutdownObserverRegistered = true;
         nsContentUtils::RegisterShutdownObserver(new MediaStreamGraphShutdownObserver());
       }
 
       gGraph = new MediaStreamGraphImpl(true);
       STREAM_LOG(PR_LOG_DEBUG, ("Starting up MediaStreamGraph %p", gGraph));
-
-      AudioStream::InitPreferredSampleRate();
     }
   }
   return gGraph;
 }
 
 MediaStreamGraph*
 MediaStreamGraph::CreateNonRealtimeInstance()
 {
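The blocked-silence bookkeeping restored in PlayAudio above converts cumulative times to ticks (mBlockedAudioTime before and after each blocked interval) rather than converting each interval's duration separately, so the repeated round-down can never drift more than one sample from the ideal total. A standalone sketch of that pattern, using seconds in place of the graph's MediaTime for simplicity; the helper names are invented:

#include <cstdint>
#include <cmath>

typedef int64_t TrackTicks;

// Stand-in for TimeToTicksRoundDown(rate, time).
static TrackTicks TicksRoundDown(int32_t aRate, double aSeconds)
{
  return static_cast<TrackTicks>(std::floor(aSeconds * aRate));
}

// Silent samples to insert for one blocked interval: the difference of two
// round-downs of the running total, so rounding error never accumulates.
static TrackTicks SilentTicksFor(int32_t aRate, double* aBlockedTotalSeconds,
                                 double aIntervalSeconds)
{
  TrackTicks start = TicksRoundDown(aRate, *aBlockedTotalSeconds);
  *aBlockedTotalSeconds += aIntervalSeconds;
  TrackTicks end = TicksRoundDown(aRate, *aBlockedTotalSeconds);
  return end - start;
}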
--- a/content/media/MediaStreamGraph.h
+++ b/content/media/MediaStreamGraph.h
@@ -11,29 +11,19 @@
 #include "AudioStream.h"
 #include "nsTArray.h"
 #include "nsIRunnable.h"
 #include "StreamBuffer.h"
 #include "TimeVarying.h"
 #include "VideoFrameContainer.h"
 #include "VideoSegment.h"
 #include "MainThreadUtils.h"
-#include "nsAutoRef.h"
-#include "speex/speex_resampler.h"
-#include "AudioMixer.h"
 
 class nsIRunnable;
 
-template <>
-class nsAutoRefTraits<SpeexResamplerState> : public nsPointerRefTraits<SpeexResamplerState>
-{
-  public:
-  static void Release(SpeexResamplerState* aState) { speex_resampler_destroy(aState); }
-};
-
 namespace mozilla {
 
 class DOMMediaStream;
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* gMediaStreamGraphLog;
 #endif
 
@@ -568,18 +558,16 @@ protected:
   // audio track.
   struct AudioOutputStream {
     // When we started audio playback for this track.
     // Add mStream->GetPosition() to find the current audio playback position.
     GraphTime mAudioPlaybackStartTime;
     // Amount of time that we've wanted to play silence because of the stream
     // blocking.
     MediaTime mBlockedAudioTime;
-    // Last tick written to the audio output.
-    TrackTicks mLastTickWritten;
     nsAutoPtr<AudioStream> mStream;
     TrackID mTrackID;
   };
   nsTArray<AudioOutputStream> mAudioOutputStreams;
 
   /**
    * When true, this means the stream will be finished once all
    * buffered data has been consumed.
@@ -669,19 +657,16 @@ public:
   /**
    * Add a new track to the stream starting at the given base time (which
    * must be greater than or equal to the last time passed to
    * AdvanceKnownTracksTime). Takes ownership of aSegment. aSegment should
    * contain data starting after aStart.
    */
   void AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart,
                 MediaSegment* aSegment);
-
-  struct TrackData;
-  void ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment);
   /**
    * Append media data to a track. Ownership of aSegment remains with the caller,
    * but aSegment is emptied.
    * Returns false if the data was not appended because no such track exists
    * or the stream was already finished.
    */
   bool AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment = nullptr);
   /**
@@ -762,37 +747,28 @@ public:
     TRACK_CREATE = MediaStreamListener::TRACK_EVENT_CREATED,
     TRACK_END = MediaStreamListener::TRACK_EVENT_ENDED
   };
   /**
    * Data for each track that hasn't ended.
    */
   struct TrackData {
     TrackID mID;
-    // Sample rate of the input data.
-    TrackRate mInputRate;
-    // Sample rate of the output data, always equal to IdealAudioRate()
-    TrackRate mOutputRate;
-    // Resampler if the rate of the input track does not match the
-    // MediaStreamGraph's.
-    nsAutoRef<SpeexResamplerState> mResampler;
+    TrackRate mRate;
     TrackTicks mStart;
     // Each time the track updates are flushed to the media graph thread,
     // this is cleared.
     uint32_t mCommands;
     // Each time the track updates are flushed to the media graph thread,
    // the segment buffer is emptied.
     nsAutoPtr<MediaSegment> mData;
     nsTArray<ThreadAndRunnable> mDispatchWhenNotEnough;
     bool mHaveEnough;
   };
 
-  void RegisterForAudioMixing();
-  bool NeedsMixing();
-
 protected:
   TrackData* FindDataForTrack(TrackID aID)
   {
     for (uint32_t i = 0; i < mUpdateTracks.Length(); ++i) {
       if (mUpdateTracks[i].mID == aID) {
         return &mUpdateTracks[i];
       }
     }
@@ -816,17 +792,16 @@ protected:
   Mutex mMutex;
   // protected by mMutex
   StreamTime mUpdateKnownTracksTime;
   nsTArray<TrackData> mUpdateTracks;
   nsTArray<nsRefPtr<MediaStreamDirectListener> > mDirectListeners;
   bool mPullEnabled;
   bool mUpdateFinished;
   bool mDestroyed;
-  bool mNeedsMixing;
 };
 
 /**
  * Represents a connection between a ProcessedMediaStream and one of its
  * input streams.
 * We make these refcounted so that stream-related messages with MediaInputPort*
 * pointers can be sent to the main thread safely.
 *
@@ -1023,17 +998,17 @@ protected:
   // The list of all inputs that are currently enabled or waiting to be enabled.
   nsTArray<MediaInputPort*> mInputs;
   bool mAutofinish;
   // True if and only if this stream is in a cycle.
   // Updated by MediaStreamGraphImpl::UpdateStreamOrder.
   bool mInCycle;
 };
 
-// Returns ideal audio rate for processing.
+// Returns ideal audio rate for processing
 inline TrackRate IdealAudioRate() { return AudioStream::PreferredSampleRate(); }
 
 /**
 * Initially, at least, we will have a singleton MediaStreamGraph per
 * process. Each OfflineAudioContext object creates its own MediaStreamGraph
 * object too.
 */
class MediaStreamGraph {
--- a/content/media/MediaStreamGraphImpl.h
+++ b/content/media/MediaStreamGraphImpl.h
@@ -8,25 +8,22 @@
 #include "MediaStreamGraph.h"
 
 #include "mozilla/Monitor.h"
 #include "mozilla/TimeStamp.h"
 #include "nsIThread.h"
 #include "nsIRunnable.h"
 #include "Latency.h"
-#include "mozilla/WeakPtr.h"
 
 namespace mozilla {
 
 template <typename T>
 class LinkedList;
 
-class AudioMixer;
-
 /**
  * Assume we can run an iteration of the MediaStreamGraph loop in this much time
  * or less.
 * We try to run the control loop at this rate.
 */
 static const int MEDIA_GRAPH_TARGET_PERIOD_MS = 10;
 
 /**
@@ -321,19 +318,19 @@ public:
   /**
   * If aStream needs an audio stream but doesn't have one, create it.
   * If aStream doesn't need an audio stream but has one, destroy it.
   */
   void CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTime,
                                    MediaStream* aStream);
   /**
    * Queue audio (mix of stream audio and silence for blocked intervals)
-   * to the audio output stream. Returns the number of frames played.
+   * to the audio output stream.
    */
-  TrackTicks PlayAudio(MediaStream* aStream, GraphTime aFrom, GraphTime aTo);
+  void PlayAudio(MediaStream* aStream, GraphTime aFrom, GraphTime aTo);
   /**
    * Set the correct current video frame for stream aStream.
    */
   void PlayVideo(MediaStream* aStream);
   /**
    * No more data will be forthcoming for aStream. The stream will end
    * at the current buffer end point. The StreamBuffer's tracks must be
    * explicitly set to finished by the caller.
@@ -569,17 +566,13 @@ public:
   * True when a change has happened which requires us to recompute the stream
   * blocking order.
   */
  bool mStreamOrderDirty;
  /**
   * Hold a ref to the Latency logger
   */
  nsRefPtr<AsyncLatencyLogger> mLatencyLog;
-  /**
-   * If this is not null, all the audio output for the MSG will be mixed down.
-   */
-  nsAutoPtr<AudioMixer> mMixer;
};
}

#endif /* MEDIASTREAMGRAPHIMPL_H_ */
deleted file mode 100644
--- a/content/media/compiledtest/TestAudioMixer.cpp
+++ /dev/null
@@ -1,155 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "AudioMixer.h"
-#include <assert.h>
-
-using mozilla::AudioDataValue;
-using mozilla::AudioSampleFormat;
-
-/* In this test, the different audio stream and channels are always created to
- * cancel each other. */
-void MixingDone(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames)
-{
-  bool silent = true;
-  for (uint32_t i = 0; i < aChannels * aFrames; i++) {
-    if (aData[i] != 0.0) {
-      if (aFormat == mozilla::AUDIO_FORMAT_S16) {
-        fprintf(stderr, "Sample at %d is not silent: %d\n", i, (short)aData[i]);
-      } else {
-        fprintf(stderr, "Sample at %d is not silent: %f\n", i, (float)aData[i]);
-      }
-      silent = false;
-    }
-  }
-  if (!silent) {
-    MOZ_CRASH();
-  }
-}
-
-/* Helper function to give us the maximum and minimum value that don't clip,
- * for a given sample format (integer or floating-point). */
-template<typename T>
-T GetLowValue();
-
-template<typename T>
-T GetHighValue();
-
-template<>
-float GetLowValue<float>() {
-  return -1.0;
-}
-
-template<>
-short GetLowValue<short>() {
-  return -INT16_MAX;
-}
-
-template<>
-float GetHighValue<float>() {
-  return 1.0;
-}
-
-template<>
-short GetHighValue<short>() {
-  return INT16_MAX;
-}
-
-void FillBuffer(AudioDataValue* aBuffer, uint32_t aLength, AudioDataValue aValue)
-{
-  AudioDataValue* end = aBuffer + aLength;
-  while (aBuffer != end) {
-    *aBuffer++ = aValue;
-  }
-}
-
-int main(int argc, char* argv[]) {
-  const uint32_t CHANNEL_LENGTH = 256;
-  AudioDataValue a[CHANNEL_LENGTH * 2];
-  AudioDataValue b[CHANNEL_LENGTH * 2];
-  FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-  FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-  FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-  FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-
-  {
-    int iterations = 2;
-    mozilla::AudioMixer mixer(MixingDone);
-
-    fprintf(stderr, "Test AudioMixer constant buffer length.\n");
-
-    while (iterations--) {
-      mixer.Mix(a, 2, CHANNEL_LENGTH);
-      mixer.Mix(b, 2, CHANNEL_LENGTH);
-      mixer.FinishMixing();
-    }
-  }
-
-  {
-    mozilla::AudioMixer mixer(MixingDone);
-
-    fprintf(stderr, "Test AudioMixer variable buffer length.\n");
-
-    FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    mixer.Mix(a, 2, CHANNEL_LENGTH / 2);
-    mixer.Mix(b, 2, CHANNEL_LENGTH / 2);
-    mixer.FinishMixing();
-    FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-    FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-    FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-    FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    mixer.Mix(a, 2, CHANNEL_LENGTH / 2);
-    mixer.Mix(b, 2, CHANNEL_LENGTH / 2);
-    mixer.FinishMixing();
-  }
-
-  FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-  FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-
-  {
-    mozilla::AudioMixer mixer(MixingDone);
-    fprintf(stderr, "Test AudioMixer variable channel count.\n");
-
-    mixer.Mix(a, 1, CHANNEL_LENGTH);
-    mixer.Mix(b, 1, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 1, CHANNEL_LENGTH);
-    mixer.Mix(b, 1, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 1, CHANNEL_LENGTH);
-    mixer.Mix(b, 1, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-  }
-
-  {
-    mozilla::AudioMixer mixer(MixingDone);
-    fprintf(stderr, "Test AudioMixer variable stream count.\n");
-
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-  }
-
-  return 0;
-}
deleted file mode 100644
--- a/content/media/compiledtest/moz.build
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-CPP_UNIT_TESTS += [
-    'TestAudioMixer.cpp',
-]
-
-FAIL_ON_WARNINGS = True
-
-LOCAL_INCLUDES += [
-    '..',
-]
-
--- a/content/media/moz.build
+++ b/content/media/moz.build
@@ -7,18 +7,16 @@ PARALLEL_DIRS += [
   'encoder',
   'mediasource',
   'ogg',
   'webaudio',
   'webvtt'
 ]
 
-TEST_TOOL_DIRS += ['compiledtest']
-
 if CONFIG['MOZ_RAW']:
     PARALLEL_DIRS += ['raw']
 
 if CONFIG['MOZ_WAVE']:
     PARALLEL_DIRS += ['wave']
 
 if CONFIG['MOZ_WEBM']:
     PARALLEL_DIRS += ['webm']
@@ -55,17 +53,16 @@ TEST_DIRS += [
 ]
 
 EXPORTS += [
     'AbstractMediaDecoder.h',
     'AudioAvailableEventManager.h',
     'AudioChannelFormat.h',
     'AudioCompactor.h',
     'AudioEventTimeline.h',
-    'AudioMixer.h',
     'AudioNodeEngine.h',
     'AudioNodeExternalInputStream.h',
     'AudioNodeStream.h',
     'AudioSampleFormat.h',
     'AudioSegment.h',
     'AudioStream.h',
     'BufferDecoder.h',
     'BufferMediaResource.h',
--- a/content/media/webaudio/WebAudioUtils.cpp
+++ b/content/media/webaudio/WebAudioUtils.cpp
@@ -85,30 +85,10 @@ WebAudioUtils::SpeexResamplerProcess(Spe
 #else
   tmp.SetLength(*aInLen);
   ConvertAudioSamples(aIn, tmp.Elements(), *aInLen);
   int result = speex_resampler_process_float(aResampler, aChannel, tmp.Elements(), aInLen, aOut, aOutLen);
   return result;
 #endif
 }
 
-int
-WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
-                                     uint32_t aChannel,
-                                     const int16_t* aIn, uint32_t* aInLen,
-                                     int16_t* aOut, uint32_t* aOutLen)
-{
-#ifdef MOZ_SAMPLE_TYPE_S16
-  return speex_resampler_process_int(aResampler, aChannel, aIn, aInLen, aOut, aOutLen);
-#else
-  nsAutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp1;
-  nsAutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp2;
-  tmp1.SetLength(*aInLen);
-  tmp2.SetLength(*aOutLen);
-  ConvertAudioSamples(aIn, tmp1.Elements(), *aInLen);
-  int result = speex_resampler_process_float(aResampler, aChannel, tmp1.Elements(), aInLen, tmp2.Elements(), aOutLen);
-  ConvertAudioSamples(tmp2.Elements(), aOut, *aOutLen);
-  return result;
-#endif
-}
-
 }
 }
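The retained overload resamples int16 input through Speex's float path by widening the samples first, as the #else branch above shows. The same idea as a self-contained sketch — ResampleS16ToFloat is an invented name; speex_resampler_process_float is the real Speex resampler entry point:

#include <cstdint>
#include <vector>
#include "speex/speex_resampler.h"

int ResampleS16ToFloat(SpeexResamplerState* aResampler, uint32_t aChannel,
                       const int16_t* aIn, uint32_t* aInLen,
                       float* aOut, uint32_t* aOutLen)
{
  // Widen 16-bit samples to float (2^15 scaling) before resampling.
  std::vector<float> tmp(*aInLen);
  for (uint32_t i = 0; i < *aInLen; ++i) {
    tmp[i] = aIn[i] / 32768.0f;
  }
  return speex_resampler_process_float(aResampler, aChannel,
                                       tmp.data(), aInLen, aOut, aOutLen);
}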
--- a/content/media/webaudio/WebAudioUtils.h
+++ b/content/media/webaudio/WebAudioUtils.h
@@ -14,16 +14,17 @@
 #include "MediaSegment.h"
 
 // Forward declaration
 typedef struct SpeexResamplerState_ SpeexResamplerState;
 
 namespace mozilla {
 
 class AudioNodeStream;
+class MediaStream;
 
 namespace dom {
 
 class AudioParamTimeline;
 
 struct WebAudioUtils {
   static const uint32_t MaxChannelCount;
 
@@ -204,21 +205,15 @@ struct WebAudioUtils {
                         const float* aIn, uint32_t* aInLen,
                         float* aOut, uint32_t* aOutLen);
 
   static int
   SpeexResamplerProcess(SpeexResamplerState* aResampler,
                         uint32_t aChannel,
                         const int16_t* aIn, uint32_t* aInLen,
                         float* aOut, uint32_t* aOutLen);
-
-  static int
-  SpeexResamplerProcess(SpeexResamplerState* aResampler,
-                        uint32_t aChannel,
-                        const int16_t* aIn, uint32_t* aInLen,
-                        int16_t* aOut, uint32_t* aOutLen);
-};
+};
 
 }
 }
 
 #endif
deleted file mode 100644
--- a/content/media/webrtc/AudioOutputObserver.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef AUDIOOUTPUTOBSERVER_H_
-#define AUDIOOUTPUTOBSERVER_H_
-
-#include "mozilla/StaticPtr.h"
-
-namespace webrtc {
-class SingleRwFifo;
-}
-
-namespace mozilla {
-
-typedef struct FarEndAudioChunk_ {
-  uint16_t mSamples;
-  bool mOverrun;
-  int16_t mData[1]; // variable-length
-} FarEndAudioChunk;
-
-// XXX Really a singleton currently
-class AudioOutputObserver // : public MSGOutputObserver
-{
-public:
-  AudioOutputObserver();
-  virtual ~AudioOutputObserver();
-
-  void Clear();
-  void InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aSamples, bool aOverran,
-                    int aFreq, int aChannels, AudioSampleFormat aFormat);
-  uint32_t PlayoutFrequency() { return mPlayoutFreq; }
-  uint32_t PlayoutChannels() { return mPlayoutChannels; }
-
-  FarEndAudioChunk *Pop();
-  uint32_t Size();
-
-private:
-  uint32_t mPlayoutFreq;
-  uint32_t mPlayoutChannels;
-
-  nsAutoPtr<webrtc::SingleRwFifo> mPlayoutFifo;
-  uint32_t mChunkSize;
-
-  // chunking to 10ms support
-  nsAutoPtr<FarEndAudioChunk> mSaved;
-  uint32_t mSamplesSaved;
-};
-
-// XXX until there's a registration API in MSG
-extern StaticAutoPtr<AudioOutputObserver> gFarendObserver;
-
-}
-
-#endif
--- a/content/media/webrtc/MediaEngine.h
+++ b/content/media/webrtc/MediaEngine.h
@@ -96,18 +96,17 @@ public:
                           TrackTicks &aLastEndTime) = 0;
 
   /* Stop the device and release the corresponding MediaStream */
   virtual nsresult Stop(SourceMediaStream *aSource, TrackID aID) = 0;
 
   /* Change device configuration. */
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) = 0;
+                          bool aNoiseOn, uint32_t aNoise) = 0;
 
   /* Returns true if a source represents a fake capture device and
    * false otherwise
    */
   virtual bool IsFake() = 0;
 
   /* Return false if device is currently allocated or started */
   bool IsAvailable() {
--- a/content/media/webrtc/MediaEngineDefault.h
+++ b/content/media/webrtc/MediaEngineDefault.h
@@ -43,18 +43,17 @@ public:
   virtual nsresult Allocate(const MediaEnginePrefs &aPrefs);
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
+                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime);
 
   virtual bool IsFake() {
     return true;
@@ -96,18 +95,17 @@ public:
   virtual nsresult Allocate(const MediaEnginePrefs &aPrefs);
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
+                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime) {}
 
   virtual bool IsFake() {
     return true;
--- a/content/media/webrtc/MediaEngineTabVideoSource.cpp
+++ b/content/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -274,17 +274,17 @@ MediaEngineTabVideoSource::Draw() {
 
 nsresult
 MediaEngineTabVideoSource::Stop(mozilla::SourceMediaStream*, mozilla::TrackID)
 {
   NS_DispatchToMainThread(new StopRunnable(this));
   return NS_OK;
 }
 
 nsresult
-MediaEngineTabVideoSource::Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t)
+MediaEngineTabVideoSource::Config(bool, uint32_t, bool, uint32_t, bool, uint32_t)
 {
   return NS_OK;
 }
 
 bool
 MediaEngineTabVideoSource::IsFake()
 {
   return false;
--- a/content/media/webrtc/MediaEngineTabVideoSource.h
+++ b/content/media/webrtc/MediaEngineTabVideoSource.h
@@ -21,17 +21,17 @@ class MediaEngineTabVideoSource : public
     virtual void GetName(nsAString_internal&);
     virtual void GetUUID(nsAString_internal&);
     virtual nsresult Allocate(const mozilla::MediaEnginePrefs&);
     virtual nsresult Deallocate();
     virtual nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID);
     virtual nsresult Snapshot(uint32_t, nsIDOMFile**);
     virtual void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, mozilla::TrackTicks&);
     virtual nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID);
-    virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t);
+    virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t);
     virtual bool IsFake();
     void Draw();
 
     class StartRunnable : public nsRunnable {
     public:
       StartRunnable(MediaEngineTabVideoSource *videoSource) : mVideoSource(videoSource) {}
       NS_IMETHOD Run();
       nsRefPtr<MediaEngineTabVideoSource> mVideoSource;
--- a/content/media/webrtc/MediaEngineWebRTC.cpp +++ b/content/media/webrtc/MediaEngineWebRTC.cpp @@ -55,18 +55,16 @@ MediaEngineWebRTC::MediaEngineWebRTC(Med nsCOMPtr<nsIComponentRegistrar> compMgr; NS_GetComponentRegistrar(getter_AddRefs(compMgr)); if (compMgr) { compMgr->IsContractIDRegistered(NS_TABSOURCESERVICE_CONTRACTID, &mHasTabVideoSource); } #else AsyncLatencyLogger::Get()->AddRef(); #endif - // XXX - gFarendObserver = new AudioOutputObserver(); } void MediaEngineWebRTC::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources) { #ifdef MOZ_B2G_CAMERA MutexAutoLock lock(mMutex);
--- a/content/media/webrtc/MediaEngineWebRTC.h +++ b/content/media/webrtc/MediaEngineWebRTC.h @@ -35,34 +35,32 @@ #include "webrtc/voice_engine/include/voe_base.h" #include "webrtc/voice_engine/include/voe_codec.h" #include "webrtc/voice_engine/include/voe_hardware.h" #include "webrtc/voice_engine/include/voe_network.h" #include "webrtc/voice_engine/include/voe_audio_processing.h" #include "webrtc/voice_engine/include/voe_volume_control.h" #include "webrtc/voice_engine/include/voe_external_media.h" #include "webrtc/voice_engine/include/voe_audio_processing.h" -#include "webrtc/voice_engine/include/voe_call_report.h" // Video Engine #include "webrtc/video_engine/include/vie_base.h" #include "webrtc/video_engine/include/vie_codec.h" #include "webrtc/video_engine/include/vie_render.h" #include "webrtc/video_engine/include/vie_capture.h" #ifdef MOZ_B2G_CAMERA #include "CameraControlListener.h" #include "ICameraControl.h" #include "ImageContainer.h" #include "nsGlobalWindow.h" #include "prprf.h" #include "mozilla/Hal.h" #endif #include "NullTransport.h" -#include "AudioOutputObserver.h" namespace mozilla { #ifdef MOZ_B2G_CAMERA class CameraAllocateRunnable; class GetCameraNameRunnable; #endif @@ -144,18 +142,17 @@ public: virtual void GetUUID(nsAString&); virtual nsresult Allocate(const MediaEnginePrefs &aPrefs); virtual nsresult Deallocate(); virtual nsresult Start(SourceMediaStream*, TrackID); virtual nsresult Stop(SourceMediaStream*, TrackID); virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile); virtual nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn, uint32_t aAGC, - bool aNoiseOn, uint32_t aNoise, - int32_t aPlayoutDelay) { return NS_OK; }; + bool aNoiseOn, uint32_t aNoise) { return NS_OK; }; virtual void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream *aSource, TrackID aId, StreamTime aDesiredTime, TrackTicks &aLastEndTime); virtual bool IsFake() { return false; @@ -256,23 +253,20 @@ class MediaEngineWebRTCAudioSource : pub public: MediaEngineWebRTCAudioSource(webrtc::VoiceEngine* aVoiceEnginePtr, int aIndex, const char* name, const char* uuid) : mVoiceEngine(aVoiceEnginePtr) , mMonitor("WebRTCMic.Monitor") , mCapIndex(aIndex) , mChannel(-1) , mInitDone(false) - , mStarted(false) - , mSamples(0) , mEchoOn(false), mAgcOn(false), mNoiseOn(false) , mEchoCancel(webrtc::kEcDefault) , mAGC(webrtc::kAgcDefault) , mNoiseSuppress(webrtc::kNsDefault) - , mPlayoutDelay(0) , mNullTransport(nullptr) { MOZ_ASSERT(aVoiceEnginePtr); mState = kReleased; mDeviceName.Assign(NS_ConvertUTF8toUTF16(name)); mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid)); Init(); } ~MediaEngineWebRTCAudioSource() { Shutdown(); } @@ -282,18 +276,17 @@ public: virtual nsresult Allocate(const MediaEnginePrefs &aPrefs); virtual nsresult Deallocate(); virtual nsresult Start(SourceMediaStream*, TrackID); virtual nsresult Stop(SourceMediaStream*, TrackID); virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile); virtual nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn, uint32_t aAGC, - bool aNoiseOn, uint32_t aNoise, - int32_t aPlayoutDelay); + bool aNoiseOn, uint32_t aNoise); virtual void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream *aSource, TrackID aId, StreamTime aDesiredTime, TrackTicks &aLastEndTime); virtual bool IsFake() { @@ -314,54 +307,48 @@ private: void Init(); void Shutdown(); webrtc::VoiceEngine* mVoiceEngine; ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase; ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender; ScopedCustomReleasePtr<webrtc::VoENetwork> 
mVoENetwork; ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing; - ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport; // mMonitor protects mSources[] access/changes, and transitions of mState // from kStarted to kStopped (which are combined with EndTrack()). // mSources[] is accessed from webrtc threads. Monitor mMonitor; nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW int mCapIndex; int mChannel; TrackID mTrackID; bool mInitDone; - bool mStarted; - int mSamples; // int to avoid conversions when comparing/etc to samplingFreq & length nsString mDeviceName; nsString mDeviceUUID; bool mEchoOn, mAgcOn, mNoiseOn; webrtc::EcModes mEchoCancel; webrtc::AgcModes mAGC; webrtc::NsModes mNoiseSuppress; - int32_t mPlayoutDelay; NullTransport *mNullTransport; }; class MediaEngineWebRTC : public MediaEngine { public: MediaEngineWebRTC(MediaEnginePrefs &aPrefs); ~MediaEngineWebRTC() { Shutdown(); #ifdef MOZ_B2G_CAMERA AsyncLatencyLogger::Get()->Release(); #endif - // XXX - gFarendObserver = nullptr; } // Clients should ensure to clean-up sources video/audio sources // before invoking Shutdown on this class. void Shutdown(); virtual void EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSource> >*); virtual void EnumerateAudioDevices(nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp +++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp @@ -1,38 +1,22 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "MediaEngineWebRTC.h" -#include <stdio.h> -#include <algorithm> -#include "mozilla/Assertions.h" - -// scoped_ptr.h uses FF -#ifdef FF -#undef FF -#endif -#include "webrtc/modules/audio_device/opensl/single_rw_fifo.h" #define CHANNELS 1 #define ENCODING "L16" #define DEFAULT_PORT 5555 #define SAMPLE_RATE 256000 #define SAMPLE_FREQUENCY 16000 #define SAMPLE_LENGTH ((SAMPLE_FREQUENCY*10)/1000) -// These are restrictions from the webrtc.org code -#define MAX_CHANNELS 2 -#define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100 - -#define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10 -static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH"); - namespace mozilla { #ifdef LOG #undef LOG #endif #ifdef PR_LOGGING extern PRLogModuleInfo* GetMediaManagerLog(); @@ -41,127 +25,16 @@ extern PRLogModuleInfo* GetMediaManagerL #define LOG(msg) #endif /** * Webrtc audio source. */ NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource) -// XXX temp until MSG supports registration -StaticAutoPtr<AudioOutputObserver> gFarendObserver; - -AudioOutputObserver::AudioOutputObserver() - : mPlayoutFreq(0) - , mPlayoutChannels(0) - , mChunkSize(0) - , mSamplesSaved(0) -{ - // Buffers of 10ms chunks - mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10); -} - -AudioOutputObserver::~AudioOutputObserver() -{ -} - -void -AudioOutputObserver::Clear() -{ - while (mPlayoutFifo->size() > 0) { - (void) mPlayoutFifo->Pop(); - } -} - -FarEndAudioChunk * -AudioOutputObserver::Pop() -{ - return (FarEndAudioChunk *) mPlayoutFifo->Pop(); -} - -uint32_t -AudioOutputObserver::Size() -{ - return mPlayoutFifo->size(); -} - -// static -void -AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aSamples, bool aOverran, - int aFreq, int aChannels, AudioSampleFormat aFormat) -{ - if (mPlayoutChannels != 0) { - if (mPlayoutChannels != static_cast<uint32_t>(aChannels)) { - MOZ_CRASH(); - } - } else { - MOZ_ASSERT(aChannels <= MAX_CHANNELS); - mPlayoutChannels = static_cast<uint32_t>(aChannels); - } - if (mPlayoutFreq != 0) { - if (mPlayoutFreq != static_cast<uint32_t>(aFreq)) { - MOZ_CRASH(); - } - } else { - MOZ_ASSERT(aFreq <= MAX_SAMPLING_FREQ); - MOZ_ASSERT(!(aFreq % 100), "Sampling rate for far end data should be multiple of 100."); - mPlayoutFreq = aFreq; - mChunkSize = aFreq/100; // 10ms - } - -#ifdef LOG_FAREND_INSERTION - static FILE *fp = fopen("insertfarend.pcm","wb"); -#endif - - if (mSaved) { - // flag overrun as soon as possible, and only once - mSaved->mOverrun = aOverran; - aOverran = false; - } - // Rechunk to 10ms. - // The AnalyzeReverseStream() and WebRtcAec_BufferFarend() functions insist on 10ms - // samples per call. Annoying... 
- while (aSamples) { - if (!mSaved) { - mSaved = (FarEndAudioChunk *) moz_xmalloc(sizeof(FarEndAudioChunk) + - (mChunkSize * aChannels - 1)*sizeof(int16_t)); - mSaved->mSamples = mChunkSize; - mSaved->mOverrun = aOverran; - aOverran = false; - } - uint32_t to_copy = mChunkSize - mSamplesSaved; - if (to_copy > aSamples) { - to_copy = aSamples; - } - - int16_t *dest = &(mSaved->mData[mSamplesSaved * aChannels]); - ConvertAudioSamples(aBuffer, dest, to_copy * aChannels); - -#ifdef LOG_FAREND_INSERTION - if (fp) { - fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp); - } -#endif - aSamples -= to_copy; - mSamplesSaved += to_copy; - - if (mSamplesSaved >= mChunkSize) { - int free_slots = mPlayoutFifo->capacity() - mPlayoutFifo->size(); - if (free_slots <= 0) { - // XXX We should flag an overrun for the reader. We can't drop data from it due to - // thread safety issues. - break; - } else { - mPlayoutFifo->Push((int8_t *) mSaved.forget()); // takes ownership - mSamplesSaved = 0; - } - } - } -} - void MediaEngineWebRTCAudioSource::GetName(nsAString& aName) { if (mInitDone) { aName.Assign(mDeviceName); } return; @@ -175,64 +48,55 @@ MediaEngineWebRTCAudioSource::GetUUID(ns } return; } nsresult MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn, uint32_t aAGC, - bool aNoiseOn, uint32_t aNoise, - int32_t aPlayoutDelay) + bool aNoiseOn, uint32_t aNoise) { LOG(("Audio config: aec: %d, agc: %d, noise: %d", aEchoOn ? aEcho : -1, aAgcOn ? aAGC : -1, aNoiseOn ? aNoise : -1)); - bool update_echo = (mEchoOn != aEchoOn); - bool update_agc = (mAgcOn != aAgcOn); - bool update_noise = (mNoiseOn != aNoiseOn); - mEchoOn = aEchoOn; + bool update_agc = (mAgcOn == aAgcOn); + bool update_noise = (mNoiseOn == aNoiseOn); mAgcOn = aAgcOn; mNoiseOn = aNoiseOn; - if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged) { - if (mEchoCancel != (webrtc::EcModes) aEcho) { - update_echo = true; - mEchoCancel = (webrtc::EcModes) aEcho; - } - } if ((webrtc::AgcModes) aAGC != webrtc::kAgcUnchanged) { if (mAGC != (webrtc::AgcModes) aAGC) { update_agc = true; mAGC = (webrtc::AgcModes) aAGC; } } if ((webrtc::NsModes) aNoise != webrtc::kNsUnchanged) { if (mNoiseSuppress != (webrtc::NsModes) aNoise) { update_noise = true; mNoiseSuppress = (webrtc::NsModes) aNoise; } } - mPlayoutDelay = aPlayoutDelay; if (mInitDone) { int error; +#if 0 + // Until we can support feeding our full output audio from the browser + // through the MediaStream, this won't work. Or we need to move AEC to + // below audio input and output, perhaps invoked from here. 
+ mEchoOn = aEchoOn; + if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged) + mEchoCancel = (webrtc::EcModes) aEcho; + mVoEProcessing->SetEcStatus(mEchoOn, aEcho); +#else + (void) aEcho; (void) aEchoOn; (void) mEchoCancel; // suppress warnings +#endif - if (update_echo && - 0 != (error = mVoEProcessing->SetEcStatus(mEchoOn, (webrtc::EcModes) aEcho))) { - LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error)); - // Overhead of capturing all the time is very low (<0.1% of an audio only call) - if (mEchoOn) { - if (0 != (error = mVoEProcessing->SetEcMetricsStatus(true))) { - LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error)); - } - } - } if (update_agc && 0 != (error = mVoEProcessing->SetAgcStatus(mAgcOn, (webrtc::AgcModes) aAGC))) { LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error)); } if (update_noise && 0 != (error = mVoEProcessing->SetNsStatus(mNoiseOn, (webrtc::NsModes) aNoise))) { LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error)); } @@ -289,40 +153,32 @@ MediaEngineWebRTCAudioSource::Start(Sour { MonitorAutoLock lock(mMonitor); mSources.AppendElement(aStream); } AudioSegment* segment = new AudioSegment(); aStream->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment); aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX); - // XXX Make this based on the pref. - aStream->RegisterForAudioMixing(); LOG(("Start audio for stream %p", aStream)); if (mState == kStarted) { MOZ_ASSERT(aID == mTrackID); return NS_OK; } mState = kStarted; mTrackID = aID; // Make sure logger starts before capture AsyncLatencyLogger::Get(true); - // Register output observer - // XXX - MOZ_ASSERT(gFarendObserver); - gFarendObserver->Clear(); - // Configure audio processing in webrtc code Config(mEchoOn, webrtc::kEcUnchanged, mAgcOn, webrtc::kAgcUnchanged, - mNoiseOn, webrtc::kNsUnchanged, - mPlayoutDelay); + mNoiseOn, webrtc::kNsUnchanged); if (mVoEBase->StartReceive(mChannel)) { return NS_ERROR_FAILURE; } if (mVoEBase->StartSend(mChannel)) { return NS_ERROR_FAILURE; } @@ -405,21 +261,16 @@ MediaEngineWebRTCAudioSource::Init() return; } mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine); if (!mVoEProcessing) { return; } - mVoECallReport = webrtc::VoECallReport::GetInterface(mVoiceEngine); - if (!mVoECallReport) { - return; - } - mChannel = mVoEBase->CreateChannel(); if (mChannel < 0) { return; } mNullTransport = new NullTransport(); if (mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) { return; } @@ -506,60 +357,16 @@ MediaEngineWebRTCAudioSource::Shutdown() typedef int16_t sample; void MediaEngineWebRTCAudioSource::Process(int channel, webrtc::ProcessingTypes type, sample* audio10ms, int length, int samplingFreq, bool isStereo) { - // On initial capture, throw away all far-end data except the most recent sample - // since it's already irrelevant and we want to avoid confusing the AEC far-end - // input code with "old" audio.
- if (!mStarted) { - mStarted = true; - while (gFarendObserver->Size() > 1) { - FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0 - free(buffer); - } - } - - while (gFarendObserver->Size() > 0) { - FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0 - if (buffer) { - int length = buffer->mSamples; - if (mVoERender->ExternalPlayoutData(buffer->mData, - gFarendObserver->PlayoutFrequency(), - gFarendObserver->PlayoutChannels(), - mPlayoutDelay, - length) == -1) { - return; - } - } - free(buffer); - } - -#ifdef PR_LOGGING - mSamples += length; - if (mSamples > samplingFreq) { - mSamples %= samplingFreq; // just in case mSamples >> samplingFreq - if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) { - webrtc::EchoStatistics echo; - - mVoECallReport->GetEchoMetricSummary(echo); -#define DUMP_STATVAL(x) (x).min, (x).max, (x).average - LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d", - DUMP_STATVAL(echo.erl), - DUMP_STATVAL(echo.erle), - DUMP_STATVAL(echo.rerl), - DUMP_STATVAL(echo.a_nlp))); - } - } -#endif - MonitorAutoLock lock(mMonitor); if (mState != kStarted) return; uint32_t len = mSources.Length(); for (uint32_t i = 0; i < len; i++) { nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
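For reference, the far-end handling that the backout deletes above reduces to a simple rechunking loop: InsertFarEnd() accumulated whatever the audio output produced and emitted exactly 10 ms blocks, because AnalyzeReverseStream() and WebRtcAec_BufferFarend() accept only 10 ms per call. A minimal standalone sketch of that idea (class and callback names are illustrative, not the tree's API):

#include <algorithm>
#include <cstdint>
#include <vector>

class FarEndRechunker {
 public:
  // aFreq must be a multiple of 100 so a 10 ms chunk is a whole frame count.
  FarEndRechunker(int aFreq, int aChannels)
    : mChunkFrames(aFreq / 100), mChannels(aChannels) {}

  // Accepts any number of interleaved frames; fires aOnChunk per 10 ms block.
  template <typename OnChunk>
  void Insert(const int16_t* aSamples, int aFrames, OnChunk aOnChunk) {
    while (aFrames > 0) {
      int have = static_cast<int>(mPending.size()) / mChannels;
      int take = std::min(aFrames, mChunkFrames - have);
      mPending.insert(mPending.end(), aSamples, aSamples + take * mChannels);
      aSamples += take * mChannels;
      aFrames -= take;
      if (have + take == mChunkFrames) {
        aOnChunk(mPending.data(), mChunkFrames);  // exactly one 10 ms block
        mPending.clear();
      }
    }
  }

 private:
  const int mChunkFrames;         // frames per 10 ms at aFreq
  const int mChannels;
  std::vector<int16_t> mPending;  // partial chunk carried across calls
};

The removed code additionally pushed each completed chunk through a lock-free FIFO to cross from the output thread to the capture thread; only the rechunking itself is shown here.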
--- a/content/media/webrtc/moz.build +++ b/content/media/webrtc/moz.build @@ -7,18 +7,17 @@ XPIDL_MODULE = 'content_webrtc' EXPORTS += [ 'MediaEngine.h', 'MediaEngineDefault.h', ] if CONFIG['MOZ_WEBRTC']: - EXPORTS += ['AudioOutputObserver.h', - 'LoadManager.h', + EXPORTS += ['LoadManager.h', 'LoadManagerFactory.h', 'LoadMonitor.h', 'MediaEngineWebRTC.h'] UNIFIED_SOURCES += [ 'LoadManagerFactory.cpp', 'MediaEngineTabVideoSource.cpp', 'MediaEngineWebRTCAudio.cpp', 'MediaEngineWebRTCVideo.cpp',
--- a/dom/media/MediaManager.cpp +++ b/dom/media/MediaManager.cpp @@ -395,40 +395,23 @@ MediaDevice::GetSource() /** * A subclass that we only use to stash internal pointers to MediaStreamGraph objects * that need to be cleaned up. */ class nsDOMUserMediaStream : public DOMLocalMediaStream { public: static already_AddRefed<nsDOMUserMediaStream> - CreateTrackUnionStream(nsIDOMWindow* aWindow, - MediaEngineSource *aAudioSource, - MediaEngineSource *aVideoSource) + CreateTrackUnionStream(nsIDOMWindow* aWindow, uint32_t aHintContents) { - DOMMediaStream::TrackTypeHints hints = - (aAudioSource ? DOMMediaStream::HINT_CONTENTS_AUDIO : 0) | - (aVideoSource ? DOMMediaStream::HINT_CONTENTS_VIDEO : 0); - - nsRefPtr<nsDOMUserMediaStream> stream = new nsDOMUserMediaStream(aAudioSource); - stream->InitTrackUnionStream(aWindow, hints); + nsRefPtr<nsDOMUserMediaStream> stream = new nsDOMUserMediaStream(); + stream->InitTrackUnionStream(aWindow, aHintContents); return stream.forget(); } - nsDOMUserMediaStream(MediaEngineSource *aAudioSource) : - mAudioSource(aAudioSource), - mEchoOn(true), - mAgcOn(false), - mNoiseOn(true), - mEcho(webrtc::kEcDefault), - mAgc(webrtc::kAgcDefault), - mNoise(webrtc::kNsDefault), - mPlayoutDelay(20) - {} - virtual ~nsDOMUserMediaStream() { Stop(); if (mPort) { mPort->Destroy(); } if (mSourceStream) { @@ -448,31 +431,16 @@ public: { if (mSourceStream) { mSourceStream->AddDirectListener(aListener); return true; // application should ignore NotifyQueuedTrackData } return false; } - virtual void - AudioConfig(bool aEchoOn, uint32_t aEcho, - bool aAgcOn, uint32_t aAgc, - bool aNoiseOn, uint32_t aNoise, - int32_t aPlayoutDelay) - { - mEchoOn = aEchoOn; - mEcho = aEcho; - mAgcOn = aAgcOn; - mAgc = aAgc; - mNoiseOn = aNoiseOn; - mNoise = aNoise; - mPlayoutDelay = aPlayoutDelay; - } - virtual void RemoveDirectListener(MediaStreamDirectListener *aListener) MOZ_OVERRIDE { if (mSourceStream) { mSourceStream->RemoveDirectListener(aListener); } } // let us intervene for direct listeners when someone does track.enabled = false @@ -485,24 +453,16 @@ public: // forward the request to the source and translate the ID GetStream()->AsProcessedStream()->ForwardTrackEnabled(aID, aEnabled); } // The actual MediaStream is a TrackUnionStream. But these resources need to be // explicitly destroyed too. nsRefPtr<SourceMediaStream> mSourceStream; nsRefPtr<MediaInputPort> mPort; - nsRefPtr<MediaEngineSource> mAudioSource; // so we can turn on AEC - bool mEchoOn; - bool mAgcOn; - bool mNoiseOn; - uint32_t mEcho; - uint32_t mAgc; - uint32_t mNoise; - uint32_t mPlayoutDelay; }; /** * Creates a MediaStream, attaches a listener and fires off a success callback * to the DOM with the stream. We also pass in the error callback so it can * be released correctly. * * All of this must be done on the main thread! @@ -573,67 +533,41 @@ public: // the desired tracks in the MediaStreamGraph) or when // DOMMediaStream::NotifyMediaStreamGraphShutdown is called. 
nsRefPtr<DOMMediaStream> mStream; }; NS_IMETHOD Run() { - int32_t aec = (int32_t) webrtc::kEcUnchanged; - int32_t agc = (int32_t) webrtc::kAgcUnchanged; - int32_t noise = (int32_t) webrtc::kNsUnchanged; - bool aec_on = false, agc_on = false, noise_on = false; - int32_t playout_delay = 0; - NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); nsPIDOMWindow *window = static_cast<nsPIDOMWindow*> (nsGlobalWindow::GetInnerWindowWithId(mWindowID)); // We're on main-thread, and the windowlist can only // be invalidated from the main-thread (see OnNavigation) StreamListeners* listeners = mManager->GetWindowListeners(mWindowID); if (!listeners || !window || !window->GetExtantDoc()) { // This window is no longer live. mListener has already been removed return NS_OK; } -#ifdef MOZ_WEBRTC - // Right now these configs are only of use if webrtc is available - nsresult rv; - nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv); - if (NS_SUCCEEDED(rv)) { - nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs); + // Create a media stream. + DOMMediaStream::TrackTypeHints hints = + (mAudioSource ? DOMMediaStream::HINT_CONTENTS_AUDIO : 0) | + (mVideoSource ? DOMMediaStream::HINT_CONTENTS_VIDEO : 0); - if (branch) { - branch->GetBoolPref("media.getusermedia.aec_enabled", &aec_on); - branch->GetIntPref("media.getusermedia.aec", &aec); - branch->GetBoolPref("media.getusermedia.agc_enabled", &agc_on); - branch->GetIntPref("media.getusermedia.agc", &agc); - branch->GetBoolPref("media.getusermedia.noise_enabled", &noise_on); - branch->GetIntPref("media.getusermedia.noise", &noise); - branch->GetIntPref("media.getusermedia.playout_delay", &playout_delay); - } - } -#endif - // Create a media stream. nsRefPtr<nsDOMUserMediaStream> trackunion = - nsDOMUserMediaStream::CreateTrackUnionStream(window, mAudioSource, - mVideoSource); + nsDOMUserMediaStream::CreateTrackUnionStream(window, hints); if (!trackunion) { nsCOMPtr<nsIDOMGetUserMediaErrorCallback> error = mError.forget(); LOG(("Returning error for getUserMedia() - no stream")); error->OnError(NS_LITERAL_STRING("NO_STREAM")); return NS_OK; } - trackunion->AudioConfig(aec_on, (uint32_t) aec, - agc_on, (uint32_t) agc, - noise_on, (uint32_t) noise, - playout_delay); - MediaStreamGraph* gm = MediaStreamGraph::GetInstance(); nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr); // connect the source stream to the track union stream to avoid us blocking trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true); nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()-> AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT); @@ -653,35 +587,55 @@ public: // that the MediaStream has started consuming. The listener is freed // when the page is invalidated (on navigation or close). mListener->Activate(stream.forget(), mAudioSource, mVideoSource); // Note: includes JS callbacks; must be released on MainThread TracksAvailableCallback* tracksAvailableCallback = new TracksAvailableCallback(mManager, mSuccess, mWindowID, trackunion); -#ifdef MOZ_WEBRTC - mListener->AudioConfig(aec_on, (uint32_t) aec, - agc_on, (uint32_t) agc, - noise_on, (uint32_t) noise, - playout_delay); -#endif - // Dispatch to the media thread to ask it to start the sources, // because that can take a while. // Pass ownership of trackunion to the MediaOperationRunnable // to ensure it's kept alive until the MediaOperationRunnable runs (at least). 
nsIThread *mediaThread = MediaManager::GetThread(); nsRefPtr<MediaOperationRunnable> runnable( new MediaOperationRunnable(MEDIA_START, mListener, trackunion, tracksAvailableCallback, mAudioSource, mVideoSource, false, mWindowID, mError.forget())); mediaThread->Dispatch(runnable, NS_DISPATCH_NORMAL); +#ifdef MOZ_WEBRTC + // Right now these configs are only of use if webrtc is available + nsresult rv; + nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv); + if (NS_SUCCEEDED(rv)) { + nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs); + + if (branch) { + int32_t aec = (int32_t) webrtc::kEcUnchanged; + int32_t agc = (int32_t) webrtc::kAgcUnchanged; + int32_t noise = (int32_t) webrtc::kNsUnchanged; + bool aec_on = false, agc_on = false, noise_on = false; + + branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on); + branch->GetIntPref("media.peerconnection.aec", &aec); + branch->GetBoolPref("media.peerconnection.agc_enabled", &agc_on); + branch->GetIntPref("media.peerconnection.agc", &agc); + branch->GetBoolPref("media.peerconnection.noise_enabled", &noise_on); + branch->GetIntPref("media.peerconnection.noise", &noise); + + mListener->AudioConfig(aec_on, (uint32_t) aec, + agc_on, (uint32_t) agc, + noise_on, (uint32_t) noise); + } + } +#endif + // We won't need mError now. mError = nullptr; return NS_OK; } private: nsCOMPtr<nsIDOMGetUserMediaSuccessCallback> mSuccess; nsCOMPtr<nsIDOMGetUserMediaErrorCallback> mError;
--- a/dom/media/MediaManager.h +++ b/dom/media/MediaManager.h @@ -122,26 +122,25 @@ public: // implement in .cpp to avoid circular dependency with MediaOperationRunnable // Can be invoked from EITHER MainThread or MSG thread void Invalidate(); void AudioConfig(bool aEchoOn, uint32_t aEcho, bool aAgcOn, uint32_t aAGC, - bool aNoiseOn, uint32_t aNoise, - int32_t aPlayoutDelay) + bool aNoiseOn, uint32_t aNoise) { if (mAudioSource) { #ifdef MOZ_WEBRTC // Right now these configs are only of use if webrtc is available RUN_ON_THREAD(mMediaThread, WrapRunnable(nsRefPtr<MediaEngineSource>(mAudioSource), // threadsafe &MediaEngineSource::Config, - aEchoOn, aEcho, aAgcOn, aAGC, aNoiseOn, aNoise, aPlayoutDelay), + aEchoOn, aEcho, aAgcOn, aAGC, aNoiseOn, aNoise), NS_DISPATCH_NORMAL); #endif } } void Remove() {
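The RUN_ON_THREAD/WrapRunnable dispatch in AudioConfig() above exists so the webrtc engine is only reconfigured from the media thread. Stripped of XPCOM, the pattern is just a task queue of bound calls; a simplified model with std::function standing in for WrapRunnable (a sketch, not Mozilla's threading machinery):

#include <functional>
#include <mutex>
#include <queue>
#include <utility>

class TaskQueue {
 public:
  // Any thread: queue a bound call for the owning thread to run later.
  void Dispatch(std::function<void()> aTask) {
    std::lock_guard<std::mutex> lock(mMutex);
    mTasks.push(std::move(aTask));
  }

  // Owning (media) thread only: run everything queued so far.
  void Drain() {
    for (;;) {
      std::function<void()> task;
      {
        std::lock_guard<std::mutex> lock(mMutex);
        if (mTasks.empty()) {
          return;
        }
        task = std::move(mTasks.front());
        mTasks.pop();
      }
      task();
    }
  }

 private:
  std::mutex mMutex;
  std::queue<std::function<void()>> mTasks;
};

// Usage mirroring AudioConfig(): bind the member call and its arguments.
//   queue.Dispatch([=] { source->Config(aEchoOn, aEcho, aAgcOn, aAGC,
//                                       aNoiseOn, aNoise); });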
--- a/media/webrtc/moz.build +++ b/media/webrtc/moz.build @@ -9,21 +9,18 @@ include('/build/gyp.mozbuild') webrtc_non_unified_sources = [ 'trunk/webrtc/common_audio/vad/vad_core.c', # Because of name clash in the kInitCheck variable 'trunk/webrtc/common_audio/vad/webrtc_vad.c', # Because of name clash in the kInitCheck variable 'trunk/webrtc/modules/audio_coding/codecs/g722/g722_decode.c', # Because of name clash in the saturate function 'trunk/webrtc/modules/audio_coding/codecs/g722/g722_encode.c', # Because of name clash in the saturate function 'trunk/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c', # Because of name clash in the kDampFilter variable 'trunk/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c', # Because of name clash in the kDampFilter variable 'trunk/webrtc/modules/audio_coding/neteq4/audio_vector.cc', # Because of explicit template specializations - 'trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc', # Because of LATE() - 'trunk/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc',# Because of LATE() 'trunk/webrtc/modules/audio_device/opensl/opensles_input.cc', # Because of name clash in the kOption variable 'trunk/webrtc/modules/audio_device/opensl/opensles_output.cc', # Because of name clash in the kOption variable - 'trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc', # Because of name clash with #define FF 'trunk/webrtc/modules/audio_device/win/audio_device_core_win.cc', # Because of ordering assumptions in strsafe.h 'trunk/webrtc/modules/audio_processing/aec/aec_core.c', # Because of name clash in the ComfortNoise function 'trunk/webrtc/modules/audio_processing/aecm/aecm_core.c', # Because of name clash in the ComfortNoise function 'trunk/webrtc/modules/audio_processing/aecm/echo_control_mobile.c', # Because of name clash in the kInitCheck variable 'trunk/webrtc/modules/audio_processing/agc/analog_agc.c', # Because of name clash in the kInitCheck variable 'trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc', # Because of name clash in the MapError function 'trunk/webrtc/modules/audio_processing/echo_control_mobile_impl.cc', # Because of name clash in the MapError function 'trunk/webrtc/modules/audio_processing/gain_control_impl.cc', # Because of name clash in the Handle typedef
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp +++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp @@ -408,21 +408,37 @@ WebrtcAudioConduit::ConfigureSendMediaCo #ifdef MOZILLA_INTERNAL_API // TEMPORARY - see bug 694814 comment 2 nsresult rv; nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv); if (NS_SUCCEEDED(rv)) { nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs); if (branch) { + int32_t aec = 0; // 0 == unchanged + bool aec_on = false; + + branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on); + branch->GetIntPref("media.peerconnection.aec", &aec); + + CSFLogDebug(logTag,"Audio config: aec: %d", aec_on ? aec : -1); + mEchoOn = aec_on; + if (static_cast<webrtc::EcModes>(aec) != webrtc::kEcUnchanged) + mEchoCancel = static_cast<webrtc::EcModes>(aec); + branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay); } } #endif + if (0 != (error = mPtrVoEProcessing->SetEcStatus(mEchoOn, mEchoCancel))) { + CSFLogError(logTag,"%s Error setting EcStatus: %d ",__FUNCTION__, error); + return kMediaConduitUnknownError; + } + //Let's Send Transport State-machine on the Engine if(mPtrVoEBase->StartSend(mChannel) == -1) { error = mPtrVoEBase->LastError(); CSFLogError(logTag, "%s StartSend failed %d", __FUNCTION__, error); return kMediaConduitUnknownError; } @@ -906,17 +922,17 @@ WebrtcAudioConduit::IsSamplingFreqSuppor /* Return block-length of 10 ms audio frame in number of samples */ unsigned int WebrtcAudioConduit::GetNum10msSamplesForFrequency(int samplingFreqHz) const { switch(samplingFreqHz) { case 16000: return 160; //160 samples case 32000: return 320; //320 samples - case 44100: return 441; //441 samples + case 44000: return 440; //440 samples case 48000: return 480; //480 samples default: return 0; // invalid or unsupported } } //Copy the codec passed into Conduit's database bool WebrtcAudioConduit::CopyCodecToDB(const AudioCodecConfig* codecInfo)
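The restored GetNum10msSamplesForFrequency() table is just fs/100, the number of samples in a 10 ms block. Note that webrtc.org historically used a "legacy" 44000 Hz entry (440 samples) rather than the true CD rate; the backed-out change had switched it to 44100/441, and the backout returns to upstream's value. A quick check of the arithmetic:

#include <cassert>

// 10 ms at fs Hz is fs/100 samples when fs is a multiple of 100.
static unsigned int Num10msSamples(int samplingFreqHz) {
  return (samplingFreqHz % 100 == 0) ? samplingFreqHz / 100 : 0;
}

int main() {
  assert(Num10msSamples(16000) == 160);
  assert(Num10msSamples(44000) == 440);  // webrtc.org's legacy "44 kHz" rate
  assert(Num10msSamples(44100) == 441);  // true 44.1 kHz, per the backed-out change
  assert(Num10msSamples(48000) == 480);
  return 0;
}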
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h +++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h @@ -157,16 +157,18 @@ public: mShutDown(false), mVoiceEngine(nullptr), mTransport(nullptr), mEngineTransmitting(false), mEngineReceiving(false), mChannel(-1), mCurSendCodecConfig(nullptr), mCaptureDelay(150), + mEchoOn(true), + mEchoCancel(webrtc::kEcAec), #ifdef MOZILLA_INTERNAL_API mLastTimestamp(0), #endif // MOZILLA_INTERNAL_API mSamples(0), mLastSyncLog(0) { } @@ -257,16 +259,19 @@ private: int mChannel; RecvCodecList mRecvCodecList; AudioCodecConfig* mCurSendCodecConfig; // Current "capture" delay (really output plus input delay) int32_t mCaptureDelay; + bool mEchoOn; + webrtc::EcModes mEchoCancel; + #ifdef MOZILLA_INTERNAL_API uint32_t mLastTimestamp; #endif // MOZILLA_INTERNAL_API uint32_t mSamples; uint32_t mLastSyncLog; };
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc +++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc @@ -4,34 +4,27 @@ * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "webrtc/modules/audio_device/android/single_rw_fifo.h" -#if defined(_MSC_VER) -#include <windows.h> -#endif static int UpdatePos(int pos, int capacity) { return (pos + 1) % capacity; } namespace webrtc { namespace subtle { inline void MemoryBarrier() { -#if defined(_MSC_VER) - ::MemoryBarrier(); -#else __sync_synchronize(); -#endif } } // namespace subtle SingleRwFifo::SingleRwFifo(int capacity) : capacity_(capacity), size_(0), read_pos_(0),
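Reverting MemoryBarrier() to a bare __sync_synchronize() works here because SingleRwFifo only needs ordering between two threads: the writer must make slot contents visible before it advances the write index, and the reader must load the index before the slot. A minimal sketch of that discipline, assuming GCC/Clang builtins (today one would use std::atomic; this is not the webrtc class):

template <typename T, int kCapacity>
class SpscFifo {
 public:
  bool Push(const T& aValue) {
    int w = write_pos_;
    int next = (w + 1) % kCapacity;
    if (next == read_pos_) {
      return false;                // full; one slot stays free on purpose
    }
    buf_[w] = aValue;
    __sync_synchronize();          // publish the slot before the index
    write_pos_ = next;
    return true;
  }

  bool Pop(T* aOut) {
    int r = read_pos_;
    if (r == write_pos_) {
      return false;                // empty
    }
    __sync_synchronize();          // load the index before the slot
    *aOut = buf_[r];
    read_pos_ = (r + 1) % kCapacity;
    return true;
  }

 private:
  T buf_[kCapacity] = {};
  volatile int read_pos_ = 0;      // advanced only by the reader thread
  volatile int write_pos_ = 0;     // advanced only by the writer thread
};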
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi +++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi @@ -118,29 +118,26 @@ 'win/audio_device_core_win.cc', 'win/audio_device_core_win.h', 'win/audio_device_wave_win.cc', 'win/audio_device_wave_win.h', 'win/audio_device_utility_win.cc', 'win/audio_device_utility_win.h', 'win/audio_mixer_manager_win.cc', 'win/audio_mixer_manager_win.h', - # used externally for getUserMedia - 'opensl/single_rw_fifo.cc', - 'opensl/single_rw_fifo.h', ], 'conditions': [ ['OS=="android"', { - 'sources': [ + 'sources': [ 'opensl/audio_manager_jni.cc', 'opensl/audio_manager_jni.h', - 'android/audio_device_jni_android.cc', - 'android/audio_device_jni_android.h', + 'android/audio_device_jni_android.cc', + 'android/audio_device_jni_android.h', ], - }], + }], ['OS=="android" or moz_widget_toolkit_gonk==1', { 'link_settings': { 'libraries': [ '-llog', '-lOpenSLES', ], }, 'conditions': [ @@ -152,25 +149,27 @@ 'opensl/fine_audio_buffer.h', 'opensl/low_latency_event_posix.cc', 'opensl/low_latency_event.h', 'opensl/opensles_common.cc', 'opensl/opensles_common.h', 'opensl/opensles_input.cc', 'opensl/opensles_input.h', 'opensl/opensles_output.h', - 'shared/audio_device_utility_shared.cc', - 'shared/audio_device_utility_shared.h', + 'opensl/single_rw_fifo.cc', + 'opensl/single_rw_fifo.h', + 'shared/audio_device_utility_shared.cc', + 'shared/audio_device_utility_shared.h', ], }, { 'sources': [ - 'shared/audio_device_utility_shared.cc', - 'shared/audio_device_utility_shared.h', - 'android/audio_device_jni_android.cc', - 'android/audio_device_jni_android.h', + 'shared/audio_device_utility_shared.cc', + 'shared/audio_device_utility_shared.h', + 'android/audio_device_jni_android.cc', + 'android/audio_device_jni_android.h', ], }], ['enable_android_opensl_output==1', { 'sources': [ 'opensl/opensles_output.cc' ], 'defines': [ 'WEBRTC_ANDROID_OPENSLES_OUTPUT',
--- a/media/webrtc/trunk/webrtc/voice_engine/include/mock/fake_voe_external_media.h +++ b/media/webrtc/trunk/webrtc/voice_engine/include/mock/fake_voe_external_media.h @@ -38,19 +38,16 @@ class FakeVoEExternalMedia : public VoEE WEBRTC_STUB(SetExternalRecordingStatus, (bool enable)); WEBRTC_STUB(SetExternalPlayoutStatus, (bool enable)); WEBRTC_STUB(ExternalRecordingInsertData, (const int16_t speechData10ms[], int lengthSamples, int samplingFreqHz, int current_delay_ms)); WEBRTC_STUB(ExternalPlayoutGetData, (int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples)); - WEBRTC_STUB(ExternalPlayoutData, - (int16_t speechData10ms[], int samplingFreqHz, - int num_channels, int current_delay_ms, int& lengthSamples)); WEBRTC_STUB(GetAudioFrame, (int channel, int desired_sample_rate_hz, AudioFrame* frame)); WEBRTC_STUB(SetExternalMixing, (int channel, bool enable)); // Use this to trigger the Process() callback to a registered media processor. // If |audio| is NULL, a zero array of the correct length will be forwarded. void CallProcess(ProcessingTypes type, int16_t* audio, int samples_per_channel, int sample_rate_hz,
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h +++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h @@ -92,28 +92,20 @@ public: // This function accepts externally recorded audio. During transmission, // this method should be called at as regular an interval as possible // with frames of corresponding size. virtual int ExternalRecordingInsertData( const int16_t speechData10ms[], int lengthSamples, int samplingFreqHz, int current_delay_ms) = 0; - // This function inserts audio written to the OS audio drivers for use - // as the far-end signal for AEC processing. The length of the block - // must be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or - // 48000 kHz sampling rates respectively). - virtual int ExternalPlayoutData( - int16_t speechData10ms[], int samplingFreqHz, int num_channels, - int current_delay_ms, int& lengthSamples) = 0; - // This function gets audio for an external playout sink. // During transmission, this function should be called every ~10 ms // to obtain a new 10 ms frame of audio. The length of the block will - // be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or 48000 + // be 160, 320, 440 or 480 samples (for 16000, 32000, 44000 or 48000 // kHz sampling rates respectively). virtual int ExternalPlayoutGetData( int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples) = 0; // Pulls an audio frame from the specified |channel| for external mixing. // If the |desired_sample_rate_hz| is 0, the signal will be returned with // its native frequency, otherwise it will be resampled. Valid frequencies
--- a/media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc +++ b/media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc @@ -561,17 +561,17 @@ OutputMixer::DoOperationsOnCombinedSigna } assert(_audioFrame.num_channels_ == 2); AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame); } // --- Far-end Voice Quality Enhancement (AudioProcessing Module) - APMAnalyzeReverseStream(_audioFrame); + APMAnalyzeReverseStream(); // --- External media processing if (_externalMedia) { CriticalSectionScoped cs(&_callbackCritSect); const bool isStereo = (_audioFrame.num_channels_ == 2); if (_externalMediaCallbackPtr) @@ -587,35 +587,35 @@ OutputMixer::DoOperationsOnCombinedSigna } // --- Measure audio level (0-9) for the combined signal _audioLevel.ComputeLevel(_audioFrame); return 0; } -void OutputMixer::APMAnalyzeReverseStream(AudioFrame &audioFrame) { +// ---------------------------------------------------------------------------- +// Private methods +// ---------------------------------------------------------------------------- + +void OutputMixer::APMAnalyzeReverseStream() { // Convert from mixing to AudioProcessing sample rate, determined by the send // side. Downmix to mono. AudioFrame frame; frame.num_channels_ = 1; frame.sample_rate_hz_ = _audioProcessingModulePtr->sample_rate_hz(); - if (RemixAndResample(audioFrame, &audioproc_resampler_, &frame) == -1) + if (RemixAndResample(_audioFrame, &audioproc_resampler_, &frame) == -1) return; if (_audioProcessingModulePtr->AnalyzeReverseStream(&frame) == -1) { WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1), "AudioProcessingModule::AnalyzeReverseStream() => error"); } } -// ---------------------------------------------------------------------------- -// Private methods -// ---------------------------------------------------------------------------- - int OutputMixer::InsertInbandDtmfTone() { uint16_t sampleRate(0); _dtmfGenerator.GetSampleRate(sampleRate); if (sampleRate != _audioFrame.sample_rate_hz_) { // Update sample rate of Dtmf tone since the mixing frequency changed.
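APMAnalyzeReverseStream() depends on RemixAndResample() to get the mixed output into the shape AnalyzeReverseStream() wants: mono, at the AudioProcessing sample rate. The downmix half of that, conceptually (illustrative helper, not the tree's RemixAndResample):

#include <cstdint>

// Average interleaved stereo into mono; the int32_t sum cannot overflow and
// halving restores the int16_t range.
static void DownmixStereoToMono(const int16_t* aInterleaved, int aFrames,
                                int16_t* aMono) {
  for (int i = 0; i < aFrames; ++i) {
    int32_t sum = int32_t(aInterleaved[2 * i]) + aInterleaved[2 * i + 1];
    aMono[i] = int16_t(sum / 2);
  }
}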
--- a/media/webrtc/trunk/webrtc/voice_engine/output_mixer.h +++ b/media/webrtc/trunk/webrtc/voice_engine/output_mixer.h @@ -113,21 +113,19 @@ public: // For file recording void PlayNotification(int32_t id, uint32_t durationMs); void RecordNotification(int32_t id, uint32_t durationMs); void PlayFileEnded(int32_t id); void RecordFileEnded(int32_t id); - // so ExternalPlayoutData() can insert far-end audio from the audio drivers - void APMAnalyzeReverseStream(AudioFrame &audioFrame); - private: OutputMixer(uint32_t instanceId); + void APMAnalyzeReverseStream(); int InsertInbandDtmfTone(); // uses Statistics* _engineStatisticsPtr; AudioProcessing* _audioProcessingModulePtr; // owns CriticalSectionWrapper& _callbackCritSect;
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc +++ b/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc @@ -275,78 +275,16 @@ int VoEExternalMediaImpl::SetExternalPla return 0; #else shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, "SetExternalPlayoutStatus() external playout is not supported"); return -1; #endif } -// This inserts a copy of the raw audio sent to the output drivers to use -// as the "far end" signal for the AEC. Currently only 10ms chunks are -// supported unfortunately. Since we have to rechunk to 10ms to call this, -// there isn't much gained by allowing N*10ms here; external code can loop -// if needed. -int VoEExternalMediaImpl::ExternalPlayoutData( - int16_t speechData10ms[], - int samplingFreqHz, - int num_channels, - int current_delay_ms, - int& lengthSamples) -{ - WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1), - "ExternalPlayoutData(speechData10ms=0x%x," - " lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)", - &speechData10ms[0], lengthSamples, samplingFreqHz, - current_delay_ms); - -#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT - if (!shared_->statistics().Initialized()) - { - shared_->SetLastError(VE_NOT_INITED, kTraceError); - return -1; - } - // FIX(jesup) - check if this is enabled? - if (shared_->NumOfSendingChannels() == 0) - { - shared_->SetLastError(VE_ALREADY_SENDING, kTraceError, - "SetExternalRecordingStatus() no channel is sending"); - return -1; - } - if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) && - (48000 != samplingFreqHz) && (44100 != samplingFreqHz)) - { - shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError, - "SetExternalRecordingStatus() invalid sample rate"); - return -1; - } - if (current_delay_ms < 0) - { - shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError, - "SetExternalRecordingStatus() invalid delay)"); - return -1; - } - - // Far-end data is inserted without going through neteq/etc. - // Only supports 10ms chunks; AnalyzeReverseStream() enforces that - // lower down. - AudioFrame audioFrame; - audioFrame.UpdateFrame(-1, 0xFFFFFFFF, - speechData10ms, - lengthSamples, - samplingFreqHz, - AudioFrame::kNormalSpeech, - AudioFrame::kVadUnknown, - num_channels); - - shared_->output_mixer()->APMAnalyzeReverseStream(audioFrame); -#endif - return 0; -} - int VoEExternalMediaImpl::ExternalPlayoutGetData( int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples) { WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1), "ExternalPlayoutGetData(speechData10ms=0x%x, samplingFreqHz=%d"
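As the deleted comment concedes, ExternalPlayoutData() took exactly 10 ms per call, so a caller holding N*10 ms of audio simply looped. A hypothetical caller-side sketch (insert10ms stands in for the removed entry point; a trailing partial chunk is left to the caller):

#include <cstdint>

template <typename Insert10ms>
void InsertFarEndChunked(const int16_t* aData, int aTotalSamples,
                         int aSamplingFreqHz, int aDelayMs,
                         Insert10ms aInsert10ms) {
  const int chunk = aSamplingFreqHz / 100;  // samples per 10 ms, mono
  for (int off = 0; off + chunk <= aTotalSamples; off += chunk) {
    aInsert10ms(aData + off, chunk, aSamplingFreqHz, aDelayMs);
  }
}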
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.h +++ b/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.h @@ -34,24 +34,16 @@ public: virtual int SetExternalPlayoutStatus(bool enable); virtual int ExternalRecordingInsertData( const int16_t speechData10ms[], int lengthSamples, int samplingFreqHz, int current_delay_ms); - // Insertion of far-end data as actually played out to the OS audio driver - virtual int ExternalPlayoutData( - int16_t speechData10ms[], - int samplingFreqHz, - int num_channels, - int current_delay_ms, - int& lengthSamples); - virtual int ExternalPlayoutGetData(int16_t speechData10ms[], int samplingFreqHz, int current_delay_ms, int& lengthSamples); virtual int GetAudioFrame(int channel, int desired_sample_rate_hz, AudioFrame* frame);
--- a/modules/libpref/src/init/all.js +++ b/modules/libpref/src/init/all.js @@ -259,43 +259,37 @@ pref("media.peerconnection.use_document_ // Do not enable identity before fixing domain comparison: see Bug 958741 // Do not enable identity before fixing origin spoofing: see Bug 968335 pref("media.peerconnection.identity.enabled", false); pref("media.peerconnection.identity.timeout", 5000); // These values (aec, agc, and noise) are from media/webrtc/trunk/webrtc/common_types.h // kXxxUnchanged = 0, kXxxDefault = 1, and higher values are specific to each // setting (for Xxx = Ec, Agc, or Ns). Defaults are all set to kXxxDefault here. pref("media.peerconnection.turn.disable", false); -pref("media.getusermedia.aec_enabled", true); -pref("media.getusermedia.aec", 1); -pref("media.getusermedia.agc_enabled", false); -pref("media.getusermedia.agc", 1); -pref("media.getusermedia.noise_enabled", true); -pref("media.getusermedia.noise", 1); -// Adjustments for OS-specific input delay (lower bound) -// Adjustments for OS-specific AudioStream+cubeb+output delay (lower bound) +pref("media.peerconnection.aec_enabled", true); +pref("media.peerconnection.aec", 1); +pref("media.peerconnection.agc_enabled", false); +pref("media.peerconnection.agc", 1); +pref("media.peerconnection.noise_enabled", false); +pref("media.peerconnection.noise", 1); +// Adjustments for OS mediastream+output+OS+input delay (lower bound) #if defined(XP_MACOSX) pref("media.peerconnection.capture_delay", 50); -pref("media.getusermedia.playout_delay", 10); #elif defined(XP_WIN) pref("media.peerconnection.capture_delay", 50); -pref("media.getusermedia.playout_delay", 40); #elif defined(ANDROID) pref("media.peerconnection.capture_delay", 100); -pref("media.getusermedia.playout_delay", 100); // Whether to enable Webrtc Hardware acceleration support pref("media.navigator.hardware.vp8_encode.acceleration_enabled", false); pref("media.navigator.hardware.vp8_decode.acceleration_enabled", false); #elif defined(XP_LINUX) pref("media.peerconnection.capture_delay", 70); -pref("media.getusermedia.playout_delay", 50); #else // *BSD, others - merely a guess for now pref("media.peerconnection.capture_delay", 50); -pref("media.getusermedia.playout_delay", 50); #endif #else #ifdef ANDROID pref("media.navigator.enabled", true); #endif #endif pref("media.tabstreaming.width", 320);
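The integer prefs above feed straight into the webrtc.org mode enums from media/webrtc/trunk/webrtc/common_types.h; 0 always means "leave unchanged" and 1 means "engine default". Abbreviated sketch of those enums from this era of the tree (consult common_types.h for the authoritative lists):

enum EcModes  { kEcUnchanged = 0, kEcDefault = 1, kEcConference, kEcAec, kEcAecm };
enum AgcModes { kAgcUnchanged = 0, kAgcDefault = 1, kAgcAdaptiveAnalog, kAgcAdaptiveDigital, kAgcFixedDigital };
enum NsModes  { kNsUnchanged = 0, kNsDefault = 1, kNsConference, kNsLowSuppression, kNsModerateSuppression, kNsHighSuppression, kNsVeryHighSuppression };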