Bug 1240420. Part 1 - move checks of mismatched sample rate or channel numbers to AudioStream. r=kinetik.
author JW Wang <jwwang@mozilla.com>
Thu, 21 Jan 2016 21:11:14 +0800
changeset 281134 52b05e612df9ee6e1006452aec6f820409b8767e
parent 281133 2f5e4d2d6f3588c5f001afc8b7299a1a016fa904
child 281135 632c8912c742a43ed6deb4153759a7113f3c051a
push id 29930
push user cbook@mozilla.com
push date Fri, 22 Jan 2016 11:05:50 +0000
treeherder mozilla-central@7104d650a97d [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers kinetik
bugs 1240420
milestone 46.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1240420. Part 1 - move checks of mismatched sample rate or channel numbers to AudioStream. r=kinetik.
dom/media/AudioStream.cpp
dom/media/AudioStream.h
dom/media/mediasink/DecodedAudioDataSink.cpp
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -530,28 +530,35 @@ AudioStream::GetPositionInFramesUnlocked
 bool
 AudioStream::IsPaused()
 {
   MonitorAutoLock mon(mMonitor);
   return mState == STOPPED;
 }
 
 bool
-AudioStream::Downmix(AudioDataValue* aBuffer, uint32_t aFrames)
+AudioStream::Downmix(Chunk* aChunk)
 {
-  if (mChannels > 8) {
+  if (aChunk->Rate() != mInRate) {
+    LOGW("mismatched sample %u, mInRate=%u", aChunk->Rate(), mInRate);
     return false;
   }
 
-  if (mChannels > 2 && mChannels <= 8) {
-    DownmixAudioToStereo(aBuffer, mChannels, aFrames);
+  if (aChunk->Channels() > 8) {
+    return false;
   }
 
-  if (mChannels >= 2 && mIsMonoAudioEnabled) {
-    DownmixStereoToMono(aBuffer, aFrames);
+  if (aChunk->Channels() > 2 && aChunk->Channels() <= 8) {
+    DownmixAudioToStereo(aChunk->GetWritable(),
+                         aChunk->Channels(),
+                         aChunk->Frames());
+  }
+
+  if (aChunk->Channels() >= 2 && mIsMonoAudioEnabled) {
+    DownmixStereoToMono(aChunk->GetWritable(), aChunk->Frames());
   }
 
   return true;
 }
 
 void
 AudioStream::GetUnprocessed(AudioBufferWriter& aWriter)
 {
@@ -572,17 +579,17 @@ AudioStream::GetUnprocessed(AudioBufferW
   }
 
   while (aWriter.Available() > 0) {
     UniquePtr<Chunk> c = mDataSource.PopFrames(aWriter.Available());
     if (c->Frames() == 0) {
       break;
     }
     MOZ_ASSERT(c->Frames() <= aWriter.Available());
-    if (Downmix(c->GetWritable(), c->Frames())) {
+    if (Downmix(c.get())) {
       aWriter.Write(c->Data(), c->Frames());
     } else {
       // Write silence if downmixing fails.
       aWriter.WriteZeros(c->Frames());
     }
   }
 }
 
@@ -600,17 +607,17 @@ AudioStream::GetTimeStretched(AudioBuffe
   uint32_t toPopFrames = ceil(aWriter.Available() * playbackRate);
 
   while (mTimeStretcher->numSamples() < aWriter.Available()) {
     UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
     if (c->Frames() == 0) {
       break;
     }
     MOZ_ASSERT(c->Frames() <= toPopFrames);
-    if (Downmix(c->GetWritable(), c->Frames())) {
+    if (Downmix(c.get())) {
       mTimeStretcher->putSamples(c->Data(), c->Frames());
     } else {
       // Write silence if downmixing fails.
       nsAutoTArray<AudioDataValue, 1000> buf;
       buf.SetLength(mOutChannels * c->Frames());
       memset(buf.Elements(), 0, buf.Length() * sizeof(AudioDataValue));
       mTimeStretcher->putSamples(buf.Elements(), c->Frames());
     }
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -220,16 +220,20 @@ public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioStream)
 
   class Chunk {
   public:
     // Return a pointer to the audio data.
     virtual const AudioDataValue* Data() const = 0;
     // Return the number of frames in this chunk.
     virtual uint32_t Frames() const = 0;
+    // Return the number of audio channels.
+    virtual uint32_t Channels() const = 0;
+    // Return the sample rate of this chunk.
+    virtual uint32_t Rate() const = 0;
     // Return a writable pointer for downmixing.
     virtual AudioDataValue* GetWritable() const = 0;
     virtual ~Chunk() {}
   };
 
   class DataSource {
   public:
     // Return a chunk which contains at most aFrames frames or zero if no
@@ -318,17 +322,17 @@ private:
 
 
   long DataCallback(void* aBuffer, long aFrames);
   void StateCallback(cubeb_state aState);
 
   nsresult EnsureTimeStretcherInitializedUnlocked();
 
   // Return true if downmixing succeeds otherwise false.
-  bool Downmix(AudioDataValue* aBuffer, uint32_t aFrames);
+  bool Downmix(Chunk* aChunk);
 
   void GetUnprocessed(AudioBufferWriter& aWriter);
   void GetTimeStretched(AudioBufferWriter& aWriter);
 
   void StartUnlocked();
 
   // The monitor is held to protect all access to member variables.
   Monitor mMonitor;
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -167,35 +167,43 @@ DecodedAudioDataSink::PopFrames(uint32_t
 {
   class Chunk : public AudioStream::Chunk {
   public:
     Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
       : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
     Chunk() : mFrames(0), mData(nullptr) {}
     const AudioDataValue* Data() const { return mData; }
     uint32_t Frames() const { return mFrames; }
+    uint32_t Channels() const { return mBuffer ? mBuffer->mChannels: 0; }
+    uint32_t Rate() const { return mBuffer ? mBuffer->mRate : 0; }
     AudioDataValue* GetWritable() const { return mData; }
   private:
     const RefPtr<AudioData> mBuffer;
     const uint32_t mFrames;
     AudioDataValue* const mData;
   };
 
   class SilentChunk : public AudioStream::Chunk {
   public:
-    SilentChunk(uint32_t aFrames, uint32_t aChannels)
+    SilentChunk(uint32_t aFrames, uint32_t aChannels, uint32_t aRate)
       : mFrames(aFrames)
+      , mChannels(aChannels)
+      , mRate(aRate)
       , mData(MakeUnique<AudioDataValue[]>(aChannels * aFrames)) {
       memset(mData.get(), 0, aChannels * aFrames * sizeof(AudioDataValue));
     }
     const AudioDataValue* Data() const { return mData.get(); }
     uint32_t Frames() const { return mFrames; }
+    uint32_t Channels() const { return mChannels; }
+    uint32_t Rate() const { return mRate; }
     AudioDataValue* GetWritable() const { return mData.get(); }
   private:
     const uint32_t mFrames;
+    const uint32_t mChannels;
+    const uint32_t mRate;
     UniquePtr<AudioDataValue[]> mData;
   };
 
   if (!mCurrentData) {
     // No data in the queue. Return an empty chunk.
     if (AudioQueue().GetSize() == 0) {
       return MakeUnique<Chunk>();
     }
@@ -219,40 +227,32 @@ DecodedAudioDataSink::PopFrames(uint32_t
     if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
       // The next audio chunk begins some time after the end of the last chunk
       // we pushed to the audio hardware. We must push silence into the audio
       // hardware so that the next audio chunk begins playback at the correct
       // time.
       missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
       auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
       mWritten += framesToPop;
-      return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels);
+      return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels, mInfo.mRate);
     }
 
     mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
     mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                             mCurrentData->mChannels,
                                             mCurrentData->mFrames);
   }
 
   auto framesToPop = std::min(aFrames, mCursor->Available());
 
   SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
              mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
 
-  UniquePtr<AudioStream::Chunk> chunk;
-
-  if (mCurrentData->mRate == mInfo.mRate &&
-      mCurrentData->mChannels == mInfo.mChannels) {
-    chunk = MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
-  } else {
-    SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
-               mInfo.mRate, mInfo.mChannels, mCurrentData->mRate, mCurrentData->mChannels);
-    chunk = MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels);
-  }
+  UniquePtr<AudioStream::Chunk> chunk =
+    MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
 
   mWritten += framesToPop;
   mCursor->Advance(framesToPop);
 
   // All frames are popped. Reset mCurrentData so we can pop new elements from
   // the audio queue in next calls to PopFrames().
   if (mCursor->Available() == 0) {
     mCurrentData = nullptr;