Backed out changeset 198add1ad218 (bug 1264199) for test failures in test_WaitingOnMissingData_mp4.html
author Carsten "Tomcat" Book <cbook@mozilla.com>
Thu, 21 Apr 2016 16:32:21 +0200
changeset 332175 acaefb4a5fc814ad6ee2404d17f6539da82aa839
parent 332174 39ea6efa54bb354e12a7c489bb88f0ac79318871
child 332176 2562ba9f1ceae12c36fb2d9752b87651b60a1d7c
child 332200 969d250bffc4b78d0cb69571a37789af123fba44
push id 6048
push user kmoir@mozilla.com
push date Mon, 06 Jun 2016 19:02:08 +0000
treeherder mozilla-beta@46d72a56c57d
bugs 1264199
milestone 48.0a1
backs out 198add1ad218719bb6034299afa6b841fcd1ec46
Backed out changeset 198add1ad218 (bug 1264199) for test failures in test_WaitingOnMissingData_mp4.html
dom/media/MediaDecoderStateMachine.cpp
dom/media/mediasink/DecodedAudioDataSink.cpp
dom/media/mediasink/DecodedAudioDataSink.h
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -356,17 +356,17 @@ MediaDecoderStateMachine::Initialization
 
 media::MediaSink*
 MediaDecoderStateMachine::CreateAudioSink()
 {
   RefPtr<MediaDecoderStateMachine> self = this;
   auto audioSinkCreator = [self] () {
     MOZ_ASSERT(self->OnTaskQueue());
     return new DecodedAudioDataSink(
-      self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
+      self->mAudioQueue, self->GetMediaTime(),
       self->mInfo.mAudio, self->mAudioChannel);
   };
   return new AudioSinkWrapper(mTaskQueue, audioSinkCreator);
 }
 
 already_AddRefed<media::MediaSink>
 MediaDecoderStateMachine::CreateMediaSink(bool aAudioCaptured)
 {
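
The MediaDecoderStateMachine hunk above is the only caller-side change: the backed-out patch passed mTaskQueue into the DecodedAudioDataSink constructor, and the backout drops that argument again. For readers unfamiliar with the creator-lambda pattern visible in this hunk, here is a minimal, hypothetical sketch of a wrapper that holds a factory and builds the real sink on demand; LazySinkWrapper, Sink and StartPlayback are illustrative names, not the real AudioSinkWrapper API:

  // Hypothetical sketch of the "sink creator" pattern, not Mozilla's actual classes.
  #include <functional>
  #include <memory>
  #include <utility>

  struct Sink {
    virtual ~Sink() = default;
    virtual void Start() = 0;
  };

  class LazySinkWrapper {
  public:
    using Creator = std::function<std::unique_ptr<Sink>()>;
    explicit LazySinkWrapper(Creator aCreator) : mCreator(std::move(aCreator)) {}

    void StartPlayback() {
      if (!mSink) {
        mSink = mCreator();  // build the real sink only when playback actually starts
      }
      mSink->Start();
    }

  private:
    Creator mCreator;
    std::unique_ptr<Sink> mSink;
  };

In the real code the lambda captures `self` as a RefPtr, which keeps the state machine alive until the creator runs on the task queue.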
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -24,66 +24,46 @@ extern LazyLogModule gMediaDecoderLog;
   MOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, \
   ("DecodedAudioDataSink=%p " msg, this, ##__VA_ARGS__))
 
 namespace media {
 
 // The amount of audio frames that is used to fuzz rounding errors.
 static const int64_t AUDIO_FUZZ_FRAMES = 1;
 
-// Amount of audio frames we will be processing ahead of use
-static const int32_t LOW_AUDIO_USECS = 300000;
-
-DecodedAudioDataSink::DecodedAudioDataSink(AbstractThread* aThread,
-                                           MediaQueue<MediaData>& aAudioQueue,
+DecodedAudioDataSink::DecodedAudioDataSink(MediaQueue<MediaData>& aAudioQueue,
                                            int64_t aStartTime,
                                            const AudioInfo& aInfo,
                                            dom::AudioChannel aChannel)
   : AudioSink(aAudioQueue)
   , mStartTime(aStartTime)
   , mWritten(0)
   , mLastGoodPosition(0)
   , mInfo(aInfo)
   , mChannel(aChannel)
   , mPlaying(true)
-  , mErrored(false)
   , mPlaybackComplete(false)
-  , mOwnerThread(aThread)
-  , mProcessedQueueLength(0)
-  , mFramesParsed(0)
-  , mLastEndTime(0)
 {
   bool resampling = gfxPrefs::AudioSinkResampling();
   uint32_t resamplingRate = gfxPrefs::AudioSinkResampleRate();
-  mOutputRate = resampling ? resamplingRate : mInfo.mRate;
-  mOutputChannels = mInfo.mChannels > 2 && gfxPrefs::AudioSinkForceStereo()
-                      ? 2 : mInfo.mChannels;
   mConverter =
     MakeUnique<AudioConverter>(
       AudioConfig(mInfo.mChannels, mInfo.mRate),
-      AudioConfig(mOutputChannels, mOutputRate));
+      AudioConfig(mInfo.mChannels > 2 && gfxPrefs::AudioSinkForceStereo()
+                    ? 2 : mInfo.mChannels,
+                  resampling ? resamplingRate : mInfo.mRate));
 }
 
 DecodedAudioDataSink::~DecodedAudioDataSink()
 {
 }
 
 RefPtr<GenericPromise>
 DecodedAudioDataSink::Init(const PlaybackParams& aParams)
 {
-  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
-
-  mAudioQueueListener = mAudioQueue.PushEvent().Connect(
-    mOwnerThread, this, &DecodedAudioDataSink::OnAudioPushed);
-  mProcessedQueueListener = mProcessedQueue.PopEvent().Connect(
-    mOwnerThread, this, &DecodedAudioDataSink::OnAudioPopped);
-
-  // To ensure at least one audio packet will be popped from AudioQueue and
-  // ready to be played.
-  NotifyAudioNeeded();
   RefPtr<GenericPromise> p = mEndPromise.Ensure(__func__);
   nsresult rv = InitializeAudioStream(aParams);
   if (NS_FAILED(rv)) {
     mEndPromise.Reject(rv, __func__);
   }
   return p;
 }
 
@@ -110,26 +90,20 @@ DecodedAudioDataSink::HasUnplayedFrames(
   // Experimentation suggests that GetPositionInFrames() is zero-indexed,
   // so we need to add 1 here before comparing it to mWritten.
   return mAudioStream && mAudioStream->GetPositionInFrames() + 1 < mWritten;
 }
 
 void
 DecodedAudioDataSink::Shutdown()
 {
-  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
-
-  mAudioQueueListener.Disconnect();
-  mProcessedQueueListener.Disconnect();
-
   if (mAudioStream) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
   }
-  mProcessedQueue.Reset();
   mEndPromise.ResolveIfExists(true, __func__);
 }
 
 void
 DecodedAudioDataSink::SetVolume(double aVolume)
 {
   if (mAudioStream) {
     mAudioStream->SetVolume(aVolume);
@@ -167,17 +141,19 @@ DecodedAudioDataSink::SetPlaying(bool aP
   }
   mPlaying = aPlaying;
 }
 
 nsresult
 DecodedAudioDataSink::InitializeAudioStream(const PlaybackParams& aParams)
 {
   mAudioStream = new AudioStream(*this);
-  nsresult rv = mAudioStream->Init(mOutputChannels, mOutputRate, mChannel);
+  nsresult rv = mAudioStream->Init(mConverter->OutputConfig().Channels(),
+                                   mConverter->OutputConfig().Rate(),
+                                   mChannel);
   if (NS_FAILED(rv)) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
     return rv;
   }
 
   // Set playback params before calling Start() so they can take effect
   // as soon as the 1st DataCallback of the AudioStream fires.
@@ -187,24 +163,23 @@ DecodedAudioDataSink::InitializeAudioStr
   mAudioStream->Start();
 
   return NS_OK;
 }
 
 int64_t
 DecodedAudioDataSink::GetEndTime() const
 {
-  CheckedInt64 playedUsecs = FramesToUsecs(mWritten, mOutputRate) + mStartTime;
+  CheckedInt64 playedUsecs =
+    FramesToUsecs(mWritten, mConverter->OutputConfig().Rate()) + mStartTime;
   if (!playedUsecs.isValid()) {
     NS_WARNING("Int overflow calculating audio end time");
     return -1;
   }
-  // As we may be resampling, rounding errors may occur. Ensure we never get
-  // past the original end time.
-  return std::min<int64_t>(mLastEndTime, playedUsecs.value());
+  return playedUsecs.value();
 }
 
 UniquePtr<AudioStream::Chunk>
 DecodedAudioDataSink::PopFrames(uint32_t aFrames)
 {
   class Chunk : public AudioStream::Chunk {
   public:
     Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
@@ -237,45 +212,108 @@ DecodedAudioDataSink::PopFrames(uint32_t
     AudioDataValue* GetWritable() const { return mData.get(); }
   private:
     const uint32_t mFrames;
     const uint32_t mChannels;
     const uint32_t mRate;
     UniquePtr<AudioDataValue[]> mData;
   };
 
-  if (!mCurrentData) {
+  while (!mCurrentData) {
     // No data in the queue. Return an empty chunk.
-    if (!mProcessedQueue.GetSize()) {
+    if (AudioQueue().GetSize() == 0) {
       return MakeUnique<Chunk>();
     }
 
-    mCurrentData = dont_AddRef(mProcessedQueue.PopFront().take());
+    AudioData* a = AudioQueue().PeekFront()->As<AudioData>();
+
+    // Ignore the element with 0 frames and try next.
+    if (a->mFrames == 0) {
+      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
+      continue;
+    }
+
+    // Ignore invalid samples.
+    if (a->mRate != mInfo.mRate || a->mChannels != mInfo.mChannels) {
+      NS_WARNING(nsPrintfCString(
+        "mismatched sample format, data=%p rate=%u channels=%u frames=%u",
+        a->mAudioData.get(), a->mRate, a->mChannels, a->mFrames).get());
+      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
+      continue;
+    }
+
+    // See if there's a gap in the audio. If there is, push silence into the
+    // audio hardware, so we can play across the gap.
+    // Calculate the timestamp of the next chunk of audio in numbers of
+    // samples.
+    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime,
+                                            mConverter->OutputConfig().Rate());
+    // Calculate the number of frames that have been pushed onto the audio hardware.
+    CheckedInt64 playedFrames = UsecsToFrames(mStartTime,
+                                              mConverter->OutputConfig().Rate()) +
+                                static_cast<int64_t>(mWritten);
+    CheckedInt64 missingFrames = sampleTime - playedFrames;
+
+    if (!missingFrames.isValid() || !sampleTime.isValid()) {
+      NS_WARNING("Int overflow in DecodedAudioDataSink");
+      mErrored = true;
+      return MakeUnique<Chunk>();
+    }
+
+    const uint32_t rate = mConverter->OutputConfig().Rate();
+    const uint32_t channels = mConverter->OutputConfig().Channels();
+
+    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
+      // The next audio chunk begins some time after the end of the last chunk
+      // we pushed to the audio hardware. We must push silence into the audio
+      // hardware so that the next audio chunk begins playback at the correct
+      // time.
+      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
+      auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
+      mWritten += framesToPop;
+      return MakeUnique<SilentChunk>(framesToPop, channels, rate);
+    }
+
+    RefPtr<AudioData> data =
+      dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
+    if (mConverter->InputConfig() != mConverter->OutputConfig()) {
+      AlignedAudioBuffer convertedData =
+        mConverter->Process(AudioSampleBuffer(Move(data->mAudioData))).Forget();
+      mCurrentData =
+        new AudioData(data->mOffset,
+                      data->mTime,
+                      data->mDuration,
+                      convertedData.Length() / channels,
+                      Move(convertedData),
+                      channels,
+                      rate);
+    } else {
+      mCurrentData = Move(data);
+    }
+
     mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                             mCurrentData->mChannels,
                                             mCurrentData->mFrames);
     MOZ_ASSERT(mCurrentData->mFrames > 0);
-    mProcessedQueueLength -=
-      FramesToUsecs(mCurrentData->mFrames, mOutputRate).value();
   }
 
   auto framesToPop = std::min(aFrames, mCursor->Available());
 
   SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
              mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
 
   UniquePtr<AudioStream::Chunk> chunk =
     MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
 
   mWritten += framesToPop;
   mCursor->Advance(framesToPop);
 
   // All frames are popped. Reset mCurrentData so we can pop new elements from
   // the audio queue in next calls to PopFrames().
-  if (!mCursor->Available()) {
+  if (mCursor->Available() == 0) {
     mCurrentData = nullptr;
   }
 
   return chunk;
 }
 
 bool
 DecodedAudioDataSink::Ended() const
@@ -287,131 +325,10 @@ DecodedAudioDataSink::Ended() const
 void
 DecodedAudioDataSink::Drained()
 {
   SINK_LOG("Drained");
   mPlaybackComplete = true;
   mEndPromise.ResolveIfExists(true, __func__);
 }
 
-void
-DecodedAudioDataSink::OnAudioPopped(const RefPtr<MediaData>& aSample)
-{
-  SINK_LOG_V("AudioStream has used an audio packet.");
-  NotifyAudioNeeded();
-}
-
-void
-DecodedAudioDataSink::OnAudioPushed(const RefPtr<MediaData>& aSample)
-{
-  SINK_LOG_V("One new audio packet available.");
-  NotifyAudioNeeded();
-}
-
-void
-DecodedAudioDataSink::NotifyAudioNeeded()
-{
-  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
-             "Not called from the owner's thread");
-
-  // Always ensure we have two processed frames pending to allow for processing
-  // latency.
-  while (AudioQueue().GetSize() && (mProcessedQueueLength < LOW_AUDIO_USECS ||
-                                    mProcessedQueue.GetSize() < 2)) {
-    RefPtr<AudioData> data =
-      dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
-
-    // Ignore the element with 0 frames and try next.
-    if (!data->mFrames) {
-      continue;
-    }
-
-    // Ignore invalid samples.
-    if (data->mRate != mConverter->InputConfig().Rate() ||
-        data->mChannels != mConverter->InputConfig().Channels()) {
-      NS_WARNING(nsPrintfCString(
-        "mismatched sample format, data=%p rate=%u channels=%u frames=%u",
-        data->mAudioData.get(), data->mRate, data->mChannels, data->mFrames).get());
-      continue;
-    }
-
-    // See if there's a gap in the audio. If there is, push silence into the
-    // audio hardware, so we can play across the gap.
-    // Calculate the timestamp of the next chunk of audio in numbers of
-    // samples.
-    CheckedInt64 sampleTime = UsecsToFrames(data->mTime - mStartTime,
-                                            data->mRate);
-    // Calculate the number of frames that have been pushed onto the audio hardware.
-    CheckedInt64 missingFrames = sampleTime - mFramesParsed;
-
-    if (!missingFrames.isValid()) {
-      NS_WARNING("Int overflow in DecodedAudioDataSink");
-      mErrored = true;
-      return;
-    }
-
-    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
-      // The next audio packet begins some time after the end of the last packet
-      // we pushed to the audio hardware. We must push silence into the audio
-      // hardware so that the next audio packet begins playback at the correct
-      // time.
-      missingFrames = std::min<int64_t>(INT32_MAX, missingFrames.value());
-      mFramesParsed += missingFrames.value();
-      AlignedAudioBuffer silenceData(missingFrames.value() * mOutputChannels);
-      if (!silenceData) {
-        NS_WARNING("OOM in DecodedAudioDataSink");
-        mErrored = true;
-        return;
-      }
-      RefPtr<AudioData> silence = CreateAudioFromBuffer(Move(silenceData), data);
-      PushProcessedAudio(silence);
-    }
-
-    mLastEndTime = data->GetEndTime();
-    mFramesParsed += data->mFrames;
-
-    if (mConverter->InputConfig() != mConverter->OutputConfig()) {
-      AlignedAudioBuffer convertedData =
-        mConverter->Process(AudioSampleBuffer(Move(data->mAudioData))).Forget();
-      data = CreateAudioFromBuffer(Move(convertedData), data);
-    }
-    PushProcessedAudio(data);
-  }
-}
-
-uint32_t
-DecodedAudioDataSink::PushProcessedAudio(AudioData* aData)
-{
-  if (!aData || !aData->mFrames) {
-    return 0;
-  }
-  mProcessedQueue.Push(aData);
-  mProcessedQueueLength += FramesToUsecs(aData->mFrames, mOutputRate).value();
-  return aData->mFrames;
-}
-
-already_AddRefed<AudioData>
-DecodedAudioDataSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
-                                            AudioData* aReference)
-{
-  uint32_t frames = aBuffer.Length() / mOutputChannels;
-  if (!frames) {
-    return nullptr;
-  }
-  CheckedInt64 duration = FramesToUsecs(frames, mOutputRate);
-  if (!duration.isValid()) {
-    NS_WARNING("Int overflow in DecodedAudioDataSink");
-    mErrored = true;
-    return nullptr;
-  }
-  RefPtr<AudioData> data =
-    new AudioData(aReference->mOffset,
-                  aReference->mTime,
-                  duration.value(),
-                  frames,
-                  Move(aBuffer),
-                  mOutputChannels,
-                  mOutputRate);
-  return data.forget();
-}
-
 } // namespace media
 } // namespace mozilla
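
Both the backed-out NotifyAudioNeeded() path and the restored PopFrames() path detect gaps in the audio timeline in essentially the same way: convert the next packet's timestamp into frames, subtract the frames already accounted for, and play that many silent frames before the packet so it starts at the right time. A standalone sketch of that arithmetic, assuming the conventional microsecond-to-frame conversion (frames = usecs * rate / 1,000,000) and leaving out the CheckedInt64 overflow handling the real code uses:

  #include <algorithm>
  #include <cstdint>

  // Rough stand-in for the real UsecsToFrames() helper; overflow checking omitted.
  static int64_t UsecsToFramesApprox(int64_t aUsecs, uint32_t aRate) {
    return aUsecs * aRate / 1000000;
  }

  // How many silent frames must be played before the next packet so that it
  // begins at the correct time? Mirrors the missingFrames logic in PopFrames().
  static int64_t MissingFrames(int64_t aNextPacketTimeUs,  // timestamp of next packet
                               int64_t aStartTimeUs,       // playback start time
                               int64_t aFramesWritten,     // frames already pushed
                               uint32_t aOutputRate) {
    int64_t sampleTime = UsecsToFramesApprox(aNextPacketTimeUs, aOutputRate);
    int64_t playedFrames = UsecsToFramesApprox(aStartTimeUs, aOutputRate) + aFramesWritten;
    return std::max<int64_t>(0, sampleTime - playedFrames);
  }

PopFrames() then clamps the result to the number of frames the callback asked for and returns a SilentChunk of that size, counting it toward mWritten.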
--- a/dom/media/mediasink/DecodedAudioDataSink.h
+++ b/dom/media/mediasink/DecodedAudioDataSink.h
@@ -23,18 +23,17 @@ namespace mozilla {
 
 class AudioConverter;
 
 namespace media {
 
 class DecodedAudioDataSink : public AudioSink,
                              private AudioStream::DataSource {
 public:
-  DecodedAudioDataSink(AbstractThread* aThread,
-                       MediaQueue<MediaData>& aAudioQueue,
+  DecodedAudioDataSink(MediaQueue<MediaData>& aAudioQueue,
                        int64_t aStartTime,
                        const AudioInfo& aInfo,
                        dom::AudioChannel aChannel);
 
   // Return a promise which will be resolved when DecodedAudioDataSink
   // finishes playing, or rejected if any error.
   RefPtr<GenericPromise> Init(const PlaybackParams& aParams) override;
 
@@ -98,44 +97,20 @@ private:
    * Members to implement AudioStream::DataSource.
    * Used on the callback thread of cubeb.
    */
   // The AudioData at which AudioStream::DataSource is reading.
   RefPtr<AudioData> mCurrentData;
   // Keep track of the read position of mCurrentData.
   UniquePtr<AudioBufferCursor> mCursor;
   // True if there is any error in processing audio data like overflow.
-  Atomic<bool> mErrored;
+  bool mErrored = false;
 
   // Set on the callback thread of cubeb once the stream has drained.
   Atomic<bool> mPlaybackComplete;
 
-  const RefPtr<AbstractThread> mOwnerThread;
-
-  // Audio Processing objects and methods
-  void OnAudioPopped(const RefPtr<MediaData>& aSample);
-  void OnAudioPushed(const RefPtr<MediaData>& aSample);
-  void NotifyAudioNeeded();
-  already_AddRefed<AudioData> CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
-                                                    AudioData* aReference);
-  // Add data to the processsed queue, update mProcessedQueueLength and
-  // return the number of frames added.
-  uint32_t PushProcessedAudio(AudioData* aData);
   UniquePtr<AudioConverter> mConverter;
-  MediaQueue<AudioData> mProcessedQueue;
-  // Length in microseconds of the ProcessedQueue
-  Atomic<int32_t> mProcessedQueueLength;
-  MediaEventListener mAudioQueueListener;
-  MediaEventListener mProcessedQueueListener;
-  // Number of frames processed from AudioQueue(). Used to determine gaps in
-  // the input stream. It indicates the time in frames since playback started
-  // at the current input framerate.
-  int64_t mFramesParsed;
-  int64_t mLastEndTime;
-  // Never modifed after construction.
-  uint32_t mOutputRate;
-  uint32_t mOutputChannels;
 };
 
 } // namespace media
 } // namespace mozilla
 
 #endif
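
In both versions of this header, the sink's output format is derived from the decoded AudioInfo plus two prefs: more than two channels are folded down to stereo when AudioSinkForceStereo is set, and the sample rate is replaced by AudioSinkResampleRate when AudioSinkResampling is enabled. A minimal sketch of that selection, with the gfxPrefs lookups replaced by plain parameters for illustration:

  #include <cstdint>

  struct OutputConfig {
    uint32_t mChannels;
    uint32_t mRate;
  };

  // Mirrors the channel/rate selection in the DecodedAudioDataSink constructor.
  // The pref plumbing (gfxPrefs::AudioSinkForceStereo/Resampling/ResampleRate)
  // is passed in explicitly here.
  static OutputConfig SelectOutputConfig(uint32_t aInChannels, uint32_t aInRate,
                                         bool aForceStereo, bool aResampling,
                                         uint32_t aResampleRate) {
    OutputConfig out;
    out.mChannels = (aInChannels > 2 && aForceStereo) ? 2 : aInChannels;
    out.mRate = aResampling ? aResampleRate : aInRate;
    return out;
  }

The backed-out patch cached these values in mOutputRate/mOutputChannels; after the backout the same values are read back from mConverter->OutputConfig().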