Bug 1182737. Part 1 - Have DecodedStream keep references to the media queues of MDSM so we don't have to pass them every time.
author JW Wang <jwwang@mozilla.com>
Sat, 04 Jul 2015 09:30:15 +0800
changeset 252679 70e9ca5808581d62a91f605ef84a1e9528a8bb67
parent 252678 07ee075d8e6ccf8f731c3472e22e3688e29aa5d9
child 252680 24352a7d9b7321456c5fd109c07e7b6d87a6333d
push id 62213
push user jwwang@mozilla.com
push date Tue, 14 Jul 2015 02:59:29 +0000
treeherder mozilla-inbound@ed6f4b96c7fe
bugs 1182737
milestone 42.0a1
Bug 1182737. Part 1 - Have DecodedStream keep references to the media queues of MDSM so we don't have to pass them every time.
dom/media/DecodedStream.cpp
dom/media/DecodedStream.h
dom/media/MediaDecoderStateMachine.cpp
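
The patch below applies a simple refactoring: instead of threading the audio and video MediaQueues through SendData()/SendAudio()/SendVideo() on every call, DecodedStream binds references to the MDSM-owned queues once, in its constructor. A minimal standalone sketch of that pattern follows; the names here (SampleQueue, Consumer) are invented for illustration and are not the real MediaQueue/DecodedStream/MDSM classes or their APIs.

// Hypothetical, simplified illustration of the pattern this patch applies:
// the consumer takes references to producer-owned queues at construction
// time instead of receiving them as arguments on every call.
#include <deque>
#include <iostream>

template <typename T>
class SampleQueue {
public:
  void Push(T aSample) { mSamples.push_back(aSample); }
  bool IsEmpty() const { return mSamples.empty(); }
  T Pop() { T s = mSamples.front(); mSamples.pop_front(); return s; }
private:
  std::deque<T> mSamples;
};

class Consumer {
public:
  // Before the patch: SendData(SampleQueue<int>& aAudio, SampleQueue<int>& aVideo, ...)
  // After the patch: the queues are bound once, here.
  Consumer(SampleQueue<int>& aAudioQueue, SampleQueue<int>& aVideoQueue)
    : mAudioQueue(aAudioQueue), mVideoQueue(aVideoQueue) {}

  void SendData() {
    while (!mAudioQueue.IsEmpty()) {
      std::cout << "audio sample " << mAudioQueue.Pop() << '\n';
    }
    while (!mVideoQueue.IsEmpty()) {
      std::cout << "video frame " << mVideoQueue.Pop() << '\n';
    }
  }

private:
  // Reference members: the owner of the queues must outlive this object.
  SampleQueue<int>& mAudioQueue;
  SampleQueue<int>& mVideoQueue;
};

int main() {
  SampleQueue<int> audio, video;    // owned by the "state machine" side
  Consumer consumer(audio, video);  // bound once at construction
  audio.Push(1);
  video.Push(2);
  consumer.SendData();              // no queue arguments needed any more
}
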
--- a/dom/media/DecodedStream.cpp
+++ b/dom/media/DecodedStream.cpp
@@ -180,19 +180,22 @@ OutputStreamData::~OutputStreamData()
 void
 OutputStreamData::Init(DecodedStream* aDecodedStream, ProcessedMediaStream* aStream)
 {
   mStream = aStream;
   mListener = new OutputStreamListener(aDecodedStream, aStream);
   aStream->AddListener(mListener);
 }
 
-DecodedStream::DecodedStream()
+DecodedStream::DecodedStream(MediaQueue<AudioData>& aAudioQueue,
+                             MediaQueue<VideoData>& aVideoQueue)
   : mMonitor("DecodedStream::mMonitor")
   , mPlaying(false)
+  , mAudioQueue(aAudioQueue)
+  , mVideoQueue(aVideoQueue)
 {
   //
 }
 
 void
 DecodedStream::DestroyData()
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -449,50 +452,49 @@ SendStreamAudio(DecodedStreamData* aStre
   aOutput->ApplyVolume(aVolume);
 
   aStream->mNextAudioTime = aAudio->GetEndTime();
 }
 
 void
 DecodedStream::SendAudio(int64_t aStartTime,
                          const MediaInfo& aInfo,
-                         MediaQueue<AudioData>& aQueue,
                          double aVolume, bool aIsSameOrigin)
 {
   GetReentrantMonitor().AssertCurrentThreadIn();
 
   if (!aInfo.HasAudio()) {
     return;
   }
 
   AudioSegment output;
   uint32_t rate = aInfo.mAudio.mRate;
   nsAutoTArray<nsRefPtr<AudioData>,10> audio;
   TrackID audioTrackId = aInfo.mAudio.mTrackId;
   SourceMediaStream* sourceStream = mData->mStream;
 
   // It's OK to hold references to the AudioData because AudioData
   // is ref-counted.
-  aQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
+  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
   for (uint32_t i = 0; i < audio.Length(); ++i) {
     SendStreamAudio(mData.get(), aStartTime, audio[i], &output, rate, aVolume);
   }
 
   if (!aIsSameOrigin) {
     output.ReplaceWithDisabled();
   }
 
   // |mNextAudioTime| is updated as we process each audio sample in
   // SendStreamAudio(). This is consistent with how |mNextVideoTime|
   // is updated for video samples.
   if (output.GetDuration() > 0) {
     sourceStream->AppendToTrack(audioTrackId, &output);
   }
 
-  if (aQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
+  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
     sourceStream->EndTrack(audioTrackId);
     mData->mHaveSentFinishAudio = true;
   }
 }
 
 static void
 WriteVideoToMediaStream(MediaStream* aStream,
                         layers::Image* aImage,
@@ -517,33 +519,32 @@ ZeroDurationAtLastChunk(VideoSegment& aI
   StreamTime lastVideoStratTime;
   aInput.GetLastFrame(&lastVideoStratTime);
   return lastVideoStratTime == aInput.GetDuration();
 }
 
 void
 DecodedStream::SendVideo(int64_t aStartTime,
                          const MediaInfo& aInfo,
-                         MediaQueue<VideoData>& aQueue,
                          bool aIsSameOrigin)
 {
   GetReentrantMonitor().AssertCurrentThreadIn();
 
   if (!aInfo.HasVideo()) {
     return;
   }
 
   VideoSegment output;
   TrackID videoTrackId = aInfo.mVideo.mTrackId;
   nsAutoTArray<nsRefPtr<VideoData>, 10> video;
   SourceMediaStream* sourceStream = mData->mStream;
 
   // It's OK to hold references to the VideoData because VideoData
   // is ref-counted.
-  aQueue.GetElementsAfter(mData->mNextVideoTime, &video);
+  mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
 
   for (uint32_t i = 0; i < video.Length(); ++i) {
     VideoData* v = video[i];
 
     if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here
       // which is fine, it just means there's no video.
 
@@ -575,17 +576,17 @@ DecodedStream::SendVideo(int64_t aStartT
   if (!aIsSameOrigin) {
     output.ReplaceWithDisabled();
   }
 
   if (output.GetDuration() > 0) {
     sourceStream->AppendToTrack(videoTrackId, &output);
   }
 
-  if (aQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
+  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
     if (mData->mEOSVideoCompensation) {
       VideoSegment endSegment;
       // Calculate the deviation clock time from DecodedStream.
       int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
       WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
           mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
           mData->mLastVideoImageDisplaySize, &endSegment);
       mData->mNextVideoTime += deviation_usec;
@@ -622,29 +623,27 @@ DecodedStream::AdvanceTracks(int64_t aSt
   if (!mData->mHaveSentFinish) {
     mData->mStream->AdvanceKnownTracksTime(endPosition);
   }
 }
 
 bool
 DecodedStream::SendData(int64_t aStartTime,
                         const MediaInfo& aInfo,
-                        MediaQueue<AudioData>& aAudioQueue,
-                        MediaQueue<VideoData>& aVideoQueue,
                         double aVolume, bool aIsSameOrigin)
 {
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
 
   InitTracks(aStartTime, aInfo);
-  SendAudio(aStartTime, aInfo, aAudioQueue, aVolume, aIsSameOrigin);
-  SendVideo(aStartTime, aInfo, aVideoQueue, aIsSameOrigin);
+  SendAudio(aStartTime, aInfo, aVolume, aIsSameOrigin);
+  SendVideo(aStartTime, aInfo, aIsSameOrigin);
   AdvanceTracks(aStartTime, aInfo);
 
-  bool finished = (!aInfo.HasAudio() || aAudioQueue.IsFinished()) &&
-                  (!aInfo.HasVideo() || aVideoQueue.IsFinished());
+  bool finished = (!aInfo.HasAudio() || mAudioQueue.IsFinished()) &&
+                  (!aInfo.HasVideo() || mVideoQueue.IsFinished());
 
   if (finished && !mData->mHaveSentFinish) {
     mData->mHaveSentFinish = true;
     mData->mStream->Finish();
   }
 
   return finished;
 }
--- a/dom/media/DecodedStream.h
+++ b/dom/media/DecodedStream.h
@@ -91,67 +91,66 @@ public:
   // mPort connects DecodedStreamData::mStream to our mStream.
   nsRefPtr<MediaInputPort> mPort;
   nsRefPtr<OutputStreamListener> mListener;
 };
 
 class DecodedStream {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStream);
 public:
-  DecodedStream();
+  DecodedStream(MediaQueue<AudioData>& aAudioQueue,
+                MediaQueue<VideoData>& aVideoQueue);
   void DestroyData();
   void RecreateData();
   void Connect(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
   void Remove(MediaStream* aStream);
   void SetPlaying(bool aPlaying);
   bool HaveEnoughAudio(const MediaInfo& aInfo) const;
   bool HaveEnoughVideo(const MediaInfo& aInfo) const;
   CheckedInt64 AudioEndTime(int64_t aStartTime, uint32_t aRate) const;
   int64_t GetPosition() const;
   bool IsFinished() const;
 
   // Return true if stream is finished.
   bool SendData(int64_t aStartTime,
                 const MediaInfo& aInfo,
-                MediaQueue<AudioData>& aAudioQueue,
-                MediaQueue<VideoData>& aVideoQueue,
                 double aVolume, bool aIsSameOrigin);
 
 protected:
   virtual ~DecodedStream() {}
 
 private:
   ReentrantMonitor& GetReentrantMonitor() const;
   void RecreateData(MediaStreamGraph* aGraph);
   void Connect(OutputStreamData* aStream);
   nsTArray<OutputStreamData>& OutputStreams();
   void InitTracks(int64_t aStartTime, const MediaInfo& aInfo);
   void AdvanceTracks(int64_t aStartTime, const MediaInfo& aInfo);
 
   void SendAudio(int64_t aStartTime,
                  const MediaInfo& aInfo,
-                 MediaQueue<AudioData>& aQueue,
                  double aVolume, bool aIsSameOrigin);
 
   void SendVideo(int64_t aStartTime,
                  const MediaInfo& aInfo,
-                 MediaQueue<VideoData>& aQueue,
                  bool aIsSameOrigin);
 
   UniquePtr<DecodedStreamData> mData;
   // Data about MediaStreams that are being fed by the decoder.
   nsTArray<OutputStreamData> mOutputStreams;
 
   // TODO: This is a temp solution to get rid of decoder monitor on the main
   // thread in MDSM::AddOutputStream and MDSM::RecreateDecodedStream as
   // required by bug 1146482. DecodedStream needs to release monitor before
   // calling back into MDSM functions in order to prevent deadlocks.
   //
   // Please move all capture-stream related code from MDSM into DecodedStream
   // and apply "dispatch + mirroring" to get rid of this monitor in the future.
   mutable ReentrantMonitor mMonitor;
 
   bool mPlaying;
+  MediaQueue<AudioData>& mAudioQueue;
+  MediaQueue<VideoData>& mVideoQueue;
 };
 
 } // namespace mozilla
 
 #endif // DecodedStream_h_
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -239,17 +239,17 @@ MediaDecoderStateMachine::MediaDecoderSt
   mDecodeToSeekTarget(false),
   mCurrentTimeBeforeSeek(0),
   mCorruptFrames(30),
   mDisabledHardwareAcceleration(false),
   mDecodingFrozenAtStateDecoding(false),
   mSentLoadedMetadataEvent(false),
   mSentFirstFrameLoadedEvent(false),
   mSentPlaybackEndedEvent(false),
-  mDecodedStream(new DecodedStream())
+  mDecodedStream(new DecodedStream(mAudioQueue, mVideoQueue))
 {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   // Dispatch initialization that needs to happen on that task queue.
   nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::InitializationTask);
   mTaskQueue->Dispatch(r.forget());
 
@@ -367,18 +367,17 @@ int64_t MediaDecoderStateMachine::GetDec
 
 void MediaDecoderStateMachine::SendStreamData()
 {
   MOZ_ASSERT(OnTaskQueue());
   AssertCurrentThreadInMonitor();
   MOZ_ASSERT(!mAudioSink, "Should've been stopped in RunStateMachine()");
 
   bool finished = mDecodedStream->SendData(
-      mStreamStartTime, mInfo, AudioQueue(), VideoQueue(),
-      mVolume, mDecoder->IsSameOriginMedia());
+      mStreamStartTime, mInfo, mVolume, mDecoder->IsSameOriginMedia());
 
   if (mInfo.HasAudio()) {
     CheckedInt64 playedUsecs = mDecodedStream->AudioEndTime(
         mStreamStartTime, mInfo.mAudio.mRate);
     if (playedUsecs.isValid()) {
       OnAudioEndTimeUpdate(playedUsecs.value());
     }
   }
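
A note on the constructor change above: because DecodedStream now stores mAudioQueue and mVideoQueue as reference members bound in the MDSM member-initializer list, the queues must outlive the DecodedStream, and C++ initializes members in declaration order rather than initializer-list order. The sketch below illustrates that constraint with invented names (Queue, Worker, Owner standing in for MediaQueue, DecodedStream, and MDSM); it is not the actual Firefox code.

#include <vector>

struct Queue {
  std::vector<int> items;
};

struct Worker {
  explicit Worker(Queue& aQueue) : mQueue(aQueue) {}
  Queue& mQueue;  // non-owning reference; the Owner's queue must outlive this Worker
};

struct Owner {
  // Declaration order matters: mQueue is declared (and therefore constructed)
  // before mWorker, so the reference handed to Worker is valid by the time
  // Worker could use it.
  Queue mQueue;
  Worker mWorker;

  Owner() : mWorker(mQueue) {}
};

int main() {
  Owner owner;
  owner.mQueue.items.push_back(42);
  return owner.mWorker.mQueue.items.size() == 1 ? 0 : 1;
}
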