Bug 938022. Part 1: Update mAudioEndTime from SendStreamData. r=cpearce
author Robert O'Callahan <robert@ocallahan.org>
Sat, 23 Nov 2013 00:33:24 +1300
changeset 158174 c7994af691f501e9fe93e6843ba1a664e85f6542
parent 158173 105c2046f92c3fc552d8526be326faaa18484eaa
child 158175 4d75ae037706f5f83f88e69b4935d6496979e982
push id 36943
push user rocallahan@mozilla.com
push date Mon, 02 Dec 2013 01:08:51 +0000
treeherder mozilla-inbound@08fd80f4b2bf [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers cpearce
bugs 938022
milestone 28.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 938022. Part 1: Update mAudioEndTime from SendStreamData. r=cpearce
content/media/MediaDecoderStateMachine.cpp
content/media/test/mochitest.ini
content/media/test/test_streams_element_capture_playback.html
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -622,119 +622,117 @@ void MediaDecoderStateMachine::SendStrea
 
   DecodedStreamData* stream = mDecoder->GetDecodedStream();
   if (!stream)
     return;
 
   if (mState == DECODER_STATE_DECODING_METADATA)
     return;
 
-  if (!mDecoder->IsSameOriginMedia()) {
-    return;
-  }
-
   // If there's still an audio thread alive, then we can't send any stream
   // data yet since both SendStreamData and the audio thread want to be in
   // charge of popping the audio queue. We're waiting for the audio thread
   // to die before sending anything to our stream.
   if (mAudioThread)
     return;
 
   int64_t minLastAudioPacketTime = INT64_MAX;
-  SourceMediaStream* mediaStream = stream->mStream;
-  StreamTime endPosition = 0;
-
-  if (!stream->mStreamInitialized) {
-    if (mInfo.HasAudio()) {
-      AudioSegment* audio = new AudioSegment();
-      mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudio.mRate, 0, audio);
-    }
-    if (mInfo.HasVideo()) {
-      VideoSegment* video = new VideoSegment();
-      mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
-    }
-    stream->mStreamInitialized = true;
-  }
-
-  if (mInfo.HasAudio()) {
-    nsAutoTArray<AudioData*,10> audio;
-    // It's OK to hold references to the AudioData because while audio
-    // is captured, only the decoder thread pops from the queue (see below).
-    mReader->AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
-    AudioSegment output;
-    for (uint32_t i = 0; i < audio.Length(); ++i) {
-      SendStreamAudio(audio[i], stream, &output);
-    }
-    if (output.GetDuration() > 0) {
-      mediaStream->AppendToTrack(TRACK_AUDIO, &output);
-    }
-    if (mReader->AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
-      mediaStream->EndTrack(TRACK_AUDIO);
-      stream->mHaveSentFinishAudio = true;
-    }
-    minLastAudioPacketTime = std::min(minLastAudioPacketTime, stream->mLastAudioPacketTime);
-    endPosition = std::max(endPosition,
-        TicksToTimeRoundDown(mInfo.mAudio.mRate, stream->mAudioFramesWritten));
-  }
-
-  if (mInfo.HasVideo()) {
-    nsAutoTArray<VideoData*,10> video;
-    // It's OK to hold references to the VideoData only the decoder thread
-    // pops from the queue.
-    mReader->VideoQueue().GetElementsAfter(stream->mNextVideoTime + mStartTime, &video);
-    VideoSegment output;
-    for (uint32_t i = 0; i < video.Length(); ++i) {
-      VideoData* v = video[i];
-      if (stream->mNextVideoTime + mStartTime < v->mTime) {
-        DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder writing last video to MediaStream %p for %lld ms",
-                                   mDecoder.get(), mediaStream,
-                                   v->mTime - (stream->mNextVideoTime + mStartTime)));
-        // Write last video frame to catch up. mLastVideoImage can be null here
-        // which is fine, it just means there's no video.
-        WriteVideoToMediaStream(stream->mLastVideoImage,
-          v->mTime - (stream->mNextVideoTime + mStartTime), stream->mLastVideoImageDisplaySize,
-            &output);
-        stream->mNextVideoTime = v->mTime - mStartTime;
-      }
-      if (stream->mNextVideoTime + mStartTime < v->GetEndTime()) {
-        DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder writing video frame %lld to MediaStream %p for %lld ms",
-                                   mDecoder.get(), v->mTime, mediaStream,
-                                   v->GetEndTime() - (stream->mNextVideoTime + mStartTime)));
-        WriteVideoToMediaStream(v->mImage,
-            v->GetEndTime() - (stream->mNextVideoTime + mStartTime), v->mDisplay,
-            &output);
-        stream->mNextVideoTime = v->GetEndTime() - mStartTime;
-        stream->mLastVideoImage = v->mImage;
-        stream->mLastVideoImageDisplaySize = v->mDisplay;
-      } else {
-        DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder skipping writing video frame %lld to MediaStream",
-                                   mDecoder.get(), v->mTime));
-      }
-    }
-    if (output.GetDuration() > 0) {
-      mediaStream->AppendToTrack(TRACK_VIDEO, &output);
-    }
-    if (mReader->VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
-      mediaStream->EndTrack(TRACK_VIDEO);
-      stream->mHaveSentFinishVideo = true;
-    }
-    endPosition = std::max(endPosition,
-        TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime));
-  }
-
-  if (!stream->mHaveSentFinish) {
-    stream->mStream->AdvanceKnownTracksTime(endPosition);
-  }
-
   bool finished =
       (!mInfo.HasAudio() || mReader->AudioQueue().IsFinished()) &&
       (!mInfo.HasVideo() || mReader->VideoQueue().IsFinished());
-  if (finished && !stream->mHaveSentFinish) {
-    stream->mHaveSentFinish = true;
-    stream->mStream->Finish();
+  if (mDecoder->IsSameOriginMedia()) {
+    SourceMediaStream* mediaStream = stream->mStream;
+    StreamTime endPosition = 0;
+
+    if (!stream->mStreamInitialized) {
+      if (mInfo.HasAudio()) {
+        AudioSegment* audio = new AudioSegment();
+        mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudio.mRate, 0, audio);
+      }
+      if (mInfo.HasVideo()) {
+        VideoSegment* video = new VideoSegment();
+        mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
+      }
+      stream->mStreamInitialized = true;
+    }
+
+    if (mInfo.HasAudio()) {
+      nsAutoTArray<AudioData*,10> audio;
+      // It's OK to hold references to the AudioData because while audio
+      // is captured, only the decoder thread pops from the queue (see below).
+      mReader->AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
+      AudioSegment output;
+      for (uint32_t i = 0; i < audio.Length(); ++i) {
+        SendStreamAudio(audio[i], stream, &output);
+      }
+      if (output.GetDuration() > 0) {
+        mediaStream->AppendToTrack(TRACK_AUDIO, &output);
+      }
+      if (mReader->AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
+        mediaStream->EndTrack(TRACK_AUDIO);
+        stream->mHaveSentFinishAudio = true;
+      }
+      minLastAudioPacketTime = std::min(minLastAudioPacketTime, stream->mLastAudioPacketTime);
+      endPosition = std::max(endPosition,
+          TicksToTimeRoundDown(mInfo.mAudio.mRate, stream->mAudioFramesWritten));
+    }
+
+    if (mInfo.HasVideo()) {
+      nsAutoTArray<VideoData*,10> video;
+      // It's OK to hold references to the VideoData because only the
+      // decoder thread pops from the queue.
+      mReader->VideoQueue().GetElementsAfter(stream->mNextVideoTime + mStartTime, &video);
+      VideoSegment output;
+      for (uint32_t i = 0; i < video.Length(); ++i) {
+        VideoData* v = video[i];
+        if (stream->mNextVideoTime + mStartTime < v->mTime) {
+          DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder writing last video to MediaStream %p for %lld ms",
+                                     mDecoder.get(), mediaStream,
+                                     v->mTime - (stream->mNextVideoTime + mStartTime)));
+          // Write last video frame to catch up. mLastVideoImage can be null here
+          // which is fine, it just means there's no video.
+          WriteVideoToMediaStream(stream->mLastVideoImage,
+            v->mTime - (stream->mNextVideoTime + mStartTime), stream->mLastVideoImageDisplaySize,
+              &output);
+          stream->mNextVideoTime = v->mTime - mStartTime;
+        }
+        if (stream->mNextVideoTime + mStartTime < v->GetEndTime()) {
+          DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder writing video frame %lld to MediaStream %p for %lld ms",
+                                     mDecoder.get(), v->mTime, mediaStream,
+                                     v->GetEndTime() - (stream->mNextVideoTime + mStartTime)));
+          WriteVideoToMediaStream(v->mImage,
+              v->GetEndTime() - (stream->mNextVideoTime + mStartTime), v->mDisplay,
+              &output);
+          stream->mNextVideoTime = v->GetEndTime() - mStartTime;
+          stream->mLastVideoImage = v->mImage;
+          stream->mLastVideoImageDisplaySize = v->mDisplay;
+        } else {
+          DECODER_LOG(PR_LOG_DEBUG, ("%p Decoder skipping writing video frame %lld to MediaStream",
+                                     mDecoder.get(), v->mTime));
+        }
+      }
+      if (output.GetDuration() > 0) {
+        mediaStream->AppendToTrack(TRACK_VIDEO, &output);
+      }
+      if (mReader->VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
+        mediaStream->EndTrack(TRACK_VIDEO);
+        stream->mHaveSentFinishVideo = true;
+      }
+      endPosition = std::max(endPosition,
+          TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime));
+    }
+
+    if (!stream->mHaveSentFinish) {
+      stream->mStream->AdvanceKnownTracksTime(endPosition);
+    }
+
+    if (finished && !stream->mHaveSentFinish) {
+      stream->mHaveSentFinish = true;
+      stream->mStream->Finish();
+    }
   }
 
   if (mAudioCaptured) {
     // Discard audio packets that are no longer needed.
     while (true) {
       nsAutoPtr<AudioData> a(mReader->AudioQueue().PopFront());
       if (!a)
         break;
@@ -743,16 +741,17 @@ void MediaDecoderStateMachine::SendStrea
       // create a new output stream and we actually don't have the audio for the
       // very start. That's OK, we'll play silence instead for a brief moment.
       // That's OK. Seeking to this time would have a similar issue for such
       // badly muxed resources.
       if (a->GetEndTime() >= minLastAudioPacketTime) {
         mReader->AudioQueue().PushFront(a.forget());
         break;
       }
+      mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime());
     }
 
     if (finished) {
       mAudioCompleted = true;
       UpdateReadyState();
     }
   }
 }
--- a/content/media/test/mochitest.ini
+++ b/content/media/test/mochitest.ini
@@ -252,16 +252,17 @@ support-files =
 [test_seekLies.html]
 [test_media_sniffer.html]
 [test_streams_srcObject.html]
 [test_reset_src.html]
 [test_streams_autoplay.html]
 [test_streams_element_capture.html]
 [test_streams_element_capture_reset.html]
 [test_streams_element_capture_createObjectURL.html]
+[test_streams_element_capture_playback.html]
 [test_streams_gc.html]
 [test_streams_tracks.html]
 [test_texttrack.html]
 [test_texttrackcue.html]
 [test_trackevent.html]
 [test_texttrackregion.html]
 [test_timeupdate_small_files.html]
 [test_unseekable.html]
new file mode 100644
--- /dev/null
+++ b/content/media/test/test_streams_element_capture_playback.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test that capturing a stream doesn't stop the underlying element from firing events</title>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+  <script type="text/javascript" src="manifest.js"></script>
+</head>
+<body>
+<audio id="a"></audio>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+var a = document.getElementById('a');
+var validTimeUpdate = false;
+
+function startTest() {
+  a.src = "big.wav";
+  var context = new AudioContext();
+  var node = context.createMediaElementSource(a);
+  node.connect(context.destination);
+  a.addEventListener("timeupdate", function() {
+    if (a.currentTime > 0.0 && a.currentTime < 5.0 && !validTimeUpdate) {
+      validTimeUpdate = true;
+      ok(true, "Received reasonable currentTime in a timeupdate");
+      SimpleTest.finish();
+    }
+  });
+  a.addEventListener("ended", function() {
+    if (!validTimeUpdate) {
+      ok(false, "Received reasonable currentTime in a timeupdate");
+      SimpleTest.finish();
+    }
+  });
+  a.play();
+}
+
+if (a.canPlayType("audio/wave")) {
+  startTest();
+} else {
+  todo(false, "No playable audio");
+}
+</script>
+</pre>
+</body>
+</html>