Bug 1171314: P2. Fill SharedTrackInfo data in MSE samples. r=cpearce
author: Jean-Yves Avenard <jyavenard@mozilla.com>
date: Fri, 12 Jun 2015 15:18:50 +1000
changeset: 280480:40c9845aff7d882bf72a4c258a2a02ff2d8e3eef
parent: 280479:c1b8e5419562452499cb65d1a4a20f5973983058
child: 280481:5a79d2d62dc4d0ee76023085ff57f2c6137b93a5
push id: 4932
push user: jlund@mozilla.com
push date: Mon, 10 Aug 2015 18:23:06 +0000
reviewers: cpearce
bugs: 1171314
milestone: 41.0a1
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/mediasource/TrackBuffersManager.h
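
For context, here is a minimal standalone sketch of the pattern this patch introduces. The types below (TrackInfo, SharedTrackInfo, MediaRawData, TrackData) and their members are simplified stand-ins for the real Gecko classes, and std::atomic / std::shared_ptr stand in for Atomic<> / nsRefPtr<>; this is illustrative, not the actual Gecko API. The idea mirrored from the diff: each init segment draws a fresh stream source ID from an atomic counter, the parsed track metadata is wrapped in a ref-counted SharedTrackInfo, mInfo keeps the first init segment's metadata while mLastInfo tracks the most recent one, and every sample appended to the track buffer records the SharedTrackInfo that was current at append time so downstream consumers can detect a configuration change by comparing stream IDs.

// Sketch only: simplified stand-ins for the Gecko types; names and members
// are illustrative assumptions, not the real declarations.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct TrackInfo {              // per-track metadata parsed from an init segment
  std::string mMimeType;
};

// Ref-counted wrapper pairing track metadata with the stream source ID of the
// init segment it came from.
struct SharedTrackInfo {
  SharedTrackInfo(const TrackInfo& aInfo, uint32_t aStreamSourceID)
    : mInfo(aInfo), mStreamSourceID(aStreamSourceID) {}
  TrackInfo mInfo;
  uint32_t mStreamSourceID;
};

struct MediaRawData {           // a demuxed sample
  int64_t mTime = 0;
  std::shared_ptr<SharedTrackInfo> mTrackInfo;  // filled by the track buffer
};

// One global, monotonically increasing ID; each init segment bumps it.
static std::atomic<uint32_t> sStreamSourceID{0u};

struct TrackData {
  std::shared_ptr<SharedTrackInfo> mInfo;      // first init segment's metadata
  std::shared_ptr<SharedTrackInfo> mLastInfo;  // most recent init segment's metadata
  std::vector<std::shared_ptr<MediaRawData>> mSamples;
};

// Analogue of OnDemuxerInitDone(): wrap the parsed TrackInfo for this init
// segment and remember it as the "last" info for the track.
void OnInitSegment(TrackData& aTrack, const TrackInfo& aParsed) {
  uint32_t streamID = sStreamSourceID++;
  auto shared = std::make_shared<SharedTrackInfo>(aParsed, streamID);
  if (!aTrack.mInfo) {
    aTrack.mInfo = shared;      // first initialization segment received
  }
  aTrack.mLastInfo = shared;
}

// Analogue of ProcessFrame(): every appended sample records the
// SharedTrackInfo that was current when it entered the track buffer.
void ProcessFrame(TrackData& aTrack, std::shared_ptr<MediaRawData> aSample) {
  aSample->mTrackInfo = aTrack.mLastInfo;
  aTrack.mSamples.push_back(std::move(aSample));
}

int main() {
  TrackData video;
  OnInitSegment(video, TrackInfo{"video/mp4; codecs=avc1"});
  ProcessFrame(video, std::make_shared<MediaRawData>());

  OnInitSegment(video, TrackInfo{"video/mp4; codecs=avc1"});  // new init segment
  ProcessFrame(video, std::make_shared<MediaRawData>());

  // Samples appended after different init segments carry different stream IDs,
  // which is how a consumer can notice the configuration change.
  std::cout << video.mSamples[0]->mTrackInfo->mStreamSourceID << " vs "
            << video.mSamples[1]->mTrackInfo->mStreamSourceID << "\n";
}
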
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -32,16 +32,18 @@ AppendStateToStr(TrackBuffersManager::Ap
       return "PARSING_INIT_SEGMENT";
     case TrackBuffersManager::AppendState::PARSING_MEDIA_SEGMENT:
       return "PARSING_MEDIA_SEGMENT";
     default:
       return "IMPOSSIBLE";
   }
 }
 
+static Atomic<uint32_t> sStreamSourceID(0u);
+
 TrackBuffersManager::TrackBuffersManager(dom::SourceBuffer* aParent, MediaSourceDecoder* aParentDecoder, const nsACString& aType)
   : mInputBuffer(new MediaByteBuffer)
   , mAppendState(AppendState::WAITING_FOR_SEGMENT)
   , mBufferFull(false)
   , mFirstInitializationSegmentReceived(false)
   , mActiveTrack(false)
   , mType(aType)
   , mParser(ContainerParser::CreateForMIMEType(aType))
@@ -789,16 +791,19 @@ TrackBuffersManager::OnDemuxerInitDone(n
 
     mVideoTracks.mLongestFrameDuration = mVideoTracks.mLastFrameDuration;
     mAudioTracks.mLongestFrameDuration = mAudioTracks.mLastFrameDuration;
   }
 
   // 4. Let active track flag equal false.
   mActiveTrack = false;
 
+  // Get a new stream source ID for the tracks of this init segment.
+  uint32_t streamID = sStreamSourceID++;
+
   // 5. If the first initialization segment received flag is false, then run the following steps:
   if (!mFirstInitializationSegmentReceived) {
     mAudioTracks.mNumTracks = numAudios;
     // TODO:
     // 1. If the initialization segment contains tracks with codecs the user agent
     // does not support, then run the append error algorithm with the decode
     // error parameter set to true and abort these steps.
 
@@ -824,17 +829,18 @@ TrackBuffersManager::OnDemuxerInitDone(n
       //     2. Set active track flag to true.
       mActiveTrack = true;
       //   8. Add new audio track to the audioTracks attribute on this SourceBuffer object.
       //   9. Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object referenced by the audioTracks attribute on this SourceBuffer object.
       //   10. Add new audio track to the audioTracks attribute on the HTMLMediaElement.
       //   11. Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object referenced by the audioTracks attribute on the HTMLMediaElement.
       mAudioTracks.mBuffers.AppendElement(TrackBuffer());
       // 10. Add the track description for this track to the track buffer.
-      mAudioTracks.mInfo = info.mAudio.Clone();
+      mAudioTracks.mInfo = new SharedTrackInfo(info.mAudio, streamID);
+      mAudioTracks.mLastInfo = mAudioTracks.mInfo;
     }
 
     mVideoTracks.mNumTracks = numVideos;
     // 3. For each video track in the initialization segment, run following steps:
     // for (uint32_t i = 0; i < numVideos; i++) {
     if (numVideos) {
       // 1. Let video byte stream track ID be the Track ID for the current track being processed.
       // 2. Let video language be a BCP 47 language tag for the language specified in the initialization segment for this track or an empty string if no language info is present.
@@ -855,24 +861,28 @@ TrackBuffersManager::OnDemuxerInitDone(n
       //     2. Set active track flag to true.
       mActiveTrack = true;
       //   8. Add new video track to the videoTracks attribute on this SourceBuffer object.
       //   9. Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object referenced by the videoTracks attribute on this SourceBuffer object.
       //   10. Add new video track to the videoTracks attribute on the HTMLMediaElement.
       //   11. Queue a task to fire a trusted event named addtrack, that does not bubble and is not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object referenced by the videoTracks attribute on the HTMLMediaElement.
       mVideoTracks.mBuffers.AppendElement(TrackBuffer());
       // 10. Add the track description for this track to the track buffer.
-      mVideoTracks.mInfo = info.mVideo.Clone();
+      mVideoTracks.mInfo = new SharedTrackInfo(info.mVideo, streamID);
+      mVideoTracks.mLastInfo = mVideoTracks.mInfo;
     }
     // 4. For each text track in the initialization segment, run following steps:
     // 5. If active track flag equals true, then run the following steps:
     // This is handled by SourceBuffer once the promise is resolved.
 
     // 6. Set first initialization segment received flag to true.
     mFirstInitializationSegmentReceived = true;
+  } else {
+    mAudioTracks.mLastInfo = new SharedTrackInfo(info.mAudio, streamID);
+    mVideoTracks.mLastInfo = new SharedTrackInfo(info.mVideo, streamID);
   }
 
   // TODO CHECK ENCRYPTION
   UniquePtr<EncryptionInfo> crypto = mInputDemuxer->GetCrypto();
   if (crypto && crypto->IsEncrypted()) {
 #ifdef MOZ_EME
     // Try and dispatch 'encrypted'. Won't go if ready state still HAVE_NOTHING.
     for (uint32_t i = 0; i < crypto->mInitDatas.Length(); i++) {
@@ -1311,16 +1321,18 @@ TrackBuffersManager::ProcessFrame(MediaR
     }
     // Update our buffered range to exclude the range just removed.
     trackBuffer.mBufferedRanges -= removedInterval;
   }
 
   // 16. Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
   aSample->mTime = presentationTimestamp.ToMicroseconds();
   aSample->mTimecode = decodeTimestamp.ToMicroseconds();
+  aSample->mTrackInfo = trackBuffer.mLastInfo;
+
   if (firstRemovedIndex >= 0) {
     data.InsertElementAt(firstRemovedIndex, aSample);
   } else {
     if (data.IsEmpty() || aSample->mTimecode > data.LastElement()->mTimecode) {
       data.AppendElement(aSample);
     } else {
       // Find where to insert frame.
       for (uint32_t i = 0; i < data.Length(); i++) {
--- a/dom/media/mediasource/TrackBuffersManager.h
+++ b/dom/media/mediasource/TrackBuffersManager.h
@@ -212,17 +212,19 @@ private:
     // We only manage a single track of each type at this time.
     nsTArray<TrackBuffer> mBuffers;
     // Track buffer ranges variable that represents the presentation time ranges
     // occupied by the coded frames currently stored in the track buffer.
     TimeIntervals mBufferedRanges;
     // Byte size of all samples contained in this track buffer.
     uint32_t mSizeBuffer;
     // TrackInfo of the first metadata received.
-    UniquePtr<TrackInfo> mInfo;
+    nsRefPtr<SharedTrackInfo> mInfo;
+    // TrackInfo of the last metadata parsed (updated with each init segment).
+    nsRefPtr<SharedTrackInfo> mLastInfo;
   };
 
   bool ProcessFrame(MediaRawData* aSample, TrackData& aTrackData);
   void RejectProcessing(nsresult aRejectValue, const char* aName);
   void ResolveProcessing(bool aResolveValue, const char* aName);
   MediaPromiseRequestHolder<CodedFrameProcessingPromise> mProcessingRequest;
   MediaPromiseHolder<CodedFrameProcessingPromise> mProcessingPromise;