Bug 1274626 part 4 - make the blank decoders adjust the start time of the first processed sample; r?jya (draft)
author Kaku Kuo <tkuo@mozilla.com>
Thu, 28 Jul 2016 22:28:43 +0800
changeset 393764 1d106018774beadfbcce3a33dcd5aabf3d310f5e
parent 393763 14b1efc06962991b848af9768dba865e990c4bde
child 393765 37e544b066b22f71e6a8f2ac7259fc85107ae294
child 393771 5503977db045b22043d3bc7ec36358c1a17b60b5
child 393774 7fe6b7c20246f02e2ed0a15c13952963bd4a8885
push id 24415
push user tkuo@mozilla.com
push date Thu, 28 Jul 2016 14:56:24 +0000
reviewers jya
bugs 1274626
milestone 50.0a1
Bug 1274626 part 4 - make the blank decoders adjust the start time of the first processed sample; r?jya

Pass the time information of the original decoder's latest decoded sample into the newly created blank decoder so that the blank decoder can adjust its first sample's time.

MozReview-Commit-ID: 2HhR6WpSS1f
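For illustration only (not part of the patch): a minimal standalone sketch of the arithmetic that the new MediaData::AdjustStartTimeAndDuration performs, using plain int64_t microseconds in place of media::TimeUnit and omitting the validity/overflow checks. The Sample struct, the free function, and the timestamps below are hypothetical; the point is that moving the first blank sample's start back to the previous decoder's end time keeps its end time fixed by growing its duration, so the blank output covers the gap left by the original decoder.

// Standalone sketch (not Gecko code) of the start-time adjustment.
#include <cstdint>
#include <cstdio>

struct Sample {
  int64_t mTime;      // start time in microseconds
  int64_t mDuration;  // duration in microseconds
};

// Shift the sample's start time to aNewStartTime while keeping its end time
// fixed; reject the change if the resulting duration would be non-positive.
bool AdjustStartTimeAndDuration(Sample& aSample, int64_t aNewStartTime) {
  const int64_t delta = aSample.mTime - aNewStartTime;
  const int64_t newDuration = aSample.mDuration + delta;
  if (newDuration <= 0) {
    return false;
  }
  aSample.mTime = aNewStartTime;
  aSample.mDuration = newDuration;
  return true;
}

int main() {
  // Hypothetical numbers: the original decoder's last output ended at
  // t = 100000us, and the blank decoder's first input starts at t = 133000us.
  // Adjusting the start back to 100000us stretches the duration to 66000us,
  // so the end time (166000us) is unchanged.
  Sample first{133000, 33000};
  if (AdjustStartTimeAndDuration(first, 100000)) {
    std::printf("start=%lld duration=%lld\n",
                (long long)first.mTime, (long long)first.mDuration);
  }
  return 0;
}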
dom/media/MediaData.h
dom/media/MediaFormatReader.cpp
dom/media/MediaFormatReader.h
dom/media/platforms/PlatformDecoderModule.h
dom/media/platforms/agnostic/BlankDecoderModule.cpp
dom/media/platforms/wrappers/H264Converter.cpp
dom/media/platforms/wrappers/H264Converter.h
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -13,16 +13,17 @@
 #include "nsIMemoryReporter.h"
 #include "SharedBuffer.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/UniquePtrExtensions.h"
 #include "nsTArray.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/PodOperations.h"
+#include "TimeUnits.h"
 
 namespace mozilla {
 
 namespace layers {
 class Image;
 class ImageContainer;
 } // namespace layers
 
@@ -263,16 +264,18 @@ typedef AlignedBuffer<uint8_t> AlignedBy
 typedef AlignedBuffer<float> AlignedFloatBuffer;
 typedef AlignedBuffer<int16_t> AlignedShortBuffer;
 typedef AlignedBuffer<AudioDataValue> AlignedAudioBuffer;
 
 // Container that holds media samples.
 class MediaData {
 public:
 
+  using TimeUnit = media::TimeUnit;
+
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaData)
 
   enum Type {
     AUDIO_DATA = 0,
     VIDEO_DATA,
     RAW_DATA,
     NULL_DATA
   };
@@ -320,16 +323,34 @@ public:
   int64_t GetEndTime() const { return mTime + mDuration; }
 
   bool AdjustForStartTime(int64_t aStartTime)
   {
     mTime = mTime - aStartTime;
     return mTime >= 0;
   }
 
+  bool AdjustStartTimeAndDuration(const TimeUnit& aNewStartTime)
+  {
+    MOZ_ASSERT(aNewStartTime.IsValid());
+
+    const TimeUnit delta = TimeUnit::FromMicroseconds(mTime) - aNewStartTime;
+    const TimeUnit newDuration = TimeUnit::FromMicroseconds(mDuration) + delta;
+
+    if (!newDuration.IsValid() || newDuration.IsInfinite() ||
+        newDuration <= TimeUnit::FromMicroseconds(0)) {
+      return false;
+    }
+
+    mTime = aNewStartTime.ToMicroseconds();
+    mDuration = newDuration.ToMicroseconds();
+
+    return true;
+  }
+
   template <typename ReturnType>
   const ReturnType* As() const
   {
     MOZ_ASSERT(this->mType == ReturnType::sType);
     return static_cast<const ReturnType*>(this);
   }
 
   template <typename ReturnType>
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -411,31 +411,33 @@ MediaFormatReader::EnsureDecoderCreated(
   MonitorAutoLock mon(decoder.mMonitor);
 
   switch (aTrack) {
     case TrackType::kAudioTrack: {
       decoder.mDecoder = mPlatform->CreateDecoder({
         decoder.mInfo ? *decoder.mInfo->GetAsAudioInfo() : mInfo.mAudio,
         decoder.mTaskQueue,
         decoder.mCallback.get(),
-        mCrashHelper
+        mCrashHelper,
+        decoder.LastDecodedSampleTime().mEnd
       });
       break;
     }
 
     case TrackType::kVideoTrack: {
       // Decoders use the layers backend to decide if they can use hardware decoding,
       // so specify LAYERS_NONE if we want to forcibly disable it.
       decoder.mDecoder = mPlatform->CreateDecoder({
         mVideo.mInfo ? *mVideo.mInfo->GetAsVideoInfo() : mInfo.mVideo,
         decoder.mTaskQueue,
         decoder.mCallback.get(),
         mLayersBackendType,
         GetImageContainer(),
-        mCrashHelper
+        mCrashHelper,
+        decoder.LastDecodedSampleTime().mEnd
       });
       break;
     }
     default:
       break;
   }
   if (decoder.mDecoder ) {
     decoder.mDescription = decoder.mDecoder->GetDescriptionName();
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -422,16 +422,31 @@ private:
     Atomic<bool> mIsHardwareAccelerated;
     // Sample format monitoring.
     uint32_t mLastStreamSourceID;
     Maybe<uint32_t> mNextStreamSourceID;
     media::TimeIntervals mTimeRanges;
     Maybe<media::TimeUnit> mLastTimeRangesEnd;
     RefPtr<SharedTrackInfo> mInfo;
     Maybe<media::TimeUnit> mFirstDemuxedSampleTime;
+
+    media::TimeInterval LastDecodedSampleTime() const
+    {
+      if (!mOutput.IsEmpty()) {
+        return media::TimeInterval(
+          media::TimeUnit::FromMicroseconds(mOutput.LastElement()->mTime),
+          media::TimeUnit::FromMicroseconds(mOutput.LastElement()->GetEndTime()));
+      }
+
+      if (mLastSampleTime.isSome()) {
+        return mLastSampleTime.ref();
+      }
+
+      return media::TimeInterval();
+    }
   };
 
   class DecoderDataWithPromise : public DecoderData {
   public:
     DecoderDataWithPromise(MediaFormatReader* aOwner,
                            MediaData::Type aType,
                            uint32_t aDecodeAhead,
                            uint32_t aNumOfMaxError)
--- a/dom/media/platforms/PlatformDecoderModule.h
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -59,24 +59,26 @@ struct CreateDecoderParams {
 
   const TrackInfo& mConfig;
   TaskQueue* mTaskQueue = nullptr;
   MediaDataDecoderCallback* mCallback = nullptr;
   DecoderDoctorDiagnostics* mDiagnostics = nullptr;
   layers::ImageContainer* mImageContainer = nullptr;
   layers::LayersBackend mLayersBackend = layers::LayersBackend::LAYERS_NONE;
   RefPtr<GMPCrashHelper> mCrashHelper;
+  media::TimeUnit mFirstSampleStartTime = media::TimeUnit::Invalid();
 
 private:
   void Set(TaskQueue* aTaskQueue) { mTaskQueue = aTaskQueue; }
   void Set(MediaDataDecoderCallback* aCallback) { mCallback = aCallback; }
   void Set(DecoderDoctorDiagnostics* aDiagnostics) { mDiagnostics = aDiagnostics; }
   void Set(layers::ImageContainer* aImageContainer) { mImageContainer = aImageContainer; }
   void Set(layers::LayersBackend aLayersBackend) { mLayersBackend = aLayersBackend; }
   void Set(GMPCrashHelper* aCrashHelper) { mCrashHelper = aCrashHelper; }
+  void Set(media::TimeUnit aFirstSampleStartTime) { mFirstSampleStartTime = aFirstSampleStartTime; }
   template <typename T1, typename T2, typename... Ts>
   void Set(T1 a1, T2 a2, Ts... as)
   {
     // Parameter pack expansion trick, to call Set() on each argument.
     using expander = int[];
     (void)expander {
       (Set(a1), 0), (Set(a2), 0), (Set(as), 0)...
     };
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -31,29 +31,32 @@ public:
   BlankMediaDataDecoder(BlankMediaDataCreator* aCreator,
                         const CreateDecoderParams& aParams)
     : mCreator(aCreator)
     , mCallback(aParams.mCallback)
     , mMaxRefFrames(aParams.mConfig.GetType() == TrackInfo::kVideoTrack
                     ? ComputeMaxRefFrames(aParams.VideoConfig().mExtraData)
                     : 0)
     , mType(aParams.mConfig.GetType())
+    , mFirstSampleStartTime(aParams.mFirstSampleStartTime)
   {
   }
 
   RefPtr<InitPromise> Init() override {
     return InitPromise::CreateAndResolve(mType, __func__);
   }
 
   nsresult Shutdown() override {
     return NS_OK;
   }
 
   nsresult Input(MediaRawData* aSample) override
   {
+    AdjustFirstSampleStartTime(aSample);
+
     RefPtr<MediaData> data =
       mCreator->Create(media::TimeUnit::FromMicroseconds(aSample->mTime),
                        media::TimeUnit::FromMicroseconds(aSample->mDuration),
                        aSample->mOffset);
 
     OutputFrame(data.get());
 
     return NS_OK;
@@ -97,22 +100,31 @@ private:
     }
 
     if (mReorderQueue.Length() <= mMaxRefFrames) {
       mCallback->InputExhausted();
     }
 
   }
 
+  void AdjustFirstSampleStartTime(MediaData* aSample)
+  {
+    if (mFirstSampleStartTime.IsValid()) {
+      aSample->AdjustStartTimeAndDuration(mFirstSampleStartTime);
+      mFirstSampleStartTime = media::TimeUnit::Invalid();
+    }
+  }
+
 private:
   nsAutoPtr<BlankMediaDataCreator> mCreator;
   MediaDataDecoderCallback* mCallback;
   const uint32_t mMaxRefFrames;
   ReorderQueue mReorderQueue;
   TrackInfo::TrackType mType;
+  media::TimeUnit mFirstSampleStartTime;
 };
 
 class BlankVideoDataCreator {
 public:
   BlankVideoDataCreator(uint32_t aFrameWidth,
                         uint32_t aFrameHeight,
                         layers::ImageContainer* aImageContainer)
     : mFrameWidth(aFrameWidth)
--- a/dom/media/platforms/wrappers/H264Converter.cpp
+++ b/dom/media/platforms/wrappers/H264Converter.cpp
@@ -23,16 +23,17 @@ H264Converter::H264Converter(PlatformDec
   , mLayersBackend(aParams.mLayersBackend)
   , mImageContainer(aParams.mImageContainer)
   , mTaskQueue(aParams.mTaskQueue)
   , mCallback(aParams.mCallback)
   , mDecoder(nullptr)
   , mGMPCrashHelper(aParams.mCrashHelper)
   , mNeedAVCC(aPDM->DecoderNeedsConversion(aParams.mConfig) == PlatformDecoderModule::kNeedAVCC)
   , mLastError(NS_OK)
+  , mFirstSampleStartTime(aParams.mFirstSampleStartTime)
 {
   CreateDecoder(aParams.mDiagnostics);
 }
 
 H264Converter::~H264Converter()
 {
 }
 
@@ -144,17 +145,18 @@ H264Converter::CreateDecoder(DecoderDoct
 
   mDecoder = mPDM->CreateVideoDecoder({
     mNeedAVCC ? mCurrentConfig : mOriginalConfig,
     mTaskQueue,
     mCallback,
     aDiagnostics,
     mImageContainer,
     mLayersBackend,
-    mGMPCrashHelper
+    mGMPCrashHelper,
+    mFirstSampleStartTime
   });
 
   if (!mDecoder) {
     mLastError = NS_ERROR_FAILURE;
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
 }
--- a/dom/media/platforms/wrappers/H264Converter.h
+++ b/dom/media/platforms/wrappers/H264Converter.h
@@ -63,13 +63,14 @@ private:
   const RefPtr<TaskQueue> mTaskQueue;
   nsTArray<RefPtr<MediaRawData>> mMediaRawSamples;
   MediaDataDecoderCallback* mCallback;
   RefPtr<MediaDataDecoder> mDecoder;
   MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
   RefPtr<GMPCrashHelper> mGMPCrashHelper;
   bool mNeedAVCC;
   nsresult mLastError;
+  media::TimeUnit mFirstSampleStartTime;
 };
 
 } // namespace mozilla
 
 #endif // mozilla_H264Converter_h