Bug 1257107 - Discard decoded data if its pts is smaller than seek time. r=jya
☠☠ backed out by c6d91a79d23b ☠ ☠
author       Alfredo Yang <ayang@mozilla.com>
date         Tue, 17 May 2016 21:46:00 -0400
changeset    298134  0e4c5be816f3b91cbaef28e58697e4341ed1a597
parent       298112  b8d15a27d1ecb0a533011438eb5f39599b9521ee
child        298135  be9358f86e3a8584b33fad3323bbc7b09308e868
push id      30273
push user    kwierso@gmail.com
push date    Fri, 20 May 2016 21:08:12 +0000
treeherder   mozilla-central@c403ac05b8f4
reviewers    jya
bugs         1257107
milestone    49.0a1
dom/media/MediaFormatReader.cpp
dom/media/platforms/PlatformDecoderModule.h
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/VPXDecoder.h
dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp
dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h
dom/media/platforms/android/AndroidDecoderModule.cpp
dom/media/platforms/android/AndroidDecoderModule.h
dom/media/platforms/apple/AppleVDADecoder.cpp
dom/media/platforms/apple/AppleVDADecoder.h
dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/omx/OmxDataDecoder.cpp
dom/media/platforms/omx/OmxDataDecoder.h
dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
dom/media/platforms/wmf/WMFMediaDataDecoder.h
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/platforms/wrappers/FuzzingWrapper.cpp
dom/media/platforms/wrappers/FuzzingWrapper.h
dom/media/platforms/wrappers/H264Converter.h
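
Every backend touched above repeats the same three-step bookkeeping: record the hint, consult it where decoded frames are emitted, and clear it on flush. Below is a minimal illustrative sketch of that pattern for a decoder whose state is only touched on its own task queue; ExampleDecoder, OnFlush and EmitFrame are placeholder names, not identifiers from this patch, and refcounting plus the remaining MediaDataDecoder overrides (Init, Input, Drain, Shutdown, ...) are omitted.

  class ExampleDecoder : public MediaDataDecoder {
  public:
    void SetSeekThreshold(const media::TimeUnit& aTime) override
    {
      // Marshal the update onto the decoder task queue so the threshold is
      // only ever read and written there.
      RefPtr<ExampleDecoder> self = this;
      nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, aTime] () {
        self->mSeekTargetThreshold = Some(aTime);
      });
      mTaskQueue->Dispatch(r.forget());
    }

  private:
    void OnFlush()  // Runs on the task queue as part of Flush().
    {
      // A stale hint must not survive a flush, or frames decoded after a
      // later seek could be dropped by mistake.
      mSeekTargetThreshold.reset();
    }

    void EmitFrame(VideoData* aFrame)  // Runs on the task queue.
    {
      if (mSeekTargetThreshold.isSome()) {
        if (aFrame->mTime < mSeekTargetThreshold.ref().ToMicroseconds()) {
          return;  // Frame lies before the seek target; discard it.
        }
        // The first frame at or after the target consumes the hint.
        mSeekTargetThreshold.reset();
      }
      mCallback->Output(aFrame);
    }

    RefPtr<TaskQueue> mTaskQueue;
    MediaDataDecoderCallback* mCallback;
    Maybe<media::TimeUnit> mSeekTargetThreshold;
  };
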
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1626,16 +1626,21 @@ MediaFormatReader::DoVideoSeek()
 
 void
 MediaFormatReader::OnVideoSeekCompleted(media::TimeUnit aTime)
 {
   MOZ_ASSERT(OnTaskQueue());
   LOGV("Video seeked to %lld", aTime.ToMicroseconds());
   mVideo.mSeekRequest.Complete();
 
+  if (mVideo.mDecoder) {
+    // Hint the video decoder that output before the pending seek time is not
+    // wanted, so it can drop those frames internally.
+    auto& decoder = GetDecoderData(TrackInfo::kVideoTrack);
+    decoder.mDecoder->SetSeekThreshold(mPendingSeekTime.ref());
+  }
+
   if (HasAudio() && !mOriginalSeekTarget.IsVideoOnly()) {
     MOZ_ASSERT(mPendingSeekTime.isSome());
     if (mOriginalSeekTarget.IsFast()) {
       // We are performing a fast seek. We need to seek audio to where the
       // video seeked to, to ensure proper A/V sync once playback resume.
       mPendingSeekTime = Some(aTime);
     }
     DoAudioSeek();
--- a/dom/media/platforms/PlatformDecoderModule.h
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -224,13 +224,20 @@ public:
   {
     return NS_OK;
   }
 
   // Return the name of the MediaDataDecoder, only used for decoding.
   // Only return a static const string, as the information may be accessed
   // in a non thread-safe fashion.
   virtual const char* GetDescriptionName() const = 0;
+
+  // Set a hint of the seek target time to the decoder. The decoder will drop
+  // any decoded data whose pts is smaller than this value. The threshold must
+  // be cleared when the decoder is flushed or reset.
+  // A decoder is not required to honor this hint; however, video decoders
+  // should implement it to improve seek performance.
+  virtual void SetSeekThreshold(const media::TimeUnit& aTime) {}
 };
 
 } // namespace mozilla
 
 #endif
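
For a caller, the hint slots into the existing seek sequence. A hedged sketch of that order of operations follows; SeekExample is a hypothetical helper, and the real call site added by this patch is MediaFormatReader::OnVideoSeekCompleted.

  // Hypothetical caller-side sequence; only Flush() and SetSeekThreshold()
  // are MediaDataDecoder API, the rest is commentary.
  void SeekExample(MediaDataDecoder* aDecoder, const media::TimeUnit& aTarget)
  {
    aDecoder->Flush();                // Also clears any earlier threshold.
    // ... the demuxer seeks to the keyframe at or before aTarget ...
    aDecoder->SetSeekThreshold(aTarget);
    // ... samples from that keyframe onward are fed via Input(); the decoder
    // may drop every decoded frame whose pts < aTarget, so the first frame
    // delivered to the callback is the one the seek actually wants.
  }
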
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -81,16 +81,23 @@ VPXDecoder::Init()
   }
   return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
 }
 
 nsresult
 VPXDecoder::Flush()
 {
   mTaskQueue->Flush();
+
+  RefPtr<VPXDecoder> self = this;
+  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self] () {
+    self->mSeekTargetThreshold.reset();
+  });
+  mTaskQueue->Dispatch(r.forget());
+
   return NS_OK;
 }
 
 int
 VPXDecoder::DoDecodeFrame(MediaRawData* aSample)
 {
 #if defined(DEBUG)
   vpx_codec_stream_info_t si;
@@ -113,16 +120,23 @@ VPXDecoder::DoDecodeFrame(MediaRawData* 
   vpx_codec_iter_t  iter = nullptr;
   vpx_image_t      *img;
 
   while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
     NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 ||
                  img->fmt == VPX_IMG_FMT_I444,
                  "WebM image format not I420 or I444");
 
+    if (mSeekTargetThreshold.isSome()) {
+      if (aSample->mTime < mSeekTargetThreshold.ref().ToMicroseconds()) {
+        continue;
+      }
+      mSeekTargetThreshold.reset();
+    }
+
     // Chroma shifts are rounded down as per the decoding examples in the SDK
     VideoData::YCbCrBuffer b;
     b.mPlanes[0].mData = img->planes[0];
     b.mPlanes[0].mStride = img->stride[0];
     b.mPlanes[0].mHeight = img->d_h;
     b.mPlanes[0].mWidth = img->d_w;
     b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
 
@@ -212,10 +226,20 @@ bool
 VPXDecoder::IsVPX(const nsACString& aMimeType, uint8_t aCodecMask)
 {
   return ((aCodecMask & VPXDecoder::VP8) &&
           aMimeType.EqualsLiteral("video/webm; codecs=vp8")) ||
          ((aCodecMask & VPXDecoder::VP9) &&
           aMimeType.EqualsLiteral("video/webm; codecs=vp9"));
 }
 
+void
+VPXDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  RefPtr<VPXDecoder> self = this;
+  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, aTime] () {
+    self->mSeekTargetThreshold = Some(aTime);
+  });
+  mTaskQueue->Dispatch(r.forget());
+}
+
 } // namespace mozilla
 #undef LOG
--- a/dom/media/platforms/agnostic/VPXDecoder.h
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -32,16 +32,17 @@ public:
   nsresult Input(MediaRawData* aSample) override;
   nsresult Flush() override;
   nsresult Drain() override;
   nsresult Shutdown() override;
   const char* GetDescriptionName() const override
   {
     return "libvpx video decoder";
   }
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
 
   enum Codec: uint8_t {
     VP8 = 1 << 0,
     VP9 = 1 << 1
   };
 
   // Return true if mimetype is a VPX codec of given types.
   static bool IsVPX(const nsACString& aMimeType, uint8_t aCodecMask=VP8|VP9);
@@ -51,16 +52,18 @@ private:
   int DoDecodeFrame (MediaRawData* aSample);
   void DoDrain ();
   void OutputDelayedFrames ();
 
   RefPtr<ImageContainer> mImageContainer;
   RefPtr<FlushableTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
 
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
+
   // VPx decoder state
   vpx_codec_ctx_t mVPX;
 
   const VideoInfo& mInfo;
 
   int mCodec;
 };
 
--- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
@@ -146,16 +146,20 @@ public:
     mCallback = nullptr;
     return rv;
   }
 
   const char* GetDescriptionName() const override {
     return mDecoder->GetDescriptionName();
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) override {
+    mDecoder->SetSeekThreshold(aTime);
+  }
+
 private:
 
   RefPtr<MediaDataDecoder> mDecoder;
   MediaDataDecoderCallback* mCallback;
   RefPtr<TaskQueue> mTaskQueue;
   RefPtr<CDMProxy> mProxy;
   nsClassHashtable<nsRefPtrHashKey<MediaRawData>, DecryptPromiseRequestHolder> mDecrypts;
   RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -19,16 +19,23 @@ extern bool IsOnGMPThread();
 
 void
 VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
 {
   GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);
 
   MOZ_ASSERT(IsOnGMPThread());
 
+  if (mSeekTargetThreshold.isSome()) {
+    if (decodedFrame->Timestamp() < (uint64_t)mSeekTargetThreshold.ref().ToMicroseconds()) {
+      return;
+    }
+    mSeekTargetThreshold.reset();
+  }
+
   VideoData::YCbCrBuffer b;
   for (int i = 0; i < kGMPNumOfPlanes; ++i) {
     b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
     b.mPlanes[i].mStride = decodedFrame->Stride(GMPPlaneType(i));
     if (i == kGMPYPlane) {
       b.mPlanes[i].mWidth = decodedFrame->Width();
       b.mPlanes[i].mHeight = decodedFrame->Height();
     } else {
@@ -100,16 +107,30 @@ void
 VideoCallbackAdapter::Terminated()
 {
   // Note that this *may* be called from the proxy thread also.
   NS_WARNING("H.264 GMP decoder terminated.");
   mCallback->Error();
 }
 
 void
+VideoCallbackAdapter::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  MOZ_ASSERT(IsOnGMPThread());
+  mSeekTargetThreshold = Some(aTime);
+}
+
+void
+VideoCallbackAdapter::ResetSeekThreshold()
+{
+  MOZ_ASSERT(IsOnGMPThread());
+  mSeekTargetThreshold.reset();
+}
+
+void
 GMPVideoDecoder::InitTags(nsTArray<nsCString>& aTags)
 {
   aTags.AppendElement(NS_LITERAL_CSTRING("h264"));
   const Maybe<nsCString> gmp(
     GMPDecoderModule::PreferredGMP(NS_LITERAL_CSTRING("video/avc")));
   if (gmp.isSome()) {
     aTags.AppendElement(gmp.value());
   }
@@ -274,16 +295,17 @@ nsresult
 GMPVideoDecoder::Flush()
 {
   MOZ_ASSERT(IsOnGMPThread());
 
   if (!mGMP || NS_FAILED(mGMP->Reset())) {
     // Abort the flush.
     mCallback->FlushComplete();
   }
+  mAdapter->ResetSeekThreshold();
 
   return NS_OK;
 }
 
 nsresult
 GMPVideoDecoder::Drain()
 {
   MOZ_ASSERT(IsOnGMPThread());
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
@@ -36,19 +36,23 @@ public:
   void ResetComplete() override;
   void Error(GMPErr aErr) override;
   void Terminated() override;
 
   void SetLastStreamOffset(int64_t aStreamOffset) {
     mLastStreamOffset = aStreamOffset;
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime);
+  void ResetSeekThreshold();
+
 private:
   MediaDataDecoderCallbackProxy* mCallback;
   int64_t mLastStreamOffset;
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
 
   VideoInfo mVideoInfo;
   RefPtr<layers::ImageContainer> mImageContainer;
 };
 
 class GMPVideoDecoder : public MediaDataDecoder {
 protected:
   GMPVideoDecoder(const VideoInfo& aConfig,
@@ -89,16 +93,21 @@ public:
   nsresult Flush() override;
   nsresult Drain() override;
   nsresult Shutdown() override;
   const char* GetDescriptionName() const override
   {
     return "GMP video decoder";
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) override
+  {
+    mAdapter->SetSeekThreshold(aTime);
+  }
+
 protected:
   virtual void InitTags(nsTArray<nsCString>& aTags);
   virtual nsCString GetNodeId();
   virtual GMPUniquePtr<GMPVideoEncodedFrame> CreateFrame(MediaRawData* aSample);
 
 private:
 
   class GMPInitDoneCallback : public GetGMPVideoDecoderCallback
--- a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp
+++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp
@@ -91,9 +91,25 @@ MediaDataDecoderProxy::Shutdown()
 }
 
 void
 MediaDataDecoderProxy::FlushComplete()
 {
   mFlushComplete.Set(true);
 }
 
+void
+MediaDataDecoderProxy::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  MOZ_ASSERT(!IsOnProxyThread());
+  MOZ_ASSERT(!mIsShutdown);
+
+  int64_t threshold = aTime.ToMicroseconds();
+  RefPtr<MediaDataDecoderProxy> self = this;
+  nsCOMPtr<nsIRunnable> task =
+    NS_NewRunnableFunction([threshold, self] () {
+      media::TimeUnit time = media::TimeUnit::FromMicroseconds(threshold);
+      self->mProxyDecoder->SetSeekThreshold(time);
+  });
+  mProxyThread->Dispatch(task.forget());
+}
+
 } // namespace mozilla
--- a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h
+++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h
@@ -140,16 +140,18 @@ public:
   nsresult Drain() override;
   nsresult Shutdown() override;
 
   const char* GetDescriptionName() const override
   {
     return "GMP proxy data decoder";
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
   // Called by MediaDataDecoderCallbackProxy.
   void FlushComplete();
 
 private:
   RefPtr<InitPromise> InternalInit();
 
 #ifdef DEBUG
   bool IsOnProxyThread() {
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -580,22 +580,42 @@ MediaCodecDataDecoder::ProcessOutput(
   if (buffer) {
     // The buffer will be null on Android L if we are decoding to a Surface.
     void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
     Output(aInfo, directBuffer, aFormat, duration.value());
   }
 
   // The Surface will be updated at this point (for video).
   mDecoder->ReleaseOutputBuffer(aStatus, true);
+
+  {
+    MonitorAutoLock lock(mMonitor);
+    int64_t pts = 0;
+    if (mSeekTargetThreshold.isSome() &&
+        NS_SUCCEEDED(aInfo->PresentationTimeUs(&pts))) {
+      if (pts < mSeekTargetThreshold.ref().ToMicroseconds()) {
+        return NS_OK;
+      }
+      mSeekTargetThreshold.reset();
+    }
+  }
+
   PostOutput(aInfo, aFormat, duration.value());
 
   return NS_OK;
 }
 
 void
+MediaCodecDataDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  MonitorAutoLock lock(mMonitor);
+  mSeekTargetThreshold = Some(aTime);
+}
+
+void
 MediaCodecDataDecoder::DecoderLoop()
 {
   bool isOutputDone = false;
   AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
   MediaFormat::LocalRef outputFormat(frame.GetEnv());
   nsresult res = NS_OK;
 
   while (WaitForInput()) {
@@ -744,16 +764,17 @@ MediaCodecDataDecoder::ResetOutputBuffer
 {
   return mDecoder->GetOutputBuffers(ReturnTo(&mOutputBuffers));
 }
 
 nsresult
 MediaCodecDataDecoder::Flush()
 {
   MonitorAutoLock lock(mMonitor);
+  mSeekTargetThreshold.reset();
   if (!State(kFlushing)) {
     return NS_OK;
   }
   lock.Notify();
 
   while (State() == kFlushing) {
     lock.Wait();
   }
--- a/dom/media/platforms/android/AndroidDecoderModule.h
+++ b/dom/media/platforms/android/AndroidDecoderModule.h
@@ -60,16 +60,17 @@ public:
   nsresult Flush() override;
   nsresult Drain() override;
   nsresult Shutdown() override;
   nsresult Input(MediaRawData* aSample) override;
   const char* GetDescriptionName() const override
   {
     return "android decoder";
   }
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
 
 protected:
   enum ModuleState {
     kDecoding = 0,
     kFlushing,
     kDrainQueue,
     kDrainDecoder,
     kDrainWaitEOS,
@@ -134,13 +135,15 @@ protected:
   // Only these members are protected by mMonitor.
   Monitor mMonitor;
 
   ModuleState mState;
 
   SampleQueue mQueue;
   // Durations are stored in microseconds.
   std::deque<media::TimeUnit> mDurations;
+
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/platforms/apple/AppleVDADecoder.cpp
+++ b/dom/media/platforms/apple/AppleVDADecoder.cpp
@@ -149,16 +149,20 @@ AppleVDADecoder::Flush()
   MOZ_ASSERT(mCallback->OnReaderTaskQueue());
   mIsFlushing = true;
   nsCOMPtr<nsIRunnable> runnable =
     NewRunnableMethod(this, &AppleVDADecoder::ProcessFlush);
   SyncRunnable::DispatchToThread(mTaskQueue, runnable);
   mIsFlushing = false;
   // All ProcessDecode() tasks should be done.
   MOZ_ASSERT(mInputIncoming == 0);
+
+  MonitorAutoLock mon(mMonitor);
+  mSeekTargetThreshold.reset();
+
   return NS_OK;
 }
 
 nsresult
 AppleVDADecoder::Drain()
 {
   MOZ_ASSERT(mCallback->OnReaderTaskQueue());
   nsCOMPtr<nsIRunnable> runnable =
@@ -286,16 +290,24 @@ AppleVDADecoder::ClearReorderedFrames()
 {
   MonitorAutoLock mon(mMonitor);
   while (!mReorderQueue.IsEmpty()) {
     mReorderQueue.Pop();
   }
   mQueuedSamples = 0;
 }
 
+void
+AppleVDADecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
+  MonitorAutoLock mon(mMonitor);
+  mSeekTargetThreshold = Some(aTime);
+}
+
 // Copy and return a decoded frame.
 nsresult
 AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                              AppleVDADecoder::AppleFrameRef aFrameRef)
 {
   if (mIsShutDown || mIsFlushing) {
     // We are in the process of flushing or shutting down; ignore frame.
     return NS_OK;
@@ -317,16 +329,27 @@ AppleVDADecoder::OutputFrame(CVPixelBuff
   MOZ_ASSERT(mQueuedSamples);
   mQueuedSamples--;
 
   if (!aImage) {
     // Image was dropped by decoder.
     return NS_OK;
   }
 
+  {
+    MonitorAutoLock mon(mMonitor);
+    if (mSeekTargetThreshold.isSome()) {
+      if (aFrameRef.composition_timestamp < mSeekTargetThreshold.ref()) {
+        return NS_OK;
+      } else {
+        mSeekTargetThreshold.reset();
+      }
+    }
+  }
+
   // Where our resulting image will end up.
   RefPtr<VideoData> data;
   // Bounds.
   VideoInfo info;
   info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
   gfx::IntRect visible = gfx::IntRect(0,
                                       0,
                                       mPictureWidth,
--- a/dom/media/platforms/apple/AppleVDADecoder.h
+++ b/dom/media/platforms/apple/AppleVDADecoder.h
@@ -82,16 +82,18 @@ private:
     return true;
   }
 
   const char* GetDescriptionName() const override
   {
     return "apple VDA decoder";
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
 protected:
   AppleVDADecoder(const VideoInfo& aConfig,
                   TaskQueue* aTaskQueue,
                   MediaDataDecoderCallback* aCallback,
                   layers::ImageContainer* aImageContainer);
   virtual ~AppleVDADecoder();
 
   void AssertOnTaskQueueThread()
@@ -128,23 +130,27 @@ private:
   const RefPtr<layers::ImageContainer> mImageContainer;
   // Increased when Input is called, and decreased when ProcessFrame runs.
   // Reaching 0 indicates that there's no pending Input.
   Atomic<uint32_t> mInputIncoming;
   Atomic<bool> mIsShutDown;
   const bool mUseSoftwareImages;
   const bool mIs106;
 
-  // Protects mReorderQueue.
+  // Protects mReorderQueue and mSeekTargetThreshold.
   Monitor mMonitor;
   // Set on reader/decode thread calling Flush() to indicate that output is
   // not required and so input samples on mTaskQueue need not be processed.
   // Cleared on mTaskQueue in ProcessDrain().
   Atomic<bool> mIsFlushing;
   ReorderQueue mReorderQueue;
+  // A decoded frame will be dropped if its pts is smaller than this value.
+  // It is accessed on the VideoToolbox thread and on the reader task queue,
+  // so it must be protected by mMonitor.
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
 
   // Method to set up the decompression session.
   nsresult InitializeSession();
 
   // Method to pass a frame to VideoToolbox for decoding.
   nsresult ProcessDecode(MediaRawData* aSample);
   virtual nsresult DoDecode(MediaRawData* aSample);
   CFDictionaryRef CreateDecoderSpecification();
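
Backends whose output callback does not run on the decoder task queue (here, the VideoToolbox/VDA callback) protect the threshold with a monitor instead of dispatching a task. A sketch of that discipline; ShouldDropForSeek is a hypothetical helper, not a function added by this patch.

  // Called from the output path; mMonitor is the same monitor that already
  // protects mReorderQueue.
  bool AppleVDADecoder::ShouldDropForSeek(const media::TimeUnit& aCompositionTime)
  {
    MonitorAutoLock mon(mMonitor);
    if (mSeekTargetThreshold.isSome()) {
      if (aCompositionTime < mSeekTargetThreshold.ref()) {
        return true;                 // Frame precedes the seek target; drop it.
      }
      mSeekTargetThreshold.reset();  // First frame at/after the target clears the hint.
    }
    return false;
  }
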
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -156,22 +156,33 @@ FFmpegDataDecoder<LIBAV_VER>::Drain()
   MOZ_ASSERT(mCallback->OnReaderTaskQueue());
   nsCOMPtr<nsIRunnable> runnable =
     NewRunnableMethod(this, &FFmpegDataDecoder<LIBAV_VER>::ProcessDrain);
   mTaskQueue->Dispatch(runnable.forget());
   return NS_OK;
 }
 
 void
+FFmpegDataDecoder<LIBAV_VER>::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  RefPtr<FFmpegDataDecoder> self = this;
+  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, aTime] () {
+    self->mSeekTargetThreshold = Some(aTime);
+  });
+  mTaskQueue->Dispatch(r.forget());
+}
+
+void
 FFmpegDataDecoder<LIBAV_VER>::ProcessFlush()
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   if (mCodecContext) {
     mLib->avcodec_flush_buffers(mCodecContext);
   }
+  mSeekTargetThreshold.reset();
 }
 
 void
 FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown()
 {
   StaticMutexAutoLock mon(sMonitor);
 
   if (mCodecContext) {
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
@@ -31,16 +31,17 @@ public:
 
   static bool Link();
 
   RefPtr<InitPromise> Init() override = 0;
   nsresult Input(MediaRawData* aSample) override;
   nsresult Flush() override;
   nsresult Drain() override;
   nsresult Shutdown() override;
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
 
   static AVCodec* FindAVCodec(FFmpegLibWrapper* aLib, AVCodecID aCodec);
 
 protected:
   enum DecodeResult {
     DECODE_FRAME,
     DECODE_NO_FRAME,
     DECODE_ERROR
@@ -56,16 +57,19 @@ protected:
   FFmpegLibWrapper* mLib;
   MediaDataDecoderCallback* mCallback;
 
   AVCodecContext* mCodecContext;
   AVFrame*        mFrame;
   RefPtr<MediaByteBuffer> mExtraData;
   AVCodecID mCodecID;
 
+  // Only accessed on mTaskQueue.
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
+
 private:
   void ProcessDecode(MediaRawData* aSample);
   virtual DecodeResult DoDecode(MediaRawData* aSample) = 0;
   virtual void ProcessDrain() = 0;
 
   static StaticMutex sMonitor;
   const RefPtr<TaskQueue> mTaskQueue;
   // Set/cleared on reader thread calling Flush() to indicate that output is
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -256,16 +256,25 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
     if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
       NS_WARNING("Unable to retrieve duration from map");
       duration = aSample->mDuration;
       // dts are probably incorrectly reported ; so clear the map as we're
       // unlikely to find them in the future anyway. This also guards
       // against the map becoming extremely big.
       mDurationMap.Clear();
     }
+
+    if (mSeekTargetThreshold.isSome()) {
+      if (pts < mSeekTargetThreshold.ref().ToMicroseconds()) {
+        FFMPEG_LOG("Dropping decoded frame.");
+        return DecodeResult::DECODE_FRAME;
+      }
+      mSeekTargetThreshold.reset();
+    }
+
     FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
                pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
 
     VideoData::YCbCrBuffer b;
     b.mPlanes[0].mData = mFrame->data[0];
     b.mPlanes[1].mData = mFrame->data[1];
     b.mPlanes[2].mData = mFrame->data[2];
 
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -352,16 +352,24 @@ OmxDataDecoder::FillBufferDone(BufferDat
 
 void
 OmxDataDecoder::Output(BufferData* aData)
 {
   if (!mMediaDataHelper) {
     mMediaDataHelper = new MediaDataHelper(mTrackInfo.get(), mImageContainer, mOmxLayer);
   }
 
+  if (mSeekTargetThreshold.isSome()) {
+    if (aData->mRawData->mTime < mSeekTargetThreshold.ref().ToMicroseconds()) {
+      aData->mStatus = BufferData::BufferStatus::FREE;
+      return;
+    }
+    mSeekTargetThreshold.reset();
+  }
+
   bool isPlatformData = false;
   RefPtr<MediaData> data = mMediaDataHelper->GetMediaData(aData, isPlatformData);
   if (!data) {
     aData->mStatus = BufferData::BufferStatus::FREE;
     return;
   }
 
   if (isPlatformData) {
@@ -868,16 +876,18 @@ OmxDataDecoder::SendEosBuffer()
   FillAndEmptyBuffers();
 }
 
 void
 OmxDataDecoder::DoFlush()
 {
   MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
 
+  mSeekTargetThreshold.reset();
+
   // 1. Call OMX command OMX_CommandFlush in Omx TaskQueue.
   // 2. Remove all elements in mMediaRawDatas when flush is completed.
   mOmxLayer->SendCommand(OMX_CommandFlush, OMX_ALL, nullptr)
     ->Then(mOmxTaskQueue, __func__, this,
            &OmxDataDecoder::FlushComplete,
            &OmxDataDecoder::FlushFailure);
 }
 
@@ -1032,9 +1042,20 @@ MediaDataHelper::CreateYUV420VideoData(B
 
   LOG("YUV420 VideoData: disp width %d, height %d, pic width %d, height %d, time %ld",
       info.mDisplay.width, info.mDisplay.height, info.mImage.width,
       info.mImage.height, aBufferData->mBuffer->nTimeStamp);
 
   return data.forget();
 }
 
+void
+OmxDataDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  RefPtr<OmxDataDecoder> self = this;
+
+  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, aTime] () {
+    self->mSeekTargetThreshold = Some(aTime);
+  });
+  mOmxTaskQueue->Dispatch(r.forget());
 }
+
+}
--- a/dom/media/platforms/omx/OmxDataDecoder.h
+++ b/dom/media/platforms/omx/OmxDataDecoder.h
@@ -74,16 +74,18 @@ public:
 
   nsresult Shutdown() override;
 
   const char* GetDescriptionName() const override
   {
     return "omx decoder";
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
   // Return true if event is handled.
   bool Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2);
 
 protected:
   void InitializationTask();
 
   void ResolveInitPromise(const char* aMethodName);
 
@@ -192,16 +194,19 @@ protected:
 
   BUFFERLIST mInPortBuffers;
 
   BUFFERLIST mOutPortBuffers;
 
   RefPtr<MediaDataHelper> mMediaDataHelper;
 
   MediaDataDecoderCallback* mCallback;
+
+  // Only accessed on the OMX task queue.
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
 };
 
 template<class T>
 void InitOmxParameter(T* aParam)
 {
   PodZero(aParam);
   aParam->nSize = sizeof(T);
   aParam->nVersion.s.nVersionMajor = 1;
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
@@ -232,9 +232,24 @@ WMFMediaDataDecoder::ConfigurationChange
 void
 WMFMediaDataDecoder::ProcessConfigurationChanged(UniquePtr<TrackInfo>&& aConfig)
 {
   if (mMFTManager) {
     mMFTManager->ConfigurationChanged(*aConfig);
   }
 }
 
+void
+WMFMediaDataDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+  MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+
+  RefPtr<WMFMediaDataDecoder> self = this;
+  nsCOMPtr<nsIRunnable> runnable =
+    NS_NewRunnableFunction([self, aTime] () {
+      media::TimeUnit threshold = aTime;
+      self->mMFTManager->SetSeekThreshold(threshold);
+    });
+  mTaskQueue->Dispatch(runnable.forget());
+}
+
 } // namespace mozilla
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -31,17 +31,20 @@ public:
   // or until no more is able to be produced.
   // Returns S_OK on success, or MF_E_TRANSFORM_NEED_MORE_INPUT if there's not
   // enough data to produce more output. If this returns a failure code other
   // than MF_E_TRANSFORM_NEED_MORE_INPUT, an error will be reported to the
   // MP4Reader.
   virtual HRESULT Output(int64_t aStreamOffset,
                          RefPtr<MediaData>& aOutput) = 0;
 
-  void Flush() { mDecoder->Flush(); }
+  void Flush() {
+    mDecoder->Flush();
+    mSeekTargetThreshold.reset();
+  }
 
   void Drain()
   {
     if (FAILED(mDecoder->SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0))) {
       NS_WARNING("Failed to send DRAIN command to MFT");
     }
   }
 
@@ -51,19 +54,25 @@ public:
   virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
 
   virtual TrackInfo::TrackType GetType() = 0;
 
   virtual void ConfigurationChanged(const TrackInfo& aConfig) {}
 
   virtual const char* GetDescriptionName() const = 0;
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) {
+    mSeekTargetThreshold = Some(aTime);
+  }
+
 protected:
   // IMFTransform wrapper that performs the decoding.
   RefPtr<MFTDecoder> mDecoder;
+
+  Maybe<media::TimeUnit> mSeekTargetThreshold;
 };
 
 // Decodes audio and video using Windows Media Foundation. Samples are decoded
 // using the MFTDecoder created by the MFTManager. This class implements
 // the higher-level logic that drives mapping the MFT to the async
 // MediaDataDecoder interface. The specifics of decoding the exact stream
 // type are handled by MFTManager and the MFTDecoder it creates.
 class WMFMediaDataDecoder : public MediaDataDecoder {
@@ -87,16 +96,18 @@ public:
 
   nsresult ConfigurationChanged(const TrackInfo& aConfig) override;
 
   const char* GetDescriptionName() const override
   {
     return mMFTManager ? mMFTManager->GetDescriptionName() : "";
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
 private:
 
   // Called on the task queue. Inserts the sample into the decoder, and
   // extracts output if available.
   void ProcessDecode(MediaRawData* aSample);
 
   // Called on the task queue. Extracts output if available, and delivers
   // it to the reader. Called after ProcessDecode() and ProcessDrain().
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -762,16 +762,30 @@ WMFVideoMFTManager::Output(int64_t aStre
         ++mNullOutputCount;
         if (mNullOutputCount > 250) {
           LOG("Excessive Video MFTDecoder returning success but no output; giving up");
           mGotExcessiveNullOutput = true;
           return E_FAIL;
         }
         continue;
       }
+      if (mSeekTargetThreshold.isSome()) {
+        media::TimeUnit pts = GetSampleTime(sample);
+        if (!pts.IsValid()) {
+          return E_FAIL;
+        }
+        if (pts < mSeekTargetThreshold.ref()) {
+          LOG("Dropping video frame which pts is smaller than seek target.");
+          // It is necessary to clear the pointer to release the previous output
+          // buffer.
+          sample = nullptr;
+          continue;
+        }
+        mSeekTargetThreshold.reset();
+      }
       break;
     }
     // Else unexpected error, assert, and bail.
     NS_WARNING("WMFVideoMFTManager::Output() unexpected error");
     return hr;
   }
 
   RefPtr<VideoData> frame;
--- a/dom/media/platforms/wrappers/FuzzingWrapper.cpp
+++ b/dom/media/platforms/wrappers/FuzzingWrapper.cpp
@@ -93,16 +93,23 @@ DecoderFuzzingWrapper::IsHardwareAcceler
 nsresult
 DecoderFuzzingWrapper::ConfigurationChanged(const TrackInfo& aConfig)
 {
   DFW_LOGV("");
   MOZ_ASSERT(mDecoder);
   return mDecoder->ConfigurationChanged(aConfig);
 }
 
+void
+DecoderFuzzingWrapper::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+  DFW_LOGV("");
+  MOZ_ASSERT(mDecoder);
+  mDecoder->SetSeekThreshold(aTime);
+}
 
 DecoderCallbackFuzzingWrapper::DecoderCallbackFuzzingWrapper(MediaDataDecoderCallback* aCallback)
   : mCallback(aCallback)
   , mDontDelayInputExhausted(false)
   , mDraining(false)
   , mTaskQueue(new TaskQueue(SharedThreadPool::Get(NS_LITERAL_CSTRING("MediaFuzzingWrapper"), 1)))
 {
   CFW_LOGV("aCallback=%p", aCallback);
--- a/dom/media/platforms/wrappers/FuzzingWrapper.h
+++ b/dom/media/platforms/wrappers/FuzzingWrapper.h
@@ -110,16 +110,17 @@ private:
   nsresult Drain() override;
   nsresult Shutdown() override;
   bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
   nsresult ConfigurationChanged(const TrackInfo& aConfig) override;
   const char* GetDescriptionName() const override
   {
     return mDecoder->GetDescriptionName();
   }
+  void SetSeekThreshold(const media::TimeUnit& aTime) override;
 
   RefPtr<MediaDataDecoder> mDecoder;
   RefPtr<DecoderCallbackFuzzingWrapper> mCallbackWrapper;
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/platforms/wrappers/H264Converter.h
+++ b/dom/media/platforms/wrappers/H264Converter.h
@@ -39,16 +39,23 @@ public:
   const char* GetDescriptionName() const override
   {
     if (mDecoder) {
       return mDecoder->GetDescriptionName();
     }
     return "H264Converter decoder (pending)";
   }
 
+  void SetSeekThreshold(const media::TimeUnit& aTime) override
+  {
+    if (mDecoder) {
+      mDecoder->SetSeekThreshold(aTime);
+    }
+  }
+
   // Return true if mimetype is H.264.
   static bool IsH264(const TrackInfo& aConfig);
   nsresult GetLastError() const { return mLastError; }
 
 private:
   // Will create the required MediaDataDecoder if need AVCC and we have a SPS NAL.
   // Returns NS_ERROR_FAILURE if error is permanent and can't be recovered and
   // will set mError accordingly.