Bug 1179094: Use TimeUnit in PlatformDecoderModule. r=cpearce
author Jean-Yves Avenard <jyavenard@mozilla.com>
Wed, 01 Jul 2015 16:50:27 +1000
changeset 283495 b94026e3296fefb08d098b4763bad3ad46f1c49f
parent 283494 54d35cae57d656d952a6523fb088d87ce78dfa47
child 283496 40740cddc1315f7c527a62042e86b9bcace3193d
push id 5067
push user raliiev@mozilla.com
push date Mon, 21 Sep 2015 14:04:52 +0000
treeherder mozilla-beta@14221ffe5b2f
reviewers cpearce
bugs 1179094
milestone 42.0a1
dom/media/TimeUnits.h
dom/media/VideoUtils.cpp
dom/media/VideoUtils.h
dom/media/fmp4/MP4Reader.cpp
dom/media/fmp4/MP4Reader.h
dom/media/platforms/PlatformDecoderModule.h
dom/media/platforms/agnostic/BlankDecoderModule.cpp
dom/media/platforms/android/AndroidDecoderModule.cpp
dom/media/platforms/android/AndroidDecoderModule.h
dom/media/platforms/apple/AppleATDecoder.cpp
dom/media/platforms/apple/AppleVDADecoder.cpp
dom/media/platforms/apple/AppleVDADecoder.h
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/platforms/wmf/WMFAudioMFTManager.cpp
dom/media/platforms/wmf/WMFUtils.cpp
dom/media/platforms/wmf/WMFUtils.h
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
--- a/dom/media/TimeUnits.h
+++ b/dom/media/TimeUnits.h
@@ -114,16 +114,24 @@ public:
   static TimeUnit FromNanoseconds(int64_t aValue) {
     return TimeUnit(aValue / 1000);
   }
 
   static TimeUnit FromInfinity() {
     return TimeUnit(INT64_MAX);
   }
 
+  static TimeUnit Invalid() {
+    TimeUnit ret;
+    ret.mValue = CheckedInt64(INT64_MAX);
+    // Force an overflow to render the CheckedInt invalid.
+    ret.mValue += 1;
+    return ret;
+  }
+
   int64_t ToMicroseconds() const {
     return mValue.value();
   }
 
   int64_t ToNanoseconds() const {
     return mValue.value() * 1000;
   }
 
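Note on the new Invalid() helper: TimeUnit wraps a CheckedInt64, so forcing an overflow poisons the value, and CheckedInt propagates invalidity through any further arithmetic. A minimal sketch of the call-site pattern the decoder changes below rely on (IsValid() and operator+= are both used later in this patch):

    media::TimeUnit t = media::TimeUnit::Invalid();
    t += media::TimeUnit::FromMicroseconds(42); // still invalid afterwards
    if (!t.IsValid()) {
      // One check at the end catches a failure anywhere in the chain.
    }
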
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -24,16 +24,20 @@ namespace mozilla {
 using layers::PlanarYCbCrImage;
 
 // Converts from number of audio frames to microseconds, given the specified
 // audio rate.
 CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate) {
   return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
 }
 
+media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate) {
+  return (media::TimeUnit::FromMicroseconds(aFrames) * USECS_PER_S) / aRate;
+}
+
 // Converts from microseconds to number of audio frames, given the specified
 // audio rate.
 CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate) {
   return (CheckedInt64(aUsecs) * aRate) / USECS_PER_S;
 }
 
 nsresult SecondsToUsecs(double aSeconds, int64_t& aOutUsecs) {
   if (aSeconds * double(USECS_PER_S) > INT64_MAX) {
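FramesToTimeUnit mirrors FramesToUsecs: frames * USECS_PER_S / aRate, evaluated with checked arithmetic so overflow yields an invalid TimeUnit rather than a wrapped value. Seeding the frame count through FromMicroseconds is numerically equivalent, since TimeUnit stores microseconds internally and the scaling happens before the division. A worked example at 44.1 kHz:

    // 441 frames at 44100 Hz: 441 * 1,000,000 / 44100 = 10,000 us.
    media::TimeUnit t = FramesToTimeUnit(441, 44100);
    // t.ToMicroseconds() == 10000; on overflow, t.IsValid() is false instead.
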
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -122,20 +122,21 @@ class MediaResource;
 // do file I/O, and can be used when we don't have detailed knowledge
 // of the byte->time mapping of a resource. aDurationUsecs is the duration
 // of the media in microseconds. Estimated buffered ranges are stored in
 // aOutBuffered. Ranges are 0-normalized, i.e. in the range of (0,duration].
 media::TimeIntervals GetEstimatedBufferedTimeRanges(mozilla::MediaResource* aStream,
                                                     int64_t aDurationUsecs);
 
 // Converts from number of audio frames (aFrames) to microseconds, given
-// the specified audio rate (aRate). Stores result in aOutUsecs. Returns true
-// if the operation succeeded, or false if there was an integer overflow
-// while calulating the conversion.
+// the specified audio rate (aRate).
 CheckedInt64 FramesToUsecs(int64_t aFrames, uint32_t aRate);
+// Converts from a number of audio frames (aFrames) to a TimeUnit, given
+// the specified audio rate (aRate).
+media::TimeUnit FramesToTimeUnit(int64_t aFrames, uint32_t aRate);
 
 // Converts from microseconds (aUsecs) to number of audio frames, given the
 // specified audio rate (aRate). Stores the result in aOutFrames. Returns
 // true if the operation succeeded, or false if there was an integer
 // overflow while calculating the conversion.
 CheckedInt64 UsecsToFrames(int64_t aUsecs, uint32_t aRate);
 
 // Converts milliseconds to seconds.
--- a/dom/media/fmp4/MP4Reader.cpp
+++ b/dom/media/fmp4/MP4Reader.cpp
@@ -418,17 +418,17 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo
       new DispatchKeyNeededEvent(mDecoder, initData, NS_LITERAL_STRING("cenc")));
 #endif // MOZ_EME
     // Add init data to info, will get sent from HTMLMediaElement::MetadataLoaded
     // (i.e., when transitioning from HAVE_NOTHING to HAVE_METADATA).
     mInfo.mCrypto.AddInitData(NS_LITERAL_STRING("cenc"), Move(initData));
   }
 
   // Get the duration, and report it to the decoder if we have it.
-  Microseconds duration;
+  mp4_demuxer::Microseconds duration;
   {
     MonitorAutoLock lock(mDemuxerMonitor);
     duration = mDemuxer->Duration();
   }
   if (duration != -1) {
     mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(duration));
   }
 
@@ -556,17 +556,17 @@ MP4Reader::GetDecoderData(TrackType aTra
   MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
              aTrack == TrackInfo::kVideoTrack);
   if (aTrack == TrackInfo::kAudioTrack) {
     return mAudio;
   }
   return mVideo;
 }
 
-Microseconds
+mp4_demuxer::Microseconds
 MP4Reader::GetNextKeyframeTime()
 {
   MonitorAutoLock mon(mDemuxerMonitor);
   return mVideo.mTrackDemuxer->GetNextKeyframeTime();
 }
 
 void
 MP4Reader::DisableHardwareAcceleration()
@@ -591,17 +591,17 @@ MP4Reader::DisableHardwareAcceleration()
 bool
 MP4Reader::ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold)
 {
   // The MP4Reader doesn't do normal skip-to-next-keyframe if the demuxer
   // exposes where the next keyframe is. We can then instead skip only
   // if the time threshold (the current playback position) is after the next
   // keyframe in the stream. This means we'll only skip frames that we have
   // no hope of ever playing.
-  Microseconds nextKeyframe = -1;
+  mp4_demuxer::Microseconds nextKeyframe = -1;
   if (!sDemuxSkipToNextKeyframe ||
       (nextKeyframe = GetNextKeyframeTime()) == -1) {
     return aSkipToNextKeyframe;
   }
   return nextKeyframe < aTimeThreshold && nextKeyframe >= 0;
 }
 
 nsRefPtr<MediaDecoderReader::VideoDataPromise>
@@ -1085,17 +1085,17 @@ MP4Reader::GetBuffered()
   UpdateIndex();
   NS_ENSURE_TRUE(HaveStartTime(), media::TimeIntervals());
 
   AutoPinned<MediaResource> resource(mDecoder->GetResource());
   nsTArray<MediaByteRange> ranges;
   nsresult rv = resource->GetCachedRanges(ranges);
 
   if (NS_SUCCEEDED(rv)) {
-    nsTArray<Interval<Microseconds>> timeRanges;
+    nsTArray<Interval<mp4_demuxer::Microseconds>> timeRanges;
     mDemuxer->ConvertByteRangesToTime(ranges, &timeRanges);
     for (size_t i = 0; i < timeRanges.Length(); i++) {
       buffered += media::TimeInterval(
         media::TimeUnit::FromMicroseconds(timeRanges[i].start - StartTime()),
         media::TimeUnit::FromMicroseconds(timeRanges[i].end - StartTime()));
     }
   }
 
--- a/dom/media/fmp4/MP4Reader.h
+++ b/dom/media/fmp4/MP4Reader.h
@@ -113,17 +113,17 @@ private:
   void Error(TrackType aTrack);
   void Flush(TrackType aTrack);
   void DrainComplete(TrackType aTrack);
   void UpdateIndex();
   bool IsSupportedAudioMimeType(const nsACString& aMimeType);
   bool IsSupportedVideoMimeType(const nsACString& aMimeType);
   virtual bool IsWaitingOnCDMResource() override;
 
-  Microseconds GetNextKeyframeTime();
+  mp4_demuxer::Microseconds GetNextKeyframeTime();
   bool ShouldSkip(bool aSkipToNextKeyframe, int64_t aTimeThreshold);
 
   size_t SizeOfQueue(TrackType aTrack);
 
   nsRefPtr<MP4Stream> mStream;
   nsRefPtr<mp4_demuxer::MP4Demuxer> mDemuxer;
   nsRefPtr<PlatformDecoderModule> mPlatform;
   mp4_demuxer::CryptoFile mCrypto;
--- a/dom/media/platforms/PlatformDecoderModule.h
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -22,17 +22,16 @@ class MediaRawData;
 namespace layers {
 class ImageContainer;
 }
 
 class MediaDataDecoder;
 class MediaDataDecoderCallback;
 class FlushableMediaTaskQueue;
 class CDMProxy;
-typedef int64_t Microseconds;
 
 // The PlatformDecoderModule interface is used by the MP4Reader to abstract
 // access to the H264 and Audio (AAC/MP3) decoders provided by various platforms.
 // It may be extended to support other codecs in future. Each platform (Windows,
 // MacOSX, Linux, B2G etc) must implement a PlatformDecoderModule to provide
 // access to its decoders in order to get decompressed H.264/AAC from the
 // MP4Reader.
 //
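With the Microseconds typedef removed from this header, remaining raw-microsecond call sites either spell out mp4_demuxer::Microseconds (as MP4Reader.cpp above now does) or migrate to media::TimeUnit. The mechanical pattern applied throughout the hunks below is roughly this (aSample stands in for any MediaRawData*, whose time fields stay int64_t microseconds):

    media::TimeUnit duration =
      media::TimeUnit::FromMicroseconds(aSample->mDuration);
    // ... checked TimeUnit arithmetic in between ...
    int64_t us = duration.ToMicroseconds(); // raw microseconds at API edges
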
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -8,16 +8,17 @@
 #include "PlatformDecoderModule.h"
 #include "nsRect.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/CheckedInt.h"
 #include "VideoUtils.h"
 #include "ImageContainer.h"
 #include "MediaInfo.h"
 #include "MediaTaskQueue.h"
+#include "TimeUnits.h"
 
 namespace mozilla {
 
 // Decoder that uses a passed in object's Create function to create blank
 // MediaData objects.
 template<class BlankMediaDataCreator>
 class BlankMediaDataDecoder : public MediaDataDecoder {
 public:
@@ -46,19 +47,20 @@ public:
                 BlankMediaDataCreator* aCreator)
       : mSample(aSample)
       , mCreator(aCreator)
       , mCallback(aCallback)
     {
     }
     NS_IMETHOD Run() override
     {
-      nsRefPtr<MediaData> data = mCreator->Create(mSample->mTime,
-                                                  mSample->mDuration,
-                                                  mSample->mOffset);
+      nsRefPtr<MediaData> data =
+        mCreator->Create(media::TimeUnit::FromMicroseconds(mSample->mTime),
+                         media::TimeUnit::FromMicroseconds(mSample->mDuration),
+                         mSample->mOffset);
       mCallback->Output(data);
       return NS_OK;
     }
   private:
     nsRefPtr<MediaRawData> mSample;
     BlankMediaDataCreator* mCreator;
     MediaDataDecoderCallback* mCallback;
   };
@@ -98,17 +100,17 @@ public:
     , mFrameHeight(aFrameHeight)
     , mImageContainer(aImageContainer)
   {
     mInfo.mDisplay = nsIntSize(mFrameWidth, mFrameHeight);
     mPicture = gfx::IntRect(0, 0, mFrameWidth, mFrameHeight);
   }
 
   already_AddRefed<MediaData>
-  Create(Microseconds aDTS, Microseconds aDuration, int64_t aOffsetInStream)
+  Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
   {
     // Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
     // with a U and V plane that are half the size of the Y plane, i.e 8 bit,
     // 2x2 subsampled. Have the data pointers of each frame point to the
     // first plane, they'll always be zero'd memory anyway.
     nsAutoArrayPtr<uint8_t> frame(new uint8_t[mFrameWidth * mFrameHeight]);
     memset(frame, 0, mFrameWidth * mFrameHeight);
     VideoData::YCbCrBuffer buffer;
@@ -136,21 +138,21 @@ public:
     buffer.mPlanes[2].mWidth = mFrameWidth / 2;
     buffer.mPlanes[2].mOffset = 0;
     buffer.mPlanes[2].mSkip = 0;
 
     return VideoData::Create(mInfo,
                              mImageContainer,
                              nullptr,
                              aOffsetInStream,
-                             aDTS,
-                             aDuration,
+                             aDTS.ToMicroseconds(),
+                             aDuration.ToMicroseconds(),
                              buffer,
                              true,
-                             aDTS,
+                             aDTS.ToMicroseconds(),
                              mPicture);
   }
 private:
   VideoInfo mInfo;
   gfx::IntRect mPicture;
   uint32_t mFrameWidth;
   uint32_t mFrameHeight;
   RefPtr<layers::ImageContainer> mImageContainer;
@@ -159,23 +161,24 @@ private:
 
 class BlankAudioDataCreator {
 public:
   BlankAudioDataCreator(uint32_t aChannelCount, uint32_t aSampleRate)
     : mFrameSum(0), mChannelCount(aChannelCount), mSampleRate(aSampleRate)
   {
   }
 
-  MediaData* Create(Microseconds aDTS,
-                    Microseconds aDuration,
+  MediaData* Create(const media::TimeUnit& aDTS,
+                    const media::TimeUnit& aDuration,
                     int64_t aOffsetInStream)
   {
     // Convert duration to frames. We add 1 to duration to account for
     // rounding errors, so we get a consistent tone.
-    CheckedInt64 frames = UsecsToFrames(aDuration+1, mSampleRate);
+    CheckedInt64 frames =
+      UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
     if (!frames.isValid() ||
         !mChannelCount ||
         !mSampleRate ||
         frames.value() > (UINT32_MAX / mChannelCount)) {
       return nullptr;
     }
     AudioDataValue* samples = new AudioDataValue[frames.value() * mChannelCount];
     // Fill the sound buffer with an A4 tone.
@@ -184,18 +187,18 @@ public:
     for (int i = 0; i < frames.value(); i++) {
       float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
       for (unsigned c = 0; c < mChannelCount; c++) {
         samples[i * mChannelCount + c] = AudioDataValue(f);
       }
       mFrameSum++;
     }
     return new AudioData(aOffsetInStream,
-                         aDTS,
-                         aDuration,
+                         aDTS.ToMicroseconds(),
+                         aDuration.ToMicroseconds(),
                          uint32_t(frames.value()),
                          samples,
                          mChannelCount,
                          mSampleRate);
   }
 
 private:
   int64_t mFrameSum;
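On the "+1 to account for rounding errors" above: sample durations were themselves produced by truncating integer division, so converting back to frames can drop one. A worked example at 48 kHz, using UsecsToFrames as defined in VideoUtils.cpp:

    // One frame at 48000 Hz lasts 20.833 us, stored truncated as 20 us.
    // UsecsToFrames(20, 48000)     -> 960,000 / 1,000,000   = 0 frames
    // UsecsToFrames(20 + 1, 48000) -> 1,008,000 / 1,000,000 = 1 frame
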
--- a/dom/media/platforms/android/AndroidDecoderModule.cpp
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -100,17 +100,18 @@ public:
     EGLImage eglImage = sEGLLibrary.fCreateImage(EGL_DISPLAY(), eglContext,
                                                  LOCAL_EGL_GL_TEXTURE_2D_KHR,
                                                  (EGLClientBuffer)tex, attribs);
     mGLContext->fDeleteTextures(1, &tex);
 
     return eglImage;
   }
 
-  virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat, Microseconds aDuration) override {
+  virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
+                              const media::TimeUnit& aDuration) override {
     if (!EnsureGLContext()) {
       return NS_ERROR_FAILURE;
     }
 
     nsRefPtr<layers::Image> img = mImageContainer->CreateImage(ImageFormat::SURFACE_TEXTURE);
     layers::SurfaceTextureImage::Data data;
     data.mSurfTex = mSurfaceTexture.get();
     data.mSize = mConfig.mDisplay;
@@ -163,17 +164,17 @@ public:
     int64_t presentationTimeUs;
     NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
 
     nsRefPtr<VideoData> v =
       VideoData::CreateFromImage(mConfig,
                                  mImageContainer,
                                  offset,
                                  presentationTimeUs,
-                                 aDuration,
+                                 aDuration.ToMicroseconds(),
                                  img,
                                  isSync,
                                  presentationTimeUs,
                                  gfx::IntRect(0, 0,
                                               mConfig.mDisplay.width,
                                               mConfig.mDisplay.height));
     ENVOKE_CALLBACK(Output, v);
     return NS_OK;
@@ -208,17 +209,19 @@ public:
 
     if (!buffer && aConfig.mCodecSpecificConfig->Length() >= 2) {
       buffer = jni::Object::LocalRef::Adopt(env, env->NewDirectByteBuffer(aConfig.mCodecSpecificConfig->Elements(),
                                                                           aConfig.mCodecSpecificConfig->Length()));
       NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"), buffer));
     }
   }
 
-  nsresult Output(BufferInfo::Param aInfo, void* aBuffer, MediaFormat::Param aFormat, Microseconds aDuration) {
+  nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
+                  MediaFormat::Param aFormat,
+                  const media::TimeUnit& aDuration) {
     // The output on Android is always 16-bit signed
 
     nsresult rv;
     int32_t numChannels;
     NS_ENSURE_SUCCESS(rv =
         aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);
 
     int32_t sampleRate;
@@ -234,17 +237,17 @@ public:
 
     int32_t offset;
     NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
 
     int64_t presentationTimeUs;
     NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
 
     nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
-                                             aDuration,
+                                             aDuration.ToMicroseconds(),
                                              numFrames,
                                              audio,
                                              numChannels,
                                              sampleRate);
     ENVOKE_CALLBACK(Output, data);
     return NS_OK;
   }
 };
@@ -480,17 +483,17 @@ void MediaCodecDataDecoder::DecoderLoop(
         }
 
         PodCopy((uint8_t*)directBuffer, sample->mData, sample->mSize);
 
         res = mDecoder->QueueInputBuffer(inputIndex, 0, sample->mSize,
                                          sample->mTime, 0);
         HANDLE_DECODER_ERROR();
 
-        mDurations.push(sample->mDuration);
+        mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));
         sample = nullptr;
         outputDone = false;
       }
     }
 
     if (!outputDone) {
       BufferInfo::LocalRef bufferInfo;
       res = BufferInfo::New(&bufferInfo);
@@ -538,17 +541,17 @@ void MediaCodecDataDecoder::DecoderLoop(
           outputDone = true;
 
           // We only queue empty EOF frames, so we're done for now
           continue;
         }
 
         MOZ_ASSERT(!mDurations.empty(), "Should have had a duration queued");
 
-        Microseconds duration = 0;
+        media::TimeUnit duration;
         if (!mDurations.empty()) {
           duration = mDurations.front();
           mDurations.pop();
         }
 
         auto buffer = jni::Object::LocalRef::Adopt(
             frame.GetEnv()->GetObjectArrayElement(mOutputBuffers.Get(), outputStatus));
         if (buffer) {
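MediaCodec's BufferInfo carries a presentation time but no duration, which is presumably why DecoderLoop queues one duration per input sample and pops one per output frame; the std::queue pairing assumes frames come back in submission order. In sketch form:

    // Input side: one push per sample submitted to the codec.
    mDurations.push(media::TimeUnit::FromMicroseconds(sample->mDuration));
    // Output side: the front entry belongs to the oldest in-flight sample.
    media::TimeUnit duration = mDurations.front();
    mDurations.pop();
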
--- a/dom/media/platforms/android/AndroidDecoderModule.h
+++ b/dom/media/platforms/android/AndroidDecoderModule.h
@@ -4,16 +4,17 @@
 
 #ifndef AndroidDecoderModule_h_
 #define AndroidDecoderModule_h_
 
 #include "PlatformDecoderModule.h"
 #include "AndroidSurfaceTexture.h"
 
 #include "MediaCodec.h"
+#include "TimeUnits.h"
 #include "mozilla/Monitor.h"
 
 #include <queue>
 
 namespace mozilla {
 
 typedef std::queue<nsRefPtr<MediaRawData>> SampleQueue;
 
@@ -76,22 +77,23 @@ protected:
 
   // Only these members are protected by mMonitor.
   Monitor mMonitor;
   bool mFlushing;
   bool mDraining;
   bool mStopping;
 
   SampleQueue mQueue;
-  std::queue<Microseconds> mDurations;
+  // Durations are stored in microseconds.
+  std::queue<media::TimeUnit> mDurations;
 
   virtual nsresult InitDecoder(widget::sdk::Surface::Param aSurface);
 
-  virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
-  virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, Microseconds aDuration) { return NS_OK; }
+  virtual nsresult Output(widget::sdk::BufferInfo::Param aInfo, void* aBuffer, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
+  virtual nsresult PostOutput(widget::sdk::BufferInfo::Param aInfo, widget::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration) { return NS_OK; }
   virtual void Cleanup() {};
 
   nsresult ResetInputBuffers();
   nsresult ResetOutputBuffers();
 
   void DecoderLoop();
   nsresult GetInputBuffer(JNIEnv* env, int index, jni::Object::LocalRef* buffer);
   virtual void ClearQueue();
--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -256,33 +256,33 @@ AppleATDecoder::DecodeSample(MediaRawDat
   } while (true);
 
   if (outputData.IsEmpty()) {
     return NS_OK;
   }
 
   size_t numFrames = outputData.Length() / channels;
   int rate = mOutputFormat.mSampleRate;
-  CheckedInt<Microseconds> duration = FramesToUsecs(numFrames, rate);
-  if (!duration.isValid()) {
+  media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
+  if (!duration.IsValid()) {
     NS_WARNING("Invalid count of accumulated audio samples");
     return NS_ERROR_FAILURE;
   }
 
 #ifdef LOG_SAMPLE_DECODE
   LOG("pushed audio at time %lfs; duration %lfs\n",
       (double)aSample->mTime / USECS_PER_S,
-      (double)duration.value() / USECS_PER_S);
+      duration.ToSeconds());
 #endif
 
   nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outputData.Length()]);
   PodCopy(data.get(), &outputData[0], outputData.Length());
   nsRefPtr<AudioData> audio = new AudioData(aSample->mOffset,
                                             aSample->mTime,
-                                            duration.value(),
+                                            duration.ToMicroseconds(),
                                             numFrames,
                                             data.forget(),
                                             channels,
                                             rate);
   mCallback->Output(audio);
   return NS_OK;
 }
 
--- a/dom/media/platforms/apple/AppleVDADecoder.cpp
+++ b/dom/media/platforms/apple/AppleVDADecoder.cpp
@@ -190,34 +190,35 @@ PlatformCallback(void* decompressionOutp
     (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_DTS"));
   AutoCFRelease<CFNumberRef> durref =
     (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_DURATION"));
   AutoCFRelease<CFNumberRef> boref =
     (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_OFFSET"));
   AutoCFRelease<CFNumberRef> kfref =
     (CFNumberRef)CFDictionaryGetValue(frameInfo, CFSTR("FRAME_KEYFRAME"));
 
-  Microseconds dts;
-  Microseconds pts;
-  Microseconds duration;
+  int64_t dts;
+  int64_t pts;
+  int64_t duration;
   int64_t byte_offset;
   char is_sync_point;
 
   CFNumberGetValue(ptsref, kCFNumberSInt64Type, &pts);
   CFNumberGetValue(dtsref, kCFNumberSInt64Type, &dts);
   CFNumberGetValue(durref, kCFNumberSInt64Type, &duration);
   CFNumberGetValue(boref, kCFNumberSInt64Type, &byte_offset);
   CFNumberGetValue(kfref, kCFNumberSInt8Type, &is_sync_point);
 
   nsAutoPtr<AppleVDADecoder::AppleFrameRef> frameRef(
-    new AppleVDADecoder::AppleFrameRef(dts,
-    pts,
-    duration,
-    byte_offset,
-    is_sync_point == 1));
+    new AppleVDADecoder::AppleFrameRef(
+      media::TimeUnit::FromMicroseconds(dts),
+      media::TimeUnit::FromMicroseconds(pts),
+      media::TimeUnit::FromMicroseconds(duration),
+      byte_offset,
+      is_sync_point == 1));
 
   // Forward the data back to an object method which can access
   // the correct MP4Reader callback.
   decoder->OutputFrame(image, frameRef);
 }
 
 AppleVDADecoder::AppleFrameRef*
 AppleVDADecoder::CreateAppleFrameRef(const MediaRawData* aSample)
@@ -247,19 +248,19 @@ nsresult
 AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                              nsAutoPtr<AppleVDADecoder::AppleFrameRef> aFrameRef)
 {
   IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
   MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
 
   LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
     aFrameRef->byte_offset,
-    aFrameRef->decode_timestamp,
-    aFrameRef->composition_timestamp,
-    aFrameRef->duration,
+    aFrameRef->decode_timestamp.ToMicroseconds(),
+    aFrameRef->composition_timestamp.ToMicroseconds(),
+    aFrameRef->duration.ToMicroseconds(),
     aFrameRef->is_sync_point ? " keyframe" : ""
   );
 
   nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
   // Bounds.
   VideoInfo info;
   info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
   gfx::IntRect visible = gfx::IntRect(0,
@@ -272,20 +273,21 @@ AppleVDADecoder::OutputFrame(CVPixelBuff
   layers::MacIOSurfaceImage* videoImage =
     static_cast<layers::MacIOSurfaceImage*>(image.get());
   videoImage->SetSurface(macSurface);
 
   nsRefPtr<VideoData> data;
   data = VideoData::CreateFromImage(info,
                                     mImageContainer,
                                     aFrameRef->byte_offset,
-                                    aFrameRef->composition_timestamp,
-                                    aFrameRef->duration, image.forget(),
+                                    aFrameRef->composition_timestamp.ToMicroseconds(),
+                                    aFrameRef->duration.ToMicroseconds(),
+                                    image.forget(),
                                     aFrameRef->is_sync_point,
-                                    aFrameRef->decode_timestamp,
+                                    aFrameRef->decode_timestamp.ToMicroseconds(),
                                     visible);
 
   if (!data) {
     NS_ERROR("Couldn't create VideoData for frame");
     mCallback->Error();
     return NS_ERROR_FAILURE;
   }
 
--- a/dom/media/platforms/apple/AppleVDADecoder.h
+++ b/dom/media/platforms/apple/AppleVDADecoder.h
@@ -8,49 +8,50 @@
 #define mozilla_AppleVDADecoder_h
 
 #include "PlatformDecoderModule.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "MP4Reader.h"
 #include "MP4Decoder.h"
 #include "nsIThread.h"
 #include "ReorderQueue.h"
+#include "TimeUnits.h"
 
 #include "VideoDecodeAcceleration/VDADecoder.h"
 
 namespace mozilla {
 
 class FlushableMediaTaskQueue;
 class MediaDataDecoderCallback;
 namespace layers {
   class ImageContainer;
 }
 
 class AppleVDADecoder : public MediaDataDecoder {
 public:
   class AppleFrameRef {
   public:
-    Microseconds decode_timestamp;
-    Microseconds composition_timestamp;
-    Microseconds duration;
+    media::TimeUnit decode_timestamp;
+    media::TimeUnit composition_timestamp;
+    media::TimeUnit duration;
     int64_t byte_offset;
     bool is_sync_point;
 
     explicit AppleFrameRef(const MediaRawData& aSample)
-      : decode_timestamp(aSample.mTimecode)
-      , composition_timestamp(aSample.mTime)
-      , duration(aSample.mDuration)
+      : decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
+      , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
+      , duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
       , byte_offset(aSample.mOffset)
       , is_sync_point(aSample.mKeyframe)
     {
     }
 
-    AppleFrameRef(Microseconds aDts,
-                  Microseconds aPts,
-                  Microseconds aDuration,
+    AppleFrameRef(const media::TimeUnit& aDts,
+                  const media::TimeUnit& aPts,
+                  const media::TimeUnit& aDuration,
                   int64_t aByte_offset,
                   bool aIs_sync_point)
       : decode_timestamp(aDts)
       , composition_timestamp(aPts)
       , duration(aDuration)
       , byte_offset(aByte_offset)
       , is_sync_point(aIs_sync_point)
     {
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaTaskQueue.h"
 #include "FFmpegRuntimeLinker.h"
 
 #include "FFmpegAudioDecoder.h"
+#include "TimeUnits.h"
 
 #define MAX_CHANNELS 16
 
 namespace mozilla
 {
 
 FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(
   FlushableMediaTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
@@ -92,17 +93,17 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePac
 
   if (!PrepareFrame()) {
     NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
     mCallback->Error();
     return;
   }
 
   int64_t samplePosition = aSample->mOffset;
-  Microseconds pts = aSample->mTime;
+  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
 
   while (packet.size > 0) {
     int decoded;
     int bytesConsumed =
       avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
 
     if (bytesConsumed < 0) {
       NS_WARNING("FFmpeg audio decoder error.");
@@ -112,33 +113,38 @@ FFmpegAudioDecoder<LIBAV_VER>::DecodePac
 
     if (decoded) {
       uint32_t numChannels = mCodecContext->channels;
       uint32_t samplingRate = mCodecContext->sample_rate;
 
       nsAutoArrayPtr<AudioDataValue> audio(
         CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));
 
-      CheckedInt<Microseconds> duration =
-        FramesToUsecs(mFrame->nb_samples, samplingRate);
-      if (!duration.isValid()) {
+      media::TimeUnit duration =
+        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
+      if (!duration.IsValid()) {
         NS_WARNING("Invalid count of accumulated audio samples");
         mCallback->Error();
         return;
       }
 
       nsRefPtr<AudioData> data = new AudioData(samplePosition,
-                                               pts,
-                                               duration.value(),
+                                               pts.ToMicroseconds(),
+                                               duration.ToMicroseconds(),
                                                mFrame->nb_samples,
                                                audio.forget(),
                                                numChannels,
                                                samplingRate);
       mCallback->Output(data);
-      pts += duration.value();
+      pts += duration;
+      if (!pts.IsValid()) {
+        NS_WARNING("Invalid count of accumulated audio samples");
+        mCallback->Error();
+        return;
+      }
     }
     packet.data += bytesConsumed;
     packet.size -= bytesConsumed;
     samplePosition += bytesConsumed;
   }
 
   if (mTaskQueue->IsEmpty()) {
     mCallback->InputExhausted();
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "WMFAudioMFTManager.h"
 #include "MediaInfo.h"
 #include "VideoUtils.h"
 #include "WMFUtils.h"
 #include "nsTArray.h"
+#include "TimeUnits.h"
 
 #include "mozilla/Logging.h"
 
 PRLogModuleInfo* GetDemuxerLog();
 #define LOG(...) MOZ_LOG(GetDemuxerLog(), mozilla::LogLevel::Debug, (__VA_ARGS__))
 
 namespace mozilla {
 
@@ -285,35 +286,36 @@ WMFAudioMFTManager::Output(int64_t aStre
 
   int16_t* pcm = (int16_t*)data;
   for (int32_t i = 0; i < numSamples; ++i) {
     audioData[i] = AudioSampleToFloat(pcm[i]);
   }
 
   buffer->Unlock();
 
-  CheckedInt64 timestamp = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, mAudioRate);
-  NS_ENSURE_TRUE(timestamp.isValid(), E_FAIL);
+  media::TimeUnit timestamp =
+    FramesToTimeUnit(mAudioFrameOffset + mAudioFrameSum, mAudioRate);
+  NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);
 
   mAudioFrameSum += numFrames;
 
-  CheckedInt64 duration = FramesToUsecs(numFrames, mAudioRate);
-  NS_ENSURE_TRUE(duration.isValid(), E_FAIL);
+  media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
+  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
 
   aOutData = new AudioData(aStreamOffset,
-                           timestamp.value(),
-                           duration.value(),
+                           timestamp.ToMicroseconds(),
+                           duration.ToMicroseconds(),
                            numFrames,
                            audioData.forget(),
                            mAudioChannels,
                            mAudioRate);
 
   #ifdef LOG_SAMPLE_DECODE
   LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
-      timestamp, duration, currentLength);
+      timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
   #endif
 
   return S_OK;
 }
 
 void
 WMFAudioMFTManager::Shutdown()
 {
--- a/dom/media/platforms/wmf/WMFUtils.cpp
+++ b/dom/media/platforms/wmf/WMFUtils.cpp
@@ -65,33 +65,33 @@ GetDefaultStride(IMFMediaType *aType, ui
 }
 
 int32_t
 MFOffsetToInt32(const MFOffset& aOffset)
 {
   return int32_t(aOffset.value + (aOffset.fract / 65536.0f));
 }
 
-int64_t
+media::TimeUnit
 GetSampleDuration(IMFSample* aSample)
 {
-  NS_ENSURE_TRUE(aSample, -1);
+  NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
   int64_t duration = 0;
   aSample->GetSampleDuration(&duration);
-  return HNsToUsecs(duration);
+  return media::TimeUnit::FromMicroseconds(HNsToUsecs(duration));
 }
 
-int64_t
+media::TimeUnit
 GetSampleTime(IMFSample* aSample)
 {
-  NS_ENSURE_TRUE(aSample, -1);
+  NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
   LONGLONG timestampHns = 0;
   HRESULT hr = aSample->GetSampleTime(&timestampHns);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), -1);
-  return HNsToUsecs(timestampHns);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), media::TimeUnit::Invalid());
+  return media::TimeUnit::FromMicroseconds(HNsToUsecs(timestampHns));
 }
 
 // Gets the sub-region of the video frame that should be displayed.
 // See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb530115(v=vs.85).aspx
 HRESULT
 GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion)
 {
   // Determine if "pan and scan" is enabled for this media. If it is, we
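GetSampleDuration and GetSampleTime now fail by returning media::TimeUnit::Invalid() rather than the old -1 sentinel, so callers test IsValid() (see WMFVideoMFTManager below). WMF expresses times in hundreds of nanoseconds (hns); HNsToUsecs is assumed here to divide by 10, since 1 us = 10 hns:

    // One NTSC frame at 29.97 fps is ~333,667 hns:
    // HNsToUsecs(333667) == 33366 us (integer division drops the remainder)
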
--- a/dom/media/platforms/wmf/WMFUtils.h
+++ b/dom/media/platforms/wmf/WMFUtils.h
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WMFUtils_h
 #define WMFUtils_h
 
 #include "WMF.h"
 #include "nsString.h"
 #include "nsRect.h"
+#include "TimeUnits.h"
 #include "VideoUtils.h"
 
 // Various utilities shared by WMF backend files.
 
 namespace mozilla {
 
 // Converts from microseconds to hundreds of nanoseconds.
 // We use microseconds for our timestamps, whereas WMF uses
@@ -41,24 +42,24 @@ GetDefaultStride(IMFMediaType *aType, ui
 int32_t
 MFOffsetToInt32(const MFOffset& aOffset);
 
 // Gets the sub-region of the video frame that should be displayed.
 // See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb530115(v=vs.85).aspx
 HRESULT
 GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion);
 
-// Returns the duration of a IMFSample in microseconds.
-// Returns -1 on failure.
-int64_t
+// Returns the duration of an IMFSample as a media::TimeUnit.
+// Returns media::TimeUnit::Invalid() on failure.
+media::TimeUnit
 GetSampleDuration(IMFSample* aSample);
 
-// Returns the presentation time of a IMFSample in microseconds.
-// Returns -1 on failure.
-int64_t
+// Returns the presentation time of an IMFSample as a media::TimeUnit.
+// Returns media::TimeUnit::Invalid() on failure.
+media::TimeUnit
 GetSampleTime(IMFSample* aSample);
 
 inline bool
 IsFlagSet(DWORD flags, DWORD pattern) {
   return (flags & pattern) == pattern;
 }
 
 } // namespace mozilla
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -405,34 +405,36 @@ WMFVideoMFTManager::CreateBasicVideoFram
   // V plane (Cr)
   b.mPlanes[2].mData = data + y_size;
   b.mPlanes[2].mStride = halfStride;
   b.mPlanes[2].mHeight = halfHeight;
   b.mPlanes[2].mWidth = halfWidth;
   b.mPlanes[2].mOffset = 0;
   b.mPlanes[2].mSkip = 0;
 
-  Microseconds pts = GetSampleTime(aSample);
-  Microseconds duration = GetSampleDuration(aSample);
+  media::TimeUnit pts = GetSampleTime(aSample);
+  NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
+  media::TimeUnit duration = GetSampleDuration(aSample);
+  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
 
   nsRefPtr<layers::PlanarYCbCrImage> image =
     new IMFYCbCrImage(buffer, twoDBuffer);
 
   VideoData::SetVideoDataToImage(image,
                                  mVideoInfo,
                                  b,
                                  mPictureRegion,
                                  false);
 
   nsRefPtr<VideoData> v =
     VideoData::CreateFromImage(mVideoInfo,
                                mImageContainer,
                                aStreamOffset,
-                               std::max(0LL, pts),
-                               duration,
+                               pts.ToMicroseconds(),
+                               duration.ToMicroseconds(),
                                image.forget(),
                                false,
                                -1,
                                mPictureRegion);
 
   v.forget(aOutVideoData);
   return S_OK;
 }
@@ -453,23 +455,25 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
   nsRefPtr<Image> image;
   hr = mDXVA2Manager->CopyToImage(aSample,
                                   mPictureRegion,
                                   mImageContainer,
                                   getter_AddRefs(image));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   NS_ENSURE_TRUE(image, E_FAIL);
 
-  Microseconds pts = GetSampleTime(aSample);
-  Microseconds duration = GetSampleDuration(aSample);
+  media::TimeUnit pts = GetSampleTime(aSample);
+  NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
+  media::TimeUnit duration = GetSampleDuration(aSample);
+  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
   nsRefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
                                                      mImageContainer,
                                                      aStreamOffset,
-                                                     pts,
-                                                     duration,
+                                                     pts.ToMicroseconds(),
+                                                     duration.ToMicroseconds(),
                                                      image.forget(),
                                                      false,
                                                      -1,
                                                      mPictureRegion);
 
   NS_ENSURE_TRUE(v, E_FAIL);
   v.forget(aOutVideoData);