Bug 1355740 - Change the type of TrackInfo::mDuration to TimeUnit. r=jya
author: JW Wang <jwwang@mozilla.com>
date: Wed, 12 Apr 2017 16:41:36 +0800
changeset: 352884 5a7cbc44a0de4c5415a82aefaaf8e2ab81df131e
parent: 352883 2df7d0984161a6e68dd086b4c1e2dc812f2fcee5
child: 352885 742905f2fd0cd716f3d509ae3a03ffa735111a64
push id: 31652
push user: kwierso@gmail.com
push date: Thu, 13 Apr 2017 20:03:53 +0000
treeherder: mozilla-central@3243c8fc3ce7
reviewers: jya
bugs: 1355740
milestone: 55.0a1
Bug 1355740 - Change the type of TrackInfo::mDuration to TimeUnit. r=jya MozReview-Commit-ID: P7aqw4d5Vk
dom/media/ADTSDemuxer.cpp
dom/media/MP3Demuxer.cpp
dom/media/MediaFormatReader.cpp
dom/media/MediaInfo.h
dom/media/TimeUnits.h
dom/media/flac/FlacDemuxer.cpp
dom/media/flac/FlacFrameParser.cpp
dom/media/fmp4/MP4Decoder.cpp
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/ogg/OggDemuxer.cpp
dom/media/platforms/wmf/DXVA2Manager.cpp
dom/media/wave/WaveDemuxer.cpp
dom/media/webm/WebMDemuxer.cpp
media/libstagefright/binding/DecoderData.cpp
media/libstagefright/binding/MP4Metadata.cpp
media/libstagefright/gtest/TestParser.cpp
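
The pattern repeated across the diffs below, condensed into a before/after sketch (the `info` and `Duration()` names stand in for any of the demuxers touched here, e.g. ADTSTrackDemuxer::Init()):

  // Before: TrackInfo::mDuration was an int64_t holding microseconds.
  info->mDuration = Duration().ToMicroseconds();

  // After: TrackInfo::mDuration is a media::TimeUnit; callers assign the
  // TimeUnit directly and convert only at boundaries that still expect
  // microseconds (logging, legacy APIs).
  info->mDuration = Duration();
  ADTSLOG("mDuration=%" PRId64, info->mDuration.ToMicroseconds());
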
--- a/dom/media/ADTSDemuxer.cpp
+++ b/dom/media/ADTSDemuxer.cpp
@@ -400,17 +400,17 @@ ADTSTrackDemuxer::Init()
 
   if (!mInfo) {
     mInfo = MakeUnique<AudioInfo>();
   }
 
   mInfo->mRate = mSamplesPerSecond;
   mInfo->mChannels = mChannels;
   mInfo->mBitDepth = 16;
-  mInfo->mDuration = Duration().ToMicroseconds();
+  mInfo->mDuration = Duration();
 
   // AAC Specific information
   mInfo->mMimeType = "audio/mp4a-latm";
 
   // Configure AAC codec-specific values.
 
   // According to
   // https://msdn.microsoft.com/en-us/library/windows/desktop/dd742784%28v=vs.85%29.aspx,
@@ -419,17 +419,18 @@ ADTSTrackDemuxer::Init()
   mInfo->mProfile = ProfileLevelIndication(mParser->FirstFrame());
   // For AAC, mExtendedProfile contains the audioObjectType from Table
   // 1.3 -- Audio Profile definition, ISO/IEC 14496-3. Eg. 2 == AAC LC
   mInfo->mExtendedProfile = mParser->FirstFrame().Header().mObjectType;
   InitAudioSpecificConfig(mParser->FirstFrame(), mInfo->mCodecSpecificConfig);
 
   ADTSLOG("Init mInfo={mRate=%u mChannels=%u mBitDepth=%u mDuration=%" PRId64
           "}",
-          mInfo->mRate, mInfo->mChannels, mInfo->mBitDepth, mInfo->mDuration);
+          mInfo->mRate, mInfo->mChannels, mInfo->mBitDepth,
+          mInfo->mDuration.ToMicroseconds());
 
   return mSamplesPerSecond && mChannels;
 }
 
 UniquePtr<TrackInfo>
 ADTSTrackDemuxer::GetInfo() const
 {
   return mInfo->Clone();
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/MP3Demuxer.cpp
@@ -139,21 +139,21 @@ MP3TrackDemuxer::Init()
   if (!mInfo) {
     mInfo = MakeUnique<AudioInfo>();
   }
 
   mInfo->mRate = mSamplesPerSecond;
   mInfo->mChannels = mChannels;
   mInfo->mBitDepth = 16;
   mInfo->mMimeType = "audio/mpeg";
-  mInfo->mDuration = Duration().ToMicroseconds();
+  mInfo->mDuration = Duration();
 
   MP3LOG("Init mInfo={mRate=%d mChannels=%d mBitDepth=%d mDuration=%" PRId64 "}",
          mInfo->mRate, mInfo->mChannels, mInfo->mBitDepth,
-         mInfo->mDuration);
+         mInfo->mDuration.ToMicroseconds());
 
   return mSamplesPerSecond && mChannels;
 }
 
 media::TimeUnit
 MP3TrackDemuxer::SeekPosition() const
 {
   TimeUnit pos = Duration(mFrameIndex);
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -1415,22 +1415,22 @@ MediaFormatReader::OnDemuxerInitDone(con
       nsCOMPtr<nsIRunnable> r =
         new DispatchKeyNeededEvent(mDecoder, crypto->mInitDatas[i].mInitData,
                                    crypto->mInitDatas[i].mType);
       mDecoder->AbstractMainThread()->Dispatch(r.forget());
     }
     mInfo.mCrypto = *crypto;
   }
 
-  int64_t videoDuration = HasVideo() ? mInfo.mVideo.mDuration : 0;
-  int64_t audioDuration = HasAudio() ? mInfo.mAudio.mDuration : 0;
-
-  int64_t duration = std::max(videoDuration, audioDuration);
-  if (duration != -1) {
-    mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(duration));
+  auto videoDuration = HasVideo() ? mInfo.mVideo.mDuration : TimeUnit::Zero();
+  auto audioDuration = HasAudio() ? mInfo.mAudio.mDuration : TimeUnit::Zero();
+
+  auto duration = std::max(videoDuration, audioDuration);
+  if (duration.IsPositive()) {
+    mInfo.mMetadataDuration = Some(duration);
   }
 
   mInfo.mMediaSeekable = mDemuxer->IsSeekable();
   mInfo.mMediaSeekableOnlyInBufferedRanges =
     mDemuxer->IsSeekableOnlyInBufferedRanges();
 
   if (!videoActive && !audioActive) {
     mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -57,17 +57,16 @@ public:
             bool aEnabled,
             TrackID aTrackId)
     : mId(aId)
     , mKind(aKind)
     , mLabel(aLabel)
     , mLanguage(aLanguage)
     , mEnabled(aEnabled)
     , mTrackId(aTrackId)
-    , mDuration(0)
     , mMediaTime(0)
     , mIsRenderedExternally(false)
     , mType(aType)
   {
     MOZ_COUNT_CTOR(TrackInfo);
   }
 
   // Only used for backward compatibility. Do not use in new code.
@@ -89,17 +88,17 @@ public:
   nsString mKind;
   nsString mLabel;
   nsString mLanguage;
   bool mEnabled;
 
   TrackID mTrackId;
 
   nsCString mMimeType;
-  int64_t mDuration;
+  media::TimeUnit mDuration;
   int64_t mMediaTime;
   CryptoTrack mCrypto;
 
   nsTArray<MetadataTag> mTags;
 
   // True if the track is gonna be (decrypted)/decoded and
   // rendered directly by non-gecko components.
   bool mIsRenderedExternally;
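
The mDuration(0) initializer drops out of the constructor because a default-constructed media::TimeUnit already represents zero; a small sketch of that assumption, using only operations visible in this patch:

  media::TimeUnit duration;                             // value-initialized, i.e. zero
  MOZ_ASSERT(!duration.IsPositive());                   // no duration known yet
  duration = media::TimeUnit::FromMicroseconds(40000);
  MOZ_ASSERT(duration.IsPositive());
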
--- a/dom/media/TimeUnits.h
+++ b/dom/media/TimeUnits.h
@@ -156,16 +156,20 @@ public:
   TimeDuration ToTimeDuration() const {
     return TimeDuration::FromMicroseconds(mValue.value());
   }
 
   bool IsInfinite() const {
     return mValue.value() == INT64_MAX;
   }
 
+  bool IsPositive() const {
+    return mValue.value() > 0;
+  }
+
   bool operator == (const TimeUnit& aOther) const {
     MOZ_ASSERT(IsValid() && aOther.IsValid());
     return mValue.value() == aOther.mValue.value();
   }
   bool operator != (const TimeUnit& aOther) const {
     MOZ_ASSERT(IsValid() && aOther.IsValid());
     return mValue.value() != aOther.mValue.value();
   }
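
The new IsPositive() helper takes over from the implicit truthiness checks that an int64_t duration allowed; a usage sketch (values hypothetical):

  TimeUnit d = TimeUnit::Zero();
  if (!d.IsPositive()) {
    // Duration unknown or zero -- fall back to scanning the stream,
    // as FlacTrackDemuxer::Init() and MP4Metadata do below.
  }
  d = TimeUnit::FromMicroseconds(1);
  MOZ_ASSERT(d.IsPositive());
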
--- a/dom/media/flac/FlacDemuxer.cpp
+++ b/dom/media/flac/FlacDemuxer.cpp
@@ -676,17 +676,17 @@ FlacTrackDemuxer::Init()
     mSource.Seek(SEEK_SET, mParser->FirstFrame().Offset());
     mParser->EndFrameSession();
   } else if (!mParser->Info().IsValid() || !mParser->FirstFrame().IsValid()) {
     // We must find at least a frame to determine the metadata.
     // We can't play this stream.
     return false;
   }
 
-  if (!mParser->Info().IsValid() || !mParser->Info().mDuration) {
+  if (!mParser->Info().IsValid() || !mParser->Info().mDuration.IsPositive()) {
     // Check if we can look at the last frame for the end time to determine the
     // duration when we don't have any.
     TimeAtEnd();
   }
 
   return true;
 }
 
@@ -701,28 +701,28 @@ FlacTrackDemuxer::GetInfo() const
       for (auto iter = tags->Iter(); !iter.Done(); iter.Next()) {
         info->mTags.AppendElement(MetadataTag(iter.Key(), iter.Data()));
       }
     }
     return info;
   } else if (mParser->FirstFrame().Info().IsValid()) {
     // Use the first frame header.
     UniquePtr<TrackInfo> info = mParser->FirstFrame().Info().Clone();
-    info->mDuration = Duration().ToMicroseconds();
+    info->mDuration = Duration();
     return info;
   }
   return nullptr;
 }
 
 bool
 FlacTrackDemuxer::IsSeekable() const
 {
   // For now we only allow seeking if a STREAMINFO block was found and with
   // a known number of samples (duration is set).
-  return mParser->Info().IsValid() && mParser->Info().mDuration;
+  return mParser->Info().IsValid() && mParser->Info().mDuration.IsPositive();
 }
 
 RefPtr<FlacTrackDemuxer::SeekPromise>
 FlacTrackDemuxer::Seek(const TimeUnit& aTime)
 {
   // Efficiently seek to the position.
   FastSeek(aTime);
   // Correct seek position by scanning the next frames.
@@ -1010,18 +1010,17 @@ FlacTrackDemuxer::AverageFrameLength() c
   }
 
   return 0.0;
 }
 
 TimeUnit
 FlacTrackDemuxer::Duration() const
 {
-  return std::max(mParsedFramesDuration,
-                  TimeUnit::FromMicroseconds(mParser->Info().mDuration));
+  return std::max(mParsedFramesDuration, mParser->Info().mDuration);
 }
 
 TimeUnit
 FlacTrackDemuxer::TimeAtEnd()
 {
   // Scan the last 128kB if available to determine the last frame.
   static const int OFFSET_FROM_END = 128 * 1024;
 
--- a/dom/media/flac/FlacFrameParser.cpp
+++ b/dom/media/flac/FlacFrameParser.cpp
@@ -143,19 +143,18 @@ FlacFrameParser::DecodeHeaderBlock(const
       }
       mNumFrames = blob & BITMASK(36);
 
       mInfo.mMimeType = "audio/flac";
       mInfo.mRate = sampleRate;
       mInfo.mChannels = numChannels;
       mInfo.mBitDepth = bps;
       mInfo.mCodecSpecificConfig->AppendElements(blockDataStart, blockDataSize);
-      CheckedInt64 duration =
-        SaferMultDiv(mNumFrames, USECS_PER_S, sampleRate);
-      mInfo.mDuration = duration.isValid() ? duration.value() : 0;
+      auto duration = FramesToTimeUnit(mNumFrames, sampleRate);
+      mInfo.mDuration = duration.IsValid() ? duration : media::TimeUnit::Zero();
       mParser = new OpusParser;
       break;
     }
     case FLAC_METADATA_TYPE_VORBIS_COMMENT:
     {
       if (!mParser) {
         // We must have seen a valid streaminfo first.
         return false;
--- a/dom/media/fmp4/MP4Decoder.cpp
+++ b/dom/media/fmp4/MP4Decoder.cpp
@@ -215,17 +215,17 @@ static const uint8_t sTestH264ExtraData[
 
 static already_AddRefed<MediaDataDecoder>
 CreateTestH264Decoder(layers::KnowsCompositor* aKnowsCompositor,
                       VideoInfo& aConfig,
                       TaskQueue* aTaskQueue)
 {
   aConfig.mMimeType = "video/avc";
   aConfig.mId = 1;
-  aConfig.mDuration = 40000;
+  aConfig.mDuration = media::TimeUnit::FromMicroseconds(40000);
   aConfig.mMediaTime = 0;
   aConfig.mImage = aConfig.mDisplay = nsIntSize(640, 360);
   aConfig.mExtraData = new MediaByteBuffer();
   aConfig.mExtraData->AppendElements(sTestH264ExtraData,
                                      MOZ_ARRAY_LENGTH(sTestH264ExtraData));
 
   RefPtr<PDMFactory> platform = new PDMFactory();
   RefPtr<MediaDataDecoder> decoder(platform->CreateDecoder({ aConfig, aTaskQueue, aKnowsCompositor }));
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -985,18 +985,18 @@ TrackBuffersManager::OnDemuxerInitDone(c
     // We currently only handle the first audio track.
     mAudioTracks.mDemuxer =
       mInputDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0);
     MOZ_ASSERT(mAudioTracks.mDemuxer);
     info.mAudio = *mAudioTracks.mDemuxer->GetInfo()->GetAsAudioInfo();
     info.mAudio.mTrackId = 1;
   }
 
-  int64_t videoDuration = numVideos ? info.mVideo.mDuration : 0;
-  int64_t audioDuration = numAudios ? info.mAudio.mDuration : 0;
+  int64_t videoDuration = numVideos ? info.mVideo.mDuration.ToMicroseconds() : 0;
+  int64_t audioDuration = numAudios ? info.mAudio.mDuration.ToMicroseconds() : 0;
 
   int64_t duration = std::max(videoDuration, audioDuration);
   // 1. Update the duration attribute if it currently equals NaN.
   // Those steps are performed by the MediaSourceDecoder::SetInitialDuration
   mAbstractMainThread->Dispatch(NewRunnableMethod<int64_t>
                                 (mParentDecoder.get(),
                                 &MediaSourceDecoder::SetInitialDuration,
                                 duration ? duration : -1));
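
MediaSourceDecoder::SetInitialDuration still takes an int64_t, so the TimeUnit is converted back right at the call boundary; a condensed sketch of that pattern (not the literal patch code above, which keeps the max/fallback on int64_t):

  // Internal bookkeeping stays in TimeUnit; microsecond-based legacy APIs
  // get ToMicroseconds() at the edge.
  auto duration = std::max(info.mVideo.mDuration, info.mAudio.mDuration);
  int64_t usecs = duration.IsPositive() ? duration.ToMicroseconds() : -1;
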
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -576,20 +576,20 @@ OggDemuxer::ReadMetadata()
         mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(endTime - mStartTime.refOr(0)));
         OGG_DEBUG("Got Ogg duration from seeking to end %" PRId64, endTime);
       }
     }
     if (mInfo.mMetadataDuration.isNothing()) {
       mInfo.mMetadataDuration.emplace(TimeUnit::FromInfinity());
     }
     if (HasAudio()) {
-      mInfo.mAudio.mDuration = mInfo.mMetadataDuration->ToMicroseconds();
+      mInfo.mAudio.mDuration = mInfo.mMetadataDuration.ref();
     }
     if (HasVideo()) {
-      mInfo.mVideo.mDuration = mInfo.mMetadataDuration->ToMicroseconds();
+      mInfo.mVideo.mDuration = mInfo.mMetadataDuration.ref();
     }
   } else {
     OGG_DEBUG("no audio or video tracks");
     return NS_ERROR_FAILURE;
   }
 
   OGG_DEBUG("success?!");
   return NS_OK;
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -510,17 +510,17 @@ D3D9DXVA2Manager::CreateDXVA2Decoder(con
 {
   MOZ_ASSERT(NS_IsMainThread());
   DXVA2_VideoDesc desc;
   desc.SampleWidth = aVideoInfo.mImage.width;
   desc.SampleHeight = aVideoInfo.mImage.height;
   desc.Format = (D3DFORMAT)MAKEFOURCC('N','V','1','2');
 
   // Assume the current duration is representative for the entire video.
-  float framerate = 1000000.0 / aVideoInfo.mDuration;
+  float framerate = 1000000.0 / aVideoInfo.mDuration.ToMicroseconds();
   if (IsUnsupportedResolution(desc.SampleWidth, desc.SampleHeight, framerate)) {
     return false;
   }
 
   mDecoder = CreateDecoder(desc);
   if (!mDecoder) {
     aFailureReason =
       nsPrintfCString("Fail to create video decoder in D3D9DXVA2Manager.");
@@ -734,17 +734,17 @@ D3D11DXVA2Manager::Init(layers::KnowsCom
 HRESULT
 D3D11DXVA2Manager::InitInternal(layers::KnowsCompositor* aKnowsCompositor,
                                 nsACString& aFailureReason,
                                 ID3D11Device* aDevice)
 {
   HRESULT hr;
 
   mDevice = aDevice;
-  
+
   if (!mDevice) {
     mDevice = gfx::DeviceManagerDx::Get()->CreateDecoderDevice();
     if (!mDevice) {
       aFailureReason.AssignLiteral("Failed to create D3D11 device for decoder");
       return E_FAIL;
     }
   }
 
@@ -1097,17 +1097,17 @@ D3D11DXVA2Manager::CreateDXVA2Decoder(co
   MOZ_ASSERT(NS_IsMainThread());
   D3D11_VIDEO_DECODER_DESC desc;
   desc.Guid = mDecoderGUID;
   desc.OutputFormat = DXGI_FORMAT_NV12;
   desc.SampleWidth = aVideoInfo.mImage.width;
   desc.SampleHeight = aVideoInfo.mImage.height;
 
   // Assume the current duration is representative for the entire video.
-  float framerate = 1000000.0 / aVideoInfo.mDuration;
+  float framerate = 1000000.0 / aVideoInfo.mDuration.ToMicroseconds();
   if (IsUnsupportedResolution(desc.SampleWidth, desc.SampleHeight, framerate)) {
     return false;
   }
 
   mDecoder = CreateDecoder(desc);
   if (!mDecoder) {
     aFailureReason =
       nsPrintfCString("Fail to create video decoder in D3D11DXVA2Manager.");
--- a/dom/media/wave/WaveDemuxer.cpp
+++ b/dom/media/wave/WaveDemuxer.cpp
@@ -156,19 +156,19 @@ WAVTrackDemuxer::Init()
 
   mInfo->mRate = mSamplesPerSecond;
   mInfo->mChannels = mChannels;
   mInfo->mBitDepth = mSampleFormat;
   mInfo->mProfile = mFmtParser.FmtChunk().WaveFormat() & 0x00FF;
   mInfo->mExtendedProfile = (mFmtParser.FmtChunk().WaveFormat() & 0xFF00) >> 8;
   mInfo->mMimeType = "audio/wave; codecs=";
   mInfo->mMimeType.AppendInt(mFmtParser.FmtChunk().WaveFormat());
-  mInfo->mDuration = Duration().ToMicroseconds();
+  mInfo->mDuration = Duration();
 
-  return !!(mInfo->mDuration);
+  return mInfo->mDuration.IsPositive();
 }
 
 bool
 WAVTrackDemuxer::RIFFParserInit()
 {
   RefPtr<MediaRawData> riffHeader = GetFileHeader(FindRIFFHeader());
   if (!riffHeader) {
     return false;
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -385,18 +385,17 @@ WebMDemuxer::ReadMetadata()
           break;
         case NESTEGG_VIDEO_STEREO_RIGHT_LEFT:
           mInfo.mVideo.mStereoMode = StereoMode::RIGHT_LEFT;
           break;
       }
       uint64_t duration = 0;
       r = nestegg_duration(context, &duration);
       if (!r) {
-        mInfo.mVideo.mDuration =
-          media::TimeUnit::FromNanoseconds(duration).ToMicroseconds();
+        mInfo.mVideo.mDuration = media::TimeUnit::FromNanoseconds(duration);
       }
       mInfo.mVideo.mCrypto = GetTrackCrypto(TrackInfo::kVideoTrack, track);
       if (mInfo.mVideo.mCrypto.mValid) {
         mCrypto.AddInitData(NS_LITERAL_STRING("webm"),
                             mInfo.mVideo.mCrypto.mKeyId);
       }
     } else if (type == NESTEGG_TRACK_AUDIO && !mHasAudio) {
       nestegg_audio_params params;
@@ -453,18 +452,17 @@ WebMDemuxer::ReadMetadata()
       }
       else {
         mInfo.mAudio.mCodecSpecificConfig->AppendElements(headers[0],
                                                           headerLens[0]);
       }
       uint64_t duration = 0;
       r = nestegg_duration(context, &duration);
       if (!r) {
-        mInfo.mAudio.mDuration =
-          media::TimeUnit::FromNanoseconds(duration).ToMicroseconds();
+        mInfo.mAudio.mDuration = media::TimeUnit::FromNanoseconds(duration);
       }
       mInfo.mAudio.mCrypto = GetTrackCrypto(TrackInfo::kAudioTrack, track);
       if (mInfo.mAudio.mCrypto.mValid) {
         mCrypto.AddInitData(NS_LITERAL_STRING("webm"),
                             mInfo.mAudio.mCrypto.mKeyId);
       }
     }
   }
--- a/media/libstagefright/binding/DecoderData.cpp
+++ b/media/libstagefright/binding/DecoderData.cpp
@@ -13,16 +13,17 @@
 #include "mozilla/ArrayUtils.h"
 #include "include/ESDS.h"
 
 // OpusDecoder header is really needed only by MP4 in rust
 #include "OpusDecoder.h"
 #include "mp4parse.h"
 
 using namespace stagefright;
+using mozilla::media::TimeUnit;
 
 namespace mp4_demuxer
 {
 
 static int32_t
 FindInt32(const MetaData* mMetaData, uint32_t mKey)
 {
   int32_t value;
@@ -109,17 +110,18 @@ CryptoFile::DoUpdate(const uint8_t* aDat
 
 static void
 UpdateTrackInfo(mozilla::TrackInfo& aConfig,
                 const MetaData* aMetaData,
                 const char* aMimeType)
 {
   mozilla::CryptoTrack& crypto = aConfig.mCrypto;
   aConfig.mMimeType = aMimeType;
-  aConfig.mDuration = FindInt64(aMetaData, kKeyDuration);
+  aConfig.mDuration = TimeUnit::FromMicroseconds(
+    FindInt64(aMetaData, kKeyDuration));
   aConfig.mMediaTime = FindInt64(aMetaData, kKeyMediaTime);
   aConfig.mTrackId = FindInt32(aMetaData, kKeyTrackID);
   aConfig.mCrypto.mValid = aMetaData->findInt32(kKeyCryptoMode, &crypto.mMode) &&
     aMetaData->findInt32(kKeyCryptoDefaultIVSize, &crypto.mIVSize) &&
     FindData(aMetaData, kKeyCryptoKey, &crypto.mKeyId);
 }
 
 void
@@ -223,17 +225,17 @@ MP4AudioInfo::Update(const mp4parse_trac
   } else if (track->codec == mp4parse_codec_MP3) {
     mMimeType = MEDIA_MIMETYPE_AUDIO_MPEG;
   }
 
   mRate = audio->sample_rate;
   mChannels = audio->channels;
   mBitDepth = audio->bit_depth;
   mExtendedProfile = audio->profile;
-  mDuration = track->duration;
+  mDuration = TimeUnit::FromMicroseconds(track->duration);
   mMediaTime = track->media_time;
   mTrackId = track->track_id;
 
   // In stagefright, mProfile is kKeyAACProfile, mExtendedProfile is kKeyAACAOT.
   // Both are from audioObjectType in AudioSpecificConfig.
   if (audio->profile <= 4) {
     mProfile = audio->profile;
   }
@@ -255,17 +257,17 @@ MP4VideoInfo::Update(const mp4parse_trac
 {
   UpdateTrackProtectedInfo(*this, video->protected_data);
   if (track->codec == mp4parse_codec_AVC) {
     mMimeType = MEDIA_MIMETYPE_VIDEO_AVC;
   } else if (track->codec == mp4parse_codec_VP9) {
     mMimeType = NS_LITERAL_CSTRING("video/vp9");
   }
   mTrackId = track->track_id;
-  mDuration = track->duration;
+  mDuration = TimeUnit::FromMicroseconds(track->duration);
   mMediaTime = track->media_time;
   mDisplay.width = video->display_width;
   mDisplay.height = video->display_height;
   mImage.width = video->image_width;
   mImage.height = video->image_height;
   mRotation = ToSupportedRotation(video->rotation);
   if (video->extra_data.data) {
     mExtraData->AppendElements(video->extra_data.data, video->extra_data.length);
--- a/media/libstagefright/binding/MP4Metadata.cpp
+++ b/media/libstagefright/binding/MP4Metadata.cpp
@@ -25,16 +25,17 @@
 #include <limits>
 #include <stdint.h>
 #include <vector>
 
 
 struct FreeMP4Parser { void operator()(mp4parse_parser* aPtr) { mp4parse_free(aPtr); } };
 
 using namespace stagefright;
+using mozilla::media::TimeUnit;
 
 namespace mp4_demuxer
 {
 
 static LazyLogModule sLog("MP4Metadata");
 
 class DataSourceAdapter : public DataSource
 {
@@ -721,20 +722,20 @@ MP4MetadataStagefright::GetTrackInfo(moz
             nullptr};
   }
 
   UniquePtr<mozilla::TrackInfo> e = CheckTrack(mimeType, metaData.get(), index);
 
   if (e) {
     metaData = mMetadataExtractor->getMetaData();
     int64_t movieDuration;
-    if (!e->mDuration &&
+    if (!e->mDuration.IsPositive() &&
         metaData->findInt64(kKeyMovieDuration, &movieDuration)) {
       // No duration in track, use movie extend header box one.
-      e->mDuration = movieDuration;
+      e->mDuration = TimeUnit::FromMicroseconds(movieDuration);
     }
   }
 
   return {NS_OK, Move(e)};
 }
 
 mozilla::UniquePtr<mozilla::TrackInfo>
 MP4MetadataStagefright::CheckTrack(const char* aMimeType,
@@ -1071,21 +1072,21 @@ MP4MetadataRust::GetTrackInfo(mozilla::T
       return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                           RESULT_DETAIL("Cannot handle %s track #%zu",
                                         TrackTypeToStr(aType),
                                         aTrackNumber)),
               nullptr};
   }
 
   // No duration in track, use fragment_duration.
-  if (e && !e->mDuration) {
+  if (e && !e->mDuration.IsPositive()) {
     mp4parse_fragment_info info;
     auto rv = mp4parse_get_fragment_info(mRustParser.get(), &info);
     if (rv == mp4parse_status_OK) {
-      e->mDuration = info.fragment_duration;
+      e->mDuration = TimeUnit::FromMicroseconds(info.fragment_duration);
     }
   }
 
   if (e && e->IsValid()) {
     return {NS_OK, Move(e)};
   }
   MOZ_LOG(sLog, LogLevel::Debug, ("TrackInfo didn't validate"));
 
--- a/media/libstagefright/gtest/TestParser.cpp
+++ b/media/libstagefright/gtest/TestParser.cpp
@@ -310,17 +310,18 @@ TEST(stagefright_MPEG4Metadata, test_cas
           tests[test].mNumberVideoTracks == E) {
         EXPECT_TRUE(!trackInfo.Ref());
       } else {
         ASSERT_TRUE(!!trackInfo.Ref());
         const VideoInfo* videoInfo = trackInfo.Ref()->GetAsVideoInfo();
         ASSERT_TRUE(!!videoInfo);
         EXPECT_TRUE(videoInfo->IsValid());
         EXPECT_TRUE(videoInfo->IsVideo());
-        EXPECT_EQ(tests[test].mVideoDuration, videoInfo->mDuration);
+        EXPECT_EQ(tests[test].mVideoDuration,
+                  videoInfo->mDuration.ToMicroseconds());
         EXPECT_EQ(tests[test].mWidth, videoInfo->mDisplay.width);
         EXPECT_EQ(tests[test].mHeight, videoInfo->mDisplay.height);
 
         MP4Metadata::ResultAndIndice indices =
           metadata.GetTrackIndice(videoInfo->mTrackId);
         EXPECT_TRUE(!!indices.Ref());
         for (size_t i = 0; i < indices.Ref()->Length(); i++) {
           Index::Indice data;
@@ -334,19 +335,21 @@ TEST(stagefright_MPEG4Metadata, test_cas
           tests[test].mNumberAudioTracks == E) {
         EXPECT_TRUE(!trackInfo.Ref());
       } else {
         ASSERT_TRUE(!!trackInfo.Ref());
         const AudioInfo* audioInfo = trackInfo.Ref()->GetAsAudioInfo();
         ASSERT_TRUE(!!audioInfo);
         EXPECT_TRUE(audioInfo->IsValid());
         EXPECT_TRUE(audioInfo->IsAudio());
-        EXPECT_EQ(tests[test].mAudioDuration, audioInfo->mDuration);
+        EXPECT_EQ(tests[test].mAudioDuration,
+                  audioInfo->mDuration.ToMicroseconds());
         EXPECT_EQ(tests[test].mAudioProfile, audioInfo->mProfile);
-        if (tests[test].mAudioDuration != audioInfo->mDuration) {
+        if (tests[test].mAudioDuration !=
+            audioInfo->mDuration.ToMicroseconds()) {
           MOZ_RELEASE_ASSERT(false);
         }
 
         MP4Metadata::ResultAndIndice indices =
           metadata.GetTrackIndice(audioInfo->mTrackId);
         EXPECT_TRUE(!!indices.Ref());
         for (size_t i = 0; i < indices.Ref()->Length(); i++) {
           Index::Indice data;