Bug 1186257 - Avoid 'using' namespace imports in headers. r=jya
author: Ralph Giles <giles@mozilla.com>
Wed, 22 Jul 2015 11:40:09 -0700
changeset 254204 e33bdcd0c5d3086480a194f0e51cfafff34bb5f9
parent 254203 2630a69c9edb5380974abd30275ca78e140fa825
child 254205 f592ff085124be73f701b690e49eb4f76aad31c7
push id: 62683
push user: rgiles@mozilla.com
push date: Thu, 23 Jul 2015 00:00:01 +0000
reviewers: jya
bugs: 1186257
milestone: 42.0a1
Bug 1186257 - Avoid 'using' namespace imports in headers. r=jya

This is poor style as it can have side-effects in the caller's code. Instead, use full namespace prefixes.
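For context, a minimal single-file sketch of the problem the commit message describes. The types and names below are hypothetical and not taken from the Mozilla tree; the actual patch simply replaces the leaked names with fully qualified `media::` ones, as the diff below shows.

// Hypothetical illustration of why a namespace-level 'using' in a header is risky.
namespace media {
struct TimeUnit { long long mMicroseconds = 0; };
}

// Anti-pattern: imagine this block lives in a widely included header.
namespace mozilla {
using media::TimeUnit;                      // leaks the name into mozilla for every includer
struct LeakyBuffer { TimeUnit mOffset; };   // compiles here, but...
}

// ...an includer that later declares its own mozilla::TimeUnit now collides:
// namespace mozilla { struct TimeUnit { double mSeconds; }; }  // error: conflicts with the using-declaration above

// Preferred style from this commit: fully qualify instead, injecting nothing into includers.
namespace mozilla {
struct Buffer { media::TimeUnit mOffset; };
}

int main() {
  mozilla::Buffer b;
  b.mOffset.mMicroseconds = 42;
  return 0;
}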
dom/media/mediasource/SourceBuffer.h
dom/media/mediasource/SourceBufferContentManager.h
dom/media/mediasource/TrackBuffer.cpp
dom/media/mediasource/TrackBuffer.h
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/mediasource/TrackBuffersManager.h
--- a/dom/media/mediasource/SourceBuffer.h
+++ b/dom/media/mediasource/SourceBuffer.h
@@ -33,19 +33,16 @@ namespace mozilla {
 
 class ErrorResult;
 class MediaByteBuffer;
 template <typename T> class AsyncEventRunner;
 class TrackBuffersManager;
 
 namespace dom {
 
-using media::TimeUnit;
-using media::TimeIntervals;
-
 class TimeRanges;
 
 class SourceBuffer final : public DOMEventTargetHelper
 {
 public:
   /** WebIDL Methods. */
   SourceBufferAppendMode Mode() const
   {
@@ -55,17 +52,17 @@ public:
   void SetMode(SourceBufferAppendMode aMode, ErrorResult& aRv);
 
   bool Updating() const
   {
     return mUpdating;
   }
 
   already_AddRefed<TimeRanges> GetBuffered(ErrorResult& aRv);
-  TimeIntervals GetTimeIntervals();
+  media::TimeIntervals GetTimeIntervals();
 
   double TimestampOffset() const
   {
     return mApparentTimestampOffset;
   }
 
   void SetTimestampOffset(double aTimestampOffset, ErrorResult& aRv);
 
@@ -163,29 +160,29 @@ private:
   already_AddRefed<MediaByteBuffer> PrepareAppend(const uint8_t* aData,
                                                   uint32_t aLength,
                                                   ErrorResult& aRv);
 
   void AppendDataCompletedWithSuccess(bool aHasActiveTracks);
   void AppendDataErrored(nsresult aError);
 
   // Set timestampOffset, must be called on the main thread.
-  void SetTimestampOffset(const TimeUnit& aTimestampOffset);
+  void SetTimestampOffset(const media::TimeUnit& aTimestampOffset);
 
   nsRefPtr<MediaSource> mMediaSource;
 
   uint32_t mEvictionThreshold;
 
   nsRefPtr<SourceBufferContentManager> mContentManager;
 
   double mAppendWindowStart;
   double mAppendWindowEnd;
 
   double mApparentTimestampOffset;
-  TimeUnit mTimestampOffset;
+  media::TimeUnit mTimestampOffset;
 
   SourceBufferAppendMode mAppendMode;
   bool mUpdating;
   bool mGenerateTimestamps;
   bool mIsUsingFormatReader;
 
   mozilla::Atomic<bool> mActive;
 
--- a/dom/media/mediasource/SourceBufferContentManager.h
+++ b/dom/media/mediasource/SourceBufferContentManager.h
@@ -11,19 +11,16 @@
 
 #include "MediaData.h"
 #include "MediaSourceDecoder.h"
 #include "TimeUnits.h"
 #include "nsString.h"
 
 namespace mozilla {
 
-using media::TimeUnit;
-using media::TimeIntervals;
-
 namespace dom {
 class SourceBuffer;
 }
 
 class SourceBufferContentManager {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SourceBufferContentManager);
 
@@ -32,52 +29,55 @@ public:
 
   static already_AddRefed<SourceBufferContentManager>
   CreateManager(dom::SourceBuffer* aParent, MediaSourceDecoder* aParentDecoder,
                 const nsACString& aType);
 
   // Add data to the end of the input buffer.
   // Returns false if the append failed.
   virtual bool
-  AppendData(MediaByteBuffer* aData, TimeUnit aTimestampOffset) = 0;
+  AppendData(MediaByteBuffer* aData, media::TimeUnit aTimestampOffset) = 0;
 
   // Run MSE Buffer Append Algorithm
   // 3.5.5 Buffer Append Algorithm.
   // http://w3c.github.io/media-source/index.html#sourcebuffer-buffer-append
   virtual nsRefPtr<AppendPromise> BufferAppend() = 0;
 
   // Abort any pending AppendData.
   virtual void AbortAppendData() = 0;
 
   // Run MSE Reset Parser State Algorithm.
   // 3.5.2 Reset Parser State
   // http://w3c.github.io/media-source/#sourcebuffer-reset-parser-state
   virtual void ResetParserState() = 0;
 
   // Runs MSE range removal algorithm.
   // http://w3c.github.io/media-source/#sourcebuffer-coded-frame-removal
-  virtual nsRefPtr<RangeRemovalPromise> RangeRemoval(TimeUnit aStart, TimeUnit aEnd) = 0;
+  virtual nsRefPtr<RangeRemovalPromise> RangeRemoval(media::TimeUnit aStart,
+                                                     media::TimeUnit aEnd) = 0;
 
   enum class EvictDataResult : int8_t
   {
     NO_DATA_EVICTED,
     DATA_EVICTED,
     CANT_EVICT,
     BUFFER_FULL,
   };
 
   // Evicts data up to aPlaybackTime. aThreshold is used to
   // bound the data being evicted. It will not evict more than aThreshold
   // bytes. aBufferStartTime contains the new start time of the data after the
   // eviction.
   virtual EvictDataResult
-  EvictData(TimeUnit aPlaybackTime, uint32_t aThreshold, TimeUnit* aBufferStartTime) = 0;
+  EvictData(media::TimeUnit aPlaybackTime,
+            uint32_t aThreshold,
+            media::TimeUnit* aBufferStartTime) = 0;
 
   // Evicts data up to aTime.
-  virtual void EvictBefore(TimeUnit aTime) = 0;
+  virtual void EvictBefore(media::TimeUnit aTime) = 0;
 
   // Returns the buffered range currently managed.
   // This may be called on any thread.
   // Buffered must conform to http://w3c.github.io/media-source/index.html#widl-SourceBuffer-buffered
   virtual media::TimeIntervals Buffered() = 0;
 
   // Return the size of the data managed by this SourceBufferContentManager.
   virtual int64_t GetSize() = 0;
@@ -97,19 +97,19 @@ public:
     PARSING_MEDIA_SEGMENT,
   };
 
   virtual AppendState GetAppendState()
   {
     return AppendState::WAITING_FOR_SEGMENT;
   }
 
-  virtual void SetGroupStartTimestamp(const TimeUnit& aGroupStartTimestamp) {}
+  virtual void SetGroupStartTimestamp(const media::TimeUnit& aGroupStartTimestamp) {}
   virtual void RestartGroupStartTimestamp() {}
-  virtual TimeUnit GroupEndTimestamp() = 0;
+  virtual media::TimeUnit GroupEndTimestamp() = 0;
 
 #if defined(DEBUG)
   virtual void Dump(const char* aPath) { }
 #endif
 
 protected:
   virtual ~SourceBufferContentManager() { }
 };
--- a/dom/media/mediasource/TrackBuffer.cpp
+++ b/dom/media/mediasource/TrackBuffer.cpp
@@ -35,16 +35,17 @@ extern PRLogModuleInfo* GetMediaSourceLo
 #define FUZZ_TIMESTAMP_OFFSET 100000
 
 #define EOS_FUZZ_US 125000
 
 namespace mozilla {
 
 using media::TimeIntervals;
 using media::Interval;
+using media::TimeUnit;
 
 TrackBuffer::TrackBuffer(MediaSourceDecoder* aParentDecoder, const nsACString& aType)
   : mParentDecoder(aParentDecoder)
   , mType(aType)
   , mLastStartTimestamp(0)
   , mIsWaitingOnCDM(false)
   , mShutdown(false)
 {
--- a/dom/media/mediasource/TrackBuffer.h
+++ b/dom/media/mediasource/TrackBuffer.h
@@ -26,35 +26,39 @@ class MediaSourceDecoder;
 class MediaByteBuffer;
 
 class TrackBuffer final : public SourceBufferContentManager {
 public:
   TrackBuffer(MediaSourceDecoder* aParentDecoder, const nsACString& aType);
 
   nsRefPtr<ShutdownPromise> Shutdown();
 
-  bool AppendData(MediaByteBuffer* aData, TimeUnit aTimestampOffset) override;
+  bool AppendData(MediaByteBuffer* aData,
+                  media::TimeUnit aTimestampOffset) override;
 
   // Append data to the current decoder.  Also responsible for calling
   // NotifyDataArrived on the decoder to keep buffered range computation up
   // to date.
   nsRefPtr<AppendPromise> BufferAppend() override;
 
   // Evicts data held in the current decoders SourceBufferResource from the
   // start of the buffer through to aPlaybackTime. aThreshold is used to
   // bound the data being evicted. It will not evict more than aThreshold
   // bytes. aBufferStartTime contains the new start time of the current
   // decoders buffered data after the eviction.
-  EvictDataResult EvictData(TimeUnit aPlaybackTime, uint32_t aThreshold, TimeUnit* aBufferStartTime) override;
+  EvictDataResult EvictData(media::TimeUnit aPlaybackTime,
+                            uint32_t aThreshold,
+                            media::TimeUnit* aBufferStartTime) override;
 
   // Evicts data held in all the decoders SourceBufferResource from the start
   // of the buffer through to aTime.
-  void EvictBefore(TimeUnit aTime) override;
+  void EvictBefore(media::TimeUnit aTime) override;
 
-  nsRefPtr<RangeRemovalPromise> RangeRemoval(TimeUnit aStart, TimeUnit aEnd) override;
+  nsRefPtr<RangeRemovalPromise> RangeRemoval(media::TimeUnit aStart,
+                                             media::TimeUnit aEnd) override;
 
   void AbortAppendData() override;
 
   int64_t GetSize() override;
 
   void ResetParserState() override;
 
   // Returns the union of the decoders buffered ranges in aRanges.
@@ -63,17 +67,17 @@ public:
 
   void Ended() override
   {
     EndCurrentDecoder();
   }
 
   void Detach() override;
 
-  TimeUnit GroupEndTimestamp() override
+  media::TimeUnit GroupEndTimestamp() override
   {
     return Buffered().GetEnd();
   }
 
 
   // Mark the current decoder's resource as ended, clear mCurrentDecoder and
   // reset mLast{Start,End}Timestamp.  Main thread only.
   void DiscardCurrentDecoder();
@@ -206,17 +210,17 @@ private:
 
   nsRefPtr<MediaSourceDecoder> mParentDecoder;
   const nsCString mType;
 
   // The last start and end timestamps added to the TrackBuffer via
   // AppendData.  Accessed on the main thread only.
   int64_t mLastStartTimestamp;
   Maybe<int64_t> mLastEndTimestamp;
-  void AdjustDecodersTimestampOffset(TimeUnit aOffset);
+  void AdjustDecodersTimestampOffset(media::TimeUnit aOffset);
 
   // The timestamp offset used by our current decoder.
   media::TimeUnit mLastTimestampOffset;
   media::TimeUnit mTimestampOffset;
   media::TimeUnit mAdjustedTimestamp;
 
   // True if at least one of our decoders has encrypted content.
   bool mIsWaitingOnCDM;
--- a/dom/media/mediasource/TrackBuffersManager.cpp
+++ b/dom/media/mediasource/TrackBuffersManager.cpp
@@ -32,16 +32,19 @@ PRLogModuleInfo* GetMediaSourceSamplesLo
   }
   return sLogModule;
 }
 #define SAMPLE_DEBUG(arg, ...) MOZ_LOG(GetMediaSourceSamplesLog(), mozilla::LogLevel::Debug, ("TrackBuffersManager(%p:%s)::%s: " arg, this, mType.get(), __func__, ##__VA_ARGS__))
 
 namespace mozilla {
 
 using dom::SourceBufferAppendMode;
+using media::TimeUnit;
+using media::TimeInterval;
+using media::TimeIntervals;
 
 static const char*
 AppendStateToStr(TrackBuffersManager::AppendState aState)
 {
   switch (aState) {
     case TrackBuffersManager::AppendState::WAITING_FOR_SEGMENT:
       return "WAITING_FOR_SEGMENT";
     case TrackBuffersManager::AppendState::PARSING_INIT_SEGMENT:
@@ -248,17 +251,17 @@ TrackBuffersManager::EvictBefore(TimeUni
 
   nsCOMPtr<nsIRunnable> task =
     NS_NewRunnableMethodWithArg<TimeInterval>(
       this, &TrackBuffersManager::CodedFrameRemoval,
       TimeInterval(TimeUnit::FromSeconds(0), aTime));
   GetTaskQueue()->Dispatch(task.forget());
 }
 
-media::TimeIntervals
+TimeIntervals
 TrackBuffersManager::Buffered()
 {
   MSE_DEBUG("");
   MonitorAutoLock mon(mMonitor);
   // http://w3c.github.io/media-source/index.html#widl-SourceBuffer-buffered
   // 2. Let highest end time be the largest track buffer ranges end time across all the track buffers managed by this SourceBuffer object.
   TimeUnit highestEndTime;
 
--- a/dom/media/mediasource/TrackBuffersManager.h
+++ b/dom/media/mediasource/TrackBuffersManager.h
@@ -23,78 +23,79 @@ namespace mozilla {
 
 class ContainerParser;
 class MediaByteBuffer;
 class MediaRawData;
 class MediaSourceDemuxer;
 class SourceBuffer;
 class SourceBufferResource;
 
-using media::TimeUnit;
-using media::TimeInterval;
-using media::TimeIntervals;
-
 class TrackBuffersManager : public SourceBufferContentManager {
 public:
   typedef MozPromise<bool, nsresult, /* IsExclusive = */ true> CodedFrameProcessingPromise;
   typedef TrackInfo::TrackType TrackType;
   typedef MediaData::Type MediaType;
   typedef nsTArray<nsRefPtr<MediaRawData>> TrackBuffer;
 
   TrackBuffersManager(dom::SourceBuffer* aParent, MediaSourceDecoder* aParentDecoder, const nsACString& aType);
 
-  bool AppendData(MediaByteBuffer* aData, TimeUnit aTimestampOffset) override;
+  bool AppendData(MediaByteBuffer* aData,
+                  media::TimeUnit aTimestampOffset) override;
 
   nsRefPtr<AppendPromise> BufferAppend() override;
 
   void AbortAppendData() override;
 
   void ResetParserState() override;
 
-  nsRefPtr<RangeRemovalPromise> RangeRemoval(TimeUnit aStart, TimeUnit aEnd) override;
+  nsRefPtr<RangeRemovalPromise> RangeRemoval(media::TimeUnit aStart,
+                                             media::TimeUnit aEnd) override;
 
   EvictDataResult
-  EvictData(TimeUnit aPlaybackTime, uint32_t aThreshold, TimeUnit* aBufferStartTime) override;
+  EvictData(media::TimeUnit aPlaybackTime,
+            uint32_t aThreshold,
+            media::TimeUnit* aBufferStartTime) override;
 
-  void EvictBefore(TimeUnit aTime) override;
+  void EvictBefore(media::TimeUnit aTime) override;
 
-  TimeIntervals Buffered() override;
+  media::TimeIntervals Buffered() override;
 
   int64_t GetSize() override;
 
   void Ended() override;
 
   void Detach() override;
 
   AppendState GetAppendState() override
   {
     return mAppendState;
   }
 
-  void SetGroupStartTimestamp(const TimeUnit& aGroupStartTimestamp) override;
+  void SetGroupStartTimestamp(const media::TimeUnit& aGroupStartTimestamp) override;
   void RestartGroupStartTimestamp() override;
-  TimeUnit GroupEndTimestamp() override;
+  media::TimeUnit GroupEndTimestamp() override;
 
   // Interface for MediaSourceDemuxer
   MediaInfo GetMetadata();
   const TrackBuffer& GetTrackBuffer(TrackInfo::TrackType aTrack);
-  const TimeIntervals& Buffered(TrackInfo::TrackType);
-  TimeIntervals SafeBuffered(TrackInfo::TrackType) const;
+  const media::TimeIntervals& Buffered(TrackInfo::TrackType);
+  media::TimeIntervals SafeBuffered(TrackInfo::TrackType) const;
   bool IsEnded() const
   {
     return mEnded;
   }
-  TimeUnit Seek(TrackInfo::TrackType aTrack, const TimeUnit& aTime);
+  media::TimeUnit Seek(TrackInfo::TrackType aTrack,
+                       const media::TimeUnit& aTime);
   uint32_t SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
-                                       const TimeUnit& aTimeThreadshold,
+                                       const media::TimeUnit& aTimeThreadshold,
                                        bool& aFound);
   already_AddRefed<MediaRawData> GetSample(TrackInfo::TrackType aTrack,
-                                           const TimeUnit& aFuzz,
+                                           const media::TimeUnit& aFuzz,
                                            bool& aError);
-  TimeUnit GetNextRandomAccessPoint(TrackInfo::TrackType aTrack);
+  media::TimeUnit GetNextRandomAccessPoint(TrackInfo::TrackType aTrack);
 
 #if defined(DEBUG)
   void Dump(const char* aPath) override;
 #endif
 
 private:
   // for MediaSourceDemuxer::GetMozDebugReaderData
   friend class MediaSourceDemuxer;
@@ -112,46 +113,47 @@ private:
   // Will return a promise that will be resolved once all frames of the current
   // media segment have been processed.
   nsRefPtr<CodedFrameProcessingPromise> CodedFrameProcessing();
   void CompleteCodedFrameProcessing();
   // Called by ResetParserState. Complete parsing the input buffer for the
   // current media segment.
   void FinishCodedFrameProcessing();
   void CompleteResetParserState();
-  nsRefPtr<RangeRemovalPromise> CodedFrameRemovalWithPromise(TimeInterval aInterval);
-  bool CodedFrameRemoval(TimeInterval aInterval);
+  nsRefPtr<RangeRemovalPromise>
+    CodedFrameRemovalWithPromise(media::TimeInterval aInterval);
+  bool CodedFrameRemoval(media::TimeInterval aInterval);
   void SetAppendState(AppendState aAppendState);
 
   bool HasVideo() const
   {
     return mVideoTracks.mNumTracks > 0;
   }
   bool HasAudio() const
   {
     return mAudioTracks.mNumTracks > 0;
   }
 
-  typedef Pair<nsRefPtr<MediaByteBuffer>, TimeUnit> IncomingBuffer;
+  typedef Pair<nsRefPtr<MediaByteBuffer>, media::TimeUnit> IncomingBuffer;
   void AppendIncomingBuffer(IncomingBuffer aData);
   nsTArray<IncomingBuffer> mIncomingBuffers;
 
   // The input buffer as per http://w3c.github.io/media-source/index.html#sourcebuffer-input-buffer
   nsRefPtr<MediaByteBuffer> mInputBuffer;
   // The current append state as per https://w3c.github.io/media-source/#sourcebuffer-append-state
   // Accessed on both the main thread and the task queue.
   Atomic<AppendState> mAppendState;
   // Buffer full flag as per https://w3c.github.io/media-source/#sourcebuffer-buffer-full-flag.
   // Accessed on both the main thread and the task queue.
   // TODO: Unused for now.
   Atomic<bool> mBufferFull;
   bool mFirstInitializationSegmentReceived;
   bool mActiveTrack;
-  Maybe<TimeUnit> mGroupStartTimestamp;
-  TimeUnit mGroupEndTimestamp;
+  Maybe<media::TimeUnit> mGroupStartTimestamp;
+  media::TimeUnit mGroupEndTimestamp;
   nsCString mType;
 
   // ContainerParser objects and methods.
   // Those are used to parse the incoming input buffer.
 
   // Recreate the ContainerParser and only feed it with the previous init
   // segment found.
   void RecreateParser();
@@ -180,45 +182,45 @@ private:
   void DoDemuxAudio();
   void OnAudioDemuxCompleted(nsRefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
   void OnAudioDemuxFailed(DemuxerFailureReason aFailure)
   {
     mAudioTracks.mDemuxRequest.Complete();
     OnDemuxFailed(TrackType::kAudioTrack, aFailure);
   }
 
-  void DoEvictData(const TimeUnit& aPlaybackTime, uint32_t aThreshold);
+  void DoEvictData(const media::TimeUnit& aPlaybackTime, uint32_t aThreshold);
 
   struct TrackData {
     TrackData()
       : mNumTracks(0)
       , mNeedRandomAccessPoint(true)
       , mSizeBuffer(0)
     {}
     uint32_t mNumTracks;
     // Definition of variables:
     // https://w3c.github.io/media-source/#track-buffers
     // Last decode timestamp variable that stores the decode timestamp of the
     // last coded frame appended in the current coded frame group.
     // The variable is initially unset to indicate that no coded frames have
     // been appended yet.
-    Maybe<TimeUnit> mLastDecodeTimestamp;
+    Maybe<media::TimeUnit> mLastDecodeTimestamp;
     // Last frame duration variable that stores the coded frame duration of the
     // last coded frame appended in the current coded frame group.
     // The variable is initially unset to indicate that no coded frames have
     // been appended yet.
-    Maybe<TimeUnit> mLastFrameDuration;
+    Maybe<media::TimeUnit> mLastFrameDuration;
     // Highest end timestamp variable that stores the highest coded frame end
     // timestamp across all coded frames in the current coded frame group that
     // were appended to this track buffer.
     // The variable is initially unset to indicate that no coded frames have
     // been appended yet.
-    Maybe<TimeUnit> mHighestEndTimestamp;
+    Maybe<media::TimeUnit> mHighestEndTimestamp;
     // Longest frame duration seen in a coded frame group.
-    Maybe<TimeUnit> mLongestFrameDuration;
+    Maybe<media::TimeUnit> mLongestFrameDuration;
     // Need random access point flag variable that keeps track of whether the
     // track buffer is waiting for a random access point coded frame.
     // The variable is initially set to true to indicate that random access
     // point coded frame is needed before anything can be added to the track
     // buffer.
     bool mNeedRandomAccessPoint;
     nsRefPtr<MediaTrackDemuxer> mDemuxer;
     MozPromiseRequestHolder<MediaTrackDemuxer::SamplesPromise> mDemuxRequest;
@@ -227,51 +229,51 @@ private:
     // the next insertion.
     Maybe<size_t> mNextInsertionIndex;
     // Samples just demuxed, but not yet parsed.
     TrackBuffer mQueuedSamples;
     // We only manage a single track of each type at this time.
     nsTArray<TrackBuffer> mBuffers;
     // Track buffer ranges variable that represents the presentation time ranges
     // occupied by the coded frames currently stored in the track buffer.
-    TimeIntervals mBufferedRanges;
+    media::TimeIntervals mBufferedRanges;
     // Byte size of all samples contained in this track buffer.
     uint32_t mSizeBuffer;
     // TrackInfo of the first metadata received.
     nsRefPtr<SharedTrackInfo> mInfo;
     // TrackInfo of the last metadata parsed (updated with each init segment.
     nsRefPtr<SharedTrackInfo> mLastInfo;
 
     // If set, position of the next sample to be retrieved by GetSample().
     Maybe<uint32_t> mNextGetSampleIndex;
     // Approximation of the next sample's decode timestamp.
-    TimeUnit mNextSampleTimecode;
+    media::TimeUnit mNextSampleTimecode;
     // Approximation of the next sample's presentation timestamp.
-    TimeUnit mNextSampleTime;
+    media::TimeUnit mNextSampleTime;
 
     void ResetAppendState()
     {
       mLastDecodeTimestamp.reset();
       mLastFrameDuration.reset();
       mHighestEndTimestamp.reset();
       mNeedRandomAccessPoint = true;
 
       mLongestFrameDuration.reset();
       mNextInsertionIndex.reset();
     }
   };
 
   void CheckSequenceDiscontinuity();
   void ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData);
   void CheckNextInsertionIndex(TrackData& aTrackData,
-                               const TimeUnit& aSampleTime);
+                               const media::TimeUnit& aSampleTime);
   void InsertFrames(TrackBuffer& aSamples,
-                    const TimeIntervals& aIntervals,
+                    const media::TimeIntervals& aIntervals,
                     TrackData& aTrackData);
-  void RemoveFrames(const TimeIntervals& aIntervals,
+  void RemoveFrames(const media::TimeIntervals& aIntervals,
                     TrackData& aTrackData,
                     uint32_t aStartIndex);
   void UpdateBufferedRanges();
   void RejectProcessing(nsresult aRejectValue, const char* aName);
   void ResolveProcessing(bool aResolveValue, const char* aName);
   MozPromiseRequestHolder<CodedFrameProcessingPromise> mProcessingRequest;
   MozPromiseHolder<CodedFrameProcessingPromise> mProcessingPromise;
 
@@ -301,19 +303,19 @@ private:
     return mTaskQueue;
   }
   bool OnTaskQueue()
   {
     return !GetTaskQueue() || GetTaskQueue()->IsCurrentThreadIn();
   }
   RefPtr<TaskQueue> mTaskQueue;
 
-  TimeInterval mAppendWindow;
-  TimeUnit mTimestampOffset;
-  TimeUnit mLastTimestampOffset;
+  media::TimeInterval mAppendWindow;
+  media::TimeUnit mTimestampOffset;
+  media::TimeUnit mLastTimestampOffset;
   void RestoreCachedVariables();
 
   // Strong references to external objects.
   nsMainThreadPtrHandle<dom::SourceBuffer> mParent;
   nsMainThreadPtrHandle<MediaSourceDecoder> mParentDecoder;
   nsRefPtr<MediaSourceDemuxer> mMediaSourceDemuxer;
 
   // MediaSource duration mirrored from MediaDecoder on the main thread..
@@ -327,18 +329,18 @@ private:
   // Global size of this source buffer content.
   Atomic<int64_t> mSizeSourceBuffer;
   uint32_t mEvictionThreshold;
   Atomic<bool> mEvictionOccurred;
 
   // Monitor to protect following objects accessed across multipple threads.
   mutable Monitor mMonitor;
   // Stable audio and video track time ranges.
-  TimeIntervals mVideoBufferedRanges;
-  TimeIntervals mAudioBufferedRanges;
-  TimeUnit mOfficialGroupEndTimestamp;
+  media::TimeIntervals mVideoBufferedRanges;
+  media::TimeIntervals mAudioBufferedRanges;
+  media::TimeUnit mOfficialGroupEndTimestamp;
   // MediaInfo of the first init segment read.
   MediaInfo mInfo;
 };
 
 } // namespace mozilla
 
 #endif /* MOZILLA_TRACKBUFFERSMANAGER_H_ */