author | Robert O'Callahan <robert@ocallahan.org>
date | Thu, 18 Sep 2014 17:20:43 +1200
changeset 216608 | c5d415a669b5fc62c9cb8828842d195479289efa
parent 216607 | 442dfde6455a1577de0c3d2e70951cb366624a55
child 216609 | ee6c0bfb269ff48dbff8631b9ba23e89f89f3325
push id | 27858
push user | kwierso@gmail.com
push date | Fri, 21 Nov 2014 01:35:46 +0000
treeherder | mozilla-central@6309710dd71d
reviewers | karlt
bugs | 1061046
milestone | 36.0a1
first release with | nightly linux32, linux64, mac, win32, win64
last release without | nightly linux32, linux64, mac, win32, win64
--- a/dom/media/AudioSegment.h
+++ b/dom/media/AudioSegment.h
@@ -79,30 +79,30 @@ void DownmixAndInterleave(const nsTArray
  * pointers so it can represent a subinterval of a buffer without copying.
  * An AudioChunk can store its individual channels anywhere; it maintains
  * separate pointers to each channel's buffer.
  */
 struct AudioChunk {
   typedef mozilla::AudioSampleFormat SampleFormat;

   // Generic methods
-  void SliceTo(TrackTicks aStart, TrackTicks aEnd)
+  void SliceTo(StreamTime aStart, StreamTime aEnd)
   {
     MOZ_ASSERT(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
                "Slice out of bounds");
     if (mBuffer) {
       MOZ_ASSERT(aStart < INT32_MAX, "Can't slice beyond 32-bit sample lengths");
       for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) {
         mChannelData[channel] = AddAudioSampleOffset(mChannelData[channel],
             mBufferFormat, int32_t(aStart));
       }
     }
     mDuration = aEnd - aStart;
   }
-  TrackTicks GetDuration() const { return mDuration; }
+  StreamTime GetDuration() const { return mDuration; }
   bool CanCombineWithFollowing(const AudioChunk& aOther) const
   {
     if (aOther.mBuffer != mBuffer) {
       return false;
     }
     if (mBuffer) {
       NS_ASSERTION(aOther.mBufferFormat == mBufferFormat,
                    "Wrong metadata about buffer");
@@ -116,17 +116,17 @@ struct AudioChunk {
                                   mBufferFormat, int32_t(mDuration))) {
         return false;
       }
     }
     return true;
   }
   bool IsNull() const { return mBuffer == nullptr; }
-  void SetNull(TrackTicks aDuration)
+  void SetNull(StreamTime aDuration)
   {
     mBuffer = nullptr;
     mChannelData.Clear();
     mDuration = aDuration;
     mVolume = 1.0f;
     mBufferFormat = AUDIO_FORMAT_SILENCE;
   }
   int ChannelCount() const { return mChannelData.Length(); }
@@ -149,17 +149,17 @@ struct AudioChunk {
       amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
     }
     // Memory in the array is owned by mBuffer.
     amount += mChannelData.SizeOfExcludingThis(aMallocSizeOf);
     return amount;
   }

-  TrackTicks mDuration; // in frames within the buffer
+  StreamTime mDuration; // in frames within the buffer
   nsRefPtr<ThreadSharedObject> mBuffer; // the buffer object whose lifetime is managed; null means data is all zeroes
   nsTArray<const void*> mChannelData; // one pointer per channel; empty if and only if mBuffer is null
   float mVolume; // volume multiplier to apply (1.0f if mBuffer is nonnull)
   SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull)
 #ifdef MOZILLA_INTERNAL_API
   mozilla::TimeStamp mTimeStamp; // time at which this has been fetched from the MediaEngine
 #endif
 };
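The SliceTo() change above keeps AudioChunk's zero-copy slicing intact while switching the time unit. A standalone C++ sketch of that trick, using simplified stand-ins rather than the Gecko types: slicing only advances the per-channel pointers and shrinks the duration.

#include <cassert>
#include <cstdint>
#include <vector>

typedef int64_t StreamTime;

// Simplified stand-in for AudioChunk; AddAudioSampleOffset reduces to
// pointer arithmetic once the sample format is fixed to float.
struct ChunkSketch {
  std::vector<const float*> mChannelData;
  StreamTime mDuration;

  void SliceTo(StreamTime aStart, StreamTime aEnd) {
    assert(aStart >= 0 && aStart < aEnd && aEnd <= mDuration);
    for (size_t c = 0; c < mChannelData.size(); ++c) {
      mChannelData[c] += aStart; // advance the view; no samples are copied
    }
    mDuration = aEnd - aStart;
  }
};

int main() {
  static const float samples[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  ChunkSketch chunk = { { samples }, 8 };
  chunk.SliceTo(2, 6);                      // keep samples [2, 6)
  assert(chunk.mDuration == 4);
  assert(chunk.mChannelData[0][0] == 2.0f); // the view now starts at sample 2
  return 0;
}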
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -28,17 +28,17 @@ public:
   // Main thread only
   void Forget() { mStream = nullptr; }
   DOMMediaStream* GetStream() { return mStream; }

   class TrackChange : public nsRunnable {
   public:
     TrackChange(StreamListener* aListener,
-                TrackID aID, TrackTicks aTrackOffset,
+                TrackID aID, StreamTime aTrackOffset,
                 uint32_t aEvents, MediaSegment::Type aType)
       : mListener(aListener), mID(aID), mEvents(aEvents), mType(aType)
     {
     }

     NS_IMETHOD Run()
     {
       NS_ASSERTION(NS_IsMainThread(), "main thread only");
@@ -80,17 +80,17 @@ public:
   /**
    * Notify that changes to one of the stream tracks have been queued.
    * aTrackEvents can be any combination of TRACK_EVENT_CREATED and
    * TRACK_EVENT_ENDED. aQueuedMedia is the data being added to the track
    * at aTrackOffset (relative to the start of the stream).
    * aQueuedMedia can be null if there is no output.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE
   {
     if (aTrackEvents & (TRACK_EVENT_CREATED | TRACK_EVENT_ENDED)) {
       nsRefPtr<TrackChange> runnable =
         new TrackChange(this, aID, aTrackOffset, aTrackEvents,
                         aQueuedMedia.GetType());
       NS_DispatchToMainThread(runnable);
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -234,18 +234,18 @@ private:
   // Set at Activate on MainThread

   // Accessed from MediaStreamGraph thread, MediaManager thread, and MainThread
   // No locking needed as they're only addrefed except on the MediaManager thread
   nsRefPtr<MediaEngineSource> mAudioSource; // threadsafe refcnt
   nsRefPtr<MediaEngineSource> mVideoSource; // threadsafe refcnt
   nsRefPtr<SourceMediaStream> mStream; // threadsafe refcnt
-  TrackTicks mLastEndTimeAudio;
-  TrackTicks mLastEndTimeVideo;
+  StreamTime mLastEndTimeAudio;
+  StreamTime mLastEndTimeVideo;
   bool mFinished;

   // Accessed from MainThread and MSG thread
   Mutex mLock; // protects mRemoved access from MainThread
   bool mRemoved;
 };

 class GetUserMediaNotificationEvent: public nsRunnable
--- a/dom/media/MediaSegment.h
+++ b/dom/media/MediaSegment.h
@@ -23,32 +23,41 @@ namespace mozilla {
 typedef int32_t TrackRate;
 const int64_t TRACK_RATE_MAX_BITS = 20;
 const TrackRate TRACK_RATE_MAX = 1 << TRACK_RATE_MAX_BITS;

 /**
  * A number of ticks at a rate determined by some underlying track (e.g.
  * audio sample rate). We want to make sure that multiplying TrackTicks by
  * a TrackRate doesn't overflow, so we set its max accordingly.
+ * StreamTime should be used instead when we're working with MediaStreamGraph's
+ * rate, but TrackTicks can be used outside MediaStreams when we have data
+ * at a different rate.
  */
 typedef int64_t TrackTicks;
 const int64_t TRACK_TICKS_MAX = INT64_MAX >> TRACK_RATE_MAX_BITS;

 /**
  * We represent media times in 64-bit audio frame counts or ticks.
  * All tracks in a MediaStreamGraph have the same rate.
  */
 typedef int64_t MediaTime;
 const int64_t MEDIA_TIME_MAX = TRACK_TICKS_MAX;

 /**
+ * Media time relative to the start of a StreamBuffer.
+ */
+typedef MediaTime StreamTime;
+const StreamTime STREAM_TIME_MAX = MEDIA_TIME_MAX;
+
+/**
  * A MediaSegment is a chunk of media data sequential in time. Different
  * types of data have different subclasses of MediaSegment, all inheriting
  * from MediaSegmentBase.
- * All MediaSegment data is timed using TrackTicks. The actual tick rate
+ * All MediaSegment data is timed using StreamTime. The actual tick rate
  * is defined on a per-track basis. For some track types, this can be
  * a fixed constant for all tracks of that type (e.g. 1MHz for video).
  *
  * Each media segment defines a concept of "null media data" (e.g. silence
  * for audio or "no video frame" for video), which can be efficiently
  * represented. This is used for padding.
  */
 class MediaSegment {
@@ -62,48 +71,48 @@ public:
     AUDIO,
     VIDEO,
     TYPE_COUNT
   };

   /**
    * Gets the total duration of the segment.
    */
-  TrackTicks GetDuration() const { return mDuration; }
+  StreamTime GetDuration() const { return mDuration; }
   Type GetType() const { return mType; }

   /**
    * Create a MediaSegment of the same type.
    */
   virtual MediaSegment* CreateEmptyClone() const = 0;
   /**
    * Moves contents of aSource to the end of this segment.
    */
   virtual void AppendFrom(MediaSegment* aSource) = 0;
   /**
    * Append a slice of aSource to this segment.
    */
   virtual void AppendSlice(const MediaSegment& aSource,
-                           TrackTicks aStart, TrackTicks aEnd) = 0;
+                           StreamTime aStart, StreamTime aEnd) = 0;
   /**
    * Replace all contents up to aDuration with null data.
    */
-  virtual void ForgetUpTo(TrackTicks aDuration) = 0;
+  virtual void ForgetUpTo(StreamTime aDuration) = 0;
   /**
    * Forget all data buffered after a given point
    */
-  virtual void FlushAfter(TrackTicks aNewEnd) = 0;
+  virtual void FlushAfter(StreamTime aNewEnd) = 0;
   /**
    * Insert aDuration of null data at the start of the segment.
    */
-  virtual void InsertNullDataAtStart(TrackTicks aDuration) = 0;
+  virtual void InsertNullDataAtStart(StreamTime aDuration) = 0;
   /**
    * Insert aDuration of null data at the end of the segment.
    */
-  virtual void AppendNullData(TrackTicks aDuration) = 0;
+  virtual void AppendNullData(StreamTime aDuration) = 0;
   /**
    * Replace contents with disabled data of the same duration
    */
   virtual void ReplaceWithDisabled() = 0;
   /**
    * Remove all contents, setting duration to 0.
    */
   virtual void Clear() = 0;
@@ -119,17 +128,17 @@ public:
   }

 protected:
   explicit MediaSegment(Type aType) : mDuration(0), mType(aType)
   {
     MOZ_COUNT_CTOR(MediaSegment);
   }

-  TrackTicks mDuration; // total of mDurations of all chunks
+  StreamTime mDuration; // total of mDurations of all chunks
   Type mType;
 };

 /**
  * C is the implementation class subclassed from MediaSegmentBase.
  * C must contain a Chunk class.
  */
 template <class C, class Chunk> class MediaSegmentBase : public MediaSegment {
@@ -143,103 +152,103 @@ public:
     NS_ASSERTION(aSource->GetType() == C::StaticType(), "Wrong type");
     AppendFromInternal(static_cast<C*>(aSource));
   }
   void AppendFrom(C* aSource)
   {
     AppendFromInternal(aSource);
   }
   virtual void AppendSlice(const MediaSegment& aSource,
-                           TrackTicks aStart, TrackTicks aEnd)
+                           StreamTime aStart, StreamTime aEnd)
   {
     NS_ASSERTION(aSource.GetType() == C::StaticType(), "Wrong type");
     AppendSliceInternal(static_cast<const C&>(aSource), aStart, aEnd);
   }
-  void AppendSlice(const C& aOther, TrackTicks aStart, TrackTicks aEnd)
+  void AppendSlice(const C& aOther, StreamTime aStart, StreamTime aEnd)
   {
     AppendSliceInternal(aOther, aStart, aEnd);
   }
   /**
    * Replace the first aDuration ticks with null media data, because the data
    * will not be required again.
    */
-  virtual void ForgetUpTo(TrackTicks aDuration)
+  virtual void ForgetUpTo(StreamTime aDuration)
   {
     if (mChunks.IsEmpty() || aDuration <= 0) {
       return;
     }
     if (mChunks[0].IsNull()) {
-      TrackTicks extraToForget = std::min(aDuration, mDuration) - mChunks[0].GetDuration();
+      StreamTime extraToForget = std::min(aDuration, mDuration) - mChunks[0].GetDuration();
       if (extraToForget > 0) {
         RemoveLeading(extraToForget, 1);
         mChunks[0].mDuration += extraToForget;
         mDuration += extraToForget;
       }
       return;
     }
     RemoveLeading(aDuration, 0);
     mChunks.InsertElementAt(0)->SetNull(aDuration);
     mDuration += aDuration;
   }
-  virtual void FlushAfter(TrackTicks aNewEnd)
+  virtual void FlushAfter(StreamTime aNewEnd)
   {
     if (mChunks.IsEmpty()) {
       return;
     }

     if (mChunks[0].IsNull()) {
-      TrackTicks extraToKeep = aNewEnd - mChunks[0].GetDuration();
+      StreamTime extraToKeep = aNewEnd - mChunks[0].GetDuration();
       if (extraToKeep < 0) {
         // reduce the size of the Null, get rid of everything else
         mChunks[0].SetNull(aNewEnd);
         extraToKeep = 0;
       }
       RemoveTrailing(extraToKeep, 1);
     } else {
       if (aNewEnd > mDuration) {
         NS_ASSERTION(aNewEnd <= mDuration, "can't add data in FlushAfter");
         return;
       }
       RemoveTrailing(aNewEnd, 0);
     }
     mDuration = aNewEnd;
   }
-  virtual void InsertNullDataAtStart(TrackTicks aDuration)
+  virtual void InsertNullDataAtStart(StreamTime aDuration)
   {
     if (aDuration <= 0) {
       return;
     }
     if (!mChunks.IsEmpty() && mChunks[0].IsNull()) {
       mChunks[0].mDuration += aDuration;
     } else {
       mChunks.InsertElementAt(0)->SetNull(aDuration);
     }
 #ifdef MOZILLA_INTERNAL_API
     mChunks[0].mTimeStamp = mozilla::TimeStamp::Now();
 #endif
     mDuration += aDuration;
   }
-  virtual void AppendNullData(TrackTicks aDuration)
+  virtual void AppendNullData(StreamTime aDuration)
   {
     if (aDuration <= 0) {
       return;
     }
     if (!mChunks.IsEmpty() && mChunks[mChunks.Length() - 1].IsNull()) {
       mChunks[mChunks.Length() - 1].mDuration += aDuration;
     } else {
       mChunks.AppendElement()->SetNull(aDuration);
     }
     mDuration += aDuration;
   }
   virtual void ReplaceWithDisabled()
   {
     if (GetType() != AUDIO) {
       MOZ_CRASH("Disabling unknown segment type");
     }
-    TrackTicks duration = GetDuration();
+    StreamTime duration = GetDuration();
     Clear();
     AppendNullData(duration);
   }
   virtual void Clear()
   {
     mDuration = 0;
     mChunks.Clear();
   }
@@ -252,17 +261,17 @@ public:
     void Next() { ++mIndex; }
     Chunk& operator*() { return mSegment.mChunks[mIndex]; }
     Chunk* operator->() { return &mSegment.mChunks[mIndex]; }
   private:
     MediaSegmentBase<C, Chunk>& mSegment;
     uint32_t mIndex;
   };

-  void RemoveLeading(TrackTicks aDuration)
+  void RemoveLeading(StreamTime aDuration)
   {
     RemoveLeading(aDuration, 0);
   }

 #ifdef MOZILLA_INTERNAL_API
   void GetStartTime(TimeStamp &aTime) {
     aTime = mChunks[0].mTimeStamp;
   }
@@ -297,52 +306,52 @@ protected:
         mChunks[mChunks.Length() - 1].CanCombineWithFollowing(aSource->mChunks[0])) {
       mChunks[mChunks.Length() - 1].mDuration += aSource->mChunks[0].mDuration;
       aSource->mChunks.RemoveElementAt(0);
     }
     mChunks.MoveElementsFrom(aSource->mChunks);
   }
   void AppendSliceInternal(const MediaSegmentBase<C, Chunk>& aSource,
-                           TrackTicks aStart, TrackTicks aEnd)
+                           StreamTime aStart, StreamTime aEnd)
   {
     MOZ_ASSERT(aStart <= aEnd, "Endpoints inverted");
     NS_WARN_IF_FALSE(aStart >= 0 && aEnd <= aSource.mDuration, "Slice out of range");
     mDuration += aEnd - aStart;
-    TrackTicks offset = 0;
+    StreamTime offset = 0;
     for (uint32_t i = 0; i < aSource.mChunks.Length() && offset < aEnd; ++i) {
       const Chunk& c = aSource.mChunks[i];
-      TrackTicks start = std::max(aStart, offset);
-      TrackTicks nextOffset = offset + c.GetDuration();
-      TrackTicks end = std::min(aEnd, nextOffset);
+      StreamTime start = std::max(aStart, offset);
+      StreamTime nextOffset = offset + c.GetDuration();
+      StreamTime end = std::min(aEnd, nextOffset);
       if (start < end) {
         mChunks.AppendElement(c)->SliceTo(start - offset, end - offset);
       }
       offset = nextOffset;
     }
   }

-  Chunk* AppendChunk(TrackTicks aDuration)
+  Chunk* AppendChunk(StreamTime aDuration)
   {
     MOZ_ASSERT(aDuration >= 0);
     Chunk* c = mChunks.AppendElement();
     c->mDuration = aDuration;
     mDuration += aDuration;
     return c;
   }

-  Chunk* FindChunkContaining(TrackTicks aOffset, TrackTicks* aStart = nullptr)
+  Chunk* FindChunkContaining(StreamTime aOffset, StreamTime* aStart = nullptr)
   {
     if (aOffset < 0) {
       return nullptr;
     }
-    TrackTicks offset = 0;
+    StreamTime offset = 0;
     for (uint32_t i = 0; i < mChunks.Length(); ++i) {
       Chunk& c = mChunks[i];
-      TrackTicks nextOffset = offset + c.GetDuration();
+      StreamTime nextOffset = offset + c.GetDuration();
       if (aOffset < nextOffset) {
         if (aStart) {
           *aStart = offset;
         }
         return &c;
       }
       offset = nextOffset;
     }
@@ -352,39 +361,39 @@ protected:

   Chunk* GetLastChunk()
   {
     if (mChunks.IsEmpty()) {
       return nullptr;
     }
     return &mChunks[mChunks.Length() - 1];
   }

-  void RemoveLeading(TrackTicks aDuration, uint32_t aStartIndex)
+  void RemoveLeading(StreamTime aDuration, uint32_t aStartIndex)
   {
     NS_ASSERTION(aDuration >= 0, "Can't remove negative duration");
-    TrackTicks t = aDuration;
+    StreamTime t = aDuration;
     uint32_t chunksToRemove = 0;
     for (uint32_t i = aStartIndex; i < mChunks.Length() && t > 0; ++i) {
       Chunk* c = &mChunks[i];
       if (c->GetDuration() > t) {
         c->SliceTo(t, c->GetDuration());
         t = 0;
         break;
       }
       t -= c->GetDuration();
       chunksToRemove = i + 1 - aStartIndex;
     }
     mChunks.RemoveElementsAt(aStartIndex, chunksToRemove);
     mDuration -= aDuration - t;
   }

-  void RemoveTrailing(TrackTicks aKeep, uint32_t aStartIndex)
+  void RemoveTrailing(StreamTime aKeep, uint32_t aStartIndex)
   {
     NS_ASSERTION(aKeep >= 0, "Can't keep negative duration");
-    TrackTicks t = aKeep;
+    StreamTime t = aKeep;
     uint32_t i;
     for (i = aStartIndex; i < mChunks.Length(); ++i) {
       Chunk* c = &mChunks[i];
       if (c->GetDuration() > t) {
         c->SliceTo(0, t);
         break;
       }
       t -= c->GetDuration();
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -190,17 +190,17 @@ MediaStreamGraphImpl::ExtractPendingInpu
     }
     finished = aStream->mUpdateFinished;
     for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
       SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
       aStream->ApplyTrackDisabling(data->mID, data->mData);
       for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
         MediaStreamListener* l = aStream->mListeners[j];
-        TrackTicks offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
+        StreamTime offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
             ? data->mStart : aStream->mBuffer.FindTrack(data->mID)->GetSegment()->GetDuration();
         l->NotifyQueuedTrackChanges(this, data->mID, offset, data->mCommands, *data->mData);
       }
       if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
         MediaSegment* segment = data->mData.forget();
         STREAM_LOG(PR_LOG_DEBUG, ("SourceMediaStream %p creating track %d, start %lld, initial end %lld",
                                   aStream, data->mID, int64_t(data->mStart),
@@ -945,29 +945,29 @@ MediaStreamGraphImpl::CreateOrDestroyAud
   for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
     if (!audioOutputStreamsFound[i]) {
       aStream->mAudioOutputStreams.RemoveElementAt(i);
     }
   }
 }

-TrackTicks
+StreamTime
 MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
                                 GraphTime aFrom, GraphTime aTo)
 {
   MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode");

-  TrackTicks ticksWritten = 0;
+  StreamTime ticksWritten = 0;
   // We compute the number of needed ticks by converting a difference of graph
   // time rather than by subtracting two converted stream times to ensure that
   // the rounding between {Graph,Stream}Time and track ticks is not dependent
   // on the absolute value of the {Graph,Stream}Time, and so that the number of
   // ticks to play is the same for each cycle.
-  TrackTicks ticksNeeded = aTo - aFrom;
+  StreamTime ticksNeeded = aTo - aFrom;

   if (aStream->mAudioOutputStreams.IsEmpty()) {
     return 0;
   }

   float volume = 0.0f;
   for (uint32_t i = 0; i < aStream->mAudioOutputs.Length(); ++i) {
     volume += aStream->mAudioOutputs[i].mVolume;
@@ -978,17 +978,17 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
     StreamBuffer::Track* track = aStream->mBuffer.FindTrack(audioOutput.mTrackID);
     AudioSegment* audio = track->Get<AudioSegment>();
     AudioSegment output;

     // offset and audioOutput.mLastTickWritten can differ by at most one sample,
     // because of the rounding issue. We track that to ensure we don't skip a
     // sample. One sample may be played twice, but this should not happen
     // again during an unblocked sequence of track samples.
-    TrackTicks offset = GraphTimeToStreamTime(aStream, aFrom);
+    StreamTime offset = GraphTimeToStreamTime(aStream, aFrom);
     if (audioOutput.mLastTickWritten &&
         audioOutput.mLastTickWritten != offset) {
       // If there is a global underrun of the MSG, this property won't hold, and
       // we reset the sample count tracking.
       if (offset - audioOutput.mLastTickWritten == 1) {
         offset = audioOutput.mLastTickWritten;
       }
     }
@@ -1001,33 +1001,33 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
     GraphTime t = aFrom;
     while (ticksNeeded) {
       GraphTime end;
       bool blocked = aStream->mBlocked.GetAt(t, &end);
       end = std::min(end, aTo);

       // Check how many ticks of sound we can provide if we are blocked some
       // time in the middle of this cycle.
-      TrackTicks toWrite = 0;
+      StreamTime toWrite = 0;
       if (end >= aTo) {
         toWrite = ticksNeeded;
       } else {
         toWrite = end - t;
       }
       ticksNeeded -= toWrite;

       if (blocked) {
         output.InsertNullDataAtStart(toWrite);
         ticksWritten += toWrite;
         STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld blocking-silence samples for %f to %f (%ld to %ld)\n",
                                     aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
                                     offset, offset + toWrite));
       } else {
-        TrackTicks endTicksNeeded = offset + toWrite;
-        TrackTicks endTicksAvailable = audio->GetDuration();
+        StreamTime endTicksNeeded = offset + toWrite;
+        StreamTime endTicksAvailable = audio->GetDuration();
         STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld samples for %f to %f (samples %ld to %ld)\n",
                                     aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
                                     offset, endTicksNeeded));

         if (endTicksNeeded <= endTicksAvailable) {
           output.AppendSlice(*audio, offset, endTicksNeeded);
           ticksWritten += toWrite;
           offset = endTicksNeeded;
@@ -1088,22 +1088,22 @@ MediaStreamGraphImpl::PlayVideo(MediaStr
   if (framePosition > CurrentDriver()->StateComputedTime()) {
     NS_WARN_IF_FALSE(std::abs(framePosition - CurrentDriver()->StateComputedTime()) <
                      MillisecondsToMediaTime(5), "Graph thread slowdown?");
     framePosition = CurrentDriver()->StateComputedTime();
   }
   MOZ_ASSERT(framePosition >= aStream->mBufferStartTime, "frame position before buffer?");
   StreamTime frameBufferTime = GraphTimeToStreamTime(aStream, framePosition);
-  TrackTicks start;
+  StreamTime start;
   const VideoFrame* frame = nullptr;
   for (StreamBuffer::TrackIter tracks(aStream->GetStreamBuffer(), MediaSegment::VIDEO);
        !tracks.IsEnded(); tracks.Next()) {
     VideoSegment* segment = tracks->Get<VideoSegment>();
-    TrackTicks thisStart;
+    StreamTime thisStart;
     const VideoFrame* thisFrame =
       segment->GetFrameAt(frameBufferTime, &thisStart);
     if (thisFrame && thisFrame->GetImage()) {
       start = thisStart;
       frame = thisFrame;
     }
   }

   if (!frame || *frame == aStream->mLastPlayedVideoFrame)
@@ -1191,20 +1191,20 @@ MediaStreamGraphImpl::PrepareUpdatesToMa
       !mStreamUpdates.IsEmpty()) {
     EnsureStableStateEventPosted();
   }
 }

 GraphTime
 MediaStreamGraphImpl::RoundUpToNextAudioBlock(GraphTime aTime)
 {
-  TrackTicks ticks = aTime;
+  StreamTime ticks = aTime;
   uint64_t block = ticks >> WEBAUDIO_BLOCK_SIZE_BITS;
   uint64_t nextBlock = block + 1;
-  TrackTicks nextTicks = nextBlock << WEBAUDIO_BLOCK_SIZE_BITS;
+  StreamTime nextTicks = nextBlock << WEBAUDIO_BLOCK_SIZE_BITS;
   return nextTicks;
 }

 void
 MediaStreamGraphImpl::ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
                                                         TrackRate aSampleRate,
                                                         GraphTime aFrom,
                                                         GraphTime aTo)
@@ -1292,17 +1292,17 @@ void
 MediaStreamGraphImpl::Process(GraphTime aFrom, GraphTime aTo)
 {
   // Play stream contents.
   bool allBlockedForever = true;
   // True when we've done ProcessInput for all processed streams.
   bool doneAllProducing = false;
   // This is the number of frames that are written to the AudioStreams, for
   // this cycle.
-  TrackTicks ticksPlayed = 0;
+  StreamTime ticksPlayed = 0;

   mMixer.StartMixing();

   // Figure out what each stream wants to do
   for (uint32_t i = 0; i < mStreams.Length(); ++i) {
     MediaStream* stream = mStreams[i];
     if (!doneAllProducing) {
       ProcessedMediaStream* ps = stream->AsProcessedStream();
@@ -1331,17 +1331,17 @@ MediaStreamGraphImpl::Process(GraphTime
       }
     }
     NotifyHasCurrentData(stream);
     // Only playback audio and video in real-time mode
     if (mRealtime) {
       CreateOrDestroyAudioStreams(aFrom, stream);
       if (CurrentDriver()->AsAudioCallbackDriver()) {
-        TrackTicks ticksPlayedForThisStream = PlayAudio(stream, aFrom, aTo);
+        StreamTime ticksPlayedForThisStream = PlayAudio(stream, aFrom, aTo);
         if (!ticksPlayed) {
           ticksPlayed = ticksPlayedForThisStream;
         } else {
           MOZ_ASSERT(!ticksPlayedForThisStream || ticksPlayedForThisStream == ticksPlayed,
               "Each stream should have the same number of frames.");
         }
       }
       PlayVideo(stream);
@@ -2265,17 +2265,17 @@ SourceMediaStream::SetPullEnabled(bool a
   MutexAutoLock lock(mMutex);
   mPullEnabled = aEnabled;
   if (mPullEnabled && GraphImpl()) {
     GraphImpl()->EnsureNextIteration();
   }
 }

 void
-SourceMediaStream::AddTrackInternal(TrackID aID, TrackRate aRate, TrackTicks aStart,
+SourceMediaStream::AddTrackInternal(TrackID aID, TrackRate aRate, StreamTime aStart,
                                     MediaSegment* aSegment)
 {
   MutexAutoLock lock(mMutex);
   TrackData* data = mUpdateTracks.AppendElement();
   data->mID = aID;
   data->mInputRate = aRate;
   data->mStart = aStart;
   data->mCommands = TRACK_CREATE;
@@ -2356,17 +2356,17 @@ void
 SourceMediaStream::NotifyDirectConsumers(TrackData *aTrack,
                                          MediaSegment *aSegment)
 {
   // Call with mMutex locked
   MOZ_ASSERT(aTrack);

   for (uint32_t j = 0; j < mDirectListeners.Length(); ++j) {
     MediaStreamDirectListener* l = mDirectListeners[j];
-    TrackTicks offset = 0; // FIX! need a separate TrackTicks.... or the end of the internal buffer
+    StreamTime offset = 0; // FIX! need a separate StreamTime.... or the end of the internal buffer
     l->NotifyRealtimeData(static_cast<MediaStreamGraph*>(GraphImpl()), aTrack->mID, offset,
                           aTrack->mCommands, *aSegment);
   }
 }

 // These handle notifying all the listeners of an event
 void
 SourceMediaStream::NotifyListenersEventImpl(MediaStreamListener::MediaStreamGraphEvent aEvent)
@@ -2500,17 +2500,17 @@ SourceMediaStream::EndAllTrackAndFinish(
   for (uint32_t i = 0; i < mUpdateTracks.Length(); ++i) {
     SourceMediaStream::TrackData* data = &mUpdateTracks[i];
     data->mCommands |= TRACK_END;
   }
   FinishWithLockHeld();
   // we will call NotifyEvent() to let GetUserMedia know
 }

-TrackTicks
+StreamTime
 SourceMediaStream::GetBufferedTicks(TrackID aID)
 {
   StreamBuffer::Track* track = mBuffer.FindTrack(aID);
   if (track) {
     MediaSegment* segment = track->GetSegment();
     if (segment) {
       return segment->GetDuration() -
         GraphTimeToStreamTime(GraphImpl()->CurrentDriver()->StateComputedTime());
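PlayAudio() above now does its bookkeeping in StreamTime: the tick budget is computed once from the GraphTime difference, then consumed interval by interval, with silence written across blocked spans. A simplified sketch of just that accounting loop, with a hypothetical stand-in for the stream's blocked-interval timeline (the real code slices AudioSegment data instead of printing):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

typedef int64_t GraphTime;
typedef int64_t StreamTime;

// Hypothetical stand-in for one entry of the stream's blocking timeline:
// the interval [mStart, mEnd) is either blocked or playable.
struct Interval {
  GraphTime mStart;
  GraphTime mEnd;
  bool mBlocked;
};

// Sketch of the accounting: the budget comes from a single GraphTime
// difference, and each interval consumes part of it; blocked spans get
// silence (the real code appends slices of the track's AudioSegment).
StreamTime PlayAudioSketch(const std::vector<Interval>& aIntervals,
                           GraphTime aFrom, GraphTime aTo) {
  StreamTime ticksWritten = 0;
  StreamTime ticksNeeded = aTo - aFrom;
  for (size_t i = 0; i < aIntervals.size() && ticksNeeded; ++i) {
    GraphTime t = std::max(aIntervals[i].mStart, aFrom);
    GraphTime end = std::min(aIntervals[i].mEnd, aTo);
    if (t >= end) {
      continue;
    }
    StreamTime toWrite = (end >= aTo) ? ticksNeeded : end - t;
    ticksNeeded -= toWrite;
    std::printf("%s: %lld ticks\n",
                aIntervals[i].mBlocked ? "silence" : "audio",
                (long long)toWrite);
    ticksWritten += toWrite;
  }
  return ticksWritten;
}

int main() {
  std::vector<Interval> intervals;
  intervals.push_back({0, 300, false});   // playable
  intervals.push_back({300, 420, true});  // blocked: write silence
  intervals.push_back({420, 512, false}); // playable again
  StreamTime played = PlayAudioSketch(intervals, 0, 512);
  return played == 512 ? 0 : 1;           // every needed tick was written
}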
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -169,17 +169,17 @@ public:
   };

   /**
    * Notify that changes to one of the stream tracks have been queued.
    * aTrackEvents can be any combination of TRACK_EVENT_CREATED and
    * TRACK_EVENT_ENDED. aQueuedMedia is the data being added to the track
    * at aTrackOffset (relative to the start of the stream).
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) {}
 };

 /**
  * This is a base class for media graph thread listener direct callbacks
  * from within AppendToTrack(). Note that your regular listener will
  * still get NotifyQueuedTrackChanges() callbacks from the MSG thread, so
@@ -192,17 +192,17 @@ public:
   /*
    * This will be called on any MediaStreamDirectListener added to a
    * a SourceMediaStream when AppendToTrack() is called. The MediaSegment
    * will be the RawSegment (unresampled) if available in AppendToTrack().
    * Note that NotifyQueuedTrackChanges() calls will also still occur.
    */
   virtual void NotifyRealtimeData(MediaStreamGraph* aGraph, TrackID aID,
-                                  TrackTicks aTrackOffset,
+                                  StreamTime aTrackOffset,
                                   uint32_t aTrackEvents,
                                   const MediaSegment& aMedia) {}
 };

 /**
  * This is a base class for main-thread listener callbacks.
  * This callback is invoked on the main thread when the main-thread-visible
  * state of a stream has changed.
@@ -482,20 +482,22 @@ public:
   {
     return mConsumers.Length();
   }

   const StreamBuffer& GetStreamBuffer() { return mBuffer; }
   GraphTime GetStreamBufferStartTime() { return mBufferStartTime; }

   double StreamTimeToSeconds(StreamTime aTime)
   {
-    return TrackTicksToSeconds(mBuffer.GraphRate(), aTime);
+    NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
+    return static_cast<double>(aTime)/mBuffer.GraphRate();
   }
   int64_t StreamTimeToMicroseconds(StreamTime aTime)
   {
+    NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
     return (aTime*1000000)/mBuffer.GraphRate();
   }
   StreamTime MicrosecondsToStreamTimeRoundDown(int64_t aMicroseconds) {
     return (aMicroseconds*mBuffer.GraphRate())/1000000;
   }

   TrackTicks TimeToTicksRoundUp(TrackRate aRate, StreamTime aTime)
   {
@@ -613,17 +615,17 @@ protected:
   struct AudioOutputStream
   {
     // When we started audio playback for this track.
     // Add mStream->GetPosition() to find the current audio playback position.
     GraphTime mAudioPlaybackStartTime;
     // Amount of time that we've wanted to play silence because of the stream
     // blocking.
     MediaTime mBlockedAudioTime;
     // Last tick written to the audio output.
-    TrackTicks mLastTickWritten;
+    StreamTime mLastTickWritten;
     TrackID mTrackID;
   };
   nsTArray<AudioOutputStream> mAudioOutputStreams;

   /**
    * When true, this means the stream will be finished once all
    * buffered data has been consumed.
    */
@@ -717,25 +719,25 @@ public:
   void RemoveDirectListener(MediaStreamDirectListener* aListener);

   /**
    * Add a new track to the stream starting at the given base time (which
    * must be greater than or equal to the last time passed to
    * AdvanceKnownTracksTime). Takes ownership of aSegment. aSegment should
    * contain data starting after aStart.
    */
-  void AddTrack(TrackID aID, TrackTicks aStart, MediaSegment* aSegment)
+  void AddTrack(TrackID aID, StreamTime aStart, MediaSegment* aSegment)
   {
     AddTrackInternal(aID, GraphRate(), aStart, aSegment);
   }

   /**
    * Like AddTrack, but resamples audio from aRate to the graph rate.
    */
-  void AddAudioTrack(TrackID aID, TrackRate aRate, TrackTicks aStart,
+  void AddAudioTrack(TrackID aID, TrackRate aRate, StreamTime aStart,
                      AudioSegment* aSegment)
   {
     AddTrackInternal(aID, aRate, aStart, aSegment);
   }

   /**
    * Append media data to a track. Ownership of aSegment remains with the caller,
    * but aSegment is emptied.
@@ -795,17 +797,17 @@ public:
    * Note: Only call from Media Graph thread (eg NotifyPull)
    *
    * Returns amount of time (data) that is currently buffered in the track,
    * assuming playout via PlayAudio or via a TrackUnion - note that
    * NotifyQueuedTrackChanges() on a SourceMediaStream will occur without
    * any "extra" buffering, but NotifyQueuedTrackChanges() on a TrackUnion
    * will be buffered.
    */
-  TrackTicks GetBufferedTicks(TrackID aID);
+  StreamTime GetBufferedTicks(TrackID aID);

   void RegisterForAudioMixing();

   // XXX need a Reset API

   friend class MediaStreamGraphImpl;

 protected:
@@ -831,33 +833,33 @@ protected:
     // Sample rate of the input data.
     TrackRate mInputRate;
     // Resampler if the rate of the input track does not match the
     // MediaStreamGraph's.
     nsAutoRef<SpeexResamplerState> mResampler;
 #ifdef DEBUG
     int mResamplerChannelCount;
 #endif
-    TrackTicks mStart;
+    StreamTime mStart;
     // Each time the track updates are flushed to the media graph thread,
     // this is cleared.
     uint32_t mCommands;
     // Each time the track updates are flushed to the media graph thread,
     // the segment buffer is emptied.
     nsAutoPtr<MediaSegment> mData;
     nsTArray<ThreadAndRunnable> mDispatchWhenNotEnough;
     bool mHaveEnough;
   };

   bool NeedsMixing();

   void ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment);

   void AddTrackInternal(TrackID aID, TrackRate aRate,
-                        TrackTicks aStart, MediaSegment* aSegment);
+                        StreamTime aStart, MediaSegment* aSegment);

   TrackData* FindDataForTrack(TrackID aID)
   {
     mMutex.AssertCurrentThreadOwns();
     for (uint32_t i = 0; i < mUpdateTracks.Length(); ++i) {
       if (mUpdateTracks[i].mID == aID) {
         return &mUpdateTracks[i];
       }
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -345,17 +345,17 @@ public:
    * If aStream needs an audio stream but doesn't have one, create it.
    * If aStream doesn't need an audio stream but has one, destroy it.
    */
   void CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTime,
                                    MediaStream* aStream);
   /**
    * Queue audio (mix of stream audio and silence for blocked intervals)
    * to the audio output stream. Returns the number of frames played.
    */
-  TrackTicks PlayAudio(MediaStream* aStream, GraphTime aFrom, GraphTime aTo);
+  StreamTime PlayAudio(MediaStream* aStream, GraphTime aFrom, GraphTime aTo);
   /**
    * Set the correct current video frame for stream aStream.
    */
   void PlayVideo(MediaStream* aStream);
   /**
    * No more data will be forthcoming for aStream. The stream will end
    * at the current buffer end point. The StreamBuffer's tracks must be
    * explicitly set to finished by the caller.
@@ -396,21 +396,24 @@ public:
     mStreamOrderDirty = true;
   }

   // Always stereo for now.
   uint32_t AudioChannelCount() { return 2; }

   double MediaTimeToSeconds(GraphTime aTime)
   {
-    return TrackTicksToSeconds(GraphRate(), aTime);
+    NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
+    return static_cast<double>(aTime)/GraphRate();
   }

   GraphTime SecondsToMediaTime(double aS)
   {
-    return SecondsToTicksRoundDown(GraphRate(), aS);
+    NS_ASSERTION(0 <= aS && aS <= TRACK_TICKS_MAX/TRACK_RATE_MAX,
+                 "Bad seconds");
+    return GraphRate() * aS;
   }

   GraphTime MillisecondsToMediaTime(int32_t aMS)
   {
     return RateConvertTicksRoundDown(GraphRate(), 1000, aMS);
   }

   /**
    * Signal to the graph that the thread has paused indefinitely,
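With TrackTicksToSeconds() and SecondsToTicksRoundDown() gone (they are removed from StreamBuffer.h below), the graph converts directly against its own rate. A minimal sketch of the two inlined conversions, assuming a fixed 48 kHz rate in place of the real GraphRate() accessor:

#include <cassert>
#include <cstdint>

typedef int64_t GraphTime;
typedef int32_t TrackRate;

// Minimal stand-in for the graph's clock; the real code calls GraphRate().
struct GraphClockSketch {
  TrackRate mGraphRate; // e.g. 48000 for a 48 kHz graph

  double MediaTimeToSeconds(GraphTime aTime) const {
    return static_cast<double>(aTime) / mGraphRate;
  }
  GraphTime SecondsToMediaTime(double aSeconds) const {
    return static_cast<GraphTime>(mGraphRate * aSeconds);
  }
};

int main() {
  GraphClockSketch clock = { 48000 };
  assert(clock.SecondsToMediaTime(1.0) == 48000);
  assert(clock.MediaTimeToSeconds(24000) == 0.5);
  return 0;
}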
--- a/dom/media/StreamBuffer.cpp
+++ b/dom/media/StreamBuffer.cpp
@@ -91,14 +91,14 @@ StreamBuffer::ForgetUpTo(StreamTime aTim
   for (uint32_t i = 0; i < mTracks.Length(); ++i) {
     Track* track = mTracks[i];
     if (track->IsEnded() && track->GetEnd() <= aTime) {
       mTracks.RemoveElementAt(i);
       --i;
       continue;
     }
-    TrackTicks forgetTo = std::min(track->GetEnd() - 1, aTime);
+    StreamTime forgetTo = std::min(track->GetEnd() - 1, aTime);
     track->ForgetUpTo(forgetTo);
   }
 }

 }
--- a/dom/media/StreamBuffer.h
+++ b/dom/media/StreamBuffer.h
@@ -7,22 +7,16 @@
 #define MOZILLA_STREAMBUFFER_H_

 #include "MediaSegment.h"
 #include "nsAutoPtr.h"

 namespace mozilla {

 /**
- * Media time relative to the start of a StreamBuffer.
- */
-typedef MediaTime StreamTime;
-const StreamTime STREAM_TIME_MAX = MEDIA_TIME_MAX;
-
-/**
  * Unique ID for track within a StreamBuffer. Tracks from different
  * StreamBuffers may have the same ID; this matters when appending StreamBuffers,
  * since tracks with the same ID are matched. Only IDs greater than 0 are allowed.
  */
 typedef int32_t TrackID;
 const TrackID TRACK_NONE = 0;
 const TrackID TRACK_INVALID = -1;

@@ -39,31 +33,16 @@ inline TrackTicks RateConvertTicksRoundU
                                           TrackRate aInRate, TrackTicks aTicks)
 {
   NS_ASSERTION(0 < aOutRate && aOutRate <= TRACK_RATE_MAX, "Bad out rate");
   NS_ASSERTION(0 < aInRate && aInRate <= TRACK_RATE_MAX, "Bad in rate");
   NS_ASSERTION(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad ticks");
   return (aTicks * aOutRate + aInRate - 1) / aInRate;
 }

-inline TrackTicks SecondsToTicksRoundDown(TrackRate aRate, double aSeconds)
-{
-  NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Bad rate");
-  NS_ASSERTION(0 <= aSeconds && aSeconds <= TRACK_TICKS_MAX/TRACK_RATE_MAX,
-               "Bad seconds");
-  return aSeconds * aRate;
-}
-
-inline double TrackTicksToSeconds(TrackRate aRate, TrackTicks aTicks)
-{
-  NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Bad rate");
-  NS_ASSERTION(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad ticks");
-  return static_cast<double>(aTicks)/aRate;
-}
-
 /**
  * This object contains the decoded data for a stream's tracks.
  * A StreamBuffer can be appended to. Logically a StreamBuffer only gets longer,
  * but we also have the ability to "forget" data before a certain time that
  * we know won't be used again. (We prune a whole number of seconds internally.)
  *
  * StreamBuffers should only be used from one thread at a time.
  *
@@ -76,21 +55,21 @@ public:
   /**
    * Every track has a start time --- when it started in the StreamBuffer.
    * It has an end flag; when false, no end point is known; when true,
    * the track ends when the data we have for the track runs out.
    * Tracks have a unique ID assigned at creation. This allows us to identify
    * the same track across StreamBuffers. A StreamBuffer should never have
    * two tracks with the same ID (even if they don't overlap in time).
    * TODO Tracks can also be enabled and disabled over time.
-   * TODO Add TimeVarying<TrackTicks,bool> mEnabled.
+   * TODO Add TimeVarying<StreamTime,bool> mEnabled.
    * Takes ownership of aSegment.
    */
   class Track {
-    Track(TrackID aID, TrackTicks aStart, MediaSegment* aSegment, TrackRate aGraphRate)
+    Track(TrackID aID, StreamTime aStart, MediaSegment* aSegment, TrackRate aGraphRate)
       : mStart(aStart),
         mSegment(aSegment),
         mGraphRate(aGraphRate),
         mID(aID),
         mEnded(false)
     {
       MOZ_COUNT_CTOR(Track);
@@ -107,18 +86,18 @@ public:
       if (mSegment->GetType() == T::StaticType()) {
         return static_cast<T*>(mSegment.get());
       }
       return nullptr;
     }
     MediaSegment* GetSegment() const { return mSegment; }
     TrackID GetID() const { return mID; }
     bool IsEnded() const { return mEnded; }
-    TrackTicks GetStart() const { return mStart; }
-    TrackTicks GetEnd() const { return mSegment->GetDuration(); }
+    StreamTime GetStart() const { return mStart; }
+    StreamTime GetEnd() const { return mSegment->GetDuration(); }
     MediaSegment::Type GetType() const { return mSegment->GetType(); }
     void SetEnded() { mEnded = true; }
     void AppendFrom(Track* aTrack)
     {
       NS_ASSERTION(!mEnded, "Can't append to ended track");
       NS_ASSERTION(aTrack->mID == mID, "IDs must match");
       NS_ASSERTION(aTrack->mStart == 0, "Source track must start at zero");
@@ -126,21 +105,21 @@ public:
       mSegment->AppendFrom(aTrack->mSegment);
       mEnded = aTrack->mEnded;
     }
     MediaSegment* RemoveSegment()
     {
       return mSegment.forget();
     }
-    void ForgetUpTo(TrackTicks aTime)
+    void ForgetUpTo(StreamTime aTime)
     {
       mSegment->ForgetUpTo(aTime);
     }
-    void FlushAfter(TrackTicks aNewEnd)
+    void FlushAfter(StreamTime aNewEnd)
     {
       // Forget everything after a given endpoint
       // a specified amount
       mSegment->FlushAfter(aNewEnd);
     }

     size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
     {
@@ -150,17 +129,17 @@ public:
       }
       return amount;
     }

   protected:
     friend class StreamBuffer;

     // Start offset is in ticks at rate mRate
-    TrackTicks mStart;
+    StreamTime mStart;
     // The segment data starts at the start of the owning StreamBuffer, i.e.,
     // there's mStart silence/no video at the beginning.
     nsAutoPtr<MediaSegment> mSegment;
     TrackRate mGraphRate; // graph rate in StreamTime per second
     // Unique ID
     TrackID mID;
     // True when the track ends with the data in mSegment
     bool mEnded;
@@ -218,17 +197,17 @@ public:
     return mGraphRate;
   }

   /**
    * Takes ownership of aSegment. Don't do this while iterating, or while
    * holding a Track reference.
    * aSegment must have aStart worth of null data.
    */
-  Track& AddTrack(TrackID aID, TrackTicks aStart, MediaSegment* aSegment)
+  Track& AddTrack(TrackID aID, StreamTime aStart, MediaSegment* aSegment)
   {
     NS_ASSERTION(!FindTrack(aID), "Track with this ID already exists");

     Track* track = new Track(aID, aStart, aSegment, GraphRate());
     mTracks.InsertElementSorted(track, CompareTracksByID());

     if (mTracksKnownTime == STREAM_TIME_MAX) {
       // There exists code like
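RateConvertTicksRoundUp() is the one cross-rate helper this header keeps. Reproduced in isolation below with a quick check of the round-up behavior; the 44.1 kHz to 48 kHz values are illustrative, not from the patch.

#include <cassert>
#include <cstdint>

typedef int32_t TrackRate;
typedef int64_t TrackTicks;

// The surviving cross-rate helper, in isolation: converts a tick count
// between rates, rounding up so no input is dropped.
inline TrackTicks RateConvertTicksRoundUp(TrackRate aOutRate,
                                          TrackRate aInRate,
                                          TrackTicks aTicks) {
  return (aTicks * aOutRate + aInRate - 1) / aInRate;
}

int main() {
  // 441 ticks at 44.1 kHz cover 10 ms; at 48 kHz that is exactly 480 ticks.
  assert(RateConvertTicksRoundUp(48000, 44100, 441) == 480);
  // One extra input tick rounds up rather than truncating.
  assert(RateConvertTicksRoundUp(48000, 44100, 442) == 482);
  return 0;
}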
--- a/dom/media/TrackUnionStream.cpp
+++ b/dom/media/TrackUnionStream.cpp
@@ -186,17 +186,17 @@ TrackUnionStream::TrackUnionStream(DOMMe
           break;
         }
         id = ++maxTrackID;
       }

       // Round up the track start time so the track, if anything, starts a
       // little later than the true time. This means we'll have enough
       // samples in our input stream to go just beyond the destination time.
-      TrackTicks outputStart = GraphTimeToStreamTime(aFrom);
+      StreamTime outputStart = GraphTimeToStreamTime(aFrom);

       nsAutoPtr<MediaSegment> segment;
       segment = aTrack->GetSegment()->CreateEmptyClone();
       for (uint32_t j = 0; j < mListeners.Length(); ++j) {
         MediaStreamListener* l = mListeners[j];
         l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                     MediaStreamListener::TRACK_EVENT_CREATED,
                                     *segment);
@@ -221,17 +221,17 @@ TrackUnionStream::TrackUnionStream(DOMMe
   void TrackUnionStream::EndTrack(uint32_t aIndex)
   {
     StreamBuffer::Track* outputTrack = mBuffer.FindTrack(mTrackMap[aIndex].mOutputTrackID);
     if (!outputTrack || outputTrack->IsEnded())
       return;
     for (uint32_t j = 0; j < mListeners.Length(); ++j) {
       MediaStreamListener* l = mListeners[j];
-      TrackTicks offset = outputTrack->GetSegment()->GetDuration();
+      StreamTime offset = outputTrack->GetSegment()->GetDuration();
       nsAutoPtr<MediaSegment> segment;
       segment = outputTrack->GetSegment()->CreateEmptyClone();
       l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(), offset,
                                   MediaStreamListener::TRACK_EVENT_ENDED,
                                   *segment);
     }
     outputTrack->SetEnded();
   }
@@ -248,28 +248,28 @@ TrackUnionStream::TrackUnionStream(DOMMe
     MediaStream* source = map->mInputPort->GetSource();

     GraphTime next;
     *aOutputTrackFinished = false;
     for (GraphTime t = aFrom; t < aTo; t = next) {
       MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
       interval.mEnd = std::min(interval.mEnd, aTo);
       StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
-      TrackTicks inputTrackEndPoint = TRACK_TICKS_MAX;
+      StreamTime inputTrackEndPoint = STREAM_TIME_MAX;

       if (aInputTrack->IsEnded() &&
           aInputTrack->GetEnd() <= inputEnd) {
         inputTrackEndPoint = aInputTrack->GetEnd();
         *aOutputTrackFinished = true;
       }

       if (interval.mStart >= interval.mEnd) {
         break;
       }
-      TrackTicks ticks = interval.mEnd - interval.mStart;
+      StreamTime ticks = interval.mEnd - interval.mStart;
       next = interval.mEnd;

       StreamTime outputStart = outputTrack->GetEnd();

       if (interval.mInputIsBlocked) {
         // Maybe the input track ended?
         segment->AppendNullData(ticks);
         STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
--- a/dom/media/TrackUnionStream.h
+++ b/dom/media/TrackUnionStream.h
@@ -33,17 +33,17 @@ public:

 protected:
   TrackIDFilterCallback mFilterCallback;

   // Only non-ended tracks are allowed to persist in this map.
   struct TrackMapEntry {
     // mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
     // 0 if we haven't consumed any yet.
-    TrackTicks mEndOfConsumedInputTicks;
+    StreamTime mEndOfConsumedInputTicks;
     // mEndOfLastInputIntervalInInputStream is the timestamp for the end of the
     // previous interval which was unblocked for both the input and output
     // stream, in the input stream's timeline, or -1 if there wasn't one.
     StreamTime mEndOfLastInputIntervalInInputStream;
     // mEndOfLastInputIntervalInOutputStream is the timestamp for the end of the
     // previous interval which was unblocked for both the input and output
     // stream, in the output stream's timeline, or -1 if there wasn't one.
     StreamTime mEndOfLastInputIntervalInOutputStream;
--- a/dom/media/VideoSegment.cpp
+++ b/dom/media/VideoSegment.cpp
@@ -87,17 +87,17 @@ VideoFrame::CreateBlackImage(const gfxIn
 VideoChunk::VideoChunk()
 {}

 VideoChunk::~VideoChunk()
 {}

 void
 VideoSegment::AppendFrame(already_AddRefed<Image>&& aImage,
-                          TrackTicks aDuration,
+                          StreamTime aDuration,
                           const IntSize& aIntrinsicSize,
                           bool aForceBlack)
 {
   VideoChunk* chunk = AppendChunk(aDuration);
   VideoFrame frame(aImage, ThebesIntSize(aIntrinsicSize));
   frame.SetForceBlack(aForceBlack);
   chunk->mFrame.TakeFrom(&frame);
 }
--- a/dom/media/VideoSegment.h
+++ b/dom/media/VideoSegment.h
@@ -54,69 +54,69 @@ protected:
   // The desired size to render the video frame at.
   gfxIntSize mIntrinsicSize;
   bool mForceBlack;
 };

 struct VideoChunk {
   VideoChunk();
   ~VideoChunk();
-  void SliceTo(TrackTicks aStart, TrackTicks aEnd)
+  void SliceTo(StreamTime aStart, StreamTime aEnd)
   {
     NS_ASSERTION(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
                  "Slice out of bounds");
     mDuration = aEnd - aStart;
   }
-  TrackTicks GetDuration() const { return mDuration; }
+  StreamTime GetDuration() const { return mDuration; }
   bool CanCombineWithFollowing(const VideoChunk& aOther) const
   {
     return aOther.mFrame == mFrame;
   }
   bool IsNull() const { return !mFrame.GetImage(); }
-  void SetNull(TrackTicks aDuration)
+  void SetNull(StreamTime aDuration)
   {
     mDuration = aDuration;
     mFrame.SetNull();
     mTimeStamp = TimeStamp();
   }
   void SetForceBlack(bool aForceBlack) { mFrame.SetForceBlack(aForceBlack); }

   size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const
   {
     // Future:
     // - mFrame
     return 0;
   }

-  TrackTicks mDuration;
+  StreamTime mDuration;
   VideoFrame mFrame;
   mozilla::TimeStamp mTimeStamp;
 };

 class VideoSegment : public MediaSegmentBase<VideoSegment, VideoChunk> {
 public:
   typedef mozilla::layers::Image Image;
   typedef mozilla::gfx::IntSize IntSize;

   VideoSegment();
   ~VideoSegment();

   void AppendFrame(already_AddRefed<Image>&& aImage,
-                   TrackTicks aDuration,
+                   StreamTime aDuration,
                    const IntSize& aIntrinsicSize,
                    bool aForceBlack = false);
-  const VideoFrame* GetFrameAt(TrackTicks aOffset, TrackTicks* aStart = nullptr)
+  const VideoFrame* GetFrameAt(StreamTime aOffset, StreamTime* aStart = nullptr)
   {
     VideoChunk* c = FindChunkContaining(aOffset, aStart);
     if (!c) {
       return nullptr;
     }
     return &c->mFrame;
   }
-  const VideoFrame* GetLastFrame(TrackTicks* aStart = nullptr)
+  const VideoFrame* GetLastFrame(StreamTime* aStart = nullptr)
   {
     VideoChunk* c = GetLastChunk();
     if (!c) {
       return nullptr;
     }
     if (aStart) {
       *aStart = mDuration - c->mDuration;
     }
--- a/dom/media/encoder/MediaEncoder.cpp
+++ b/dom/media/encoder/MediaEncoder.cpp
@@ -41,17 +41,17 @@ PRLogModuleInfo* gMediaEncoderLog;
 #define LOG(type, msg)
 #endif

 namespace mozilla {

 void
 MediaEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                        TrackID aID,
-                                       TrackTicks aTrackOffset,
+                                       StreamTime aTrackOffset,
                                        uint32_t aTrackEvents,
                                        const MediaSegment& aQueuedMedia)
 {
   // Process the incoming raw track data from MediaStreamGraph, called on the
   // thread of MediaStreamGraph.
   if (mAudioEncoder && aQueuedMedia.GetType() == MediaSegment::AUDIO) {
     mAudioEncoder->NotifyQueuedTrackChanges(aGraph, aID, aTrackOffset,
                                             aTrackEvents,
--- a/dom/media/encoder/MediaEncoder.h
+++ b/dom/media/encoder/MediaEncoder.h
@@ -75,17 +75,17 @@ public :
   ~MediaEncoder() {};

   /**
    * Notified by the control loop of MediaStreamGraph; aQueueMedia is the raw
    * track data in form of MediaSegment.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;

   /**
    * Notified the stream is being removed.
    */
   virtual void NotifyEvent(MediaStreamGraph* aGraph,
                            MediaStreamListener::MediaStreamGraphEvent event) MOZ_OVERRIDE;
--- a/dom/media/encoder/TrackEncoder.cpp
+++ b/dom/media/encoder/TrackEncoder.cpp
@@ -48,17 +48,17 @@ TrackEncoder::TrackEncoder()
     gTrackEncoderLog = PR_NewLogModule("TrackEncoder");
   }
 #endif
 }

 void
 AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                             TrackID aID,
-                                            TrackTicks aTrackOffset,
+                                            StreamTime aTrackOffset,
                                             uint32_t aTrackEvents,
                                             const MediaSegment& aQueuedMedia)
 {
   if (mCanceled) {
     return;
   }

   const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia);
@@ -176,17 +176,17 @@ size_t
 AudioTrackEncoder::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
 {
   return mRawSegment.SizeOfExcludingThis(aMallocSizeOf);
 }

 void
 VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                             TrackID aID,
-                                            TrackTicks aTrackOffset,
+                                            StreamTime aTrackOffset,
                                             uint32_t aTrackEvents,
                                             const MediaSegment& aQueuedMedia)
 {
   if (mCanceled) {
     return;
   }

   const VideoSegment& video = static_cast<const VideoSegment&>(aQueuedMedia);
--- a/dom/media/encoder/TrackEncoder.h
+++ b/dom/media/encoder/TrackEncoder.h
@@ -34,17 +34,17 @@ public:
   virtual ~TrackEncoder() {}

   /**
    * Notified by the same callback of MediaEncoder when it has received a track
    * change from MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) = 0;

   /**
    * Notified by the same callback of MediaEncoder when it has been removed from
    * MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
   void NotifyEvent(MediaStreamGraph* aGraph,
@@ -141,17 +141,17 @@ class AudioTrackEncoder : public TrackEn
 public:
   AudioTrackEncoder()
     : TrackEncoder()
     , mChannels(0)
     , mSamplingRate(0)
   {}

   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;

   /**
    * Interleaves the track data and stores the result into aOutput. Might need
    * to up-mix or down-mix the channel data if the channels number of this chunk
    * is different from aOutputChannels. The channel data from aChunk might be
    * modified by up-mixing.
@@ -234,17 +234,17 @@ public:
     , mTotalFrameDuration(0)
   {}

   /**
    * Notified by the same callback of MediaEncoder when it has received a track
    * change from MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;

   /**
    * Measure size of mRawSegment
    */
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;

 protected:
@@ -292,20 +292,20 @@ protected:
   int mDisplayHeight;

   /**
    * The track rate of source video.
    */
   TrackRate mTrackRate;

   /**
-   * The total duration of frames in encoded video in TrackTicks, kept track of
+   * The total duration of frames in encoded video in StreamTime, kept track of
    * in subclasses.
    */
-  TrackTicks mTotalFrameDuration;
+  StreamTime mTotalFrameDuration;

   /**
    * The last unique frame we've sent to track encoder, kept track of in
    * subclasses.
    */
   VideoFrame mLastFrame;

   /**
--- a/dom/media/encoder/VP8TrackEncoder.cpp
+++ b/dom/media/encoder/VP8TrackEncoder.cpp
@@ -358,17 +358,17 @@ nsresult VP8TrackEncoder::PrepareRawFram

 /**
  * Compares the elapsed time from the beginning of GetEncodedTrack and
  * the processed frame duration in mSourceSegment
  * in order to set the nextEncodeOperation for next target frame.
  */
 VP8TrackEncoder::EncodeOperation
 VP8TrackEncoder::GetNextEncodeOperation(TimeDuration aTimeElapsed,
-                                        TrackTicks aProcessedDuration)
+                                        StreamTime aProcessedDuration)
 {
   int64_t durationInUsec =
     FramesToUsecs(aProcessedDuration + mEncodedFrameDuration,
                   mTrackRate).value();
   if (aTimeElapsed.ToMicroseconds() > (durationInUsec * SKIP_FRAME_RATIO)) {
     // The encoder is too slow.
     // We should skip next frame to consume the mSourceSegment.
     return SKIP_FRAME;
@@ -376,30 +376,30 @@ VP8TrackEncoder::GetNextEncodeOperation(
     // The encoder is a little slow.
     // We force the encoder to encode an I-frame to accelerate.
     return ENCODE_I_FRAME;
   } else {
     return ENCODE_NORMAL_FRAME;
   }
 }

-TrackTicks
-VP8TrackEncoder::CalculateRemainingTicks(TrackTicks aDurationCopied,
-                                         TrackTicks aEncodedDuration)
+StreamTime
+VP8TrackEncoder::CalculateRemainingTicks(StreamTime aDurationCopied,
+                                         StreamTime aEncodedDuration)
 {
   return mRemainingTicks + aEncodedDuration - aDurationCopied;
 }

 // Try to extend the encodedDuration as long as possible if the target frame
 // has a long duration.
-TrackTicks
-VP8TrackEncoder::CalculateEncodedDuration(TrackTicks aDurationCopied)
+StreamTime
+VP8TrackEncoder::CalculateEncodedDuration(StreamTime aDurationCopied)
 {
-  TrackTicks temp64 = aDurationCopied;
-  TrackTicks encodedDuration = mEncodedFrameDuration;
+  StreamTime temp64 = aDurationCopied;
+  StreamTime encodedDuration = mEncodedFrameDuration;
   temp64 -= mRemainingTicks;
   while (temp64 > mEncodedFrameDuration) {
     temp64 -= mEncodedFrameDuration;
     encodedDuration += mEncodedFrameDuration;
   }
   return encodedDuration;
 }

@@ -465,33 +465,33 @@ VP8TrackEncoder::GetEncodedTrack(Encoded
     }
     if (mCanceled || mEncodingComplete) {
       return NS_ERROR_FAILURE;
     }
     mSourceSegment.AppendFrom(&mRawSegment);
   }

   VideoSegment::ChunkIterator iter(mSourceSegment);
-  TrackTicks durationCopied = 0;
-  TrackTicks totalProcessedDuration = 0;
+  StreamTime durationCopied = 0;
+  StreamTime totalProcessedDuration = 0;
   TimeStamp timebase = TimeStamp::Now();
   EncodeOperation nextEncodeOperation = ENCODE_NORMAL_FRAME;

   for (; !iter.IsEnded(); iter.Next()) {
     VideoChunk &chunk = *iter;
     // Accumulate chunk's duration to durationCopied until it reaches
     // mRemainingTicks.
     durationCopied += chunk.GetDuration();
     MOZ_ASSERT(mRemainingTicks <= mEncodedFrameDuration);
     VP8LOG("durationCopied %lld mRemainingTicks %lld\n",
            durationCopied, mRemainingTicks);
     if (durationCopied >= mRemainingTicks) {
       VP8LOG("nextEncodeOperation is %d\n",nextEncodeOperation);
       // Calculate encodedDuration for this target frame.
-      TrackTicks encodedDuration = CalculateEncodedDuration(durationCopied);
+      StreamTime encodedDuration = CalculateEncodedDuration(durationCopied);

       // Encode frame.
       if (nextEncodeOperation != SKIP_FRAME) {
         nsresult rv = PrepareRawFrame(chunk);
         NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);

         // Encode the data with VP8 encoder
         int flags = (nextEncodeOperation == ENCODE_NORMAL_FRAME) ?
--- a/dom/media/encoder/VP8TrackEncoder.h
+++ b/dom/media/encoder/VP8TrackEncoder.h
@@ -38,40 +38,40 @@ public:

 protected:
   nsresult Init(int32_t aWidth, int32_t aHeight,
                 int32_t aDisplayWidth, int32_t aDisplayHeight,
                 TrackRate aTrackRate) MOZ_FINAL MOZ_OVERRIDE;

 private:
   // Calculate the target frame's encoded duration.
-  TrackTicks CalculateEncodedDuration(TrackTicks aDurationCopied);
+  StreamTime CalculateEncodedDuration(StreamTime aDurationCopied);

   // Calculate the mRemainingTicks for next target frame.
-  TrackTicks CalculateRemainingTicks(TrackTicks aDurationCopied,
-                                     TrackTicks aEncodedDuration);
+  StreamTime CalculateRemainingTicks(StreamTime aDurationCopied,
+                                     StreamTime aEncodedDuration);

   // Get the EncodeOperation for next target frame.
   EncodeOperation GetNextEncodeOperation(TimeDuration aTimeElapsed,
-                                         TrackTicks aProcessedDuration);
+                                         StreamTime aProcessedDuration);

   // Get the encoded data from encoder to aData.
   nsresult GetEncodedPartitions(EncodedFrameContainer& aData);

   // Prepare the input data to the mVPXImageWrapper for encoding.
   nsresult PrepareRawFrame(VideoChunk &aChunk);

   // Output frame rate.
   uint32_t mEncodedFrameRate;
   // Duration for the output frame, reciprocal to mEncodedFrameRate.
-  TrackTicks mEncodedFrameDuration;
+  StreamTime mEncodedFrameDuration;
   // Encoded timestamp.
-  TrackTicks mEncodedTimestamp;
+  StreamTime mEncodedTimestamp;
   // Duration to the next encode frame.
-  TrackTicks mRemainingTicks;
+  StreamTime mRemainingTicks;
   // Muted frame, we only create it once.
   nsRefPtr<layers::Image> mMuteFrame;
   // I420 frame, convert the 4:4:4, 4:2:2 to I420.
   nsTArray<uint8_t> mI420Frame;

   /**
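CalculateEncodedDuration() in VP8TrackEncoder.cpp above extends the encoded duration in whole output-frame steps until the copied input, less the carry from the previous frame (mRemainingTicks), is covered. A standalone sketch with the members passed as parameters; the 90000-tick figures are illustrative.

#include <cassert>
#include <cstdint>

typedef int64_t StreamTime;

// Standalone version of CalculateEncodedDuration: grow the encoded
// duration one output frame at a time until the remaining copied input
// fits within the frame being started.
StreamTime CalculateEncodedDurationSketch(StreamTime aDurationCopied,
                                          StreamTime aRemainingTicks,
                                          StreamTime aEncodedFrameDuration) {
  StreamTime temp64 = aDurationCopied - aRemainingTicks;
  StreamTime encodedDuration = aEncodedFrameDuration;
  while (temp64 > aEncodedFrameDuration) {
    temp64 -= aEncodedFrameDuration;
    encodedDuration += aEncodedFrameDuration;
  }
  return encodedDuration;
}

int main() {
  // With 90000-tick output frames and no carry, 200000 copied ticks yield
  // two whole extra output frames on top of the frame being started.
  assert(CalculateEncodedDurationSketch(200000, 0, 90000) == 270000);
  return 0;
}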
--- a/dom/media/gtest/TestVideoSegment.cpp
+++ b/dom/media/gtest/TestVideoSegment.cpp
@@ -14,17 +14,17 @@ namespace mozilla {
 }

 TEST(VideoSegment, TestAppendFrameForceBlack)
 {
   nsRefPtr<layers::Image> testImage = nullptr;

   VideoSegment segment;
   segment.AppendFrame(testImage.forget(),
-                      mozilla::TrackTicks(90000),
+                      mozilla::StreamTime(90000),
                       mozilla::gfx::IntSize(640, 480),
                       true);

   VideoSegment::ChunkIterator iter(segment);
   while (!iter.IsEnded()) {
     VideoChunk chunk = *iter;
     EXPECT_TRUE(chunk.mFrame.GetForceBlack());
     iter.Next();
@@ -32,17 +32,17 @@ TEST(VideoSegment, TestAppendFrameForceB
 }

 TEST(VideoSegment, TestAppendFrameNotForceBlack)
 {
   nsRefPtr<layers::Image> testImage = nullptr;

   VideoSegment segment;
   segment.AppendFrame(testImage.forget(),
-                      mozilla::TrackTicks(90000),
+                      mozilla::StreamTime(90000),
                       mozilla::gfx::IntSize(640, 480));

   VideoSegment::ChunkIterator iter(segment);
   while (!iter.IsEnded()) {
     VideoChunk chunk = *iter;
     EXPECT_FALSE(chunk.mFrame.GetForceBlack());
     iter.Next();
   }
--- a/dom/media/gtest/TestVideoTrackEncoder.cpp
+++ b/dom/media/gtest/TestVideoTrackEncoder.cpp
@@ -263,17 +263,17 @@ TEST(VP8VideoTrackEncoder, FrameEncode)
   generator.Generate(images);

   // Put generated YUV frame into video segment.
   // Duration of each frame is 1 second.
   VideoSegment segment;
   for (nsTArray<nsRefPtr<Image>>::size_type i = 0; i < images.Length(); i++)
   {
     nsRefPtr<Image> image = images[i];
-    segment.AppendFrame(image.forget(), mozilla::TrackTicks(90000), generator.GetSize());
+    segment.AppendFrame(image.forget(), mozilla::StreamTime(90000), generator.GetSize());
   }

   // track change notification.
   encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);

   // Pull Encoded Data back from encoder.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
--- a/dom/media/imagecapture/CaptureTask.cpp
+++ b/dom/media/imagecapture/CaptureTask.cpp
@@ -79,17 +79,17 @@ void
 CaptureTask::PrincipalChanged(DOMMediaStream* aMediaStream)
 {
   MOZ_ASSERT(NS_IsMainThread());
   mPrincipalChanged = true;
 }

 void
 CaptureTask::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                      TrackTicks aTrackOffset,
+                                      StreamTime aTrackOffset,
                                       uint32_t aTrackEvents,
                                       const MediaSegment& aQueuedMedia)
 {
   if (mImageGrabbedOrTrackEnd) {
     return;
   }

   if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) {
--- a/dom/media/imagecapture/CaptureTask.h
+++ b/dom/media/imagecapture/CaptureTask.h
@@ -28,17 +28,17 @@ class ImageCapture;
  * released during the period of the capturing process described above.
  */
 class CaptureTask : public MediaStreamListener,
                     public DOMMediaStream::PrincipalChangeObserver
 {
 public:
   // MediaStreamListener methods.
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;

   virtual void NotifyEvent(MediaStreamGraph* aGraph,
                            MediaStreamGraphEvent aEvent) MOZ_OVERRIDE;

   // DOMMediaStream::PrincipalChangeObserver method.
   virtual void PrincipalChanged(DOMMediaStream* aMediaStream) MOZ_OVERRIDE;
--- a/dom/media/webaudio/AudioBufferSourceNode.cpp
+++ b/dom/media/webaudio/AudioBufferSourceNode.cpp
@@ -51,17 +51,17 @@ NS_IMPL_RELEASE_INHERITED(AudioBufferSou
  */
 class AudioBufferSourceNodeEngine : public AudioNodeEngine
 {
 public:
   explicit AudioBufferSourceNodeEngine(AudioNode* aNode,
                                        AudioDestinationNode* aDestination) :
     AudioNodeEngine(aNode),
     mStart(0.0), mBeginProcessing(0),
-    mStop(TRACK_TICKS_MAX),
+    mStop(STREAM_TIME_MAX),
     mResampler(nullptr), mRemainingResamplerTail(0),
     mBufferEnd(0),
     mLoopStart(0), mLoopEnd(0),
     mBufferSampleRate(0), mBufferPosition(0), mChannels(0),
     mDopplerShift(1.0f),
     mDestination(static_cast<AudioNodeStream*>(aDestination->Stream())),
     mPlaybackRateTimeline(1.0f), mLoop(false)
   {}
@@ -86,17 +86,17 @@ public:
     case AudioBufferSourceNode::PLAYBACKRATE:
       mPlaybackRateTimeline = aValue;
       WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
     }
   }
-  virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
+  virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam)
   {
     switch (aIndex) {
     case AudioBufferSourceNode::STOP: mStop = aParam; break;
     default:
       NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter");
     }
   }
   virtual void SetDoubleParameter(uint32_t aIndex, double aParam)
@@ -135,17 +135,17 @@ public:
   }
   virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
   {
     mBuffer = aBuffer;
   }
 
   bool BegunResampling()
   {
-    return mBeginProcessing == -TRACK_TICKS_MAX;
+    return mBeginProcessing == -STREAM_TIME_MAX;
   }
 
   void UpdateResampler(int32_t aOutRate, uint32_t aChannels)
   {
     if (mResampler &&
         (aChannels != mChannels ||
          // If the resampler has begun, then it will have moved
          // mBufferPosition to after the samples it has read, but it hasn't
@@ -228,17 +228,17 @@ public:
   // the playbackRate.
   // The number of frames consumed/produced depends on the amount of space
   // remaining in both the input and output buffer, and the playback rate (that
   // is, the ratio between the output samplerate and the input samplerate).
   void CopyFromInputBufferWithResampling(AudioNodeStream* aStream,
                                          AudioChunk* aOutput,
                                          uint32_t aChannels,
                                          uint32_t* aOffsetWithinBlock,
-                                         TrackTicks* aCurrentPosition,
+                                         StreamTime* aCurrentPosition,
                                          int32_t aBufferMax)
   {
     // TODO: adjust for mStop (see bug 913854 comment 9).
     uint32_t availableInOutputBuffer = WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock;
     SpeexResamplerState* resampler = mResampler;
     MOZ_ASSERT(aChannels > 0);
 
     if (mBufferPosition < aBufferMax) {
@@ -263,17 +263,17 @@ public:
       if (leadTicks > 0.0) {
         // Round to nearest output subsample supported by the resampler at
         // these rates.
         skipFracNum -= leadTicks * ratioNum + 0.5;
         MOZ_ASSERT(skipFracNum < INT32_MAX, "mBeginProcessing is wrong?");
       }
       speex_resampler_set_skip_frac_num(resampler, skipFracNum);
-      mBeginProcessing = -TRACK_TICKS_MAX;
+      mBeginProcessing = -STREAM_TIME_MAX;
     }
     inputLimit = std::min(inputLimit, availableInInputBuffer);
 
     for (uint32_t i = 0; true; ) {
       uint32_t inSamples = inputLimit;
       const float* inputData = mBuffer->GetData(i) + mBufferPosition;
 
       uint32_t outSamples = availableInOutputBuffer;
@@ -329,22 +329,22 @@ public:
    * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or
    * aCurrentPosition past aMaxPos. This function knows when it needs to
    * allocate the output buffer, and also optimizes the case where it can avoid
    * memory allocations.
    */
   void FillWithZeroes(AudioChunk* aOutput,
                       uint32_t aChannels,
                       uint32_t* aOffsetWithinBlock,
-                      TrackTicks* aCurrentPosition,
-                      TrackTicks aMaxPos)
+                      StreamTime* aCurrentPosition,
+                      StreamTime aMaxPos)
   {
     MOZ_ASSERT(*aCurrentPosition < aMaxPos);
     uint32_t numFrames =
-      std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
+      std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                            aMaxPos - *aCurrentPosition);
     if (numFrames == WEBAUDIO_BLOCK_SIZE) {
       aOutput->SetNull(numFrames);
     } else {
       if (*aOffsetWithinBlock == 0) {
         AllocateAudioBlock(aChannels, aOutput);
       }
       WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames);
@@ -361,22 +361,22 @@ public:
    * the buffer at aBufferOffset, and never takes more data than aBufferMax.
    * This function knows when it needs to allocate the output buffer, and also
    * optimizes the case where it can avoid memory allocations.
    */
   void CopyFromBuffer(AudioNodeStream* aStream,
                       AudioChunk* aOutput,
                       uint32_t aChannels,
                       uint32_t* aOffsetWithinBlock,
-                      TrackTicks* aCurrentPosition,
+                      StreamTime* aCurrentPosition,
                       int32_t aBufferMax)
   {
     MOZ_ASSERT(*aCurrentPosition < mStop);
     uint32_t numFrames =
-      std::min(std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
+      std::min(std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                                     aBufferMax - mBufferPosition),
                mStop - *aCurrentPosition);
     if (numFrames == WEBAUDIO_BLOCK_SIZE && !mResampler) {
       MOZ_ASSERT(mBufferPosition < aBufferMax);
       BorrowFromInputBuffer(aOutput, aChannels);
       *aOffsetWithinBlock += numFrames;
       *aCurrentPosition += numFrames;
       mBufferPosition += numFrames;
@@ -440,21 +440,21 @@ public:
     }
 
     // WebKit treats the playbackRate as a k-rate parameter in their code,
     // despite the spec saying that it should be an a-rate parameter. We treat
     // it as k-rate. Spec bug: https://www.w3.org/Bugs/Public/show_bug.cgi?id=21592
     UpdateSampleRateIfNeeded(channels);
 
     uint32_t written = 0;
-    TrackTicks streamPosition = aStream->GetCurrentPosition();
+    StreamTime streamPosition = aStream->GetCurrentPosition();
     while (written < WEBAUDIO_BLOCK_SIZE) {
-      if (mStop != TRACK_TICKS_MAX &&
+      if (mStop != STREAM_TIME_MAX &&
           streamPosition >= mStop) {
-        FillWithZeroes(aOutput, channels, &written, &streamPosition, TRACK_TICKS_MAX);
+        FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX);
         continue;
       }
       if (streamPosition < mBeginProcessing) {
         FillWithZeroes(aOutput, channels, &written, &streamPosition,
                        mBeginProcessing);
         continue;
       }
       if (mLoop) {
@@ -464,17 +464,17 @@ public:
         if (mBufferPosition >= mLoopEnd) {
           mBufferPosition = mLoopStart;
         }
         CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition, mLoopEnd);
       } else {
         if (mBufferPosition < mBufferEnd || mRemainingResamplerTail) {
           CopyFromBuffer(aStream, aOutput, channels, &written, &streamPosition, mBufferEnd);
         } else {
-          FillWithZeroes(aOutput, channels, &written, &streamPosition, TRACK_TICKS_MAX);
+          FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX);
         }
       }
     }
 
     // We've finished if we've gone past mStop, or if we're past mDuration when
     // looping is disabled.
     if (streamPosition >= mStop ||
         (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) {
@@ -505,20 +505,20 @@ public:
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
   double mStart; // including the fractional position between ticks
   // Low pass filter effects from the resampler mean that samples before the
   // start time are influenced by resampling the buffer. mBeginProcessing
-  // includes the extent of this filter. The special value of -TRACK_TICKS_MAX
+  // includes the extent of this filter. The special value of -STREAM_TIME_MAX
   // indicates that the resampler has begun processing.
-  TrackTicks mBeginProcessing;
-  TrackTicks mStop;
+  StreamTime mBeginProcessing;
+  StreamTime mStop;
   nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
   SpeexResamplerState* mResampler;
   // mRemainingResamplerTail, like mBufferPosition, and
   // mBufferEnd, is measured in input buffer samples.
   int mRemainingResamplerTail;
   int32_t mBufferEnd;
   int32_t mLoopStart;
   int32_t mLoopEnd;
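The engine above leans on two sentinel values: STREAM_TIME_MAX for "no stop time scheduled", and -STREAM_TIME_MAX for "the resampler has begun". A hedged stand-alone sketch of that convention, with stand-in types rather than the Gecko definitions:

#include <cstdint>
#include <limits>
#include <cassert>

typedef int64_t StreamTime; // stand-in
const StreamTime STREAM_TIME_MAX = std::numeric_limits<int64_t>::max();

struct Engine {
  StreamTime mStop = STREAM_TIME_MAX; // no stop scheduled yet
  StreamTime mBeginProcessing = 0;

  // Negating the max sentinel gives a value no real position can reach.
  bool BegunResampling() const { return mBeginProcessing == -STREAM_TIME_MAX; }

  bool ShouldOutputSilence(StreamTime aPosition) const {
    // A real stop time compares normally; the sentinel never triggers,
    // because every valid position is below STREAM_TIME_MAX.
    return mStop != STREAM_TIME_MAX && aPosition >= mStop;
  }
};

int main() {
  Engine e;
  assert(!e.ShouldOutputSilence(1 << 20)); // no stop scheduled
  e.mStop = 48000;
  assert(e.ShouldOutputSilence(48000));    // stop reached
  return 0;
}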
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -236,17 +236,17 @@ public:
   virtual ~AudioNodeEngine()
   {
     MOZ_ASSERT(!mNode, "The node reference must be already cleared");
     MOZ_COUNT_DTOR(AudioNodeEngine);
   }
 
   virtual dom::DelayNodeEngine* AsDelayNodeEngine() { return nullptr; }
 
-  virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
+  virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam)
   {
     NS_ERROR("Invalid SetStreamTimeParameter index");
   }
   virtual void SetDoubleParameter(uint32_t aIndex, double aParam)
   {
     NS_ERROR("Invalid SetDoubleParameter index");
   }
   virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam)
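AudioNodeEngine gives SetStreamTimeParameter an erroring default so only engines that actually accept stream-time parameters override it. A minimal sketch of that dispatch-by-index pattern, under stand-in types:

#include <cstdint>
#include <cstdio>

typedef int64_t StreamTime; // stand-in

class EngineBase {
public:
  virtual ~EngineBase() {}
  // Default rejects every index; subclasses claim the indices they support.
  virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) {
    std::fprintf(stderr, "Invalid SetStreamTimeParameter index\n");
  }
};

class StoppableEngine : public EngineBase {
public:
  enum { STOP };
  virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) {
    switch (aIndex) {
    case STOP: mStop = aParam; break;
    default:   EngineBase::SetStreamTimeParameter(aIndex, aParam);
    }
  }
private:
  StreamTime mStop = 0;
};

int main() {
  StoppableEngine engine;
  engine.SetStreamTimeParameter(StoppableEngine::STOP, 48000);
  return 0;
}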
--- a/dom/media/webaudio/AudioNodeExternalInputStream.cpp
+++ b/dom/media/webaudio/AudioNodeExternalInputStream.cpp
@@ -138,17 +138,17 @@ AudioNodeExternalInputStream::ProcessInp
     MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
     interval.mEnd = std::min(interval.mEnd, aTo);
     if (interval.mStart >= interval.mEnd)
       break;
     next = interval.mEnd;
 
     StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
     StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
-    TrackTicks ticks = outputEnd - outputStart;
+    StreamTime ticks = outputEnd - outputStart;
 
     if (interval.mInputIsBlocked) {
       segment.AppendNullData(ticks);
     } else {
       StreamTime inputStart =
         std::min(inputSegment.GetDuration(),
                  source->GraphTimeToStreamTime(interval.mStart));
       StreamTime inputEnd =
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -93,17 +93,17 @@ AudioNodeStream::SetStreamTimeParameter(
                                aContext->DestinationStream(),
                                aContext->DOMTimeToStreamTime(aStreamTime)));
 }
 
 void
 AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                             double aStreamTime)
 {
-  TrackTicks ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
+  StreamTime ticks = TicksFromDestinationTime(aRelativeToStream, aStreamTime);
   mEngine->SetStreamTimeParameter(aIndex, ticks);
 }
 
 void
 AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
 {
   class Message : public ControlMessage {
   public:
@@ -551,17 +551,17 @@ AudioNodeStream::AdvanceOutputSegment()
     AudioChunk copyChunk = mLastChunks[0];
     AudioSegment tmpSegment;
     tmpSegment.AppendAndConsumeChunk(&copyChunk);
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                 segment->GetDuration(), 0, tmpSegment);
   }
 }
 
-TrackTicks
+StreamTime
 AudioNodeStream::GetCurrentPosition()
 {
   NS_ASSERTION(!mFinished, "Don't create another track after finishing");
   return EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->GetDuration();
 }
 
 void
 AudioNodeStream::FinishOutput()
@@ -600,33 +600,33 @@ AudioNodeStream::FractionalTicksFromDest
   GraphTime graphTime =
     aDestination->StreamTimeToGraphTime(destinationStreamTime);
   StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
   double thisFractionalTicks = thisStreamTime + offset;
   MOZ_ASSERT(thisFractionalTicks >= 0.0);
   return thisFractionalTicks;
 }
 
-TrackTicks
+StreamTime
 AudioNodeStream::TicksFromDestinationTime(MediaStream* aDestination,
                                           double aSeconds)
 {
   AudioNodeStream* destination = aDestination->AsAudioNodeStream();
   MOZ_ASSERT(destination);
 
   double thisSeconds =
     FractionalTicksFromDestinationTime(destination, aSeconds);
   // Round to nearest
-  TrackTicks ticks = thisSeconds + 0.5;
+  StreamTime ticks = thisSeconds + 0.5;
   return ticks;
 }
 
 double
 AudioNodeStream::DestinationTimeFromTicks(AudioNodeStream* aDestination,
-                                          TrackTicks aPosition)
+                                          StreamTime aPosition)
 {
   MOZ_ASSERT(SampleRate() == aDestination->SampleRate());
 
   GraphTime graphTime = StreamTimeToGraphTime(aPosition);
   StreamTime destinationTime = aDestination->GraphTimeToStreamTimeOptimistic(graphTime);
   return StreamTimeToSeconds(destinationTime);
 }
 }
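TicksFromDestinationTime keeps the existing round-to-nearest conversion: adding 0.5 before the integer truncation rounds a non-negative fractional tick count to the closest whole frame. The arithmetic in isolation, with stand-in types and an assumed 48 kHz rate:

#include <cstdint>
#include <cassert>

typedef int64_t StreamTime; // stand-in: frames at the stream's sample rate

StreamTime TicksFromSeconds(double aSeconds, double aSampleRate) {
  double fractionalTicks = aSeconds * aSampleRate;
  // Adding 0.5 before the implicit floor of the integer conversion rounds
  // to the nearest whole frame (for non-negative inputs).
  return static_cast<StreamTime>(fractionalTicks + 0.5);
}

int main() {
  assert(TicksFromSeconds(1.0, 48000.0) == 48000);
  assert(TicksFromSeconds(0.0000104, 48000.0) == 0); // 0.4992 rounds down
  assert(TicksFromSeconds(0.0000105, 48000.0) == 1); // 0.504 rounds up
  return 0;
}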
--- a/dom/media/webaudio/AudioNodeStream.h
+++ b/dom/media/webaudio/AudioNodeStream.h
@@ -108,17 +108,17 @@ public:
                               ChannelInterpretation aChannelInterpretation);
   virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) MOZ_OVERRIDE;
   /**
    * Produce the next block of output, before input is provided.
    * ProcessInput() will be called later, and it then should not change
    * the output. This is used only for DelayNodeEngine in a feedback loop.
    */
   void ProduceOutputBeforeInput(GraphTime aFrom);
-  TrackTicks GetCurrentPosition();
+  StreamTime GetCurrentPosition();
   bool IsAudioParamStream() const
   {
     return mAudioParamStream;
   }
   const OutputChunks& LastChunks() const
   {
     return mLastChunks;
@@ -140,27 +140,27 @@ public:
   /**
    * Convert a time in seconds on the destination stream to ticks
    * on this stream, including fractional position between ticks.
    */
   double FractionalTicksFromDestinationTime(AudioNodeStream* aDestination,
                                             double aSeconds);
   /**
-   * Convert a time in seconds on the destination stream to nearest TrackTicks
+   * Convert a time in seconds on the destination stream to the nearest StreamTime
    * on this stream.
    */
-  TrackTicks TicksFromDestinationTime(MediaStream* aDestination,
+  StreamTime TicksFromDestinationTime(MediaStream* aDestination,
                                       double aSeconds);
   /**
    * Get the destination stream time in seconds corresponding to a position on
    * this stream.
    */
   double DestinationTimeFromTicks(AudioNodeStream* aDestination,
-                                  TrackTicks aPosition);
+                                  StreamTime aPosition);
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE;
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE;
 
   void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                      AudioNodeSizes& aUsage) const;
 
 protected:
--- a/dom/media/webaudio/BiquadFilterNode.cpp
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -180,17 +180,17 @@ public:
       // Adjust the number of biquads based on the number of channels
       mBiquads.SetLength(aInput.mChannelData.Length());
     }
 
     uint32_t numberOfChannels = mBiquads.Length();
     AllocateAudioBlock(numberOfChannels, aOutput);
 
-    TrackTicks pos = aStream->GetCurrentPosition();
+    StreamTime pos = aStream->GetCurrentPosition();
 
     double freq = mFrequency.GetValueAtTime(pos);
     double q = mQ.GetValueAtTime(pos);
     double gain = mGain.GetValueAtTime(pos);
     double detune = mDetune.GetValueAtTime(pos);
 
     for (uint32_t i = 0; i < numberOfChannels; ++i) {
       const float* input;
--- a/dom/media/webaudio/DelayNode.cpp
+++ b/dom/media/webaudio/DelayNode.cpp
@@ -131,17 +131,17 @@ public:
       double delayFrames = mDelay.GetValue() * sampleRate;
       double delayFramesClamped =
         std::max(minDelay, std::min(delayFrames, maxDelay));
       mBuffer.Read(delayFramesClamped, aOutput, channelInterpretation);
     } else {
       // Compute the delay values for the duration of the input AudioChunk
       // If this DelayNode is in a cycle, make sure the delay value is at least
       // one block.
-      TrackTicks tick = mSource->GetCurrentPosition();
+      StreamTime tick = mSource->GetCurrentPosition();
       double computedDelay[WEBAUDIO_BLOCK_SIZE];
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
         double delayAtTick =
           mDelay.GetValueAtTime(tick, counter) * sampleRate;
         double delayAtTickClamped =
           std::max(minDelay, std::min(delayAtTick, maxDelay));
         computedDelay[counter] = delayAtTickClamped;
       }
       mBuffer.Read(computedDelay, aOutput, channelInterpretation);
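The DelayNode hunk clamps each per-tick delay into [minDelay, maxDelay] with a max-of-min idiom; the minimum matters when the node sits in a cycle and must delay by at least one block. A small stand-alone illustration of the idiom:

#include <algorithm>
#include <cassert>

double ClampDelayFrames(double aFrames, double aMin, double aMax) {
  // std::min caps the value at aMax; std::max then lifts it to at least aMin.
  return std::max(aMin, std::min(aFrames, aMax));
}

int main() {
  assert(ClampDelayFrames(500.0, 128.0, 48000.0) == 500.0);
  assert(ClampDelayFrames(10.0, 128.0, 48000.0) == 128.0); // cycle: >= one block
  assert(ClampDelayFrames(1e9, 128.0, 48000.0) == 48000.0);
  return 0;
}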
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -106,17 +106,17 @@ public:
     const uint32_t channelCount = aInput.mChannelData.Length();
     if (mCompressor->numberOfChannels() != channelCount) {
       // Create a new compressor object with a new channel count
       mCompressor = new WebCore::DynamicsCompressor(aStream->SampleRate(),
                                                     aInput.mChannelData.Length());
     }
-    TrackTicks pos = aStream->GetCurrentPosition();
+    StreamTime pos = aStream->GetCurrentPosition();
     mCompressor->setParameterValue(DynamicsCompressor::ParamThreshold,
                                    mThreshold.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamKnee,
                                    mKnee.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamRatio,
                                    mRatio.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamAttack,
                                    mAttack.GetValueAtTime(pos));
--- a/dom/media/webaudio/GainNode.cpp
+++ b/dom/media/webaudio/GainNode.cpp
@@ -82,17 +82,17 @@ public:
       // timeline at hand, and then for each channel, multiply the values
       // in the buffer with the gain vector.
       AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
 
       // Compute the gain values for the duration of the input AudioChunk
       // XXX we need to add a method to AudioEventTimeline to compute this buffer directly.
       float computedGain[WEBAUDIO_BLOCK_SIZE];
       for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
-        TrackTicks tick = aStream->GetCurrentPosition();
+        StreamTime tick = aStream->GetCurrentPosition();
         computedGain[counter] = mGain.GetValueAtTime(tick, counter) * aInput.mVolume;
       }
 
       // Apply the gain to the output buffer
       for (size_t channel = 0; channel < aOutput->mChannelData.Length(); ++channel) {
         const float* inputBuffer = static_cast<const float*> (aInput.mChannelData[channel]);
         float* buffer = static_cast<float*> (const_cast<void*>
                           (aOutput->mChannelData[channel]));
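The GainNode hunk evaluates the gain timeline once per sample into a vector, then multiplies each channel by it: a-rate (per-sample) automation rather than a single k-rate multiplier per block. A simplified stand-alone version, assuming the usual 128-frame WebAudio block:

#include <cstddef>

const size_t WEBAUDIO_BLOCK_SIZE = 128; // matches the constant used above

void ApplyGainVector(const float* aInput, const float* aGain, float* aOutput) {
  // One multiply per sample, so scheduled gain ramps take effect mid-block.
  for (size_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    aOutput[i] = aInput[i] * aGain[i];
  }
}

int main() {
  float in[WEBAUDIO_BLOCK_SIZE] = {0};
  float gain[WEBAUDIO_BLOCK_SIZE];
  float out[WEBAUDIO_BLOCK_SIZE];
  for (size_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    gain[i] = 0.5f; // a flat gain vector for the demo
  }
  ApplyGainVector(in, gain, out);
  return 0;
}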
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -60,17 +60,17 @@ private:
 class OscillatorNodeEngine : public AudioNodeEngine
 {
 public:
   OscillatorNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination)
     : AudioNodeEngine(aNode)
     , mSource(nullptr)
     , mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
     , mStart(-1)
-    , mStop(TRACK_TICKS_MAX)
+    , mStop(STREAM_TIME_MAX)
     // Keep the default values in sync with OscillatorNode::OscillatorNode.
     , mFrequency(440.f)
     , mDetune(0.f)
     , mType(OscillatorType::Sine)
     , mPhase(0.)
     // mSquare, mTriangle, and mSaw are not used for default type "sine".
     // They are initialized if and when switching to the OscillatorTypes that
     // use them.
@@ -111,17 +111,17 @@ public:
       mDetune = aValue;
       WebAudioUtils::ConvertAudioParamToTicks(mDetune, mSource, mDestination);
       break;
     default:
       NS_ERROR("Bad OscillatorNodeEngine TimelineParameter");
     }
   }
 
-  virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
+  virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam)
   {
     switch (aIndex) {
     case START: mStart = aParam; break;
     case STOP: mStop = aParam; break;
     default:
       NS_ERROR("Bad OscillatorNodeEngine StreamTimeParameter");
     }
   }
@@ -202,17 +202,17 @@ public:
   }
 
   // Square and triangle are using a bipolar band-limited impulse train, saw is
   // using a normal band-limited impulse train.
   bool UsesBipolarBLIT() {
     return mType == OscillatorType::Square || mType == OscillatorType::Triangle;
   }
 
-  void UpdateParametersIfNeeded(TrackTicks ticks, size_t count)
+  void UpdateParametersIfNeeded(StreamTime ticks, size_t count)
   {
     double frequency, detune;
 
     bool simpleFrequency = mFrequency.HasSimpleValue();
     bool simpleDetune = mDetune.HasSimpleValue();
 
     // Shortcut if frequency-related AudioParams are not automated, and we
     // already have computed the frequency information and related parameters.
@@ -244,21 +244,21 @@ public:
     // Even number of harmonics for bipolar blit, odd otherwise.
     mNumberOfHarmonics = UsesBipolarBLIT() ?
       2 * floor(0.5 * mSignalPeriod) :
       2 * floor(0.5 * mSignalPeriod) + 1;
     mPhaseIncrement = mType == OscillatorType::Sine ? 2 * M_PI / mSignalPeriod :
       M_PI / mSignalPeriod;
     mAmplitudeAtZero = mNumberOfHarmonics / mSignalPeriod;
   }
 
-  void FillBounds(float* output, TrackTicks ticks,
+  void FillBounds(float* output, StreamTime ticks,
                   uint32_t& start, uint32_t& end)
   {
     MOZ_ASSERT(output);
-    static_assert(TrackTicks(WEBAUDIO_BLOCK_SIZE) < UINT_MAX,
+    static_assert(StreamTime(WEBAUDIO_BLOCK_SIZE) < UINT_MAX,
                   "WEBAUDIO_BLOCK_SIZE overflows iterator bounds.");
     start = 0;
     if (ticks < mStart) {
       start = mStart - ticks;
       for (uint32_t i = 0; i < start; ++i) {
         output[i] = 0.0;
       }
     }
@@ -299,59 +299,59 @@ public:
     } else {
       blit = sin(mNumberOfHarmonics * mPhase);
       blit /= mSignalPeriod * denom;
     }
     return blit;
   }
 
-  void ComputeSine(float * aOutput, TrackTicks ticks, uint32_t aStart, uint32_t aEnd)
+  void ComputeSine(float * aOutput, StreamTime ticks, uint32_t aStart, uint32_t aEnd)
   {
     for (uint32_t i = aStart; i < aEnd; ++i) {
       UpdateParametersIfNeeded(ticks, i);
 
       aOutput[i] = sin(mPhase);
 
       IncrementPhase();
     }
   }
 
-  void ComputeSquare(float * aOutput, TrackTicks ticks, uint32_t aStart, uint32_t aEnd)
+  void ComputeSquare(float * aOutput, StreamTime ticks, uint32_t aStart, uint32_t aEnd)
   {
     for (uint32_t i = aStart; i < aEnd; ++i) {
       UpdateParametersIfNeeded(ticks, i);
       // Integration to get us a square. It turns out we can have a
       // pure integrator here.
       mSquare = mSquare * sLeak + BipolarBLIT();
       aOutput[i] = mSquare;
       // maybe we want to apply a gain, the wg has not decided yet
       aOutput[i] *= 1.5;
       IncrementPhase();
     }
   }
 
-  void ComputeSawtooth(float * aOutput, TrackTicks ticks, uint32_t aStart, uint32_t aEnd)
+  void ComputeSawtooth(float * aOutput, StreamTime ticks, uint32_t aStart, uint32_t aEnd)
   {
     float dcoffset;
     for (uint32_t i = aStart; i < aEnd; ++i) {
       UpdateParametersIfNeeded(ticks, i);
       // DC offset so the Saw does not ramp up to infinity when integrating.
       dcoffset = mFinalFrequency / mSource->SampleRate();
       // Integrate and offset so we get mAmplitudeAtZero sawtooth. We have a
       // very low frequency component somewhere here, but I'm not sure where.
       mSaw = mSaw * sLeak + (UnipolarBLIT() - dcoffset);
       // reverse the saw so we are spec compliant
       aOutput[i] = -mSaw * 1.5;
 
       IncrementPhase();
     }
   }
 
-  void ComputeTriangle(float * aOutput, TrackTicks ticks, uint32_t aStart, uint32_t aEnd)
+  void ComputeTriangle(float * aOutput, StreamTime ticks, uint32_t aStart, uint32_t aEnd)
   {
     for (uint32_t i = aStart; i < aEnd; ++i) {
       UpdateParametersIfNeeded(ticks, i);
       // Integrate to get a square
       mSquare += BipolarBLIT();
       // Leaky integrate to get a triangle. We get too much dc offset if we don't
       // leaky integrate here.
       // C6 = k0 / period
@@ -361,17 +361,17 @@ public:
       // DC Block, and scale back to [-1.0; 1.0]
       aOutput[i] = mDCBlocker.Process(mTriangle) / (mSignalPeriod/2) * 1.5;
 
       IncrementPhase();
     }
   }
 
   void ComputeCustom(float* aOutput,
-                     TrackTicks ticks,
+                     StreamTime ticks,
                      uint32_t aStart,
                      uint32_t aEnd)
   {
     MOZ_ASSERT(mPeriodicWave, "No custom waveform data");
 
     uint32_t periodicWaveSize = mPeriodicWave->periodicWaveSize();
     // Mask to wrap wave data indices into the range [0,periodicWaveSize).
     uint32_t indexMask = periodicWaveSize - 1;
@@ -421,17 +421,17 @@ public:
   virtual void ProcessBlock(AudioNodeStream* aStream,
                             const AudioChunk& aInput,
                             AudioChunk* aOutput,
                             bool* aFinished) MOZ_OVERRIDE
   {
     MOZ_ASSERT(mSource == aStream, "Invalid source stream");
 
-    TrackTicks ticks = aStream->GetCurrentPosition();
+    StreamTime ticks = aStream->GetCurrentPosition();
     if (mStart == -1) {
       ComputeSilence(aOutput);
       return;
     }
 
     if (ticks >= mStop) {
       // We've finished playing.
       ComputeSilence(aOutput);
@@ -498,18 +498,18 @@ public:
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
   DCBlocker mDCBlocker;
   AudioNodeStream* mSource;
   AudioNodeStream* mDestination;
-  TrackTicks mStart;
-  TrackTicks mStop;
+  StreamTime mStart;
+  StreamTime mStop;
   AudioParamTimeline mFrequency;
   AudioParamTimeline mDetune;
   OscillatorType mType;
   float mPhase;
   float mFinalFrequency;
   uint32_t mNumberOfHarmonics;
   float mSignalPeriod;
   float mAmplitudeAtZero;
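ComputeSquare above integrates a bipolar band-limited impulse train with a leaky integrator, y[n] = leak * y[n-1] + x[n], where a leak slightly below 1.0 keeps DC offset from accumulating. A generic sketch of that filter with an assumed leak constant (Gecko's actual sLeak value is not shown in this hunk):

#include <vector>

class LeakyIntegrator {
public:
  explicit LeakyIntegrator(float aLeak) : mLeak(aLeak), mState(0.0f) {}
  float Process(float aInput) {
    // Old state decays geometrically instead of accumulating forever.
    mState = mState * mLeak + aInput;
    return mState;
  }
private:
  float mLeak;
  float mState;
};

int main() {
  LeakyIntegrator integrator(0.999f); // assumed leak value
  std::vector<float> out;
  for (int i = 0; i < 128; ++i) {
    float impulseTrain = (i % 64 == 0) ? 1.0f : 0.0f; // toy input, not a BLIT
    out.push_back(integrator.Process(impulseTrain));
  }
  return 0;
}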
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -95,17 +95,17 @@ private:
     Mutex mMutex;
     // The list representing the queue.
     BufferList mBufferList;
   };
 
 public:
   explicit SharedBuffers(float aSampleRate)
     : mOutputQueue("SharedBuffers::outputQueue")
-    , mDelaySoFar(TRACK_TICKS_MAX)
+    , mDelaySoFar(STREAM_TIME_MAX)
     , mSampleRate(aSampleRate)
     , mLatency(0.0)
     , mDroppingBuffers(false)
   {
   }
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
   {
@@ -179,57 +179,57 @@ public:
   AudioChunk GetOutputBuffer()
   {
     MOZ_ASSERT(!NS_IsMainThread());
     AudioChunk buffer;
 
     {
       MutexAutoLock lock(mOutputQueue.Lock());
       if (mOutputQueue.ReadyToConsume() > 0) {
-        if (mDelaySoFar == TRACK_TICKS_MAX) {
+        if (mDelaySoFar == STREAM_TIME_MAX) {
           mDelaySoFar = 0;
         }
         buffer = mOutputQueue.Consume();
       } else {
         // If we're out of buffers to consume, just output silence
         buffer.SetNull(WEBAUDIO_BLOCK_SIZE);
-        if (mDelaySoFar != TRACK_TICKS_MAX) {
+        if (mDelaySoFar != STREAM_TIME_MAX) {
           // Remember the delay that we just hit
           mDelaySoFar += WEBAUDIO_BLOCK_SIZE;
         }
       }
     }
 
     return buffer;
   }
 
-  TrackTicks DelaySoFar() const
+  StreamTime DelaySoFar() const
   {
     MOZ_ASSERT(!NS_IsMainThread());
-    return mDelaySoFar == TRACK_TICKS_MAX ? 0 : mDelaySoFar;
+    return mDelaySoFar == STREAM_TIME_MAX ? 0 : mDelaySoFar;
   }
 
   void Reset()
   {
     MOZ_ASSERT(!NS_IsMainThread());
-    mDelaySoFar = TRACK_TICKS_MAX;
+    mDelaySoFar = STREAM_TIME_MAX;
     mLatency = 0.0f;
     {
       MutexAutoLock lock(mOutputQueue.Lock());
       mOutputQueue.Clear();
     }
     mLastEventTime = TimeStamp();
   }
 
 private:
   OutputQueue mOutputQueue;
   // How much delay we've seen so far. This measures the amount of delay
   // caused by the main thread lagging behind in producing output buffers.
-  // TRACK_TICKS_MAX means that we have not received our first buffer yet.
-  TrackTicks mDelaySoFar;
+  // STREAM_TIME_MAX means that we have not received our first buffer yet.
+  StreamTime mDelaySoFar;
   // The samplerate of the context.
   float mSampleRate;
   // This is the latency caused by the buffering. If this grows too high, we
   // will drop buffers until it is acceptable.
   float mLatency;
   // This is the time at which we last produced a buffer, to detect if the main
   // thread has been blocked.
   TimeStamp mLastEventTime;
@@ -347,17 +347,17 @@ private:
     }
   }
 
   void SendBuffersToMainThread(AudioNodeStream* aStream)
   {
     MOZ_ASSERT(!NS_IsMainThread());
 
     // we now have a full input buffer ready to be sent to the main thread.
-    TrackTicks playbackTick = mSource->GetCurrentPosition();
+    StreamTime playbackTick = mSource->GetCurrentPosition();
     // Add the duration of the current sample
     playbackTick += WEBAUDIO_BLOCK_SIZE;
     // Add the delay caused by the main thread
     playbackTick += mSharedBuffers->DelaySoFar();
     // Compute the playback time in the coordinate system of the destination
     double playbackTime =
       mSource->DestinationTimeFromTicks(mDestination, playbackTick);
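SendBuffersToMainThread above computes a buffer's playback position as "current position, plus the block being produced now, plus whatever delay the lagging main thread has accumulated". The same arithmetic in stand-in form:

#include <cstdint>
#include <cassert>

typedef int64_t StreamTime;                 // stand-in
const StreamTime WEBAUDIO_BLOCK_SIZE = 128; // the WebAudio block size

StreamTime PlaybackTick(StreamTime aCurrentPosition, StreamTime aDelaySoFar) {
  StreamTime tick = aCurrentPosition;
  tick += WEBAUDIO_BLOCK_SIZE; // the block being produced right now
  tick += aDelaySoFar;         // blocks the main thread still owes us
  return tick;
}

int main() {
  // 1024 frames played, main thread three blocks behind:
  assert(PlaybackTick(1024, 3 * WEBAUDIO_BLOCK_SIZE) == 1024 + 128 + 384);
  return 0;
}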
--- a/dom/media/webrtc/MediaEngine.h
+++ b/dom/media/webrtc/MediaEngine.h
@@ -112,17 +112,17 @@ public:
   /* tell the source if there are any direct listeners attached */
   virtual void SetDirectListeners(bool) = 0;
 
   /* Called when the stream wants more data */
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime) = 0;
+                          StreamTime &aLastEndTime) = 0;
 
   /* Stop the device and release the corresponding MediaStream */
   virtual nsresult Stop(SourceMediaStream *aSource, TrackID aID) = 0;
 
   /* Change device configuration. */
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
--- a/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
@@ -47,17 +47,17 @@ MediaEngineCameraVideoSource::Intersect(
   aA.mMax = std::min(aA.mMax, aB.mMax);
   return true;
 }
 
 // guts for appending data to the MSG track
 bool
 MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
                                             layers::Image* aImage,
                                             TrackID aID,
-                                            TrackTicks delta)
+                                            StreamTime delta)
 {
   MOZ_ASSERT(aSource);
 
   VideoSegment segment;
   nsRefPtr<layers::Image> image = aImage;
   IntSize size(image ? mWidth : 0, image ? mHeight : 0);
   segment.AppendFrame(image.forget(), delta, size);
--- a/dom/media/webrtc/MediaEngineCameraVideoSource.h
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.h
@@ -60,17 +60,17 @@ public:
 protected:
   ~MediaEngineCameraVideoSource() {}
 
   // guts for appending data to the MSG track
   virtual bool AppendToTrack(SourceMediaStream* aSource,
                              layers::Image* aImage,
                              TrackID aID,
-                             TrackTicks delta);
+                             StreamTime delta);
   static bool IsWithin(int32_t n, const dom::ConstrainLongRange& aRange);
   static bool IsWithin(double n, const dom::ConstrainDoubleRange& aRange);
   static int32_t Clamp(int32_t n, const dom::ConstrainLongRange& aRange);
   static bool AreIntersecting(const dom::ConstrainLongRange& aA,
                               const dom::ConstrainLongRange& aB);
   static bool Intersect(dom::ConstrainLongRange& aA,
                         const dom::ConstrainLongRange& aB);
   void GuessCapability(const VideoTrackConstraintsN& aConstraints,
--- a/dom/media/webrtc/MediaEngineDefault.cpp
+++ b/dom/media/webrtc/MediaEngineDefault.cpp
@@ -224,28 +224,28 @@ MediaEngineDefaultVideoSource::Notify(ns
   return NS_OK;
 }
 
 void
 MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                           SourceMediaStream *aSource,
                                           TrackID aID,
                                           StreamTime aDesiredTime,
-                                          TrackTicks &aLastEndTime)
+                                          StreamTime &aLastEndTime)
 {
   // AddTrack takes ownership of segment
   VideoSegment segment;
   MonitorAutoLock lock(mMonitor);
   if (mState != kStarted) {
     return;
   }
 
   // Note: we're not giving up mImage here
   nsRefPtr<layers::Image> image = mImage;
-  TrackTicks delta = aDesiredTime - aLastEndTime;
+  StreamTime delta = aDesiredTime - aLastEndTime;
 
   if (delta > 0) {
     // nullptr images are allowed
     IntSize size(image ? mOpts.mWidth : 0, image ? mOpts.mHeight : 0);
     segment.AppendFrame(image.forget(), delta, size);
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     if (aSource->AppendToTrack(aID, &segment)) {
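Every NotifyPull implementation in this patch follows the same pulled-video pattern: extend the track from the last appended end time up to the desired time by repeating the current frame, and only advance the bookkeeping if the append succeeded. A stand-in sketch of the pattern (Track and AppendNullFrames are hypothetical simplifications):

#include <cstdint>

typedef int64_t StreamTime; // stand-in: frames at the graph rate

struct Track {
  StreamTime mEnd = 0;
  // Pretend append; a real segment would carry the image and its size.
  bool AppendNullFrames(StreamTime aDelta) { mEnd += aDelta; return true; }
};

void NotifyPull(Track& aTrack, StreamTime aDesiredTime,
                StreamTime& aLastEndTime) {
  StreamTime delta = aDesiredTime - aLastEndTime;
  if (delta > 0 && aTrack.AppendNullFrames(delta)) {
    // Only advance if the append succeeded; the track may not have been
    // added yet, or may already be removed or finished.
    aLastEndTime = aDesiredTime;
  }
}

int main() {
  Track track;
  StreamTime lastEnd = 0;
  NotifyPull(track, 480, lastEnd); // the graph wants 480 frames of video
  return lastEnd == 480 ? 0 : 1;
}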
--- a/dom/media/webrtc/MediaEngineDefault.h
+++ b/dom/media/webrtc/MediaEngineDefault.h
@@ -49,17 +49,17 @@ public:
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
                           int32_t aPlayoutDelay) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime);
+                          StreamTime &aLastEndTime);
   virtual bool SatisfiesConstraintSets(
       const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets)
   {
     return true;
   }
 
   virtual bool IsFake() {
     return true;
@@ -118,17 +118,17 @@ public:
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
                           int32_t aPlayoutDelay) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime) {}
+                          StreamTime &aLastEndTime) {}
   virtual bool IsFake() {
     return true;
   }
 
   virtual const MediaSourceType GetMediaSource() {
     return MediaSourceType::Microphone;
   }
--- a/dom/media/webrtc/MediaEngineGonkVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineGonkVideoSource.cpp
@@ -37,28 +37,28 @@ NS_IMPL_RELEASE_INHERITED(MediaEngineGon
 // Called if the graph thinks it's running out of buffered video; repeat
 // the last frame for whatever minimum period it thinks it needs. Note that
 // this means that no *real* frame can be inserted during this period.
 void
 MediaEngineGonkVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                        SourceMediaStream* aSource,
                                        TrackID aID,
                                        StreamTime aDesiredTime,
-                                       TrackTicks& aLastEndTime)
+                                       StreamTime& aLastEndTime)
 {
   VideoSegment segment;
 
   MonitorAutoLock lock(mMonitor);
   // B2G does AddTrack, but holds kStarted until the hardware changes state.
   // So mState could be kReleased here. We really don't care about the state,
   // though.
   // Note: we're not giving up mImage here
   nsRefPtr<layers::Image> image = mImage;
-  TrackTicks delta = aDesiredTime - aLastEndTime;
+  StreamTime delta = aDesiredTime - aLastEndTime;
   LOGFRAME(("NotifyPull, desired = %ld, delta = %ld %s", (int64_t) aDesiredTime,
             (int64_t) delta, image ? "" : "<null>"));
 
   // Bug 846188 We may want to limit incoming frames to the requested frame rate
   // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
   // cause issues.
   // We may want to signal if the actual frame rate is below mMinFPS -
   // cameras often don't return the requested frame rate especially in low
--- a/dom/media/webrtc/MediaEngineGonkVideoSource.h
+++ b/dom/media/webrtc/MediaEngineGonkVideoSource.h
@@ -58,17 +58,17 @@ public:
                             const MediaEnginePrefs &aPrefs) MOZ_OVERRIDE;
   virtual nsresult Deallocate() MOZ_OVERRIDE;
   virtual nsresult Start(SourceMediaStream* aStream, TrackID aID) MOZ_OVERRIDE;
   virtual nsresult Stop(SourceMediaStream* aSource, TrackID aID) MOZ_OVERRIDE;
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream* aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks& aLastEndTime) MOZ_OVERRIDE;
+                          StreamTime& aLastEndTime) MOZ_OVERRIDE;
   virtual bool SatisfiesConstraintSets(
       const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets)
   {
     return true;
   }
 
   void OnHardwareStateChange(HardwareState aState);
   void GetRotation();
--- a/dom/media/webrtc/MediaEngineTabVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -188,24 +188,24 @@ MediaEngineTabVideoSource::Start(SourceM
   return NS_OK;
 }
 
 void
 MediaEngineTabVideoSource::NotifyPull(MediaStreamGraph*,
                                       SourceMediaStream* aSource,
                                       TrackID aID,
                                       StreamTime aDesiredTime,
-                                      TrackTicks& aLastEndTime)
+                                      StreamTime& aLastEndTime)
 {
   VideoSegment segment;
   MonitorAutoLock mon(mMonitor);
 
   // Note: we're not giving up mImage here
   nsRefPtr<layers::CairoImage> image = mImage;
-  TrackTicks delta = aDesiredTime - aLastEndTime;
+  StreamTime delta = aDesiredTime - aLastEndTime;
   if (delta > 0) {
     // nullptr images are allowed
     gfx::IntSize size = image ? image->GetSize() : IntSize(0, 0);
     segment.AppendFrame(image.forget().downcast<layers::Image>(), delta, size);
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     if (aSource->AppendToTrack(aID, &(segment))) {
       aLastEndTime = aDesiredTime;
--- a/dom/media/webrtc/MediaEngineTabVideoSource.h
+++ b/dom/media/webrtc/MediaEngineTabVideoSource.h
@@ -20,17 +20,17 @@ class MediaEngineTabVideoSource : public
     virtual void GetName(nsAString_internal&);
     virtual void GetUUID(nsAString_internal&);
     virtual nsresult Allocate(const VideoTrackConstraintsN &,
                               const mozilla::MediaEnginePrefs&);
     virtual nsresult Deallocate();
     virtual nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID);
     virtual void SetDirectListeners(bool aHasDirectListeners) {};
-    virtual void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, mozilla::TrackTicks&);
+    virtual void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, mozilla::StreamTime&);
     virtual nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID);
     virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t);
     virtual bool IsFake();
     virtual const MediaSourceType GetMediaSource() {
       return MediaSourceType::Browser;
     }
     virtual bool SatisfiesConstraintSets(
       const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets)
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -93,17 +93,17 @@ public:
                             const MediaEnginePrefs& aPrefs);
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream* aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks& aLastEndTime);
+                          StreamTime& aLastEndTime);
   virtual const MediaSourceType GetMediaSource() {
     return mMediaSource;
   }
 
   virtual nsresult TakePhoto(PhotoCallback* aCallback)
   {
     return NS_ERROR_NOT_IMPLEMENTED;
   }
@@ -176,17 +176,17 @@ public:
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
                           int32_t aPlayoutDelay);
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream* aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks& aLastEndTime);
+                          StreamTime& aLastEndTime);
   virtual bool IsFake() {
     return false;
   }
 
   virtual const MediaSourceType GetMediaSource() {
     return MediaSourceType::Microphone;
   }
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -392,21 +392,21 @@ MediaEngineWebRTCAudioSource::Stop(Sourc
   return NS_OK;
 }
 
 void
 MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
-                                         TrackTicks &aLastEndTime)
+                                         StreamTime &aLastEndTime)
 {
   // Ignore - we push audio data
 #ifdef DEBUG
-  TrackTicks delta = aDesiredTime - aLastEndTime;
+  StreamTime delta = aDesiredTime - aLastEndTime;
   LOG(("Audio: NotifyPull: aDesiredTime %ld, delta %ld", (int64_t) aDesiredTime,
        (int64_t) delta));
   aLastEndTime = aDesiredTime;
 #endif
 }
 
 void
 MediaEngineWebRTCAudioSource::Init()
--- a/dom/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -120,26 +120,26 @@ MediaEngineWebRTCVideoSource::DeliverFra
 // Called if the graph thinks it's running out of buffered video; repeat
 // the last frame for whatever minimum period it thinks it needs. Note that
 // this means that no *real* frame can be inserted during this period.
 void
 MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream* aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
-                                         TrackTicks &aLastEndTime)
+                                         StreamTime &aLastEndTime)
 {
   VideoSegment segment;
 
   MonitorAutoLock lock(mMonitor);
   // B2G does AddTrack, but holds kStarted until the hardware changes state.
   // So mState could be kReleased here. We really don't care about the state,
   // though.
 
-  TrackTicks delta = aDesiredTime - aLastEndTime;
+  StreamTime delta = aDesiredTime - aLastEndTime;
   LOGFRAME(("NotifyPull, desired = %ld, delta = %ld %s", (int64_t) aDesiredTime,
             (int64_t) delta, mImage.get() ? "" : "<null>"));
 
   // Bug 846188 We may want to limit incoming frames to the requested frame rate
   // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
   // cause issues.
   // We may want to signal if the actual frame rate is below mMinFPS -
   // cameras often don't return the requested frame rate especially in low
--- a/dom/media/webspeech/recognition/SpeechStreamListener.cpp
+++ b/dom/media/webspeech/recognition/SpeechStreamListener.cpp
@@ -26,17 +26,17 @@ SpeechStreamListener::~SpeechStreamListe
   mRecognition.swap(forgottenRecognition);
   NS_ProxyRelease(mainThread,
                   static_cast<DOMEventTargetHelper*>(forgottenRecognition));
 }
 
 void
 SpeechStreamListener::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                                TrackID aID,
-                                               TrackTicks aTrackOffset,
+                                               StreamTime aTrackOffset,
                                                uint32_t aTrackEvents,
                                                const MediaSegment& aQueuedMedia)
 {
   AudioSegment* audio = const_cast<AudioSegment*>(
     static_cast<const AudioSegment*>(&aQueuedMedia));
 
   AudioSegment::ChunkIterator iterator(*audio);
   while (!iterator.IsEnded()) {
--- a/dom/media/webspeech/recognition/SpeechStreamListener.h
+++ b/dom/media/webspeech/recognition/SpeechStreamListener.h
@@ -20,17 +20,17 @@ class SpeechRecognition;
 class SpeechStreamListener : public MediaStreamListener
 {
 public:
   explicit SpeechStreamListener(SpeechRecognition* aRecognition);
   ~SpeechStreamListener();
 
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackTicks aTrackOffset,
+                                        StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
 
   virtual void NotifyEvent(MediaStreamGraph* aGraph,
                            MediaStreamListener::MediaStreamGraphEvent event) MOZ_OVERRIDE;
 
 private:
   template<typename SampleFormatType>
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -935,27 +935,27 @@ nsresult MediaPipeline::PipelineTranspor
   pipeline_->increment_rtcp_packets_sent();
   return pipeline_->SendPacket(pipeline_->rtcp_.transport_, inner_data,
                                out_len);
 }
 
 // Called if we're attached with AddDirectListener()
 void MediaPipelineTransmit::PipelineListener::
 NotifyRealtimeData(MediaStreamGraph* graph, TrackID tid,
-                   TrackTicks offset,
+                   StreamTime offset,
                    uint32_t events,
                    const MediaSegment& media) {
   MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyRealtimeData()");
 
   NewData(graph, tid, offset, events, media);
 }
 
 void MediaPipelineTransmit::PipelineListener::
 NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                         TrackTicks offset,
+                         StreamTime offset,
                          uint32_t events,
                          const MediaSegment& queued_media) {
   MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyQueuedTrackChanges()");
 
   // ignore non-direct data if we're also getting direct data
   if (!direct_connect_) {
     NewData(graph, tid, offset, events, queued_media);
   }
@@ -967,17 +967,17 @@ NotifyQueuedTrackChanges(MediaStreamGrap
 #define I420SIZE(x,y) (YSIZE((x),(y)) + 2 * CRSIZE((x),(y)))
 
 // XXX NOTE: this code will have to change when we get support for multiple tracks of type
 // in a MediaStream and especially in a PeerConnection stream. bug 1056650
 // It should be matching on the "correct" track for the pipeline, not just "any video track".
 void MediaPipelineTransmit::PipelineListener::
 NewData(MediaStreamGraph* graph, TrackID tid,
-        TrackTicks offset,
+        StreamTime offset,
         uint32_t events,
         const MediaSegment& media) {
   if (!active_) {
     MOZ_MTLOG(ML_DEBUG, "Discarding packets because transport not ready");
     return;
   }
 
   if (track_id_ != TRACK_INVALID) {
@@ -1538,17 +1538,17 @@ void MediaPipelineReceiveVideo::Pipeline
 void MediaPipelineReceiveVideo::PipelineListener::
 NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) {
   ReentrantMonitorAutoEnter enter(monitor_);
 
 #ifdef MOZILLA_INTERNAL_API
   nsRefPtr<layers::Image> image = image_;
   // our constructor sets track_rate_ to the graph rate
   MOZ_ASSERT(track_rate_ == source_->GraphRate());
-  TrackTicks delta = desired_time - played_ticks_;
+  StreamTime delta = desired_time - played_ticks_;
 
   // Don't append if we've already provided a frame that supposedly
   // goes past the current aDesiredTime. Doing so means a negative
   // delta and thus messes up handling of the graph
   if (delta > 0) {
     VideoSegment segment;
     segment.AppendFrame(image.forget(), delta, IntSize(width_, height_));
     // Handle track not actually added yet or removed/finished
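The receive-side NotifyPull above guards against a negative delta: frames may already have been provided past desired_time, and appending a negative duration would corrupt the graph's bookkeeping. A stand-in sketch of just that guard:

#include <cstdint>

typedef int64_t StreamTime; // stand-in

struct Receiver {
  StreamTime played_ticks_ = 0;

  // Frames to append for this pull: zero when we are already at or ahead
  // of the desired time, so nothing negative ever reaches the segment.
  StreamTime FramesToAppend(StreamTime desired_time) const {
    StreamTime delta = desired_time - played_ticks_;
    return delta > 0 ? delta : 0;
  }
};

int main() {
  Receiver r;
  r.played_ticks_ = 960;
  return r.FramesToAppend(480) == 0 ? 0 : 1; // already ahead: append nothing
}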
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -458,30 +458,30 @@ public:
   void SetEnabled(bool enabled) { enabled_ = enabled; }
   TrackID trackid() {
     MutexAutoLock lock(mMutex);
     return track_id_external_;
   }
 
   // Implement MediaStreamListener
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                                        TrackTicks offset,
+                                        StreamTime offset,
                                         uint32_t events,
                                         const MediaSegment& queued_media) MOZ_OVERRIDE;
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           StreamTime aDesiredTime) MOZ_OVERRIDE {}
 
   // Implement MediaStreamDirectListener
   virtual void NotifyRealtimeData(MediaStreamGraph* graph, TrackID tid,
-                                  TrackTicks offset,
+                                  StreamTime offset,
                                   uint32_t events,
                                   const MediaSegment& media) MOZ_OVERRIDE;
 
 private:
   void NewData(MediaStreamGraph* graph, TrackID tid,
-               TrackTicks offset,
+               StreamTime offset,
                uint32_t events,
                const MediaSegment& media);
 
   virtual void ProcessAudioChunk(AudioSessionConduit *conduit,
                                  TrackRate rate, AudioChunk& chunk);
 #ifdef MOZILLA_INTERNAL_API
   virtual void ProcessVideoChunk(VideoSessionConduit *conduit,
                                  VideoChunk& chunk);
@@ -618,17 +618,17 @@ class MediaPipelineReceiveAudio : public
       MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
       if (NS_FAILED(rv)) {
         MOZ_CRASH();
       }
     }
 
     // Implement MediaStreamListener
     virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                                          TrackTicks offset,
+                                          StreamTime offset,
                                           uint32_t events,
                                           const MediaSegment& queued_media) MOZ_OVERRIDE {}
     virtual void NotifyPull(MediaStreamGraph* graph,
                             StreamTime desired_time) MOZ_OVERRIDE;
 
   private:
     RefPtr<MediaSessionConduit> conduit_;
   };
@@ -707,17 +707,17 @@ class MediaPipelineReceiveVideo : public
   // Separate class to allow ref counting
   class PipelineListener : public GenericReceiveListener {
    public:
     PipelineListener(SourceMediaStream * source, TrackID track_id);
 
     // Implement MediaStreamListener
     virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                                          TrackTicks offset,
+                                          StreamTime offset,
                                           uint32_t events,
                                           const MediaSegment& queued_media) MOZ_OVERRIDE {}
     virtual void NotifyPull(MediaStreamGraph* graph,
                             StreamTime desired_time) MOZ_OVERRIDE;
 
     // Accessors for external writes from the renderer
     void FrameSizeChange(unsigned int width,
                          unsigned int height,
                          unsigned int number_of_streams) {
--- a/media/webrtc/signaling/test/FakeMediaStreams.h
+++ b/media/webrtc/signaling/test/FakeMediaStreams.h
@@ -34,32 +34,32 @@ class Fake_SourceMediaStream;
 class Fake_MediaStreamListener
 {
 protected:
   virtual ~Fake_MediaStreamListener() {}
 
 public:
   virtual void NotifyQueuedTrackChanges(mozilla::MediaStreamGraph* aGraph,
                                         mozilla::TrackID aID,
-                                        mozilla::TrackTicks aTrackOffset,
+                                        mozilla::StreamTime aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const mozilla::MediaSegment& aQueuedMedia) = 0;
   virtual void NotifyPull(mozilla::MediaStreamGraph* aGraph,
                           mozilla::StreamTime aDesiredTime) = 0;
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_MediaStreamListener)
 };
 
 class Fake_MediaStreamDirectListener : public Fake_MediaStreamListener
 {
 protected:
   virtual ~Fake_MediaStreamDirectListener() {}
 
 public:
   virtual void NotifyRealtimeData(mozilla::MediaStreamGraph* graph,
                                   mozilla::TrackID tid,
-                                  mozilla::TrackTicks offset,
+                                  mozilla::StreamTime offset,
                                   uint32_t events,
                                   const mozilla::MediaSegment& media) = 0;
 };
 
 // Note: only one listener supported
 class Fake_MediaStream {
 protected:
   virtual ~Fake_MediaStream() { Stop(); }
@@ -123,21 +123,21 @@ protected:
 class Fake_SourceMediaStream : public Fake_MediaStream {
  public:
   Fake_SourceMediaStream() : mSegmentsAdded(0),
                              mDesiredTime(0),
                              mPullEnabled(false),
                              mStop(false),
                              mPeriodic(new Fake_MediaPeriodic(this)) {}
 
-  void AddTrack(mozilla::TrackID aID, mozilla::TrackTicks aStart,
+  void AddTrack(mozilla::TrackID aID, mozilla::StreamTime aStart,
                 mozilla::MediaSegment* aSegment) {
     delete aSegment;
   }
 
-  void AddAudioTrack(mozilla::TrackID aID, mozilla::TrackRate aRate, mozilla::TrackTicks aStart,
+  void AddAudioTrack(mozilla::TrackID aID, mozilla::TrackRate aRate, mozilla::StreamTime aStart,
                      mozilla::AudioSegment* aSegment) {
     delete aSegment;
   }
 
   void EndTrack(mozilla::TrackID aID) {}
 
   bool AppendToTrack(mozilla::TrackID aID, mozilla::MediaSegment* aSegment,
                      mozilla::MediaSegment *aRawSegment) {
     return AppendToTrack(aID, aSegment);