Bug 1061046. Part 11: Remove callback rate parameters. r=karlt
author Robert O'Callahan <robert@ocallahan.org>
Thu, 18 Sep 2014 11:51:13 +1200
changeset 216600 ed7ac92094e941ff443116cf739e8c1e8130c674
parent 216599 596a7b88afb763e6f2b584b9db09c45706d48d0a
child 216601 3565d8ea6d61bc53b3ee0b65940068adb40d6c2a
push id unknown
push user unknown
push date unknown
reviewers karlt
bugs 1061046
milestone 36.0a1
dom/media/DOMMediaStream.cpp
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/TrackUnionStream.cpp
dom/media/encoder/MediaEncoder.cpp
dom/media/encoder/MediaEncoder.h
dom/media/encoder/TrackEncoder.cpp
dom/media/encoder/TrackEncoder.h
dom/media/gtest/TestVideoTrackEncoder.cpp
dom/media/gtest/TestVorbisTrackEncoder.cpp
dom/media/imagecapture/CaptureTask.cpp
dom/media/imagecapture/CaptureTask.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webspeech/recognition/SpeechStreamListener.cpp
dom/media/webspeech/recognition/SpeechStreamListener.h
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
media/webrtc/signaling/test/FakeMediaStreams.h
media/webrtc/signaling/test/FakeMediaStreamsImpl.h
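The change is mechanical across all of these files: the per-callback TrackRate parameter is dropped from the MediaStreamListener, MediaStreamDirectListener and TrackEncoder callback signatures, and the few callers that still need a rate query it from the graph instead. A minimal self-contained sketch of the pattern, using stand-in types rather than the real Gecko headers:

  // Stand-in types for illustration only; the real interfaces live in
  // dom/media/MediaStreamGraph.h.
  #include <cstdint>
  #include <cstdio>

  typedef int32_t TrackID;
  typedef int32_t TrackRate;
  typedef int64_t TrackTicks;
  struct MediaSegment {};

  struct MediaStreamGraph {
    // The rate is queried from the graph via GraphRate() instead of being
    // threaded through every callback.
    TrackRate GraphRate() const { return 48000; }
  };

  struct MediaStreamListener {
    virtual ~MediaStreamListener() {}
    // New shape: no TrackRate argument between aID and aTrackOffset.
    virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
                                          TrackTicks aTrackOffset,
                                          uint32_t aTrackEvents,
                                          const MediaSegment& aQueuedMedia) {}
  };

  struct RateLoggingListener : MediaStreamListener {
    virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
                                          TrackTicks aTrackOffset,
                                          uint32_t aTrackEvents,
                                          const MediaSegment& aQueuedMedia) {
      // Listeners that previously used aTrackRate now ask the graph.
      std::printf("track %d, offset %lld, rate %d\n", (int)aID,
                  (long long)aTrackOffset, (int)aGraph->GraphRate());
    }
  };

  int main() {
    MediaStreamGraph graph;
    MediaSegment segment;
    RateLoggingListener listener;
    listener.NotifyQueuedTrackChanges(&graph, 1, 0, 0, segment);
    return 0;
  }

The in-tree overrides below keep MOZ_OVERRIDE and the existing argument names; only the rate parameter disappears.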
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -80,17 +80,16 @@ public:
   /**
    * Notify that changes to one of the stream tracks have been queued.
    * aTrackEvents can be any combination of TRACK_EVENT_CREATED and
    * TRACK_EVENT_ENDED. aQueuedMedia is the data being added to the track
    * at aTrackOffset (relative to the start of the stream).
    * aQueuedMedia can be null if there is no output.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackRate aTrackRate,
                                         TrackTicks aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE
   {
     if (aTrackEvents & (TRACK_EVENT_CREATED | TRACK_EVENT_ENDED)) {
       nsRefPtr<TrackChange> runnable =
         new TrackChange(this, aID, aTrackOffset, aTrackEvents,
                         aQueuedMedia.GetType());
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -192,17 +192,17 @@ MediaStreamGraphImpl::ExtractPendingInpu
     finished = aStream->mUpdateFinished;
     for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
       SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
       aStream->ApplyTrackDisabling(data->mID, data->mData);
       for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
         MediaStreamListener* l = aStream->mListeners[j];
         TrackTicks offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
             ? data->mStart : aStream->mBuffer.FindTrack(data->mID)->GetSegment()->GetDuration();
-        l->NotifyQueuedTrackChanges(this, data->mID, mSampleRate,
+        l->NotifyQueuedTrackChanges(this, data->mID,
                                     offset, data->mCommands, *data->mData);
       }
       if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
         MediaSegment* segment = data->mData.forget();
         STREAM_LOG(PR_LOG_DEBUG, ("SourceMediaStream %p creating track %d, start %lld, initial end %lld",
                                   aStream, data->mID, int64_t(data->mStart),
                                   int64_t(segment->GetDuration())));
 
@@ -1934,18 +1934,17 @@ MediaStream::GetProcessingGraphUpdateInd
 StreamBuffer::Track*
 MediaStream::EnsureTrack(TrackID aTrackId, TrackRate aSampleRate)
 {
   StreamBuffer::Track* track = mBuffer.FindTrack(aTrackId);
   if (!track) {
     nsAutoPtr<MediaSegment> segment(new AudioSegment());
     for (uint32_t j = 0; j < mListeners.Length(); ++j) {
       MediaStreamListener* l = mListeners[j];
-      l->NotifyQueuedTrackChanges(Graph(), aTrackId,
-                                  GraphImpl()->GraphRate(), 0,
+      l->NotifyQueuedTrackChanges(Graph(), aTrackId, 0,
                                   MediaStreamListener::TRACK_EVENT_CREATED,
                                   *segment);
     }
     track = &mBuffer.AddTrack(aTrackId, aSampleRate, 0, segment.forget());
   }
   return track;
 }
 
@@ -2368,17 +2367,16 @@ SourceMediaStream::NotifyDirectConsumers
 {
   // Call with mMutex locked
   MOZ_ASSERT(aTrack);
 
   for (uint32_t j = 0; j < mDirectListeners.Length(); ++j) {
     MediaStreamDirectListener* l = mDirectListeners[j];
     TrackTicks offset = 0; // FIX! need a separate TrackTicks.... or the end of the internal buffer
     l->NotifyRealtimeData(static_cast<MediaStreamGraph*>(GraphImpl()), aTrack->mID,
-                          GraphRate(),
                           offset, aTrack->mCommands, *aSegment);
   }
 }
 
 // These handle notifying all the listeners of an event
 void
 SourceMediaStream::NotifyListenersEventImpl(MediaStreamListener::MediaStreamGraphEvent aEvent)
 {
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -169,17 +169,16 @@ public:
   };
   /**
    * Notify that changes to one of the stream tracks have been queued.
    * aTrackEvents can be any combination of TRACK_EVENT_CREATED and
    * TRACK_EVENT_ENDED. aQueuedMedia is the data being added to the track
    * at aTrackOffset (relative to the start of the stream).
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackRate aTrackRate,
                                         TrackTicks aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) {}
 };
 
 /**
  * This is a base class for media graph thread listener direct callbacks
  * from within AppendToTrack().  Note that your regular listener will
@@ -193,17 +192,16 @@ public:
 
   /*
   * This will be called on any MediaStreamDirectListener added to
   * a SourceMediaStream when AppendToTrack() is called.  The MediaSegment
    * will be the RawSegment (unresampled) if available in AppendToTrack().
    * Note that NotifyQueuedTrackChanges() calls will also still occur.
    */
   virtual void NotifyRealtimeData(MediaStreamGraph* aGraph, TrackID aID,
-                                  TrackRate aTrackRate,
                                   TrackTicks aTrackOffset,
                                   uint32_t aTrackEvents,
                                   const MediaSegment& aMedia) {}
 };
 
 /**
  * This is a base class for main-thread listener callbacks.
  * This callback is invoked on the main thread when the main-thread-visible
--- a/dom/media/TrackUnionStream.cpp
+++ b/dom/media/TrackUnionStream.cpp
@@ -193,17 +193,17 @@ TrackUnionStream::TrackUnionStream(DOMMe
     // little later than the true time. This means we'll have enough
     // samples in our input stream to go just beyond the destination time.
     TrackTicks outputStart = TimeToTicksRoundUp(rate, GraphTimeToStreamTime(aFrom));
 
     nsAutoPtr<MediaSegment> segment;
     segment = aTrack->GetSegment()->CreateEmptyClone();
     for (uint32_t j = 0; j < mListeners.Length(); ++j) {
       MediaStreamListener* l = mListeners[j];
-      l->NotifyQueuedTrackChanges(Graph(), id, rate, outputStart,
+      l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                   MediaStreamListener::TRACK_EVENT_CREATED,
                                   *segment);
     }
     segment->AppendNullData(outputStart);
     StreamBuffer::Track* track =
       &mBuffer.AddTrack(id, rate, outputStart, segment.forget());
     STREAM_LOG(PR_LOG_DEBUG, ("TrackUnionStream %p adding track %d for input stream %p track %d, start ticks %lld",
                               this, id, aPort->GetSource(), aTrack->GetID(),
@@ -214,29 +214,27 @@ TrackUnionStream::TrackUnionStream(DOMMe
     map->mEndOfLastInputIntervalInInputStream = -1;
     map->mEndOfLastInputIntervalInOutputStream = -1;
     map->mInputPort = aPort;
     map->mInputTrackID = aTrack->GetID();
     map->mOutputTrackID = track->GetID();
     map->mSegment = aTrack->GetSegment()->CreateEmptyClone();
     return mTrackMap.Length() - 1;
   }
-
   void TrackUnionStream::EndTrack(uint32_t aIndex)
   {
     StreamBuffer::Track* outputTrack = mBuffer.FindTrack(mTrackMap[aIndex].mOutputTrackID);
     if (!outputTrack || outputTrack->IsEnded())
       return;
     for (uint32_t j = 0; j < mListeners.Length(); ++j) {
       MediaStreamListener* l = mListeners[j];
       TrackTicks offset = outputTrack->GetSegment()->GetDuration();
       nsAutoPtr<MediaSegment> segment;
       segment = outputTrack->GetSegment()->CreateEmptyClone();
-      l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
-                                  outputTrack->GetRate(), offset,
+      l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(), offset,
                                   MediaStreamListener::TRACK_EVENT_ENDED,
                                   *segment);
     }
     outputTrack->SetEnded();
   }
 
   void TrackUnionStream::CopyTrackData(StreamBuffer::Track* aInputTrack,
                      uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
@@ -251,20 +249,21 @@ TrackUnionStream::TrackUnionStream(DOMMe
     MediaStream* source = map->mInputPort->GetSource();
 
     GraphTime next;
     *aOutputTrackFinished = false;
     for (GraphTime t = aFrom; t < aTo; t = next) {
       MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
       interval.mEnd = std::min(interval.mEnd, aTo);
       StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
-      TrackTicks inputTrackEndPoint = aInputTrack->GetEnd();
+      TrackTicks inputTrackEndPoint = TRACK_TICKS_MAX;
 
       if (aInputTrack->IsEnded() &&
           aInputTrack->GetEndTimeRoundDown() <= inputEnd) {
+        inputTrackEndPoint = aInputTrack->GetEnd();
         *aOutputTrackFinished = true;
       }
 
       if (interval.mStart >= interval.mEnd) {
         break;
       }
       next = interval.mEnd;
 
@@ -336,50 +335,38 @@ TrackUnionStream::TrackUnionStream(DOMMe
         //   <= TimeToTicksRoundDown(rate, originalInputStart) - 1 + TimeToTicksRoundDown(rate, inputEnd) - TimeToTicksRoundDown(rate, originalInputStart) + 1
         //   = TimeToTicksRoundDown(rate, inputEnd)
         //   <= inputEnd/rate
         // (now using the fact that inputEnd <= track->GetEndTimeRoundDown() for a non-ended track)
         //   <= TicksToTimeRoundDown(rate, aInputTrack->GetSegment()->GetDuration())/rate
         //   <= rate*aInputTrack->GetSegment()->GetDuration()/rate
         //   = aInputTrack->GetSegment()->GetDuration()
         // as required.
-        // Note that while the above proof appears to be generally right, if we are suffering
-        // from a lot of underrun, then in rare cases inputStartTicks >> inputTrackEndPoint.
-        // As such, we still need to verify the sanity of property #2 and use null data as
-        // appropriate.
 
         if (inputStartTicks < 0) {
           // Data before the start of the track is just null.
           // We have to add a small amount of delay to ensure that there is
           // always a sample available if we see an interval that contains a
           // tick boundary on the output stream's timeline but does not contain
           // a tick boundary on the input stream's timeline. 1 tick delay is
           // necessary and sufficient.
           segment->AppendNullData(-inputStartTicks);
           inputStartTicks = 0;
         }
         if (inputEndTicks > inputStartTicks) {
-          if (inputEndTicks <= inputTrackEndPoint) {
-            segment->AppendSlice(*aInputTrack->GetSegment(), inputStartTicks, inputEndTicks);
-            STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of input data to track %d",
-                       this, ticks, outputTrack->GetID()));
-          } else {
-            if (inputStartTicks < inputTrackEndPoint) {
-              segment->AppendSlice(*aInputTrack->GetSegment(), inputStartTicks, inputTrackEndPoint);
-              ticks -= inputTrackEndPoint - inputStartTicks;
-            }
-            segment->AppendNullData(ticks);
-            STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of input data and %lld of null data to track %d",
-                       this, inputTrackEndPoint - inputStartTicks, ticks, outputTrack->GetID()));
-          }
+          segment->AppendSlice(*aInputTrack->GetSegment(),
+                               std::min(inputTrackEndPoint, inputStartTicks),
+                               std::min(inputTrackEndPoint, inputEndTicks));
         }
+        STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of input data to track %d",
+                   this, (long long)(std::min(inputTrackEndPoint, inputEndTicks) - std::min(inputTrackEndPoint, inputStartTicks)),
+                   outputTrack->GetID()));
       }
       ApplyTrackDisabling(outputTrack->GetID(), segment);
       for (uint32_t j = 0; j < mListeners.Length(); ++j) {
         MediaStreamListener* l = mListeners[j];
         l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
-                                    outputTrack->GetRate(), startTicks, 0,
-                                    *segment);
+                                    startTicks, 0, *segment);
       }
       outputTrack->GetSegment()->AppendFrom(segment);
     }
   }
 }
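The CopyTrackData change above also drops the explicit null-padding branch: inputTrackEndPoint now stays at TRACK_TICKS_MAX until the input track has actually ended, so the two std::min() calls are no-ops for a live track and clamp the slice to the track's end otherwise. A small stand-alone illustration of that clamping arithmetic (invented values, not tree code):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <limits>

  typedef int64_t TrackTicks;
  const TrackTicks TRACK_TICKS_MAX = std::numeric_limits<TrackTicks>::max();

  // Returns the [start, end) range that would actually be sliced out of the
  // input track's segment, mirroring the std::min() calls added above.
  static void ClampSlice(TrackTicks trackEnd, TrackTicks start, TrackTicks end,
                         TrackTicks* outStart, TrackTicks* outEnd) {
    *outStart = std::min(trackEnd, start);
    *outEnd = std::min(trackEnd, end);
  }

  int main() {
    TrackTicks s, e;

    // Live (not ended) input track: trackEnd is TRACK_TICKS_MAX, no clamping.
    ClampSlice(TRACK_TICKS_MAX, 100, 200, &s, &e);
    assert(s == 100 && e == 200);

    // Ended track whose end falls inside the interval: slice is trimmed.
    ClampSlice(150, 100, 200, &s, &e);
    assert(s == 100 && e == 150);

    // Interval entirely past the end of an ended track: empty slice.
    ClampSlice(150, 160, 200, &s, &e);
    assert(s == 150 && e == 150);
    return 0;
  }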
--- a/dom/media/encoder/MediaEncoder.cpp
+++ b/dom/media/encoder/MediaEncoder.cpp
@@ -41,30 +41,29 @@ PRLogModuleInfo* gMediaEncoderLog;
 #define LOG(type, msg)
 #endif
 
 namespace mozilla {
 
 void
 MediaEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                        TrackID aID,
-                                       TrackRate aTrackRate,
                                        TrackTicks aTrackOffset,
                                        uint32_t aTrackEvents,
                                        const MediaSegment& aQueuedMedia)
 {
   // Process the incoming raw track data from MediaStreamGraph, called on the
   // thread of MediaStreamGraph.
   if (mAudioEncoder && aQueuedMedia.GetType() == MediaSegment::AUDIO) {
-    mAudioEncoder->NotifyQueuedTrackChanges(aGraph, aID, aTrackRate,
+    mAudioEncoder->NotifyQueuedTrackChanges(aGraph, aID,
                                             aTrackOffset, aTrackEvents,
                                             aQueuedMedia);
 
   } else if (mVideoEncoder && aQueuedMedia.GetType() == MediaSegment::VIDEO) {
-      mVideoEncoder->NotifyQueuedTrackChanges(aGraph, aID, aTrackRate,
+      mVideoEncoder->NotifyQueuedTrackChanges(aGraph, aID,
                                               aTrackOffset, aTrackEvents,
                                               aQueuedMedia);
   }
 }
 
 void
 MediaEncoder::NotifyEvent(MediaStreamGraph* aGraph,
                           MediaStreamListener::MediaStreamGraphEvent event)
--- a/dom/media/encoder/MediaEncoder.h
+++ b/dom/media/encoder/MediaEncoder.h
@@ -75,17 +75,16 @@ public :
 
   ~MediaEncoder() {};
 
   /**
   * Notified by the control loop of MediaStreamGraph; aQueuedMedia is the raw
   * track data in the form of a MediaSegment.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackRate aTrackRate,
                                         TrackTicks aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
 
   /**
    * Notified the stream is being removed.
    */
   virtual void NotifyEvent(MediaStreamGraph* aGraph,
--- a/dom/media/encoder/TrackEncoder.cpp
+++ b/dom/media/encoder/TrackEncoder.cpp
@@ -48,17 +48,16 @@ TrackEncoder::TrackEncoder()
     gTrackEncoderLog = PR_NewLogModule("TrackEncoder");
   }
 #endif
 }
 
 void
 AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                             TrackID aID,
-                                            TrackRate aTrackRate,
                                             TrackTicks aTrackOffset,
                                             uint32_t aTrackEvents,
                                             const MediaSegment& aQueuedMedia)
 {
   if (mCanceled) {
     return;
   }
 
@@ -72,17 +71,17 @@ AudioTrackEncoder::NotifyQueuedTrackChan
 #endif
     AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio));
     while (!iter.IsEnded()) {
       AudioChunk chunk = *iter;
 
       // The number of channels is determined by the first non-null chunk, and
       // thus the audio encoder is initialized at this time.
       if (!chunk.IsNull()) {
-        nsresult rv = Init(chunk.mChannelData.Length(), aTrackRate);
+        nsresult rv = Init(chunk.mChannelData.Length(), aGraph->GraphRate());
         if (NS_FAILED(rv)) {
           LOG("[AudioTrackEncoder]: Fail to initialize the encoder!");
           NotifyCancel();
         }
         break;
       }
 
       iter.Next();
@@ -177,17 +176,16 @@ size_t
 AudioTrackEncoder::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
 {
   return mRawSegment.SizeOfExcludingThis(aMallocSizeOf);
 }
 
 void
 VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                             TrackID aID,
-                                            TrackRate aTrackRate,
                                             TrackTicks aTrackOffset,
                                             uint32_t aTrackEvents,
                                             const MediaSegment& aQueuedMedia)
 {
   if (mCanceled) {
     return;
   }
 
@@ -202,17 +200,17 @@ VideoTrackEncoder::NotifyQueuedTrackChan
     VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(video));
     while (!iter.IsEnded()) {
       VideoChunk chunk = *iter;
       if (!chunk.IsNull()) {
         gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
         gfxIntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
         nsresult rv = Init(imgsize.width, imgsize.height,
                            intrinsicSize.width, intrinsicSize.height,
-                           aTrackRate);
+                           aGraph->GraphRate());
         if (NS_FAILED(rv)) {
           LOG("[VideoTrackEncoder]: Fail to initialize the encoder!");
           NotifyCancel();
         }
         break;
       }
 
       iter.Next();
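With the rate argument gone, both track encoders above initialize themselves from aGraph->GraphRate() when the first non-null chunk arrives, since the channel count (and, for video, the frame size) is only known at that point. A rough sketch of that lazy-initialization pattern, with invented stand-in types rather than the tree's classes:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  typedef int32_t TrackRate;

  // Stand-ins for the graph and an audio chunk; not the tree's types.
  struct Graph { TrackRate GraphRate() const { return 48000; } };
  struct Chunk {
    std::vector<const float*> mChannelData;
    bool IsNull() const { return mChannelData.empty(); }
  };

  class LazyAudioEncoder {
   public:
    // Mirrors the shape of AudioTrackEncoder::NotifyQueuedTrackChanges(): the
    // encoder is initialized on the first non-null chunk, using the graph's
    // rate rather than a per-callback rate parameter.
    void NotifyChunk(Graph* aGraph, const Chunk& aChunk) {
      if (!mInitialized && !aChunk.IsNull()) {
        Init(aChunk.mChannelData.size(), aGraph->GraphRate());
      }
      // ... append aChunk to the raw segment, etc.
    }

   private:
    void Init(size_t aChannels, TrackRate aRate) {
      std::printf("init: %zu channels at %d Hz\n", aChannels, (int)aRate);
      mInitialized = true;
    }
    bool mInitialized = false;
  };

  int main() {
    Graph graph;
    LazyAudioEncoder encoder;
    float samples[128] = {};
    Chunk silent;                        // null chunk: no init yet
    Chunk real; real.mChannelData.push_back(samples);
    encoder.NotifyChunk(&graph, silent);
    encoder.NotifyChunk(&graph, real);   // first real data: Init(1, 48000)
    return 0;
  }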
--- a/dom/media/encoder/TrackEncoder.h
+++ b/dom/media/encoder/TrackEncoder.h
@@ -34,17 +34,16 @@ public:
 
   virtual ~TrackEncoder() {}
 
   /**
   * Notified by the same callback of MediaEncoder when it has received a track
    * change from MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackRate aTrackRate,
                                         TrackTicks aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) = 0;
 
   /**
    * Notified by the same callback of MediaEncoder when it has been removed from
    * MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
@@ -141,21 +140,20 @@ class AudioTrackEncoder : public TrackEn
 {
 public:
   AudioTrackEncoder()
     : TrackEncoder()
     , mChannels(0)
     , mSamplingRate(0)
   {}
 
-  void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                TrackRate aTrackRate,
-                                TrackTicks aTrackOffset,
-                                uint32_t aTrackEvents,
-                                const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
+  virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
+                                        TrackTicks aTrackOffset,
+                                        uint32_t aTrackEvents,
+                                        const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
 
   /**
    * Interleaves the track data and stores the result into aOutput. Might need
    * to up-mix or down-mix the channel data if the channels number of this chunk
    * is different from aOutputChannels. The channel data from aChunk might be
    * modified by up-mixing.
    */
   static void InterleaveTrackData(AudioChunk& aChunk, int32_t aDuration,
@@ -232,24 +230,23 @@ public:
     , mFrameHeight(0)
     , mDisplayWidth(0)
     , mDisplayHeight(0)
     , mTrackRate(0)
     , mTotalFrameDuration(0)
   {}
 
   /**
-   * Notified by the same callbcak of MediaEncoder when it has received a track
+   * Notified by the same callback of MediaEncoder when it has received a track
    * change from MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
-  void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                TrackRate aTrackRate,
-                                TrackTicks aTrackOffset,
-                                uint32_t aTrackEvents,
-                                const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
+  virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
+                                        TrackTicks aTrackOffset,
+                                        uint32_t aTrackEvents,
+                                        const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
   /**
   * Measure size of mRawSegment
   */
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
 protected:
   /**
   * Initializes the video encoder. In order to collect the value of width and
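For reference, the InterleaveTrackData() helper declared in the first hunk of this header interleaves per-channel buffers into a single output buffer, up- or down-mixing when the chunk's channel count differs from aOutputChannels. The sketch below shows plain interleaving only, with hypothetical names; it is not the tree's implementation and omits the mixing the real helper performs:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Hypothetical planar-to-interleaved copy, in the spirit of
  // InterleaveTrackData(); one sample per channel per frame in the output.
  static void InterleavePlanar(const std::vector<const int16_t*>& aChannels,
                               size_t aFrames, int16_t* aOutput) {
    const size_t channels = aChannels.size();
    for (size_t frame = 0; frame < aFrames; ++frame) {
      for (size_t c = 0; c < channels; ++c) {
        // Output layout: L0 R0 L1 R1 ...
        aOutput[frame * channels + c] = aChannels[c][frame];
      }
    }
  }

  int main() {
    const int16_t left[3] = {1, 2, 3};
    const int16_t right[3] = {10, 20, 30};
    std::vector<const int16_t*> planar = {left, right};
    int16_t interleaved[6] = {};
    InterleavePlanar(planar, 3, interleaved);
    for (size_t i = 0; i < 6; ++i) std::printf("%d ", interleaved[i]);
    std::printf("\n");  // prints: 1 10 2 20 3 30
    return 0;
  }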
--- a/dom/media/gtest/TestVideoTrackEncoder.cpp
+++ b/dom/media/gtest/TestVideoTrackEncoder.cpp
@@ -267,17 +267,17 @@ TEST(VP8VideoTrackEncoder, FrameEncode)
   VideoSegment segment;
   for (nsTArray<nsRefPtr<Image>>::size_type i = 0; i < images.Length(); i++)
   {
     nsRefPtr<Image> image = images[i];
     segment.AppendFrame(image.forget(), mozilla::TrackTicks(90000), generator.GetSize());
   }
 
   // track change notification.
-  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, 0, segment);
+  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);
 
   // Pull Encoded Data back from encoder.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
 }
 #endif // _MSC_VER
 
 // EOS test
@@ -285,16 +285,16 @@ TEST(VP8VideoTrackEncoder, EncodeComplet
 {
   // Initiate VP8 encoder
   TestVP8TrackEncoder encoder;
   InitParam param = {true, 640, 480, 90000};
   encoder.TestInit(param);
 
   // track end notification.
   VideoSegment segment;
-  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, MediaStreamListener::TRACK_EVENT_ENDED, segment);
+  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, MediaStreamListener::TRACK_EVENT_ENDED, segment);
 
   // Pull Encoded Data back from encoder. Since we have sent
   // EOS to encoder, encoder.GetEncodedTrack should return
   // NS_OK immediately.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
 }
--- a/dom/media/gtest/TestVorbisTrackEncoder.cpp
+++ b/dom/media/gtest/TestVorbisTrackEncoder.cpp
@@ -184,17 +184,17 @@ TEST(VorbisTrackEncoder, EncodedFrame)
     data[i] = ((i%8)*4000) - (7*4000)/2;
   }
   nsAutoTArray<const AudioDataValue*,1> channelData;
   channelData.AppendElement(data);
   AudioSegment segment;
   segment.AppendFrames(samples.forget(), channelData, 44100);
 
   // Track change notification.
-  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, 0, segment);
+  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0, segment);
 
   // Pull Encoded data back from encoder and verify encoded samples.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
   // Should have some encoded data.
   EXPECT_TRUE(container.GetEncodedFrames().Length() > 0);
   EXPECT_TRUE(container.GetEncodedFrames().ElementAt(0)->GetFrameData().Length()
               > 0);
@@ -211,17 +211,17 @@ TEST(VorbisTrackEncoder, EncodeComplete)
   // Initiate vorbis encoder
   TestVorbisTrackEncoder encoder;
   int channels = 1;
   int rate = 44100;
   encoder.TestVorbisCreation(channels, rate);
 
   // Track end notification.
   AudioSegment segment;
-  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0, 0,
+  encoder.NotifyQueuedTrackChanges(nullptr, 0, 0,
                                    MediaStreamListener::TRACK_EVENT_ENDED,
                                    segment);
 
   // Pull Encoded Data back from encoder. Since we have sent
   // EOS to encoder, encoder.GetEncodedTrack should return
   // NS_OK immediately.
   EncodedFrameContainer container;
   EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
--- a/dom/media/imagecapture/CaptureTask.cpp
+++ b/dom/media/imagecapture/CaptureTask.cpp
@@ -79,17 +79,16 @@ void
 CaptureTask::PrincipalChanged(DOMMediaStream* aMediaStream)
 {
   MOZ_ASSERT(NS_IsMainThread());
   mPrincipalChanged = true;
 }
 
 void
 CaptureTask::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                      TrackRate aTrackRate,
                                       TrackTicks aTrackOffset,
                                       uint32_t aTrackEvents,
                                       const MediaSegment& aQueuedMedia)
 {
   if (mImageGrabbedOrTrackEnd) {
     return;
   }
 
--- a/dom/media/imagecapture/CaptureTask.h
+++ b/dom/media/imagecapture/CaptureTask.h
@@ -28,17 +28,16 @@ class ImageCapture;
  * released during the period of the capturing process described above.
  */
 class CaptureTask : public MediaStreamListener,
                     public DOMMediaStream::PrincipalChangeObserver
 {
 public:
   // MediaStreamListener methods.
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                        TrackRate aTrackRate,
                                         TrackTicks aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
 
   virtual void NotifyEvent(MediaStreamGraph* aGraph,
                            MediaStreamGraphEvent aEvent) MOZ_OVERRIDE;
 
   // DOMMediaStream::PrincipalChangeObserver method.
--- a/dom/media/webaudio/AudioNodeStream.cpp
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -547,18 +547,17 @@ AudioNodeStream::AdvanceOutputSegment()
   }
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
     AudioChunk copyChunk = mLastChunks[0];
     AudioSegment tmpSegment;
     tmpSegment.AppendAndConsumeChunk(&copyChunk);
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
-                                mSampleRate, segment->GetDuration(), 0,
-                                tmpSegment);
+                                segment->GetDuration(), 0, tmpSegment);
   }
 }
 
 TrackTicks
 AudioNodeStream::GetCurrentPosition()
 {
   NS_ASSERTION(!mFinished, "Don't create another track after finishing");
   return EnsureTrack(AUDIO_TRACK, mSampleRate)->Get<AudioSegment>()->GetDuration();
@@ -574,17 +573,16 @@ AudioNodeStream::FinishOutput()
   StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK, mSampleRate);
   track->SetEnded();
   FinishOnGraphThread();
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
     AudioSegment emptySegment;
     l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
-                                mSampleRate,
                                 track->GetSegment()->GetDuration(),
                                 MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
   }
 }
 
 double
 AudioNodeStream::FractionalTicksFromDestinationTime(AudioNodeStream* aDestination,
                                                     double aSeconds)
--- a/dom/media/webspeech/recognition/SpeechStreamListener.cpp
+++ b/dom/media/webspeech/recognition/SpeechStreamListener.cpp
@@ -26,17 +26,16 @@ SpeechStreamListener::~SpeechStreamListe
   mRecognition.swap(forgottenRecognition);
   NS_ProxyRelease(mainThread,
                   static_cast<DOMEventTargetHelper*>(forgottenRecognition));
 }
 
 void
 SpeechStreamListener::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                                TrackID aID,
-                                               TrackRate aTrackRate,
                                                TrackTicks aTrackOffset,
                                                uint32_t aTrackEvents,
                                                const MediaSegment& aQueuedMedia)
 {
   AudioSegment* audio = const_cast<AudioSegment*>(
     static_cast<const AudioSegment*>(&aQueuedMedia));
 
   AudioSegment::ChunkIterator iterator(*audio);
@@ -45,30 +44,31 @@ SpeechStreamListener::NotifyQueuedTrackC
     if (iterator->GetDuration() > INT_MAX) {
       continue;
     }
     int duration = int(iterator->GetDuration());
 
     if (iterator->IsNull()) {
       nsTArray<int16_t> nullData;
       PodZero(nullData.AppendElements(duration), duration);
-      ConvertAndDispatchAudioChunk(duration, iterator->mVolume, nullData.Elements(), aTrackRate);
+      ConvertAndDispatchAudioChunk(duration, iterator->mVolume,
+                                   nullData.Elements(), aGraph->GraphRate());
     } else {
       AudioSampleFormat format = iterator->mBufferFormat;
 
       MOZ_ASSERT(format == AUDIO_FORMAT_S16 || format == AUDIO_FORMAT_FLOAT32);
 
       if (format == AUDIO_FORMAT_S16) {
         ConvertAndDispatchAudioChunk(duration,iterator->mVolume,
                                      static_cast<const int16_t*>(iterator->mChannelData[0]),
-                                     aTrackRate);
+                                     aGraph->GraphRate());
       } else if (format == AUDIO_FORMAT_FLOAT32) {
         ConvertAndDispatchAudioChunk(duration,iterator->mVolume,
                                      static_cast<const float*>(iterator->mChannelData[0]),
-                                     aTrackRate);
+                                     aGraph->GraphRate());
       }
     }
 
     iterator.Next();
   }
 }
 
 template<typename SampleFormatType> void
--- a/dom/media/webspeech/recognition/SpeechStreamListener.h
+++ b/dom/media/webspeech/recognition/SpeechStreamListener.h
@@ -19,24 +19,23 @@ namespace dom {
 class SpeechRecognition;
 
 class SpeechStreamListener : public MediaStreamListener
 {
 public:
   explicit SpeechStreamListener(SpeechRecognition* aRecognition);
   ~SpeechStreamListener();
 
-  void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
-                                TrackRate aTrackRate,
-                                TrackTicks aTrackOffset,
-                                uint32_t aTrackEvents,
-                                const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
+  virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
+                                        TrackTicks aTrackOffset,
+                                        uint32_t aTrackEvents,
+                                        const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
 
-  void NotifyEvent(MediaStreamGraph* aGraph,
-                   MediaStreamListener::MediaStreamGraphEvent event) MOZ_OVERRIDE;
+  virtual void NotifyEvent(MediaStreamGraph* aGraph,
+                           MediaStreamListener::MediaStreamGraphEvent event) MOZ_OVERRIDE;
 
 private:
   template<typename SampleFormatType>
   void ConvertAndDispatchAudioChunk(int aDuration, float aVolume, SampleFormatType* aData, TrackRate aTrackRate);
   nsRefPtr<SpeechRecognition> mRecognition;
 };
 
 } // namespace dom
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -935,51 +935,48 @@ nsresult MediaPipeline::PipelineTranspor
   pipeline_->increment_rtcp_packets_sent();
   return pipeline_->SendPacket(pipeline_->rtcp_.transport_, inner_data,
                                out_len);
 }
 
 // Called if we're attached with AddDirectListener()
 void MediaPipelineTransmit::PipelineListener::
 NotifyRealtimeData(MediaStreamGraph* graph, TrackID tid,
-                   TrackRate rate,
                    TrackTicks offset,
                    uint32_t events,
                    const MediaSegment& media) {
   MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyRealtimeData()");
 
-  NewData(graph, tid, rate, offset, events, media);
+  NewData(graph, tid, offset, events, media);
 }
 
 void MediaPipelineTransmit::PipelineListener::
 NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                         TrackRate rate,
                          TrackTicks offset,
                          uint32_t events,
                          const MediaSegment& queued_media) {
   MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyQueuedTrackChanges()");
 
   // ignore non-direct data if we're also getting direct data
   if (!direct_connect_) {
-    NewData(graph, tid, rate, offset, events, queued_media);
+    NewData(graph, tid, offset, events, queued_media);
   }
 }
 
 // I420 buffer size macros
 #define YSIZE(x,y) ((x)*(y))
 #define CRSIZE(x,y) ((((x)+1) >> 1) * (((y)+1) >> 1))
 #define I420SIZE(x,y) (YSIZE((x),(y)) + 2 * CRSIZE((x),(y)))
 
 // XXX NOTE: this code will have to change when we get support for multiple tracks of type
 // in a MediaStream and especially in a PeerConnection stream.  bug 1056650
 // It should be matching on the "correct" track for the pipeline, not just "any video track".
 
 void MediaPipelineTransmit::PipelineListener::
 NewData(MediaStreamGraph* graph, TrackID tid,
-        TrackRate rate,
         TrackTicks offset,
         uint32_t events,
         const MediaSegment& media) {
   if (!active_) {
     MOZ_MTLOG(ML_DEBUG, "Discarding packets because transport not ready");
     return;
   }
 
@@ -1002,29 +999,35 @@ NewData(MediaStreamGraph* graph, TrackID
   // track type and it's destined for us
   // See bug 784517
   if (media.GetType() == MediaSegment::AUDIO) {
     AudioSegment* audio = const_cast<AudioSegment *>(
         static_cast<const AudioSegment *>(&media));
 
     AudioSegment::ChunkIterator iter(*audio);
     while(!iter.IsEnded()) {
+      TrackRate rate;
+#ifdef USE_FAKE_MEDIA_STREAMS
+      rate = Fake_MediaStream::GraphRate();
+#else
+      rate = graph->GraphRate();
+#endif
       ProcessAudioChunk(static_cast<AudioSessionConduit*>(conduit_.get()),
                         rate, *iter);
       iter.Next();
     }
   } else if (media.GetType() == MediaSegment::VIDEO) {
 #ifdef MOZILLA_INTERNAL_API
     VideoSegment* video = const_cast<VideoSegment *>(
         static_cast<const VideoSegment *>(&media));
 
     VideoSegment::ChunkIterator iter(*video);
     while(!iter.IsEnded()) {
       ProcessVideoChunk(static_cast<VideoSessionConduit*>(conduit_.get()),
-                        rate, *iter);
+                        *iter);
       iter.Next();
     }
 #endif
   } else {
     // Ignore
   }
 }
 
@@ -1123,17 +1126,16 @@ void MediaPipelineTransmit::PipelineList
     buffer_current_ = chunk_remaining;
   }
 
 }
 
 #ifdef MOZILLA_INTERNAL_API
 void MediaPipelineTransmit::PipelineListener::ProcessVideoChunk(
     VideoSessionConduit* conduit,
-    TrackRate rate,
     VideoChunk& chunk) {
   layers::Image *img = chunk.mFrame.GetImage();
 
   // We now need to send the video frame to the other side
   if (!img) {
     // segment.AppendFrame() allows null images, which show up here as null
     return;
   }
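One wrinkle in NewData() above: under USE_FAKE_MEDIA_STREAMS (the signaling unit tests), MediaStreamGraph is only forward-declared in FakeMediaStreams.h, so graph->GraphRate() cannot be called there; the rate instead comes from Fake_MediaStream::GraphRate(), which the final hunk of this patch makes static so it can be invoked without a stream instance. A trivial stand-alone sketch of that compile-time selection, with stand-in types:

  // Illustration only; the real switch is the #ifdef added to NewData() above.
  #include <cstdint>
  typedef int32_t TrackRate;

  #define USE_FAKE_MEDIA_STREAMS 1   // pretend we are in the unit-test build

  struct Fake_MediaStream {
    // Static, as in the FakeMediaStreams.h hunk below: no instance needed.
    static TrackRate GraphRate() { return 16000; }
  };

  struct MediaStreamGraph {
    TrackRate GraphRate() const { return 48000; }
  };

  static TrackRate RateFor(MediaStreamGraph* graph) {
  #ifdef USE_FAKE_MEDIA_STREAMS
    (void)graph;                          // fake builds ignore the graph
    return Fake_MediaStream::GraphRate();
  #else
    return graph->GraphRate();            // real builds ask the graph
  #endif
  }

  int main() { return RateFor(0) == 16000 ? 0 : 1; }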
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -458,41 +458,38 @@ public:
     void SetEnabled(bool enabled) { enabled_ = enabled; }
     TrackID trackid() {
       MutexAutoLock lock(mMutex);
       return track_id_external_;
     }
 
     // Implement MediaStreamListener
     virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                                          TrackRate rate,
                                           TrackTicks offset,
                                           uint32_t events,
                                           const MediaSegment& queued_media) MOZ_OVERRIDE;
     virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) MOZ_OVERRIDE {}
 
     // Implement MediaStreamDirectListener
     virtual void NotifyRealtimeData(MediaStreamGraph* graph, TrackID tid,
-                                    TrackRate rate,
                                     TrackTicks offset,
                                     uint32_t events,
                                     const MediaSegment& media) MOZ_OVERRIDE;
 
    private:
     void NewData(MediaStreamGraph* graph, TrackID tid,
-                 TrackRate rate,
                  TrackTicks offset,
                  uint32_t events,
                  const MediaSegment& media);
 
     virtual void ProcessAudioChunk(AudioSessionConduit *conduit,
                                    TrackRate rate, AudioChunk& chunk);
 #ifdef MOZILLA_INTERNAL_API
     virtual void ProcessVideoChunk(VideoSessionConduit *conduit,
-                                   TrackRate rate, VideoChunk& chunk);
+                                   VideoChunk& chunk);
 #endif
     RefPtr<MediaSessionConduit> conduit_;
 
     // May be TRACK_INVALID until we see data from the track
     TrackID track_id_; // this is the current TrackID this listener is attached to
     Mutex mMutex;
     // protected by mMutex
     // May be TRACK_INVALID until we see data from the track
@@ -621,17 +618,16 @@ class MediaPipelineReceiveAudio : public
       MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
       if (NS_FAILED(rv)) {
         MOZ_CRASH();
       }
     }
 
     // Implement MediaStreamListener
     virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                                          TrackRate rate,
                                           TrackTicks offset,
                                           uint32_t events,
                                           const MediaSegment& queued_media) MOZ_OVERRIDE {}
     virtual void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) MOZ_OVERRIDE;
 
    private:
     RefPtr<MediaSessionConduit> conduit_;
   };
@@ -711,17 +707,16 @@ class MediaPipelineReceiveVideo : public
 
   // Separate class to allow ref counting
   class PipelineListener : public GenericReceiveListener {
    public:
     PipelineListener(SourceMediaStream * source, TrackID track_id);
 
     // Implement MediaStreamListener
     virtual void NotifyQueuedTrackChanges(MediaStreamGraph* graph, TrackID tid,
-                                          TrackRate rate,
                                           TrackTicks offset,
                                           uint32_t events,
                                           const MediaSegment& queued_media) MOZ_OVERRIDE {}
     virtual void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) MOZ_OVERRIDE;
 
     // Accessors for external writes from the renderer
     void FrameSizeChange(unsigned int width,
                          unsigned int height,
--- a/media/webrtc/signaling/test/FakeMediaStreams.h
+++ b/media/webrtc/signaling/test/FakeMediaStreams.h
@@ -27,56 +27,52 @@ class nsIDOMWindow;
 
 namespace mozilla {
    class MediaStreamGraph;
    class MediaSegment;
 };
 
 class Fake_SourceMediaStream;
 
-static const int64_t USECS_PER_S = 1000000;
-
 class Fake_MediaStreamListener
 {
- protected:
+protected:
   virtual ~Fake_MediaStreamListener() {}
 
- public:
+public:
   virtual void NotifyQueuedTrackChanges(mozilla::MediaStreamGraph* aGraph, mozilla::TrackID aID,
-                                        mozilla::TrackRate aTrackRate,
                                         mozilla::TrackTicks aTrackOffset,
                                         uint32_t aTrackEvents,
                                         const mozilla::MediaSegment& aQueuedMedia)  = 0;
   virtual void NotifyPull(mozilla::MediaStreamGraph* aGraph, mozilla::StreamTime aDesiredTime) = 0;
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Fake_MediaStreamListener)
 };
 
 class Fake_MediaStreamDirectListener : public Fake_MediaStreamListener
 {
- protected:
+protected:
   virtual ~Fake_MediaStreamDirectListener() {}
 
- public:
+public:
   virtual void NotifyRealtimeData(mozilla::MediaStreamGraph* graph, mozilla::TrackID tid,
-                                  mozilla::TrackRate rate,
                                   mozilla::TrackTicks offset,
                                   uint32_t events,
                                   const mozilla::MediaSegment& media) = 0;
 };
 
 // Note: only one listener supported
 class Fake_MediaStream {
  protected:
   virtual ~Fake_MediaStream() { Stop(); }
 
  public:
   Fake_MediaStream () : mListeners(), mMutex("Fake MediaStream") {}
 
-  uint32_t GraphRate() { return 16000; }
+  static uint32_t GraphRate() { return 16000; }
 
   void AddListener(Fake_MediaStreamListener *aListener) {
     mozilla::MutexAutoLock lock(mMutex);
     mListeners.insert(aListener);
   }
 
   void RemoveListener(Fake_MediaStreamListener *aListener) {
     mozilla::MutexAutoLock lock(mMutex);
--- a/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
+++ b/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
@@ -109,17 +109,16 @@ void Fake_AudioStreamSource::Periodic() 
   nsAutoTArray<const int16_t *,1> channels;
   channels.AppendElement(data);
   segment.AppendFrames(samples.forget(), channels, AUDIO_BUFFER_SIZE);
 
   for(std::set<Fake_MediaStreamListener *>::iterator it = mListeners.begin();
        it != mListeners.end(); ++it) {
     (*it)->NotifyQueuedTrackChanges(nullptr, // Graph
                                     0, // TrackID
-                                    GRAPH_RATE, // Rate (hz)
                                     0, // Offset TODO(ekr@rtfm.com) fix
                                     0, // ???
                                     segment);
   }
 }
 
 
 // Fake_MediaPeriodic