Bug 568431 - Remove the requirement for mCallbackPeriod from the generic media backend. r=doublec
author Matthew Gregan <kinetik@flim.org>
Mon, 31 May 2010 16:02:00 +1200
changeset 43445 1a6eec7899681cc62c59d94a7fd04962b5f4ac26
parent 43444 523c8b56ca16885fca928631415f1489daca21c9
child 43446 8cc8e86e725ac50c27c6c4be809b53b9a0758744
push id 13712
push user cpearce@mozilla.com
push date Thu, 10 Jun 2010 03:56:59 +0000
treeherder mozilla-central@8cc8e86e725a
reviewers doublec
bugs 568431
milestone 1.9.3a5pre
Bug 568431 - Remove the requirement for mCallbackPeriod from the generic media backend. r=doublec
content/media/nsBuiltinDecoderReader.cpp
content/media/nsBuiltinDecoderReader.h
content/media/nsBuiltinDecoderStateMachine.cpp
content/media/nsBuiltinDecoderStateMachine.h
content/media/nsMediaDecoder.h
content/media/ogg/nsOggCodecState.cpp
content/media/ogg/nsOggReader.cpp
content/media/ogg/nsOggReader.h
content/media/webm/nsWebMReader.cpp
content/media/webm/nsWebMReader.h
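
The patch replaces the reader-global mCallbackPeriod (a fixed wait derived
from a real or invented framerate) with an explicit per-frame end time:
VideoData gains an mEndTime field, and the state machine sleeps for each
frame's actual duration. A minimal sketch of the new timing model, using
simplified stand-in types (the real declarations are in
nsBuiltinDecoderReader.h below):

    // Sketch only: stand-ins for the patched VideoData timing fields.
    struct Frame {
      long long mTime;     // start time of frame in milliseconds
      long long mEndTime;  // end time of frame in milliseconds
    };

    // Fallback wait when only audio is playing (AUDIO_DURATION_MS below).
    const long long kAudioDurationMs = 40;

    // The wait between state machine iterations is now per-frame.
    long long NextWaitMs(const Frame* aFrame) {
      return aFrame ? aFrame->mEndTime - aFrame->mTime : kAudioDurationMs;
    }
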
--- a/content/media/nsBuiltinDecoderReader.cpp
+++ b/content/media/nsBuiltinDecoderReader.cpp
@@ -84,16 +84,17 @@ ValidatePlane(const VideoData::YCbCrBuff
          aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION &&
          aPlane.mStride > 0;
 }
 
 VideoData* VideoData::Create(nsVideoInfo& aInfo,
                              ImageContainer* aContainer,
                              PRInt64 aOffset,
                              PRInt64 aTime,
+                             PRInt64 aEndTime,
                              const YCbCrBuffer& aBuffer,
                              PRBool aKeyframe,
                              PRInt64 aTimecode)
 {
   if (!aContainer) {
     return nsnull;
   }
 
@@ -130,17 +131,17 @@ VideoData* VideoData::Create(nsVideoInfo
       picYLimit > PRUint32(aBuffer.mPlanes[0].mHeight))
   {
     // The specified picture dimensions can't be contained inside the video
     // frame, we'll stomp memory if we try to copy it. Fail.
     NS_WARNING("Overflowing picture rect");
     return nsnull;
   }
 
-  nsAutoPtr<VideoData> v(new VideoData(aOffset, aTime, aKeyframe, aTimecode));
+  nsAutoPtr<VideoData> v(new VideoData(aOffset, aTime, aEndTime, aKeyframe, aTimecode));
   // Currently our decoder only knows how to output to PLANAR_YCBCR
   // format.
   Image::Format format = Image::PLANAR_YCBCR;
   v->mImage = aContainer->CreateImage(&format, 1);
   if (!v->mImage) {
     return nsnull;
   }
   NS_ASSERTION(v->mImage->GetFormat() == Image::PLANAR_YCBCR,
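
Callers of VideoData::Create must now supply the frame's end time alongside
its start time. The hunk above also leans on the picture-rect containment
check behind the "Overflowing picture rect" warning; a self-contained sketch
of that style of overflow-safe check (PictureFitsInFrame is a hypothetical
helper, not part of the patch):

    #include <cstdint>

    // Sketch: the picture rect must fit inside the decoded frame; summing
    // in 64 bits keeps x + width from wrapping before the comparison.
    bool PictureFitsInFrame(uint32_t aPicX, uint32_t aPicWidth,
                            uint32_t aFrameWidth) {
      uint64_t limit = uint64_t(aPicX) + uint64_t(aPicWidth);
      return limit <= aFrameWidth;
    }
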
--- a/content/media/nsBuiltinDecoderReader.h
+++ b/content/media/nsBuiltinDecoderReader.h
@@ -50,36 +50,27 @@
 #include "mozilla/Monitor.h"
 
 class nsBuiltinDecoderStateMachine;
 
 // Stores info relevant to presenting media samples.
 class nsVideoInfo {
 public:
   nsVideoInfo()
-    : mFramerate(0.0),
-      mPixelAspectRatio(1.0),
-      mCallbackPeriod(1),
+    : mPixelAspectRatio(1.0),
       mAudioRate(0),
       mAudioChannels(0),
       mFrame(0,0),
       mHasAudio(PR_FALSE),
       mHasVideo(PR_FALSE)
   {}
 
-  // Frames per second.
-  float mFramerate;
-
   // Pixel aspect ratio, as stored in the metadata.
   float mPixelAspectRatio;
 
-  // Length of a video frame in milliseconds, or the callback period if
-  // there's no audio.
-  PRUint32 mCallbackPeriod;
-
   // Samples per second.
   PRUint32 mAudioRate;
 
   // Number of audio channels.
   PRUint32 mAudioChannels;
 
   // Dimensions of the video frame.
   nsIntSize mFrame;
@@ -178,75 +169,85 @@ public:
   // the frame of video data. Returns nsnull if an error occurs. This may
   // indicate that memory couldn't be allocated to create the VideoData
   // object, or it may indicate some problem with the input data (e.g.
   // negative stride).
   static VideoData* Create(nsVideoInfo& aInfo,
                            ImageContainer* aContainer,
                            PRInt64 aOffset,
                            PRInt64 aTime,
+                           PRInt64 aEndTime,
                            const YCbCrBuffer &aBuffer,
                            PRBool aKeyframe,
                            PRInt64 aTimecode);
 
   // Constructs a duplicate VideoData object. This intrinsically tells the
   // player that it does not need to update the displayed frame when this
   // frame is played; this frame is identical to the previous.
   static VideoData* CreateDuplicate(PRInt64 aOffset,
                                     PRInt64 aTime,
+                                    PRInt64 aEndTime,
                                     PRInt64 aTimecode)
   {
-    return new VideoData(aOffset, aTime, aTimecode);
+    return new VideoData(aOffset, aTime, aEndTime, aTimecode);
   }
 
   ~VideoData()
   {
     MOZ_COUNT_DTOR(VideoData);
   }
 
   // Approximate byte offset of the end of the frame in the media.
   PRInt64 mOffset;
 
   // Start time of frame in milliseconds.
   PRInt64 mTime;
 
+  // End time of frame in milliseconds.
+  PRInt64 mEndTime;
+
   // Codec specific internal time code. For Ogg based codecs this is the
   // granulepos.
   PRInt64 mTimecode;
 
   // This frame's image.
   nsRefPtr<Image> mImage;
 
   // When PR_TRUE, denotes that this frame is identical to the frame that
   // came before; it's a duplicate. mBuffer will be empty.
   PRPackedBool mDuplicate;
   PRPackedBool mKeyframe;
 
 public:
-  VideoData(PRInt64 aOffset, PRInt64 aTime, PRInt64 aTimecode)
+  VideoData(PRInt64 aOffset, PRInt64 aTime, PRInt64 aEndTime, PRInt64 aTimecode)
     : mOffset(aOffset),
       mTime(aTime),
+      mEndTime(aEndTime),
       mTimecode(aTimecode),
       mDuplicate(PR_TRUE),
       mKeyframe(PR_FALSE)
   {
     MOZ_COUNT_CTOR(VideoData);
+    NS_ASSERTION(aEndTime > aTime, "Frame must start before it ends.");
   }
 
   VideoData(PRInt64 aOffset,
             PRInt64 aTime,
+            PRInt64 aEndTime,
             PRBool aKeyframe,
             PRInt64 aTimecode)
     : mOffset(aOffset),
       mTime(aTime),
+      mEndTime(aEndTime),
       mTimecode(aTimecode),
       mDuplicate(PR_FALSE),
       mKeyframe(aKeyframe)
   {
     MOZ_COUNT_CTOR(VideoData);
+    NS_ASSERTION(aEndTime > aTime, "Frame must start before it ends.");
   }
 
 };
 
 // Thread and type safe wrapper around nsDeque.
 template <class T>
 class MediaQueueDeallocator : public nsDequeFunctor {
   virtual void* operator() (void* anObject) {
--- a/content/media/nsBuiltinDecoderStateMachine.cpp
+++ b/content/media/nsBuiltinDecoderStateMachine.cpp
@@ -87,32 +87,35 @@ const unsigned AMPLE_AUDIO_MS = 2000;
 // which is at or after the current playback position.
 //
 // Also if the decode catches up with the end of the downloaded data,
 // we'll only go into BUFFERING state if we've got audio and have queued
 // less than LOW_AUDIO_MS of audio, or if we've got video and have queued
 // less than LOW_VIDEO_FRAMES frames.
 static const PRUint32 LOW_VIDEO_FRAMES = 1;
 
+// Arbitrary "frame duration" when playing only audio.
+static const int AUDIO_DURATION_MS = 40;
+
 nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDecoder,
                                                            nsBuiltinDecoderReader* aReader) :
   mDecoder(aDecoder),
   mState(DECODER_STATE_DECODING_METADATA),
   mAudioMonitor("media.audiostream"),
   mCbCrSize(0),
   mPlayDuration(0),
   mBufferingEndOffset(0),
   mStartTime(-1),
   mEndTime(-1),
   mSeekTime(0),
   mReader(aReader),
   mCurrentFrameTime(0),
   mAudioStartTime(-1),
   mAudioEndTime(-1),
-  mVideoFrameTime(-1),
+  mVideoFrameEndTime(-1),
   mVolume(1.0),
   mSeekable(PR_TRUE),
   mPositionChangeQueued(PR_FALSE),
   mAudioCompleted(PR_FALSE),
   mBufferExhausted(PR_FALSE),
   mGotDurationFromHeader(PR_FALSE),
   mStopDecodeThreads(PR_TRUE)
 {
@@ -631,17 +634,17 @@ void nsBuiltinDecoderStateMachine::Decod
     mDecoder->GetMonitor().NotifyAll();
   }
 }
 
 void nsBuiltinDecoderStateMachine::ResetPlayback()
 {
   NS_ASSERTION(IsCurrentThread(mDecoder->mStateMachineThread),
                "Should be on state machine thread.");
-  mVideoFrameTime = -1;
+  mVideoFrameEndTime = -1;
   mAudioStartTime = -1;
   mAudioEndTime = -1;
   mAudioCompleted = PR_FALSE;
 }
 
 void nsBuiltinDecoderStateMachine::Seek(float aTime)
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
@@ -889,17 +892,17 @@ nsresult nsBuiltinDecoderStateMachine::R
               mPlayDuration = TimeDuration::FromMilliseconds(audio->mTime);
             }
             if (HasVideo()) {
               nsAutoPtr<VideoData> video(mReader->mVideoQueue.PeekFront());
               if (video) {
                 RenderVideoFrame(video);
                 if (!audio) {
                   NS_ASSERTION(video->mTime <= seekTime &&
-                               seekTime <= video->mTime + mReader->GetInfo().mCallbackPeriod,
+                               seekTime <= video->mEndTime,
                                "Seek target should lie inside the first frame after seek");
                   mPlayDuration = TimeDuration::FromMilliseconds(seekTime);
                 }
               }
               mReader->mVideoQueue.PopFront();
             }
             UpdatePlaybackPosition(seekTime);
           }
@@ -985,30 +988,30 @@ nsresult nsBuiltinDecoderStateMachine::R
         // end of the media, and so that we update the readyState.
         do {
           AdvanceFrame();
         } while (mState == DECODER_STATE_COMPLETED &&
                  (mReader->mVideoQueue.GetSize() > 0 ||
                  (HasAudio() && !mAudioCompleted)));
 
         if (mAudioStream) {
-          // Close the audop stream so that next time audio is used a new stream
+          // Close the audio stream so that next time audio is used a new stream
           // is created. The StopPlayback call also resets the IsPlaying() state
           // so audio is restarted correctly.
           StopPlayback(AUDIO_SHUTDOWN);
         }
 
         if (mState != DECODER_STATE_COMPLETED)
           continue;
 
         LOG(PR_LOG_DEBUG, ("Shutting down the state machine thread"));
         StopDecodeThreads();
 
         if (mDecoder->GetState() == nsBuiltinDecoder::PLAY_STATE_PLAYING) {
-          PRInt64 videoTime = HasVideo() ? (mVideoFrameTime + mReader->GetInfo().mCallbackPeriod) : 0;
+          PRInt64 videoTime = HasVideo() ? mVideoFrameEndTime : 0;
           PRInt64 clockTime = NS_MAX(mEndTime, NS_MAX(videoTime, GetAudioClock()));
           UpdatePlaybackPosition(clockTime);
           {
             MonitorAutoExit exitMon(mDecoder->GetMonitor());
             nsCOMPtr<nsIRunnable> event =
               NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::PlaybackEnded);
             NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
           }
@@ -1058,21 +1061,22 @@ void nsBuiltinDecoderStateMachine::Advan
   // When it's time to display a frame, decode the frame and display it.
   if (mDecoder->GetState() == nsBuiltinDecoder::PLAY_STATE_PLAYING) {
     if (!IsPlaying()) {
       StartPlayback();
       mDecoder->GetMonitor().NotifyAll();
     }
 
     if (HasAudio() && mAudioStartTime == -1 && !mAudioCompleted) {
-      // We've got audio (so we should sync off the audio clock), but we've
-      // not played a sample on the audio thread, so we can't get a time
-      // from the audio clock. Just wait and then return, to give the audio
-      // clock time to tick.
-      Wait(mReader->GetInfo().mCallbackPeriod);
+      // We've got audio (so we should sync off the audio clock), but we've not
+      // played a sample on the audio thread, so we can't get a time from the
+      // audio clock. Just wait and then return, to give the audio clock time
+      // to tick.  This should really wait for a specific signal from the audio
+      // thread rather than polling after a sleep.  See bug 568431 comment 4.
+      Wait(AUDIO_DURATION_MS);
       return;
     }
 
     // Determine the clock time. If we've got audio, and we've not reached
     // the end of the audio, use the audio clock. However if we've finished
     // audio, or don't have audio, use the system clock.
     PRInt64 clock_time = -1;
     PRInt64 audio_time = GetAudioClock();
@@ -1090,61 +1094,63 @@ void nsBuiltinDecoderStateMachine::Advan
       clock_time = NS_MAX(mCurrentFrameTime, clock_time) + mStartTime;
     }
 
     NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
     nsAutoPtr<VideoData> videoData;
     if (mReader->mVideoQueue.GetSize() > 0) {
       VideoData* data = mReader->mVideoQueue.PeekFront();
       while (clock_time >= data->mTime) {
-        mVideoFrameTime = data->mTime;
+        mVideoFrameEndTime = data->mEndTime;
         videoData = data;
         mReader->mVideoQueue.PopFront();
         mDecoder->UpdatePlaybackOffset(data->mOffset);
         if (mReader->mVideoQueue.GetSize() == 0)
           break;
         data = mReader->mVideoQueue.PeekFront();
       }
     }
 
+    PRInt64 frameDuration = AUDIO_DURATION_MS;
     if (videoData) {
       // Decode one frame and display it
       NS_ASSERTION(videoData->mTime >= mStartTime, "Should have positive frame time");
       {
         MonitorAutoExit exitMon(mDecoder->GetMonitor());
         // If we have video, we want to increment the clock in steps of the frame
         // duration.
         RenderVideoFrame(videoData);
       }
       mDecoder->GetMonitor().NotifyAll();
+      frameDuration = videoData->mEndTime - videoData->mTime;
       videoData = nsnull;
     }
 
     // Cap the current time to the larger of the audio and video end time.
     // This ensures that if we're running off the system clock, we don't
     // advance the clock to after the media end time.
-    if (mVideoFrameTime != -1 || mAudioEndTime != -1) {
+    if (mVideoFrameEndTime != -1 || mAudioEndTime != -1) {
       // These will be non -1 if we've displayed a video frame, or played an audio sample.
-      clock_time = NS_MIN(clock_time, NS_MAX(mVideoFrameTime, mAudioEndTime));
+      clock_time = NS_MIN(clock_time, NS_MAX(mVideoFrameEndTime, mAudioEndTime));
       if (clock_time - mStartTime > mCurrentFrameTime) {
         // Only update the playback position if the clock time is greater
         // than the previous playback position. The audio clock can
         // sometimes report a time less than its previously reported in
         // some situations, and we need to gracefully handle that.
         UpdatePlaybackPosition(clock_time);
       }
     }
 
     // If the number of audio/video samples queued has changed, either by
     // this function popping and playing a video sample, or by the audio
     // thread popping and playing an audio sample, we may need to update our
     // ready state. Post an update to do so.
     UpdateReadyState();
 
-    Wait(mReader->GetInfo().mCallbackPeriod);
+    Wait(frameDuration);
   } else {
     if (IsPlaying()) {
       StopPlayback(AUDIO_PAUSE);
       mDecoder->GetMonitor().NotifyAll();
     }
 
     if (mState == DECODER_STATE_DECODING ||
         mState == DECODER_STATE_COMPLETED) {
@@ -1260,11 +1266,9 @@ void nsBuiltinDecoderStateMachine::LoadM
 
   if (!info.mHasVideo && !info.mHasAudio) {
     mState = DECODER_STATE_SHUTDOWN;      
     nsCOMPtr<nsIRunnable> event =
       NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::DecodeError);
     NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
     return;
   }
-
-  LOG(PR_LOG_DEBUG, ("%p Callback Period: %u", mDecoder, info.mCallbackPeriod));
 }
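
Two consumers of the new end times appear above: on completion, the final
position is the last frame's end time (mVideoFrameEndTime) rather than its
start time plus a callback period, and during playback the clock is capped
with end times. A self-contained sketch of both computations (stand-in
names, long long for PRInt64):

    #include <algorithm>

    // Sketch: final playback position when the state machine completes.
    long long CompletedPosition(bool aHasVideo, long long aVideoEndMs,
                                long long aMediaEndMs,
                                long long aAudioClockMs) {
      long long videoTime = aHasVideo ? aVideoEndMs : 0;
      return std::max(aMediaEndMs, std::max(videoTime, aAudioClockMs));
    }

    // Sketch: cap the running clock at the larger of the audio and video
    // end times, so a system-clock fallback can't pass the media end.
    long long CapClockMs(long long aClockMs, long long aVideoEndMs,
                         long long aAudioEndMs) {
      return std::min(aClockMs, std::max(aVideoEndMs, aAudioEndMs));
    }
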
--- a/content/media/nsBuiltinDecoderStateMachine.h
+++ b/content/media/nsBuiltinDecoderStateMachine.h
@@ -391,19 +391,19 @@ protected:
   // Accessed on audio and state machine thread. Synchronized by decoder monitor.
   PRInt64 mAudioStartTime;
 
   // The end time of the last audio sample that's been pushed onto the audio
   // hardware. This will approximately be the end time of the audio stream,
   // unless another sample is pushed to the hardware.
   PRInt64 mAudioEndTime;
 
-  // The presentation time of the last video frame which has been displayed.
+  // The presentation end time of the last video frame which has been displayed.
   // Accessed from the state machine thread.
-  PRInt64 mVideoFrameTime;
+  PRInt64 mVideoFrameEndTime;
   
   // Volume of playback. 0.0 = muted. 1.0 = full volume. Read/Written
   // from the state machine and main threads. Synchronised via decoder
   // monitor.
   float mVolume;
 
   // PR_TRUE if the media resource can be seeked. Accessed from the state
   // machine and main threads. Synchronised via decoder monitor.
--- a/content/media/nsMediaDecoder.h
+++ b/content/media/nsMediaDecoder.h
@@ -273,20 +273,16 @@ protected:
   // values while they are out of sync (width changed but
   // not height yet, etc).
   // Backends that are updating the height, width or writing
   // to the RGB buffer must obtain this lock first to ensure that
   // the video element does not use video data or sizes that are
   // in the midst of being changed.
   PRLock* mVideoUpdateLock;
 
-  // Framerate of video being displayed in the element
-  // expressed in numbers of frames per second.
-  float mFramerate;
-
   // Pixel aspect ratio (ratio of the pixel width to pixel height)
   float mPixelAspectRatio;
 
   // Has our size changed since the last repaint?
   PRPackedBool mSizeChanged;
 
   // True if the decoder is being shutdown. At this point all events that
   // are currently queued need to return immediately to prevent javascript
--- a/content/media/ogg/nsOggCodecState.cpp
+++ b/content/media/ogg/nsOggCodecState.cpp
@@ -143,17 +143,16 @@ PRBool nsOggCodecState::PageInFromBuffer
   return PR_TRUE;
 }
 
 nsTheoraState::nsTheoraState(ogg_page* aBosPage) :
   nsOggCodecState(aBosPage),
   mSetup(0),
   mCtx(0),
   mFrameDuration(0),
-  mFrameRate(0),
   mPixelAspectRatio(0)
 {
   MOZ_COUNT_CTOR(nsTheoraState);
   th_info_init(&mInfo);
   th_comment_init(&mComment);
 }
 
 nsTheoraState::~nsTheoraState() {
@@ -170,19 +169,16 @@ PRBool nsTheoraState::Init() {
   mCtx = th_decode_alloc(&mInfo, mSetup);
   if (mCtx == NULL) {
     return mActive = PR_FALSE;
   }
 
   PRInt64 n = mInfo.fps_numerator;
   PRInt64 d = mInfo.fps_denominator;
 
-  mFrameRate = (n == 0 || d == 0) ?
-    0.0f : static_cast<float>(n) / static_cast<float>(d);
-
   PRInt64 f;
   if (!MulOverflow(1000, d, f)) {
     return mActive = PR_FALSE;
   }
   f /= n;
   if (f > PR_UINT32_MAX) {
     return mActive = PR_FALSE;
   }
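
nsTheoraState keeps mFrameDuration (computed once in Init from the stream's
fps fraction, guarded by the MulOverflow check above) and drops the unused
mFrameRate; the Ogg reader stamps each frame's end time as time +
mFrameDuration. A self-contained sketch of that computation (FrameDurationMs
is a hypothetical helper mirroring Init):

    #include <cstdint>

    // Sketch: per-frame duration in ms from Theora's frame rate fraction,
    // 1000 * fps_denominator / fps_numerator, with Init's overflow checks.
    bool FrameDurationMs(int64_t aFpsNum, int64_t aFpsDenom,
                         uint32_t& aOutMs) {
      if (aFpsNum <= 0 || aFpsDenom <= 0)
        return false;                  // invalid or missing frame rate
      if (aFpsDenom > INT64_MAX / 1000)
        return false;                  // the patch uses MulOverflow here
      int64_t ms = (1000 * aFpsDenom) / aFpsNum;
      if (ms > UINT32_MAX)
        return false;                  // must fit the PRUint32 member
      aOutMs = static_cast<uint32_t>(ms);
      return true;
    }
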
--- a/content/media/ogg/nsOggReader.cpp
+++ b/content/media/ogg/nsOggReader.cpp
@@ -60,26 +60,21 @@ extern PRLogModuleInfo* gBuiltinDecoderL
 #define LOG(type, msg)
 #define SEEK_LOG(type, msg)
 #endif
 
 // Chunk size to read when reading Ogg files. Average Ogg page length
 // is about 4300 bytes, so we read the file in chunks larger than that.
 static const int PAGE_STEP = 8192;
 
-// The frame rate to use if there is no video data in the resource to
-// be played.
-#define AUDIO_FRAME_RATE 25.0
-
 nsOggReader::nsOggReader(nsBuiltinDecoder* aDecoder)
   : nsBuiltinDecoderReader(aDecoder),
     mTheoraState(nsnull),
     mVorbisState(nsnull),
     mPageOffset(0),
-    mCallbackPeriod(0),
     mTheoraGranulepos(-1),
     mVorbisGranulepos(-1)
 {
   MOZ_COUNT_CTOR(nsOggReader);
 }
 
 nsOggReader::~nsOggReader()
 {
@@ -246,40 +241,35 @@ nsresult nsOggReader::ReadMetadata()
       s->Deactivate();
     }
   }
 
   // Initialize the first Theora and Vorbis bitstreams. According to the
   // Theora spec these can be considered the 'primary' bitstreams for playback.
   // Extract the metadata needed from these streams.
-  // Set a default callback period for if we have no video data
-  mCallbackPeriod = 1000 / AUDIO_FRAME_RATE;
   if (mTheoraState) {
     if (mTheoraState->Init()) {
-      mCallbackPeriod = mTheoraState->mFrameDuration;
       gfxIntSize sz(mTheoraState->mInfo.pic_width,
                     mTheoraState->mInfo.pic_height);
       mDecoder->SetVideoData(sz, mTheoraState->mPixelAspectRatio, nsnull);
     } else {
       mTheoraState = nsnull;
     }
   }
   if (mVorbisState) {
     mVorbisState->Init();
   }
 
   mInfo.mHasAudio = HasAudio();
   mInfo.mHasVideo = HasVideo();
-  mInfo.mCallbackPeriod = mCallbackPeriod;
   if (HasAudio()) {
     mInfo.mAudioRate = mVorbisState->mInfo.rate;
     mInfo.mAudioChannels = mVorbisState->mInfo.channels;
   }
   if (HasVideo()) {
-    mInfo.mFramerate = mTheoraState->mFrameRate;
     mInfo.mPixelAspectRatio = mTheoraState->mPixelAspectRatio;
     mInfo.mPicture.width = mTheoraState->mInfo.pic_width;
     mInfo.mPicture.height = mTheoraState->mInfo.pic_height;
     mInfo.mPicture.x = mTheoraState->mInfo.pic_x;
     mInfo.mPicture.y = mTheoraState->mInfo.pic_y;
     mInfo.mFrame.width = mTheoraState->mInfo.frame_width;
     mInfo.mFrame.height = mTheoraState->mInfo.frame_height;
   }
@@ -477,16 +468,17 @@ nsresult nsOggReader::DecodeTheora(nsTAr
   if (ret != 0 && ret != TH_DUPFRAME) {
     return NS_ERROR_FAILURE;
   }
   PRInt64 time = (aPacket->granulepos != -1)
     ? mTheoraState->StartTime(aPacket->granulepos) : -1;
   if (ret == TH_DUPFRAME) {
     aFrames.AppendElement(VideoData::CreateDuplicate(mPageOffset,
                                                      time,
+                                                     time + mTheoraState->mFrameDuration,
                                                      aPacket->granulepos));
   } else if (ret == 0) {
     th_ycbcr_buffer buffer;
     ret = th_decode_ycbcr_out(mTheoraState->mCtx, buffer);
     NS_ASSERTION(ret == 0, "th_decode_ycbcr_out failed");
     PRBool isKeyframe = th_packet_iskeyframe(aPacket) == 1;
     VideoData::YCbCrBuffer b;
     for (PRUint32 i=0; i < 3; ++i) {
@@ -494,16 +486,17 @@ nsresult nsOggReader::DecodeTheora(nsTAr
       b.mPlanes[i].mHeight = buffer[i].height;
       b.mPlanes[i].mWidth = buffer[i].width;
       b.mPlanes[i].mStride = buffer[i].stride;
     }
     VideoData *v = VideoData::Create(mInfo,
                                      mDecoder->GetImageContainer(),
                                      mPageOffset,
                                      time,
+                                     time + mTheoraState->mFrameDuration,
                                      b,
                                      isKeyframe,
                                      aPacket->granulepos);
     if (!v) {
       // There may be other reasons for this error, but for
       // simplicity just assume the worst case: out of memory.
       NS_WARNING("Failed to allocate memory for video frame");
       Clear(aFrames);
@@ -1040,39 +1033,37 @@ nsresult nsOggReader::Seek(PRInt64 aTarg
       NS_ASSERTION(mTheoraGranulepos == -1, "SeekBisection must reset Theora decode");
       NS_ASSERTION(mVorbisGranulepos == -1, "SeekBisection must reset Vorbis decode");
     }
   }
 
   // Decode forward to the seek target frame. Start with video, if we have it.
   // We should pass a keyframe while doing this.
   if (HasVideo()) {
-    nsAutoPtr<VideoData> video;
     PRBool eof = PR_FALSE;
     PRInt64 startTime = -1;
-    video = nsnull;
     while (HasVideo() && !eof) {
       while (mVideoQueue.GetSize() == 0 && !eof) {
         PRBool skip = PR_FALSE;
         eof = !DecodeVideoFrame(skip, 0);
         {
           MonitorAutoExit exitReaderMon(mMonitor);
           MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
           if (mDecoder->GetDecodeState() == nsBuiltinDecoderStateMachine::DECODER_STATE_SHUTDOWN) {
             return NS_ERROR_FAILURE;
           }
         }
       }
       if (mVideoQueue.GetSize() == 0) {
         break;
       }
-      video = mVideoQueue.PeekFront();
+      nsAutoPtr<VideoData> video(mVideoQueue.PeekFront());
       // If the frame end time is less than the seek target, we won't want
       // to display this frame after the seek, so discard it.
-      if (video && video->mTime + mCallbackPeriod < aTarget) {
+      if (video && video->mEndTime < aTarget) {
         if (startTime == -1) {
           startTime = video->mTime;
         }
         mVideoQueue.PopFront();
         video = nsnull;
       } else {
         video.forget();
         break;
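
Seek now decodes forward and discards any frame that ends before the seek
target; with an explicit mEndTime, the "would this frame still be on screen
at the target?" test no longer needs mCallbackPeriod (or, in the WebM reader
below, a second buffered frame). A self-contained sketch of that pruning:

    #include <deque>

    struct Frame { long long mTime, mEndTime; };  // stand-in for VideoData

    // Sketch: drop decoded frames that finish before the seek target; the
    // first frame that straddles or follows the target stays queued.
    void DropFramesBefore(std::deque<Frame>& aQueue, long long aTargetMs) {
      while (!aQueue.empty() && aQueue.front().mEndTime < aTargetMs)
        aQueue.pop_front();
    }
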
--- a/content/media/ogg/nsOggReader.h
+++ b/content/media/ogg/nsOggReader.h
@@ -121,19 +121,16 @@ private:
 
   // Ogg decoding state.
   ogg_sync_state mOggState;
 
   // The offset of the end of the last page we've read, or the start of
   // the page we're about to read.
   PRInt64 mPageOffset;
 
-  // Number of milliseconds of data video/audio data held in a frame.
-  PRUint32 mCallbackPeriod;
-
   // The granulepos of the last decoded Theora frame.
   PRInt64 mTheoraGranulepos;
 
   // The granulepos of the last decoded Vorbis sample.
   PRInt64 mVorbisGranulepos;
 };
 
 #endif
--- a/content/media/webm/nsWebMReader.cpp
+++ b/content/media/webm/nsWebMReader.cpp
@@ -56,24 +56,17 @@ extern PRLogModuleInfo* gBuiltinDecoderL
 #else
 #define SEEK_LOG(type, msg)
 #endif
 #else
 #define LOG(type, msg)
 #define SEEK_LOG(type, msg)
 #endif
 
-// Nestegg doesn't expose the framerate and the framerate is optional
-// anyway. We use a default value - the backend playback code
-// only uses it for a 'maximum wait time' not actual frame display time
-// so an estimate is fine. A value higher than a standard framerate is
-// used to ensure that backend Wait's don't take longer than frame
-// display. Bug 568431 should remove the need for 'faking' a framerate in
-// the future.
-#define DEFAULT_FRAMERATE 32.0
+static const unsigned NS_PER_MS = 1000000;
 
 // Functions for reading and seeking using nsMediaStream required for
 // nestegg_io. The 'user data' passed to these functions is the
 // decoder from which the media stream is obtained.
 static int webm_read(void *aBuffer, size_t aLength, void *aUserData)
 {
   NS_ASSERTION(aUserData, "aUserData must point to a valid nsBuiltinDecoder");
   nsBuiltinDecoder* decoder = reinterpret_cast<nsBuiltinDecoder*>(aUserData);
@@ -200,17 +193,17 @@ nsresult nsWebMReader::ReadMetadata()
     return NS_ERROR_FAILURE;
   }
 
   uint64_t duration = 0;
   r = nestegg_duration(mContext, &duration);
   if (r == 0) {
     MonitorAutoExit exitReaderMon(mMonitor);
     MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
-    mDecoder->GetStateMachine()->SetDuration(duration / 1000000);
+    mDecoder->GetStateMachine()->SetDuration(duration / NS_PER_MS);
   }
 
   unsigned int ntracks = 0;
   r = nestegg_track_count(mContext, &ntracks);
   if (r == -1) {
     Cleanup();
     return NS_ERROR_FAILURE;
   }
@@ -251,36 +244,29 @@ nsresult nsWebMReader::ReadMetadata()
         mInfo.mPicture.width = params.width;
         mInfo.mPicture.height = params.height;
       }
 
       // mDataOffset is not used by the WebM backend.
       // See bug 566779 for a suggestion to refactor
       // and remove it.
       mInfo.mDataOffset = -1;
-
-      mInfo.mFramerate = DEFAULT_FRAMERATE;
-      mInfo.mCallbackPeriod = 1000 / mInfo.mFramerate;
     }
     else if (!mHasAudio && type == NESTEGG_TRACK_AUDIO) {
       nestegg_audio_params params;
       r = nestegg_track_audio_params(mContext, track, &params);
       if (r == -1) {
         Cleanup();
         return NS_ERROR_FAILURE;
       }
 
       mAudioTrack = track;
       mHasAudio = PR_TRUE;
       mInfo.mHasAudio = PR_TRUE;
 
-      if (!mInfo.mHasVideo) {
-        mInfo.mCallbackPeriod = 1000 / DEFAULT_FRAMERATE;
-      }
-
       // Get the Vorbis header data
       unsigned int nheaders = 0;
       r = nestegg_track_codec_data_count(mContext, track, &nheaders);
       if (r == -1 || nheaders != 3) {
         Cleanup();
         return NS_ERROR_FAILURE;
       }
 
@@ -355,17 +341,17 @@ PRBool nsWebMReader::DecodeAudioPacket(n
 
   uint64_t tstamp = 0;
   r = nestegg_packet_tstamp(aPacket, &tstamp);
   if (r == -1) {
     nestegg_free_packet(aPacket);
     return PR_FALSE;
   }
 
-  PRUint64 tstamp_ms = tstamp / 1000000;
+  PRUint64 tstamp_ms = tstamp / NS_PER_MS;
   for (PRUint32 i = 0; i < count; ++i) {
     unsigned char* data;
     size_t length;
     r = nestegg_packet_data(aPacket, i, &data, &length);
     if (r == -1) {
       nestegg_free_packet(aPacket);
       return PR_FALSE;
     }
@@ -492,17 +478,17 @@ PRBool nsWebMReader::DecodeAudioData()
     mAudioQueue.Finish();
     return PR_FALSE;
   }
 
   return DecodeAudioPacket(packet);
 }
 
 PRBool nsWebMReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
-                                     PRInt64 aTimeThreshold)
+                                      PRInt64 aTimeThreshold)
 {
   MonitorAutoEnter mon(mMonitor);
   NS_ASSERTION(mDecoder->OnStateMachineThread() || mDecoder->OnDecodeThread(),
                "Should be on state machine or decode thread.");
   int r = 0;
   nestegg_packet* packet = NextPacket(VIDEO);
 
   if (!packet) {
@@ -526,17 +512,39 @@ PRBool nsWebMReader::DecodeVideoFrame(PR
 
   uint64_t tstamp = 0;
   r = nestegg_packet_tstamp(packet, &tstamp);
   if (r == -1) {
     nestegg_free_packet(packet);
     return PR_FALSE;
   }
 
-  PRInt64 tstamp_ms = tstamp / 1000000;
+  // The end time of this frame is the start time of the next frame.  Fetch
+  // the timestamp of the next packet for this track.  If we've reached the
+  // end of the stream, use the file's duration as the end time of this
+  // video frame.
+  uint64_t next_tstamp = 0;
+  {
+    nestegg_packet* next_packet = NextPacket(VIDEO);
+    if (next_packet) {
+      r = nestegg_packet_tstamp(next_packet, &next_tstamp);
+      if (r == -1) {
+        nestegg_free_packet(next_packet);
+        return PR_FALSE;
+      }
+      mVideoPackets.PushFront(next_packet);
+    } else {
+      r = nestegg_duration(mContext, &next_tstamp);
+      if (r == -1) {
+        return PR_FALSE;
+      }
+    }
+  }
+
+  PRInt64 tstamp_ms = tstamp / NS_PER_MS;
   for (PRUint32 i = 0; i < count; ++i) {
     unsigned char* data;
     size_t length;
     r = nestegg_packet_data(packet, i, &data, &length);
     if (r == -1) {
       nestegg_free_packet(packet);
       return PR_FALSE;
     }
@@ -566,19 +575,19 @@ PRBool nsWebMReader::DecodeVideoFrame(PR
       continue;
     }
 
     vpx_codec_iter_t  iter = NULL;
     vpx_image_t      *img;
 
     while((img = vpx_codec_get_frame(&mVP8, &iter))) {
       NS_ASSERTION(mInfo.mPicture.width == static_cast<PRInt32>(img->d_w), 
-        "WebM picture width from header does not match decoded frame");
+                   "WebM picture width from header does not match decoded frame");
       NS_ASSERTION(mInfo.mPicture.height == static_cast<PRInt32>(img->d_h),
-        "WebM picture height from header does not match decoded frame");
+                   "WebM picture height from header does not match decoded frame");
       NS_ASSERTION(img->fmt == IMG_FMT_I420, "WebM image format is not I420");
 
       // Chroma shifts are rounded down as per the decoding examples in the VP8 SDK
       VideoData::YCbCrBuffer b;
       b.mPlanes[0].mData = img->planes[0];
       b.mPlanes[0].mStride = img->stride[0];
       b.mPlanes[0].mHeight = img->d_h;
       b.mPlanes[0].mWidth = img->d_w;
@@ -592,16 +601,17 @@ PRBool nsWebMReader::DecodeVideoFrame(PR
       b.mPlanes[2].mStride = img->stride[2];
       b.mPlanes[2].mHeight = img->d_h >> img->y_chroma_shift;
       b.mPlanes[2].mWidth = img->d_w >> img->x_chroma_shift;
   
       VideoData *v = VideoData::Create(mInfo,
                                        mDecoder->GetImageContainer(),
                                        -1,
                                        tstamp_ms,
+                                       next_tstamp / NS_PER_MS,
                                        b,
                                        si.is_kf,
                                        -1);
       if (!v) {
         nestegg_free_packet(packet);
         return PR_FALSE;
       }
       mVideoQueue.Push(v);
@@ -616,49 +626,47 @@ nsresult nsWebMReader::Seek(PRInt64 aTar
 {
   MonitorAutoEnter mon(mMonitor);
   NS_ASSERTION(mDecoder->OnStateMachineThread(),
                "Should be on state machine thread.");
   LOG(PR_LOG_DEBUG, ("%p About to seek to %lldms", mDecoder, aTarget));
   if (NS_FAILED(ResetDecode())) {
     return NS_ERROR_FAILURE;
   }
-  int r = nestegg_track_seek(mContext, 0, aTarget * 1000000);
+  int r = nestegg_track_seek(mContext, 0, aTarget * NS_PER_MS);
   if (r != 0) {
     return NS_ERROR_FAILURE;
   }
   if (HasVideo()) {
     PRBool eof = PR_FALSE;
     PRInt64 startTime = -1;
     while (HasVideo() && !eof) {
-      while (mVideoQueue.GetSize() < 2 && !eof) {
+      while (mVideoQueue.GetSize() == 0 && !eof) {
         PRBool skip = PR_FALSE;
         eof = !DecodeVideoFrame(skip, 0);
         MonitorAutoExit exitReaderMon(mMonitor);
         MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
         if (mDecoder->GetDecodeState() == nsBuiltinDecoderStateMachine::DECODER_STATE_SHUTDOWN) {
           return NS_ERROR_FAILURE;
         }
       }
-      if (mVideoQueue.GetSize() < 2) {
+      if (mVideoQueue.GetSize() == 0) {
         break;
       }
-      nsAutoPtr<VideoData> video(mVideoQueue.PopFront());
-      nsAutoPtr<VideoData> videoNext(mVideoQueue.PeekFront());
+      nsAutoPtr<VideoData> video(mVideoQueue.PeekFront());
       // If the frame end time is less than the seek target, we won't want
       // to display this frame after the seek, so discard it.
-      if (video && videoNext && videoNext->mTime < aTarget) {
+      if (video && video->mEndTime < aTarget) {
         if (startTime == -1) {
           startTime = video->mTime;
         }
+        mVideoQueue.PopFront();
         video = nsnull;
-        videoNext.forget();
       } else {
-        videoNext.forget();
-        mVideoQueue.PushFront(video.forget());
+        video.forget();
         break;
       }
     }
     SEEK_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld", startTime));
   }
   return NS_OK;
 }
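
WebM packets carry only a start timestamp, so the reader finds a frame's end
time by peeking at the next packet on the track (falling back to the file's
duration at end of stream) and then returns that packet to the queue via the
new PacketQueue::PushFront below. A self-contained sketch of the lookahead,
with a plain deque standing in for PacketQueue and nestegg:

    #include <cstdint>
    #include <deque>

    struct Packet { uint64_t tstamp_ns; };  // stand-in for nestegg_packet

    // Sketch: a frame's end time is the next packet's start timestamp, or
    // the stream duration when nothing follows. The real code pops via
    // NextPacket and hands the packet back with PushFront.
    uint64_t NextFrameEndNs(const std::deque<Packet*>& aQueue,
                            uint64_t aDurationNs) {
      if (aQueue.empty())
        return aDurationNs;            // end of stream
      return aQueue.front()->tstamp_ns;
    }
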
 
--- a/content/media/webm/nsWebMReader.h
+++ b/content/media/webm/nsWebMReader.h
@@ -72,16 +72,20 @@ class PacketQueue : private nsDeque {
   inline PRInt32 GetSize() { 
     return nsDeque::GetSize();
   }
   
   inline void Push(nestegg_packet* aItem) {
     nsDeque::Push(aItem);
   }
   
+  inline void PushFront(nestegg_packet* aItem) {
+    nsDeque::PushFront(aItem);
+  }
+
   inline nestegg_packet* PopFront() {
     return static_cast<nestegg_packet*>(nsDeque::PopFront());
   }
   
   void Reset() {
     while (GetSize() > 0) {
       nestegg_free_packet(PopFront());
     }