Bug 641718 - Backout 44d43f095a4f. r=backout
author Chris Pearce <chris@pearce.org.nz>
Fri, 01 Apr 2011 13:02:20 +1300
changeset 64570 c1553501c4966df32d22dcd117532d62bdfe9731
parent 64569 9d3277cdd833297b2fb7833b9bae39b81f6af366
child 64571 943bf5a2714cf72f3ffa888df95df713736976a9
push id unknown
push user unknown
push date unknown
reviewers backout
bugs 641718
milestone 2.2a1pre
Bug 641718 - Backout 44d43f095a4f. r=backout
content/html/content/src/nsHTMLMediaElement.cpp
content/media/VideoUtils.cpp
content/media/VideoUtils.h
content/media/nsAudioAvailableEventManager.cpp
content/media/nsAudioStream.cpp
content/media/nsAudioStream.h
content/media/nsBuiltinDecoder.cpp
content/media/nsBuiltinDecoder.h
content/media/nsBuiltinDecoderReader.h
content/media/nsBuiltinDecoderStateMachine.cpp
content/media/nsBuiltinDecoderStateMachine.h
content/media/nsMediaCache.cpp
content/media/nsMediaDecoder.h
content/media/nsMediaStream.cpp
content/media/nsMediaStream.h
content/media/ogg/nsOggCodecState.cpp
content/media/ogg/nsOggCodecState.h
content/media/ogg/nsOggReader.cpp
content/media/ogg/nsOggReader.h
content/media/raw/nsRawReader.cpp
content/media/test/test_seekLies.html
content/media/wave/nsWaveReader.cpp
content/media/wave/nsWaveReader.h
content/media/webm/nsWebMBufferedParser.cpp
content/media/webm/nsWebMReader.cpp
content/media/webm/nsWebMReader.h
--- a/content/html/content/src/nsHTMLMediaElement.cpp
+++ b/content/html/content/src/nsHTMLMediaElement.cpp
@@ -1815,17 +1815,17 @@ nsresult nsHTMLMediaElement::InitializeD
   LOG(PR_LOG_DEBUG, ("%p Cloned decoder %p from %p", this, decoder.get(), aOriginal));
 
   if (!decoder->Init(this)) {
     return NS_ERROR_FAILURE;
   }
 
   double duration = aOriginal->GetDuration();
   if (duration >= 0) {
-    decoder->SetDuration(duration);
+    decoder->SetDuration(PRInt64(NS_round(duration * 1000)));
     decoder->SetSeekable(aOriginal->GetSeekable());
   }
 
   nsMediaStream* stream = originalStream->CloneData(decoder);
   if (!stream) {
     return NS_ERROR_FAILURE;
   }
 
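The hand-off above crosses a unit boundary: the element reports duration in seconds as a double, while the cloned decoder's SetDuration() now takes integer milliseconds, hence the NS_round(duration * 1000) at the call site. A minimal standalone sketch of that conversion, with std::llround standing in for NS_round (values and names here are illustrative, not from the patch):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // Seconds (double) to integer milliseconds, rounded to nearest,
  // mirroring PRInt64(NS_round(duration * 1000)) in the hunk above.
  double durationSeconds = 12.3456;
  int64_t durationMs =
      static_cast<int64_t>(std::llround(durationSeconds * 1000.0));
  printf("%lld ms\n", static_cast<long long>(durationMs));  // 12346 ms
  return 0;
}
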
--- a/content/media/VideoUtils.cpp
+++ b/content/media/VideoUtils.cpp
@@ -170,29 +170,29 @@ PRBool MulOverflow(PRInt64 a, PRInt64 b,
     return PR_FALSE;
   }
 
   aResult *= sign;
   NS_ASSERTION(a * b == aResult, "We didn't overflow, but result is wrong!");
   return PR_TRUE;
 }
 
-// Converts from number of audio samples to microseconds, given the specified
+// Converts from number of audio samples to milliseconds, given the specified
 // audio rate.
-PRBool SamplesToUsecs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutUsecs)
+PRBool SamplesToMs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutMs)
 {
   PRInt64 x;
-  if (!MulOverflow(aSamples, USECS_PER_S, x))
+  if (!MulOverflow(aSamples, 1000, x))
     return PR_FALSE;
-  aOutUsecs = x / aRate;
+  aOutMs = x / aRate;
   return PR_TRUE;
 }
 
-// Converts from microseconds to number of audio samples, given the specified
+// Converts from milliseconds to number of audio samples, given the specified
 // audio rate.
-PRBool UsecsToSamples(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutSamples)
+PRBool MsToSamples(PRInt64 aMs, PRUint32 aRate, PRInt64& aOutSamples)
 {
   PRInt64 x;
-  if (!MulOverflow(aUsecs, aRate, x))
+  if (!MulOverflow(aMs, aRate, x))
     return PR_FALSE;
-  aOutSamples = x / USECS_PER_S;
+  aOutSamples = x / 1000;
   return PR_TRUE;
 }
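The two converters above are plain samples * 1000 / rate and ms * rate / 1000, with MulOverflow guarding the 64-bit intermediate product before the division. A self-contained sketch of the same pattern, substituting GCC/Clang's __builtin_mul_overflow for the tree's MulOverflow helper (names and values here are illustrative):

#include <cstdint>
#include <cstdio>

// Mirrors SamplesToMs: fail if samples * 1000 overflows 64 bits,
// otherwise divide by the audio rate (truncating, as in the patch).
static bool SamplesToMsSketch(int64_t samples, uint32_t rate, int64_t& outMs) {
  int64_t x;
  if (__builtin_mul_overflow(samples, static_cast<int64_t>(1000), &x))
    return false;
  outMs = x / rate;
  return true;
}

int main() {
  int64_t ms = 0;
  if (SamplesToMsSketch(88200, 44100, ms))
    printf("%lld ms\n", static_cast<long long>(ms));  // 2000 ms
  return 0;
}
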
--- a/content/media/VideoUtils.h
+++ b/content/media/VideoUtils.h
@@ -121,27 +121,21 @@ PRBool MulOverflow32(PRUint32 a, PRUint3
 // if addition would result in an overflow.
 PRBool AddOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
 
 // 64 bit integer multiplication with overflow checking. Returns PR_TRUE
 // if the multiplication was successful, or PR_FALSE if the operation resulted
 // in an integer overflow.
 PRBool MulOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
 
-// Converts from number of audio samples (aSamples) to microseconds, given
-// the specified audio rate (aRate). Stores result in aOutUsecs. Returns PR_TRUE
+// Converts from number of audio samples (aSamples) to milliseconds, given
+// the specified audio rate (aRate). Stores result in aOutMs. Returns PR_TRUE
 // if the operation succeeded, or PR_FALSE if there was an integer overflow
// while calculating the conversion.
-PRBool SamplesToUsecs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutUsecs);
+PRBool SamplesToMs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutMs);
 
-// Converts from microseconds (aUsecs) to number of audio samples, given the
+// Converts from milliseconds (aMs) to number of audio samples, given the
 // specified audio rate (aRate). Stores the result in aOutSamples. Returns
 // PR_TRUE if the operation succeeded, or PR_FALSE if there was an integer
// overflow while calculating the conversion.
-PRBool UsecsToSamples(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutSamples);
-
-// Number of microseconds per second. 1e6.
-#define USECS_PER_S 1000000
-
-// Number of microseconds per millisecond.
-#define USECS_PER_MS 1000
+PRBool MsToSamples(PRInt64 aMs, PRUint32 aRate, PRInt64& aOutSamples);
 
 #endif
--- a/content/media/nsAudioAvailableEventManager.cpp
+++ b/content/media/nsAudioAvailableEventManager.cpp
@@ -34,18 +34,18 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "nsTArray.h"
 #include "nsAudioAvailableEventManager.h"
-#include "VideoUtils.h"
 
+#define MILLISECONDS_PER_SECOND 1000.0f
 #define MAX_PENDING_EVENTS 100
 
 using namespace mozilla;
 
 class nsAudioAvailableEventRunner : public nsRunnable
 {
 private:
   nsCOMPtr<nsBuiltinDecoder> mDecoder;
@@ -101,17 +101,17 @@ void nsAudioAvailableEventManager::Init(
 
 void nsAudioAvailableEventManager::DispatchPendingEvents(PRUint64 aCurrentTime)
 {
   MonitorAutoEnter mon(mMonitor);
 
   while (mPendingEvents.Length() > 0) {
     nsAudioAvailableEventRunner* e =
       (nsAudioAvailableEventRunner*)mPendingEvents[0].get();
-    if (e->mTime * USECS_PER_S > aCurrentTime) {
+    if (e->mTime * MILLISECONDS_PER_SECOND > aCurrentTime) {
       break;
     }
     nsCOMPtr<nsIRunnable> event = mPendingEvents[0];
     mPendingEvents.RemoveElementAt(0);
     NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
   }
 }
 
@@ -222,17 +222,17 @@ void nsAudioAvailableEventManager::Drain
   if (0 == mSignalBufferPosition)
     return;
 
   // Zero-pad the end of the signal buffer so it's complete.
   memset(mSignalBuffer.get() + mSignalBufferPosition, 0,
          (mSignalBufferLength - mSignalBufferPosition) * sizeof(float));
 
   // Force this last event to go now.
-  float time = (aEndTime / static_cast<float>(USECS_PER_S)) - 
+  float time = (aEndTime / MILLISECONDS_PER_SECOND) - 
                (mSignalBufferPosition / mSamplesPerSecond);
   nsCOMPtr<nsIRunnable> lastEvent =
     new nsAudioAvailableEventRunner(mDecoder, mSignalBuffer.forget(),
                                     mSignalBufferLength, time);
   NS_DispatchToMainThread(lastEvent, NS_DISPATCH_NORMAL);
 
   mSignalBufferPosition = 0;
 }
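The Drain() arithmetic above stamps the final, zero-padded event with the time its first sample would play: the stream end time (in ms, converted to seconds) minus the duration already sitting in the signal buffer. A worked sketch of just that calculation, with invented example values:

#include <cstdio>

int main() {
  const float MILLISECONDS_PER_SECOND = 1000.0f;
  long long aEndTime = 5500;         // stream end time in ms (example)
  int signalBufferPosition = 22050;  // samples buffered, not yet dispatched
  float samplesPerSecond = 44100.0f;

  // Same formula as nsAudioAvailableEventManager::Drain() above.
  float time = (aEndTime / MILLISECONDS_PER_SECOND) -
               (signalBufferPosition / samplesPerSecond);
  printf("last event time = %.2f s\n", time);  // 5.5 - 0.5 = 5.00 s
  return 0;
}
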
--- a/content/media/nsAudioStream.cpp
+++ b/content/media/nsAudioStream.cpp
@@ -48,17 +48,16 @@ using namespace mozilla::dom;
 
 #include <stdio.h>
 #include <math.h>
 #include "prlog.h"
 #include "prmem.h"
 #include "nsAutoPtr.h"
 #include "nsAudioStream.h"
 #include "nsAlgorithm.h"
-#include "VideoUtils.h"
 extern "C" {
 #include "sydneyaudio/sydney_audio.h"
 }
 #include "mozilla/TimeStamp.h"
 #include "nsThreadUtils.h"
 
 #if defined(XP_MACOSX)
 #define SA_PER_STREAM_VOLUME 1
@@ -72,16 +71,17 @@ extern "C" {
 
 using mozilla::TimeStamp;
 
 #ifdef PR_LOGGING
 PRLogModuleInfo* gAudioStreamLog = nsnull;
 #endif
 
 #define FAKE_BUFFER_SIZE 176400
+#define MILLISECONDS_PER_SECOND 1000
 
 class nsAudioStreamLocal : public nsAudioStream
 {
  public:
   NS_DECL_ISUPPORTS
 
   ~nsAudioStreamLocal();
   nsAudioStreamLocal();
@@ -550,17 +550,17 @@ void nsAudioStreamLocal::Resume()
   mPaused = PR_FALSE;
   sa_stream_resume(static_cast<sa_stream_t*>(mAudioHandle));
 }
 
 PRInt64 nsAudioStreamLocal::GetPosition()
 {
   PRInt64 sampleOffset = GetSampleOffset();
   if (sampleOffset >= 0) {
-    return ((USECS_PER_S * sampleOffset) / mRate / mChannels);
+    return ((MILLISECONDS_PER_SECOND * sampleOffset) / mRate / mChannels);
   }
   return -1;
 }
 
 PRInt64 nsAudioStreamLocal::GetSampleOffset()
 {
   if (mInError) {
     return -1;
@@ -719,33 +719,33 @@ nsAudioStreamRemote::Resume()
   nsCOMPtr<nsIRunnable> event = new AudioPauseEvent(mAudioChild, PR_FALSE);
   NS_DispatchToMainThread(event);
 }
 
 PRInt64 nsAudioStreamRemote::GetPosition()
 {
   PRInt64 sampleOffset = GetSampleOffset();
   if (sampleOffset >= 0) {
-    return ((USECS_PER_S * sampleOffset) / mRate / mChannels);
+    return ((MILLISECONDS_PER_SECOND * sampleOffset) / mRate / mChannels);
   }
   return 0;
 }
 
 PRInt64
 nsAudioStreamRemote::GetSampleOffset()
 {
   if(!mAudioChild)
     return 0;
 
   PRInt64 offset = mAudioChild->GetLastKnownSampleOffset();
   if (offset == -1)
     return 0;
 
   PRInt64 time   = mAudioChild->GetLastKnownSampleOffsetTime();
-  PRInt64 result = offset + (mRate * mChannels * (PR_IntervalNow() - time) / USECS_PER_S);
+  PRInt64 result = offset + (mRate * mChannels * (PR_IntervalNow() - time) / MILLISECONDS_PER_SECOND);
 
   return result;
 }
 
 PRBool
 nsAudioStreamRemote::IsPaused()
 {
   return mPaused;
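Both GetPosition() implementations above apply the same identity: a hardware sample offset counts interleaved samples across all channels, so position in ms = 1000 * offset / rate / channels. A small worked sketch (stereo 44.1 kHz; numbers invented):

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t MILLISECONDS_PER_SECOND = 1000;
  int64_t sampleOffset = 176400;  // interleaved samples played so far
  int64_t rate = 44100;           // frames per second
  int64_t channels = 2;           // stereo

  // Same expression as nsAudioStreamLocal::GetPosition() above.
  int64_t positionMs =
      (MILLISECONDS_PER_SECOND * sampleOffset) / rate / channels;
  printf("position = %lld ms\n", static_cast<long long>(positionMs));  // 2000
  return 0;
}
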
--- a/content/media/nsAudioStream.h
+++ b/content/media/nsAudioStream.h
@@ -102,17 +102,17 @@ public:
   virtual void Drain() = 0;
 
   // Pause audio playback
   virtual void Pause() = 0;
 
   // Resume audio playback
   virtual void Resume() = 0;
 
-  // Return the position in microseconds of the sample being played by the
+  // Return the position in milliseconds of the sample being played by the
   // audio hardware.
   virtual PRInt64 GetPosition() = 0;
 
   // Return the position, measured in samples played since the start, by
   // the audio hardware.
   virtual PRInt64 GetSampleOffset() = 0;
 
   // Returns PR_TRUE when the audio stream is paused.
--- a/content/media/nsBuiltinDecoder.cpp
+++ b/content/media/nsBuiltinDecoder.cpp
@@ -78,17 +78,17 @@ void nsBuiltinDecoder::SetVolume(double 
     mDecoderStateMachine->SetVolume(aVolume);
   }
 }
 
 double nsBuiltinDecoder::GetDuration()
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   if (mDuration >= 0) {
-     return static_cast<double>(mDuration) / static_cast<double>(USECS_PER_S);
+     return static_cast<double>(mDuration) / 1000.0;
   }
   return std::numeric_limits<double>::quiet_NaN();
 }
 
 nsBuiltinDecoder::nsBuiltinDecoder() :
   mDecoderPosition(0),
   mPlaybackPosition(0),
   mCurrentTime(0.0),
@@ -519,17 +519,17 @@ double nsBuiltinDecoder::ComputePlayback
 {
   GetMonitor().AssertCurrentThreadIn();
   NS_ASSERTION(NS_IsMainThread() || IsCurrentThread(mStateMachineThread),
                "Should be on main or state machine thread.");
 
   PRInt64 length = mStream ? mStream->GetLength() : -1;
   if (mDuration >= 0 && length >= 0) {
     *aReliable = PR_TRUE;
-    return length * static_cast<double>(USECS_PER_S) / mDuration;
+    return double(length)*1000.0/mDuration;
   }
   return mPlaybackStatistics.GetRateAtLastStop(aReliable);
 }
 
 void nsBuiltinDecoder::UpdatePlaybackRate()
 {
   NS_ASSERTION(NS_IsMainThread() || IsCurrentThread(mStateMachineThread),
                "Should be on main or state machine thread.");
@@ -795,25 +795,25 @@ void nsBuiltinDecoder::DurationChanged()
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   MonitorAutoEnter mon(mMonitor);
   PRInt64 oldDuration = mDuration;
   mDuration = mDecoderStateMachine ? mDecoderStateMachine->GetDuration() : -1;
   // Duration has changed so we should recompute playback rate
   UpdatePlaybackRate();
 
   if (mElement && oldDuration != mDuration) {
-    LOG(PR_LOG_DEBUG, ("%p duration changed to %lld", this, mDuration));
+    LOG(PR_LOG_DEBUG, ("%p duration changed to %lldms", this, mDuration));
     mElement->DispatchEvent(NS_LITERAL_STRING("durationchange"));
   }
 }
 
-void nsBuiltinDecoder::SetDuration(double aDuration)
+void nsBuiltinDecoder::SetDuration(PRInt64 aDuration)
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
-  mDuration = static_cast<PRInt64>(NS_round(aDuration * static_cast<double>(USECS_PER_S)));
+  mDuration = aDuration;
 
   MonitorAutoEnter mon(mMonitor);
   if (mDecoderStateMachine) {
     mDecoderStateMachine->SetDuration(mDuration);
   }
 
   // Duration has changed so we should recompute playback rate
   UpdatePlaybackRate();
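ComputePlaybackRate() above reduces to bytes-per-second = length * 1000 / durationMs whenever both the resource length and the duration (now in ms) are known, and only then is the estimate flagged reliable. A worked sketch of that branch with invented numbers:

#include <cstdio>

int main() {
  long long lengthBytes = 4000000;  // total resource length (example)
  long long durationMs = 120000;    // two minutes, in ms

  // double(length) * 1000.0 / mDuration, as in the hunk above.
  double bytesPerSecond =
      static_cast<double>(lengthBytes) * 1000.0 / durationMs;
  printf("required rate = %.0f bytes/s\n", bytesPerSecond);  // ~33333
  return 0;
}
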
--- a/content/media/nsBuiltinDecoder.h
+++ b/content/media/nsBuiltinDecoder.h
@@ -245,24 +245,23 @@ public:
 
   // Set the audio volume. The decoder monitor must be obtained before
   // calling this.
   virtual void SetVolume(double aVolume) = 0;
 
   virtual void Shutdown() = 0;
 
   // Called from the main thread to get the duration. The decoder monitor
-  // must be obtained before calling this. It is in units of microseconds.
+  // must be obtained before calling this. It is in units of milliseconds.
   virtual PRInt64 GetDuration() = 0;
 
   // Called from the main thread to set the duration of the media resource
   // if it is able to be obtained via HTTP headers. Called from the 
   // state machine thread to set the duration if it is obtained from the
   // media metadata. The decoder monitor must be obtained before calling this.
-  // aDuration is in microseconds.
   virtual void SetDuration(PRInt64 aDuration) = 0;
 
   // Functions used by assertions to ensure we're calling things
   // on the appropriate threads.
   virtual PRBool OnDecodeThread() const = 0;
 
   virtual nsHTMLMediaElement::NextFrameStatus GetNextFrameStatus() = 0;
 
@@ -379,20 +378,20 @@ class nsBuiltinDecoder : public nsMediaD
   // Call from any thread safely. Return PR_TRUE if we are currently
   // seeking in the media resource.
   virtual PRBool IsSeeking() const;
 
   // Return PR_TRUE if the decoder has reached the end of playback.
   // Call on the main thread only.
   virtual PRBool IsEnded() const;
 
-  // Set the duration of the media resource in units of seconds.
+  // Set the duration of the media resource in units of milliseconds.
   // This is called via a channel listener if it can pick up the duration
   // from a content header. Must be called from the main thread only.
-  virtual void SetDuration(double aDuration);
+  virtual void SetDuration(PRInt64 aDuration);
 
   // Set a flag indicating whether seeking is supported
   virtual void SetSeekable(PRBool aSeekable);
 
   // Return PR_TRUE if seeking is supported.
   virtual PRBool GetSeekable();
 
   virtual Statistics GetStatistics();
--- a/content/media/nsBuiltinDecoderReader.h
+++ b/content/media/nsBuiltinDecoderReader.h
@@ -175,18 +175,18 @@ public:
   PRUint32 AudioDataLength() {
     return mChannels * mSamples;
   }
 
   // Approximate byte offset of the end of the page on which this sample
   // chunk ends.
   const PRInt64 mOffset;
 
-  PRInt64 mTime; // Start time of samples in usecs.
-  const PRInt64 mDuration; // In usecs.
+  PRInt64 mTime; // Start time of samples in ms.
+  const PRInt64 mDuration; // In ms.
   const PRUint32 mSamples;
   const PRUint32 mChannels;
   nsAutoArrayPtr<SoundDataValue> mAudioData;
 };
 
 // Holds a decoded video frame, in YCbCr format. These are queued in the reader.
 class VideoData {
 public:
@@ -237,20 +237,20 @@ public:
   ~VideoData()
   {
     MOZ_COUNT_DTOR(VideoData);
   }
 
   // Approximate byte offset of the end of the frame in the media.
   PRInt64 mOffset;
 
-  // Start time of frame in microseconds.
+  // Start time of frame in milliseconds.
   PRInt64 mTime;
 
-  // End time of frame in microseconds;
+  // End time of frame in milliseconds;
   PRInt64 mEndTime;
 
   // Codec specific internal time code. For Ogg based codecs this is the
   // granulepos.
   PRInt64 mTimecode;
 
   // This frame's image.
   nsRefPtr<Image> mImage;
@@ -383,17 +383,17 @@ template <class T> class MediaQueue : pr
   }
 
   // Informs the media queue that it won't be receiving any more samples.
   void Finish() {
     MonitorAutoEnter mon(mMonitor);
     mEndOfStream = PR_TRUE;    
   }
 
-  // Returns the approximate number of microseconds of samples in the queue.
+  // Returns the approximate number of milliseconds of samples in the queue.
   PRInt64 Duration() {
     MonitorAutoEnter mon(mMonitor);
     if (GetSize() < 2) {
       return 0;
     }
     T* last = Peek();
     T* first = PeekFront();
     return last->mTime - first->mTime;
@@ -453,19 +453,19 @@ public:
   // first video sample, if we have video.
   virtual VideoData* FindStartTime(PRInt64 aOffset,
                                    PRInt64& aOutStartTime);
 
   // Returns the end time of the last page which occurs before aEndOffset.
   // This will not read past aEndOffset. Returns -1 on failure. 
   virtual PRInt64 FindEndTime(PRInt64 aEndOffset);
 
-  // Moves the decode head to aTime microseconds. aStartTime and aEndTime
-  // denote the start and end times of the media in usecs, and aCurrentTime
-  // is the current playback position in microseconds.
+  // Moves the decode head to aTime milliseconds. aStartTime and aEndTime
+  // denote the start and end times of the media in ms, and aCurrentTime
+  // is the current playback position in ms.
   virtual nsresult Seek(PRInt64 aTime,
                         PRInt64 aStartTime,
                         PRInt64 aEndTime,
                         PRInt64 aCurrentTime) = 0;
 
   // Queue of audio samples. This queue is threadsafe.
   MediaQueue<SoundData> mAudioQueue;
 
@@ -481,17 +481,17 @@ public:
 
   // Only used by nsWebMReader for now, so stub here rather than in every
   // reader than inherits from nsBuiltinDecoderReader.
   virtual void NotifyDataArrived(const char* aBuffer, PRUint32 aLength, PRUint32 aOffset) {}
 
 protected:
 
   // Pumps the decode until we reach frames/samples required to play at
-  // time aTarget (usecs).
+  // time aTarget (ms).
   nsresult DecodeToTarget(PRInt64 aTarget);
 
   // Reader decode function. Matches DecodeVideoFrame() and
   // DecodeAudioData().
   typedef PRBool (nsBuiltinDecoderReader::*DecodeFn)();
 
   // Calls aDecodeFn on *this until aQueue has a sample, whereupon
   // we return the first sample.
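MediaQueue<T>::Duration() above approximates the queued duration as the gap between the first and last start timestamps; the final sample's own length is ignored, which is why the result is described as approximate. A minimal sketch of the same idea over a std::deque (the real queue is monitor-protected; locking is omitted here, and the types are illustrative):

#include <cstdint>
#include <cstdio>
#include <deque>

struct Sample { int64_t mTime; };  // start time in ms, as in the reader

// Approximate queued duration: last start time minus first start time.
static int64_t QueuedDurationMs(const std::deque<Sample>& q) {
  if (q.size() < 2)
    return 0;
  return q.back().mTime - q.front().mTime;
}

int main() {
  std::deque<Sample> q = {{0}, {40}, {80}, {120}};
  printf("~%lld ms queued\n",
         static_cast<long long>(QueuedDurationMs(q)));  // 120 ms
  return 0;
}
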
--- a/content/media/nsBuiltinDecoderStateMachine.cpp
+++ b/content/media/nsBuiltinDecoderStateMachine.cpp
@@ -63,27 +63,27 @@ extern PRLogModuleInfo* gBuiltinDecoderL
 
 // The amount of data to retrieve during buffering is computed based
 // on the download rate. BUFFERING_MIN_RATE is the minimum download
 // rate to be used in that calculation to help avoid constant buffering
 // attempts at a time when the average download rate has not stabilised.
 #define BUFFERING_MIN_RATE 50000
 #define BUFFERING_RATE(x) ((x)< BUFFERING_MIN_RATE ? BUFFERING_MIN_RATE : (x))
 
-// If audio queue has less than this many usecs of decoded audio, we won't risk
+// If audio queue has less than this many ms of decoded audio, we won't risk
// trying to decode the video; we'll skip decoding video up to the next
 // keyframe. We may increase this value for an individual decoder if we
 // encounter video frames which take a long time to decode.
-static const PRUint32 LOW_AUDIO_USECS = 300000;
+static const PRUint32 LOW_AUDIO_MS = 300;
 
-// If more than this many usecs of decoded audio is queued, we'll hold off
+// If more than this many ms of decoded audio is queued, we'll hold off
 // decoding more audio. If we increase the low audio threshold (see
-// LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not
+// LOW_AUDIO_MS above) we'll also increase this value to ensure it's not
 // less than the low audio threshold.
-const PRInt64 AMPLE_AUDIO_USECS = 1000000;
+const PRInt64 AMPLE_AUDIO_MS = 1000;
 
 // Maximum number of bytes we'll allocate and write at once to the audio
 // hardware when the audio stream contains missing samples and we're
 // writing silence in order to fill the gap. We limit our silence-writes
 // to 32KB in order to avoid allocating an impossibly large chunk of
 // memory if we encounter a large chunk of silence.
 const PRUint32 SILENCE_BYTES_CHUNK = 32 * 1024;
 
@@ -93,64 +93,64 @@ const PRUint32 SILENCE_BYTES_CHUNK = 32 
 static const PRUint32 LOW_VIDEO_FRAMES = 1;
 
 // If we've got more than AMPLE_VIDEO_FRAMES decoded video frames waiting in
 // the video queue, we will not decode any more video frames until some have
 // been consumed by the play state machine thread.
 static const PRUint32 AMPLE_VIDEO_FRAMES = 10;
 
 // Arbitrary "frame duration" when playing only audio.
-static const int AUDIO_DURATION_USECS = 40000;
+static const int AUDIO_DURATION_MS = 40;
 
-// If we increase our "low audio threshold" (see LOW_AUDIO_USECS above), we
+// If we increase our "low audio threshold" (see LOW_AUDIO_MS above), we
 // use this as a factor in all our calculations. Increasing this will cause
 // us to be more likely to increase our low audio threshold, and to
 // increase it by more.
 static const int THRESHOLD_FACTOR = 2;
 
 // If we have less than this much undecoded data available, we'll consider
 // ourselves to be running low on undecoded data. We determine how much
 // undecoded data we have remaining using the reader's GetBuffered()
 // implementation.
-static const PRInt64 LOW_DATA_THRESHOLD_USECS = 5000000;
+static const PRInt64 LOW_DATA_THRESHOLD_MS = 5000;
 
-// LOW_DATA_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, otherwise
+// LOW_DATA_THRESHOLD_MS needs to be greater than AMPLE_AUDIO_MS, otherwise
 // the skip-to-keyframe logic can activate when we're running low on data.
-PR_STATIC_ASSERT(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS);
+PR_STATIC_ASSERT(LOW_DATA_THRESHOLD_MS > AMPLE_AUDIO_MS);
 
-// Amount of excess usecs of data to add in to the "should we buffer" calculation.
-static const PRUint32 EXHAUSTED_DATA_MARGIN_USECS = 60000;
+// Amount of excess ms of data to add in to the "should we buffer" calculation.
+static const PRUint32 EXHAUSTED_DATA_MARGIN_MS = 60;
 
-// If we enter buffering within QUICK_BUFFER_THRESHOLD_USECS seconds of starting
+// If we enter buffering within QUICK_BUFFER_THRESHOLD_MS ms of starting
 // decoding, we'll enter "quick buffering" mode, which exits a lot sooner than
 // normal buffering mode. This exists so that if the decode-ahead exhausts the
 // downloaded data while decode/playback is just starting up (for example
 // after a seek while the media is still playing, or when playing a media
// as soon as its load has started), we won't necessarily stop for 30s and wait
// for buffering. We may actually be able to play back in this case, so exit
 // buffering early and try to play. If it turns out we can't play, we'll fall
 // back to buffering normally.
-static const PRUint32 QUICK_BUFFER_THRESHOLD_USECS = 2000000;
+static const PRUint32 QUICK_BUFFER_THRESHOLD_MS = 2000;
 
 // If we're quick buffering, we'll remain in buffering mode while we have less than
-// QUICK_BUFFERING_LOW_DATA_USECS of decoded data available.
-static const PRUint32 QUICK_BUFFERING_LOW_DATA_USECS = 1000000;
+// QUICK_BUFFERING_LOW_DATA_MS of decoded data available.
+static const PRUint32 QUICK_BUFFERING_LOW_DATA_MS = 1000;
 
-// If QUICK_BUFFERING_LOW_DATA_USECS is > AMPLE_AUDIO_USECS, we won't exit
+// If QUICK_BUFFERING_LOW_DATA_MS is > AMPLE_AUDIO_MS, we won't exit
 // quick buffering in a timely fashion, as the decode pauses when it
-// reaches AMPLE_AUDIO_USECS decoded data, and thus we'll never reach
-// QUICK_BUFFERING_LOW_DATA_USECS.
-PR_STATIC_ASSERT(QUICK_BUFFERING_LOW_DATA_USECS <= AMPLE_AUDIO_USECS);
+// reaches AMPLE_AUDIO_MS decoded data, and thus we'll never reach
+// QUICK_BUFFERING_LOW_DATA_MS.
+PR_STATIC_ASSERT(QUICK_BUFFERING_LOW_DATA_MS <= AMPLE_AUDIO_MS);
 
-static TimeDuration UsecsToDuration(PRInt64 aUsecs) {
-  return TimeDuration::FromMilliseconds(static_cast<double>(aUsecs) / USECS_PER_MS);
+static TimeDuration MsToDuration(PRInt64 aMs) {
+  return TimeDuration::FromMilliseconds(static_cast<double>(aMs));
 }
 
-static PRInt64 DurationToUsecs(TimeDuration aDuration) {
-  return static_cast<PRInt64>(aDuration.ToSeconds() * USECS_PER_S);
+static PRInt64 DurationToMs(TimeDuration aDuration) {
+  return static_cast<PRInt64>(aDuration.ToSeconds() * 1000);
 }
 
 class nsAudioMetadataEventRunner : public nsRunnable
 {
 private:
   nsCOMPtr<nsBuiltinDecoder> mDecoder;
 public:
   nsAudioMetadataEventRunner(nsBuiltinDecoder* aDecoder, PRUint32 aChannels,
@@ -209,17 +209,17 @@ PRBool nsBuiltinDecoderStateMachine::Has
   mDecoder->GetMonitor().AssertCurrentThreadIn();
   NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio");
   // We've got audio ready to play if:
   // 1. We've not completed playback of audio, and
   // 2. we either have more than the threshold of decoded audio available, or
   //    we've completely decoded all audio (but not finished playing it yet
   //    as per 1).
   return !mAudioCompleted &&
-         (AudioDecodedUsecs() > LOW_AUDIO_USECS || mReader->mAudioQueue.IsFinished());
+         (AudioDecodedMs() > LOW_AUDIO_MS || mReader->mAudioQueue.IsFinished());
 }
 
 PRBool nsBuiltinDecoderStateMachine::HaveNextFrameData() const {
   mDecoder->GetMonitor().AssertCurrentThreadIn();
   return (!HasAudio() || HasFutureAudio()) &&
          (!HasVideo() || mReader->mVideoQueue.GetSize() > 0);
 }
 
@@ -247,29 +247,29 @@ void nsBuiltinDecoderStateMachine::Decod
   // playback position. skipToNextKeyframe is PR_TRUE if we're currently
   // skipping up to the next keyframe.
   PRBool skipToNextKeyframe = PR_FALSE;
 
   // Once we've decoded more than videoPumpThreshold video frames, we'll
   // no longer be considered to be "pumping video".
   const unsigned videoPumpThreshold = AMPLE_VIDEO_FRAMES / 2;
 
-  // After the audio decode fills with more than audioPumpThreshold usecs
+  // After the audio decode fills with more than audioPumpThresholdMs ms
   // of decoded audio, we'll start to check whether the audio or video decode
   // is falling behind.
-  const unsigned audioPumpThreshold = LOW_AUDIO_USECS * 2;
+  const unsigned audioPumpThresholdMs = LOW_AUDIO_MS * 2;
 
   // Our local low audio threshold. We may increase this if we're slow to
   // decode video frames, in order to reduce the chance of audio underruns.
-  PRInt64 lowAudioThreshold = LOW_AUDIO_USECS;
+  PRInt64 lowAudioThreshold = LOW_AUDIO_MS;
 
   // Our local ample audio threshold. If we increase lowAudioThreshold, we'll
   // also increase this too appropriately (we don't want lowAudioThreshold to
   // be greater than ampleAudioThreshold, else we'd stop decoding!).
-  PRInt64 ampleAudioThreshold = AMPLE_AUDIO_USECS;
+  PRInt64 ampleAudioThreshold = AMPLE_AUDIO_MS;
 
   MediaQueue<VideoData>& videoQueue = mReader->mVideoQueue;
   MediaQueue<SoundData>& audioQueue = mReader->mAudioQueue;
 
   MonitorAutoEnter mon(mDecoder->GetMonitor());
 
   PRBool videoPlaying = HasVideo();
   PRBool audioPlaying = HasAudio();
@@ -286,17 +286,17 @@ void nsBuiltinDecoderStateMachine::Decod
         static_cast<PRUint32>(videoQueue.GetSize()) >= videoPumpThreshold)
     {
       videoPump = PR_FALSE;
     }
 
     // We don't want to consider skipping to the next keyframe if we've
     // only just started up the decode loop, so wait until we've decoded
     // some audio data before enabling the keyframe skip logic on audio.
-    if (audioPump && GetDecodedAudioDuration() >= audioPumpThreshold) {
+    if (audioPump && GetDecodedAudioDuration() >= audioPumpThresholdMs) {
       audioPump = PR_FALSE;
     }
 
     // We'll skip the video decode to the nearest keyframe if we're low on
     // audio, or if we're low on video, provided we're not running low on
     // data to decode. If we're running low on downloaded data to decode,
     // we won't start keyframe skipping, as we'll be pausing playback to buffer
     // soon anyway and we'll want to be able to display frames immediately
@@ -325,21 +325,21 @@ void nsBuiltinDecoderStateMachine::Decod
       TimeDuration decodeTime;
       {
         PRInt64 currentTime = GetMediaTime();
         MonitorAutoExit exitMon(mDecoder->GetMonitor());
         TimeStamp start = TimeStamp::Now();
         videoPlaying = mReader->DecodeVideoFrame(skipToNextKeyframe, currentTime);
         decodeTime = TimeStamp::Now() - start;
       }
-      if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > lowAudioThreshold &&
+      if (THRESHOLD_FACTOR * DurationToMs(decodeTime) > lowAudioThreshold &&
           !HasLowUndecodedData())
       {
         lowAudioThreshold =
-          NS_MIN(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
+          NS_MIN(THRESHOLD_FACTOR * DurationToMs(decodeTime), AMPLE_AUDIO_MS);
         ampleAudioThreshold = NS_MAX(THRESHOLD_FACTOR * lowAudioThreshold,
                                      ampleAudioThreshold);
         LOG(PR_LOG_DEBUG,
             ("Slow video decode, set lowAudioThreshold=%lld ampleAudioThreshold=%lld",
              lowAudioThreshold, ampleAudioThreshold));
       }
     }
 
@@ -471,29 +471,29 @@ void nsBuiltinDecoderStateMachine::Audio
                  "Should have data to play");
     // See if there's missing samples in the audio stream. If there is, push
     // silence into the audio hardware, so we can play across the gap.
     const SoundData* s = mReader->mAudioQueue.PeekFront();
 
     // Calculate the number of samples that have been pushed onto the audio
     // hardware.
     PRInt64 playedSamples = 0;
-    if (!UsecsToSamples(audioStartTime, rate, playedSamples)) {
+    if (!MsToSamples(audioStartTime, rate, playedSamples)) {
       NS_WARNING("Int overflow converting playedSamples");
       break;
     }
     if (!AddOverflow(playedSamples, audioDuration, playedSamples)) {
       NS_WARNING("Int overflow adding playedSamples");
       break;
     }
 
     // Calculate the timestamp of the next chunk of audio in numbers of
     // samples.
     PRInt64 sampleTime = 0;
-    if (!UsecsToSamples(s->mTime, rate, sampleTime)) {
+    if (!MsToSamples(s->mTime, rate, sampleTime)) {
       NS_WARNING("Int overflow converting sampleTime");
       break;
     }
     PRInt64 missingSamples = 0;
     if (!AddOverflow(sampleTime, -playedSamples, missingSamples)) {
       NS_WARNING("Int overflow adding missingSamples");
       break;
     }
@@ -506,36 +506,36 @@ void nsBuiltinDecoderStateMachine::Audio
       missingSamples = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX), missingSamples);
       audioDuration += PlaySilence(static_cast<PRUint32>(missingSamples),
                                    channels, playedSamples);
     } else {
       audioDuration += PlayFromAudioQueue(sampleTime, channels);
     }
     {
       MonitorAutoEnter mon(mDecoder->GetMonitor());
-      PRInt64 playedUsecs;
-      if (!SamplesToUsecs(audioDuration, rate, playedUsecs)) {
-        NS_WARNING("Int overflow calculating playedUsecs");
+      PRInt64 playedMs;
+      if (!SamplesToMs(audioDuration, rate, playedMs)) {
+        NS_WARNING("Int overflow calculating playedMs");
         break;
       }
-      if (!AddOverflow(audioStartTime, playedUsecs, mAudioEndTime)) {
+      if (!AddOverflow(audioStartTime, playedMs, mAudioEndTime)) {
         NS_WARNING("Int overflow calculating audio end time");
         break;
       }
 
       PRInt64 audioAhead = mAudioEndTime - GetMediaTime();
-      if (audioAhead > AMPLE_AUDIO_USECS &&
+      if (audioAhead > AMPLE_AUDIO_MS &&
           audioDuration - samplesAtLastSleep > minWriteSamples)
       {
         samplesAtLastSleep = audioDuration;
         // We've pushed enough audio onto the hardware that we've queued up a
         // significant amount ahead of the playback position. The decode
         // thread will be going to sleep, so we won't get any new samples
         // anyway, so sleep until we need to push to the hardware again.
-        Wait(AMPLE_AUDIO_USECS / 2);
+        Wait(AMPLE_AUDIO_MS / 2);
         // Kick the decode thread; since above we only do a NotifyAll when
         // we pop an audio chunk of the queue, the decoder won't wake up if
         // we've got no more decoded chunks to push to the hardware. We can
         // hit this condition if the last sample in the stream doesn't have
         // it's EOS flag set, and the decode thread sleeps just after decoding
         // that packet, but before realising there's no more packets.
         mon.NotifyAll();
       }
@@ -556,18 +556,18 @@ void nsBuiltinDecoderStateMachine::Audio
         MonitorAutoExit audioExit(mAudioMonitor);
         MonitorAutoEnter mon(mDecoder->GetMonitor());
         PRInt64 position = GetMediaTime();
         while (oldPosition != position &&
                mAudioEndTime - position > 0 &&
                mState != DECODER_STATE_SEEKING &&
                mState != DECODER_STATE_SHUTDOWN)
         {
-          const PRInt64 DRAIN_BLOCK_USECS = 100000;
-          Wait(NS_MIN(mAudioEndTime - position, DRAIN_BLOCK_USECS));
+          const PRInt64 DRAIN_BLOCK_MS = 100;
+          Wait(NS_MIN(mAudioEndTime - position, DRAIN_BLOCK_MS));
           oldPosition = position;
           position = GetMediaTime();
         }
         if (mState == DECODER_STATE_SEEKING) {
           seeking = PR_TRUE;
         }
       }
 
@@ -680,17 +680,17 @@ void nsBuiltinDecoderStateMachine::StopP
   mDecoder->mPlaybackStatistics.Stop(TimeStamp::Now());
 
   // Reset mPlayStartTime before we pause/shutdown the nsAudioStream. This is
   // so that if the audio loop is about to write audio, it will have the chance
   // to check to see if we're paused and not write the audio. If not, the
   // audio thread can block in the write, and we deadlock trying to acquire
  // the audio monitor upon resuming playback.
   if (IsPlaying()) {
-    mPlayDuration += DurationToUsecs(TimeStamp::Now() - mPlayStartTime);
+    mPlayDuration += TimeStamp::Now() - mPlayStartTime;
     mPlayStartTime = TimeStamp();
   }
   if (HasAudio()) {
     MonitorAutoExit exitMon(mDecoder->GetMonitor());
     MonitorAutoEnter audioMon(mAudioMonitor);
     if (mAudioStream) {
       if (aMode == AUDIO_PAUSE) {
         mAudioStream->Pause();
@@ -796,17 +796,17 @@ void nsBuiltinDecoderStateMachine::SetVo
 
 double nsBuiltinDecoderStateMachine::GetCurrentTime() const
 {
   NS_ASSERTION(NS_IsMainThread() ||
                mDecoder->OnStateMachineThread() ||
                OnDecodeThread(),
                "Should be on main, decode, or state machine thread.");
 
-  return static_cast<double>(mCurrentFrameTime) / static_cast<double>(USECS_PER_S);
+  return static_cast<double>(mCurrentFrameTime) / 1000.0;
 }
 
 PRInt64 nsBuiltinDecoderStateMachine::GetDuration()
 {
   mDecoder->GetMonitor().AssertCurrentThreadIn();
 
   if (mEndTime == -1 || mStartTime == -1)
     return -1;
@@ -890,17 +890,17 @@ void nsBuiltinDecoderStateMachine::Seek(
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   MonitorAutoEnter mon(mDecoder->GetMonitor());
   // nsBuiltinDecoder::mPlayState should be SEEKING while we seek, and
   // in that case nsBuiltinDecoder shouldn't be calling us.
   NS_ASSERTION(mState != DECODER_STATE_SEEKING,
                "We shouldn't already be seeking");
   NS_ASSERTION(mState >= DECODER_STATE_DECODING,
                "We should have loaded metadata");
-  double t = aTime * static_cast<double>(USECS_PER_S);
+  double t = aTime * 1000.0;
   if (t > PR_INT64_MAX) {
     // Prevent integer overflow.
     return;
   }
 
   mSeekTime = static_cast<PRInt64>(t) + mStartTime;
   NS_ASSERTION(mSeekTime >= mStartTime && mSeekTime <= mEndTime,
                "Can only seek in range [0,duration]");
@@ -962,47 +962,47 @@ nsBuiltinDecoderStateMachine::StartDecod
     }
     nsCOMPtr<nsIRunnable> event =
       NS_NewRunnableMethod(this, &nsBuiltinDecoderStateMachine::AudioLoop);
     mAudioThread->Dispatch(event, NS_DISPATCH_NORMAL);
   }
   return NS_OK;
 }
 
-PRInt64 nsBuiltinDecoderStateMachine::AudioDecodedUsecs() const
+PRInt64 nsBuiltinDecoderStateMachine::AudioDecodedMs() const
 {
   NS_ASSERTION(HasAudio(),
-               "Should only call AudioDecodedUsecs() when we have audio");
+               "Should only call AudioDecodedMs() when we have audio");
   // The amount of audio we have decoded is the amount of audio data we've
   // already decoded and pushed to the hardware, plus the amount of audio
   // data waiting to be pushed to the hardware.
   PRInt64 pushed = (mAudioEndTime != -1) ? (mAudioEndTime - GetMediaTime()) : 0;
   return pushed + mReader->mAudioQueue.Duration();
 }
 
-PRBool nsBuiltinDecoderStateMachine::HasLowDecodedData(PRInt64 aAudioUsecs) const
+PRBool nsBuiltinDecoderStateMachine::HasLowDecodedData(PRInt64 aAudioMs) const
 {
   mDecoder->GetMonitor().AssertCurrentThreadIn();
   // We consider ourselves low on decoded data if we're low on audio,
   // provided we've not decoded to the end of the audio stream, or
   // if we're only playing video and we're low on video frames, provided
   // we've not decoded to the end of the video stream.
   return ((HasAudio() &&
            !mReader->mAudioQueue.IsFinished() &&
-           AudioDecodedUsecs() < aAudioUsecs)
+           AudioDecodedMs() < aAudioMs)
           ||
          (!HasAudio() &&
           HasVideo() &&
           !mReader->mVideoQueue.IsFinished() &&
           static_cast<PRUint32>(mReader->mVideoQueue.GetSize()) < LOW_VIDEO_FRAMES));
 }
 
 PRBool nsBuiltinDecoderStateMachine::HasLowUndecodedData() const
 {
-  return GetUndecodedData() < LOW_DATA_THRESHOLD_USECS;
+  return GetUndecodedData() < LOW_DATA_THRESHOLD_MS;
 }
 
 PRInt64 nsBuiltinDecoderStateMachine::GetUndecodedData() const
 {
   mDecoder->GetMonitor().AssertCurrentThreadIn();
   NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA,
                "Must have loaded metadata for GetBuffered() to work");
   nsTimeRanges buffered;
@@ -1020,17 +1020,17 @@ PRInt64 nsBuiltinDecoderStateMachine::Ge
     double start, end;
     res = r->Start(index, &start);
     NS_ENSURE_SUCCESS(res, 0);
 
     res = r->End(index, &end);
     NS_ENSURE_SUCCESS(res, 0);
 
     if (start <= currentTime && end >= currentTime) {
-      return static_cast<PRInt64>((end - currentTime) * USECS_PER_S);
+      return static_cast<PRInt64>((end - currentTime) * 1000);
     }
   }
   return 0;
 }
 
 nsresult nsBuiltinDecoderStateMachine::Run()
 {
   NS_ASSERTION(IsCurrentThread(mDecoder->mStateMachineThread),
@@ -1073,17 +1073,17 @@ nsresult nsBuiltinDecoderStateMachine::R
           continue;
         }
 
         NS_ASSERTION(mStartTime != -1, "Must have start time");
         NS_ASSERTION((!HasVideo() && !HasAudio()) ||
                      !mSeekable || mEndTime != -1,
                      "Active seekable media should have end time");
         NS_ASSERTION(!mSeekable || GetDuration() != -1, "Seekable media should have duration");
-        LOG(PR_LOG_DEBUG, ("%p Media goes from %lld to %lld (duration %lld) seekable=%d",
+        LOG(PR_LOG_DEBUG, ("%p Media goes from %lldms to %lldms (duration %lldms) seekable=%d",
                            mDecoder, mStartTime, mEndTime, GetDuration(), mSeekable));
 
         if (mState == DECODER_STATE_SHUTDOWN)
           continue;
 
         // Inform the element that we've loaded the metadata and the first frame,
         // setting the default framebuffer size for audioavailable events.  Also,
         // if there is audio, let the MozAudioAvailable event manager know about
@@ -1174,17 +1174,17 @@ nsresult nsBuiltinDecoderStateMachine::R
           }
           if (NS_SUCCEEDED(res)){
             SoundData* audio = HasAudio() ? mReader->mAudioQueue.PeekFront() : nsnull;
             NS_ASSERTION(!audio || (audio->mTime <= seekTime &&
                                     seekTime <= audio->mTime + audio->mDuration),
                          "Seek target should lie inside the first audio block after seek");
             PRInt64 startTime = (audio && audio->mTime < seekTime) ? audio->mTime : seekTime;
             mAudioStartTime = startTime;
-            mPlayDuration = startTime - mStartTime;
+            mPlayDuration = MsToDuration(startTime - mStartTime);
             if (HasVideo()) {
               nsAutoPtr<VideoData> video(mReader->mVideoQueue.PeekFront());
               if (video) {
                 NS_ASSERTION(video->mTime <= seekTime && seekTime <= video->mEndTime,
                              "Seek target should lie inside the first frame after seek");
                 nsIntSize display = mInfo.mDisplay;
                 float aspect = mInfo.mPixelAspectRatio;
                 {
@@ -1207,22 +1207,22 @@ nsresult nsBuiltinDecoderStateMachine::R
         LOG(PR_LOG_DEBUG, ("Seek completed, mCurrentFrameTime=%lld\n", mCurrentFrameTime));
 
         // Change state to DECODING or COMPLETED now. SeekingStopped will
         // call nsBuiltinDecoderStateMachine::Seek to reset our state to SEEKING
         // if we need to seek again.
         
         nsCOMPtr<nsIRunnable> stopEvent;
         if (GetMediaTime() == mEndTime) {
-          LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lld) to COMPLETED",
+          LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lldms) to COMPLETED",
                              mDecoder, seekTime));
           stopEvent = NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::SeekingStoppedAtEnd);
           mState = DECODER_STATE_COMPLETED;
         } else {
-          LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lld) to DECODING",
+          LOG(PR_LOG_DEBUG, ("%p Changed state from SEEKING (to %lldms) to DECODING",
                              mDecoder, seekTime));
           stopEvent = NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::SeekingStopped);
           StartDecoding();
         }
         mDecoder->GetMonitor().NotifyAll();
 
         {
           MonitorAutoExit exitMon(mDecoder->GetMonitor());
@@ -1248,28 +1248,28 @@ nsresult nsBuiltinDecoderStateMachine::R
 
         // We will remain in the buffering state if we've not decoded enough
         // data to begin playback, or if we've not downloaded a reasonable
         // amount of data inside our buffering time.
         TimeDuration elapsed = now - mBufferingStart;
         PRBool isLiveStream = mDecoder->GetCurrentStream()->GetLength() == -1;
         if ((isLiveStream || !mDecoder->CanPlayThrough()) &&
              elapsed < TimeDuration::FromSeconds(BUFFERING_WAIT) &&
-             (mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_USECS)
-                              : (GetUndecodedData() < BUFFERING_WAIT * USECS_PER_S)) &&
+             (mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_MS)
+                              : (GetUndecodedData() < BUFFERING_WAIT * 1000)) &&
              !stream->IsDataCachedToEndOfStream(mDecoder->mDecoderPosition) &&
              !stream->IsSuspended())
         {
           LOG(PR_LOG_DEBUG,
               ("Buffering: %.3lfs/%ds, timeout in %.3lfs %s",
-               GetUndecodedData() / static_cast<double>(USECS_PER_S),
+               GetUndecodedData() / 1000.0,
                BUFFERING_WAIT,
                BUFFERING_WAIT - elapsed.ToSeconds(),
                (mQuickBuffering ? "(quick exit)" : "")));
-          Wait(USECS_PER_S);
+          Wait(1000);
           if (mState == DECODER_STATE_SHUTDOWN)
             continue;
         } else {
           LOG(PR_LOG_DEBUG, ("%p Changed state from BUFFERING to DECODING", mDecoder));
           LOG(PR_LOG_DEBUG, ("%p Buffered for %.3lfs",
                              mDecoder,
                              (now - mBufferingStart).ToSeconds()));
           StartDecoding();
@@ -1382,46 +1382,46 @@ void nsBuiltinDecoderStateMachine::Advan
   // When it's time to display a frame, decode the frame and display it.
   if (mDecoder->GetState() == nsBuiltinDecoder::PLAY_STATE_PLAYING) {
     if (HasAudio() && mAudioStartTime == -1 && !mAudioCompleted) {
       // We've got audio (so we should sync off the audio clock), but we've not
       // played a sample on the audio thread, so we can't get a time from the
       // audio clock. Just wait and then return, to give the audio clock time
       // to tick.  This should really wait for a specific signal from the audio
       // thread rather than polling after a sleep.  See bug 568431 comment 4.
-      Wait(AUDIO_DURATION_USECS);
+      Wait(AUDIO_DURATION_MS);
       return;
     }
 
     // Determine the clock time. If we've got audio, and we've not reached
     // the end of the audio, use the audio clock. However if we've finished
     // audio, or don't have audio, use the system clock.
     PRInt64 clock_time = -1;
     if (!IsPlaying()) {
-      clock_time = mPlayDuration + mStartTime;
+      clock_time = DurationToMs(mPlayDuration) + mStartTime;
     } else {
       PRInt64 audio_time = GetAudioClock();
       if (HasAudio() && !mAudioCompleted && audio_time != -1) {
         clock_time = audio_time;
         // Resync against the audio clock, while we're trusting the
         // audio clock. This ensures no "drift", particularly on Linux.
-        mPlayDuration = clock_time - mStartTime;
+        mPlayDuration = MsToDuration(clock_time - mStartTime);
         mPlayStartTime = TimeStamp::Now();
       } else {
         // Sound is disabled on this system. Sync to the system clock.
-        clock_time = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
+        clock_time = DurationToMs(TimeStamp::Now() - mPlayStartTime + mPlayDuration);
         // Ensure the clock can never go backwards.
         NS_ASSERTION(mCurrentFrameTime <= clock_time, "Clock should go forwards");
         clock_time = NS_MAX(mCurrentFrameTime, clock_time) + mStartTime;
       }
     }
 
     // Skip frames up to the frame at the playback position, and figure out
     // the time remaining until it's time to display the next frame.
-    PRInt64 remainingTime = AUDIO_DURATION_USECS;
+    PRInt64 remainingTime = AUDIO_DURATION_MS;
     NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
     nsAutoPtr<VideoData> currentFrame;
     if (mReader->mVideoQueue.GetSize() > 0) {
       VideoData* frame = mReader->mVideoQueue.PeekFront();
       while (clock_time >= frame->mTime) {
         mVideoFrameEndTime = frame->mEndTime;
         currentFrame = frame;
         mReader->mVideoQueue.PopFront();
@@ -1429,28 +1429,28 @@ void nsBuiltinDecoderStateMachine::Advan
         if (mReader->mVideoQueue.GetSize() == 0)
           break;
         frame = mReader->mVideoQueue.PeekFront();
       }
       // Current frame has already been presented, wait until it's time to
       // present the next frame.
       if (frame && !currentFrame) {
         PRInt64 now = IsPlaying()
-          ? (DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration)
-          : mPlayDuration;
+          ? DurationToMs(TimeStamp::Now() - mPlayStartTime + mPlayDuration)
+          : DurationToMs(mPlayDuration);
         remainingTime = frame->mTime - mStartTime - now;
       }
     }
 
     // Check to see if we don't have enough data to play up to the next frame.
     // If we don't, switch to buffering mode.
     nsMediaStream* stream = mDecoder->GetCurrentStream();
     if (mState == DECODER_STATE_DECODING &&
         mDecoder->GetState() == nsBuiltinDecoder::PLAY_STATE_PLAYING &&
-        HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) &&
+        HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_MS) &&
         !stream->IsDataCachedToEndOfStream(mDecoder->mDecoderPosition) &&
         !stream->IsSuspended() &&
         (JustExitedQuickBuffering() || HasLowUndecodedData()))
     {
       if (currentFrame) {
         mReader->mVideoQueue.PushFront(currentFrame.forget());
       }
       StartBuffering();
@@ -1461,31 +1461,31 @@ void nsBuiltinDecoderStateMachine::Advan
     // Start playing now if need be.
     if (!IsPlaying()) {
       StartPlayback();
       mDecoder->GetMonitor().NotifyAll();
     }
 
     if (currentFrame) {
       // Decode one frame and display it.
-      TimeStamp presTime = mPlayStartTime - UsecsToDuration(mPlayDuration) +
-                           UsecsToDuration(currentFrame->mTime - mStartTime);
+      TimeStamp presTime = mPlayStartTime - mPlayDuration +
+                           MsToDuration(currentFrame->mTime - mStartTime);
       NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time");
       {
         nsIntSize display = mInfo.mDisplay;
         float aspect = mInfo.mPixelAspectRatio;
         {
           MonitorAutoExit exitMon(mDecoder->GetMonitor());
           // If we have video, we want to increment the clock in steps of the frame
           // duration.
           RenderVideoFrame(currentFrame, presTime, display, aspect);
         }
       }
       mDecoder->GetFrameStatistics().NotifyPresentedFrame();
-      PRInt64 now = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
+      PRInt64 now = DurationToMs(TimeStamp::Now() - mPlayStartTime + mPlayDuration);
       remainingTime = currentFrame->mEndTime - mStartTime - now;
       currentFrame = nsnull;
     }
 
     // Kick the decode thread in case it filled its buffers and put itself
     // to sleep.
     mDecoder->GetMonitor().NotifyAll();
 
@@ -1521,28 +1521,30 @@ void nsBuiltinDecoderStateMachine::Advan
 
     if (mState == DECODER_STATE_DECODING ||
         mState == DECODER_STATE_COMPLETED) {
       mDecoder->GetMonitor().Wait();
     }
   }
 }
 
-void nsBuiltinDecoderStateMachine::Wait(PRInt64 aUsecs) {
+void nsBuiltinDecoderStateMachine::Wait(PRInt64 aMs) {
   mDecoder->GetMonitor().AssertCurrentThreadIn();
-  TimeStamp end = TimeStamp::Now() + UsecsToDuration(aUsecs);
+  TimeStamp end = TimeStamp::Now() + MsToDuration(aMs);
   TimeStamp now;
   while ((now = TimeStamp::Now()) < end &&
          mState != DECODER_STATE_SHUTDOWN &&
          mState != DECODER_STATE_SEEKING)
   {
     PRInt64 ms = static_cast<PRInt64>(NS_round((end - now).ToSeconds() * 1000));
     if (ms == 0 || ms > PR_UINT32_MAX) {
       break;
     }
+    NS_ASSERTION(ms <= aMs && ms > 0,
+                 "nsBuiltinDecoderStateMachine::Wait interval very wrong!");
     mDecoder->GetMonitor().Wait(PR_MillisecondsToInterval(static_cast<PRUint32>(ms)));
   }
 }
 
 VideoData* nsBuiltinDecoderStateMachine::FindStartTime()
 {
   NS_ASSERTION(IsCurrentThread(mDecoder->mStateMachineThread), "Should be on state machine thread.");
   mDecoder->GetMonitor().AssertCurrentThreadIn();
@@ -1564,17 +1566,17 @@ VideoData* nsBuiltinDecoderStateMachine:
       // duration.
       mEndTime = mStartTime + mEndTime;
     }
   }
  // Set the audio start time to be the start of the media. If this lies before
  // the first actual audio sample we have, we'll inject silence during playback
   // to ensure the audio starts at the correct time.
   mAudioStartTime = mStartTime;
-  LOG(PR_LOG_DEBUG, ("%p Media start time is %lld", mDecoder, mStartTime));
+  LOG(PR_LOG_DEBUG, ("%p Media start time is %lldms", mDecoder, mStartTime));
   return v;
 }
 
 void nsBuiltinDecoderStateMachine::FindEndTime() 
 {
   NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");
   mDecoder->GetMonitor().AssertCurrentThreadIn();
 
@@ -1589,17 +1591,17 @@ void nsBuiltinDecoderStateMachine::FindE
   {
     MonitorAutoExit exitMon(mDecoder->GetMonitor());
     endTime = mReader->FindEndTime(length);
   }
   if (endTime != -1) {
     mEndTime = endTime;
   }
 
-  LOG(PR_LOG_DEBUG, ("%p Media end time is %lld", mDecoder, mEndTime));   
+  LOG(PR_LOG_DEBUG, ("%p Media end time is %lldms", mDecoder, mEndTime));   
 }
 
 void nsBuiltinDecoderStateMachine::UpdateReadyState() {
   mDecoder->GetMonitor().AssertCurrentThreadIn();
 
   nsCOMPtr<nsIRunnable> event;
   switch (GetNextFrameStatus()) {
     case nsHTMLMediaElement::NEXT_FRAME_UNAVAILABLE_BUFFERING:
@@ -1643,30 +1645,30 @@ void nsBuiltinDecoderStateMachine::LoadM
   mDecoder->StartProgressUpdates();
   mGotDurationFromMetaData = (GetDuration() != -1);
 }
 
 PRBool nsBuiltinDecoderStateMachine::JustExitedQuickBuffering()
 {
   return !mDecodeStartTime.IsNull() &&
     mQuickBuffering &&
-    (TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromSeconds(QUICK_BUFFER_THRESHOLD_USECS);
+    (TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromSeconds(QUICK_BUFFER_THRESHOLD_MS);
 }
 
 void nsBuiltinDecoderStateMachine::StartBuffering()
 {
   mDecoder->GetMonitor().AssertCurrentThreadIn();
 
   TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
   // Go into quick buffering mode provided we've not just left buffering using
   // a "quick exit". This stops us flip-flopping between playing and buffering
   // when the download speed is similar to the decode speed.
   mQuickBuffering =
     !JustExitedQuickBuffering() &&
-    decodeDuration < UsecsToDuration(QUICK_BUFFER_THRESHOLD_USECS);
+    decodeDuration < TimeDuration::FromMilliseconds(QUICK_BUFFER_THRESHOLD_MS);
   mBufferingStart = TimeStamp::Now();
 
   // We need to tell the element that buffering has started.
   // We can't just directly send an asynchronous runnable that
   // eventually fires the "waiting" event. The problem is that
   // there might be pending main-thread events, such as "data
   // received" notifications, that mean we're not actually still
   // buffering by the time this runnable executes. So instead
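The clock bookkeeping restored above keeps mPlayDuration as a wall-clock TimeDuration: while playing, the position is DurationToMs(Now() - mPlayStartTime + mPlayDuration), and StopPlayback() folds the elapsed interval back into mPlayDuration. A sketch of that pause/resume pattern with std::chrono standing in for mozilla::TimeStamp/TimeDuration (illustrative only):

#include <chrono>
#include <cstdio>
#include <thread>

using Clock = std::chrono::steady_clock;

int main() {
  auto playDuration = std::chrono::milliseconds(0);  // mPlayDuration
  auto playStartTime = Clock::now();                 // mPlayStartTime

  std::this_thread::sleep_for(std::chrono::milliseconds(50));  // ...playing...

  // Pause: fold the elapsed wall-clock interval into the accumulator.
  playDuration += std::chrono::duration_cast<std::chrono::milliseconds>(
      Clock::now() - playStartTime);

  // Resume: restart the wall clock; the playback position is then
  // Now() - playStartTime + playDuration, converted to milliseconds.
  playStartTime = Clock::now();
  auto clockMs = std::chrono::duration_cast<std::chrono::milliseconds>(
      Clock::now() - playStartTime + playDuration).count();
  printf("clock_time ~ %lld ms\n", static_cast<long long>(clockMs));  // ~50
  return 0;
}
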
--- a/content/media/nsBuiltinDecoderStateMachine.h
+++ b/content/media/nsBuiltinDecoderStateMachine.h
@@ -244,48 +244,48 @@ public:
 
   PRInt64 GetEndMediaTime() const {
     mDecoder->GetMonitor().AssertCurrentThreadIn();
     return mEndTime;
   }
 
 protected:
 
-  // Returns PR_TRUE if we've got less than aAudioUsecs microseconds of decoded
-  // and playable data. The decoder monitor must be held.
-  PRBool HasLowDecodedData(PRInt64 aAudioUsecs) const;
+  // Returns PR_TRUE if we've got less than aAudioMs ms of decoded and playable
+  // data. The decoder monitor must be held.
+  PRBool HasLowDecodedData(PRInt64 aAudioMs) const;
 
   // Returns PR_TRUE if we're running low on data which is not yet decoded.
   // The decoder monitor must be held.
   PRBool HasLowUndecodedData() const;
 
-  // Returns the number of microseconds of undecoded data available for
+  // Returns the number of milliseconds of undecoded data available for
   // decoding. The decoder monitor must be held.
   PRInt64 GetUndecodedData() const;
 
-  // Returns the number of unplayed usecs of audio we've got decoded and/or
+  // Returns the number of unplayed ms of audio we've got decoded and/or
   // pushed to the hardware waiting to play. This is how much audio we can
   // play without having to run the audio decoder. The decoder monitor
   // must be held.
-  PRInt64 AudioDecodedUsecs() const;
+  PRInt64 AudioDecodedMs() const;
 
   // Returns PR_TRUE when there's decoded audio waiting to play.
   // The decoder monitor must be held.
   PRBool HasFutureAudio() const;
 
   // Returns PR_TRUE if we recently exited "quick buffering" mode.
   PRBool JustExitedQuickBuffering();
 
-  // Waits on the decoder Monitor for aUsecs microseconds. If the decoder
-  // monitor is awoken by a Notify() call, we'll continue waiting, unless
-  // we've moved into shutdown state. This enables us to ensure that we
-  // wait for a specified time, and that the myriad of Notify()s we do an
-  // the decoder monitor don't cause the audio thread to be starved. The
-  // decoder monitor must be locked.
-  void Wait(PRInt64 aUsecs);
+  // Waits on the decoder Monitor for aMs. If the decoder monitor is awoken
+  // by a Notify() call, we'll continue waiting, unless we've moved into
+  // shutdown state. This enables us to ensure that we wait for a specified
+  // time, and that the myriad of Notify()s we do on the decoder monitor
+  // don't cause the audio thread to be starved. The decoder monitor must
+  // be locked.
+  void Wait(PRInt64 aMs);
 
   // Dispatches an asynchronous event to update the media element's ready state.
   void UpdateReadyState();
 
   // Resets playback timing data. Called when we seek, on the state machine
   // thread.
   void ResetPlayback();
 
@@ -325,18 +325,17 @@ protected:
 
   // Pushes up to aSamples samples of silence onto the audio hardware. Returns
  // the number of samples actually pushed to the hardware. This pushes up to
   // 32KB worth of samples to the hardware before returning, so must be called
   // in a loop to ensure that the desired number of samples are pushed to the
   // hardware. This ensures that the playback position advances smoothly, and
   // guarantees that we don't try to allocate an impossibly large chunk of
   // memory in order to play back silence. Called on the audio thread.
-  PRUint32 PlaySilence(PRUint32 aSamples,
-                       PRUint32 aChannels,
+  PRUint32 PlaySilence(PRUint32 aSamples, PRUint32 aChannels,
                        PRUint64 aSampleOffset);
 
   // Pops an audio chunk from the front of the audio queue, and pushes its
   // sound data to the audio hardware. MozAudioAvailable sample data is also
   // queued here. Called on the audio thread.
   PRUint32 PlayFromAudioQueue(PRUint64 aSampleOffset, PRUint32 aChannels);
 
   // Stops the decode threads. The decoder monitor must be held with exactly
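Since PlaySilence() writes at most about 32KB per call, callers drive it in a loop. A sketch of that calling pattern; MaxSilenceSamples() and the 16-bit sample assumption are illustrative, not the real nsAudioStream interface.

#include <algorithm>
#include <cstdint>

// Samples of silence that fit in one 32KB push, assuming 2-byte samples.
static uint32_t MaxSilenceSamples(uint32_t aChannels) {
  const uint32_t kMaxBytes = 32 * 1024;
  return kMaxBytes / (aChannels * uint32_t(sizeof(int16_t)));
}

// Keep calling until the requested number of samples has been pushed,
// mirroring how PlaySilence() is meant to be driven.
static void PushSilence(uint32_t aSamples, uint32_t aChannels) {
  while (aSamples > 0) {
    uint32_t chunk = std::min(aSamples, MaxSilenceSamples(aChannels));
    // ... write `chunk` samples of zeros to the audio stream here ...
    aSamples -= chunk;
  }
}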
@@ -384,19 +383,19 @@ protected:
   // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
   // not start at 0. Note this is different to the value returned
   // by GetCurrentTime(), which is in the range [0,duration].
   PRInt64 GetMediaTime() const {
     mDecoder->GetMonitor().AssertCurrentThreadIn();
     return mStartTime + mCurrentFrameTime;
   }
 
-  // Returns an upper bound on the number of microseconds of audio that is
-  // decoded and playable. This is the sum of the number of usecs of audio which
-  // is decoded and in the reader's audio queue, and the usecs of unplayed audio
+  // Returns an upper bound on the number of milliseconds of audio that is
+  // decoded and playable. This is the sum of the number of ms of audio which
+  // is decoded and in the reader's audio queue, and the ms of unplayed audio
   // which has been pushed to the audio hardware for playback. Note that after
   // calling this, the audio hardware may play some of the audio pushed to
   // hardware, so this can only be used as an upper bound. The decoder monitor
   // must be held when calling this. Called on the decoder thread.
   PRInt64 GetDecodedAudioDuration();
 
   // Monitor on mAudioStream. This monitor must be held in order to delete
   // or use the audio stream. This stops us destroying the audio stream
@@ -422,68 +421,67 @@ protected:
   // timing the presentation of video frames when there's no audio.
   // Accessed only via the state machine thread.
   TimeStamp mPlayStartTime;
 
   // The amount of time we've already spent playing the media. The current
   // playback position is therefore |Now() - mPlayStartTime +
   // mPlayDuration|, which must be adjusted by mStartTime if used with media
   // timestamps.  Accessed only via the state machine thread.
-  PRInt64 mPlayDuration;
+  TimeDuration mPlayDuration;
 
   // Time that buffering started. Used for buffering timeout and only
   // accessed on the state machine thread. This is null while we're not
   // buffering.
   TimeStamp mBufferingStart;
 
-  // Start time of the media, in microseconds. This is the presentation
+  // Start time of the media, in milliseconds. This is the presentation
   // time of the first sample decoded from the media, and is used to calculate
   // duration and as a bounds for seeking. Accessed on state machine and
   // main thread. Access controlled by decoder monitor.
   PRInt64 mStartTime;
 
-  // Time of the last page in the media, in microseconds. This is the
+  // Time of the last page in the media, in milliseconds. This is the
   // end time of the last sample in the media. Accessed on state
   // machine and main thread. Access controlled by decoder monitor.
   PRInt64 mEndTime;
 
-  // Position to seek to in microseconds when the seek state transition occurs.
+  // Position to seek to in milliseconds when the seek state transition occurs.
   // The decoder monitor lock must be obtained before reading or writing
   // this value. Accessed on main and state machine thread.
   PRInt64 mSeekTime;
 
   // The audio stream resource. Used on the state machine, audio, and main
   // threads. You must hold the mAudioMonitor, and must NOT hold the decoder
   // monitor when using the audio stream!
   nsRefPtr<nsAudioStream> mAudioStream;
 
   // The reader, don't call its methods with the decoder monitor held.
   // This is created in the play state machine's constructor, and destroyed
   // in the play state machine's destructor.
   nsAutoPtr<nsBuiltinDecoderReader> mReader;
 
-  // The time of the current frame in microseconds. This is referenced from
+  // The time of the current frame in milliseconds. This is referenced from
   // 0 which is the initial playback position. Set by the state machine
   // thread, and read-only from the main thread to get the current
   // time value. Synchronised via decoder monitor.
   PRInt64 mCurrentFrameTime;
 
-  // The presentation time of the first audio sample that was played in
-  // microseconds. We can add this to the audio stream position to determine
-  // the current audio time. Accessed on audio and state machine thread.
-  // Synchronized by decoder monitor.
+  // The presentation time of the first audio sample that was played. We can
+  // add this to the audio stream position to determine the current audio time.
+  // Accessed on audio and state machine thread. Synchronized by decoder monitor.
   PRInt64 mAudioStartTime;
 
   // The end time of the last audio sample that's been pushed onto the audio
-  // hardware in microseconds. This will approximately be the end time of the
-  // audio stream, unless another sample is pushed to the hardware.
+  // hardware. This will approximately be the end time of the audio stream,
+  // unless another sample is pushed to the hardware.
   PRInt64 mAudioEndTime;
 
-  // The presentation end time of the last video frame which has been displayed
-  // in microseconds. Accessed from the state machine thread.
+  // The presentation end time of the last video frame which has been displayed.
+  // Accessed from the state machine thread.
   PRInt64 mVideoFrameEndTime;
   
   // Volume of playback. 0.0 = muted. 1.0 = full volume. Read/Written
   // from the state machine and main threads. Synchronised via decoder
   // monitor.
   double mVolume;
 
   // Time at which we started decoding. Synchronised via decoder monitor.
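The position formula in the mPlayDuration comment can be written out directly. A sketch with std::chrono types standing in for TimeStamp/TimeDuration; mStartTime is in milliseconds, as elsewhere in this header.

#include <chrono>
#include <cstdint>

using Clock = std::chrono::steady_clock;

// Current playback position as a media timestamp, per the comment above:
// |Now() - mPlayStartTime + mPlayDuration|, shifted by mStartTime.
static int64_t PositionAsMediaTimeMs(Clock::time_point aPlayStartTime,
                                     Clock::duration aPlayDuration,
                                     int64_t aStartTimeMs) {
  auto played = (Clock::now() - aPlayStartTime) + aPlayDuration;
  int64_t playedMs =
      std::chrono::duration_cast<std::chrono::milliseconds>(played).count();
  return aStartTimeMs + playedMs;
}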
--- a/content/media/nsMediaCache.cpp
+++ b/content/media/nsMediaCache.cpp
@@ -744,17 +744,17 @@ nsMediaCache::WriteCacheFile(PRInt64 aOf
 }
 
 static PRInt32 GetMaxBlocks()
 {
   // We look up the cache size every time. This means dynamic changes
   // to the pref are applied.
   // Cache size is in KB
   PRInt32 cacheSize = nsContentUtils::GetIntPref("media.cache_size", 500*1024);
-  PRInt64 maxBlocks = static_cast<PRInt64>(cacheSize)*1024/nsMediaCache::BLOCK_SIZE;
+  PRInt64 maxBlocks = PRInt64(cacheSize)*1024/nsMediaCache::BLOCK_SIZE;
   maxBlocks = PR_MAX(maxBlocks, 1);
   return PRInt32(PR_MIN(maxBlocks, PR_INT32_MAX));
 }
 
 PRInt32
 nsMediaCache::FindBlockForIncomingData(TimeStamp aNow,
                                        nsMediaCacheStream* aStream)
 {
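The arithmetic in GetMaxBlocks() is worth seeing with numbers. A standalone version; the 32KB block size is an assumption made here to keep the sketch self-contained, and int64_t/int32_t stand in for the PR integer types.

#include <algorithm>
#include <cstdint>

static int32_t MaxBlocks(int32_t aCacheSizeKB) {
  const int64_t kBlockSize = 32 * 1024;        // assumed block size
  int64_t maxBlocks = int64_t(aCacheSizeKB) * 1024 / kBlockSize;
  maxBlocks = std::max<int64_t>(maxBlocks, 1); // never less than one block
  return int32_t(std::min<int64_t>(maxBlocks, INT32_MAX));
}
// With the default pref of 500*1024 KB this yields 16000 blocks.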
@@ -1036,25 +1036,25 @@ nsMediaCache::PredictNextUse(TimeStamp a
     case METADATA_BLOCK:
       // This block should be managed in LRU mode. For metadata we predict
       // that the time until the next use is the time since the last use.
       prediction = aNow - bo->mLastUseTime;
       break;
     case PLAYED_BLOCK:
       // This block should be managed in LRU mode, and we should impose
       // a "replay delay" to reflect the likelihood of replay happening
-      NS_ASSERTION(static_cast<PRInt64>(bo->mStreamBlock)*BLOCK_SIZE <
+      NS_ASSERTION(PRInt64(bo->mStreamBlock)*BLOCK_SIZE <
                    bo->mStream->mStreamOffset,
                    "Played block after the current stream position?");
       prediction = aNow - bo->mLastUseTime +
         TimeDuration::FromSeconds(REPLAY_DELAY);
       break;
     case READAHEAD_BLOCK: {
       PRInt64 bytesAhead =
-        static_cast<PRInt64>(bo->mStreamBlock)*BLOCK_SIZE - bo->mStream->mStreamOffset;
+        PRInt64(bo->mStreamBlock)*BLOCK_SIZE - bo->mStream->mStreamOffset;
       NS_ASSERTION(bytesAhead >= 0,
                    "Readahead block before the current stream position?");
       PRInt64 millisecondsAhead =
         bytesAhead*1000/bo->mStream->mPlaybackBytesPerSecond;
       prediction = TimeDuration::FromMilliseconds(
           PR_MIN(millisecondsAhead, PR_INT32_MAX));
       break;
     }
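The READAHEAD_BLOCK branch predicts next use as the time playback needs to reach the block. The same arithmetic, standalone, with plain integers in place of TimeStamp/TimeDuration:

#include <algorithm>
#include <cstdint>

// A block `aBytesAhead` bytes past the stream offset will be needed in
// aBytesAhead / aBytesPerSecond seconds; cap at INT32_MAX milliseconds
// as PredictNextUse() does with PR_INT32_MAX.
static int64_t PredictReadaheadMs(int64_t aBytesAhead,
                                  int64_t aBytesPerSecond) {
  int64_t millisecondsAhead = aBytesAhead * 1000 / aBytesPerSecond;
  return std::min<int64_t>(millisecondsAhead, INT32_MAX);
}
// e.g. 640000 bytes ahead at 160000 bytes/s predicts reuse in 4000 ms.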
--- a/content/media/nsMediaDecoder.h
+++ b/content/media/nsMediaDecoder.h
@@ -275,20 +275,20 @@ public:
   // This can be called from any thread. It's only a snapshot of the
   // current state, since other threads might be changing the state
   // at any time.
   virtual Statistics GetStatistics() = 0;
   
   // Return the frame decode/paint related statistics.
   FrameStatistics& GetFrameStatistics() { return mFrameStats; }
 
-  // Set the duration of the media resource in units of seconds.
+  // Set the duration of the media resource in units of milliseconds.
   // This is called via a channel listener if it can pick up the duration
   // from a content header. Must be called from the main thread only.
-  virtual void SetDuration(double aDuration) = 0;
+  virtual void SetDuration(PRInt64 aDuration) = 0;
 
   // Set a flag indicating whether seeking is supported
   virtual void SetSeekable(PRBool aSeekable) = 0;
 
   // Return PR_TRUE if seeking is supported.
   virtual PRBool GetSeekable() = 0;
 
   // Invalidate the frame.
--- a/content/media/nsMediaStream.cpp
+++ b/content/media/nsMediaStream.cpp
@@ -219,17 +219,17 @@ nsMediaChannelStream::OnStartRequest(nsI
       }
       if (NS_FAILED(rv)) {
         rv = hc->GetResponseHeader(NS_LITERAL_CSTRING("X-Content-Duration"), durationText);
       }
 
       if (NS_SUCCEEDED(rv)) {
         double duration = durationText.ToDouble(&ec);
         if (ec == NS_OK && duration >= 0) {
-          mDecoder->SetDuration(duration);
+          mDecoder->SetDuration(PRInt64(NS_round(duration*1000)));
         }
       }
     }
 
     if (mOffset > 0 && responseStatus == HTTP_OK_CODE) {
       // If we get an OK response but we were seeking, we have to assume
       // that seeking doesn't work. We also need to tell the cache that
       // it's getting data for the start of the stream.
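The header path above converts a duration in fractional seconds into the whole-millisecond PRInt64 that SetDuration() now takes. A self-contained sketch of that conversion; strtod stands in for nsString::ToDouble and llround for NS_round.

#include <cmath>
#include <cstdint>
#include <cstdlib>

// Parse an X-Content-Duration style value (e.g. "9.7", in seconds) and
// convert to milliseconds. Returns -1 on parse failure or a negative value.
static int64_t DurationHeaderToMs(const char* aHeaderValue) {
  char* end = nullptr;
  double seconds = std::strtod(aHeaderValue, &end);
  if (end == aHeaderValue || seconds < 0) {
    return -1;
  }
  return int64_t(std::llround(seconds * 1000.0));
}
// DurationHeaderToMs("9.7") == 9700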
--- a/content/media/nsMediaStream.h
+++ b/content/media/nsMediaStream.h
@@ -102,28 +102,28 @@ public:
     }
     mAccumulatedBytes += aBytes;
   }
   double GetRateAtLastStop(PRPackedBool* aReliable) {
     double seconds = mAccumulatedTime.ToSeconds();
     *aReliable = seconds >= 1.0;
     if (seconds <= 0.0)
       return 0.0;
-    return static_cast<double>(mAccumulatedBytes)/seconds;
+    return double(mAccumulatedBytes)/seconds;
   }
   double GetRate(TimeStamp aNow, PRPackedBool* aReliable) {
     TimeDuration time = mAccumulatedTime;
     if (mIsStarted) {
       time += aNow - mLastStartTime;
     }
     double seconds = time.ToSeconds();
     *aReliable = seconds >= 3.0;
     if (seconds <= 0.0)
       return 0.0;
-    return static_cast<double>(mAccumulatedBytes)/seconds;
+    return double(mAccumulatedBytes)/seconds;
   }
 private:
   PRInt64      mAccumulatedBytes;
   TimeDuration mAccumulatedTime;
   TimeStamp    mLastStartTime;
   PRPackedBool mIsStarted;
 };
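The statistics class above only reports a rate as reliable once enough wall-clock time has accumulated (1 second for a stopped channel, 3 seconds for a running one). A condensed sketch of the running-rate case, with std::chrono in place of TimeDuration:

#include <chrono>
#include <cstdint>

struct RateSketch {
  int64_t mAccumulatedBytes = 0;
  std::chrono::duration<double> mAccumulatedTime{0};

  // Bytes per second; *aReliable is set only after 3 seconds of data,
  // matching GetRate()'s threshold.
  double GetRate(bool* aReliable) const {
    double seconds = mAccumulatedTime.count();
    *aReliable = seconds >= 3.0;
    if (seconds <= 0.0) {
      return 0.0;
    }
    return double(mAccumulatedBytes) / seconds;
  }
};
// 1048576 bytes over 4 s gives 262144 bytes/s, flagged reliable.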
 
--- a/content/media/ogg/nsOggCodecState.cpp
+++ b/content/media/ogg/nsOggCodecState.cpp
@@ -121,16 +121,17 @@ PRBool nsOggCodecState::PageInFromBuffer
   delete p;
   return PR_TRUE;
 }
 
 nsTheoraState::nsTheoraState(ogg_page* aBosPage) :
   nsOggCodecState(aBosPage),
   mSetup(0),
   mCtx(0),
+  mFrameDuration(0),
   mPixelAspectRatio(0)
 {
   MOZ_COUNT_CTOR(nsTheoraState);
   th_info_init(&mInfo);
   th_comment_init(&mComment);
 }
 
 nsTheoraState::~nsTheoraState() {
@@ -140,19 +141,32 @@ nsTheoraState::~nsTheoraState() {
   th_comment_clear(&mComment);
   th_info_clear(&mInfo);
 }
 
 PRBool nsTheoraState::Init() {
   if (!mActive)
     return PR_FALSE;
 
-  PRInt64 n = mInfo.aspect_numerator;
-  PRInt64 d = mInfo.aspect_denominator;
+  PRInt64 n = mInfo.fps_numerator;
+  PRInt64 d = mInfo.fps_denominator;
 
+  PRInt64 f;
+  if (!MulOverflow(1000, d, f)) {
+    return mActive = PR_FALSE;
+  }
+  f /= n;
+  if (f > PR_UINT32_MAX) {
+    return mActive = PR_FALSE;
+  }
+  mFrameDuration = static_cast<PRUint32>(f);
+
+  n = mInfo.aspect_numerator;
+
+  d = mInfo.aspect_denominator;
   mPixelAspectRatio = (n == 0 || d == 0) ?
     1.0f : static_cast<float>(n) / static_cast<float>(d);
 
   // Ensure the frame and picture regions aren't larger than our prescribed
   // maximum, or zero sized.
   nsIntSize frame(mInfo.frame_width, mInfo.frame_height);
   nsIntRect picture(mInfo.pic_x, mInfo.pic_y, mInfo.pic_width, mInfo.pic_height);
   if (!nsVideoInfo::ValidateVideoRegion(frame, picture, frame)) {
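nsTheoraState::Init() above derives the per-frame duration in ms from the stream's fps fraction, deactivating the stream on overflow. The same computation, standalone; __builtin_mul_overflow (a GCC/Clang builtin) stands in for MulOverflow, and a return of 0 stands in for the deactivation path.

#include <cstdint>

static uint32_t FrameDurationMs(int64_t aFpsNumerator,
                                int64_t aFpsDenominator) {
  int64_t f;
  if (__builtin_mul_overflow(int64_t(1000), aFpsDenominator, &f)) {
    return 0;  // overflow: Init() would set mActive = PR_FALSE
  }
  f /= aFpsNumerator;
  if (f > UINT32_MAX) {
    return 0;
  }
  return uint32_t(f);
}
// NTSC-style 30000/1001 fps: FrameDurationMs(30000, 1001) == 33 ms.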
@@ -225,55 +239,58 @@ PRInt64 nsTheoraState::Time(th_info* aIn
   // Implementation of th_granule_frame inlined here to operate
   // on the th_info structure instead of the theora_state.
   int shift = aInfo->keyframe_granule_shift; 
   ogg_int64_t iframe = aGranulepos >> shift;
   ogg_int64_t pframe = aGranulepos - (iframe << shift);
   PRInt64 frameno = iframe + pframe - TH_VERSION_CHECK(aInfo, 3, 2, 1);
   if (!AddOverflow(frameno, 1, t))
     return -1;
-  if (!MulOverflow(t, USECS_PER_S, t))
+  if (!MulOverflow(t, 1000, t))
     return -1;
   if (!MulOverflow(t, aInfo->fps_denominator, t))
     return -1;
   return t / aInfo->fps_numerator;
 }
 
 PRInt64 nsTheoraState::StartTime(PRInt64 granulepos) {
   if (granulepos < 0 || !mActive || mInfo.fps_numerator == 0) {
     return -1;
   }
   PRInt64 t = 0;
   PRInt64 frameno = th_granule_frame(mCtx, granulepos);
-  if (!MulOverflow(frameno, USECS_PER_S, t))
+  if (!MulOverflow(frameno, 1000, t))
     return -1;
   if (!MulOverflow(t, mInfo.fps_denominator, t))
     return -1;
   return t / mInfo.fps_numerator;
 }
 
 PRInt64
 nsTheoraState::MaxKeyframeOffset()
 {
-  // Determine the maximum time in microseconds by which a key frame could
+  // Determine the maximum time in milliseconds by which a key frame could
   // offset for the theora bitstream. Theora granulepos encode time as:
   // ((key_frame_number << granule_shift) + frame_offset).
   // Therefore the maximum possible time by which any frame could be offset
   // from a keyframe is the duration of (1 << granule_shift) - 1) frames.
   PRInt64 frameDuration;
-  
-  // Max number of frames keyframe could possibly be offset.
-  PRInt64 keyframeDiff = (1 << mInfo.keyframe_granule_shift) - 1;
+  PRInt64 keyframeDiff;
+
+  PRInt64 shift = mInfo.keyframe_granule_shift;
 
-  // Length of frame in usecs.
+  // Max number of frames keyframe could possibly be offset.
+  keyframeDiff = (1 << shift) - 1;
+
+  // Length of frame in ms.
   PRInt64 d = 0; // d will be 0 if multiplication overflows.
-  MulOverflow(USECS_PER_S, mInfo.fps_denominator, d);
+  MulOverflow(1000, mInfo.fps_denominator, d);
   frameDuration = d / mInfo.fps_numerator;
 
-  // Total time in usecs keyframe can be offset from any given frame.
+  // Total time in ms keyframe can be offset from any given frame.
   return frameDuration * keyframeDiff;
 }
 
 nsresult nsVorbisState::Reset()
 {
   nsresult res = NS_OK;
   if (mActive && vorbis_synthesis_restart(&mDsp) != 0) {
     res = NS_ERROR_FAILURE;
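Theora granule positions pack a keyframe number and an offset, which is what Time() above unpacks. A worked standalone example of the decomposition; the version-check term is omitted, and the shift value 6 below is only an example.

#include <cstdint>

// The top bits of a granulepos hold the keyframe's frame number, the
// bottom `aShift` bits the offset from that keyframe.
static int64_t GranuleToFrame(int64_t aGranulepos, int aShift) {
  int64_t iframe = aGranulepos >> aShift;             // keyframe number
  int64_t pframe = aGranulepos - (iframe << aShift);  // frames past keyframe
  return iframe + pframe;
}
// With aShift == 6, granulepos (10 << 6) + 3 decodes to frame 13.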
@@ -368,17 +385,17 @@ PRInt64 nsVorbisState::Time(PRInt64 gran
 }
 
 PRInt64 nsVorbisState::Time(vorbis_info* aInfo, PRInt64 aGranulepos)
 {
   if (aGranulepos == -1 || aInfo->rate == 0) {
     return -1;
   }
   PRInt64 t = 0;
-  MulOverflow(USECS_PER_S, aGranulepos, t);
+  MulOverflow(1000, aGranulepos, t);
   return t / aInfo->rate;
 }
 
 nsSkeletonState::nsSkeletonState(ogg_page* aBosPage)
   : nsOggCodecState(aBosPage),
     mVersion(0),
     mPresentationTime(0),
     mLength(0)
@@ -500,25 +517,25 @@ PRBool nsSkeletonState::DecodeIndex(ogg_
     LOG(PR_LOG_DEBUG, ("Ogg Skeleton Index packet for stream %u has 0 "
                        "timestamp denominator.", serialno));
     return (mActive = PR_FALSE);
   }
 
   // Extract the start time.
   n = LEInt64(p + INDEX_FIRST_NUMER_OFFSET);
   PRInt64 t;
-  if (!MulOverflow(n, USECS_PER_S, t)) {
+  if (!MulOverflow(n, 1000, t)) {
     return (mActive = PR_FALSE);
   } else {
     startTime = t / timeDenom;
   }
 
   // Extract the end time.
   n = LEInt64(p + INDEX_LAST_NUMER_OFFSET);
-  if (!MulOverflow(n, USECS_PER_S, t)) {
+  if (!MulOverflow(n, 1000, t)) {
     return (mActive = PR_FALSE);
   } else {
     endTime = t / timeDenom;
   }
 
   // Check the numKeyPoints value read, ensure we're not going to run out of
   // memory while trying to decode the index packet.
   PRInt64 minPacketSize;
@@ -568,21 +585,21 @@ PRBool nsSkeletonState::DecodeIndex(ogg_
     }
     p = ReadVariableLengthInt(p, limit, delta);
     if (!AddOverflow(time, delta, time) ||
         time > endTime ||
         time < startTime)
     {
       return (mActive = PR_FALSE);
     }
-    PRInt64 timeUsecs = 0;
-    if (!MulOverflow(time, USECS_PER_S, timeUsecs))
+    PRInt64 timeMs = 0;
+    if (!MulOverflow(time, 1000, timeMs))
       return mActive = PR_FALSE;
-    timeUsecs /= timeDenom;
-    keyPoints->Add(offset, timeUsecs);
+    timeMs /= timeDenom;
+    keyPoints->Add(offset, timeMs);
     numKeyPointsRead++;
   }
 
   PRInt32 keyPointsRead = keyPoints->Length();
   if (keyPointsRead > 0) {
     mIndex.Put(serialno, keyPoints.forget());
   }
 
@@ -691,17 +708,17 @@ PRBool nsSkeletonState::DecodeHeader(ogg
   if (IsSkeletonBOS(aPacket)) {
     PRUint16 verMajor = LEUint16(aPacket->packet + SKELETON_VERSION_MAJOR_OFFSET);
     PRUint16 verMinor = LEUint16(aPacket->packet + SKELETON_VERSION_MINOR_OFFSET);
 
     // Read the presentation time. We read this before the version check as the
     // presentation time exists in all versions.
     PRInt64 n = LEInt64(aPacket->packet + SKELETON_PRESENTATION_TIME_NUMERATOR_OFFSET);
     PRInt64 d = LEInt64(aPacket->packet + SKELETON_PRESENTATION_TIME_DENOMINATOR_OFFSET);
-    mPresentationTime = d == 0 ? 0 : (static_cast<float>(n) / static_cast<float>(d)) * USECS_PER_S;
+    mPresentationTime = d == 0 ? 0 : (static_cast<float>(n) / static_cast<float>(d)) * 1000;
 
     mVersion = SKELETON_VERSION(verMajor, verMinor);
     if (mVersion < SKELETON_VERSION(4,0) ||
         mVersion >= SKELETON_VERSION(5,0) ||
         aPacket->bytes < SKELETON_4_0_MIN_HEADER_LEN)
     {
       // We only care to parse Skeleton version 4.0+.
       mActive = PR_FALSE;
--- a/content/media/ogg/nsOggCodecState.h
+++ b/content/media/ogg/nsOggCodecState.h
@@ -178,28 +178,31 @@ public:
   virtual ~nsTheoraState();
 
   virtual CodecType GetType() { return TYPE_THEORA; }
   virtual PRBool DecodeHeader(ogg_packet* aPacket);
   virtual PRInt64 Time(PRInt64 granulepos);
   virtual PRInt64 StartTime(PRInt64 granulepos);
   virtual PRBool Init();
 
-  // Returns the maximum number of microseconds which a keyframe can be offset
+  // Returns the maximum number of milliseconds which a keyframe can be offset
   // from any given interframe.
   PRInt64 MaxKeyframeOffset();
 
   // Returns the end time that a granulepos represents.
   static PRInt64 Time(th_info* aInfo, PRInt64 aGranulePos); 
   
   th_info mInfo;
   th_comment mComment;
   th_setup_info *mSetup;
   th_dec_ctx* mCtx;
 
+  // Frame duration in ms.
+  PRUint32 mFrameDuration;
+
   float mPixelAspectRatio;
 };
 
 // Constructs a 32bit version number out of two 16 bit major,minor
 // version numbers.
 #define SKELETON_VERSION(major, minor) (((major)<<16)|(minor))
 
 class nsSkeletonState : public nsOggCodecState {
@@ -225,17 +228,17 @@ public:
 
     nsKeyPoint(PRInt64 aOffset, PRInt64 aTime)
       : mOffset(aOffset),
         mTime(aTime) {}
 
     // Offset from start of segment/link-in-the-chain in bytes.
     PRInt64 mOffset;
 
-    // Presentation time in usecs.
+    // Presentation time in ms.
     PRInt64 mTime;
 
     PRBool IsNull() {
       return mOffset == PR_INT64_MAX &&
              mTime == PR_INT64_MAX;
     }
   };
 
@@ -312,20 +315,20 @@ private:
     const nsKeyPoint& Get(PRUint32 aIndex) const {
       return mKeyPoints[aIndex];
     }
 
     PRUint32 Length() const {
       return mKeyPoints.Length();
     }
 
-    // Presentation time of the first sample in this stream in usecs.
+    // Presentation time of the first sample in this stream in ms.
     const PRInt64 mStartTime;
 
-    // End time of the last sample in this stream in usecs.
+    // End time of the last sample in this stream in ms.
     const PRInt64 mEndTime;
 
   private:
     nsTArray<nsKeyPoint> mKeyPoints;
   };
 
   // Maps Ogg serialnos to the index-keypoint list.
   nsClassHashtable<nsUint32HashKey, nsKeyFrameIndex> mIndex;
--- a/content/media/ogg/nsOggReader.cpp
+++ b/content/media/ogg/nsOggReader.cpp
@@ -63,25 +63,25 @@ extern PRLogModuleInfo* gBuiltinDecoderL
 #define SEEK_LOG(type, msg)
 #endif
 
 // If we don't have a Theora video stream, then during seeking, if a seek
 // target is less than SEEK_DECODE_MARGIN ahead of the current playback
 // position, we'll just decode forwards rather than performing a bisection
 // search. If we have Theora video we use the maximum keyframe interval as
 // this value, rather than SEEK_DECODE_MARGIN. This makes small seeks faster.
-#define SEEK_DECODE_MARGIN 2000000
+#define SEEK_DECODE_MARGIN 2000
 
-// The number of microseconds of "fuzz" we use in a bisection search over
+// The number of milliseconds of "fuzz" we use in a bisection search over
 // HTTP. When we're seeking with fuzz, we'll stop the search if a bisection
-// lands between the seek target and SEEK_FUZZ_USECS microseconds before the
+// lands between the seek target and SEEK_FUZZ_MS milliseconds before the
 // seek target.  This is because it's usually quicker to just keep downloading
 // from an existing connection than to do another bisection inside that
 // small range, which would open a new HTTP connection.
-#define SEEK_FUZZ_USECS 500000
+#define SEEK_FUZZ_MS 500
 
 enum PageSyncResult {
   PAGE_SYNC_ERROR = 1,
   PAGE_SYNC_END_OF_RANGE= 2,
   PAGE_SYNC_OK = 3
 };
 
 // Reads a page from the media stream.
@@ -385,17 +385,18 @@ nsresult nsOggReader::DecodeVorbis(nsTAr
     for (PRUint32 j = 0; j < channels; ++j) {
       VorbisPCMValue* channel = pcm[j];
       for (PRUint32 i = 0; i < PRUint32(samples); ++i) {
         buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
       }
     }
 
     PRInt64 duration = mVorbisState->Time((PRInt64)samples);
-    PRInt64 startTime = mVorbisState->Time(mVorbisGranulepos);
+    PRInt64 startTime = (mVorbisGranulepos != -1) ?
+      mVorbisState->Time(mVorbisGranulepos) : -1;
     SoundData* s = new SoundData(mPageOffset,
                                  startTime,
                                  duration,
                                  samples,
                                  buffer,
                                  channels);
     if (mVorbisGranulepos != -1) {
       mVorbisGranulepos += samples;
@@ -538,18 +539,19 @@ AllFrameTimesIncrease(nsTArray<nsAutoPtr
 
 nsresult nsOggReader::DecodeTheora(nsTArray<nsAutoPtr<VideoData> >& aFrames,
                                    ogg_packet* aPacket)
 {
   int ret = th_decode_packetin(mTheoraState->mCtx, aPacket, 0);
   if (ret != 0 && ret != TH_DUPFRAME) {
     return NS_ERROR_FAILURE;
   }
-  PRInt64 time = mTheoraState->StartTime(aPacket->granulepos);
-  PRInt64 endTime = mTheoraState->Time(aPacket->granulepos);
+  PRInt64 time = (aPacket->granulepos != -1)
+    ? mTheoraState->StartTime(aPacket->granulepos) : -1;
+  PRInt64 endTime = time != -1 ? time + mTheoraState->mFrameDuration : -1;
   if (ret == TH_DUPFRAME) {
     VideoData* v = VideoData::CreateDuplicate(mPageOffset,
                                               time,
                                               endTime,
                                               aPacket->granulepos);
     if (!aFrames.AppendElement(v)) {
       delete v;
     }
@@ -697,17 +699,17 @@ PRBool nsOggReader::DecodeVideoFrame(PRB
         // Check that the frame's granule number is one more than the
         // previous frame's.
         NS_ASSERTION(i == 0 ||
                      th_granule_frame(mTheoraState->mCtx, granulepos) ==
                      th_granule_frame(mTheoraState->mCtx, frames[i-1]->mTimecode) + 1,
                      "Granulepos calculation is incorrect!");
 
         frames[i]->mTime = mTheoraState->StartTime(granulepos);
-        frames[i]->mEndTime = mTheoraState->Time(granulepos);
+        frames[i]->mEndTime = frames[i]->mTime + mTheoraState->mFrameDuration;
         NS_ASSERTION(frames[i]->mEndTime >= frames[i]->mTime, "Frame must start before it ends.");
         frames[i]->mTimecode = granulepos;
       }
       NS_ASSERTION(AllFrameTimesIncrease(frames), "All frames must have granulepos");
 
       // Check that the second to last frame's granule number is one less than
       // the last frame's (the known granule number). If not our granulepos
       // recovery missed a beat.
@@ -1250,17 +1252,17 @@ nsresult nsOggReader::SeekInBufferedRang
     PRInt64 keyframeTime = mTheoraState->StartTime(keyframeGranulepos);
     SEEK_LOG(PR_LOG_DEBUG, ("Keyframe for %lld is at %lld, seeking back to it",
                             video->mTime, keyframeTime));
     SeekRange k = SelectSeekRange(aRanges,
                                   keyframeTime,
                                   aStartTime,
                                   aEndTime,
                                   PR_FALSE);
-    res = SeekBisection(keyframeTime, k, SEEK_FUZZ_USECS);
+    res = SeekBisection(keyframeTime, k, SEEK_FUZZ_MS);
     NS_ASSERTION(mTheoraGranulepos == -1, "SeekBisection must reset Theora decode");
     NS_ASSERTION(mVorbisGranulepos == -1, "SeekBisection must reset Vorbis decode");
   }
   return res;
 }
 
 PRBool nsOggReader::CanDecodeToTarget(PRInt64 aTarget,
                                       PRInt64 aCurrentTime)
@@ -1276,17 +1278,17 @@ PRBool nsOggReader::CanDecodeToTarget(PR
 nsresult nsOggReader::SeekInUnbuffered(PRInt64 aTarget,
                                        PRInt64 aStartTime,
                                        PRInt64 aEndTime,
                                        const nsTArray<SeekRange>& aRanges)
 {
   LOG(PR_LOG_DEBUG, ("%p Seeking in unbuffered data to %lldms using bisection search", mDecoder, aTarget));
   
   // If we've got an active Theora bitstream, determine the maximum possible
-  // time in usecs which a keyframe could be before a given interframe. We
+  // time in ms which a keyframe could be before a given interframe. We
   // subtract this from our seek target, seek to the new target, and then
   // will decode forward to the original seek target. We should encounter a
   // keyframe in that interval. This prevents us from needing to run two
   // bisections; one for the seek target frame, and another to find its
   // keyframe. It's usually faster to just download this extra data, rather
   // than perform two bisections to find the seek target's keyframe. We
   // don't do this offsetting when seeking in a buffered range,
   // as the extra decoding causes a noticeable speed hit when all the data
@@ -1295,17 +1297,17 @@ nsresult nsOggReader::SeekInUnbuffered(P
   PRInt64 keyframeOffsetMs = 0;
   if (HasVideo() && mTheoraState) {
     keyframeOffsetMs = mTheoraState->MaxKeyframeOffset();
   }
   PRInt64 seekTarget = NS_MAX(aStartTime, aTarget - keyframeOffsetMs);
   // Minimize the bisection search space using the known timestamps from the
   // buffered ranges.
   SeekRange k = SelectSeekRange(aRanges, seekTarget, aStartTime, aEndTime, PR_FALSE);
-  nsresult res = SeekBisection(seekTarget, k, SEEK_FUZZ_USECS);
+  nsresult res = SeekBisection(seekTarget, k, SEEK_FUZZ_MS);
   NS_ASSERTION(mTheoraGranulepos == -1, "SeekBisection must reset Theora decode");
   NS_ASSERTION(mVorbisGranulepos == -1, "SeekBisection must reset Vorbis decode");
   return res;
 }
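The offsetting described above is simple arithmetic: back the bisection target up by the worst-case keyframe offset, clamped to the start of the media. A standalone sketch, all values in ms:

#include <algorithm>
#include <cstdint>

static int64_t AdjustSeekTarget(int64_t aTarget, int64_t aStartTime,
                                int64_t aMaxKeyframeOffsetMs) {
  return std::max(aStartTime, aTarget - aMaxKeyframeOffsetMs);
}
// Seeking to 10000 ms with a 4000 ms worst-case offset bisects to 6000 ms,
// then decodes forward to 10000 ms, crossing a keyframe on the way.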
 
 nsresult nsOggReader::Seek(PRInt64 aTarget,
                            PRInt64 aStartTime,
                            PRInt64 aEndTime,
@@ -1779,18 +1781,19 @@ nsresult nsOggReader::GetBuffered(nsTime
       }
     }
 
     if (startTime != -1) {
       // We were able to find a start time for that range, see if we can
       // find an end time.
       PRInt64 endTime = FindEndTime(startOffset, endOffset, PR_TRUE, &state);
       if (endTime != -1) {
-        aBuffered->Add(startTime / static_cast<double>(USECS_PER_S),
-                       (endTime - aStartTime) / static_cast<double>(USECS_PER_S));
+        endTime -= aStartTime;
+        aBuffered->Add(static_cast<double>(startTime) / 1000.0,
+                       static_cast<double>(endTime) / 1000.0);
       }
     }
   }
 
   // If we don't clear the sync state before exit we'll leak.
   ogg_sync_clear(&state);
 
   return NS_OK;
--- a/content/media/ogg/nsOggReader.h
+++ b/content/media/ogg/nsOggReader.h
@@ -99,18 +99,18 @@ private:
   PRBool HasSkeleton()
   {
     MonitorAutoEnter mon(mMonitor);
     return mSkeletonState != 0 && mSkeletonState->mActive;
   }
 
   // Returns PR_TRUE if we should decode up to the seek target rather than
   // seeking to the target using a bisection search or index-assisted seek.
-  // We should do this if the seek target (aTarget, in usecs), lies not too far
-  // ahead of the current playback position (aCurrentTime, in usecs).
+  // We should do this if the seek target (aTarget, in ms) lies not too far
+  // ahead of the current playback position (aCurrentTime, in ms).
   PRBool CanDecodeToTarget(PRInt64 aTarget,
                            PRInt64 aCurrentTime);
 
   // Seeks to the keyframe preceding the target time using available
   // keyframe indexes.
   enum IndexedSeekResult {
     SEEK_OK,          // Success.
     SEEK_INDEX_FAIL,  // Failure due to no index, or invalid index.
@@ -147,31 +147,31 @@ private:
     PRBool IsNull() const {
       return mOffsetStart == 0 &&
              mOffsetEnd == 0 &&
              mTimeStart == 0 &&
              mTimeEnd == 0;
     }
 
     PRInt64 mOffsetStart, mOffsetEnd; // in bytes.
-    PRInt64 mTimeStart, mTimeEnd; // in usecs.
+    PRInt64 mTimeStart, mTimeEnd; // in ms.
   };
 
-  // Seeks to aTarget usecs in the buffered range aRange using bisection search,
+  // Seeks to aTarget ms in the buffered range aRange using bisection search,
   // or to the keyframe prior to aTarget if we have video. aStartTime must be
   // the presentation time at the start of media, and aEndTime the time at
   // end of media. aRanges must be the time/byte ranges buffered in the media
   // cache as per GetSeekRanges().
   nsresult SeekInBufferedRange(PRInt64 aTarget,
                                PRInt64 aStartTime,
                                PRInt64 aEndTime,
                                const nsTArray<SeekRange>& aRanges,
                                const SeekRange& aRange);
 
-  // Seeks to before aTarget usecs in media using bisection search. If the media
+  // Seeks to before aTarget ms in media using bisection search. If the media
   // has video, this will seek to before the keyframe required to render the
   // media at aTarget. Will use aRanges in order to narrow the bisection
   // search space. aStartTime must be the presentation time at the start of
   // media, and aEndTime the time at end of media. aRanges must be the time/byte
   // ranges buffered in the media cache as per GetSeekRanges().
   nsresult SeekInUnbuffered(PRInt64 aTarget,
                             PRInt64 aStartTime,
                             PRInt64 aEndTime,
@@ -203,37 +203,37 @@ private:
   // of the page, or -1 if the page read failed.
   PRInt64 ReadOggPage(ogg_page* aPage);
 
   // Read a packet for an Ogg bitstream/codec state. Returns PR_TRUE on
   // success, or PR_FALSE if the read failed.
   PRBool ReadOggPacket(nsOggCodecState* aCodecState, ogg_packet* aPacket);
 
   // Performs a seek bisection to move the media stream's read cursor to the
-  // last ogg page boundary which has end time before aTarget usecs on both the
+  // last ogg page boundary which has end time before aTarget ms on both the
   // Theora and Vorbis bitstreams. Limits its search to data inside aRange;
   // i.e. it will only read inside of the aRange's start and end offsets.
-  // aFuzz is the number of usecs of leniency we'll allow; we'll terminate the
-  // seek when we land in the range (aTime - aFuzz, aTime) usecs.
+  // aFuzz is the number of ms of leniency we'll allow; we'll terminate the
+  // seek when we land in the range (aTime - aFuzz, aTime) ms.
   nsresult SeekBisection(PRInt64 aTarget,
                          const SeekRange& aRange,
                          PRUint32 aFuzz);
 
   // Returns true if the serial number is for a stream we encountered
   // while reading metadata. Call on the main thread only.
   PRBool IsKnownStream(PRUint32 aSerial);
 
   // Fills aRanges with SeekRanges denoting the sections of the media which
   // have been downloaded and are stored in the media cache. The reader
   // monitor must be held with exactly one lock count. The nsMediaStream
   // must be pinned while calling this.
   nsresult GetSeekRanges(nsTArray<SeekRange>& aRanges);
 
   // Returns the range in which you should perform a seek bisection if
-  // you wish to seek to aTarget usecs, given the known (buffered) byte ranges
+  // you wish to seek to aTarget ms, given the known (buffered) byte ranges
   // in aRanges. If aExact is PR_TRUE, we only return an exact copy of a
   // range in which aTarget lies, or a null range if aTarget isn't contained
   // in any of the (buffered) ranges. Otherwise, when aExact is PR_FALSE,
   // we'll construct the smallest possible range we can, based on the times
   // and byte offsets known in aRanges. We can then use this to minimize our
   // bisection's search space when the target isn't in a known buffered range.
   SeekRange SelectSeekRange(const nsTArray<SeekRange>& aRanges,
                             PRInt64 aTarget,
--- a/content/media/raw/nsRawReader.cpp
+++ b/content/media/raw/nsRawReader.cpp
@@ -124,17 +124,17 @@ nsresult nsRawReader::ReadMetadata(nsVid
   mFrameSize = mMetadata.frameWidth * mMetadata.frameHeight *
     (mMetadata.lumaChannelBpp + mMetadata.chromaChannelBpp) / 8.0 +
     sizeof(nsRawPacketHeader);
 
   PRInt64 length = stream->GetLength();
   if (length != -1) {
     mozilla::MonitorAutoExit autoExitMonitor(mMonitor);
     mozilla::MonitorAutoEnter autoMonitor(mDecoder->GetMonitor());
-    mDecoder->GetStateMachine()->SetDuration(USECS_PER_S *
+    mDecoder->GetStateMachine()->SetDuration(1000 *
                                            (length - sizeof(nsRawVideoHeader)) /
                                            (mFrameSize * mFrameRate));
   }
 
   *aInfo = mInfo;
 
   return NS_OK;
 }
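The raw reader's duration above is plain proportionality: payload bytes over bytes-per-frame gives a frame count, divided by the frame rate and scaled to ms. A worked version; the header size and frame size below are example values, not the real nsRawVideoHeader layout.

#include <cstdint>

static int64_t RawDurationMs(int64_t aStreamLength, int64_t aHeaderSize,
                             int64_t aFrameSize, int64_t aFrameRate) {
  return 1000 * (aStreamLength - aHeaderSize) / (aFrameSize * aFrameRate);
}
// 900 frames of 38416 bytes at 30 fps behind a 12-byte header:
// RawDurationMs(12 + 900 * 38416, 12, 38416, 30) == 30000 ms.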
@@ -179,17 +179,17 @@ PRBool nsRawReader::DecodeVideoFrame(PRB
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   PRUint32 parsed = 0, decoded = 0;
   nsMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded);
 
   if (!mFrameSize)
     return PR_FALSE; // Metadata read failed.  We should refuse to play.
 
-  PRInt64 currentFrameTime = USECS_PER_S * mCurrentFrame / mFrameRate;
+  PRInt64 currentFrameTime = 1000 * mCurrentFrame / mFrameRate;
   PRUint32 length = mFrameSize - sizeof(nsRawPacketHeader);
 
   nsAutoPtr<PRUint8> buffer(new PRUint8[length]);
   nsMediaStream* stream = mDecoder->GetCurrentStream();
   NS_ASSERTION(stream, "Decoder has no media stream");
 
   // We're always decoding one frame when called
   while(true) {
@@ -207,17 +207,17 @@ PRBool nsRawReader::DecodeVideoFrame(PRB
     }
 
     parsed++;
 
     if (currentFrameTime >= aTimeThreshold)
       break;
 
     mCurrentFrame++;
-    currentFrameTime += static_cast<double>(USECS_PER_S) / mFrameRate;
+    currentFrameTime += 1000.0 / mFrameRate;
   }
 
   VideoData::YCbCrBuffer b;
   b.mPlanes[0].mData = buffer;
   b.mPlanes[0].mStride = mMetadata.frameWidth * mMetadata.lumaChannelBpp / 8.0;
   b.mPlanes[0].mHeight = mMetadata.frameHeight;
   b.mPlanes[0].mWidth = mMetadata.frameWidth;
 
@@ -232,44 +232,44 @@ PRBool nsRawReader::DecodeVideoFrame(PRB
   b.mPlanes[2].mStride = cbcrStride;
   b.mPlanes[2].mHeight = mMetadata.frameHeight / 2;
   b.mPlanes[2].mWidth = mMetadata.frameWidth / 2;
 
   VideoData *v = VideoData::Create(mInfo,
                                    mDecoder->GetImageContainer(),
                                    -1,
                                    currentFrameTime,
-                                   currentFrameTime + (USECS_PER_S / mFrameRate),
+                                   currentFrameTime + (1000 / mFrameRate),
                                    b,
                                    1, // In raw video every frame is a keyframe
                                    -1);
   if (!v)
     return PR_FALSE;
 
   mVideoQueue.Push(v);
   mCurrentFrame++;
   decoded++;
-  currentFrameTime += USECS_PER_S / mFrameRate;
+  currentFrameTime += 1000 / mFrameRate;
 
   return PR_TRUE;
 }
 
 nsresult nsRawReader::Seek(PRInt64 aTime, PRInt64 aStartTime, PRInt64 aEndTime, PRInt64 aCurrentTime)
 {
   mozilla::MonitorAutoEnter autoEnter(mMonitor);
   NS_ASSERTION(mDecoder->OnStateMachineThread(),
                "Should be on state machine thread.");
 
   nsMediaStream *stream = mDecoder->GetCurrentStream();
   NS_ASSERTION(stream, "Decoder has no media stream");
 
   PRUint32 frame = mCurrentFrame;
   if (aTime >= UINT_MAX)
     return NS_ERROR_FAILURE;
-  mCurrentFrame = aTime * mFrameRate / USECS_PER_S;
+  mCurrentFrame = aTime * mFrameRate / 1000;
 
   PRUint32 offset;
   if (!MulOverflow32(mCurrentFrame, mFrameSize, offset))
     return NS_ERROR_FAILURE;
 
   offset += sizeof(nsRawVideoHeader);
 
   nsresult rv = stream->Seek(nsISeekableStream::NS_SEEK_SET, offset);
--- a/content/media/test/test_seekLies.html
+++ b/content/media/test/test_seekLies.html
@@ -8,17 +8,17 @@
 </head>
 <body onunload="mediaTestCleanup();">
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
 function on_metadataloaded() {
   var v = document.getElementById('v');
   var d = Math.round(v.duration*1000);
-  ok(d == 4000, "Checking duration: " + d);
+  ok(d == 3999, "Checking duration: " + d);
   SimpleTest.finish();
 }
 
 SimpleTest.waitForExplicitFinish();
 </script>
 </pre>
 <video id='v'
        src='seekLies.sjs'
--- a/content/media/wave/nsWaveReader.cpp
+++ b/content/media/wave/nsWaveReader.cpp
@@ -166,18 +166,19 @@ nsresult nsWaveReader::ReadMetadata(nsVi
   mInfo.mAudioChannels = mChannels;
   mInfo.mDataOffset = -1;
 
   *aInfo = mInfo;
 
   MonitorAutoExit exitReaderMon(mMonitor);
   MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
 
-  mDecoder->GetStateMachine()->SetDuration(
-    static_cast<PRInt64>(BytesToTime(GetDataLength()) * USECS_PER_S));
+  float d = floorf(BytesToTime(GetDataLength() * 1000));
+  NS_ASSERTION(d <= PR_INT64_MAX, "Duration overflow");
+  mDecoder->GetStateMachine()->SetDuration(static_cast<PRInt64>(d));
 
   return NS_OK;
 }
 
 PRBool nsWaveReader::DecodeAudioData()
 {
   MonitorAutoEnter mon(mMonitor);
   NS_ASSERTION(mDecoder->OnStateMachineThread() || mDecoder->OnDecodeThread(),
@@ -223,28 +224,26 @@ PRBool nsWaveReader::DecodeAudioData()
         *s++ = v;
 #elif defined(MOZ_SAMPLE_TYPE_FLOAT32)
         *s++ = (PRInt32(v) - PR_INT16_MIN) / float(PR_UINT16_MAX) * 2.F - 1.F;
 #endif
       }
     }
   }
 
-  double posTime = BytesToTime(pos);
-  double readSizeTime = BytesToTime(readSize);
-  NS_ASSERTION(posTime <= PR_INT64_MAX / USECS_PER_S, "posTime overflow");
-  NS_ASSERTION(readSizeTime <= PR_INT64_MAX / USECS_PER_S, "readSizeTime overflow");
+  float posTime = BytesToTime(pos);
+  float readSizeTime = BytesToTime(readSize);
+  NS_ASSERTION(posTime <= PR_INT64_MAX / 1000, "posTime overflow");
+  NS_ASSERTION(readSizeTime <= PR_INT64_MAX / 1000, "readSizeTime overflow");
   NS_ASSERTION(samples < PR_INT32_MAX, "samples overflow");
 
-  mAudioQueue.Push(new SoundData(pos,
-                                 static_cast<PRInt64>(posTime * USECS_PER_S),
-                                 static_cast<PRInt64>(readSizeTime * USECS_PER_S),
+  mAudioQueue.Push(new SoundData(pos, static_cast<PRInt64>(posTime * 1000),
+                                 static_cast<PRInt64>(readSizeTime * 1000),
                                  static_cast<PRInt32>(samples),
-                                 sampleBuffer.forget(),
-                                 mChannels));
+                                 sampleBuffer.forget(), mChannels));
 
   return PR_TRUE;
 }
 
 PRBool nsWaveReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
                                       PRInt64 aTimeThreshold)
 {
   MonitorAutoEnter mon(mMonitor);
@@ -254,41 +253,41 @@ PRBool nsWaveReader::DecodeVideoFrame(PR
   return PR_FALSE;
 }
 
 nsresult nsWaveReader::Seek(PRInt64 aTarget, PRInt64 aStartTime, PRInt64 aEndTime, PRInt64 aCurrentTime)
 {
   MonitorAutoEnter mon(mMonitor);
   NS_ASSERTION(mDecoder->OnStateMachineThread(),
                "Should be on state machine thread.");
-  LOG(PR_LOG_DEBUG, ("%p About to seek to %lld", mDecoder, aTarget));
+  LOG(PR_LOG_DEBUG, ("%p About to seek to %lldms", mDecoder, aTarget));
   if (NS_FAILED(ResetDecode())) {
     return NS_ERROR_FAILURE;
   }
-  double d = BytesToTime(GetDataLength());
-  NS_ASSERTION(d < PR_INT64_MAX / USECS_PER_S, "Duration overflow"); 
-  PRInt64 duration = static_cast<PRInt64>(d * USECS_PER_S);
-  double seekTime = NS_MIN(aTarget, duration) / static_cast<double>(USECS_PER_S);
-  PRInt64 position = RoundDownToSample(static_cast<PRInt64>(TimeToBytes(seekTime)));
+  float d = BytesToTime(GetDataLength());
+  NS_ASSERTION(d < PR_INT64_MAX / 1000, "Duration overflow"); 
+  PRInt64 duration = static_cast<PRInt64>(d) * 1000;
+  PRInt64 seekTime = NS_MIN(aTarget, duration);
+  PRInt64 position = RoundDownToSample(static_cast<PRInt64>(TimeToBytes(seekTime) / 1000.f));
   NS_ASSERTION(PR_INT64_MAX - mWavePCMOffset > position, "Integer overflow during wave seek");
   position += mWavePCMOffset;
   return mDecoder->GetCurrentStream()->Seek(nsISeekableStream::NS_SEEK_SET, position);
 }
 
 nsresult nsWaveReader::GetBuffered(nsTimeRanges* aBuffered, PRInt64 aStartTime)
 {
   PRInt64 startOffset = mDecoder->GetCurrentStream()->GetNextCachedData(mWavePCMOffset);
   while (startOffset >= 0) {
     PRInt64 endOffset = mDecoder->GetCurrentStream()->GetCachedDataEnd(startOffset);
     // Bytes [startOffset..endOffset] are cached.
     NS_ASSERTION(startOffset >= mWavePCMOffset, "Integer underflow in GetBuffered");
     NS_ASSERTION(endOffset >= mWavePCMOffset, "Integer underflow in GetBuffered");
 
-    aBuffered->Add(BytesToTime(startOffset - mWavePCMOffset),
-                   BytesToTime(endOffset - mWavePCMOffset));
+    aBuffered->Add(floorf(BytesToTime(startOffset - mWavePCMOffset) * 1000.f) / 1000.0,
+                   floorf(BytesToTime(endOffset - mWavePCMOffset) * 1000.f) / 1000.0);
     startOffset = mDecoder->GetCurrentStream()->GetNextCachedData(endOffset);
   }
   return NS_OK;
 }
 
 PRBool
 nsWaveReader::ReadAll(char* aBuf, PRInt64 aSize, PRInt64* aBytesRead)
 {
@@ -504,25 +503,25 @@ nsWaveReader::FindDataOffset()
   }
 
   MonitorAutoEnter monitor(mDecoder->GetMonitor());
   mWaveLength = length;
   mWavePCMOffset = PRUint32(offset);
   return PR_TRUE;
 }
 
-double
+float
 nsWaveReader::BytesToTime(PRInt64 aBytes) const
 {
   NS_ABORT_IF_FALSE(aBytes >= 0, "Must be >= 0");
   return float(aBytes) / mSampleRate / mSampleSize;
 }
 
 PRInt64
-nsWaveReader::TimeToBytes(double aTime) const
+nsWaveReader::TimeToBytes(float aTime) const
 {
   NS_ABORT_IF_FALSE(aTime >= 0.0f, "Must be >= 0");
   return RoundDownToSample(PRInt64(aTime * mSampleRate * mSampleSize));
 }
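nsWaveReader documents that 176400 bytes is one second of 16-bit stereo 44.1kHz audio, which checks out: 44100 sample frames per second, 2 channels, 2 bytes per sample. Both conversions, standalone, with the reader's float arithmetic; the sample size parameter here is bytes per sample frame (4 for 16-bit stereo).

#include <cstdint>

static float BytesToSeconds(int64_t aBytes, int64_t aSampleRate,
                            int64_t aSampleSize) {
  return float(aBytes) / aSampleRate / aSampleSize;
}

static int64_t SecondsToBytes(float aTime, int64_t aSampleRate,
                              int64_t aSampleSize) {
  return int64_t(aTime * aSampleRate * aSampleSize);
}
// BytesToSeconds(176400, 44100, 4) == 1.0f, matching the documented figure.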
 
 PRInt64
 nsWaveReader::RoundDownToSample(PRInt64 aBytes) const
 {
--- a/content/media/wave/nsWaveReader.h
+++ b/content/media/wave/nsWaveReader.h
@@ -71,23 +71,23 @@ private:
   PRBool ReadAll(char* aBuf, PRInt64 aSize, PRInt64* aBytesRead = nsnull);
   PRBool LoadRIFFChunk();
   PRBool ScanForwardUntil(PRUint32 aWantedChunk, PRUint32* aChunkSize);
   PRBool LoadFormatChunk();
   PRBool FindDataOffset();
 
   // Returns the number of seconds that aBytes represents based on the
   // current audio parameters.  e.g.  176400 bytes is 1 second at 16-bit
-  // stereo 44.1kHz. The time is rounded to the nearest microsecond.
-  double BytesToTime(PRInt64 aBytes) const;
+  // stereo 44.1kHz. The time is rounded to the nearest millisecond.
+  float BytesToTime(PRInt64 aBytes) const;
 
   // Returns the number of bytes that aTime represents based on the current
   // audio parameters.  e.g.  1 second is 176400 bytes at 16-bit stereo
   // 44.1kHz.
-  PRInt64 TimeToBytes(double aTime) const;
+  PRInt64 TimeToBytes(float aTime) const;
 
   // Rounds aBytes down to the nearest complete sample.  Assumes beginning
   // of byte range is already sample aligned by caller.
   PRInt64 RoundDownToSample(PRInt64 aBytes) const;
   PRInt64 GetDataLength();
   PRInt64 GetPosition();
 
   /*
--- a/content/media/webm/nsWebMBufferedParser.cpp
+++ b/content/media/webm/nsWebMBufferedParser.cpp
@@ -39,16 +39,17 @@
 #include "nsAlgorithm.h"
 #include "nsWebMBufferedParser.h"
 #include "nsTimeRanges.h"
 #include "nsThreadUtils.h"
 
 using mozilla::MonitorAutoEnter;
 
 static const double NS_PER_S = 1e9;
+static const double MS_PER_S = 1e3;
 
 static PRUint32
 VIntLength(unsigned char aFirstByte, PRUint32* aMask)
 {
   PRUint32 count = 1;
   PRUint32 mask = 1 << 7;
   while (count < 8) {
     if ((aFirstByte & mask) != 0) {
--- a/content/media/webm/nsWebMReader.cpp
+++ b/content/media/webm/nsWebMReader.cpp
@@ -60,23 +60,24 @@ extern PRLogModuleInfo* gBuiltinDecoderL
 #else
 #define SEEK_LOG(type, msg)
 #endif
 #else
 #define LOG(type, msg)
 #define SEEK_LOG(type, msg)
 #endif
 
-static const unsigned NS_PER_USEC = 1000;
+static const unsigned NS_PER_MS = 1000000;
 static const double NS_PER_S = 1e9;
+static const double MS_PER_S = 1e3;
 
-// If a seek request is within SEEK_DECODE_MARGIN microseconds of the
+// If a seek request is within SEEK_DECODE_MARGIN milliseconds of the
 // current time, decode ahead from the current frame rather than performing
 // a full seek.
-static const int SEEK_DECODE_MARGIN = 250000;
+static const int SEEK_DECODE_MARGIN = 250;
 
 NS_SPECIALIZE_TEMPLATE
 class nsAutoRefTraits<NesteggPacketHolder> : public nsPointerRefTraits<NesteggPacketHolder>
 {
 public:
   static void Release(NesteggPacketHolder* aHolder) { delete aHolder; }
 };
 
@@ -130,17 +131,17 @@ static int64_t webm_tell(void *aUserData
 
 nsWebMReader::nsWebMReader(nsBuiltinDecoder* aDecoder)
   : nsBuiltinDecoderReader(aDecoder),
   mContext(nsnull),
   mPacketCount(0),
   mChannels(0),
   mVideoTrack(0),
   mAudioTrack(0),
-  mAudioStartUsec(-1),
+  mAudioStartMs(-1),
   mAudioSamples(0),
   mHasVideo(PR_FALSE),
   mHasAudio(PR_FALSE)
 {
   MOZ_COUNT_CTOR(nsWebMReader);
 }
 
 nsWebMReader::~nsWebMReader()
@@ -178,17 +179,17 @@ nsresult nsWebMReader::Init(nsBuiltinDec
   }
 
   return NS_OK;
 }
 
 nsresult nsWebMReader::ResetDecode()
 {
   mAudioSamples = 0;
-  mAudioStartUsec = -1;
+  mAudioStartMs = -1;
   nsresult res = NS_OK;
   if (NS_FAILED(nsBuiltinDecoderReader::ResetDecode())) {
     res = NS_ERROR_FAILURE;
   }
 
   // Ignore failed results from vorbis_synthesis_restart. They
   // aren't fatal and it fails when ResetDecode is called at a
   // time when no vorbis data has been read.
@@ -223,17 +224,17 @@ nsresult nsWebMReader::ReadMetadata(nsVi
     return NS_ERROR_FAILURE;
   }
 
   uint64_t duration = 0;
   r = nestegg_duration(mContext, &duration);
   if (r == 0) {
     MonitorAutoExit exitReaderMon(mMonitor);
     MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
-    mDecoder->GetStateMachine()->SetDuration(duration / NS_PER_USEC);
+    mDecoder->GetStateMachine()->SetDuration(duration / NS_PER_MS);
   }
 
   unsigned int ntracks = 0;
   r = nestegg_track_count(mContext, &ntracks);
   if (r == -1) {
     Cleanup();
     return NS_ERROR_FAILURE;
   }
@@ -429,49 +430,49 @@ PRBool nsWebMReader::DecodeAudioPacket(n
 
   uint64_t tstamp = 0;
   r = nestegg_packet_tstamp(aPacket, &tstamp);
   if (r == -1) {
     return PR_FALSE;
   }
 
   const PRUint32 rate = mVorbisDsp.vi->rate;
-  PRUint64 tstamp_usecs = tstamp / NS_PER_USEC;
-  if (mAudioStartUsec == -1) {
+  PRUint64 tstamp_ms = tstamp / NS_PER_MS;
+  if (mAudioStartMs == -1) {
     // This is the first audio chunk. Assume the start time of our decode
     // is the start of this chunk.
-    mAudioStartUsec = tstamp_usecs;
+    mAudioStartMs = tstamp_ms;
   }
   // If there's a gap between the start of this sound chunk and the end of
   // the previous sound chunk, we need to increment the packet count so that
   // the vorbis decode doesn't use data from before the gap to help decode
   // from after the gap.
   PRInt64 tstamp_samples = 0;
-  if (!UsecsToSamples(tstamp_usecs, rate, tstamp_samples)) {
+  if (!MsToSamples(tstamp_ms, rate, tstamp_samples)) {
     NS_WARNING("Int overflow converting WebM timestamp to samples");
     return PR_FALSE;
   }
   PRInt64 decoded_samples = 0;
-  if (!UsecsToSamples(mAudioStartUsec, rate, decoded_samples)) {
+  if (!MsToSamples(mAudioStartMs, rate, decoded_samples)) {
     NS_WARNING("Int overflow converting WebM start time to samples");
     return PR_FALSE;
   }
   if (!AddOverflow(decoded_samples, mAudioSamples, decoded_samples)) {
     NS_WARNING("Int overflow adding decoded_samples");
     return PR_FALSE;
   }
   if (tstamp_samples > decoded_samples) {
 #ifdef DEBUG
-    PRInt64 usecs = 0;
-    LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lld, %lld samples, in audio stream\n",
-      SamplesToUsecs(tstamp_samples - decoded_samples, rate, usecs) ? usecs: -1,
+    PRInt64 ms = 0;
+    LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lldms, %lld samples, in audio stream\n",
+      SamplesToMs(tstamp_samples - decoded_samples, rate, ms) ? ms: -1,
       tstamp_samples - decoded_samples));
 #endif
     mPacketCount++;
-    mAudioStartUsec = tstamp_usecs;
+    mAudioStartMs = tstamp_ms;
     mAudioSamples = 0;
   }
 
   PRInt32 total_samples = 0;
   for (PRUint32 i = 0; i < count; ++i) {
     unsigned char* data;
     size_t length;
     r = nestegg_packet_data(aPacket, i, &data, &length);
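The gap check above converts both the packet timestamp and the running decoded total to sample counts at the stream rate before comparing. Stripped of the MulOverflow plumbing, the VideoUtils conversions it relies on reduce to the arithmetic below:

#include <cstdint>

static int64_t MsToSamplesSketch(int64_t aMs, uint32_t aRate) {
  return aMs * int64_t(aRate) / 1000;
}

static int64_t SamplesToMsSketch(int64_t aSamples, uint32_t aRate) {
  return aSamples * 1000 / int64_t(aRate);
}
// At 48000 Hz a 250 ms gap is MsToSamplesSketch(250, 48000) == 12000
// samples, which the DEBUG log above reports back in ms.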
@@ -497,27 +498,27 @@ PRBool nsWebMReader::DecodeAudioPacket(n
       for (PRUint32 j = 0; j < mChannels; ++j) {
         VorbisPCMValue* channel = pcm[j];
         for (PRUint32 i = 0; i < PRUint32(samples); ++i) {
           buffer[i*mChannels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
         }
       }
 
       PRInt64 duration = 0;
-      if (!SamplesToUsecs(samples, rate, duration)) {
+      if (!SamplesToMs(samples, rate, duration)) {
         NS_WARNING("Int overflow converting WebM audio duration");
         return PR_FALSE;
       }
       PRInt64 total_duration = 0;
-      if (!SamplesToUsecs(total_samples, rate, total_duration)) {
+      if (!SamplesToMs(total_samples, rate, total_duration)) {
         NS_WARNING("Int overflow converting WebM audio total_duration");
         return PR_FALSE;
       }
       
-      PRInt64 time = tstamp_usecs + total_duration;
+      PRInt64 time = tstamp_ms + total_duration;
       total_samples += samples;
       SoundData* s = new SoundData(aOffset,
                                    time,
                                    duration,
                                    samples,
                                    buffer,
                                    mChannels);
       mAudioQueue.Push(s);
@@ -662,51 +663,51 @@ PRBool nsWebMReader::DecodeVideoFrame(PR
       MonitorAutoExit exitMon(mMonitor);
       MonitorAutoEnter decoderMon(mDecoder->GetMonitor());
       nsBuiltinDecoderStateMachine* s =
         static_cast<nsBuiltinDecoderStateMachine*>(mDecoder->GetStateMachine());
       PRInt64 endTime = s->GetEndMediaTime();
       if (endTime == -1) {
         return PR_FALSE;
       }
-      next_tstamp = endTime * NS_PER_USEC;
+      next_tstamp = endTime * NS_PER_MS;
     }
   }
 
-  PRInt64 tstamp_usecs = tstamp / NS_PER_USEC;
+  PRInt64 tstamp_ms = tstamp / NS_PER_MS;
   for (PRUint32 i = 0; i < count; ++i) {
     unsigned char* data;
     size_t length;
     r = nestegg_packet_data(packet, i, &data, &length);
     if (r == -1) {
       return PR_FALSE;
     }
 
     vpx_codec_stream_info_t si;
     memset(&si, 0, sizeof(si));
     si.sz = sizeof(si);
     vpx_codec_peek_stream_info(&vpx_codec_vp8_dx_algo, data, length, &si);
-    if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) {
+    if (aKeyframeSkip && (!si.is_kf || tstamp_ms < aTimeThreshold)) {
       // Skipping to next keyframe...
       parsed++; // Assume 1 frame per chunk.
       continue;
     }
 
     if (aKeyframeSkip && si.is_kf) {
       aKeyframeSkip = PR_FALSE;
     }
 
     if (vpx_codec_decode(&mVP8, data, length, NULL, 0)) {
       return PR_FALSE;
     }
 
     // If the timestamp of the video frame is less than
     // the time threshold required then it is not added
     // to the video queue and won't be displayed.
-    if (tstamp_usecs < aTimeThreshold) {
+    if (tstamp_ms < aTimeThreshold) {
       parsed++; // Assume 1 frame per chunk.
       continue;
     }
 
     vpx_codec_iter_t  iter = NULL;
     vpx_image_t      *img;
 
     while ((img = vpx_codec_get_frame(&mVP8, &iter))) {
@@ -727,18 +728,18 @@ PRBool nsWebMReader::DecodeVideoFrame(PR
       b.mPlanes[2].mData = img->planes[2];
       b.mPlanes[2].mStride = img->stride[2];
       b.mPlanes[2].mHeight = img->d_h >> img->y_chroma_shift;
       b.mPlanes[2].mWidth = img->d_w >> img->x_chroma_shift;
   
       VideoData *v = VideoData::Create(mInfo,
                                        mDecoder->GetImageContainer(),
                                        holder->mOffset,
-                                       tstamp_usecs,
-                                       next_tstamp / NS_PER_USEC,
+                                       tstamp_ms,
+                                       next_tstamp / NS_PER_MS,
                                        b,
                                        si.is_kf,
                                        -1);
       if (!v) {
         return PR_FALSE;
       }
       parsed++;
       decoded++;
@@ -768,17 +769,17 @@ nsresult nsWebMReader::Seek(PRInt64 aTar
   if (CanDecodeToTarget(aTarget, aCurrentTime)) {
     LOG(PR_LOG_DEBUG, ("%p Seek target (%lld) is close to current time (%lld), "
                        "will just decode to it", mDecoder, aCurrentTime, aTarget));
   } else {
     if (NS_FAILED(ResetDecode())) {
       return NS_ERROR_FAILURE;
     }
     PRUint32 trackToSeek = mHasVideo ? mVideoTrack : mAudioTrack;
-    int r = nestegg_track_seek(mContext, trackToSeek, aTarget * NS_PER_USEC);
+    int r = nestegg_track_seek(mContext, trackToSeek, aTarget * NS_PER_MS);
     if (r != 0) {
       return NS_ERROR_FAILURE;
     }
   }
   return DecodeToTarget(aTarget);
 }
 
 nsresult nsWebMReader::GetBuffered(nsTimeRanges* aBuffered, PRInt64 aStartTime)
@@ -797,17 +798,17 @@ nsresult nsWebMReader::GetBuffered(nsTim
       aBuffered->Add(0, duration / NS_PER_S);
     }
   } else {
     nsMediaStream* stream = mDecoder->GetCurrentStream();
     nsTArray<nsByteRange> ranges;
     nsresult res = stream->GetCachedRanges(ranges);
     NS_ENSURE_SUCCESS(res, res);
 
-    PRInt64 startTimeOffsetNS = aStartTime * NS_PER_USEC;
+    PRInt64 startTimeOffsetNS = aStartTime * NS_PER_MS;
     for (PRUint32 index = 0; index < ranges.Length(); index++) {
       mBufferedState->CalculateBufferedForRange(aBuffered,
                                                 ranges[index].mStart,
                                                 ranges[index].mEnd,
                                                 timecodeScale,
                                                 startTimeOffsetNS);
     }
   }
--- a/content/media/webm/nsWebMReader.h
+++ b/content/media/webm/nsWebMReader.h
@@ -187,18 +187,18 @@ private:
   PRBool DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset);
 
   // Release context and set to null. Called when an error occurs during
   // reading metadata or destruction of the reader itself.
   void Cleanup();
 
   // Returns PR_TRUE if we should decode up to the seek target rather than
   // seeking to the target using an index-assisted seek.  We should do this
+  // if the seek target (aTarget, in ms) lies not too far ahead of the
-  // current playback position (aCurrentTime, in usecs).
+  // if the seek target (aTarget, in ms), lies not too far ahead of the
+  // current playback position (aCurrentTime, in ms).
   PRBool CanDecodeToTarget(PRInt64 aTarget, PRInt64 aCurrentTime);
 
 private:
   // libnestegg context for webm container. Access on state machine thread
   // or decoder thread only.
   nestegg* mContext;
 
   // VP8 decoder state
@@ -216,18 +216,18 @@ private:
   // must only be accessed from the state machine thread.
   PacketQueue mVideoPackets;
   PacketQueue mAudioPackets;
 
   // Index of video and audio track to play
   PRUint32 mVideoTrack;
   PRUint32 mAudioTrack;
 
-  // Time in microseconds of the start of the first audio sample we've decoded.
-  PRInt64 mAudioStartUsec;
+  // Time in ms of the start of the first audio sample we've decoded.
+  PRInt64 mAudioStartMs;
 
   // Number of samples we've decoded since decoding began at mAudioStartMs.
   PRUint64 mAudioSamples;
 
   // Parser state and computed offset-time mappings.  Shared by multiple
   // readers when decoder has been cloned.  Main thread only.
   nsRefPtr<nsWebMBufferedState> mBufferedState;