Bug 689432 - Disambiguate use of "samples" by introducing "frames" in a number of places. r=doublec
author Matthew Gregan <kinetik@flim.org>
Tue, 27 Sep 2011 16:31:18 +1300
changeset 77739 89b5ec4cacc1a57a210ccdbd85425ed12513bbfb
parent 77738 30c186f6b48a7365fa59c4b7f91b077d0f4ef91e
child 77740 dbe064b3a6595de93fa7ae6bf6212e1989443ecd
push id 3
push user felipc@gmail.com
push date Fri, 30 Sep 2011 20:09:13 +0000
reviewers doublec
bugs 689432
milestone 10.0a1
content/html/content/src/nsHTMLAudioElement.cpp
content/media/VideoUtils.cpp
content/media/VideoUtils.h
content/media/nsAudioStream.cpp
content/media/nsAudioStream.h
content/media/nsBuiltinDecoderReader.cpp
content/media/nsBuiltinDecoderReader.h
content/media/nsBuiltinDecoderStateMachine.cpp
content/media/nsBuiltinDecoderStateMachine.h
content/media/ogg/nsOggReader.cpp
content/media/wave/nsWaveReader.cpp
content/media/wave/nsWaveReader.h
content/media/webm/nsWebMReader.cpp
content/media/webm/nsWebMReader.h
dom/ipc/AudioChild.cpp
dom/ipc/AudioChild.h
dom/ipc/AudioParent.cpp
dom/ipc/AudioParent.h
dom/ipc/PAudio.ipdl
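
Note on terminology: a "sample" is a single value for one channel, while a
"frame" is one sample per channel at a given instant, so an interleaved
buffer of N frames holds N * channels samples. A minimal standalone sketch
of the relationship (plain C++, not code from the tree; the conversion
mirrors FramesToUsecs() below for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t channels = 2;     // stereo
      const uint32_t frames = 1024;    // one frame = one sample per channel
      const uint32_t samples = frames * channels;  // interleaved sample count
      const uint32_t rate = 44100;     // sample rate, in frames per second

      // frames -> microseconds at the given rate, as FramesToUsecs() does.
      const int64_t usecs = static_cast<int64_t>(frames) * 1000000 / rate;

      printf("%u frames = %u samples = %lld usecs at %u Hz\n",
             frames, samples, static_cast<long long>(usecs), rate);
      return 0;
    }
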
--- a/content/html/content/src/nsHTMLAudioElement.cpp
+++ b/content/html/content/src/nsHTMLAudioElement.cpp
@@ -218,36 +218,36 @@ nsHTMLAudioElement::MozWriteAudio(const 
 
   // Make sure that we are going to write the correct amount of data based
   // on number of channels.
   if (dataLength % mChannels != 0) {
     return NS_ERROR_DOM_INDEX_SIZE_ERR;
   }
 
   // Don't write more than can be written without blocking.
-  PRUint32 writeLen = NS_MIN(mAudioStream->Available(), dataLength);
+  PRUint32 writeLen = NS_MIN(mAudioStream->Available(), dataLength / mChannels);
 
   nsresult rv = mAudioStream->Write(JS_GetTypedArrayData(tsrc), writeLen);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   // Return the actual amount written.
-  *aRetVal = writeLen;
+  *aRetVal = writeLen * mChannels;
   return rv;
 }
 
 NS_IMETHODIMP
 nsHTMLAudioElement::MozCurrentSampleOffset(PRUint64 *aRetVal)
 {
   if (!mAudioStream) {
     return NS_ERROR_DOM_INVALID_STATE_ERR;
   }
 
-  *aRetVal = mAudioStream->GetSampleOffset();
+  *aRetVal = mAudioStream->GetPositionInFrames() * mChannels;
   return NS_OK;
 }
 
   
 nsresult nsHTMLAudioElement::SetAcceptHeader(nsIHttpChannel* aChannel)
 {
     nsCAutoString value(
 #ifdef MOZ_WEBM
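
The MozWriteAudio() change above is the clearest illustration of the unit
mismatch this patch untangles: the DOM API traffics in samples, while the
stream now traffics in frames, so the caller divides by the channel count on
the way in and multiplies on the way out. A standalone sketch of the
arithmetic (made-up values; "available" stands in for Available()):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t channels = 2;
      const uint32_t dataLength = 4096;  // samples supplied by script
      const uint32_t available = 1500;   // frames the stream will accept

      // Frames writable without blocking; dataLength is in samples, so
      // divide by the channel count before taking the minimum.
      const uint32_t writeLen = std::min(available, dataLength / channels);

      // The DOM API reports samples written, so convert back.
      printf("wrote %u frames = %u samples\n", writeLen, writeLen * channels);
      return 0;
    }
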
--- a/content/media/VideoUtils.cpp
+++ b/content/media/VideoUtils.cpp
@@ -171,35 +171,35 @@ PRBool MulOverflow(PRInt64 a, PRInt64 b,
     return PR_FALSE;
   }
 
   aResult *= sign;
   NS_ASSERTION(a * b == aResult, "We didn't overflow, but result is wrong!");
   return PR_TRUE;
 }
 
-// Converts from number of audio samples to microseconds, given the specified
+// Converts from number of audio frames to microseconds, given the specified
 // audio rate.
-PRBool SamplesToUsecs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutUsecs)
+PRBool FramesToUsecs(PRInt64 aFrames, PRUint32 aRate, PRInt64& aOutUsecs)
 {
   PRInt64 x;
-  if (!MulOverflow(aSamples, USECS_PER_S, x))
+  if (!MulOverflow(aFrames, USECS_PER_S, x))
     return PR_FALSE;
   aOutUsecs = x / aRate;
   return PR_TRUE;
 }
 
-// Converts from microseconds to number of audio samples, given the specified
+// Converts from microseconds to number of audio frames, given the specified
 // audio rate.
-PRBool UsecsToSamples(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutSamples)
+PRBool UsecsToFrames(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutFrames)
 {
   PRInt64 x;
   if (!MulOverflow(aUsecs, aRate, x))
     return PR_FALSE;
-  aOutSamples = x / USECS_PER_S;
+  aOutFrames = x / USECS_PER_S;
   return PR_TRUE;
 }
 
 static PRInt32 ConditionDimension(float aValue)
 {
   // This will exclude NaNs and too-big values.
   if (aValue > 1.0 && aValue <= PR_INT32_MAX)
     return PRInt32(NS_round(aValue));
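
FramesToUsecs() multiplies before dividing so no precision is lost to
truncation, which is why it needs the MulOverflow guard. A self-contained
version with the same shape (using GCC/Clang's __builtin_mul_overflow in
place of the tree's MulOverflow helper):

    #include <cstdint>
    #include <cstdio>

    // Multiply first for precision, guarding against 64-bit overflow.
    static bool FramesToUsecs(int64_t frames, uint32_t rate, int64_t& outUsecs) {
      int64_t x;
      if (__builtin_mul_overflow(frames, INT64_C(1000000), &x))
        return false;
      outUsecs = x / rate;
      return true;
    }

    int main() {
      int64_t usecs;
      if (FramesToUsecs(44100, 44100, usecs))
        printf("44100 frames at 44100 Hz = %lld usecs\n", (long long)usecs);
      return 0;
    }
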
--- a/content/media/VideoUtils.h
+++ b/content/media/VideoUtils.h
@@ -123,27 +123,27 @@ PRBool MulOverflow32(PRUint32 a, PRUint3
 // if addition would result in an overflow.
 PRBool AddOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
 
 // 64 bit integer multiplication with overflow checking. Returns PR_TRUE
 // if the multiplication was successful, or PR_FALSE if the operation resulted
 // in an integer overflow.
 PRBool MulOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
 
-// Converts from number of audio samples (aSamples) to microseconds, given
+// Converts from number of audio frames (aFrames) to microseconds, given
 // the specified audio rate (aRate). Stores result in aOutUsecs. Returns PR_TRUE
 // if the operation succeeded, or PR_FALSE if there was an integer overflow
 // while calculating the conversion.
-PRBool SamplesToUsecs(PRInt64 aSamples, PRUint32 aRate, PRInt64& aOutUsecs);
+PRBool FramesToUsecs(PRInt64 aFrames, PRUint32 aRate, PRInt64& aOutUsecs);
 
-// Converts from microseconds (aUsecs) to number of audio samples, given the
-// specified audio rate (aRate). Stores the result in aOutSamples. Returns
+// Converts from microseconds (aUsecs) to number of audio frames, given the
+// specified audio rate (aRate). Stores the result in aOutFrames. Returns
 // PR_TRUE if the operation succeeded, or PR_FALSE if there was an integer
 // overflow while calculating the conversion.
-PRBool UsecsToSamples(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutSamples);
+PRBool UsecsToFrames(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutFrames);
 
 // Number of microseconds per second. 1e6.
 static const PRInt64 USECS_PER_S = 1000000;
 
 // Number of microseconds per millisecond.
 static const PRInt64 USECS_PER_MS = 1000;
 
 // The maximum height and width of the video. Used for
--- a/content/media/nsAudioStream.cpp
+++ b/content/media/nsAudioStream.cpp
@@ -75,36 +75,36 @@ using namespace mozilla;
 using mozilla::TimeStamp;
 
 #ifdef PR_LOGGING
 PRLogModuleInfo* gAudioStreamLog = nsnull;
 #endif
 
 static const PRUint32 FAKE_BUFFER_SIZE = 176400;
 
-class nsAudioStreamLocal : public nsAudioStream
+class nsNativeAudioStream : public nsAudioStream
 {
  public:
   NS_DECL_ISUPPORTS
 
-  ~nsAudioStreamLocal();
-  nsAudioStreamLocal();
+  ~nsNativeAudioStream();
+  nsNativeAudioStream();
 
   nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
   void Shutdown();
-  nsresult Write(const void* aBuf, PRUint32 aCount);
+  nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
   void SetVolume(double aVolume);
   void Drain();
   void Pause();
   void Resume();
   PRInt64 GetPosition();
-  PRInt64 GetSampleOffset();
+  PRInt64 GetPositionInFrames();
   PRBool IsPaused();
-  PRInt32 GetMinWriteSamples();
+  PRInt32 GetMinWriteSize();
 
  private:
 
   double mVolume;
   void* mAudioHandle;
   int mRate;
   int mChannels;
 
@@ -113,99 +113,98 @@ class nsAudioStreamLocal : public nsAudi
   // PR_TRUE if this audio stream is paused.
   PRPackedBool mPaused;
 
   // PR_TRUE if this stream has encountered an error.
   PRPackedBool mInError;
 
 };
 
-class nsAudioStreamRemote : public nsAudioStream
+class nsRemotedAudioStream : public nsAudioStream
 {
  public:
   NS_DECL_ISUPPORTS
 
-  nsAudioStreamRemote();
-  ~nsAudioStreamRemote();
+  nsRemotedAudioStream();
+  ~nsRemotedAudioStream();
 
   nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
   void Shutdown();
-  nsresult Write(const void* aBuf, PRUint32 aCount);
+  nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
   void SetVolume(double aVolume);
   void Drain();
   void Pause();
   void Resume();
   PRInt64 GetPosition();
-  PRInt64 GetSampleOffset();
+  PRInt64 GetPositionInFrames();
   PRBool IsPaused();
-  PRInt32 GetMinWriteSamples();
+  PRInt32 GetMinWriteSize();
 
 private:
   nsRefPtr<AudioChild> mAudioChild;
 
   SampleFormat mFormat;
   int mRate;
   int mChannels;
 
-  PRInt32 mBytesPerSample;
+  PRInt32 mBytesPerFrame;
 
   // PR_TRUE if this audio stream is paused.
   PRPackedBool mPaused;
 
   friend class AudioInitEvent;
 };
 
 class AudioInitEvent : public nsRunnable
 {
  public:
-  AudioInitEvent(nsAudioStreamRemote* owner)
+  AudioInitEvent(nsRemotedAudioStream* owner)
   {
     mOwner = owner;
   }
 
   NS_IMETHOD Run()
   {
     ContentChild * cpc = ContentChild::GetSingleton();
     NS_ASSERTION(cpc, "Content Protocol is NULL!");
     mOwner->mAudioChild =  static_cast<AudioChild*> (cpc->SendPAudioConstructor(mOwner->mChannels,
                                                                                 mOwner->mRate,
                                                                                 mOwner->mFormat));
     return NS_OK;
   }
-  
-  nsRefPtr<nsAudioStreamRemote> mOwner;
+
+  nsRefPtr<nsRemotedAudioStream> mOwner;
 };
 
 class AudioWriteEvent : public nsRunnable
 {
  public:
   AudioWriteEvent(AudioChild* aChild,
                   const void* aBuf,
-                  PRUint32 aNumberOfSamples,
-                  PRUint32 aBytesPerSample)
-  {    
+                  PRUint32 aNumberOfFrames,
+                  PRUint32 aBytesPerFrame)
+  {
     mAudioChild = aChild;
-    mBytesPerSample = aBytesPerSample;
-    mBuffer.Assign((const char*)aBuf, aNumberOfSamples*aBytesPerSample);
+    mBytesPerFrame = aBytesPerFrame;
+    mBuffer.Assign((const char*)aBuf, aNumberOfFrames * aBytesPerFrame);
   }
 
   NS_IMETHOD Run()
   {
     if (!mAudioChild->IsIPCOpen())
       return NS_OK;
 
-    mAudioChild->SendWrite(mBuffer,
-                           mBuffer.Length() / mBytesPerSample);
+    mAudioChild->SendWrite(mBuffer, mBuffer.Length() / mBytesPerFrame);
     return NS_OK;
   }
 
   nsRefPtr<AudioChild> mAudioChild;
   nsCString mBuffer;
-  PRUint32 mBytesPerSample;
+  PRUint32 mBytesPerFrame;
 };
 
 class AudioSetVolumeEvent : public nsRunnable
 {
  public:
   AudioSetVolumeEvent(AudioChild* aChild, double aVolume)
   {
     mAudioChild = aChild;
@@ -215,36 +214,36 @@ class AudioSetVolumeEvent : public nsRun
   NS_IMETHOD Run()
   {
     if (!mAudioChild->IsIPCOpen())
       return NS_OK;
 
     mAudioChild->SendSetVolume(mVolume);
     return NS_OK;
   }
-  
+
   nsRefPtr<AudioChild> mAudioChild;
   double mVolume;
 };
 
 
-class AudioMinWriteSampleEvent : public nsRunnable
+class AudioMinWriteSizeEvent : public nsRunnable
 {
  public:
-  AudioMinWriteSampleEvent(AudioChild* aChild)
+  AudioMinWriteSizeEvent(AudioChild* aChild)
   {
     mAudioChild = aChild;
   }
 
   NS_IMETHOD Run()
   {
     if (!mAudioChild->IsIPCOpen())
       return NS_OK;
 
-    mAudioChild->SendMinWriteSample();
+    mAudioChild->SendMinWriteSize();
     return NS_OK;
   }
 
   nsRefPtr<AudioChild> mAudioChild;
 };
 
 class AudioDrainEvent : public nsRunnable
 {
@@ -257,17 +256,17 @@ class AudioDrainEvent : public nsRunnabl
   NS_IMETHOD Run()
   {
     if (!mAudioChild->IsIPCOpen())
       return NS_OK;
 
     mAudioChild->SendDrain();
     return NS_OK;
   }
-  
+
   nsRefPtr<AudioChild> mAudioChild;
 };
 
 
 class AudioPauseEvent : public nsRunnable
 {
  public:
   AudioPauseEvent(AudioChild* aChild, PRBool pause)
@@ -283,17 +282,17 @@ class AudioPauseEvent : public nsRunnabl
 
     if (mPause)
       mAudioChild->SendPause();
     else
       mAudioChild->SendResume();
 
     return NS_OK;
   }
-  
+
   nsRefPtr<AudioChild> mAudioChild;
   PRBool mPause;
 };
 
 
 class AudioShutdownEvent : public nsRunnable
 {
  public:
@@ -303,17 +302,17 @@ class AudioShutdownEvent : public nsRunn
   }
 
   NS_IMETHOD Run()
   {
     if (mAudioChild->IsIPCOpen())
       mAudioChild->SendShutdown();
     return NS_OK;
   }
-  
+
   nsRefPtr<AudioChild> mAudioChild;
 };
 
 static mozilla::Mutex* gVolumeScaleLock = nsnull;
 
 static double gVolumeScale = 1.0;
 
 static int VolumeScaleChanged(const char* aPref, void *aClosure) {
@@ -360,20 +359,20 @@ nsAudioStream::GetThread()
   }
   return mAudioPlaybackThread;
 }
 
 nsAudioStream* nsAudioStream::AllocateStream()
 {
 #if defined(REMOTE_AUDIO)
   if (XRE_GetProcessType() == GeckoProcessType_Content) {
-    return new nsAudioStreamRemote();
+    return new nsRemotedAudioStream();
   }
 #endif
-  return new nsAudioStreamLocal();
+  return new nsNativeAudioStream();
 }
 
 class AsyncShutdownPlaybackThread : public nsRunnable
 {
 public:
   AsyncShutdownPlaybackThread(nsIThread* aThread) : mThread(aThread) {}
   NS_IMETHODIMP Run() { return mThread->Shutdown(); }
 private:
@@ -383,111 +382,110 @@ private:
 nsAudioStream::~nsAudioStream()
 {
   if (mAudioPlaybackThread) {
     nsCOMPtr<nsIRunnable> event = new AsyncShutdownPlaybackThread(mAudioPlaybackThread);
     NS_DispatchToMainThread(event);
   }
 }
 
-nsAudioStreamLocal::nsAudioStreamLocal() :
+nsNativeAudioStream::nsNativeAudioStream() :
   mVolume(1.0),
   mAudioHandle(0),
   mRate(0),
   mChannels(0),
   mFormat(FORMAT_S16_LE),
   mPaused(PR_FALSE),
   mInError(PR_FALSE)
 {
 }
 
-nsAudioStreamLocal::~nsAudioStreamLocal()
+nsNativeAudioStream::~nsNativeAudioStream()
 {
   Shutdown();
 }
 
-NS_IMPL_THREADSAFE_ISUPPORTS0(nsAudioStreamLocal)
+NS_IMPL_THREADSAFE_ISUPPORTS0(nsNativeAudioStream)
 
-nsresult nsAudioStreamLocal::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
+nsresult nsNativeAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
 {
   mRate = aRate;
   mChannels = aNumChannels;
   mFormat = aFormat;
 
   if (sa_stream_create_pcm(reinterpret_cast<sa_stream_t**>(&mAudioHandle),
-                           NULL, 
-                           SA_MODE_WRONLY, 
+                           NULL,
+                           SA_MODE_WRONLY,
                            SA_PCM_FORMAT_S16_NE,
                            aRate,
                            aNumChannels) != SA_SUCCESS) {
     mAudioHandle = nsnull;
     mInError = PR_TRUE;
-    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStreamLocal: sa_stream_create_pcm error"));
+    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_create_pcm error"));
     return NS_ERROR_FAILURE;
   }
-  
+
   if (sa_stream_open(static_cast<sa_stream_t*>(mAudioHandle)) != SA_SUCCESS) {
     sa_stream_destroy(static_cast<sa_stream_t*>(mAudioHandle));
     mAudioHandle = nsnull;
     mInError = PR_TRUE;
-    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStreamLocal: sa_stream_open error"));
+    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_open error"));
     return NS_ERROR_FAILURE;
   }
   mInError = PR_FALSE;
 
   return NS_OK;
 }
 
-void nsAudioStreamLocal::Shutdown()
+void nsNativeAudioStream::Shutdown()
 {
   if (!mAudioHandle)
     return;
 
   sa_stream_destroy(static_cast<sa_stream_t*>(mAudioHandle));
   mAudioHandle = nsnull;
   mInError = PR_TRUE;
 }
 
-nsresult nsAudioStreamLocal::Write(const void* aBuf, PRUint32 aCount)
+nsresult nsNativeAudioStream::Write(const void* aBuf, PRUint32 aFrames)
 {
-  NS_ABORT_IF_FALSE(aCount % mChannels == 0,
-                    "Buffer size must be divisible by channel count");
   NS_ASSERTION(!mPaused, "Don't write audio when paused, you'll block");
 
   if (mInError)
     return NS_ERROR_FAILURE;
 
-  nsAutoArrayPtr<short> s_data(new short[aCount]);
+  PRUint32 samples = aFrames * mChannels;
+  nsAutoArrayPtr<short> s_data(new short[samples]);
 
   if (s_data) {
     double scaled_volume = GetVolumeScale() * mVolume;
     switch (mFormat) {
       case FORMAT_U8: {
         const PRUint8* buf = static_cast<const PRUint8*>(aBuf);
         PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
-        for (PRUint32 i = 0; i < aCount; ++i) {
+        for (PRUint32 i = 0; i < samples; ++i) {
           s_data[i] = short(((PRInt32(buf[i]) - 128) * volume) >> 8);
         }
         break;
       }
       case FORMAT_S16_LE: {
         const short* buf = static_cast<const short*>(aBuf);
         PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
-        for (PRUint32 i = 0; i < aCount; ++i) {
+        for (PRUint32 i = 0; i < samples; ++i) {
           short s = buf[i];
 #if defined(IS_BIG_ENDIAN)
           s = ((s & 0x00ff) << 8) | ((s & 0xff00) >> 8);
 #endif
           s_data[i] = short((PRInt32(s) * volume) >> 16);
         }
         break;
       }
       case FORMAT_FLOAT32: {
         const float* buf = static_cast<const float*>(aBuf);
-        for (PRUint32 i = 0; i <  aCount; ++i) {
+        for (PRUint32 i = 0; i <  samples; ++i) {
           float scaled_value = floorf(0.5 + 32768 * buf[i] * scaled_volume);
           if (buf[i] < 0.0) {
             s_data[i] = (scaled_value < -32768.0) ?
               -32768 :
               short(scaled_value);
           } else {
             s_data[i] = (scaled_value > 32767.0) ?
               32767 :
@@ -495,272 +493,272 @@ nsresult nsAudioStreamLocal::Write(const
           }
         }
         break;
       }
     }
 
     if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle),
                         s_data.get(),
-                        aCount * sizeof(short)) != SA_SUCCESS)
+                        samples * sizeof(short)) != SA_SUCCESS)
     {
-      PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStreamLocal: sa_stream_write error"));
+      PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error"));
       mInError = PR_TRUE;
       return NS_ERROR_FAILURE;
     }
   }
   return NS_OK;
 }
 
-PRUint32 nsAudioStreamLocal::Available()
+PRUint32 nsNativeAudioStream::Available()
 {
   // If the audio backend failed to open, lie and say we'll accept some
   // data.
   if (mInError)
     return FAKE_BUFFER_SIZE;
 
-  size_t s = 0; 
+  size_t s = 0;
   if (sa_stream_get_write_size(static_cast<sa_stream_t*>(mAudioHandle), &s) != SA_SUCCESS)
     return 0;
 
-  return s / sizeof(short);
+  return s / mChannels / sizeof(short);
 }
 
-void nsAudioStreamLocal::SetVolume(double aVolume)
+void nsNativeAudioStream::SetVolume(double aVolume)
 {
   NS_ASSERTION(aVolume >= 0.0 && aVolume <= 1.0, "Invalid volume");
 #if defined(SA_PER_STREAM_VOLUME)
   if (sa_stream_set_volume_abs(static_cast<sa_stream_t*>(mAudioHandle), aVolume) != SA_SUCCESS) {
-    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStreamLocal: sa_stream_set_volume_abs error"));
+    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_set_volume_abs error"));
     mInError = PR_TRUE;
   }
 #else
   mVolume = aVolume;
 #endif
 }
 
-void nsAudioStreamLocal::Drain()
+void nsNativeAudioStream::Drain()
 {
   NS_ASSERTION(!mPaused, "Don't drain audio when paused, it won't finish!");
 
   if (mInError)
     return;
 
   int r = sa_stream_drain(static_cast<sa_stream_t*>(mAudioHandle));
   if (r != SA_SUCCESS && r != SA_ERROR_INVALID) {
-    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStreamLocal: sa_stream_drain error"));
+    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_drain error"));
     mInError = PR_TRUE;
   }
 }
 
-void nsAudioStreamLocal::Pause()
+void nsNativeAudioStream::Pause()
 {
   if (mInError)
     return;
   mPaused = PR_TRUE;
   sa_stream_pause(static_cast<sa_stream_t*>(mAudioHandle));
 }
 
-void nsAudioStreamLocal::Resume()
+void nsNativeAudioStream::Resume()
 {
   if (mInError)
     return;
   mPaused = PR_FALSE;
   sa_stream_resume(static_cast<sa_stream_t*>(mAudioHandle));
 }
 
-PRInt64 nsAudioStreamLocal::GetPosition()
+PRInt64 nsNativeAudioStream::GetPosition()
 {
-  PRInt64 sampleOffset = GetSampleOffset();
-  if (sampleOffset >= 0) {
-    return ((USECS_PER_S * sampleOffset) / mRate / mChannels);
+  PRInt64 position = GetPositionInFrames();
+  if (position >= 0) {
+    return ((USECS_PER_S * position) / mRate);
   }
   return -1;
 }
 
-PRInt64 nsAudioStreamLocal::GetSampleOffset()
+PRInt64 nsNativeAudioStream::GetPositionInFrames()
 {
   if (mInError) {
     return -1;
   }
- 
+
   sa_position_t positionType = SA_POSITION_WRITE_SOFTWARE;
 #if defined(XP_WIN)
   positionType = SA_POSITION_WRITE_HARDWARE;
 #endif
   int64_t position = 0;
   if (sa_stream_get_position(static_cast<sa_stream_t*>(mAudioHandle),
                              positionType, &position) == SA_SUCCESS) {
-    return position / sizeof(short);
+    return position / mChannels / sizeof(short);
   }
 
   return -1;
 }
 
-PRBool nsAudioStreamLocal::IsPaused()
+PRBool nsNativeAudioStream::IsPaused()
 {
   return mPaused;
 }
 
-PRInt32 nsAudioStreamLocal::GetMinWriteSamples()
+PRInt32 nsNativeAudioStream::GetMinWriteSize()
 {
   size_t size;
   int r = sa_stream_get_min_write(static_cast<sa_stream_t*>(mAudioHandle),
                                   &size);
-  if (r == SA_ERROR_NOT_SUPPORTED) {
+  if (r == SA_ERROR_NOT_SUPPORTED)
     return 1;
-  } else if (r != SA_SUCCESS) {
+  else if (r != SA_SUCCESS || size > PR_INT32_MAX)
     return -1;
-  }
+
   return static_cast<PRInt32>(size / mChannels / sizeof(short));
 }
 
-nsAudioStreamRemote::nsAudioStreamRemote()
- : mAudioChild(NULL),
+nsRemotedAudioStream::nsRemotedAudioStream()
+ : mAudioChild(nsnull),
    mFormat(FORMAT_S16_LE),
    mRate(0),
    mChannels(0),
-   mBytesPerSample(1),
+   mBytesPerFrame(0),
    mPaused(PR_FALSE)
 {}
 
-nsAudioStreamRemote::~nsAudioStreamRemote()
+nsRemotedAudioStream::~nsRemotedAudioStream()
 {
   Shutdown();
 }
 
-NS_IMPL_THREADSAFE_ISUPPORTS0(nsAudioStreamRemote)
+NS_IMPL_THREADSAFE_ISUPPORTS0(nsRemotedAudioStream)
 
-nsresult 
-nsAudioStreamRemote::Init(PRInt32 aNumChannels,
-                          PRInt32 aRate,
-                          SampleFormat aFormat)
+nsresult
+nsRemotedAudioStream::Init(PRInt32 aNumChannels,
+                           PRInt32 aRate,
+                           SampleFormat aFormat)
 {
   mRate = aRate;
   mChannels = aNumChannels;
   mFormat = aFormat;
 
   switch (mFormat) {
     case FORMAT_U8: {
-      mBytesPerSample = sizeof(PRUint8);
+      mBytesPerFrame = sizeof(PRUint8) * mChannels;
       break;
     }
     case FORMAT_S16_LE: {
-      mBytesPerSample = sizeof(short);
+      mBytesPerFrame = sizeof(short) * mChannels;
       break;
     }
     case FORMAT_FLOAT32: {
-      mBytesPerSample = sizeof(float);
+      mBytesPerFrame = sizeof(float) * mChannels;
     }
   }
 
   nsCOMPtr<nsIRunnable> event = new AudioInitEvent(this);
   NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
   return NS_OK;
 }
 
 void
-nsAudioStreamRemote::Shutdown()
+nsRemotedAudioStream::Shutdown()
 {
   if (!mAudioChild)
     return;
   nsCOMPtr<nsIRunnable> event = new AudioShutdownEvent(mAudioChild);
   NS_DispatchToMainThread(event);
   mAudioChild = nsnull;
 }
 
 nsresult
-nsAudioStreamRemote::Write(const void* aBuf, PRUint32 aCount)
+nsRemotedAudioStream::Write(const void* aBuf, PRUint32 aFrames)
 {
   if (!mAudioChild)
     return NS_ERROR_FAILURE;
   nsCOMPtr<nsIRunnable> event = new AudioWriteEvent(mAudioChild,
                                                     aBuf,
-                                                    aCount,
-                                                    mBytesPerSample);
+                                                    aFrames,
+                                                    mBytesPerFrame);
   NS_DispatchToMainThread(event);
   return NS_OK;
 }
 
 PRUint32
-nsAudioStreamRemote::Available()
+nsRemotedAudioStream::Available()
 {
   return FAKE_BUFFER_SIZE;
 }
 
-PRInt32 nsAudioStreamRemote::GetMinWriteSamples()
+PRInt32 nsRemotedAudioStream::GetMinWriteSize()
 {
   if (!mAudioChild)
     return -1;
-  nsCOMPtr<nsIRunnable> event = new AudioMinWriteSampleEvent(mAudioChild);
+  nsCOMPtr<nsIRunnable> event = new AudioMinWriteSizeEvent(mAudioChild);
   NS_DispatchToMainThread(event);
-  return mAudioChild->WaitForMinWriteSample();
+  return mAudioChild->WaitForMinWriteSize();
 }
 
 void
-nsAudioStreamRemote::SetVolume(double aVolume)
+nsRemotedAudioStream::SetVolume(double aVolume)
 {
   if (!mAudioChild)
     return;
   nsCOMPtr<nsIRunnable> event = new AudioSetVolumeEvent(mAudioChild, aVolume);
   NS_DispatchToMainThread(event);
 }
 
 void
-nsAudioStreamRemote::Drain()
+nsRemotedAudioStream::Drain()
 {
   if (!mAudioChild)
     return;
   nsCOMPtr<nsIRunnable> event = new AudioDrainEvent(mAudioChild);
   NS_DispatchToMainThread(event);
   mAudioChild->WaitForDrain();
 }
- 
+
 void
-nsAudioStreamRemote::Pause()
+nsRemotedAudioStream::Pause()
 {
   mPaused = PR_TRUE;
   if (!mAudioChild)
     return;
   nsCOMPtr<nsIRunnable> event = new AudioPauseEvent(mAudioChild, PR_TRUE);
   NS_DispatchToMainThread(event);
 }
 
 void
-nsAudioStreamRemote::Resume()
+nsRemotedAudioStream::Resume()
 {
   mPaused = PR_FALSE;
   if (!mAudioChild)
     return;
   nsCOMPtr<nsIRunnable> event = new AudioPauseEvent(mAudioChild, PR_FALSE);
   NS_DispatchToMainThread(event);
 }
 
-PRInt64 nsAudioStreamRemote::GetPosition()
+PRInt64 nsRemotedAudioStream::GetPosition()
 {
-  PRInt64 sampleOffset = GetSampleOffset();
-  if (sampleOffset >= 0) {
-    return ((USECS_PER_S * sampleOffset) / mRate / mChannels);
+  PRInt64 position = GetPositionInFrames();
+  if (position >= 0) {
+    return ((USECS_PER_S * position) / mRate);
   }
   return 0;
 }
 
 PRInt64
-nsAudioStreamRemote::GetSampleOffset()
+nsRemotedAudioStream::GetPositionInFrames()
 {
   if(!mAudioChild)
     return 0;
 
-  PRInt64 offset = mAudioChild->GetLastKnownSampleOffset();
-  if (offset == -1)
+  PRInt64 position = mAudioChild->GetLastKnownPosition();
+  if (position == -1)
     return 0;
 
-  PRInt64 time   = mAudioChild->GetLastKnownSampleOffsetTime();
-  PRInt64 result = offset + (mRate * mChannels * (PR_IntervalNow() - time) / USECS_PER_S);
+  PRInt64 time = mAudioChild->GetLastKnownPositionTimestamp();
+  PRInt64 result = position + (mRate * (PR_IntervalNow() - time) / USECS_PER_S);
 
   return result;
 }
 
 PRBool
-nsAudioStreamRemote::IsPaused()
+nsRemotedAudioStream::IsPaused()
 {
   return mPaused;
 }
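
A recurring pattern in the native stream above: the sa_stream_* backend
reports byte counts, which convert to frames by dividing out both the sample
size and the channel count, as Available() and GetPositionInFrames() now do.
A sketch of that conversion, assuming 16-bit samples as with
SA_PCM_FORMAT_S16_NE:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t channels = 2;
      const size_t bytes = 17640;  // hypothetical byte count from the backend

      // bytes -> samples -> frames, as in nsNativeAudioStream::Available().
      const size_t samples = bytes / sizeof(short);
      const size_t frames = samples / channels;

      printf("%zu bytes = %zu samples = %zu frames\n", bytes, samples, frames);
      return 0;
    }
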
--- a/content/media/nsAudioStream.h
+++ b/content/media/nsAudioStream.h
@@ -52,80 +52,78 @@ public:
     FORMAT_U8,
     FORMAT_S16_LE,
     FORMAT_FLOAT32
   };
 
   virtual ~nsAudioStream();
 
   // Initialize Audio Library. Some Audio backends require initializing the
-  // library before using it. 
+  // library before using it.
   static void InitLibrary();
 
   // Shutdown Audio Library. Some Audio backends require shutting down the
   // library after using it.
   static void ShutdownLibrary();
 
   // Thread that is shared between audio streams.
   // This may return null in the child process
   nsIThread *GetThread();
 
   // AllocateStream will return either a local stream or a remoted stream
   // depending on where you call it from.  If you call this from a child process,
   // you may receive an implementation which forwards to a compositing process.
   static nsAudioStream* AllocateStream();
 
-  // Initialize the audio stream. aNumChannels is the number of audio channels 
-  // (1 for mono, 2 for stereo, etc) and aRate is the frequency of the audio 
-  // samples (22050, 44100, etc).
+  // Initialize the audio stream. aNumChannels is the number of audio
+  // channels (1 for mono, 2 for stereo, etc) and aRate is the sample rate
+  // (22050Hz, 44100Hz, etc).
   // Unsafe to call with the decoder monitor held.
   virtual nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat) = 0;
 
   // Closes the stream. All future use of the stream is an error.
   // Unsafe to call with the decoder monitor held.
   virtual void Shutdown() = 0;
 
-  // Write audio data to the audio hardware.  aBuf is an array of samples in
-  // the format specified by mFormat of length aCount.  aCount should be
-  // evenly divisible by the number of channels in this audio stream.  If
-  // aCount is larger than the result of Available(), the write will block
-  // until sufficient buffer space is available.
-  virtual nsresult Write(const void* aBuf, PRUint32 aCount) = 0;
+  // Write audio data to the audio hardware.  aBuf is an array of frames in
+  // the format specified by mFormat of length aFrames.  If aFrames is larger
+  // than the result of Available(), the write will block until sufficient
+  // buffer space is available.
+  virtual nsresult Write(const void* aBuf, PRUint32 aFrames) = 0;
 
-  // Return the number of audio samples that can be written to the audio device
-  // without blocking.
+  // Return the number of audio frames that can be written without blocking.
   virtual PRUint32 Available() = 0;
 
   // Set the current volume of the audio playback. This is a value from
   // 0 (meaning muted) to 1 (meaning full volume).
   virtual void SetVolume(double aVolume) = 0;
 
   // Block until buffered audio data has been consumed.
   // Unsafe to call with the decoder monitor held.
   virtual void Drain() = 0;
 
   // Pause audio playback
   virtual void Pause() = 0;
 
   // Resume audio playback
   virtual void Resume() = 0;
 
-  // Return the position in microseconds of the sample being played by the
-  // audio hardware.
+  // Return the position in microseconds of the audio frame being played by
+  // the audio hardware.
   virtual PRInt64 GetPosition() = 0;
 
-  // Return the position, measured in samples played since the start, by
-  // the audio hardware.
-  virtual PRInt64 GetSampleOffset() = 0;
+  // Return the position, measured in audio frames played since the stream
+  // was opened, of the audio hardware.
+  virtual PRInt64 GetPositionInFrames() = 0;
 
   // Returns PR_TRUE when the audio stream is paused.
   virtual PRBool IsPaused() = 0;
 
-  // Returns the minimum number of samples which must be written before
+  // Returns the minimum number of audio frames which must be written before
   // you can be sure that something will be played.
   // Unsafe to call with the decoder monitor held.
-  virtual PRInt32 GetMinWriteSamples() = 0;
+  virtual PRInt32 GetMinWriteSize() = 0;
 
 protected:
   nsCOMPtr<nsIThread> mAudioPlaybackThread;
 };
 
 #endif
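
With the renamed interface, every size a caller passes or receives is a
frame count. A minimal mock with the same frame-based contract (plain C++;
the names mirror the header, but this is not the actual class):

    #include <cstdint>
    #include <vector>

    struct MockAudioStream {
      uint32_t channels = 2;
      std::vector<short> buffer;  // interleaved samples

      // Write aFrames frames; the buffer grows by aFrames * channels samples.
      void Write(const short* aBuf, uint32_t aFrames) {
        buffer.insert(buffer.end(), aBuf, aBuf + aFrames * channels);
      }

      // Frames written so far: stored samples divided by the channel count.
      int64_t GetPositionInFrames() const {
        return static_cast<int64_t>(buffer.size() / channels);
      }
    };

    int main() {
      MockAudioStream s;
      short silence[256 * 2] = {0};
      s.Write(silence, 256);
      return s.GetPositionInFrames() == 256 ? 0 : 1;
    }
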
--- a/content/media/nsBuiltinDecoderReader.cpp
+++ b/content/media/nsBuiltinDecoderReader.cpp
@@ -301,84 +301,84 @@ nsresult nsBuiltinDecoderReader::DecodeT
         return NS_ERROR_FAILURE;
       }
     }
     LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld", startTime));
   }
 
   if (HasAudio()) {
     // Decode audio forward to the seek target.
-    PRInt64 targetSample = 0;
-    if (!UsecsToSamples(aTarget, mInfo.mAudioRate, targetSample)) {
+    PRInt64 targetFrame = 0;
+    if (!UsecsToFrames(aTarget, mInfo.mAudioRate, targetFrame)) {
       return NS_ERROR_FAILURE;
     }
     PRBool eof = PR_FALSE;
     while (HasAudio() && !eof) {
       while (!eof && mAudioQueue.GetSize() == 0) {
         eof = !DecodeAudioData();
         {
           ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
           if (mDecoder->GetDecodeState() == nsBuiltinDecoderStateMachine::DECODER_STATE_SHUTDOWN) {
             return NS_ERROR_FAILURE;
           }
         }
       }
       const AudioData* audio = mAudioQueue.PeekFront();
       if (!audio)
         break;
-      PRInt64 startSample = 0;
-      if (!UsecsToSamples(audio->mTime, mInfo.mAudioRate, startSample)) {
+      PRInt64 startFrame = 0;
+      if (!UsecsToFrames(audio->mTime, mInfo.mAudioRate, startFrame)) {
         return NS_ERROR_FAILURE;
       }
-      if (startSample + audio->mSamples <= targetSample) {
-        // Our seek target lies after the samples in this AudioData. Pop it
+      if (startFrame + audio->mFrames <= targetFrame) {
+        // Our seek target lies after the frames in this AudioData. Pop it
         // off the queue, and keep decoding forwards.
         delete mAudioQueue.PopFront();
         audio = nsnull;
         continue;
       }
-      if (startSample > targetSample) {
+      if (startFrame > targetFrame) {
         // The seek target doesn't lie in the audio block just after the last
-        // audio samples we've seen which were before the seek target. This
+        // audio frames we've seen which were before the seek target. This
         // could have been the first audio data we've seen after seek, i.e. the
         // seek terminated after the seek target in the audio stream. Just
         // abort the audio decode-to-target, the state machine will play
         // silence to cover the gap. Typically this happens in poorly muxed
         // files.
         NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
         break;
       }
 
-      // The seek target lies somewhere in this AudioData's samples, strip off
-      // any samples which lie before the seek target, so we'll begin playback
+      // The seek target lies somewhere in this AudioData's frames, strip off
+      // any frames which lie before the seek target, so we'll begin playback
       // exactly at the seek target.
-      NS_ASSERTION(targetSample >= startSample, "Target must at or be after data start.");
-      NS_ASSERTION(targetSample < startSample + audio->mSamples, "Data must end after target.");
+      NS_ASSERTION(targetFrame >= startFrame, "Target must be at or after data start.");
+      NS_ASSERTION(targetFrame < startFrame + audio->mFrames, "Data must end after target.");
 
-      PRInt64 samplesToPrune = targetSample - startSample;
-      if (samplesToPrune > audio->mSamples) {
-        // We've messed up somehow. Don't try to trim samples, the |samples|
+      PRInt64 framesToPrune = targetFrame - startFrame;
+      if (framesToPrune > audio->mFrames) {
+        // We've messed up somehow. Don't try to trim frames, the |frames|
         // variable below will overflow.
-        NS_WARNING("Can't prune more samples that we have!");
+        NS_WARNING("Can't prune more frames that we have!");
         break;
       }
-      PRUint32 samples = audio->mSamples - static_cast<PRUint32>(samplesToPrune);
+      PRUint32 frames = audio->mFrames - static_cast<PRUint32>(framesToPrune);
       PRUint32 channels = audio->mChannels;
-      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[samples * channels]);
+      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
       memcpy(audioData.get(),
-             audio->mAudioData.get() + (samplesToPrune * channels),
-             samples * channels * sizeof(AudioDataValue));
+             audio->mAudioData.get() + (framesToPrune * channels),
+             frames * channels * sizeof(AudioDataValue));
       PRInt64 duration;
-      if (!SamplesToUsecs(samples, mInfo.mAudioRate, duration)) {
+      if (!FramesToUsecs(frames, mInfo.mAudioRate, duration)) {
         return NS_ERROR_FAILURE;
       }
       nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                               aTarget,
                                               duration,
-                                              samples,
+                                              frames,
                                               audioData.forget(),
                                               channels));
       delete mAudioQueue.PopFront();
       mAudioQueue.PushFront(data.forget());
       break;
     }
   }
   return NS_OK;
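
DecodeToTarget() above prunes the leading frames of the chunk that straddles
the seek target; because the buffer is interleaved, every frame offset is
scaled by the channel count when indexing it. A standalone sketch of the
pruning arithmetic (made-up numbers):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
      const uint32_t channels = 2;
      const int64_t startFrame = 1000;   // first frame in the decoded chunk
      const uint32_t chunkFrames = 512;  // frames in the chunk
      const int64_t targetFrame = 1100;  // seek target, inside the chunk

      std::vector<float> data(chunkFrames * channels, 0.5f);

      // Drop the frames before the target; the copy source is offset by
      // framesToPrune * channels because samples are interleaved.
      const int64_t framesToPrune = targetFrame - startFrame;
      const uint32_t frames = chunkFrames - uint32_t(framesToPrune);
      std::vector<float> pruned(frames * channels);
      memcpy(pruned.data(), data.data() + framesToPrune * channels,
             frames * channels * sizeof(float));
      return pruned.size() == frames * channels ? 0 : 1;
    }
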
--- a/content/media/nsBuiltinDecoderReader.h
+++ b/content/media/nsBuiltinDecoderReader.h
@@ -45,17 +45,17 @@
 #include "nsClassHashtable.h"
 #include "mozilla/TimeStamp.h"
 #include "nsSize.h"
 #include "nsRect.h"
 #include "mozilla/ReentrantMonitor.h"
 
 class nsBuiltinDecoderStateMachine;
 
-// Stores info relevant to presenting media samples.
+// Stores info relevant to presenting media frames.
 class nsVideoInfo {
 public:
   nsVideoInfo()
     : mAudioRate(0),
       mAudioChannels(0),
       mDisplay(0,0),
       mStereoMode(mozilla::layers::STEREO_MODE_MONO),
       mHasAudio(PR_FALSE),
@@ -65,17 +65,17 @@ public:
   // Returns PR_TRUE if it's safe to use aPicture as the picture to be
   // extracted inside a frame of size aFrame, and scaled up to and displayed
   // at a size of aDisplay. You should validate the frame, picture, and
   // display regions before using them to display video frames.
   static PRBool ValidateVideoRegion(const nsIntSize& aFrame,
                                     const nsIntRect& aPicture,
                                     const nsIntSize& aDisplay);
 
-  // Samples per second.
+  // Sample rate.
   PRUint32 mAudioRate;
 
   // Number of audio channels.
   PRUint32 mAudioChannels;
 
   // Size in pixels at which the video is rendered. This is after it has
   // been scaled by its aspect ratio.
   nsIntSize mDisplay;
@@ -111,66 +111,47 @@ typedef float AudioDataValue;
 
 #define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
 #define MOZ_CONVERT_AUDIO_SAMPLE(x) (x)
 #define MOZ_SAMPLE_TYPE_FLOAT32 1
 
 #endif
 
-// Holds chunk a decoded audio samples.
+// Holds a chunk of decoded audio frames.
 class AudioData {
 public:
   AudioData(PRInt64 aOffset,
             PRInt64 aTime,
             PRInt64 aDuration,
-            PRUint32 aSamples,
+            PRUint32 aFrames,
             AudioDataValue* aData,
             PRUint32 aChannels)
   : mOffset(aOffset),
     mTime(aTime),
     mDuration(aDuration),
-    mSamples(aSamples),
-    mChannels(aChannels),
-    mAudioData(aData)
-  {
-    MOZ_COUNT_CTOR(AudioData);
-  }
-
-  AudioData(PRInt64 aOffset,
-            PRInt64 aDuration,
-            PRUint32 aSamples,
-            AudioDataValue* aData,
-            PRUint32 aChannels)
-  : mOffset(aOffset),
-    mTime(-1),
-    mDuration(aDuration),
-    mSamples(aSamples),
+    mFrames(aFrames),
     mChannels(aChannels),
     mAudioData(aData)
   {
     MOZ_COUNT_CTOR(AudioData);
   }
 
   ~AudioData()
   {
     MOZ_COUNT_DTOR(AudioData);
   }
 
-  PRUint32 AudioDataLength() {
-    return mChannels * mSamples;
-  }
-
-  // Approximate byte offset of the end of the page on which this sample
-  // chunk ends.
+  // Approximate byte offset of the end of the page on which this chunk
+  // ends.
   const PRInt64 mOffset;
 
-  PRInt64 mTime; // Start time of samples in usecs.
+  PRInt64 mTime; // Start time of data in usecs.
   const PRInt64 mDuration; // In usecs.
-  const PRUint32 mSamples;
+  const PRUint32 mFrames;
   const PRUint32 mChannels;
   nsAutoArrayPtr<AudioDataValue> mAudioData;
 };
 
 // Holds a decoded video frame, in YCbCr format. These are queued in the reader.
 class VideoData {
 public:
   typedef mozilla::layers::ImageContainer ImageContainer;
@@ -357,34 +338,34 @@ template <class T> class MediaQueue : pr
       T* x = PopFront();
       delete x;
     }
     mEndOfStream = PR_FALSE;
   }
 
   PRBool AtEndOfStream() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-    return GetSize() == 0 && mEndOfStream;    
+    return GetSize() == 0 && mEndOfStream;
   }
 
-  // Returns PR_TRUE if the media queue has had it last sample added to it.
+  // Returns PR_TRUE if the media queue has had its last item added to it.
   // This happens when the media stream has been completely decoded. Note this
   // does not mean that the corresponding stream has finished playback.
   PRBool IsFinished() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-    return mEndOfStream;    
+    return mEndOfStream;
   }
 
-  // Informs the media queue that it won't be receiving any more samples.
+  // Informs the media queue that it won't be receiving any more items.
   void Finish() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-    mEndOfStream = PR_TRUE;    
+    mEndOfStream = PR_TRUE;
   }
 
-  // Returns the approximate number of microseconds of samples in the queue.
+  // Returns the approximate number of microseconds of items in the queue.
   PRInt64 Duration() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     if (GetSize() < 2) {
       return 0;
     }
     T* last = Peek();
     T* first = PeekFront();
     return last->mTime - first->mTime;
@@ -394,17 +375,17 @@ template <class T> class MediaQueue : pr
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     ForEach(aFunctor);
   }
 
 private:
   mutable ReentrantMonitor mReentrantMonitor;
 
   // PR_TRUE when we've decoded the last frame of data in the
-  // bitstream for which we're queueing sample-data.
+  // bitstream for which we're queueing frame data.
   PRBool mEndOfStream;
 };
 
 // Encapsulates the decoding and reading of media data. Reading can only be
 // done on the decode thread thread. Never hold the decoder monitor when
 // calling into this class. Unless otherwise specified, methods and fields of
 // this class can only be accessed on the decode thread.
 class nsBuiltinDecoderReader : public nsRunnable {
@@ -437,39 +418,39 @@ public:
   virtual PRBool HasAudio() = 0;
   virtual PRBool HasVideo() = 0;
 
   // Read header data for all bitstreams in the file. Fills mInfo with
   // the data required to present the media. Returns NS_OK on success,
   // or NS_ERROR_FAILURE on failure.
   virtual nsresult ReadMetadata(nsVideoInfo* aInfo) = 0;
 
-  // Stores the presentation time of the first frame/sample we'd be
-  // able to play if we started playback at the current position. Returns
-  // the first video sample, if we have video.
+  // Stores the presentation time of the first frame we'd be able to play if
+  // we started playback at the current position. Returns the first video
+  // frame, if we have video.
   VideoData* FindStartTime(PRInt64& aOutStartTime);
 
   // Moves the decode head to aTime microseconds. aStartTime and aEndTime
   // denote the start and end times of the media in usecs, and aCurrentTime
   // is the current playback position in microseconds.
   virtual nsresult Seek(PRInt64 aTime,
                         PRInt64 aStartTime,
                         PRInt64 aEndTime,
                         PRInt64 aCurrentTime) = 0;
 
-  // Queue of audio samples. This queue is threadsafe, and is accessed from
+  // Queue of audio frames. This queue is threadsafe, and is accessed from
   // the audio, decoder, state machine, and main threads.
   MediaQueue<AudioData> mAudioQueue;
 
-  // Queue of video samples. This queue is threadsafe, and is accessed from
+  // Queue of video frames. This queue is threadsafe, and is accessed from
   // the decoder, state machine, and main threads.
   MediaQueue<VideoData> mVideoQueue;
 
   // Populates aBuffered with the time ranges which are buffered. aStartTime
-  // must be the presentation time of the first sample/frame in the media, e.g.
+  // must be the presentation time of the first frame in the media, e.g.
   // the media time corresponding to playback time/position 0. This function
   // should only be called on the main thread.
   virtual nsresult GetBuffered(nsTimeRanges* aBuffered,
                                PRInt64 aStartTime) = 0;
 
   class VideoQueueMemoryFunctor : public nsDequeFunctor {
   public:
     VideoQueueMemoryFunctor() : mResult(0) {}
@@ -497,17 +478,17 @@ public:
   }
 
   class AudioQueueMemoryFunctor : public nsDequeFunctor {
   public:
     AudioQueueMemoryFunctor() : mResult(0) {}
 
     virtual void* operator()(void* anObject) {
       const AudioData* audioData = static_cast<const AudioData*>(anObject);
-      mResult += audioData->mSamples * audioData->mChannels * sizeof(AudioDataValue);
+      mResult += audioData->mFrames * audioData->mChannels * sizeof(AudioDataValue);
       return nsnull;
     }
 
     PRInt64 mResult;
   };
 
   PRInt64 AudioQueueMemoryInUse() {
     AudioQueueMemoryFunctor functor;
@@ -516,26 +497,26 @@ public:
   }
 
   // Only used by nsWebMReader for now, so stub here rather than in every
   // reader than inherits from nsBuiltinDecoderReader.
   virtual void NotifyDataArrived(const char* aBuffer, PRUint32 aLength, PRUint32 aOffset) {}
 
 protected:
 
-  // Pumps the decode until we reach frames/samples required to play at
-  // time aTarget (usecs).
+  // Pumps the decode until we reach frames required to play at time aTarget
+  // (usecs).
   nsresult DecodeToTarget(PRInt64 aTarget);
 
   // Reader decode function. Matches DecodeVideoFrame() and
   // DecodeAudioData().
   typedef PRBool (nsBuiltinDecoderReader::*DecodeFn)();
 
-  // Calls aDecodeFn on *this until aQueue has a sample, whereupon
-  // we return the first sample.
+  // Calls aDecodeFn on *this until aQueue has an item, whereupon
+  // we return the first item.
   template<class Data>
   Data* DecodeToFirstData(DecodeFn aDecodeFn,
                           MediaQueue<Data>& aQueue);
 
   // Wrapper so that DecodeVideoFrame(PRBool&,PRInt64) can be called from
   // DecodeToFirstData().
   PRBool DecodeVideoFrame() {
     PRBool f = PR_FALSE;
--- a/content/media/nsBuiltinDecoderStateMachine.cpp
+++ b/content/media/nsBuiltinDecoderStateMachine.cpp
@@ -70,17 +70,17 @@ static const PRUint32 LOW_AUDIO_USECS = 
 
 // If more than this many usecs of decoded audio is queued, we'll hold off
 // decoding more audio. If we increase the low audio threshold (see
 // LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not
 // less than the low audio threshold.
 const PRInt64 AMPLE_AUDIO_USECS = 1000000;
 
 // Maximum number of bytes we'll allocate and write at once to the audio
-// hardware when the audio stream contains missing samples and we're
+// hardware when the audio stream contains missing frames and we're
 // writing silence in order to fill the gap. We limit our silence-writes
 // to 32KB in order to avoid allocating an impossibly large chunk of
 // memory if we encounter a large chunk of silence.
 const PRUint32 SILENCE_BYTES_CHUNK = 32 * 1024;
 
 // If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
 // we're not "pumping video", we'll skip the video up to the next keyframe
 // which is at or after the current playback position.
@@ -317,17 +317,17 @@ void nsBuiltinDecoderStateMachine::Decod
 
 void nsBuiltinDecoderStateMachine::DecodeLoop()
 {
   LOG(PR_LOG_DEBUG, ("%p Start DecodeLoop()", mDecoder.get()));
 
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
 
-  // We want to "pump" the decode until we've got a few frames/samples decoded
+  // We want to "pump" the decode until we've got a few frames decoded
   // before we consider whether decode is falling behind.
   PRBool audioPump = PR_TRUE;
   PRBool videoPump = PR_TRUE;
 
   // If the video decode is falling behind the audio, we'll start dropping the
   // inter-frames up until the next keyframe which is at or before the current
   // playback position. skipToNextKeyframe is PR_TRUE if we're currently
   // skipping up to the next keyframe.
@@ -492,21 +492,21 @@ PRBool nsBuiltinDecoderStateMachine::IsP
 }
 
 void nsBuiltinDecoderStateMachine::AudioLoop()
 {
   NS_ASSERTION(OnAudioThread(), "Should be on audio thread.");
   LOG(PR_LOG_DEBUG, ("%p Begun audio thread/loop", mDecoder.get()));
   PRInt64 audioDuration = 0;
   PRInt64 audioStartTime = -1;
-  PRInt64 samplesWritten = 0;
+  PRInt64 framesWritten = 0;
   PRUint32 channels, rate;
   double volume = -1;
   PRBool setVolume;
-  PRInt32 minWriteSamples = -1;
+  PRInt32 minWriteFrames = -1;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     mAudioCompleted = PR_FALSE;
     audioStartTime = mAudioStartTime;
     channels = mInfo.mAudioChannels;
     rate = mInfo.mAudioRate;
     NS_ASSERTION(audioStartTime != -1, "Should have audio start time by now");
   }
@@ -573,114 +573,114 @@ void nsBuiltinDecoderStateMachine::Audio
       if (IsPlaying() && mAudioStream->IsPaused()) {
         mAudioStream->Resume();
       }
     }
 
     if (setVolume) {
       mAudioStream->SetVolume(volume);
     }
-    if (minWriteSamples == -1) {
-      minWriteSamples = mAudioStream->GetMinWriteSamples();
+    if (minWriteFrames == -1) {
+      minWriteFrames = mAudioStream->GetMinWriteSize();
     }
     NS_ASSERTION(mReader->mAudioQueue.GetSize() > 0,
                  "Should have data to play");
-    // See if there's missing samples in the audio stream. If there is, push
-    // silence into the audio hardware, so we can play across the gap.
+    // See if there's a gap in the audio. If there is, push silence into the
+    // audio hardware, so we can play across the gap.
     const AudioData* s = mReader->mAudioQueue.PeekFront();
 
-    // Calculate the number of samples that have been pushed onto the audio
+    // Calculate the number of frames that have been pushed onto the audio
     // hardware.
-    PRInt64 playedSamples = 0;
-    if (!UsecsToSamples(audioStartTime, rate, playedSamples)) {
-      NS_WARNING("Int overflow converting playedSamples");
+    PRInt64 playedFrames = 0;
+    if (!UsecsToFrames(audioStartTime, rate, playedFrames)) {
+      NS_WARNING("Int overflow converting playedFrames");
       break;
     }
-    if (!AddOverflow(playedSamples, audioDuration, playedSamples)) {
-      NS_WARNING("Int overflow adding playedSamples");
+    if (!AddOverflow(playedFrames, audioDuration, playedFrames)) {
+      NS_WARNING("Int overflow adding playedFrames");
       break;
     }
 
     // Calculate the timestamp of the next chunk of audio in numbers of
     // frames.
     PRInt64 sampleTime = 0;
-    if (!UsecsToSamples(s->mTime, rate, sampleTime)) {
+    if (!UsecsToFrames(s->mTime, rate, sampleTime)) {
       NS_WARNING("Int overflow converting sampleTime");
       break;
     }
-    PRInt64 missingSamples = 0;
-    if (!AddOverflow(sampleTime, -playedSamples, missingSamples)) {
-      NS_WARNING("Int overflow adding missingSamples");
+    PRInt64 missingFrames = 0;
+    if (!AddOverflow(sampleTime, -playedFrames, missingFrames)) {
+      NS_WARNING("Int overflow adding missingFrames");
       break;
     }
 
-    if (missingSamples > 0) {
+    if (missingFrames > 0) {
       // The next audio chunk begins some time after the end of the last chunk
       // we pushed to the audio hardware. We must push silence into the audio
       // hardware so that the next audio chunk begins playback at the correct
       // time.
-      missingSamples = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX), missingSamples);
-      samplesWritten = PlaySilence(static_cast<PRUint32>(missingSamples),
-                                   channels, playedSamples);
+      missingFrames = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX), missingFrames);
+      framesWritten = PlaySilence(static_cast<PRUint32>(missingFrames),
+                                  channels, playedFrames);
     } else {
-      samplesWritten = PlayFromAudioQueue(sampleTime, channels);
+      framesWritten = PlayFromAudioQueue(sampleTime, channels);
     }
-    audioDuration += samplesWritten;
+    audioDuration += framesWritten;
     {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
       PRInt64 playedUsecs;
-      if (!SamplesToUsecs(audioDuration, rate, playedUsecs)) {
+      if (!FramesToUsecs(audioDuration, rate, playedUsecs)) {
         NS_WARNING("Int overflow calculating playedUsecs");
         break;
       }
       if (!AddOverflow(audioStartTime, playedUsecs, mAudioEndTime)) {
         NS_WARNING("Int overflow calculating audio end time");
         break;
       }
 
       PRInt64 audioAhead = mAudioEndTime - GetMediaTime();
       if (audioAhead > AMPLE_AUDIO_USECS &&
-          samplesWritten > minWriteSamples)
+          framesWritten > minWriteFrames)
       {
         // We've pushed enough audio onto the hardware that we've queued up a
         // significant amount ahead of the playback position. The decode
-        // thread will be going to sleep, so we won't get any new samples
+        // thread will be going to sleep, so we won't get any new audio
         // anyway, so sleep until we need to push to the hardware again.
         Wait(AMPLE_AUDIO_USECS / 2);
         // Kick the decode thread; since above we only do a NotifyAll when
         // we pop an audio chunk of the queue, the decoder won't wake up if
         // we've got no more decoded chunks to push to the hardware. We can
-        // hit this condition if the last sample in the stream doesn't have
+        // hit this condition if the last frame in the stream doesn't have
         // its EOS flag set, and the decode thread sleeps just after decoding
         // that packet, but before realising there are no more packets.
         mon.NotifyAll();
       }
     }
   }
   if (mReader->mAudioQueue.AtEndOfStream() &&
       mState != DECODER_STATE_SHUTDOWN &&
       !mStopAudioThread)
   {
-    // Last sample pushed to audio hardware, wait for the audio to finish,
+    // Last frame pushed to audio hardware, wait for the audio to finish,
     // before the audio thread terminates.
     PRBool seeking = PR_FALSE;
     {
       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-      if (samplesWritten < minWriteSamples) {
-        // We've not written minWriteSamples in the last write, the audio
+      if (framesWritten < minWriteFrames) {
+        // We've not written minWriteFrames in the last write, so the audio
         // may not start playing. Write silence to ensure we've got enough
-        // samples written to start playback.
-        PRInt64 samples = minWriteSamples - samplesWritten;
-        if (samples < PR_UINT32_MAX / channels) {
+        // written to start playback.
+        PRInt64 minToWrite = minWriteFrames - framesWritten;
+        if (minToWrite < PR_UINT32_MAX / channels) {
           // Write silence manually rather than using PlaySilence(), so that
-          // the AudioAPI doesn't get a copy of the samples.
-          PRUint32 numValues = samples * channels;
-          nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numValues]);
-          memset(buf.get(), 0, sizeof(AudioDataValue) * numValues);
-          mAudioStream->Write(buf, numValues);
+          // the AudioAPI doesn't get a copy of the audio frames.
+          PRUint32 numSamples = minToWrite * channels;
+          nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numSamples]);
+          memset(buf.get(), 0, numSamples * sizeof(AudioDataValue));
+          mAudioStream->Write(buf, minToWrite);
         }
       }
 
       PRInt64 oldPosition = -1;
       PRInt64 position = GetMediaTime();
       while (oldPosition != position &&
              mAudioEndTime - position > 0 &&
              mState != DECODER_STATE_SEEKING &&
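
The gap detection above works entirely in frames: playedFrames is the
stream's start time converted to frames plus everything written so far, and
missingFrames is how far the next chunk's start lies beyond that. A
standalone sketch of the same arithmetic (illustrative values, overflow
checks omitted):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t rate = 44100;
      const int64_t audioStartTime = 2000000;    // usecs
      const int64_t framesWrittenSoFar = 44100;  // one second written
      const int64_t nextChunkTime = 3500000;     // usecs

      // UsecsToFrames() equivalents: usecs * rate / 1e6.
      const int64_t playedFrames =
          audioStartTime * rate / 1000000 + framesWrittenSoFar;
      const int64_t chunkStartFrame = nextChunkTime * rate / 1000000;

      // Positive means a gap that must be covered with silent frames.
      const int64_t missingFrames = chunkStartFrame - playedFrames;
      printf("missing frames: %lld\n", (long long)missingFrames);  // 22050
      return 0;
    }
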
@@ -691,17 +691,17 @@ void nsBuiltinDecoderStateMachine::Audio
         oldPosition = position;
         position = GetMediaTime();
       }
       seeking = mState == DECODER_STATE_SEEKING;
     }
 
     if (!seeking && !mAudioStream->IsPaused()) {
       mAudioStream->Drain();
-      // Fire one last event for any extra samples that didn't fill a framebuffer.
+      // Fire one last event for any extra frames that didn't fill a framebuffer.
       mEventManager.Drain(mAudioEndTime);
     }
   }
   LOG(PR_LOG_DEBUG, ("%p Reached audio stream end.", mDecoder.get()));
   {
    // Must hold lock while nulling the audio stream to prevent
     // state machine thread trying to use it while we're destroying it.
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@@ -716,76 +716,76 @@ void nsBuiltinDecoderStateMachine::Audio
   // Must not hold the decoder monitor while we shutdown the audio stream, as
   // it makes a synchronous dispatch on Android.
   audioStream->Shutdown();
   audioStream = nsnull;
 
   LOG(PR_LOG_DEBUG, ("%p Audio stream finished playing, audio thread exit", mDecoder.get()));
 }
 
-PRUint32 nsBuiltinDecoderStateMachine::PlaySilence(PRUint32 aSamples,
+PRUint32 nsBuiltinDecoderStateMachine::PlaySilence(PRUint32 aFrames,
                                                    PRUint32 aChannels,
-                                                   PRUint64 aSampleOffset)
+                                                   PRUint64 aFrameOffset)
 
 {
   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  PRUint32 maxSamples = SILENCE_BYTES_CHUNK / aChannels;
-  PRUint32 samples = NS_MIN(aSamples, maxSamples);
-  PRUint32 numValues = samples * aChannels;
-  nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numValues]);
-  memset(buf.get(), 0, sizeof(AudioDataValue) * numValues);
-  mAudioStream->Write(buf, numValues);
+  PRUint32 maxFrames = SILENCE_BYTES_CHUNK / aChannels / sizeof(AudioDataValue);
+  PRUint32 frames = NS_MIN(aFrames, maxFrames);
+  PRUint32 numSamples = frames * aChannels;
+  nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numSamples]);
+  memset(buf.get(), 0, numSamples * sizeof(AudioDataValue));
+  mAudioStream->Write(buf, frames);
   // Dispatch events to the DOM for the audio just written.
-  mEventManager.QueueWrittenAudioData(buf.get(), numValues,
-                                      (aSampleOffset + samples) * aChannels);
-  return samples;
+  mEventManager.QueueWrittenAudioData(buf.get(), frames * aChannels,
+                                      (aFrameOffset + frames) * aChannels);
+  return frames;
 }
 
-PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aSampleOffset,
+PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aFrameOffset,
                                                           PRUint32 aChannels)
 {
   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  nsAutoPtr<AudioData> audioData(mReader->mAudioQueue.PopFront());
+  nsAutoPtr<AudioData> audio(mReader->mAudioQueue.PopFront());
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
     // Awaken the decode loop if it's waiting for space to free up in the
     // audio queue.
     mDecoder->GetReentrantMonitor().NotifyAll();
   }
   PRInt64 offset = -1;
-  PRUint32 samples = 0;
+  PRUint32 frames = 0;
   // The state machine could have paused since we've released the decoder
   // monitor and acquired the audio monitor. Rather than acquire both
   // monitors, the audio stream also maintains whether it's paused or not.
   // This prevents us from doing a blocking write while holding the audio
   // monitor while paused; we would block, and the state machine won't be
   // able to acquire the audio monitor in order to resume or destroy the
   // audio stream.
   if (!mAudioStream->IsPaused()) {
-    mAudioStream->Write(audioData->mAudioData,
-                        audioData->AudioDataLength());
+    mAudioStream->Write(audio->mAudioData,
+                        audio->mFrames);
 
-    offset = audioData->mOffset;
-    samples = audioData->mSamples;
+    offset = audio->mOffset;
+    frames = audio->mFrames;
 
     // Dispatch events to the DOM for the audio just written.
-    mEventManager.QueueWrittenAudioData(audioData->mAudioData.get(),
-                                        audioData->AudioDataLength(),
-                                        (aSampleOffset + samples) * aChannels);
+    mEventManager.QueueWrittenAudioData(audio->mAudioData.get(),
+                                        audio->mFrames * aChannels,
+                                        (aFrameOffset + frames) * aChannels);
   } else {
-    mReader->mAudioQueue.PushFront(audioData);
-    audioData.forget();
+    mReader->mAudioQueue.PushFront(audio);
+    audio.forget();
   }
   if (offset != -1) {
     mDecoder->UpdatePlaybackOffset(offset);
   }
-  return samples;
+  return frames;
 }
 
 nsresult nsBuiltinDecoderStateMachine::Init(nsDecoderStateMachine* aCloneDonor)
 {
   nsBuiltinDecoderReader* cloneReader = nsnull;
   if (aCloneDonor) {
     cloneReader = static_cast<nsBuiltinDecoderStateMachine*>(aCloneDonor)->mReader;
   }
@@ -1666,17 +1666,17 @@ void nsBuiltinDecoderStateMachine::Advan
     PRInt64 audio_time = GetAudioClock();
     if (HasAudio() && !mAudioCompleted && audio_time != -1) {
       clock_time = audio_time;
       // Resync against the audio clock, while we're trusting the
       // audio clock. This ensures no "drift", particularly on Linux.
       mPlayDuration = clock_time - mStartTime;
       mPlayStartTime = TimeStamp::Now();
     } else {
-      // Audio hardware is disabled on this system. Sync to the system clock.
+      // Audio is disabled on this system. Sync to the system clock.
       clock_time = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
       // Ensure the clock can never go backwards.
       NS_ASSERTION(mCurrentFrameTime <= clock_time, "Clock should go forwards");
       clock_time = NS_MAX(mCurrentFrameTime, clock_time) + mStartTime;
     }
   }
 
   // Skip frames up to the frame at the playback position, and figure out
@@ -1748,30 +1748,30 @@ void nsBuiltinDecoderStateMachine::Advan
     remainingTime = currentFrame->mEndTime - mStartTime - now;
     currentFrame = nsnull;
   }
 
   // Cap the current time to the larger of the audio and video end time.
   // This ensures that if we're running off the system clock, we don't
   // advance the clock to after the media end time.
   if (mVideoFrameEndTime != -1 || mAudioEndTime != -1) {
-    // These will be non -1 if we've displayed a video frame, or played an audio sample.
+    // These will not be -1 once we've displayed a video frame or played an audio frame.
     clock_time = NS_MIN(clock_time, NS_MAX(mVideoFrameEndTime, mAudioEndTime));
     if (clock_time > GetMediaTime()) {
       // Only update the playback position if the clock time is greater
       // than the previous playback position. The audio clock can
       // sometimes report a time less than its previously reported in
       // some situations, and we need to gracefully handle that.
       UpdatePlaybackPosition(clock_time);
     }
   }
 
-  // If the number of audio/video samples queued has changed, either by
-  // this function popping and playing a video sample, or by the audio
-  // thread popping and playing an audio sample, we may need to update our
+  // If the number of audio/video frames queued has changed, either by
+  // this function popping and playing a video frame, or by the audio
+  // thread popping and playing an audio frame, we may need to update our
   // ready state. Post an update to do so.
   UpdateReadyState();
 
   ScheduleStateMachine(remainingTime);
 }
 
 void nsBuiltinDecoderStateMachine::Wait(PRInt64 aUsecs) {
   NS_ASSERTION(OnAudioThread(), "Only call on the audio thread");
@@ -1810,17 +1810,17 @@ VideoData* nsBuiltinDecoderStateMachine:
                    "We should have mEndTime as supplied duration here");
      // The duration was specified via a Content-Duration HTTP header.
       // Adjust mEndTime so that mEndTime-mStartTime matches the specified
       // duration.
       mEndTime = mStartTime + mEndTime;
     }
   }
   // Set the audio start time to be start of media. If this lies before the
-  // first acutal audio sample we have, we'll inject silence during playback
+  // first actual audio frame we have, we'll inject silence during playback
   // to ensure the audio starts at the correct time.
   mAudioStartTime = mStartTime;
   LOG(PR_LOG_DEBUG, ("%p Media start time is %lld", mDecoder.get(), mStartTime));
   return v;
 }
 
 void nsBuiltinDecoderStateMachine::UpdateReadyState() {
   mDecoder->GetReentrantMonitor().AssertCurrentThreadIn();
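
The silence-writing hunks above all rely on the distinction this patch makes explicit: an audio frame is one sample per channel, buffers are allocated in samples (frames * channels), and nsAudioStream::Write() now takes its length in frames. A minimal sketch of that arithmetic, assuming the post-patch Write() signature:

    // Illustration only: the frame/sample bookkeeping used by the
    // silence writes in nsBuiltinDecoderStateMachine above.
    PRUint32 channels = 2;                    // stereo
    PRUint32 frames = 1024;                   // length in frames
    PRUint32 numSamples = frames * channels;  // interleaved sample count
    nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numSamples]);
    memset(buf.get(), 0, numSamples * sizeof(AudioDataValue));
    mAudioStream->Write(buf, frames);         // Write() takes frames, not samples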
--- a/content/media/nsBuiltinDecoderStateMachine.h
+++ b/content/media/nsBuiltinDecoderStateMachine.h
@@ -309,54 +309,54 @@ protected:
 
   // Resets playback timing data. Called when we seek, on the decode thread.
   void ResetPlayback();
 
   // Returns the audio clock, if we have audio, or -1 if we don't.
   // Called on the state machine thread.
   PRInt64 GetAudioClock();
 
-  // Returns the presentation time of the first sample or frame in the media.
-  // If the media has video, it returns the first video frame. The decoder
-  // monitor must be held with exactly one lock count. Called on the state
-  // machine thread.
+  // Returns the presentation time of the first audio or video frame in the
+  // media.  If the media has video, it returns the first video frame. The
+  // decoder monitor must be held with exactly one lock count. Called on the
+  // state machine thread.
   VideoData* FindStartTime();
 
   // Update only the state machine's current playback position (and duration,
   // if unknown).  Does not update the playback position on the decoder or
   // media element -- use UpdatePlaybackPosition for that.  Called on the state
   // machine thread, caller must hold the decoder lock.
   void UpdatePlaybackPositionInternal(PRInt64 aTime);
 
   // Pushes the image down the rendering pipeline. Called on the shared state
   // machine thread. The decoder monitor must *not* be held when calling this.
   void RenderVideoFrame(VideoData* aData, TimeStamp aTarget);
  
   // If we have video, display a video frame if its time for display has
-  // arrived, otherwise sleep until it's time for the next sample. Update
-  // the current frame time as appropriate, and trigger ready state update.
-  // The decoder monitor must be held with exactly one lock count. Called
-  // on the state machine thread.
+  // arrived, otherwise sleep until it's time for the next frame. Update the
+  // current frame time as appropriate, and trigger ready state update.  The
+  // decoder monitor must be held with exactly one lock count. Called on the
+  // state machine thread.
   void AdvanceFrame();
 
-  // Pushes up to aSamples samples of silence onto the audio hardware. Returns
-  // the number of samples acutally pushed to the hardware. This pushes up to
-  // 32KB worth of samples to the hardware before returning, so must be called
-  // in a loop to ensure that the desired number of samples are pushed to the
-  // hardware. This ensures that the playback position advances smoothly, and
-  // guarantees that we don't try to allocate an impossibly large chunk of
-  // memory in order to play back silence. Called on the audio thread.
-  PRUint32 PlaySilence(PRUint32 aSamples,
+  // Writes up to aFrames frames of silence to the audio hardware. Returns
+  // the number of frames actually written. Each write is capped at
+  // SILENCE_BYTES_CHUNK (32kB), so this must be called in a loop to write
+  // the desired number of frames. This ensures that the playback position
+  // advances smoothly, and guarantees that we don't try to allocate an
+  // impossibly large chunk of memory in order to play back silence. Called
+  // on the audio thread.
+  PRUint32 PlaySilence(PRUint32 aFrames,
                        PRUint32 aChannels,
-                       PRUint64 aSampleOffset);
+                       PRUint64 aFrameOffset);
 
   // Pops an audio chunk from the front of the audio queue, and pushes its
-  // audio data to the audio hardware. MozAudioAvailable sample data is also
-  // queued here. Called on the audio thread.
-  PRUint32 PlayFromAudioQueue(PRUint64 aSampleOffset, PRUint32 aChannels);
+  // audio data to the audio hardware. MozAudioAvailable data is also queued
+  // here. Called on the audio thread.
+  PRUint32 PlayFromAudioQueue(PRUint64 aFrameOffset, PRUint32 aChannels);
 
   // Stops the decode thread. The decoder monitor must be held with exactly
   // one lock count. Called on the state machine thread.
   void StopDecodeThread();
 
   // Stops the audio thread. The decoder monitor must be held with exactly
   // one lock count. Called on the state machine thread.
   void StopAudioThread();
@@ -497,23 +497,23 @@ protected:
   PRInt64 mPlayDuration;
 
   // Time that buffering started. Used for buffering timeout and only
   // accessed on the state machine thread. This is null while we're not
   // buffering.
   TimeStamp mBufferingStart;
 
   // Start time of the media, in microseconds. This is the presentation
-  // time of the first sample decoded from the media, and is used to calculate
+  // time of the first frame decoded from the media, and is used to calculate
   // duration and as a bounds for seeking. Accessed on state machine, decode,
   // and main threads. Access controlled by decoder monitor.
   PRInt64 mStartTime;
 
-  // Time of the last page in the media, in microseconds. This is the
-  // end time of the last sample in the media. Accessed on state
+  // End time of the last frame in the media, in microseconds. Accessed on state
   // machine, decode, and main threads. Access controlled by decoder monitor.
   PRInt64 mEndTime;
 
   // Position to seek to in microseconds when the seek state transition occurs.
   // The decoder monitor lock must be obtained before reading or writing
   // this value. Accessed on main and decode thread.
   PRInt64 mSeekTime;
 
@@ -532,25 +532,25 @@ protected:
   nsAutoPtr<nsBuiltinDecoderReader> mReader;
 
   // The time of the current frame in microseconds. This is referenced from
   // 0 which is the initial playback position. Set by the state machine
   // thread, and read-only from the main thread to get the current
   // time value. Synchronised via decoder monitor.
   PRInt64 mCurrentFrameTime;
 
-  // The presentation time of the first audio sample that was played in
+  // The presentation time of the first audio frame that was played in
   // microseconds. We can add this to the audio stream position to determine
   // the current audio time. Accessed on audio and state machine thread.
   // Synchronized by decoder monitor.
   PRInt64 mAudioStartTime;
 
-  // The end time of the last audio sample that's been pushed onto the audio
+  // The end time of the last audio frame that's been pushed onto the audio
   // hardware in microseconds. This will approximately be the end time of the
-  // audio stream, unless another sample is pushed to the hardware.
+  // audio stream, unless another frame is pushed to the hardware.
   PRInt64 mAudioEndTime;
 
   // The presentation end time of the last video frame which has been displayed
   // in microseconds. Accessed from the state machine thread.
   PRInt64 mVideoFrameEndTime;
   
   // Volume of playback. 0.0 = muted. 1.0 = full volume. Read/Written
   // from the state machine and main threads. Synchronised via decoder
@@ -567,17 +567,17 @@ protected:
   // PR_TRUE if an event to notify about a change in the playback
   // position has been queued, but not yet run. It is set to PR_FALSE when
   // the event is run. This allows coalescing of these events as they can be
   // produced many times per second. Synchronised via decoder monitor.
   // Accessed on main and state machine threads.
   PRPackedBool mPositionChangeQueued;
 
   // PR_TRUE if the audio playback thread has finished. It is finished
-  // when either all the audio samples in the Vorbis bitstream have completed
+  // when either all the audio frames in the Vorbis bitstream have completed
   // playing, or we've moved into shutdown state, and the threads are to be
   // destroyed. Written by the audio playback thread and read and written by
   // the state machine thread. Synchronised via decoder monitor.
   PRPackedBool mAudioCompleted;
 
   // PR_TRUE if mDuration has a value obtained from an HTTP header, or from
   // the media index/metadata. Accessed on the state machine thread.
   PRPackedBool mGotDurationFromMetaData;
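
PlaySilence() caps each write at SILENCE_BYTES_CHUNK, so, as its comment notes, a caller loops until the requested frame count has been written. A hypothetical caller loop (framesToWrite, frameOffset, and channels stand in for the real caller-side state):

    // Hypothetical caller loop; PlaySilence() returns the frames it
    // actually wrote, which is at most one SILENCE_BYTES_CHUNK's worth.
    PRUint32 framesToWrite = missingFrames;   // assumed caller-side count
    PRUint64 frameOffset = playedFrames;      // assumed running frame offset
    while (framesToWrite > 0) {
      PRUint32 written = PlaySilence(framesToWrite, channels, frameOffset);
      framesToWrite -= written;
      frameOffset += written;
    }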
--- a/content/media/ogg/nsOggReader.cpp
+++ b/content/media/ogg/nsOggReader.cpp
@@ -350,39 +350,39 @@ nsresult nsOggReader::DecodeVorbis(ogg_p
   }
   if (vorbis_synthesis_blockin(&mVorbisState->mDsp,
                                &mVorbisState->mBlock) != 0)
   {
     return NS_ERROR_FAILURE;
   }
 
   VorbisPCMValue** pcm = 0;
-  PRInt32 samples = 0;
+  PRInt32 frames = 0;
   PRUint32 channels = mVorbisState->mInfo.channels;
-  ogg_int64_t endSample = aPacket->granulepos;
-  while ((samples = vorbis_synthesis_pcmout(&mVorbisState->mDsp, &pcm)) > 0) {
-    mVorbisState->ValidateVorbisPacketSamples(aPacket, samples);
-    nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[samples * channels]);
+  ogg_int64_t endFrame = aPacket->granulepos;
+  while ((frames = vorbis_synthesis_pcmout(&mVorbisState->mDsp, &pcm)) > 0) {
+    mVorbisState->ValidateVorbisPacketSamples(aPacket, frames);
+    nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * channels]);
     for (PRUint32 j = 0; j < channels; ++j) {
       VorbisPCMValue* channel = pcm[j];
-      for (PRUint32 i = 0; i < PRUint32(samples); ++i) {
+      for (PRUint32 i = 0; i < PRUint32(frames); ++i) {
         buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
       }
     }
 
-    PRInt64 duration = mVorbisState->Time((PRInt64)samples);
-    PRInt64 startTime = mVorbisState->Time(endSample - samples);
+    PRInt64 duration = mVorbisState->Time((PRInt64)frames);
+    PRInt64 startTime = mVorbisState->Time(endFrame - frames);
     mAudioQueue.Push(new AudioData(mPageOffset,
                                    startTime,
                                    duration,
-                                   samples,
+                                   frames,
                                    buffer.forget(),
                                    channels));
-    endSample -= samples;
-    if (vorbis_synthesis_read(&mVorbisState->mDsp, samples) != 0) {
+    endFrame -= frames;
+    if (vorbis_synthesis_read(&mVorbisState->mDsp, frames) != 0) {
       return NS_ERROR_FAILURE;
     }
   }
   return NS_OK;
 }
 
 PRBool nsOggReader::DecodeAudioData()
 {
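
The copy loop in DecodeVorbis() turns libvorbis's planar output (one pointer per channel) into the interleaved layout AudioData expects; frame i, channel j lands at index i * channels + j. For concreteness:

    // Interleaved layout produced by the loop above, for channels == 2
    // and frames == 3 (6 samples total):
    //   L0 R0 L1 R1 L2 R2
    // i.e. buffer[i * channels + j] holds frame i of channel j.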
--- a/content/media/wave/nsWaveReader.cpp
+++ b/content/media/wave/nsWaveReader.cpp
@@ -180,34 +180,34 @@ PRBool nsWaveReader::DecodeAudioData()
 
   PRInt64 pos = GetPosition() - mWavePCMOffset;
   PRInt64 len = GetDataLength();
   PRInt64 remaining = len - pos;
   NS_ASSERTION(remaining >= 0, "Current wave position is greater than wave file length");
 
   static const PRInt64 BLOCK_SIZE = 4096;
   PRInt64 readSize = NS_MIN(BLOCK_SIZE, remaining);
-  PRInt64 samples = readSize / mSampleSize;
+  PRInt64 frames = readSize / mFrameSize;
 
   PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(AudioDataValue) / MAX_CHANNELS);
-  const size_t bufferSize = static_cast<size_t>(samples * mChannels);
+  const size_t bufferSize = static_cast<size_t>(frames * mChannels);
   nsAutoArrayPtr<AudioDataValue> sampleBuffer(new AudioDataValue[bufferSize]);
 
   PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(char));
   nsAutoArrayPtr<char> dataBuffer(new char[static_cast<size_t>(readSize)]);
 
   if (!ReadAll(dataBuffer, readSize)) {
     mAudioQueue.Finish();
     return PR_FALSE;
   }
 
   // convert data to samples
   const char* d = dataBuffer.get();
   AudioDataValue* s = sampleBuffer.get();
-  for (int i = 0; i < samples; ++i) {
+  for (int i = 0; i < frames; ++i) {
     for (unsigned int j = 0; j < mChannels; ++j) {
       if (mSampleFormat == nsAudioStream::FORMAT_U8) {
         PRUint8 v =  ReadUint8(&d);
 #if defined(MOZ_SAMPLE_TYPE_S16LE)
         *s++ = (v * (1.F/PR_UINT8_MAX)) * PR_UINT16_MAX + PR_INT16_MIN;
 #elif defined(MOZ_SAMPLE_TYPE_FLOAT32)
         *s++ = (v * (1.F/PR_UINT8_MAX)) * 2.F - 1.F;
 #endif
@@ -222,22 +222,22 @@ PRBool nsWaveReader::DecodeAudioData()
       }
     }
   }
 
   double posTime = BytesToTime(pos);
   double readSizeTime = BytesToTime(readSize);
   NS_ASSERTION(posTime <= PR_INT64_MAX / USECS_PER_S, "posTime overflow");
   NS_ASSERTION(readSizeTime <= PR_INT64_MAX / USECS_PER_S, "readSizeTime overflow");
-  NS_ASSERTION(samples < PR_INT32_MAX, "samples overflow");
+  NS_ASSERTION(frames < PR_INT32_MAX, "frames overflow");
 
   mAudioQueue.Push(new AudioData(pos,
                                  static_cast<PRInt64>(posTime * USECS_PER_S),
                                  static_cast<PRInt64>(readSizeTime * USECS_PER_S),
-                                 static_cast<PRInt32>(samples),
+                                 static_cast<PRInt32>(frames),
                                  sampleBuffer.forget(),
                                  mChannels));
 
   return PR_TRUE;
 }
 
 PRBool nsWaveReader::DecodeVideoFrame(PRBool &aKeyframeSkip,
                                       PRInt64 aTimeThreshold)
@@ -253,17 +253,17 @@ nsresult nsWaveReader::Seek(PRInt64 aTar
   LOG(PR_LOG_DEBUG, ("%p About to seek to %lld", mDecoder, aTarget));
   if (NS_FAILED(ResetDecode())) {
     return NS_ERROR_FAILURE;
   }
   double d = BytesToTime(GetDataLength());
   NS_ASSERTION(d < PR_INT64_MAX / USECS_PER_S, "Duration overflow"); 
   PRInt64 duration = static_cast<PRInt64>(d * USECS_PER_S);
   double seekTime = NS_MIN(aTarget, duration) / static_cast<double>(USECS_PER_S);
-  PRInt64 position = RoundDownToSample(static_cast<PRInt64>(TimeToBytes(seekTime)));
+  PRInt64 position = RoundDownToFrame(static_cast<PRInt64>(TimeToBytes(seekTime)));
   NS_ASSERTION(PR_INT64_MAX - mWavePCMOffset > position, "Integer overflow during wave seek");
   position += mWavePCMOffset;
   return mDecoder->GetCurrentStream()->Seek(nsISeekableStream::NS_SEEK_SET, position);
 }
 
 static double RoundToUsecs(double aSeconds) {
   return floor(aSeconds * USECS_PER_S) / USECS_PER_S;
 }
@@ -380,17 +380,17 @@ nsWaveReader::ScanForwardUntil(PRUint32 
       chunkSize -= size;
     }
   }
 }
 
 PRBool
 nsWaveReader::LoadFormatChunk()
 {
-  PRUint32 fmtSize, rate, channels, sampleSize, sampleFormat;
+  PRUint32 fmtSize, rate, channels, frameSize, sampleFormat;
   char waveFormat[WAVE_FORMAT_CHUNK_SIZE];
   const char* p = waveFormat;
 
   // RIFF chunks are always word (two byte) aligned.
   NS_ABORT_IF_FALSE(mDecoder->GetCurrentStream()->Tell() % 2 == 0,
                     "LoadFormatChunk called with unaligned stream");
 
   // The "format" chunk may not directly follow the "riff" chunk, so skip
@@ -415,17 +415,17 @@ nsWaveReader::LoadFormatChunk()
   }
 
   channels = ReadUint16LE(&p);
   rate = ReadUint32LE(&p);
 
   // Skip over average bytes per second field.
   p += 4;
 
-  sampleSize = ReadUint16LE(&p);
+  frameSize = ReadUint16LE(&p);
 
   sampleFormat = ReadUint16LE(&p);
 
   // PCM encoded WAVEs are not expected to have an extended "format" chunk,
   // but I have found WAVEs that have an extended "format" chunk with an
   // extension size of 0 bytes.  Be polite and handle this rather than
   // considering the file invalid.  This code skips any extension of the
   // "format" chunk.
@@ -458,26 +458,26 @@ nsWaveReader::LoadFormatChunk()
   NS_ABORT_IF_FALSE(mDecoder->GetCurrentStream()->Tell() % 2 == 0,
                     "LoadFormatChunk left stream unaligned");
 
   // Make sure metadata is fairly sane.  The rate check is fairly arbitrary,
   // but the channels check is intentionally limited to mono or stereo
   // because that's what the audio backend currently supports.
   if (rate < 100 || rate > 96000 ||
       channels < 1 || channels > MAX_CHANNELS ||
-      (sampleSize != 1 && sampleSize != 2 && sampleSize != 4) ||
+      (frameSize != 1 && frameSize != 2 && frameSize != 4) ||
       (sampleFormat != 8 && sampleFormat != 16)) {
     NS_WARNING("Invalid WAVE metadata");
     return PR_FALSE;
   }
 
   ReentrantMonitorAutoEnter monitor(mDecoder->GetReentrantMonitor());
   mSampleRate = rate;
   mChannels = channels;
-  mSampleSize = sampleSize;
+  mFrameSize = frameSize;
   if (sampleFormat == 8) {
     mSampleFormat = nsAudioStream::FORMAT_U8;
   } else {
     mSampleFormat = nsAudioStream::FORMAT_S16_LE;
   }
   return PR_TRUE;
 }
 
@@ -506,31 +506,31 @@ nsWaveReader::FindDataOffset()
   mWavePCMOffset = PRUint32(offset);
   return PR_TRUE;
 }
 
 double
 nsWaveReader::BytesToTime(PRInt64 aBytes) const
 {
   NS_ABORT_IF_FALSE(aBytes >= 0, "Must be >= 0");
-  return float(aBytes) / mSampleRate / mSampleSize;
+  return float(aBytes) / mSampleRate / mFrameSize;
 }
 
 PRInt64
 nsWaveReader::TimeToBytes(double aTime) const
 {
   NS_ABORT_IF_FALSE(aTime >= 0.0f, "Must be >= 0");
-  return RoundDownToSample(PRInt64(aTime * mSampleRate * mSampleSize));
+  return RoundDownToFrame(PRInt64(aTime * mSampleRate * mFrameSize));
 }
 
 PRInt64
-nsWaveReader::RoundDownToSample(PRInt64 aBytes) const
+nsWaveReader::RoundDownToFrame(PRInt64 aBytes) const
 {
   NS_ABORT_IF_FALSE(aBytes >= 0, "Must be >= 0");
-  return aBytes - (aBytes % mSampleSize);
+  return aBytes - (aBytes % mFrameSize);
 }
 
 PRInt64
 nsWaveReader::GetDataLength()
 {
   PRInt64 length = mWaveLength;
   // If the decoder has a valid content length, and it's shorter than the
   // expected length of the PCM data, calculate the playback duration from
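
With mSampleSize renamed to mFrameSize, the conversions above read naturally: one frame occupies channels * bytes-per-sample bytes, and seconds = bytes / rate / frameSize. A worked example using the 16-bit stereo 44.1 kHz figures from the header comments:

    // Worked example; values are illustrative.
    PRUint32 rate = 44100;       // frames per second
    PRUint32 frameSize = 2 * 2;  // 2 channels * 2 bytes per 16-bit sample
    PRInt64 bytes = 176400;      // one second of PCM at these parameters
    double seconds = double(bytes) / rate / frameSize;  // == 1.0
    PRInt64 aligned = bytes - (bytes % frameSize);      // RoundDownToFrame()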
--- a/content/media/wave/nsWaveReader.h
+++ b/content/media/wave/nsWaveReader.h
@@ -79,36 +79,36 @@ private:
   // stereo 44.1kHz. The time is rounded to the nearest microsecond.
   double BytesToTime(PRInt64 aBytes) const;
 
   // Returns the number of bytes that aTime represents based on the current
   // audio parameters.  e.g.  1 second is 176400 bytes at 16-bit stereo
   // 44.1kHz.
   PRInt64 TimeToBytes(double aTime) const;
 
-  // Rounds aBytes down to the nearest complete sample.  Assumes beginning
-  // of byte range is already sample aligned by caller.
-  PRInt64 RoundDownToSample(PRInt64 aBytes) const;
+  // Rounds aBytes down to the nearest complete audio frame.  Assumes
+  // beginning of byte range is already frame aligned by caller.
+  PRInt64 RoundDownToFrame(PRInt64 aBytes) const;
   PRInt64 GetDataLength();
   PRInt64 GetPosition();
 
   /*
     Metadata extracted from the WAVE header.  Used to initialize the audio
     stream, and for byte<->time domain conversions.
   */
 
   // Number of samples per second.  Limited to range [100, 96000] in LoadFormatChunk.
   PRUint32 mSampleRate;
 
   // Number of channels.  Limited to range [1, 2] in LoadFormatChunk.
   PRUint32 mChannels;
 
-  // Size of a single sample segment, which includes a sample for each
-  // channel (interleaved).
-  PRUint32 mSampleSize;
+  // Size of a single audio frame, which includes a sample for each channel
+  // (interleaved).
+  PRUint32 mFrameSize;
 
   // The sample format of the PCM data.
   nsAudioStream::SampleFormat mSampleFormat;
 
   // Size of PCM data stored in the WAVE as reported by the data chunk in
   // the media.
   PRInt64 mWaveLength;
 
--- a/content/media/webm/nsWebMReader.cpp
+++ b/content/media/webm/nsWebMReader.cpp
@@ -130,17 +130,17 @@ static int64_t webm_tell(void *aUserData
 nsWebMReader::nsWebMReader(nsBuiltinDecoder* aDecoder)
   : nsBuiltinDecoderReader(aDecoder),
   mContext(nsnull),
   mPacketCount(0),
   mChannels(0),
   mVideoTrack(0),
   mAudioTrack(0),
   mAudioStartUsec(-1),
-  mAudioSamples(0),
+  mAudioFrames(0),
   mHasVideo(PR_FALSE),
   mHasAudio(PR_FALSE)
 {
   MOZ_COUNT_CTOR(nsWebMReader);
 }
 
 nsWebMReader::~nsWebMReader()
 {
@@ -176,17 +176,17 @@ nsresult nsWebMReader::Init(nsBuiltinDec
     mBufferedState = new nsWebMBufferedState;
   }
 
   return NS_OK;
 }
 
 nsresult nsWebMReader::ResetDecode()
 {
-  mAudioSamples = 0;
+  mAudioFrames = 0;
   mAudioStartUsec = -1;
   nsresult res = NS_OK;
   if (NS_FAILED(nsBuiltinDecoderReader::ResetDecode())) {
     res = NS_ERROR_FAILURE;
   }
 
   // Ignore failed results from vorbis_synthesis_restart. They
   // aren't fatal and it fails when ResetDecode is called at a
@@ -430,43 +430,43 @@ PRBool nsWebMReader::DecodeAudioPacket(n
     // This is the first audio chunk. Assume the start time of our decode
     // is the start of this chunk.
     mAudioStartUsec = tstamp_usecs;
   }
   // If there's a gap between the start of this audio chunk and the end of
   // the previous audio chunk, we need to increment the packet count so that
   // the vorbis decode doesn't use data from before the gap to help decode
   // from after the gap.
-  PRInt64 tstamp_samples = 0;
-  if (!UsecsToSamples(tstamp_usecs, rate, tstamp_samples)) {
-    NS_WARNING("Int overflow converting WebM timestamp to samples");
+  PRInt64 tstamp_frames = 0;
+  if (!UsecsToFrames(tstamp_usecs, rate, tstamp_frames)) {
+    NS_WARNING("Int overflow converting WebM timestamp to frames");
     return PR_FALSE;
   }
-  PRInt64 decoded_samples = 0;
-  if (!UsecsToSamples(mAudioStartUsec, rate, decoded_samples)) {
-    NS_WARNING("Int overflow converting WebM start time to samples");
+  PRInt64 decoded_frames = 0;
+  if (!UsecsToFrames(mAudioStartUsec, rate, decoded_frames)) {
+    NS_WARNING("Int overflow converting WebM start time to frames");
     return PR_FALSE;
   }
-  if (!AddOverflow(decoded_samples, mAudioSamples, decoded_samples)) {
-    NS_WARNING("Int overflow adding decoded_samples");
+  if (!AddOverflow(decoded_frames, mAudioFrames, decoded_frames)) {
+    NS_WARNING("Int overflow adding decoded_frames");
     return PR_FALSE;
   }
-  if (tstamp_samples > decoded_samples) {
+  if (tstamp_frames > decoded_frames) {
 #ifdef DEBUG
     PRInt64 usecs = 0;
-    LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lld, %lld samples, in audio stream\n",
-      SamplesToUsecs(tstamp_samples - decoded_samples, rate, usecs) ? usecs: -1,
-      tstamp_samples - decoded_samples));
+    LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lld, %lld frames, in audio stream\n",
+      FramesToUsecs(tstamp_frames - decoded_frames, rate, usecs) ? usecs: -1,
+      tstamp_frames - decoded_frames));
 #endif
     mPacketCount++;
     mAudioStartUsec = tstamp_usecs;
-    mAudioSamples = 0;
+    mAudioFrames = 0;
   }
 
-  PRInt32 total_samples = 0;
+  PRInt32 total_frames = 0;
   for (PRUint32 i = 0; i < count; ++i) {
     unsigned char* data;
     size_t length;
     r = nestegg_packet_data(aPacket, i, &data, &length);
     if (r == -1) {
       return PR_FALSE;
     }
 
@@ -477,47 +477,47 @@ PRBool nsWebMReader::DecodeAudioPacket(n
     }
 
     if (vorbis_synthesis_blockin(&mVorbisDsp,
                                  &mVorbisBlock) != 0) {
       return PR_FALSE;
     }
 
     VorbisPCMValue** pcm = 0;
-    PRInt32 samples = 0;
-    while ((samples = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm)) > 0) {
-      nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[samples * mChannels]);
+    PRInt32 frames = 0;
+    while ((frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm)) > 0) {
+      nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * mChannels]);
       for (PRUint32 j = 0; j < mChannels; ++j) {
         VorbisPCMValue* channel = pcm[j];
-        for (PRUint32 i = 0; i < PRUint32(samples); ++i) {
+        for (PRUint32 i = 0; i < PRUint32(frames); ++i) {
           buffer[i*mChannels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
         }
       }
 
       PRInt64 duration = 0;
-      if (!SamplesToUsecs(samples, rate, duration)) {
+      if (!FramesToUsecs(frames, rate, duration)) {
         NS_WARNING("Int overflow converting WebM audio duration");
         return PR_FALSE;
       }
       PRInt64 total_duration = 0;
-      if (!SamplesToUsecs(total_samples, rate, total_duration)) {
+      if (!FramesToUsecs(total_frames, rate, total_duration)) {
         NS_WARNING("Int overflow converting WebM audio total_duration");
         return PR_FALSE;
       }
       
       PRInt64 time = tstamp_usecs + total_duration;
-      total_samples += samples;
+      total_frames += frames;
       mAudioQueue.Push(new AudioData(aOffset,
                                      time,
                                      duration,
-                                     samples,
+                                     frames,
                                      buffer.forget(),
                                      mChannels));
-      mAudioSamples += samples;
-      if (vorbis_synthesis_read(&mVorbisDsp, samples) != 0) {
+      mAudioFrames += frames;
+      if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
         return PR_FALSE;
       }
     }
   }
 
   return PR_TRUE;
 }
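
The gap check in DecodeAudioPacket() compares the chunk's timestamp, converted to frames, against the frames actually decoded since mAudioStartUsec. A numeric sketch of that comparison (made-up values; the overflow checks are elided for brevity):

    // Gap-detection sketch at a 48 kHz rate; values are illustrative.
    PRInt64 tstamp_frames = 0, decoded_frames = 0;
    UsecsToFrames(100000, 48000, tstamp_frames);  // chunk starts at 100 ms -> 4800
    UsecsToFrames(0, 48000, decoded_frames);      // decode began at 0 us -> 0
    decoded_frames += 4320;                       // 90 ms of frames decoded so far
    if (tstamp_frames > decoded_frames) {
      // A 480-frame (10 ms) gap: bump mPacketCount and reset mAudioFrames
      // so stale Vorbis state isn't used across the discontinuity.
    }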
 
--- a/content/media/webm/nsWebMReader.h
+++ b/content/media/webm/nsWebMReader.h
@@ -210,21 +210,21 @@ private:
   // must only be accessed from the state machine thread.
   PacketQueue mVideoPackets;
   PacketQueue mAudioPackets;
 
   // Index of video and audio track to play
   PRUint32 mVideoTrack;
   PRUint32 mAudioTrack;
 
-  // Time in microseconds of the start of the first audio sample we've decoded.
+  // Time in microseconds of the start of the first audio frame we've decoded.
   PRInt64 mAudioStartUsec;
 
-  // Number of samples we've decoded since decoding began at mAudioStartMs.
-  PRUint64 mAudioSamples;
+  // Number of audio frames we've decoded since decoding began at mAudioStartUsec.
+  PRUint64 mAudioFrames;
 
   // Parser state and computed offset-time mappings.  Shared by multiple
   // readers when decoder has been cloned.  Main thread only.
   nsRefPtr<nsWebMBufferedState> mBufferedState;
 
   // Size of the frame initially present in the stream. The picture region
   // is defined as a ratio relative to this.
   nsIntSize mInitialFrame;
--- a/dom/ipc/AudioChild.cpp
+++ b/dom/ipc/AudioChild.cpp
@@ -1,9 +1,9 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set sw=2 ts=2 et tw=80 : */
 /* ***** BEGIN LICENSE BLOCK *****
  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
  *
  * The contents of this file are subject to the Mozilla Public License Version
  * 1.1 (the "License"); you may not use this file except in compliance with
  * the License. You may obtain a copy of the License at
  * http://www.mozilla.org/MPL/
@@ -40,19 +40,19 @@
 #include "mozilla/dom/AudioChild.h"
 
 namespace mozilla {
 namespace dom {
 NS_IMPL_THREADSAFE_ADDREF(AudioChild);
 NS_IMPL_THREADSAFE_RELEASE(AudioChild);
 
 AudioChild::AudioChild()
-  : mLastSampleOffset(-1),
-    mLastSampleOffsetTime(0),
-    mMinWriteSample(-2),// Initial value, -2, error on -1
+  : mLastPosition(-1),
+    mLastPositionTimestamp(0),
+    mMinWriteSize(-2), // Initial value is -2; -1 indicates an error.
     mAudioReentrantMonitor("AudioChild.mReentrantMonitor"),
     mIPCOpen(PR_TRUE),
     mDrained(PR_FALSE)
 {
   MOZ_COUNT_CTOR(AudioChild);
 }
 
 AudioChild::~AudioChild()
@@ -62,67 +62,68 @@ AudioChild::~AudioChild()
 
 void
 AudioChild::ActorDestroy(ActorDestroyReason aWhy)
 {
   mIPCOpen = PR_FALSE;
 }
 
 bool
-AudioChild::RecvSampleOffsetUpdate(const PRInt64& offset,
-                                   const PRInt64& time)
+AudioChild::RecvPositionInFramesUpdate(const PRInt64& position,
+                                       const PRInt64& time)
 {
-  mLastSampleOffset = offset;
-  mLastSampleOffsetTime = time;
+  mLastPosition = position;
+  mLastPositionTimestamp = time;
   return true;
 }
 
 bool
 AudioChild::RecvDrainDone()
 {
   ReentrantMonitorAutoEnter mon(mAudioReentrantMonitor);
   mDrained = PR_TRUE;
   mAudioReentrantMonitor.NotifyAll();
   return true;
 }
 
 PRInt32
-AudioChild::WaitForMinWriteSample()
+AudioChild::WaitForMinWriteSize()
 {
   ReentrantMonitorAutoEnter mon(mAudioReentrantMonitor);
   // -2 : initial value
-  while (mMinWriteSample == -2 && mIPCOpen)
+  while (mMinWriteSize == -2 && mIPCOpen) {
     mAudioReentrantMonitor.Wait();
-  return mMinWriteSample;
+  }
+  return mMinWriteSize;
 }
 
 bool
-AudioChild::RecvMinWriteSampleDone(const PRInt32& minSamples)
+AudioChild::RecvMinWriteSizeDone(const PRInt32& minFrames)
 {
   ReentrantMonitorAutoEnter mon(mAudioReentrantMonitor);
-  mMinWriteSample = minSamples;
+  mMinWriteSize = minFrames;
   mAudioReentrantMonitor.NotifyAll();
   return true;
 }
 
 void
 AudioChild::WaitForDrain()
 {
   ReentrantMonitorAutoEnter mon(mAudioReentrantMonitor);
   while (!mDrained && mIPCOpen) {
     mAudioReentrantMonitor.Wait();
   }
 }
 
 PRInt64
-AudioChild::GetLastKnownSampleOffset()
+AudioChild::GetLastKnownPosition()
 {
-  return mLastSampleOffset;
+  return mLastPosition;
 }
 
 PRInt64
-AudioChild::GetLastKnownSampleOffsetTime()
+AudioChild::GetLastKnownPositionTimestamp()
 {
-  return mLastSampleOffsetTime;
+  return mLastPositionTimestamp;
 }
 
 } // namespace dom
 } // namespace mozilla
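
WaitForMinWriteSize() uses -2 as a "no reply yet" sentinel and blocks on the reentrant monitor until RecvMinWriteSizeDone() stores the real value (or -1 on error) and notifies. A hypothetical content-process caller, assuming the IPDL-generated SendMinWriteSize() sender for the MinWriteSize message declared in PAudio.ipdl:

    // Hypothetical caller; SendMinWriteSize() is the assumed
    // IPDL-generated sender for the MinWriteSize message.
    mAudioChild->SendMinWriteSize();                         // async request
    PRInt32 minFrames = mAudioChild->WaitForMinWriteSize();  // blocks for reply
    if (minFrames < 0) {
      // -1 means a parent-side error; -2 can only escape the wait if
      // the IPC channel closed.
    }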
--- a/dom/ipc/AudioChild.h
+++ b/dom/ipc/AudioChild.h
@@ -49,32 +49,33 @@ namespace dom {
 class AudioChild : public PAudioChild
 {
  public:
     NS_IMETHOD_(nsrefcnt) AddRef();
     NS_IMETHOD_(nsrefcnt) Release();
 
     AudioChild();
     virtual ~AudioChild();
-    virtual bool RecvSampleOffsetUpdate(const PRInt64&, const PRInt64&);
+    virtual bool RecvPositionInFramesUpdate(const PRInt64&, const PRInt64&);
     virtual bool RecvDrainDone();
-    virtual PRInt32 WaitForMinWriteSample();
-    virtual bool RecvMinWriteSampleDone(const PRInt32& sampleCount);
+    virtual PRInt32 WaitForMinWriteSize();
+    virtual bool RecvMinWriteSizeDone(const PRInt32& frameCount);
     virtual void WaitForDrain();
     virtual void ActorDestroy(ActorDestroyReason);
 
-    PRInt64 GetLastKnownSampleOffset();
-    PRInt64 GetLastKnownSampleOffsetTime();
+    PRInt64 GetLastKnownPosition();
+    PRInt64 GetLastKnownPositionTimestamp();
 
     PRBool IsIPCOpen() { return mIPCOpen; };
  private:
     nsAutoRefCnt mRefCnt;
     NS_DECL_OWNINGTHREAD
-    PRInt64 mLastSampleOffset, mLastSampleOffsetTime;
-    PRInt32 mMinWriteSample;
+    PRInt64 mLastPosition;
+    PRInt64 mLastPositionTimestamp;
+    PRInt32 mMinWriteSize;
     mozilla::ReentrantMonitor mAudioReentrantMonitor;
     PRPackedBool mIPCOpen;
     PRPackedBool mDrained;
 };
 
 } // namespace dom
 } // namespace mozilla
 
--- a/dom/ipc/AudioParent.cpp
+++ b/dom/ipc/AudioParent.cpp
@@ -43,33 +43,33 @@
 
 // C++ file contents
 namespace mozilla {
 namespace dom {
 
 class AudioWriteEvent : public nsRunnable
 {
  public:
-  AudioWriteEvent(nsAudioStream* owner, nsCString data, PRUint32 count)
+  AudioWriteEvent(nsAudioStream* owner, nsCString data, PRUint32 frames)
   {
     mOwner = owner;
     mData  = data;
-    mCount = count;
+    mFrames = frames;
   }
 
   NS_IMETHOD Run()
   {
-    mOwner->Write(mData.get(), mCount);
+    mOwner->Write(mData.get(), mFrames);
     return NS_OK;
   }
 
  private:
     nsRefPtr<nsAudioStream> mOwner;
     nsCString mData;
-    PRUint32  mCount;
+    PRUint32  mFrames;
 };
 
 class AudioPauseEvent : public nsRunnable
 {
  public:
   AudioPauseEvent(nsAudioStream* owner, PRBool aPause)
   {
     mOwner = owner;
@@ -104,49 +104,49 @@ class AudioStreamShutdownEvent : public 
     return NS_OK;
   }
 
  private:
     nsRefPtr<nsAudioStream> mOwner;
 };
 
 
-class AudioMinWriteSampleDone : public nsRunnable
+class AudioMinWriteSizeDone : public nsRunnable
 {
  public:
-  AudioMinWriteSampleDone(AudioParent* owner, PRInt32 minSamples)
+  AudioMinWriteSizeDone(AudioParent* owner, PRInt32 minFrames)
   {
     mOwner = owner;
-    mMinSamples = minSamples;
+    mMinFrames = minFrames;
   }
 
   NS_IMETHOD Run()
   {
-    mOwner->SendMinWriteSampleDone(mMinSamples);
+    mOwner->SendMinWriteSizeDone(mMinFrames);
     return NS_OK;
   }
 
  private:
     nsRefPtr<AudioParent> mOwner;
-    PRInt32 mMinSamples;
+    PRInt32 mMinFrames;
 };
 
-class AudioMinWriteSampleEvent : public nsRunnable
+class AudioMinWriteSizeEvent : public nsRunnable
 {
  public:
-  AudioMinWriteSampleEvent(AudioParent* parent, nsAudioStream* owner)
+  AudioMinWriteSizeEvent(AudioParent* parent, nsAudioStream* owner)
   {
     mParent = parent;
     mOwner = owner;
   }
 
   NS_IMETHOD Run()
   {
-    PRInt32 minSamples = mOwner->GetMinWriteSamples();
-    nsCOMPtr<nsIRunnable> event = new AudioMinWriteSampleDone(mParent, minSamples);
+    PRInt32 minFrames = mOwner->GetMinWriteSize();
+    nsCOMPtr<nsIRunnable> event = new AudioMinWriteSizeDone(mParent, minFrames);
     NS_DispatchToMainThread(event);
     return NS_OK;
   }
 
  private:
     nsRefPtr<nsAudioStream> mOwner;
     nsRefPtr<AudioParent> mParent;
 };
@@ -197,49 +197,47 @@ nsresult
 AudioParent::Notify(nsITimer* timer)
 {
   if (!mIPCOpen) {
     timer->Cancel();
     return NS_ERROR_FAILURE;
   }
 
   NS_ASSERTION(mStream, "AudioStream not initialized.");
-  PRInt64 offset = mStream->GetSampleOffset();
-  unused << SendSampleOffsetUpdate(offset, PR_IntervalNow());
+  PRInt64 position = mStream->GetPositionInFrames();
+  unused << SendPositionInFramesUpdate(position, PR_IntervalNow());
   return NS_OK;
 }
 
 bool
-AudioParent::RecvWrite(
-        const nsCString& data,
-        const PRUint32& count)
+AudioParent::RecvWrite(const nsCString& data, const PRUint32& frames)
 {
   if (!mStream)
     return false;
-  nsCOMPtr<nsIRunnable> event = new AudioWriteEvent(mStream, data, count);
+  nsCOMPtr<nsIRunnable> event = new AudioWriteEvent(mStream, data, frames);
   nsCOMPtr<nsIThread> thread = mStream->GetThread();
   thread->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
   return true;
 }
 
 bool
 AudioParent::RecvSetVolume(const float& aVolume)
 {
   if (!mStream)
       return false;
   mStream->SetVolume(aVolume);
   return true;
 }
 
 bool
-AudioParent::RecvMinWriteSample()
+AudioParent::RecvMinWriteSize()
 {
   if (!mStream)
     return false;
-  nsCOMPtr<nsIRunnable> event = new AudioMinWriteSampleEvent(this, mStream);
+  nsCOMPtr<nsIRunnable> event = new AudioMinWriteSizeEvent(this, mStream);
   nsCOMPtr<nsIThread> thread = mStream->GetThread();
   thread->Dispatch(event, nsIEventTarget::DISPATCH_NORMAL);
   return true;
 }
 
 bool
 AudioParent::RecvDrain()
 {
@@ -277,20 +275,20 @@ bool
 AudioParent::RecvShutdown()
 {
   Shutdown();
   unused << PAudioParent::Send__delete__(this);
   return true;
 }
 
 bool
-AudioParent::SendMinWriteSampleDone(PRInt32 minSamples)
+AudioParent::SendMinWriteSizeDone(PRInt32 minFrames)
 {
   if (mIPCOpen)
-    return PAudioParent::SendMinWriteSampleDone(minSamples);
+    return PAudioParent::SendMinWriteSizeDone(minFrames);
   return true;
 }
 
 bool
 AudioParent::SendDrainDone()
 {
   if (mIPCOpen)
     return PAudioParent::SendDrainDone();
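
After the rename, the Write message's units contract is that data carries frames * channels interleaved samples as raw bytes. A sketch of a parent-side sanity check on that invariant (illustrative only; channels is not currently plumbed into AudioWriteEvent):

    // Hypothetical invariant check for RecvWrite(); `channels` is an
    // assumed local, not a member of AudioWriteEvent as written.
    NS_ASSERTION(data.Length() == frames * channels * sizeof(AudioDataValue),
                 "Write payload must contain exactly `frames` audio frames");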
--- a/dom/ipc/AudioParent.h
+++ b/dom/ipc/AudioParent.h
@@ -49,40 +49,38 @@ namespace dom {
 class AudioParent : public PAudioParent, public nsITimerCallback
 {
  public:
 
     NS_DECL_ISUPPORTS
     NS_DECL_NSITIMERCALLBACK
 
     virtual bool
-    RecvWrite(
-            const nsCString& data,
-            const PRUint32& count);
+    RecvWrite(const nsCString& data, const PRUint32& count);
 
     virtual bool
     RecvSetVolume(const float& aVolume);
 
     virtual bool
-    RecvMinWriteSample();
+    RecvMinWriteSize();
 
     virtual bool
     RecvDrain();
 
     virtual bool
     RecvPause();
 
     virtual bool
     RecvResume();
 
     virtual bool
     RecvShutdown();
 
     virtual bool
-    SendMinWriteSampleDone(PRInt32 minSamples);
+    SendMinWriteSizeDone(PRInt32 minFrames);
 
     virtual bool
     SendDrainDone();
 
     AudioParent(PRInt32 aNumChannels, PRInt32 aRate, PRInt32 aFormat);
     virtual ~AudioParent();
     virtual void ActorDestroy(ActorDestroyReason);
 
--- a/dom/ipc/PAudio.ipdl
+++ b/dom/ipc/PAudio.ipdl
@@ -43,31 +43,31 @@ namespace mozilla {
 namespace dom {
 
 protocol PAudio
 {
   manager PContent;
 
 parent:
 
-  Write(nsCString data, PRUint32 count);
+  Write(nsCString data, PRUint32 frames);
 
   SetVolume(float aVolume);
 
-  MinWriteSample();
+  MinWriteSize();
   Drain();
 
   Pause();
   Resume();
   Shutdown();
 
  child:
 
   __delete__();
 
-  SampleOffsetUpdate(PRInt64 offset, PRInt64 time);
-  MinWriteSampleDone(PRInt32 sampleCount);
+  PositionInFramesUpdate(PRInt64 position, PRInt64 time);
+  MinWriteSizeDone(PRInt32 frameCount);
   DrainDone();
 
 };
 
 } // namespace dom
 } // namespace mozilla
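
For reference, the renamed messages pair up as follows (the position update is unsolicited, driven by the timer behind AudioParent::Notify()):

    parent: Write(data, frames)     -> (no reply)
    parent: MinWriteSize()          -> child: MinWriteSizeDone(frameCount)
    parent: Drain()                 -> child: DrainDone()
    child:  PositionInFramesUpdate(position, time)   // timer-driven, unsolicited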