Bug 679269 - Rename SoundData{,Value} to AudioData{,Value} and fix inconsistent use of "sound" vs "audio". r=doublec
author     Matthew Gregan <kinetik@flim.org>
Tue, 16 Aug 2011 17:19:51 +1200
changeset 75448 8813fd93ef5d8d204cd58c13de1ab7cf3f67452c
parent 75447 5a4ca4d59be491a1d8d68ee9ad9c40243682885b
child 75449 1b6414e98c4e552f8994535e0a5aea26cfd9818d
push id    2
push user  bsmedberg@mozilla.com
push date  Fri, 19 Aug 2011 14:38:13 +0000
reviewers  doublec
bugs       679269
milestone  9.0a1
Bug 679269 - Rename SoundData{,Value} to AudioData{,Value} and fix inconsistent use of "sound" vs "audio". r=doublec
content/media/nsAudioAvailableEventManager.cpp
content/media/nsAudioAvailableEventManager.h
content/media/nsAudioStream.h
content/media/nsBuiltinDecoderReader.cpp
content/media/nsBuiltinDecoderReader.h
content/media/nsBuiltinDecoderStateMachine.cpp
content/media/nsBuiltinDecoderStateMachine.h
content/media/ogg/nsOggReader.cpp
content/media/wave/nsWaveReader.cpp
content/media/webm/nsWebMReader.cpp
--- a/content/media/nsAudioAvailableEventManager.cpp
+++ b/content/media/nsAudioAvailableEventManager.cpp
@@ -111,17 +111,17 @@ void nsAudioAvailableEventManager::Dispa
       break;
     }
     nsCOMPtr<nsIRunnable> event = mPendingEvents[0];
     mPendingEvents.RemoveElementAt(0);
     NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
   }
 }
 
-void nsAudioAvailableEventManager::QueueWrittenAudioData(SoundDataValue* aAudioData,
+void nsAudioAvailableEventManager::QueueWrittenAudioData(AudioDataValue* aAudioData,
                                                          PRUint32 aAudioDataLength,
                                                          PRUint64 aEndTimeSampleOffset)
 {
   ReentrantMonitorAutoEnter mon(mReentrantMonitor);
 
   PRUint32 currentBufferSize = mNewSignalBufferLength;
   if (currentBufferSize == 0) {
     NS_WARNING("Decoder framebuffer length not set.");
@@ -131,34 +131,34 @@ void nsAudioAvailableEventManager::Queue
   if (!mSignalBuffer ||
       (mSignalBufferPosition == 0 && mSignalBufferLength != currentBufferSize)) {
     if (!mSignalBuffer || (mSignalBufferLength < currentBufferSize)) {
       // Only resize if buffer is empty or smaller.
       mSignalBuffer = new float[currentBufferSize];
     }
     mSignalBufferLength = currentBufferSize;
   }
-  SoundDataValue* audioData = aAudioData;
+  AudioDataValue* audioData = aAudioData;
   PRUint32 audioDataLength = aAudioDataLength;
   PRUint32 signalBufferTail = mSignalBufferLength - mSignalBufferPosition;
 
   // Group audio samples into optimal size for event dispatch, and queue.
   while (signalBufferTail <= audioDataLength) {
     float time = 0.0;
     // Guard against unsigned number overflow during first frame time calculation.
     if (aEndTimeSampleOffset > mSignalBufferPosition + audioDataLength) {
       time = (aEndTimeSampleOffset - mSignalBufferPosition - audioDataLength) / 
              mSamplesPerSecond;
     }
 
     // Fill the signalBuffer.
     PRUint32 i;
     float *signalBuffer = mSignalBuffer.get() + mSignalBufferPosition;
     for (i = 0; i < signalBufferTail; ++i) {
-      signalBuffer[i] = MOZ_CONVERT_SOUND_SAMPLE(audioData[i]);
+      signalBuffer[i] = MOZ_CONVERT_AUDIO_SAMPLE(audioData[i]);
     }
     audioData += signalBufferTail;
     audioDataLength -= signalBufferTail;
 
     if (mPendingEvents.Length() > 0) {
       // Check last event timecode to make sure that all queued events
       // are in non-descending sequence.
       nsAudioAvailableEventRunner* lastPendingEvent =
@@ -167,17 +167,17 @@ void nsAudioAvailableEventManager::Queue
         // Clear the queue to start a fresh sequence.
         mPendingEvents.Clear();
       } else if (mPendingEvents.Length() >= MAX_PENDING_EVENTS) {
         NS_WARNING("Hit audio event queue max.");
         mPendingEvents.RemoveElementsAt(0, mPendingEvents.Length() - MAX_PENDING_EVENTS + 1);
       }
     }
 
-    // Inform the element that we've written sound data.
+    // Inform the element that we've written audio data.
     nsCOMPtr<nsIRunnable> event =
       new nsAudioAvailableEventRunner(mDecoder, mSignalBuffer.forget(),
                                       mSignalBufferLength, time);
     mPendingEvents.AppendElement(event);
 
     // Reset the buffer
     mSignalBufferLength = currentBufferSize;
     mSignalBuffer = new float[currentBufferSize];
@@ -189,17 +189,17 @@ void nsAudioAvailableEventManager::Queue
   NS_ASSERTION(mSignalBufferPosition + audioDataLength < mSignalBufferLength,
                "Intermediate signal buffer must fit at least one more item.");
 
   if (audioDataLength > 0) {
     // Add data to the signalBuffer.
     PRUint32 i;
     float *signalBuffer = mSignalBuffer.get() + mSignalBufferPosition;
     for (i = 0; i < audioDataLength; ++i) {
-      signalBuffer[i] = MOZ_CONVERT_SOUND_SAMPLE(audioData[i]);
+      signalBuffer[i] = MOZ_CONVERT_AUDIO_SAMPLE(audioData[i]);
     }
     mSignalBufferPosition += audioDataLength;
   }
 }
 
 void nsAudioAvailableEventManager::Clear()
 {
   ReentrantMonitorAutoEnter mon(mReentrantMonitor);
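
The re-packaging loop in QueueWrittenAudioData above reduces to a small standalone sketch. Illustrative only, not code from the patch: std::vector stands in for nsAutoArrayPtr, and a generic dispatch callback stands in for queueing an nsAudioAvailableEventRunner.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Sketch: split an arbitrary-length run of samples into fixed-size
    // framebuffers, carrying any remainder over to the next call.
    // Assumes aBuffer is non-empty (the real code warns and bails if the
    // framebuffer length is unset).
    template <typename Dispatch>
    void RepackageSamples(const float* aData, size_t aLength,
                          std::vector<float>& aBuffer, size_t& aPosition,
                          Dispatch aDispatch)
    {
      size_t tail = aBuffer.size() - aPosition;  // space left in the buffer
      while (tail <= aLength) {
        std::copy(aData, aData + tail, aBuffer.begin() + aPosition);
        aDispatch(aBuffer);                      // one full framebuffer ready
        aData += tail;
        aLength -= tail;
        aPosition = 0;
        tail = aBuffer.size();
      }
      // Stash the remainder; it becomes the head of the next framebuffer.
      std::copy(aData, aData + aLength, aBuffer.begin() + aPosition);
      aPosition += aLength;
    }
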
--- a/content/media/nsAudioAvailableEventManager.h
+++ b/content/media/nsAudioAvailableEventManager.h
@@ -58,17 +58,17 @@ public:
   void Init(PRUint32 aChannels, PRUint32 aRate);
 
   // Dispatch pending MozAudioAvailable events in the queue.  Called
   // from the state machine thread.
   void DispatchPendingEvents(PRUint64 aCurrentTime);
 
   // Queues audio sample data and re-packages it into equal-sized
   // framebuffers.  Called from the audio thread.
-  void QueueWrittenAudioData(SoundDataValue* aAudioData,
+  void QueueWrittenAudioData(AudioDataValue* aAudioData,
                              PRUint32 aAudioDataLength,
                              PRUint64 aEndTimeSampleOffset);
 
   // Clears the queue of any existing events.  Called from both the state
   // machine and audio threads.
   void Clear();
 
   // Fires one last event for any extra samples that didn't fit in a whole
--- a/content/media/nsAudioStream.h
+++ b/content/media/nsAudioStream.h
@@ -69,33 +69,33 @@ public:
   nsIThread *GetThread();
 
   // AllocateStream will return either a local stream or a remoted stream
   // depending on where you call it from.  If you call this from a child process,
   // you may receive an implementation which forwards to a compositing process.
   static nsAudioStream* AllocateStream();
 
   // Initialize the audio stream. aNumChannels is the number of audio channels 
-  // (1 for mono, 2 for stereo, etc) and aRate is the frequency of the sound 
+  // (1 for mono, 2 for stereo, etc) and aRate is the frequency of the audio 
   // samples (22050, 44100, etc).
   // Unsafe to call with the decoder monitor held.
   virtual nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat) = 0;
 
   // Closes the stream. All future use of the stream is an error.
   // Unsafe to call with the decoder monitor held.
   virtual void Shutdown() = 0;
 
-  // Write sound data to the audio hardware.  aBuf is an array of samples in
+  // Write audio data to the audio hardware.  aBuf is an array of samples in
   // the format specified by mFormat of length aCount.  aCount should be
   // evenly divisible by the number of channels in this audio stream.  If
   // aCount is larger than the result of Available(), the write will block
   // until sufficient buffer space is available.
   virtual nsresult Write(const void* aBuf, PRUint32 aCount) = 0;
 
-  // Return the number of sound samples that can be written to the audio device
+  // Return the number of audio samples that can be written to the audio device
   // without blocking.
   virtual PRUint32 Available() = 0;
 
   // Set the current volume of the audio playback. This is a value from
   // 0 (meaning muted) to 1 (meaning full volume).
   virtual void SetVolume(double aVolume) = 0;
 
   // Block until buffered audio data has been consumed.
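
A hedged usage sketch of the interface above, following its comments. This is a fragment, not compilable on its own: error handling is elided, buf and numValues are illustrative names, and MOZ_AUDIO_DATA_FORMAT is the macro defined in nsBuiltinDecoderReader.h below.

    // Sketch: driving nsAudioStream per the contract above. Write()'s
    // aCount must be evenly divisible by the channel count, and the call
    // may block if aCount exceeds Available().
    nsRefPtr<nsAudioStream> stream = nsAudioStream::AllocateStream();
    stream->Init(2, 44100, MOZ_AUDIO_DATA_FORMAT);  // stereo at 44100 Hz
    stream->SetVolume(1.0);
    stream->Write(buf, numValues);  // interleaved: L0 R0 L1 R1 ...
    stream->Shutdown();             // stream unusable afterwards
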
--- a/content/media/nsBuiltinDecoderReader.cpp
+++ b/content/media/nsBuiltinDecoderReader.cpp
@@ -223,20 +223,20 @@ VideoData* nsBuiltinDecoderReader::FindS
   if (HasVideo()) {
     videoData = DecodeToFirstData(&nsBuiltinDecoderReader::DecodeVideoFrame,
                                   mVideoQueue);
     if (videoData) {
       videoStartTime = videoData->mTime;
     }
   }
   if (HasAudio()) {
-    SoundData* soundData = DecodeToFirstData(&nsBuiltinDecoderReader::DecodeAudioData,
+    AudioData* audioData = DecodeToFirstData(&nsBuiltinDecoderReader::DecodeAudioData,
                                              mAudioQueue);
-    if (soundData) {
-      audioStartTime = soundData->mTime;
+    if (audioData) {
+      audioStartTime = audioData->mTime;
     }
   }
 
   PRInt64 startTime = NS_MIN(videoStartTime, audioStartTime);
   if (startTime != PR_INT64_MAX) {
     aOutStartTime = startTime;
   }
 
@@ -316,25 +316,25 @@ nsresult nsBuiltinDecoderReader::DecodeT
         eof = !DecodeAudioData();
         {
           ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
           if (mDecoder->GetDecodeState() == nsBuiltinDecoderStateMachine::DECODER_STATE_SHUTDOWN) {
             return NS_ERROR_FAILURE;
           }
         }
       }
-      const SoundData* audio = mAudioQueue.PeekFront();
+      const AudioData* audio = mAudioQueue.PeekFront();
       if (!audio)
         break;
       PRInt64 startSample = 0;
       if (!UsecsToSamples(audio->mTime, mInfo.mAudioRate, startSample)) {
         return NS_ERROR_FAILURE;
       }
       if (startSample + audio->mSamples <= targetSample) {
-        // Our seek target lies after the samples in this SoundData. Pop it
+        // Our seek target lies after the samples in this AudioData. Pop it
         // off the queue, and keep decoding forwards.
         delete mAudioQueue.PopFront();
         audio = nsnull;
         continue;
       }
       if (startSample > targetSample) {
         // The seek target doesn't lie in the audio block just after the last
         // audio samples we've seen which were before the seek target. This
@@ -342,40 +342,40 @@ nsresult nsBuiltinDecoderReader::DecodeT
         // seek terminated after the seek target in the audio stream. Just
         // abort the audio decode-to-target, the state machine will play
         // silence to cover the gap. Typically this happens in poorly muxed
         // files.
         NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
         break;
       }
 
-      // The seek target lies somewhere in this SoundData's samples, strip off
+      // The seek target lies somewhere in this AudioData's samples, strip off
       // any samples which lie before the seek target, so we'll begin playback
       // exactly at the seek target.
       NS_ASSERTION(targetSample >= startSample, "Target must be at or after data start.");
       NS_ASSERTION(targetSample < startSample + audio->mSamples, "Data must end after target.");
 
       PRInt64 samplesToPrune = targetSample - startSample;
       if (samplesToPrune > audio->mSamples) {
         // We've messed up somehow. Don't try to trim samples, the |samples|
         // variable below will overflow.
         NS_WARNING("Can't prune more samples that we have!");
         break;
       }
       PRUint32 samples = audio->mSamples - static_cast<PRUint32>(samplesToPrune);
       PRUint32 channels = audio->mChannels;
-      nsAutoArrayPtr<SoundDataValue> audioData(new SoundDataValue[samples * channels]);
+      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[samples * channels]);
       memcpy(audioData.get(),
              audio->mAudioData.get() + (samplesToPrune * channels),
-             samples * channels * sizeof(SoundDataValue));
+             samples * channels * sizeof(AudioDataValue));
       PRInt64 duration;
       if (!SamplesToUsecs(samples, mInfo.mAudioRate, duration)) {
         return NS_ERROR_FAILURE;
       }
-      nsAutoPtr<SoundData> data(new SoundData(audio->mOffset,
+      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
                                               aTarget,
                                               duration,
                                               samples,
                                               audioData.forget(),
                                               channels));
       delete mAudioQueue.PopFront();
       mAudioQueue.PushFront(data.forget());
       break;
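
The pruning above is plain pointer arithmetic on the interleaved buffer. A minimal sketch with raw floats, assuming startSample <= target < startSample + samples as the assertions enforce (illustrative, not the patch's code):

    #include <cstdint>
    #include <cstring>

    // Sketch: drop the samples of a chunk that precede the seek target so
    // playback begins exactly at the target. Caller owns the returned array.
    float* TrimChunk(const float* aData, uint32_t aSamples, uint32_t aChannels,
                     int64_t aStartSample, int64_t aTargetSample,
                     uint32_t& aOutSamples)
    {
      int64_t prune = aTargetSample - aStartSample;  // samples before target
      aOutSamples = aSamples - static_cast<uint32_t>(prune);
      float* out = new float[aOutSamples * aChannels];
      memcpy(out, aData + prune * aChannels,
             aOutSamples * aChannels * sizeof(float));
      return out;
    }

For example, at 44100 Hz a chunk starting at sample 88200 with 4410 samples and a seek target of sample 90405 keeps only the last 2205 samples.
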
--- a/content/media/nsBuiltinDecoderReader.h
+++ b/content/media/nsBuiltinDecoderReader.h
@@ -88,91 +88,91 @@ public:
 
   // PR_TRUE if we have an active video bitstream.
   PRPackedBool mHasVideo;
 };
 
 #ifdef MOZ_TREMOR
 #include <ogg/os_types.h>
 typedef ogg_int32_t VorbisPCMValue;
-typedef short SoundDataValue;
+typedef short AudioDataValue;
 
-#define MOZ_SOUND_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
 #define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
-// Convert the output of vorbis_synthesis_pcmout to a SoundDataValue
+// Convert the output of vorbis_synthesis_pcmout to an AudioDataValue
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) \
- (static_cast<SoundDataValue>(MOZ_CLIP_TO_15((x)>>9)))
-// Convert a SoundDataValue to a float for the Audio API
-#define MOZ_CONVERT_SOUND_SAMPLE(x) ((x)*(1.F/32768))
+ (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
+// Convert an AudioDataValue to a float for the Audio API
+#define MOZ_CONVERT_AUDIO_SAMPLE(x) ((x)*(1.F/32768))
 #define MOZ_SAMPLE_TYPE_S16LE 1
 
 #else /*MOZ_VORBIS*/
 
 typedef float VorbisPCMValue;
-typedef float SoundDataValue;
+typedef float AudioDataValue;
 
-#define MOZ_SOUND_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
-#define MOZ_CONVERT_SOUND_SAMPLE(x) (x)
+#define MOZ_CONVERT_AUDIO_SAMPLE(x) (x)
 #define MOZ_SAMPLE_TYPE_FLOAT32 1
 
 #endif
 
-// Holds chunk a decoded sound samples.
-class SoundData {
+// Holds a chunk of decoded audio samples.
+class AudioData {
 public:
-  SoundData(PRInt64 aOffset,
+  AudioData(PRInt64 aOffset,
             PRInt64 aTime,
             PRInt64 aDuration,
             PRUint32 aSamples,
-            SoundDataValue* aData,
+            AudioDataValue* aData,
             PRUint32 aChannels)
   : mOffset(aOffset),
     mTime(aTime),
     mDuration(aDuration),
     mSamples(aSamples),
     mChannels(aChannels),
     mAudioData(aData)
   {
-    MOZ_COUNT_CTOR(SoundData);
+    MOZ_COUNT_CTOR(AudioData);
   }
 
-  SoundData(PRInt64 aOffset,
+  AudioData(PRInt64 aOffset,
             PRInt64 aDuration,
             PRUint32 aSamples,
-            SoundDataValue* aData,
+            AudioDataValue* aData,
             PRUint32 aChannels)
   : mOffset(aOffset),
     mTime(-1),
     mDuration(aDuration),
     mSamples(aSamples),
     mChannels(aChannels),
     mAudioData(aData)
   {
-    MOZ_COUNT_CTOR(SoundData);
+    MOZ_COUNT_CTOR(AudioData);
   }
 
-  ~SoundData()
+  ~AudioData()
   {
-    MOZ_COUNT_DTOR(SoundData);
+    MOZ_COUNT_DTOR(AudioData);
   }
 
   PRUint32 AudioDataLength() {
     return mChannels * mSamples;
   }
 
   // Approximate byte offset of the end of the page on which this sample
   // chunk ends.
   const PRInt64 mOffset;
 
   PRInt64 mTime; // Start time of samples in usecs.
   const PRInt64 mDuration; // In usecs.
   const PRUint32 mSamples;
   const PRUint32 mChannels;
-  nsAutoArrayPtr<SoundDataValue> mAudioData;
+  nsAutoArrayPtr<AudioDataValue> mAudioData;
 };
 
 // Holds a decoded video frame, in YCbCr format. These are queued in the reader.
 class VideoData {
 public:
   typedef mozilla::layers::ImageContainer ImageContainer;
   typedef mozilla::layers::Image Image;
 
@@ -452,17 +452,17 @@ public:
   // is the current playback position in microseconds.
   virtual nsresult Seek(PRInt64 aTime,
                         PRInt64 aStartTime,
                         PRInt64 aEndTime,
                         PRInt64 aCurrentTime) = 0;
 
   // Queue of audio samples. This queue is threadsafe, and is accessed from
   // the audio, decoder, state machine, and main threads.
-  MediaQueue<SoundData> mAudioQueue;
+  MediaQueue<AudioData> mAudioQueue;
 
   // Queue of video samples. This queue is threadsafe, and is accessed from
   // the decoder, state machine, and main threads.
   MediaQueue<VideoData> mVideoQueue;
 
   // Populates aBuffered with the time ranges which are buffered. aStartTime
   // must be the presentation time of the first sample/frame in the media, e.g.
   // the media time corresponding to playback time/position 0. This function
@@ -496,18 +496,18 @@ public:
     return functor.mResult;
   }
 
   class AudioQueueMemoryFunctor : public nsDequeFunctor {
   public:
     AudioQueueMemoryFunctor() : mResult(0) {}
 
     virtual void* operator()(void* anObject) {
-      const SoundData* soundData = static_cast<const SoundData*>(anObject);
-      mResult += soundData->mSamples * soundData->mChannels * sizeof(SoundDataValue);
+      const AudioData* audioData = static_cast<const AudioData*>(anObject);
+      mResult += audioData->mSamples * audioData->mChannels * sizeof(AudioDataValue);
       return nsnull;
     }
 
     PRInt64 mResult;
   };
 
   PRInt64 AudioQueueMemoryInUse() {
     AudioQueueMemoryFunctor functor;
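
To make the two build configurations above concrete, here is a quick self-contained check of the sample path in the MOZ_TREMOR (S16) build. The input value 1<<23 is only an illustrative fixed-point PCM value, not a claim about Tremor's exact scale:

    #include <cstdio>

    // Sketch of MOZ_CLIP_TO_15 / the conversion macros defined above.
    static short ClipTo15(int x) { return x < -32768 ? -32768 : x <= 32767 ? x : 32767; }

    int main() {
      int pcm = 1 << 23;               // a Tremor fixed-point PCM value
      short s16 = ClipTo15(pcm >> 9);  // MOZ_CONVERT_VORBIS_SAMPLE: 16384
      float f = s16 * (1.F / 32768);   // MOZ_CONVERT_AUDIO_SAMPLE: 0.5f
      printf("%d -> %d -> %f\n", pcm, s16, f);
      return 0;
    }

In the float (non-Tremor) build both conversion macros are the identity, so samples flow through untouched as 32-bit floats.
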
--- a/content/media/nsBuiltinDecoderStateMachine.cpp
+++ b/content/media/nsBuiltinDecoderStateMachine.cpp
@@ -343,17 +343,17 @@ void nsBuiltinDecoderStateMachine::Decod
   PRInt64 lowAudioThreshold = LOW_AUDIO_USECS;
 
   // Our local ample audio threshold. If we increase lowAudioThreshold, we'll
   // also increase this too appropriately (we don't want lowAudioThreshold to
   // be greater than ampleAudioThreshold, else we'd stop decoding!).
   PRInt64 ampleAudioThreshold = AMPLE_AUDIO_USECS;
 
   MediaQueue<VideoData>& videoQueue = mReader->mVideoQueue;
-  MediaQueue<SoundData>& audioQueue = mReader->mAudioQueue;
+  MediaQueue<AudioData>& audioQueue = mReader->mAudioQueue;
 
   // Main decode loop.
   PRBool videoPlaying = HasVideo();
   PRBool audioPlaying = HasAudio();
   while ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_BUFFERING) &&
          !mStopDecodeThread &&
          (videoPlaying || audioPlaying))
   {
@@ -444,18 +444,18 @@ void nsBuiltinDecoderStateMachine::Decod
                            audioQueue.GetSize() > 0))
         &&
         (!videoPlaying ||
           static_cast<PRUint32>(videoQueue.GetSize()) >= AMPLE_VIDEO_FRAMES))
     {
       // All active bitstreams' decode is well ahead of the playback
       // position, we may as well wait for the playback to catch up. Note the
       // audio push thread acquires and notifies the decoder monitor every time
-      // it pops SoundData off the audio queue. So if the audio push thread pops
-      // the last SoundData off the audio queue right after that queue reported
+      // it pops AudioData off the audio queue. So if the audio push thread pops
+      // the last AudioData off the audio queue right after that queue reported
       // it was non-empty here, we'll receive a notification on the decoder
       // monitor which will wake us up shortly after we sleep, thus preventing
       // both the decode and audio push threads waiting at the same time.
       // See bug 620326.
       mDecodeThreadWaiting = PR_TRUE;
       if (mDecoder->GetState() != nsBuiltinDecoder::PLAY_STATE_PLAYING) {
         // We're not playing, and the decode is about to wait. This means
         // the decode thread may not be needed in future. Signal the state
@@ -511,17 +511,17 @@ void nsBuiltinDecoderStateMachine::Audio
   // monitor held, as on Android those methods do a synchronous dispatch to
   // the main thread. If the audio thread holds the decoder monitor while
   // it does a synchronous dispatch to the main thread, we can get deadlocks
   // if the main thread tries to acquire the decoder monitor before the
   // dispatched event has finished (or even started!) running. Methods which
   // are unsafe to call with the decoder monitor held are documented as such
   // in nsAudioStream.h.
   nsRefPtr<nsAudioStream> audioStream = nsAudioStream::AllocateStream();
-  audioStream->Init(channels, rate, MOZ_SOUND_DATA_FORMAT);
+  audioStream->Init(channels, rate, MOZ_AUDIO_DATA_FORMAT);
 
   {
     // We must hold the monitor while setting mAudioStream or whenever we query
     // the playback position off the audio thread. This ensures the audio stream
     // is always alive when we use it off the audio thread. Note that querying
     // the playback position does not do a synchronous dispatch to the main
     // thread, so it's safe to call with the decoder monitor held.
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@@ -576,17 +576,17 @@ void nsBuiltinDecoderStateMachine::Audio
     }
     if (minWriteSamples == -1) {
       minWriteSamples = mAudioStream->GetMinWriteSamples();
     }
     NS_ASSERTION(mReader->mAudioQueue.GetSize() > 0,
                  "Should have data to play");
     // See if there are missing samples in the audio stream. If there are, push
     // silence into the audio hardware, so we can play across the gap.
-    const SoundData* s = mReader->mAudioQueue.PeekFront();
+    const AudioData* s = mReader->mAudioQueue.PeekFront();
 
     // Calculate the number of samples that have been pushed onto the audio
     // hardware.
     PRInt64 playedSamples = 0;
     if (!UsecsToSamples(audioStartTime, rate, playedSamples)) {
       NS_WARNING("Int overflow converting playedSamples");
       break;
     }
@@ -604,19 +604,19 @@ void nsBuiltinDecoderStateMachine::Audio
     }
     PRInt64 missingSamples = 0;
     if (!AddOverflow(sampleTime, -playedSamples, missingSamples)) {
       NS_WARNING("Int overflow adding missingSamples");
       break;
     }
 
     if (missingSamples > 0) {
-      // The next sound chunk begins some time after the end of the last chunk
-      // we pushed to the sound hardware. We must push silence into the audio
-      // hardware so that the next sound chunk begins playback at the correct
+      // The next audio chunk begins some time after the end of the last chunk
+      // we pushed to the audio hardware. We must push silence into the audio
+      // hardware so that the next audio chunk begins playback at the correct
       // time.
       missingSamples = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX), missingSamples);
       samplesWritten = PlaySilence(static_cast<PRUint32>(missingSamples),
                                    channels, playedSamples);
     } else {
       samplesWritten = PlayFromAudioQueue(sampleTime, channels);
     }
     audioDuration += samplesWritten;
@@ -664,18 +664,18 @@ void nsBuiltinDecoderStateMachine::Audio
         // We've not written minWriteSamples in the last write, so the audio
         // may not start playing. Write silence to ensure we've got enough
         // samples written to start playback.
         PRInt64 samples = minWriteSamples - samplesWritten;
         if (samples < PR_UINT32_MAX / channels) {
           // Write silence manually rather than using PlaySilence(), so that
           // the AudioAPI doesn't get a copy of the samples.
           PRUint32 numValues = samples * channels;
-          nsAutoArrayPtr<SoundDataValue> buf(new SoundDataValue[numValues]);
-          memset(buf.get(), 0, sizeof(SoundDataValue) * numValues);
+          nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numValues]);
+          memset(buf.get(), 0, sizeof(AudioDataValue) * numValues);
           mAudioStream->Write(buf, numValues);
         }
       }
 
       PRInt64 oldPosition = -1;
       PRInt64 position = GetMediaTime();
       while (oldPosition != position &&
              mAudioEndTime - position > 0 &&
@@ -722,31 +722,31 @@ PRUint32 nsBuiltinDecoderStateMachine::P
                                                    PRUint64 aSampleOffset)
 
 {
   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
   PRUint32 maxSamples = SILENCE_BYTES_CHUNK / aChannels;
   PRUint32 samples = NS_MIN(aSamples, maxSamples);
   PRUint32 numValues = samples * aChannels;
-  nsAutoArrayPtr<SoundDataValue> buf(new SoundDataValue[numValues]);
-  memset(buf.get(), 0, sizeof(SoundDataValue) * numValues);
+  nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[numValues]);
+  memset(buf.get(), 0, sizeof(AudioDataValue) * numValues);
   mAudioStream->Write(buf, numValues);
   // Dispatch events to the DOM for the audio just written.
   mEventManager.QueueWrittenAudioData(buf.get(), numValues,
                                       (aSampleOffset + samples) * aChannels);
   return samples;
 }
 
 PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aSampleOffset,
                                                           PRUint32 aChannels)
 {
   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  nsAutoPtr<SoundData> sound(mReader->mAudioQueue.PopFront());
+  nsAutoPtr<AudioData> audioData(mReader->mAudioQueue.PopFront());
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
     // Awaken the decode loop if it's waiting for space to free up in the
     // audio queue.
     mDecoder->GetReentrantMonitor().NotifyAll();
   }
   PRInt64 offset = -1;
@@ -754,29 +754,29 @@ PRUint32 nsBuiltinDecoderStateMachine::P
   // The state machine could have paused since we've released the decoder
   // monitor and acquired the audio monitor. Rather than acquire both
   // monitors, the audio stream also maintains whether it's paused or not.
   // This prevents us from doing a blocking write while holding the audio
   // monitor while paused; we would block, and the state machine won't be
   // able to acquire the audio monitor in order to resume or destroy the
   // audio stream.
   if (!mAudioStream->IsPaused()) {
-    mAudioStream->Write(sound->mAudioData,
-                        sound->AudioDataLength());
+    mAudioStream->Write(audioData->mAudioData,
+                        audioData->AudioDataLength());
 
-    offset = sound->mOffset;
-    samples = sound->mSamples;
+    offset = audioData->mOffset;
+    samples = audioData->mSamples;
 
     // Dispatch events to the DOM for the audio just written.
-    mEventManager.QueueWrittenAudioData(sound->mAudioData.get(),
-                                        sound->AudioDataLength(),
+    mEventManager.QueueWrittenAudioData(audioData->mAudioData.get(),
+                                        audioData->AudioDataLength(),
                                         (aSampleOffset + samples) * aChannels);
   } else {
-    mReader->mAudioQueue.PushFront(sound);
-    sound.forget();
+    mReader->mAudioQueue.PushFront(audioData);
+    audioData.forget();
   }
   if (offset != -1) {
     mDecoder->UpdatePlaybackOffset(offset);
   }
   return samples;
 }
 
 nsresult nsBuiltinDecoderStateMachine::Init(nsDecoderStateMachine* aCloneDonor)
@@ -1307,17 +1307,17 @@ void nsBuiltinDecoderStateMachine::Decod
       // Now perform the seek. We must not hold the state machine monitor
       // while we seek, since the seek reads, which could block on I/O.
       res = mReader->Seek(seekTime,
                           mStartTime,
                           mEndTime,
                           mediaTime);
     }
     if (NS_SUCCEEDED(res)) {
-      SoundData* audio = HasAudio() ? mReader->mAudioQueue.PeekFront() : nsnull;
+      AudioData* audio = HasAudio() ? mReader->mAudioQueue.PeekFront() : nsnull;
       NS_ASSERTION(!audio || (audio->mTime <= seekTime &&
                               seekTime <= audio->mTime + audio->mDuration),
                     "Seek target should lie inside the first audio block after seek");
       PRInt64 startTime = (audio && audio->mTime < seekTime) ? audio->mTime : seekTime;
       mAudioStartTime = startTime;
       mPlayDuration = startTime - mStartTime;
       if (HasVideo()) {
         nsAutoPtr<VideoData> video(mReader->mVideoQueue.PeekFront());
@@ -1644,17 +1644,17 @@ void nsBuiltinDecoderStateMachine::Advan
     PRInt64 audio_time = GetAudioClock();
     if (HasAudio() && !mAudioCompleted && audio_time != -1) {
       clock_time = audio_time;
       // Resync against the audio clock, while we're trusting the
       // audio clock. This ensures no "drift", particularly on Linux.
       mPlayDuration = clock_time - mStartTime;
       mPlayStartTime = TimeStamp::Now();
     } else {
-      // Sound is disabled on this system. Sync to the system clock.
+      // Audio hardware is disabled on this system. Sync to the system clock.
       clock_time = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
       // Ensure the clock can never go backwards.
       NS_ASSERTION(mCurrentFrameTime <= clock_time, "Clock should go forwards");
       clock_time = NS_MAX(mCurrentFrameTime, clock_time) + mStartTime;
     }
   }
 
   // Skip frames up to the frame at the playback position, and figure out
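
The silence-insertion branch earlier in this file turns on converting between microseconds and sample counts at the audio rate. A sketch of that arithmetic; the real code uses the overflow-checked UsecsToSamples and AddOverflow helpers, which this version elides:

    #include <cstdint>

    // Sketch: samples of silence needed to cover the gap between audio
    // already pushed to the hardware and the next chunk's start time.
    int64_t MissingSamples(int64_t aPlayedUsecs, int64_t aNextChunkUsecs,
                           uint32_t aRate)
    {
      int64_t played = aPlayedUsecs * aRate / 1000000;  // usecs -> samples
      int64_t next = aNextChunkUsecs * aRate / 1000000;
      return next - played;  // > 0 means silence must be written first
    }

For example, at 44100 Hz a 10000 usec gap works out to 441 samples of silence per channel before the next chunk can start on time.
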
--- a/content/media/nsBuiltinDecoderStateMachine.h
+++ b/content/media/nsBuiltinDecoderStateMachine.h
@@ -81,33 +81,33 @@ Frame skipping is done in the following 
           will be decoding video data that won't be displayed due
           to the decode thread dropping the frame immediately.
 
 When hardware accelerated graphics is not available, YCbCr conversion
 is done on the decode thread when video frames are decoded.
 
 The decode thread pushes decoded audio and video frames into two
 separate queues - one for audio and one for video. These are kept
-separate to make it easy to constantly feed audio data to the sound
+separate to make it easy to constantly feed audio data to the audio
 hardware while allowing frame skipping of video data. These queues are
 threadsafe, and none of the decode, audio, or state machine threads should
 be able to monopolize them and cause starvation of the other threads.
 
 Both queues are bounded by a maximum size. When this size is reached
 the decode thread will no longer decode video or audio depending on the
 queue that has reached the threshold. If both queues are full, the decode
 thread will wait on the decoder monitor.
 
 When the decode queues are full (they've reached their maximum size) and
 the decoder is not in PLAYING play state, the state machine may opt
 to shut down the decode thread in order to conserve resources.
 
 During playback the audio thread will be idle (via a Wait() on the
 monitor) if the audio queue is empty. Otherwise it constantly pops
-sound data off the queue and plays it with a blocking write to the audio
+audio data off the queue and plays it with a blocking write to the audio
 hardware (via nsAudioStream and libsydneyaudio).
 
 */
 #if !defined(nsBuiltinDecoderStateMachine_h__)
 #define nsBuiltinDecoderStateMachine_h__
 
 #include "prmem.h"
 #include "nsThreadUtils.h"
@@ -355,17 +355,17 @@ protected:
   // hardware. This ensures that the playback position advances smoothly, and
   // guarantees that we don't try to allocate an impossibly large chunk of
   // memory in order to play back silence. Called on the audio thread.
   PRUint32 PlaySilence(PRUint32 aSamples,
                        PRUint32 aChannels,
                        PRUint64 aSampleOffset);
 
   // Pops an audio chunk from the front of the audio queue, and pushes its
-  // sound data to the audio hardware. MozAudioAvailable sample data is also
+  // audio data to the audio hardware. MozAudioAvailable sample data is also
   // queued here. Called on the audio thread.
   PRUint32 PlayFromAudioQueue(PRUint64 aSampleOffset, PRUint32 aChannels);
 
   // Stops the decode thread. The decoder monitor must be held with exactly
   // one lock count. Called on the state machine thread.
   void StopDecodeThread();
 
   // Stops the audio thread. The decoder monitor must be held with exactly
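
The comment block at the top of this header describes the queue discipline between the decode and audio threads. A minimal sketch of that shape, using std::mutex and std::condition_variable in place of the decoder's ReentrantMonitor (illustrative only; the real MediaQueue is declared in nsBuiltinDecoderReader.h and predates C++11):

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    // Sketch: a bounded, monitor-guarded queue. The decode thread stops
    // pushing while AtCapacity() is true; pops notify so it can resume.
    template <typename T>
    class BoundedQueue {
      std::deque<T*> mQueue;
      std::mutex mMonitor;
      std::condition_variable mCondVar;
      const size_t mMaxSize;
    public:
      explicit BoundedQueue(size_t aMaxSize) : mMaxSize(aMaxSize) {}
      bool AtCapacity() {
        std::lock_guard<std::mutex> lock(mMonitor);
        return mQueue.size() >= mMaxSize;
      }
      void Push(T* aItem) {
        std::lock_guard<std::mutex> lock(mMonitor);
        mQueue.push_back(aItem);
        mCondVar.notify_all();  // wake an audio thread waiting on data
      }
      T* PopFront() {
        std::lock_guard<std::mutex> lock(mMonitor);
        if (mQueue.empty()) return nullptr;
        T* item = mQueue.front();
        mQueue.pop_front();
        mCondVar.notify_all();  // wake a decode thread waiting on space
        return item;
      }
    };
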
--- a/content/media/ogg/nsOggReader.cpp
+++ b/content/media/ogg/nsOggReader.cpp
@@ -362,27 +362,27 @@ nsresult nsOggReader::DecodeVorbis(ogg_p
   }
 
   VorbisPCMValue** pcm = 0;
   PRInt32 samples = 0;
   PRUint32 channels = mVorbisState->mInfo.channels;
   ogg_int64_t endSample = aPacket->granulepos;
   while ((samples = vorbis_synthesis_pcmout(&mVorbisState->mDsp, &pcm)) > 0) {
     mVorbisState->ValidateVorbisPacketSamples(aPacket, samples);
-    nsAutoArrayPtr<SoundDataValue> buffer(new SoundDataValue[samples * channels]);
+    nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[samples * channels]);
     for (PRUint32 j = 0; j < channels; ++j) {
       VorbisPCMValue* channel = pcm[j];
       for (PRUint32 i = 0; i < PRUint32(samples); ++i) {
         buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
       }
     }
 
     PRInt64 duration = mVorbisState->Time((PRInt64)samples);
     PRInt64 startTime = mVorbisState->Time(endSample - samples);
-    mAudioQueue.Push(new SoundData(mPageOffset,
+    mAudioQueue.Push(new AudioData(mPageOffset,
                                    startTime,
                                    duration,
                                    samples,
                                    buffer.forget(),
                                    channels));
     endSample -= samples;
     if (vorbis_synthesis_read(&mVorbisState->mDsp, samples) != 0) {
       return NS_ERROR_FAILURE;
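
The Ogg loop above and the WebM loop further down interleave Vorbis's planar output the same way. The loop in isolation (sketch only, with the conversion macro dropped):

    #include <cstdint>

    // Sketch: repack planar PCM (pcm[channel][sample]) into the interleaved
    // buffer[sample * channels + channel] layout that AudioData stores.
    void Interleave(float** aPcm, uint32_t aChannels, uint32_t aSamples,
                    float* aBuffer)
    {
      for (uint32_t j = 0; j < aChannels; ++j) {
        const float* channel = aPcm[j];
        for (uint32_t i = 0; i < aSamples; ++i) {
          aBuffer[i * aChannels + j] = channel[i];  // real code applies
        }                                           // MOZ_CONVERT_VORBIS_SAMPLE
      }
    }
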
--- a/content/media/wave/nsWaveReader.cpp
+++ b/content/media/wave/nsWaveReader.cpp
@@ -182,31 +182,31 @@ PRBool nsWaveReader::DecodeAudioData()
   PRInt64 len = GetDataLength();
   PRInt64 remaining = len - pos;
   NS_ASSERTION(remaining >= 0, "Current wave position is greater than wave file length");
 
   static const PRInt64 BLOCK_SIZE = 4096;
   PRInt64 readSize = NS_MIN(BLOCK_SIZE, remaining);
   PRInt64 samples = readSize / mSampleSize;
 
-  PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(SoundDataValue) / MAX_CHANNELS);
+  PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(AudioDataValue) / MAX_CHANNELS);
   const size_t bufferSize = static_cast<size_t>(samples * mChannels);
-  nsAutoArrayPtr<SoundDataValue> sampleBuffer(new SoundDataValue[bufferSize]);
+  nsAutoArrayPtr<AudioDataValue> sampleBuffer(new AudioDataValue[bufferSize]);
 
   PR_STATIC_ASSERT(PRUint64(BLOCK_SIZE) < UINT_MAX / sizeof(char));
   nsAutoArrayPtr<char> dataBuffer(new char[static_cast<size_t>(readSize)]);
 
   if (!ReadAll(dataBuffer, readSize)) {
     mAudioQueue.Finish();
     return PR_FALSE;
   }
 
   // convert data to samples
   const char* d = dataBuffer.get();
-  SoundDataValue* s = sampleBuffer.get();
+  AudioDataValue* s = sampleBuffer.get();
   for (int i = 0; i < samples; ++i) {
     for (unsigned int j = 0; j < mChannels; ++j) {
       if (mSampleFormat == nsAudioStream::FORMAT_U8) {
         PRUint8 v =  ReadUint8(&d);
 #if defined(MOZ_SAMPLE_TYPE_S16LE)
         *s++ = (v * (1.F/PR_UINT8_MAX)) * PR_UINT16_MAX + PR_INT16_MIN;
 #elif defined(MOZ_SAMPLE_TYPE_FLOAT32)
         *s++ = (v * (1.F/PR_UINT8_MAX)) * 2.F - 1.F;
@@ -224,17 +224,17 @@ PRBool nsWaveReader::DecodeAudioData()
   }
 
   double posTime = BytesToTime(pos);
   double readSizeTime = BytesToTime(readSize);
   NS_ASSERTION(posTime <= PR_INT64_MAX / USECS_PER_S, "posTime overflow");
   NS_ASSERTION(readSizeTime <= PR_INT64_MAX / USECS_PER_S, "readSizeTime overflow");
   NS_ASSERTION(samples < PR_INT32_MAX, "samples overflow");
 
-  mAudioQueue.Push(new SoundData(pos,
+  mAudioQueue.Push(new AudioData(pos,
                                  static_cast<PRInt64>(posTime * USECS_PER_S),
                                  static_cast<PRInt64>(readSizeTime * USECS_PER_S),
                                  static_cast<PRInt32>(samples),
                                  sampleBuffer.forget(),
                                  mChannels));
 
   return PR_TRUE;
 }
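
The u8 expansions in the conversion loop above can be sanity-checked at the endpoints. A small standalone sketch showing both build variants side by side, where the real code selects one at compile time via the MOZ_SAMPLE_TYPE_* defines:

    #include <cstdio>

    // Sketch: expand an unsigned 8-bit WAVE sample to each output format.
    int main() {
      const int vals[] = {0, 128, 255};
      for (int i = 0; i < 3; ++i) {
        int v = vals[i];
        float f = (v * (1.F / 255)) * 2.F - 1.F;              // FLOAT32 build
        short s = (short)((v * (1.F / 255)) * 65535 - 32768); // S16LE build
        printf("u8 %3d -> float %+f, s16 %+d\n", v, f, s);
      }
      return 0;  // endpoints map to -1/+1 and -32768/+32767
    }
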
--- a/content/media/webm/nsWebMReader.cpp
+++ b/content/media/webm/nsWebMReader.cpp
@@ -426,18 +426,18 @@ PRBool nsWebMReader::DecodeAudioPacket(n
 
   const PRUint32 rate = mVorbisDsp.vi->rate;
   PRUint64 tstamp_usecs = tstamp / NS_PER_USEC;
   if (mAudioStartUsec == -1) {
     // This is the first audio chunk. Assume the start time of our decode
     // is the start of this chunk.
     mAudioStartUsec = tstamp_usecs;
   }
-  // If there's a gap between the start of this sound chunk and the end of
-  // the previous sound chunk, we need to increment the packet count so that
+  // If there's a gap between the start of this audio chunk and the end of
+  // the previous audio chunk, we need to increment the packet count so that
   // the vorbis decode doesn't use data from before the gap to help decode
   // from after the gap.
   PRInt64 tstamp_samples = 0;
   if (!UsecsToSamples(tstamp_usecs, rate, tstamp_samples)) {
     NS_WARNING("Int overflow converting WebM timestamp to samples");
     return PR_FALSE;
   }
   PRInt64 decoded_samples = 0;
@@ -479,17 +479,17 @@ PRBool nsWebMReader::DecodeAudioPacket(n
     if (vorbis_synthesis_blockin(&mVorbisDsp,
                                  &mVorbisBlock) != 0) {
       return PR_FALSE;
     }
 
     VorbisPCMValue** pcm = 0;
     PRInt32 samples = 0;
     while ((samples = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm)) > 0) {
-      nsAutoArrayPtr<SoundDataValue> buffer(new SoundDataValue[samples * mChannels]);
+      nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[samples * mChannels]);
       for (PRUint32 j = 0; j < mChannels; ++j) {
         VorbisPCMValue* channel = pcm[j];
         for (PRUint32 i = 0; i < PRUint32(samples); ++i) {
           buffer[i*mChannels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
         }
       }
 
       PRInt64 duration = 0;
@@ -500,17 +500,17 @@ PRBool nsWebMReader::DecodeAudioPacket(n
       PRInt64 total_duration = 0;
       if (!SamplesToUsecs(total_samples, rate, total_duration)) {
         NS_WARNING("Int overflow converting WebM audio total_duration");
         return PR_FALSE;
       }
       
       PRInt64 time = tstamp_usecs + total_duration;
       total_samples += samples;
-      mAudioQueue.Push(new SoundData(aOffset,
+      mAudioQueue.Push(new AudioData(aOffset,
                                      time,
                                      duration,
                                      samples,
                                      buffer.forget(),
                                      mChannels));
       mAudioSamples += samples;
       if (vorbis_synthesis_read(&mVorbisDsp, samples) != 0) {
         return PR_FALSE;
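
Each decoded chunk's start time above is the packet timestamp plus the duration of the samples already emitted from that packet. A sketch of that bookkeeping, with the overflow checks of the real SamplesToUsecs elided:

    #include <cstdint>

    // Sketch: start time of the next PCM chunk decoded from a WebM packet
    // with timestamp aTstampUsecs, after aTotalSamples have already been
    // emitted from that packet.
    int64_t ChunkStartUsecs(int64_t aTstampUsecs, int64_t aTotalSamples,
                            uint32_t aRate)
    {
      int64_t totalDuration = aTotalSamples * 1000000 / aRate;  // -> usecs
      return aTstampUsecs + totalDuration;
    }

Called with aTotalSamples accumulated across successive vorbis_synthesis_pcmout() calls, exactly as total_samples is in the loop above.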