Bug 463929. Support 8-bit WAV streams. r+sr=roc
author Matthew Gregan <kinetik@flim.org>
Mon, 10 Nov 2008 21:12:13 +1300
changeset 21554 7d06dac3fe83ee23adf6c319fdaea9ceea59292d
parent 21553 60733588d1233f966e91c1da597c0235d28d17d9
child 21555 b5f3b30402cb214670288f958462b48f253ef601
push id 3573
push user rocallahan@mozilla.com
push date Mon, 10 Nov 2008 08:14:35 +0000
bugs 463929
milestone 1.9.1b2pre
Bug 463929. Support 8-bit WAV streams. r+sr=roc
content/media/video/public/nsAudioStream.h
content/media/video/public/nsWaveDecoder.h
content/media/video/src/nsAudioStream.cpp
content/media/video/src/nsOggDecoder.cpp
content/media/video/src/nsWaveDecoder.cpp
content/media/video/test/Makefile.in
content/media/video/test/r11025_u8_c1.wav
content/media/video/test/test_wav_8bit.html
content/media/video/test/test_wav_ended1.html
--- a/content/media/video/public/nsAudioStream.h
+++ b/content/media/video/public/nsAudioStream.h
@@ -41,43 +41,45 @@
 #include "nscore.h"
 #include "prlog.h"
 
 extern PRLogModuleInfo* gAudioStreamLog;
 
 class nsAudioStream 
 {
  public:
-  // Initialize Audio Library. Some Audio backends (eg. PortAudio) require initializing
+  enum SampleFormat
+  {
+    FORMAT_U8,
+    FORMAT_S16_LE,
+    FORMAT_FLOAT32_LE
+  };
+
+  // Initialize Audio Library. Some Audio backends require initializing the
   // library before using it. 
   static void InitLibrary();
 
-  // Shutdown Audio Library. Some Audio backends (eg. PortAudio) require shutting down
-  // the library after using it. 
+  // Shutdown Audio Library. Some Audio backends require shutting down the
+  // library after using it.
   static void ShutdownLibrary();
 
   nsAudioStream();
 
   // Initialize the audio stream. aNumChannels is the number of audio channels 
   // (1 for mono, 2 for stereo, etc) and aRate is the frequency of the sound 
   // samples (22050, 44100, etc).
-  void Init(PRInt32 aNumChannels, PRInt32 aRate);
+  void Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
 
   // Closes the stream. All future use of the stream is an error.
   void Shutdown();
 
-  // Write sound data to the audio hardware. aBuf is an array of floats of
-  // length aCount. aCount should be evenly divisible by the number of 
-  // channels in this audio stream.
-  void Write(const float* aBuf, PRUint32 aCount);
-
-  // Write sound data to the audio hardware.  aBuf is an array of shorts in
-  // signed 16-bit little endian format of length aCount.  Acount should be
+  // Write sound data to the audio hardware.  aBuf is an array of samples in
+  // the format specified by mFormat of length aCount.  aCount should be
   // evenly divisible by the number of channels in this audio stream.
-  void Write(const short* aBuf, PRUint32 aCount);
+  void Write(const void* aBuf, PRUint32 aCount);
 
   // Return the number of sound samples that can be written to the audio device
   // without blocking.
   PRInt32 Available();
 
   // Store in aVolume the value of the volume setting. This is a value from
   // 0 (meaning muted) to 1 (meaning full volume).
   float GetVolume();
@@ -108,11 +110,13 @@ class nsAudioStream
   // The byte position in the audio buffer where playback was last paused.
   PRInt64 mSavedPauseBytes;
   PRInt64 mPauseBytes;
 
   float mStartTime;
   float mPauseTime;
   PRInt64 mSamplesBuffered;
 
+  SampleFormat mFormat;
+
   PRPackedBool mPaused;
 };
 #endif
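
A hypothetical caller sketch against the reworked nsAudioStream interface above (illustration only, not part of the changeset; the rate, channel count, and buffer contents are invented). It plays one second of 8-bit unsigned mono silence using the new FORMAT_U8 sample format and the single Write(const void*, PRUint32) entry point.

```cpp
// Hypothetical caller (illustration only): play one second of 8-bit
// unsigned mono silence through the reworked nsAudioStream interface.
#include "nsAudioStream.h"

void PlayOneSecondOfU8Silence()
{
  nsAudioStream::InitLibrary();

  nsAudioStream stream;
  // 1 channel at 11025 Hz, unsigned 8-bit samples -- the same shape as the
  // r11025_u8_c1.wav test file added by this patch.
  stream.Init(1, 11025, nsAudioStream::FORMAT_U8);
  stream.SetVolume(1.0f);

  // 8-bit WAV data is unsigned, with silence at the midpoint value 128.
  PRUint8 samples[11025];
  for (PRUint32 i = 0; i < 11025; ++i) {
    samples[i] = 128;
  }

  // aCount is a sample count (not a byte count) and must be evenly
  // divisible by the number of channels.
  stream.Write(samples, 11025);
  stream.Drain();
  stream.Shutdown();

  nsAudioStream::ShutdownLibrary();
}
```

Ogg playback keeps passing floats by initializing with FORMAT_FLOAT32_LE, as the nsOggDecoder.cpp hunk further down shows.
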
--- a/content/media/video/public/nsWaveDecoder.h
+++ b/content/media/video/public/nsWaveDecoder.h
@@ -119,25 +119,23 @@
 
   SHUTDOWN         -> exits state machine
 
   In addition, the following methods cause state transitions:
 
   Shutdown(), Play(), Pause(), Seek(float)
 
   The decoder implementation is currently limited to Linear PCM encoded
-  audio data with one or two channels of 16-bit samples at sample rates from
-  100 Hz to 96 kHz.  The sample size and number of channels (and, to some
-  extent, the sample format) is limited by what the audio backend
-  (sydneyaudio via nsAudioStream) currently supports.  sydneyaudio's API
-  suggests support of 8-bit, µ-Law, and A-Law samples, but this support is
-  not currently implemented.  The supported sample rate is artificially
-  limited to arbitrarily selected sane values.  Support for additional
-  channels (and other new features) would require extending nsWaveDecoder to
-  support parsing the newer WAVE_FORMAT_EXTENSIBLE chunk format.
+  audio data with one or two channels of 8- or 16-bit samples at sample
+  rates from 100 Hz to 96 kHz.  The number of channels is limited by what
+  the audio backend (sydneyaudio via nsAudioStream) currently supports.  The
+  supported sample rate is artificially limited to arbitrarily selected sane
+  values.  Support for additional channels (and other new features) would
+  require extending nsWaveDecoder to support parsing the newer
+  WAVE_FORMAT_EXTENSIBLE chunk format.
  */
 
 class nsWaveStateMachine;
 
 class nsWaveDecoder : public nsMediaDecoder
 {
   friend class nsWaveStateMachine;
 
--- a/content/media/video/src/nsAudioStream.cpp
+++ b/content/media/video/src/nsAudioStream.cpp
@@ -71,137 +71,142 @@ nsAudioStream::nsAudioStream() :
   mVolume(1.0),
   mAudioHandle(0),
   mRate(0),
   mChannels(0),
   mSavedPauseBytes(0),
   mPauseBytes(0),
   mPauseTime(0.0),
   mSamplesBuffered(0),
+  mFormat(FORMAT_S16_LE),
   mPaused(PR_FALSE)
 {
 }
 
-void nsAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate)
+void nsAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
 {
   mRate = aRate;
   mChannels = aNumChannels;
+  mFormat = aFormat;
   mStartTime = CurrentTimeInSeconds();
   if (sa_stream_create_pcm(reinterpret_cast<sa_stream_t**>(&mAudioHandle),
                            NULL, 
                            SA_MODE_WRONLY, 
                            SA_PCM_FORMAT_S16_LE,
                            aRate,
                            aNumChannels) != SA_SUCCESS) {
     mAudioHandle = nsnull;
     PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_create_pcm error"));
     return;
   }
   
-  if (sa_stream_open(reinterpret_cast<sa_stream_t*>(mAudioHandle)) != SA_SUCCESS) {
-    sa_stream_destroy((sa_stream_t*)mAudioHandle);
+  if (sa_stream_open(static_cast<sa_stream_t*>(mAudioHandle)) != SA_SUCCESS) {
+    sa_stream_destroy(static_cast<sa_stream_t*>(mAudioHandle));
     mAudioHandle = nsnull;
     PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_open error"));
     return;
   }
 }
 
 void nsAudioStream::Shutdown()
 {
   if (!mAudioHandle) 
     return;
 
-  sa_stream_destroy(reinterpret_cast<sa_stream_t*>(mAudioHandle));
+  sa_stream_destroy(static_cast<sa_stream_t*>(mAudioHandle));
   mAudioHandle = nsnull;
 }
 
-void nsAudioStream::Write(const float* aBuf, PRUint32 aCount)
+void nsAudioStream::Write(const void* aBuf, PRUint32 aCount)
 {
-  mSamplesBuffered += aCount;
-
-  if (!mAudioHandle)
-    return;
-
-  // Convert array of floats, to an array of signed shorts
-  nsAutoArrayPtr<short> s_data(new short[aCount]);
+  NS_ABORT_IF_FALSE(aCount % mChannels == 0,
+                    "Buffer size must be divisible by channel count");
 
-  if (s_data) {
-    for (PRUint32 i=0; i <  aCount; ++i) {
-      float scaled_value = floorf(0.5 + 32768 * aBuf[i] * mVolume);
-      if (aBuf[i] < 0.0) {
-        s_data[i] = (scaled_value < -32768.0) ? 
-          -32768 : 
-          short(scaled_value);
-      }
-      else {
-        s_data[i] = (scaled_value > 32767.0) ? 
-          32767 : 
-          short(scaled_value);
-      }
-    }
-    
-    if (sa_stream_write(reinterpret_cast<sa_stream_t*>(mAudioHandle), s_data.get(), aCount * sizeof(short)) != SA_SUCCESS) {
-      PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_write error"));
-      Shutdown();
-    }
-  }
-}
-
-void nsAudioStream::Write(const short* aBuf, PRUint32 aCount)
-{
   mSamplesBuffered += aCount;
 
   if (!mAudioHandle)
     return;
 
   nsAutoArrayPtr<short> s_data(new short[aCount]);
 
   if (s_data) {
-    for (PRUint32 i = 0; i < aCount; ++i) {
-      s_data[i] = aBuf[i] * mVolume;
+    switch (mFormat) {
+    case FORMAT_U8: {
+      const PRUint8* buf = static_cast<const PRUint8*>(aBuf);
+      PRInt32 volume = PRInt32((1 << 16) * mVolume);
+      for (PRUint32 i = 0; i < aCount; ++i) {
+        s_data[i] = short(((PRInt32(buf[i]) - 128) * volume) >> 8);
+      }
+      break;
+    }
+    case FORMAT_S16_LE: {
+      const short* buf = static_cast<const short*>(aBuf);
+      PRInt32 volume = PRInt32((1 << 16) * mVolume);
+      for (PRUint32 i = 0; i < aCount; ++i) {
+        s_data[i] = short((PRInt32(buf[i]) * volume) >> 16);
+      }
+      break;
+    }
+    case FORMAT_FLOAT32_LE: {
+      const float* buf = static_cast<const float*>(aBuf);
+      for (PRUint32 i= 0; i <  aCount; ++i) {
+        float scaled_value = floorf(0.5 + 32768 * buf[i] * mVolume);
+        if (buf[i] < 0.0) {
+          s_data[i] = (scaled_value < -32768.0) ?
+            -32768 :
+            short(scaled_value);
+        } else {
+          s_data[i] = (scaled_value > 32767.0) ?
+            32767 :
+            short(scaled_value);
+        }
+      }
+      break;
+    }
     }
 
-    if (sa_stream_write(reinterpret_cast<sa_stream_t*>(mAudioHandle), s_data.get(), aCount * sizeof(short)) != SA_SUCCESS) {
+    if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle), s_data.get(), aCount * sizeof(short)) != SA_SUCCESS) {
       PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_write error"));
       Shutdown();
     }
   }
 }
 
 PRInt32 nsAudioStream::Available()
 {
   // If the audio backend failed to open, lie and say we'll accept some
   // data.
   if (!mAudioHandle)
     return FAKE_BUFFER_SIZE;
 
   size_t s = 0; 
-  sa_stream_get_write_size(reinterpret_cast<sa_stream_t*>(mAudioHandle), &s);
+  sa_stream_get_write_size(static_cast<sa_stream_t*>(mAudioHandle), &s);
   return s / sizeof(short);
 }
 
 float nsAudioStream::GetVolume()
 {
   return mVolume;
 }
 
 void nsAudioStream::SetVolume(float aVolume)
 {
+  NS_ASSERTION(aVolume >= 0.0 && aVolume <= 1.0, "Invalid volume");
   mVolume = aVolume;
 }
 
 void nsAudioStream::Drain()
 {
   if (!mAudioHandle) {
     PRUint32 drainTime = (float(mSamplesBuffered) / mRate / mChannels - GetTime()) * 1000.0;
     PR_Sleep(PR_MillisecondsToInterval(drainTime));
     return;
   }
 
-  if (sa_stream_drain(reinterpret_cast<sa_stream_t*>(mAudioHandle)) != SA_SUCCESS) {
+  if (sa_stream_drain(static_cast<sa_stream_t*>(mAudioHandle)) != SA_SUCCESS) {
         PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_drain error"));
         Shutdown();
   }
 }
 
 void nsAudioStream::Pause()
 {
   if (mPaused)
@@ -213,51 +218,51 @@ void nsAudioStream::Pause()
 
   mPaused = PR_TRUE;
 
   if (!mAudioHandle)
     return;
 
   int64_t bytes = 0;
 #if !defined(WIN32)
-  sa_stream_get_position(reinterpret_cast<sa_stream_t*>(mAudioHandle), SA_POSITION_WRITE_SOFTWARE, &bytes);
+  sa_stream_get_position(static_cast<sa_stream_t*>(mAudioHandle), SA_POSITION_WRITE_SOFTWARE, &bytes);
 #endif
   mSavedPauseBytes = bytes;
 
-  sa_stream_pause(reinterpret_cast<sa_stream_t*>(mAudioHandle));
+  sa_stream_pause(static_cast<sa_stream_t*>(mAudioHandle));
 }
 
 void nsAudioStream::Resume()
 {
   if (!mPaused)
     return;
 
   // Reset the start time to the current time offset backwards by the
   // elapsed time saved when the stream paused.
   mStartTime = CurrentTimeInSeconds() - mPauseTime;
 
   mPaused = PR_FALSE;
 
   if (!mAudioHandle)
     return;
 
-  sa_stream_resume(reinterpret_cast<sa_stream_t*>(mAudioHandle));
+  sa_stream_resume(static_cast<sa_stream_t*>(mAudioHandle));
 
 #if !defined(WIN32)
   mPauseBytes += mSavedPauseBytes;
 #endif
 }
 
 double nsAudioStream::GetTime()
 {
   // If the audio backend failed to open, emulate the current playback
   // position using the system clock.
   if (!mAudioHandle)
     return mPaused ? mPauseTime : CurrentTimeInSeconds() - mStartTime;
 
   int64_t bytes = 0;
 #if defined(WIN32)
-  sa_stream_get_position(reinterpret_cast<sa_stream_t*>(mAudioHandle), SA_POSITION_WRITE_HARDWARE, &bytes);
+  sa_stream_get_position(static_cast<sa_stream_t*>(mAudioHandle), SA_POSITION_WRITE_HARDWARE, &bytes);
 #else
-  sa_stream_get_position(reinterpret_cast<sa_stream_t*>(mAudioHandle), SA_POSITION_WRITE_SOFTWARE, &bytes);
+  sa_stream_get_position(static_cast<sa_stream_t*>(mAudioHandle), SA_POSITION_WRITE_SOFTWARE, &bytes);
 #endif
   return double(bytes + mPauseBytes) / (sizeof(short) * mChannels * mRate);
 }
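
The FORMAT_U8 and FORMAT_S16_LE branches in Write() above apply the volume as a 16.16 fixed-point multiplier rather than the per-sample float math kept for FORMAT_FLOAT32_LE. A small self-contained sketch (standard C++ only, not Mozilla code) of the unsigned 8-bit path, showing how re-centring around 128 and the final `>> 8` land values in the signed 16-bit range:

```cpp
// Standalone sketch of the FORMAT_U8 path: unsigned 8-bit samples are
// re-centred around zero and scaled by a 16.16 fixed-point volume, so no
// floating-point work is needed per sample.
#include <cstdint>
#include <cstdio>

static int16_t U8ToS16(uint8_t sample, float volume)
{
  int32_t v = int32_t((1 << 16) * volume);          // 16.16 fixed-point volume
  return int16_t(((int32_t(sample) - 128) * v) >> 8);
}

int main()
{
  // Minimum, midpoint (silence), and maximum input at full volume.
  std::printf("%d %d %d\n",
              U8ToS16(0, 1.0f),    // -32768
              U8ToS16(128, 1.0f),  //      0
              U8ToS16(255, 1.0f)); //  32512
  return 0;
}
```

At full volume this maps 0 to -32768, 128 to 0, and 255 to 32512 (127 * 256), matching the FORMAT_U8 branch above.
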
--- a/content/media/video/src/nsOggDecoder.cpp
+++ b/content/media/video/src/nsOggDecoder.cpp
@@ -703,17 +703,17 @@ PRBool nsOggDecodeStateMachine::PlayAudi
 void nsOggDecodeStateMachine::OpenAudioStream()
 {
   //  NS_ASSERTION(PR_InMonitor(mDecoder->GetMonitor()), "OpenAudioStream() called without acquiring decoder monitor");
   mAudioStream = new nsAudioStream();
   if (!mAudioStream) {
     LOG(PR_LOG_ERROR, ("Could not create audio stream"));
   }
   else {
-    mAudioStream->Init(mAudioChannels, mAudioRate);
+    mAudioStream->Init(mAudioChannels, mAudioRate, nsAudioStream::FORMAT_FLOAT32_LE);
     mAudioStream->SetVolume(mVolume);
   }
 }
 
 void nsOggDecodeStateMachine::CloseAudioStream()
 {
   //  NS_ASSERTION(PR_InMonitor(mDecoder->GetMonitor()), "CloseAudioStream() called without acquiring decoder monitor");
   if (mAudioStream) {
--- a/content/media/video/src/nsWaveDecoder.cpp
+++ b/content/media/video/src/nsWaveDecoder.cpp
@@ -164,17 +164,17 @@ private:
   void CloseAudioStream();
 
   // Read RIFF_INITIAL_SIZE from the beginning of the stream and verify that
   // the stream data is a RIFF bitstream containing WAVE data.
   PRBool LoadRIFFChunk();
 
   // Scan forward in the stream looking for the WAVE format chunk.  If
   // found, parse and validate required metadata, then use it to set
-  // mSampleRate, mChannels, and mSampleSize.
+  // mSampleRate, mChannels, mSampleSize, and mSampleFormat.
   PRBool LoadFormatChunk();
 
   // Scan forward in the stream looking for the start of the PCM data.  If
   // found, record the data length and offset in mWaveLength and
   // mWavePCMOffset.
   PRBool FindDataOffset();
 
   // Returns the number of seconds that aBytes represents based on the
@@ -243,20 +243,22 @@ private:
 
   // Number of samples per second.  Limited to range [100, 96000] in LoadFormatChunk.
   PRUint32 mSampleRate;
 
   // Number of channels.  Limited to range [1, 2] in LoadFormatChunk.
   PRUint32 mChannels;
 
   // Size of a single sample segment, which includes a sample for each
-  // channel (interleaved).  Limited to 2 or 4 due to requirement for 16-bit
-  // samples and the limit on number of channels.
+  // channel (interleaved).
   PRUint32 mSampleSize;
 
+  // The sample format of the PCM data.
+  nsAudioStream::SampleFormat mSampleFormat;
+
   // Size of PCM data stored in the WAVE as reported by the data chunk in
   // the media.
   PRUint32 mWaveLength;
 
   // Start offset of the PCM data in the media stream.  Extends mWaveLength
   // bytes.
   PRUint32 mWavePCMOffset;
 
@@ -284,32 +286,33 @@ private:
   float mTimeOffset;
 
   // Set when StreamEnded has fired to indicate that we should not expect
   // any more data from mStream than what is already buffered (i.e. what
   // Available() reports).
   PRPackedBool mExpectMoreData;
 
   // True once metadata has been parsed and validated. Users of mSampleRate,
-  // mChannels, mSampleSize, mWaveLength, mWavePCMOffset must check this
-  // flag before assuming the values are valid.
+  // mChannels, mSampleSize, mSampleFormat, mWaveLength, mWavePCMOffset must
+  // check this flag before assuming the values are valid.
   PRPackedBool mMetadataValid;
 };
 
 nsWaveStateMachine::nsWaveStateMachine(nsWaveDecoder* aDecoder, nsMediaStream* aStream,
                                        PRUint32 aBufferWaitTime, float aInitialVolume)
   : mDecoder(aDecoder),
     mStream(aStream),
     mBufferingWait(aBufferWaitTime),
     mBufferingBytes(0),
     mBufferingStart(0),
     mAudioBufferSize(0),
     mSampleRate(0),
     mChannels(0),
     mSampleSize(0),
+    mSampleFormat(nsAudioStream::FORMAT_S16_LE),
     mWaveLength(0),
     mWavePCMOffset(0),
     mMonitor(nsnull),
     mState(STATE_LOADING_METADATA),
     mNextState(STATE_PAUSED),
     mInitialVolume(aInitialVolume),
     mTimeOffset(0.0),
     mExpectMoreData(PR_TRUE),
@@ -526,17 +529,21 @@ nsWaveStateMachine::Run()
             len -= -endDelta;
             if (RoundDownToSample(len) != len) {
               NS_WARNING("PCM data does not end with complete sample");
               len = RoundDownToSample(len);
             }
             ChangeState(STATE_ENDED);
           }
 
-          mAudioStream->Write(reinterpret_cast<short*>(buf.get()), len / sizeof(short));
+          PRUint32 lengthInSamples = len;
+          if (mSampleFormat == nsAudioStream::FORMAT_S16_LE) {
+            lengthInSamples /= sizeof(short);
+          }
+          mAudioStream->Write(buf.get(), lengthInSamples);
           monitor.Enter();
         }
 
         // To avoid waking up too frequently to top up these buffers,
         // calculate the duration of the currently buffered data and sleep
         // until most of the buffered data has been consumed.  We can't
         // sleep for the entire duration because we might not wake up in
         // time to refill the buffers, causing an underrun.  To avoid this,
@@ -642,17 +649,19 @@ nsWaveStateMachine::ChangeState(State aS
 
 void
 nsWaveStateMachine::OpenAudioStream()
 {
   mAudioStream = new nsAudioStream();
   if (!mAudioStream) {
     LOG(PR_LOG_ERROR, ("Could not create audio stream"));
   } else {
-    mAudioStream->Init(mChannels, mSampleRate);
+    NS_ABORT_IF_FALSE(mMetadataValid,
+                      "Attempting to initialize audio stream with invalid metadata");
+    mAudioStream->Init(mChannels, mSampleRate, mSampleFormat);
     mAudioStream->SetVolume(mInitialVolume);
     mAudioBufferSize = mAudioStream->Available() * sizeof(short);
   }
 }
 
 void
 nsWaveStateMachine::CloseAudioStream()
 {
@@ -738,17 +747,17 @@ nsWaveStateMachine::LoadRIFFChunk()
   }
 
   return PR_TRUE;
 }
 
 PRBool
 nsWaveStateMachine::LoadFormatChunk()
 {
-  PRUint32 rate, channels, sampleSize;
+  PRUint32 rate, channels, sampleSize, sampleFormat;
   char waveFormat[WAVE_FORMAT_SIZE];
   const char* p = waveFormat;
 
   // RIFF chunks are always word (two byte) aligned.
   NS_ABORT_IF_FALSE(mStream->Tell() % 2 == 0,
                     "LoadFormatChunk called with unaligned stream");
 
   if (!ReadAll(mStream, waveFormat, sizeof(waveFormat))) {
@@ -770,38 +779,33 @@ nsWaveStateMachine::LoadFormatChunk()
   channels = ReadUint16LE(&p);
   rate = ReadUint32LE(&p);
 
   // Skip over average bytes per second field.
   p += 4;
 
   sampleSize = ReadUint16LE(&p);
 
-  // We only support 16-bit audio for now, since that's all that the in-tree
-  // libsydney supports.
-  if (ReadUint16LE(&p) != 16) {
-    NS_WARNING("WAVE is not 16-bit, other bit rates are not supported");
-    return PR_FALSE;
-  }
+  sampleFormat = ReadUint16LE(&p);
 
   // PCM encoded WAVEs are not expected to have an extended "format" chunk,
   // but I have found WAVEs that have a extended "format" chunk with an
   // extension size of 0 bytes.  Be polite and handle this rather than
   // considering the file invalid.  This code skips any extension of the
   // "format" chunk.
   if (fmtsize > WAVE_FORMAT_CHUNK_SIZE) {
     char extLength[2];
     const char* p = extLength;
 
     if (!ReadAll(mStream, extLength, sizeof(extLength))) {
       return PR_FALSE;
     }
 
     PRUint16 extra = ReadUint16LE(&p);
-    if (fmtsize - WAVE_FORMAT_CHUNK_SIZE + 2 != extra) {
+    if (fmtsize - (WAVE_FORMAT_CHUNK_SIZE + 2) != extra) {
       NS_WARNING("Invalid extended format chunk size");
       return PR_FALSE;
     }
     extra += extra % 2;
 
     if (extra > 0) {
       nsAutoArrayPtr<char> chunkExtension(new char[extra]);
       if (!ReadAll(mStream, chunkExtension.get(), extra)) {
@@ -810,30 +814,35 @@ nsWaveStateMachine::LoadFormatChunk()
     }
   }
 
   // RIFF chunks are always word (two byte) aligned.
   NS_ABORT_IF_FALSE(mStream->Tell() % 2 == 0,
                     "LoadFormatChunk left stream unaligned");
 
   // Make sure metadata is fairly sane.  The rate check is fairly arbitrary,
-  // but the channels/sampleSize check is intentionally limited to 16-bit
-  // mono or stereo because that's what the audio backend currently
-  // supports.
+  // but the channels check is intentionally limited to mono or stereo
+  // because that's what the audio backend currently supports.
   if (rate < 100 || rate > 96000 ||
       channels < 1 || channels > 2 ||
-      sampleSize < 2 || sampleSize > 4) {
+      (sampleSize != 1 && sampleSize != 2 && sampleSize != 4) ||
+      (sampleFormat != 8 && sampleFormat != 16)) {
     NS_WARNING("Invalid WAVE metadata");
     return PR_FALSE;
   }
 
   nsAutoMonitor monitor(mMonitor);
   mSampleRate = rate;
   mChannels = channels;
   mSampleSize = sampleSize;
+  if (sampleFormat == 8) {
+    mSampleFormat = nsAudioStream::FORMAT_U8;
+  } else {
+    mSampleFormat = nsAudioStream::FORMAT_S16_LE;
+  }
   return PR_TRUE;
 }
 
 PRBool
 nsWaveStateMachine::FindDataOffset()
 {
   PRUint32 length;
   PRInt64 offset;
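
For reference, the WAVE "fmt " chunk lays out its fields little-endian in this order: format tag, channels, sample rate, average bytes per second, block align, bits per sample. The LoadFormatChunk hunk above consumes channels, rate, block align (stored as mSampleSize), and bits per sample (now mapped to FORMAT_U8 or FORMAT_S16_LE). A self-contained sketch with an invented byte buffer describing an 11025 Hz mono 8-bit PCM file such as r11025_u8_c1.wav:

```cpp
// Hypothetical standalone reader for the "fmt " chunk fields the decoder
// consumes.  The byte values are invented to describe an 11025 Hz, mono,
// 8-bit Linear PCM file.
#include <cstdint>
#include <cstdio>

static uint16_t ReadU16LE(const uint8_t*& p)
{
  uint16_t v = uint16_t(p[0] | (p[1] << 8));
  p += 2;
  return v;
}

static uint32_t ReadU32LE(const uint8_t*& p)
{
  uint32_t v = uint32_t(p[0]) | (uint32_t(p[1]) << 8) |
               (uint32_t(p[2]) << 16) | (uint32_t(p[3]) << 24);
  p += 4;
  return v;
}

int main()
{
  const uint8_t fmt[] = {
    0x01, 0x00,             // wFormatTag      = 1 (Linear PCM)
    0x01, 0x00,             // nChannels       = 1
    0x11, 0x2B, 0x00, 0x00, // nSamplesPerSec  = 11025
    0x11, 0x2B, 0x00, 0x00, // nAvgBytesPerSec = 11025
    0x01, 0x00,             // nBlockAlign     = 1 byte per sample frame
    0x08, 0x00              // wBitsPerSample  = 8
  };
  const uint8_t* p = fmt;
  uint16_t tag        = ReadU16LE(p);
  uint16_t channels   = ReadU16LE(p);
  uint32_t rate       = ReadU32LE(p);
  ReadU32LE(p);                         // skip average bytes per second
  uint16_t blockAlign = ReadU16LE(p);   // becomes mSampleSize
  uint16_t bits       = ReadU16LE(p);   // 8 -> FORMAT_U8, 16 -> FORMAT_S16_LE
  std::printf("tag=%u channels=%u rate=%u blockAlign=%u bits=%u\n",
              tag, channels, rate, blockAlign, bits);
  return 0;
}
```

The validation in the patch then accepts rates in [100, 96000], one or two channels, a block align of 1, 2, or 4, and 8 or 16 bits per sample.
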
--- a/content/media/video/test/Makefile.in
+++ b/content/media/video/test/Makefile.in
@@ -62,18 +62,20 @@ include $(topsrcdir)/config/rules.mk
                 test_seek6.html \
                 test_seek7.html \
                 test_seek8.html \
                 test_standalone.html \
                 test_timeupdate1.html \
                 test_timeupdate2.html \
                 test_timeupdate3.html \
                 test_volume.html \
+                test_wav_8bit.html \
                 test_wav_ended1.html \
                 320x240.ogg \
                 bug461281.ogg \
                 seek.ogg \
                 r11025_s16_c1.wav \
+                r11025_u8_c1.wav \
 #                test_bug448534.html \
                 $(NULL)
 
 libs:: $(_TEST_FILES)
 	$(INSTALL) $(foreach f,$^,"$f") $(DEPTH)/_tests/testing/mochitest/tests/$(relativesrcdir)
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..97dc453b9ed12e03e38e18a6ae903f6c22b9c184
GIT binary patch
literal 11069
zc%1FXAqs#%6a>(}&8Ro9n>~QR1*<{xg4k?(e{_!_yf+LCvu`=iQ^dI(>;2TRkBIbT
Z_`cQ@U0kJ>wgCVD000000002s*It7628#dy
new file mode 100644
--- /dev/null
+++ b/content/media/video/test/test_wav_8bit.html
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Wave Media test: 8-bit sample format</title>
+  <script type="text/javascript" src="/MochiKit/MochiKit.js"></script>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<audio id='v'
+       onloadedmetadata='return startTest();'
+       onended='return playbackEnded();'>
+  <source type='audio/x-wav' src='r11025_u8_c1.wav'>
+</audio>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+// Test if the ended event works correctly.
+var v = document.getElementById('v');
+var endPassed = false;
+var completed = false;
+
+function startTest() {
+  if (completed)
+    return false;
+
+  v.play();
+  return false;
+}
+
+function playbackEnded() {
+  if (completed)
+    return false
+
+  completed = true;
+  ok(v.currentTime >= 0.9 && v.currentTime <= 1.1,
+     "Checking currentTime at end: " + v.currentTime);
+  ok(v.ended, "Checking playback has ended");
+  SimpleTest.finish();
+  return false;
+}
+
+SimpleTest.waitForExplicitFinish();
+</script>
+</pre>
+</body>
+</html>
--- a/content/media/video/test/test_wav_ended1.html
+++ b/content/media/video/test/test_wav_ended1.html
@@ -1,12 +1,12 @@
 <!DOCTYPE HTML>
 <html>
 <head>
-  <title>Media test: ended</title>
+  <title>Wave Media test: ended</title>
   <script type="text/javascript" src="/MochiKit/MochiKit.js"></script>
   <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
 </head>
 <body>
 <audio id='v'
        onloadedmetadata='return startTest();'
        onended='return playbackEnded();'>