Backed out 8 changesets (bug 1264199) for permafailing media mochitests on OSX 10.6 a=backout
author: Wes Kocher <wkocher@mozilla.com>
Wed, 20 Apr 2016 11:01:36 -0700
changeset 331860 30c5dbcee7ddeafcaffa50e01429c45d459bb8fc
parent 331859 a4790cb5d5403d5f814029c4f439d899d4c2e502
child 331926 93090fa3065335c9f830e9c70cbfbbc2f40fd979
push id: 6048
push user: kmoir@mozilla.com
push date: Mon, 06 Jun 2016 19:02:08 +0000
treeherder: mozilla-beta@46d72a56c57d [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: backout
bugs: 1264199
milestone: 48.0a1
backs out: 0f20f2080824affe403a4b5d1bcbb11fda7325f7
90351d2719beeb09ea4239685baf304e5058c19f
f484b42cbddaa4ff6a2407d5886db24c30b59537
b79b62146120ca32cbe16d6283a0ab25b8d13c0e
f587df589b69827d23336fa528560130dc86dd39
02f781a573b45dba4b69ab7b7b52d76f4243d658
d3677ae4c8ecb9aa68e750bb1966cef127edc6dc
d08288654ec9a96d7b685c18efac351551d8fc35
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 8 changesets (bug 1264199) for permafailing media mochitests on OSX 10.6 a=backout Backed out changeset 0f20f2080824 (bug 1264199) Backed out changeset 90351d2719be (bug 1264199) Backed out changeset f484b42cbdda (bug 1264199) Backed out changeset b79b62146120 (bug 1264199) Backed out changeset f587df589b69 (bug 1264199) Backed out changeset 02f781a573b4 (bug 1264199) Backed out changeset d3677ae4c8ec (bug 1264199) Backed out changeset d08288654ec9 (bug 1264199) MozReview-Commit-ID: HVtiBSOxf7t
dom/media/AudioConverter.cpp
dom/media/AudioConverter.h
dom/media/AudioStream.cpp
dom/media/AudioStream.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/mediasink/DecodedAudioDataSink.cpp
dom/media/mediasink/DecodedAudioDataSink.h
media/libspeex_resampler/handle-memory-error.patch
media/libspeex_resampler/src/resample.c
media/libspeex_resampler/update.sh
--- a/dom/media/AudioConverter.cpp
+++ b/dom/media/AudioConverter.cpp
@@ -2,17 +2,16 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioConverter.h"
 #include <string.h>
 #include <speex/speex_resampler.h>
-#include <cmath>
 
 /*
  *  Parts derived from MythTV AudioConvert Class
  *  Created by Jean-Yves Avenard.
  *
  *  Copyright (C) Bubblestuff Pty Ltd 2013
  *  Copyright (C) foobum@gmail.com 2010
  */
@@ -22,59 +21,69 @@ namespace mozilla {
 AudioConverter::AudioConverter(const AudioConfig& aIn, const AudioConfig& aOut)
   : mIn(aIn)
   , mOut(aOut)
   , mResampler(nullptr)
 {
   MOZ_DIAGNOSTIC_ASSERT(aIn.Format() == aOut.Format() &&
                         aIn.Interleaved() == aOut.Interleaved(),
                         "No format or rate conversion is supported at this stage");
-  MOZ_DIAGNOSTIC_ASSERT(aOut.Channels() <= 2 ||
+  MOZ_DIAGNOSTIC_ASSERT((aIn.Channels() > aOut.Channels() && aOut.Channels() <= 2) ||
                         aIn.Channels() == aOut.Channels(),
-                        "Only down/upmixing to mono or stereo is supported at this stage");
+                        "Only downmixing to mono or stereo is supported at this stage");
   MOZ_DIAGNOSTIC_ASSERT(aOut.Interleaved(), "planar audio format not supported");
   mIn.Layout().MappingTable(mOut.Layout(), mChannelOrderMap);
   if (aIn.Rate() != aOut.Rate()) {
-    RecreateResampler();
+    int error;
+    mResampler = speex_resampler_init(aOut.Channels(),
+                                      aIn.Rate(),
+                                      aOut.Rate(),
+                                      SPEEX_RESAMPLER_QUALITY_DEFAULT,
+                                      &error);
+
+    if (error == RESAMPLER_ERR_SUCCESS) {
+      speex_resampler_skip_zeros(mResampler);
+    } else {
+      NS_WARNING("Failed to initialize resampler.");
+      mResampler = nullptr;
+    }
   }
 }
 
 AudioConverter::~AudioConverter()
 {
   if (mResampler) {
     speex_resampler_destroy(mResampler);
     mResampler = nullptr;
   }
 }
 
 bool
 AudioConverter::CanWorkInPlace() const
 {
   bool needDownmix = mIn.Channels() > mOut.Channels();
-  bool needUpmix = mIn.Channels() < mOut.Channels();
   bool canDownmixInPlace =
     mIn.Channels() * AudioConfig::SampleSize(mIn.Format()) >=
     mOut.Channels() * AudioConfig::SampleSize(mOut.Format());
   bool needResample = mIn.Rate() != mOut.Rate();
   bool canResampleInPlace = mIn.Rate() >= mOut.Rate();
   // We should be able to work in place if 1s of audio input takes less space
   // than 1s of audio output. However, as we downmix before resampling we can't
   // perform any upsampling in place (e.g. if incoming rate >= outgoing rate)
-  return !needUpmix && (!needDownmix || canDownmixInPlace) &&
+  return (!needDownmix || canDownmixInPlace) &&
          (!needResample || canResampleInPlace);
 }
 
 size_t
 AudioConverter::ProcessInternal(void* aOut, const void* aIn, size_t aFrames)
 {
   if (mIn.Channels() > mOut.Channels()) {
     return DownmixAudio(aOut, aIn, aFrames);
-  } else if (mIn.Channels() < mOut.Channels()) {
-    return UpmixAudio(aOut, aIn, aFrames);
-  } else if (mIn.Layout() != mOut.Layout() && CanReorderAudio()) {
+  } else if (mIn.Layout() != mOut.Layout() &&
+      CanReorderAudio()) {
     ReOrderInterleavedChannels(aOut, aIn, aFrames);
   } else if (aIn != aOut) {
     memmove(aOut, aIn, FramesOutToBytes(aFrames));
   }
   return aFrames;
 }
 
 // Reorder interleaved channels.
@@ -100,17 +109,20 @@ void
 }
 
 void
 AudioConverter::ReOrderInterleavedChannels(void* aOut, const void* aIn,
                                            size_t aFrames) const
 {
   MOZ_DIAGNOSTIC_ASSERT(mIn.Channels() == mOut.Channels());
 
-  if (mOut.Channels() == 1 || mOut.Layout() == mIn.Layout()) {
+  if (mOut.Layout() == mIn.Layout()) {
+    return;
+  }
+  if (mOut.Channels() == 1) {
     // If channel count is 1, planar and non-planar formats are the same and
     // there's nothing to reorder.
     if (aOut != aIn) {
       memmove(aOut, aIn, FramesOutToBytes(aFrames));
     }
     return;
   }
 
@@ -214,26 +226,26 @@ AudioConverter::DownmixAudio(void* aOut,
     aIn = aOut;
     channels = 2;
   }
 
   if (mOut.Channels() == 1) {
     if (mIn.Format() == AudioConfig::FORMAT_FLT) {
       const float* in = static_cast<const float*>(aIn);
       float* out = static_cast<float*>(aOut);
-      for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
+      for (uint32_t fIdx = 0; fIdx < aFrames; ++fIdx) {
         float sample = 0.0;
         // The sample of the buffer would be interleaved.
         sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
         *out++ = sample;
       }
     } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
       const int16_t* in = static_cast<const int16_t*>(aIn);
       int16_t* out = static_cast<int16_t*>(aOut);
-      for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
+      for (uint32_t fIdx = 0; fIdx < aFrames; ++fIdx) {
         int32_t sample = 0.0;
         // The sample of the buffer would be interleaved.
         sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
         *out++ = sample;
       }
     } else {
       MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
     }
@@ -245,136 +257,37 @@ size_t
 AudioConverter::ResampleAudio(void* aOut, const void* aIn, size_t aFrames)
 {
   if (!mResampler) {
     return 0;
   }
   uint32_t outframes = ResampleRecipientFrames(aFrames);
   uint32_t inframes = aFrames;
 
-  int error;
   if (mOut.Format() == AudioConfig::FORMAT_FLT) {
     const float* in = reinterpret_cast<const float*>(aIn);
     float* out = reinterpret_cast<float*>(aOut);
-    error =
-      speex_resampler_process_interleaved_float(mResampler, in, &inframes,
-                                                out, &outframes);
+    speex_resampler_process_interleaved_float(mResampler, in, &inframes,
+                                              out, &outframes);
   } else if (mOut.Format() == AudioConfig::FORMAT_S16) {
     const int16_t* in = reinterpret_cast<const int16_t*>(aIn);
     int16_t* out = reinterpret_cast<int16_t*>(aOut);
-    error =
-      speex_resampler_process_interleaved_int(mResampler, in, &inframes,
-                                              out, &outframes);
+    speex_resampler_process_interleaved_int(mResampler, in, &inframes,
+                                            out, &outframes);
   } else {
     MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
   }
-  MOZ_ASSERT(error == RESAMPLER_ERR_SUCCESS);
-  if (error != RESAMPLER_ERR_SUCCESS) {
-    speex_resampler_destroy(mResampler);
-    mResampler = nullptr;
-    return 0;
-  }
   MOZ_ASSERT(inframes == aFrames, "Some frames will be dropped");
   return outframes;
 }
 
-void
-AudioConverter::RecreateResampler()
-{
-  if (mResampler) {
-    speex_resampler_destroy(mResampler);
-  }
-  int error;
-  mResampler = speex_resampler_init(mOut.Channels(),
-                                    mIn.Rate(),
-                                    mOut.Rate(),
-                                    SPEEX_RESAMPLER_QUALITY_DEFAULT,
-                                    &error);
-
-  if (error == RESAMPLER_ERR_SUCCESS) {
-    speex_resampler_skip_zeros(mResampler);
-  } else {
-    NS_WARNING("Failed to initialize resampler.");
-    mResampler = nullptr;
-  }
-}
-
-size_t
-AudioConverter::DrainResampler(void* aOut)
-{
-  if (!mResampler) {
-    return 0;
-  }
-  int frames = speex_resampler_get_input_latency(mResampler);
-  AlignedByteBuffer buffer(FramesOutToBytes(frames));
-  if (!buffer) {
-    // OOM
-    return 0;
-  }
-  frames = ResampleAudio(aOut, buffer.Data(), frames);
-  // Tore down the resampler as it's easier than handling follow-up.
-  RecreateResampler();
-  return frames;
-}
-
-size_t
-AudioConverter::UpmixAudio(void* aOut, const void* aIn, size_t aFrames) const
-{
-  MOZ_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 ||
-             mIn.Format() == AudioConfig::FORMAT_FLT);
-  MOZ_ASSERT(mIn.Channels() < mOut.Channels());
-  MOZ_ASSERT(mIn.Channels() == 1, "Can only upmix mono for now");
-  MOZ_ASSERT(mOut.Channels() == 2, "Can only upmix to stereo for now");
-
-  if (mOut.Channels() != 2) {
-    return 0;
-  }
-
-  // Upmix mono to stereo.
-  // This is a very dumb mono to stereo upmixing, power levels are preserved
-  // following the calculation: left = right = -3dB*mono.
-  if (mIn.Format() == AudioConfig::FORMAT_FLT) {
-    const float m3db = std::sqrt(0.5); // -3dB = sqrt(1/2)
-    const float* in = static_cast<const float*>(aIn);
-    float* out = static_cast<float*>(aOut);
-    for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
-      float sample = in[fIdx] * m3db;
-      // The samples of the buffer would be interleaved.
-      *out++ = sample;
-      *out++ = sample;
-    }
-  } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
-    const int16_t* in = static_cast<const int16_t*>(aIn);
-    int16_t* out = static_cast<int16_t*>(aOut);
-    for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
-      int16_t sample = ((int32_t)in[fIdx] * 11585) >> 14; // close enough to i*sqrt(0.5)
-      // The samples of the buffer would be interleaved.
-      *out++ = sample;
-      *out++ = sample;
-    }
-  } else {
-    MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
-  }
-
-  return aFrames;
-}
-
 size_t
 AudioConverter::ResampleRecipientFrames(size_t aFrames) const
 {
-  if (!aFrames && mIn.Rate() != mOut.Rate()) {
-    // The resampler will be drained, account for frames currently buffered
-    // in the resampler.
-    if (!mResampler) {
-      return 0;
-    }
-    return speex_resampler_get_output_latency(mResampler);
-  } else {
-    return (uint64_t)aFrames * mOut.Rate() / mIn.Rate() + 1;
-  }
+  return (uint64_t)aFrames * mOut.Rate() / mIn.Rate() + 1;
 }
 
 size_t
 AudioConverter::FramesOutToSamples(size_t aFrames) const
 {
   return aFrames * mOut.Channels();
 }
 
--- a/dom/media/AudioConverter.h
+++ b/dom/media/AudioConverter.h
@@ -118,18 +118,16 @@ typedef AudioDataBuffer<AudioConfig::FOR
 class AudioConverter {
 public:
   AudioConverter(const AudioConfig& aIn, const AudioConfig& aOut);
   ~AudioConverter();
 
   // Convert the AudioDataBuffer.
   // Conversion will be done in place if possible. Otherwise a new buffer will
   // be returned.
-  // Providing an empty buffer and resampling is expected, the resampler
-  // will be drained.
   template <AudioConfig::SampleFormat Format, typename Value>
   AudioDataBuffer<Format, Value> Process(AudioDataBuffer<Format, Value>&& aBuffer)
   {
     MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == mOut.Format() && mIn.Format() == Format);
     AudioDataBuffer<Format, Value> buffer = Move(aBuffer);
     if (CanWorkInPlace()) {
       size_t frames = SamplesInToFrames(buffer.Length());
       frames = ProcessInternal(buffer.Data(), buffer.Data(), frames);
@@ -149,36 +147,32 @@ public:
     MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == mOut.Format() && mIn.Format() == Format);
     // Perform the downmixing / reordering in temporary buffer.
     size_t frames = SamplesInToFrames(aBuffer.Length());
     AlignedBuffer<Value> temp1;
     if (!temp1.SetLength(FramesOutToSamples(frames))) {
       return AudioDataBuffer<Format, Value>(Move(temp1));
     }
     frames = ProcessInternal(temp1.Data(), aBuffer.Data(), frames);
-    if (mIn.Rate() == mOut.Rate()) {
+    if (!frames || mIn.Rate() == mOut.Rate()) {
       temp1.SetLength(FramesOutToSamples(frames));
       return AudioDataBuffer<Format, Value>(Move(temp1));
     }
 
     // At this point, temp1 contains the buffer reordered and downmixed.
     // If we are downsampling we can re-use it.
     AlignedBuffer<Value>* outputBuffer = &temp1;
     AlignedBuffer<Value> temp2;
-    if (!frames || mOut.Rate() > mIn.Rate()) {
-      // We are upsampling or about to drain, we can't work in place.
-      // Allocate another temporary buffer where the upsampling will occur.
+    if (mOut.Rate() > mIn.Rate()) {
+      // We are upsampling, we can't work in place. Allocate another temporary
+      // buffer where the upsampling will occur.
       temp2.SetLength(FramesOutToSamples(ResampleRecipientFrames(frames)));
       outputBuffer = &temp2;
     }
-    if (!frames) {
-      frames = DrainResampler(outputBuffer->Data());
-    } else {
-      frames = ResampleAudio(outputBuffer->Data(), temp1.Data(), frames);
-    }
+    frames = ResampleAudio(outputBuffer->Data(), temp1.Data(), frames);
     outputBuffer->SetLength(FramesOutToSamples(frames));
     return AudioDataBuffer<Format, Value>(Move(*outputBuffer));
   }
 
   // Attempt to convert the AudioDataBuffer in place.
   // Will return 0 if the conversion wasn't possible.
   template <typename Value>
   size_t Process(Value* aBuffer, size_t aFrames)
@@ -214,25 +208,22 @@ private:
    * aIn   : source buffer
    * aSamples: number of frames in source buffer
    *
    * Return Value: number of frames converted or 0 if error
    */
   size_t ProcessInternal(void* aOut, const void* aIn, size_t aFrames);
   void ReOrderInterleavedChannels(void* aOut, const void* aIn, size_t aFrames) const;
   size_t DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const;
-  size_t UpmixAudio(void* aOut, const void* aIn, size_t aFrames) const;
 
   size_t FramesOutToSamples(size_t aFrames) const;
   size_t SamplesInToFrames(size_t aSamples) const;
   size_t FramesOutToBytes(size_t aFrames) const;
 
   // Resampler context.
   SpeexResamplerState* mResampler;
   size_t ResampleAudio(void* aOut, const void* aIn, size_t aFrames);
   size_t ResampleRecipientFrames(size_t aFrames) const;
-  void RecreateResampler();
-  size_t DrainResampler(void* aOut);
 };
 
 } // namespace mozilla
 
 #endif /* AudioConverter_h */
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -124,16 +124,17 @@ AudioStream::AudioStream(DataSource& aSo
   , mInRate(0)
   , mOutRate(0)
   , mChannels(0)
   , mOutChannels(0)
   , mAudioClock(this)
   , mTimeStretcher(nullptr)
   , mDumpFile(nullptr)
   , mState(INITIALIZED)
+  , mIsMonoAudioEnabled(gfxPrefs::MonoAudio())
   , mDataSource(aSource)
 {
 }
 
 AudioStream::~AudioStream()
 {
   LOG("deleted, state %d", mState);
   MOZ_ASSERT(mState == SHUTDOWN && !mCubebStream,
@@ -325,17 +326,17 @@ AudioStream::Init(uint32_t aNumChannels,
   if (!CubebUtils::GetCubebContext()) {
     return NS_ERROR_FAILURE;
   }
 
   MOZ_LOG(gAudioStreamLog, LogLevel::Debug,
     ("%s  channels: %d, rate: %d for %p", __FUNCTION__, aNumChannels, aRate, this));
   mInRate = mOutRate = aRate;
   mChannels = aNumChannels;
-  mOutChannels = aNumChannels;
+  mOutChannels = mIsMonoAudioEnabled ? 1 : aNumChannels;
 
   mDumpFile = OpenDumpFile(this);
 
   cubeb_stream_params params;
   params.rate = aRate;
   params.channels = mOutChannels;
 #if defined(__ANDROID__)
 #if defined(MOZ_B2G)
@@ -347,16 +348,21 @@ AudioStream::Init(uint32_t aNumChannels,
   if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
     return NS_ERROR_INVALID_ARG;
   }
 #endif
 
   params.format = ToCubebFormat<AUDIO_OUTPUT_FORMAT>::value;
   mAudioClock.Init();
 
+  if (mIsMonoAudioEnabled) {
+    AudioConfig inConfig(mChannels, mInRate);
+    AudioConfig outConfig(mOutChannels, mOutRate);
+    mAudioConverter = MakeUnique<AudioConverter>(inConfig, outConfig);
+  }
   return OpenCubeb(params);
 }
 
 // This code used to live inside AudioStream::Init(), but on Mac (others?)
 // it has been known to take 300-800 (or even 8500) ms to execute(!)
 nsresult
 AudioStream::OpenCubeb(cubeb_stream_params &aParams)
 {
@@ -541,27 +547,31 @@ AudioStream::GetPositionInFramesUnlocked
 bool
 AudioStream::IsPaused()
 {
   MonitorAutoLock mon(mMonitor);
   return mState == STOPPED;
 }
 
 bool
-AudioStream::IsValidAudioFormat(Chunk* aChunk)
+AudioStream::Downmix(Chunk* aChunk)
 {
   if (aChunk->Rate() != mInRate) {
     LOGW("mismatched sample %u, mInRate=%u", aChunk->Rate(), mInRate);
     return false;
   }
 
   if (aChunk->Channels() > 8) {
     return false;
   }
 
+  if (mAudioConverter) {
+    mAudioConverter->Process(aChunk->GetWritable(), aChunk->Frames());
+  }
+
   return true;
 }
 
 void
 AudioStream::GetUnprocessed(AudioBufferWriter& aWriter)
 {
   mMonitor.AssertCurrentThreadOwns();
 
@@ -580,20 +590,20 @@ AudioStream::GetUnprocessed(AudioBufferW
   }
 
   while (aWriter.Available() > 0) {
     UniquePtr<Chunk> c = mDataSource.PopFrames(aWriter.Available());
     if (c->Frames() == 0) {
       break;
     }
     MOZ_ASSERT(c->Frames() <= aWriter.Available());
-    if (IsValidAudioFormat(c.get())) {
+    if (Downmix(c.get())) {
       aWriter.Write(c->Data(), c->Frames());
     } else {
-      // Write silence if invalid format.
+      // Write silence if downmixing fails.
       aWriter.WriteZeros(c->Frames());
     }
   }
 }
 
 void
 AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
 {
@@ -608,20 +618,20 @@ AudioStream::GetTimeStretched(AudioBuffe
   uint32_t toPopFrames = ceil(aWriter.Available() * playbackRate);
 
   while (mTimeStretcher->numSamples() < aWriter.Available()) {
     UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
     if (c->Frames() == 0) {
       break;
     }
     MOZ_ASSERT(c->Frames() <= toPopFrames);
-    if (IsValidAudioFormat(c.get())) {
+    if (Downmix(c.get())) {
       mTimeStretcher->putSamples(c->Data(), c->Frames());
     } else {
-      // Write silence if invalid format.
+      // Write silence if downmixing fails.
       AutoTArray<AudioDataValue, 1000> buf;
       buf.SetLength(mOutChannels * c->Frames());
       memset(buf.Elements(), 0, buf.Length() * sizeof(AudioDataValue));
       mTimeStretcher->putSamples(buf.Elements(), c->Frames());
     }
   }
 
   auto timeStretcher = mTimeStretcher;
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -281,21 +281,16 @@ public:
 
   // Return the position, measured in audio frames played since the stream
   // was opened, of the audio hardware.  Thread-safe.
   int64_t GetPositionInFrames();
 
   // Returns true when the audio stream is paused.
   bool IsPaused();
 
-  static uint32_t GetPreferredRate()
-  {
-    CubebUtils::InitPreferredSampleRate();
-    return CubebUtils::PreferredSampleRate();
-  }
   uint32_t GetRate() { return mOutRate; }
   uint32_t GetChannels() { return mChannels; }
   uint32_t GetOutChannels() { return mOutChannels; }
 
   // Set playback rate as a multiple of the intrinsic playback rate. This is to
   // be called only with aPlaybackRate > 0.0.
   nsresult SetPlaybackRate(double aPlaybackRate);
   // Switch between resampling (if false) and time stretching (if true, default).
@@ -328,19 +323,18 @@ private:
   }
 
 
   long DataCallback(void* aBuffer, long aFrames);
   void StateCallback(cubeb_state aState);
 
   nsresult EnsureTimeStretcherInitializedUnlocked();
 
-  // Return true if audio frames are valid (correct sampling rate and valid
-  // channel count) otherwise false.
-  bool IsValidAudioFormat(Chunk* aChunk);
+  // Return true if downmixing succeeds otherwise false.
+  bool Downmix(Chunk* aChunk);
 
   void GetUnprocessed(AudioBufferWriter& aWriter);
   void GetTimeStretched(AudioBufferWriter& aWriter);
 
   void StartUnlocked();
 
   // The monitor is held to protect all access to member variables.
   Monitor mMonitor;
@@ -370,15 +364,19 @@ private:
     STOPPED,     // Stopped by a call to Pause().
     DRAINED,     // StateCallback has indicated that the drain is complete.
     ERRORED,     // Stream disabled due to an internal error.
     SHUTDOWN     // Shutdown has been called
   };
 
   StreamState mState;
   bool mIsFirst;
+  // Get this value from the preference, if true, we would downmix the stereo.
+  bool mIsMonoAudioEnabled;
 
   DataSource& mDataSource;
+
+  UniquePtr<AudioConverter> mAudioConverter;
 };
 
 } // namespace mozilla
 
 #endif
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -357,17 +357,17 @@ MediaDecoderStateMachine::Initialization
 
 media::MediaSink*
 MediaDecoderStateMachine::CreateAudioSink()
 {
   RefPtr<MediaDecoderStateMachine> self = this;
   auto audioSinkCreator = [self] () {
     MOZ_ASSERT(self->OnTaskQueue());
     return new DecodedAudioDataSink(
-      self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
+      self->mAudioQueue, self->GetMediaTime(),
       self->mInfo.mAudio, self->mAudioChannel);
   };
   return new AudioSinkWrapper(mTaskQueue, audioSinkCreator);
 }
 
 already_AddRefed<media::MediaSink>
 MediaDecoderStateMachine::CreateMediaSink(bool aAudioCaptured)
 {
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -24,73 +24,46 @@ extern LazyLogModule gMediaDecoderLog;
   MOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, \
   ("DecodedAudioDataSink=%p " msg, this, ##__VA_ARGS__))
 
 namespace media {
 
 // The amount of audio frames that is used to fuzz rounding errors.
 static const int64_t AUDIO_FUZZ_FRAMES = 1;
 
-DecodedAudioDataSink::DecodedAudioDataSink(AbstractThread* aThread,
-                                           MediaQueue<MediaData>& aAudioQueue,
+DecodedAudioDataSink::DecodedAudioDataSink(MediaQueue<MediaData>& aAudioQueue,
                                            int64_t aStartTime,
                                            const AudioInfo& aInfo,
                                            dom::AudioChannel aChannel)
   : AudioSink(aAudioQueue)
   , mStartTime(aStartTime)
   , mWritten(0)
   , mLastGoodPosition(0)
   , mInfo(aInfo)
   , mChannel(aChannel)
   , mPlaying(true)
-  , mErrored(false)
   , mPlaybackComplete(false)
-  , mOwnerThread(aThread)
-  , mFramesParsed(0)
-  , mLastEndTime(0)
 {
   bool resampling = gfxPrefs::AudioSinkResampling();
-
-  if (resampling) {
-    mOutputRate = gfxPrefs::AudioSinkResampleRate();
-  } else if (mInfo.mRate == 44100 || mInfo.mRate == 48000) {
-    // The original rate is of good quality and we want to minimize unecessary
-    // resampling. The common scenario being that the sampling rate is one or
-    // the other, this allows to minimize audio quality regression and hoping
-    // content provider want change from those rates mid-stream.
-    mOutputRate = mInfo.mRate;
-  } else {
-    // We will resample all data to match cubeb's preferred sampling rate.
-    mOutputRate = AudioStream::GetPreferredRate();
-  }
-  MOZ_DIAGNOSTIC_ASSERT(mOutputRate, "output rate can't be 0.");
-
-  bool monoAudioEnabled = gfxPrefs::MonoAudio();
-
-  mOutputChannels = monoAudioEnabled
-    ? 1 : (gfxPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
+  uint32_t resamplingRate = gfxPrefs::AudioSinkResampleRate();
+  mConverter =
+    MakeUnique<AudioConverter>(
+      AudioConfig(mInfo.mChannels, mInfo.mRate),
+      AudioConfig(mInfo.mChannels > 2 && gfxPrefs::AudioSinkForceStereo()
+                    ? 2 : mInfo.mChannels,
+                  resampling ? resamplingRate : mInfo.mRate));
 }
 
 DecodedAudioDataSink::~DecodedAudioDataSink()
 {
 }
 
 RefPtr<GenericPromise>
 DecodedAudioDataSink::Init(const PlaybackParams& aParams)
 {
-  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
-
-  mAudioQueueListener = mAudioQueue.PushEvent().Connect(
-    mOwnerThread, this, &DecodedAudioDataSink::OnAudioPushed);
-  mProcessedQueueListener = mProcessedQueue.PopEvent().Connect(
-    mOwnerThread, this, &DecodedAudioDataSink::OnAudioPopped);
-
-  // To ensure at least one audio packet will be popped from AudioQueue and
-  // ready to be played.
-  NotifyAudioNeeded();
   RefPtr<GenericPromise> p = mEndPromise.Ensure(__func__);
   nsresult rv = InitializeAudioStream(aParams);
   if (NS_FAILED(rv)) {
     mEndPromise.Reject(rv, __func__);
   }
   return p;
 }
 
@@ -117,26 +90,20 @@ DecodedAudioDataSink::HasUnplayedFrames(
   // Experimentation suggests that GetPositionInFrames() is zero-indexed,
   // so we need to add 1 here before comparing it to mWritten.
   return mAudioStream && mAudioStream->GetPositionInFrames() + 1 < mWritten;
 }
 
 void
 DecodedAudioDataSink::Shutdown()
 {
-  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
-
-  mAudioQueueListener.Disconnect();
-  mProcessedQueueListener.Disconnect();
-
   if (mAudioStream) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
   }
-  mProcessedQueue.Reset();
   mEndPromise.ResolveIfExists(true, __func__);
 }
 
 void
 DecodedAudioDataSink::SetVolume(double aVolume)
 {
   if (mAudioStream) {
     mAudioStream->SetVolume(aVolume);
@@ -174,17 +141,19 @@ DecodedAudioDataSink::SetPlaying(bool aP
   }
   mPlaying = aPlaying;
 }
 
 nsresult
 DecodedAudioDataSink::InitializeAudioStream(const PlaybackParams& aParams)
 {
   mAudioStream = new AudioStream(*this);
-  nsresult rv = mAudioStream->Init(mOutputChannels, mOutputRate, mChannel);
+  nsresult rv = mAudioStream->Init(mConverter->OutputConfig().Channels(),
+                                   mConverter->OutputConfig().Rate(),
+                                   mChannel);
   if (NS_FAILED(rv)) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
     return rv;
   }
 
   // Set playback params before calling Start() so they can take effect
   // as soon as the 1st DataCallback of the AudioStream fires.
@@ -194,24 +163,23 @@ DecodedAudioDataSink::InitializeAudioStr
   mAudioStream->Start();
 
   return NS_OK;
 }
 
 int64_t
 DecodedAudioDataSink::GetEndTime() const
 {
-  CheckedInt64 playedUsecs = FramesToUsecs(mWritten, mOutputRate) + mStartTime;
+  CheckedInt64 playedUsecs =
+    FramesToUsecs(mWritten, mConverter->OutputConfig().Rate()) + mStartTime;
   if (!playedUsecs.isValid()) {
     NS_WARNING("Int overflow calculating audio end time");
     return -1;
   }
-  // As we may be resampling, rounding errors may occur. Ensure we never get
-  // past the original end time.
-  return std::min<int64_t>(mLastEndTime, playedUsecs.value());
+  return playedUsecs.value();
 }
 
 UniquePtr<AudioStream::Chunk>
 DecodedAudioDataSink::PopFrames(uint32_t aFrames)
 {
   class Chunk : public AudioStream::Chunk {
   public:
     Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
@@ -244,23 +212,88 @@ DecodedAudioDataSink::PopFrames(uint32_t
     AudioDataValue* GetWritable() const { return mData.get(); }
   private:
     const uint32_t mFrames;
     const uint32_t mChannels;
     const uint32_t mRate;
     UniquePtr<AudioDataValue[]> mData;
   };
 
-  if (!mCurrentData) {
+  while (!mCurrentData) {
     // No data in the queue. Return an empty chunk.
-    if (!mProcessedQueue.GetSize()) {
+    if (AudioQueue().GetSize() == 0) {
       return MakeUnique<Chunk>();
     }
 
-    mCurrentData = dont_AddRef(mProcessedQueue.PopFront().take());
+    AudioData* a = AudioQueue().PeekFront()->As<AudioData>();
+
+    // Ignore the element with 0 frames and try next.
+    if (a->mFrames == 0) {
+      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
+      continue;
+    }
+
+    // Ignore invalid samples.
+    if (a->mRate != mInfo.mRate || a->mChannels != mInfo.mChannels) {
+      NS_WARNING(nsPrintfCString(
+        "mismatched sample format, data=%p rate=%u channels=%u frames=%u",
+        a->mAudioData.get(), a->mRate, a->mChannels, a->mFrames).get());
+      RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
+      continue;
+    }
+
+    // See if there's a gap in the audio. If there is, push silence into the
+    // audio hardware, so we can play across the gap.
+    // Calculate the timestamp of the next chunk of audio in numbers of
+    // samples.
+    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime,
+                                            mConverter->OutputConfig().Rate());
+    // Calculate the number of frames that have been pushed onto the audio hardware.
+    CheckedInt64 playedFrames = UsecsToFrames(mStartTime,
+                                              mConverter->OutputConfig().Rate()) +
+                                static_cast<int64_t>(mWritten);
+    CheckedInt64 missingFrames = sampleTime - playedFrames;
+
+    if (!missingFrames.isValid() || !sampleTime.isValid()) {
+      NS_WARNING("Int overflow in DecodedAudioDataSink");
+      mErrored = true;
+      return MakeUnique<Chunk>();
+    }
+
+    const uint32_t rate = mConverter->OutputConfig().Rate();
+    const uint32_t channels = mConverter->OutputConfig().Channels();
+
+    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
+      // The next audio chunk begins some time after the end of the last chunk
+      // we pushed to the audio hardware. We must push silence into the audio
+      // hardware so that the next audio chunk begins playback at the correct
+      // time.
+      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
+      auto framesToPop = std::min<uint32_t>(missingFrames.value(), aFrames);
+      mWritten += framesToPop;
+      return MakeUnique<SilentChunk>(framesToPop, channels, rate);
+    }
+
+    RefPtr<AudioData> data =
+      dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
+    if (mConverter->InputConfig() != mConverter->OutputConfig()) {
+      AlignedAudioBuffer convertedData =
+        mConverter->Process(AudioSampleBuffer(Move(data->mAudioData))).Forget();
+      mCurrentData =
+        new AudioData(data->mOffset,
+                      data->mTime,
+                      data->mDuration,
+                      convertedData.Length() / channels,
+                      Move(convertedData),
+                      channels,
+                      rate);
+    } else {
+      mCurrentData = Move(data);
+    }
+
     mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                             mCurrentData->mChannels,
                                             mCurrentData->mFrames);
     MOZ_ASSERT(mCurrentData->mFrames > 0);
   }
 
   auto framesToPop = std::min(aFrames, mCursor->Available());
 
@@ -270,17 +303,17 @@ DecodedAudioDataSink::PopFrames(uint32_t
   UniquePtr<AudioStream::Chunk> chunk =
     MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
 
   mWritten += framesToPop;
   mCursor->Advance(framesToPop);
 
   // All frames are popped. Reset mCurrentData so we can pop new elements from
   // the audio queue in next calls to PopFrames().
-  if (!mCursor->Available()) {
+  if (mCursor->Available() == 0) {
     mCurrentData = nullptr;
   }
 
   return chunk;
 }
 
 bool
 DecodedAudioDataSink::Ended() const
@@ -292,194 +325,10 @@ DecodedAudioDataSink::Ended() const
 void
 DecodedAudioDataSink::Drained()
 {
   SINK_LOG("Drained");
   mPlaybackComplete = true;
   mEndPromise.ResolveIfExists(true, __func__);
 }
 
-void
-DecodedAudioDataSink::OnAudioPopped(const RefPtr<MediaData>& aSample)
-{
-  SINK_LOG_V("AudioStream has used an audio packet.");
-  NotifyAudioNeeded();
-}
-
-void
-DecodedAudioDataSink::OnAudioPushed(const RefPtr<MediaData>& aSample)
-{
-  SINK_LOG_V("One new audio packet available.");
-  NotifyAudioNeeded();
-}
-
-void
-DecodedAudioDataSink::NotifyAudioNeeded()
-{
-  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
-             "Not called from the owner's thread");
-
-  if (AudioQueue().IsFinished() && !AudioQueue().GetSize()) {
-    // We have reached the end of the data, drain the resampler.
-    DrainConverter();
-    return;
-  }
-
-  // Always ensure we have two processed frames pending to allow for processing
-  // latency.
-  while (AudioQueue().GetSize() && mProcessedQueue.GetSize() < 2) {
-    RefPtr<AudioData> data =
-      dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
-
-    // Ignore the element with 0 frames and try next.
-    if (!data->mFrames) {
-      continue;
-    }
-
-    if (!mConverter ||
-        (data->mRate != mConverter->InputConfig().Rate() ||
-         data->mChannels != mConverter->InputConfig().Channels())) {
-      SINK_LOG_V("Audio format changed from %u@%uHz to %u@%uHz",
-                 mConverter? mConverter->InputConfig().Channels() : 0,
-                 mConverter ? mConverter->InputConfig().Rate() : 0,
-                 data->mChannels, data->mRate);
-
-      DrainConverter();
-
-      // mFramesParsed indicates the current playtime in frames at the current
-      // input sampling rate. Recalculate it per the new sampling rate.
-      if (mFramesParsed) {
-        // We minimize overflow.
-        uint32_t oldRate = mConverter->InputConfig().Rate();
-        uint32_t newRate = data->mRate;
-        int64_t major = mFramesParsed / oldRate;
-        int64_t remainder = mFramesParsed % oldRate;
-        CheckedInt64 result =
-          CheckedInt64(remainder) * newRate / oldRate + major * oldRate;
-        if (!result.isValid()) {
-          NS_WARNING("Int overflow in DecodedAudioDataSink");
-          mErrored = true;
-          return;
-        }
-        mFramesParsed = result.value();
-      }
-
-      mConverter =
-        MakeUnique<AudioConverter>(
-          AudioConfig(data->mChannels, data->mRate),
-          AudioConfig(mOutputChannels, mOutputRate));
-    }
-
-    // See if there's a gap in the audio. If there is, push silence into the
-    // audio hardware, so we can play across the gap.
-    // Calculate the timestamp of the next chunk of audio in numbers of
-    // samples.
-    CheckedInt64 sampleTime = UsecsToFrames(data->mTime - mStartTime,
-                                            data->mRate);
-    // Calculate the number of frames that have been pushed onto the audio hardware.
-    CheckedInt64 missingFrames = sampleTime - mFramesParsed;
-
-    if (!missingFrames.isValid()) {
-      NS_WARNING("Int overflow in DecodedAudioDataSink");
-      mErrored = true;
-      return;
-    }
-
-    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
-      // The next audio packet begins some time after the end of the last packet
-      // we pushed to the audio hardware. We must push silence into the audio
-      // hardware so that the next audio packet begins playback at the correct
-      // time.
-      missingFrames = std::min<int64_t>(INT32_MAX, missingFrames.value());
-      mFramesParsed += missingFrames.value();
-      // We need to insert silence, first use drained frames if any.
-      missingFrames -= DrainConverter(missingFrames.value());
-      // Insert silence is still needed.
-      if (missingFrames.value()) {
-        AlignedAudioBuffer silenceData(missingFrames.value() * mOutputChannels);
-        if (!silenceData) {
-          NS_WARNING("OOM in DecodedAudioDataSink");
-          mErrored = true;
-          return;
-        }
-        RefPtr<AudioData> silence = CreateAudioFromBuffer(Move(silenceData), data);
-        if (silence) {
-          mProcessedQueue.Push(silence);
-        }
-      }
-    }
-
-    mLastEndTime = data->GetEndTime();
-    mFramesParsed += data->mFrames;
-
-    if (mConverter->InputConfig() != mConverter->OutputConfig()) {
-      AlignedAudioBuffer convertedData =
-        mConverter->Process(AudioSampleBuffer(Move(data->mAudioData))).Forget();
-      data = CreateAudioFromBuffer(Move(convertedData), data);
-      if (!data) {
-        continue;
-      }
-    }
-    mProcessedQueue.Push(data);
-    mLastProcessedPacket = Some(data);
-  }
-}
-
-already_AddRefed<AudioData>
-DecodedAudioDataSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
-                                            AudioData* aReference)
-{
-  uint32_t frames = aBuffer.Length() / mOutputChannels;
-  if (!frames) {
-    return nullptr;
-  }
-  CheckedInt64 duration = FramesToUsecs(frames, mOutputRate);
-  if (!duration.isValid()) {
-    NS_WARNING("Int overflow in DecodedAudioDataSink");
-    mErrored = true;
-    return nullptr;
-  }
-  RefPtr<AudioData> data =
-    new AudioData(aReference->mOffset,
-                  aReference->mTime,
-                  duration.value(),
-                  frames,
-                  Move(aBuffer),
-                  mOutputChannels,
-                  mOutputRate);
-  return data.forget();
-}
-
-uint32_t
-DecodedAudioDataSink::DrainConverter(uint32_t aMaxFrames)
-{
-  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
-
-  if (!mConverter || !mLastProcessedPacket) {
-    // nothing to drain.
-    return 0;
-  }
-
-  RefPtr<AudioData> lastPacket = mLastProcessedPacket.ref();
-  mLastProcessedPacket.reset();
-
-  // To drain we simply provide an empty packet to the audio converter.
-  AlignedAudioBuffer convertedData =
-    mConverter->Process(AudioSampleBuffer(AlignedAudioBuffer())).Forget();
-
-  uint32_t frames = convertedData.Length() / mOutputChannels;
-  if (!convertedData.SetLength(std::min(frames, aMaxFrames) * mOutputChannels)) {
-    // This can never happen as we were reducing the length of convertData.
-    mErrored = true;
-    return 0;
-  }
-
-  RefPtr<AudioData> data =
-    CreateAudioFromBuffer(Move(convertedData), lastPacket);
-  if (!data) {
-    return 0;
-  }
-  mProcessedQueue.Push(data);
-  return data->mFrames;
-}
-
 } // namespace media
 } // namespace mozilla
--- a/dom/media/mediasink/DecodedAudioDataSink.h
+++ b/dom/media/mediasink/DecodedAudioDataSink.h
@@ -23,18 +23,17 @@ namespace mozilla {
 
 class AudioConverter;
 
 namespace media {
 
 class DecodedAudioDataSink : public AudioSink,
                              private AudioStream::DataSource {
 public:
-  DecodedAudioDataSink(AbstractThread* aThread,
-                       MediaQueue<MediaData>& aAudioQueue,
+  DecodedAudioDataSink(MediaQueue<MediaData>& aAudioQueue,
                        int64_t aStartTime,
                        const AudioInfo& aInfo,
                        dom::AudioChannel aChannel);
 
   // Return a promise which will be resolved when DecodedAudioDataSink
   // finishes playing, or rejected if any error.
   RefPtr<GenericPromise> Init(const PlaybackParams& aParams) override;
 
@@ -98,43 +97,20 @@ private:
    * Members to implement AudioStream::DataSource.
    * Used on the callback thread of cubeb.
    */
   // The AudioData at which AudioStream::DataSource is reading.
   RefPtr<AudioData> mCurrentData;
   // Keep track of the read position of mCurrentData.
   UniquePtr<AudioBufferCursor> mCursor;
   // True if there is any error in processing audio data like overflow.
-  Atomic<bool> mErrored;
+  bool mErrored = false;
 
   // Set on the callback thread of cubeb once the stream has drained.
   Atomic<bool> mPlaybackComplete;
 
-  const RefPtr<AbstractThread> mOwnerThread;
-
-  // Audio Processing objects and methods
-  void OnAudioPopped(const RefPtr<MediaData>& aSample);
-  void OnAudioPushed(const RefPtr<MediaData>& aSample);
-  void NotifyAudioNeeded();
-  // Drain the converter and add the output to the processed audio queue.
-  // A maximum of aMaxFrames will be added.
-  uint32_t DrainConverter(uint32_t aMaxFrames = UINT32_MAX);
-  already_AddRefed<AudioData> CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
-                                                    AudioData* aReference);
   UniquePtr<AudioConverter> mConverter;
-  MediaQueue<AudioData> mProcessedQueue;
-  MediaEventListener mAudioQueueListener;
-  MediaEventListener mProcessedQueueListener;
-  // Number of frames processed from AudioQueue(). Used to determine gaps in
-  // the input stream. It indicates the time in frames since playback started
-  // at the current input framerate.
-  int64_t mFramesParsed;
-  Maybe<RefPtr<AudioData>> mLastProcessedPacket;
-  int64_t mLastEndTime;
-  // Never modifed after construction.
-  uint32_t mOutputRate;
-  uint32_t mOutputChannels;
 };
 
 } // namespace media
 } // namespace mozilla
 
 #endif
deleted file mode 100644
--- a/media/libspeex_resampler/handle-memory-error.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-diff --git a/media/libspeex_resampler/src/resample.c b/media/libspeex_resampler/src/resample.c
-index 83ad119..a3859e3 100644
---- a/media/libspeex_resampler/src/resample.c
-+++ b/media/libspeex_resampler/src/resample.c
-@@ -811,6 +811,12 @@ EXPORT SpeexResamplerState *speex_resampler_init_frac(spx_uint32_t nb_channels,
-       return NULL;
-    }
-    st = (SpeexResamplerState *)speex_alloc(sizeof(SpeexResamplerState));
-+   if (!st)
-+   {
-+      if (err)
-+         *err = RESAMPLER_ERR_ALLOC_FAILED;
-+      return NULL;
-+   }
-    st->initialised = 0;
-    st->started = 0;
-    st->in_rate = 0;
-@@ -832,9 +838,12 @@ EXPORT SpeexResamplerState *speex_resampler_init_frac(spx_uint32_t nb_channels,
-    st->buffer_size = 160;
-    
-    /* Per channel data */
--   st->last_sample = (spx_int32_t*)speex_alloc(nb_channels*sizeof(spx_int32_t));
--   st->magic_samples = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t));
--   st->samp_frac_num = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t));
-+   if (!(st->last_sample = (spx_int32_t*)speex_alloc(nb_channels*sizeof(spx_int32_t))))
-+      goto fail;
-+   if (!(st->magic_samples = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t))))
-+      goto fail;
-+   if (!(st->samp_frac_num = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t))))
-+      goto fail;
-    for (i=0;i<nb_channels;i++)
-    {
-       st->last_sample[i] = 0;
-@@ -857,6 +866,12 @@ EXPORT SpeexResamplerState *speex_resampler_init_frac(spx_uint32_t nb_channels,
-       *err = filter_err;
- 
-    return st;
-+
-+fail:
-+   if (err)
-+      *err = RESAMPLER_ERR_ALLOC_FAILED;
-+   speex_resampler_destroy(st);
-+   return NULL;
- }
- 
- EXPORT void speex_resampler_destroy(SpeexResamplerState *st)
--- a/media/libspeex_resampler/src/resample.c
+++ b/media/libspeex_resampler/src/resample.c
@@ -806,22 +806,16 @@ EXPORT SpeexResamplerState *speex_resamp
 
    if (quality > 10 || quality < 0)
    {
       if (err)
          *err = RESAMPLER_ERR_INVALID_ARG;
       return NULL;
    }
    st = (SpeexResamplerState *)speex_alloc(sizeof(SpeexResamplerState));
-   if (!st)
-   {
-      if (err)
-         *err = RESAMPLER_ERR_ALLOC_FAILED;
-      return NULL;
-   }
    st->initialised = 0;
    st->started = 0;
    st->in_rate = 0;
    st->out_rate = 0;
    st->num_rate = 0;
    st->den_rate = 0;
    st->quality = -1;
    st->sinc_table_length = 0;
@@ -833,22 +827,19 @@ EXPORT SpeexResamplerState *speex_resamp
    st->cutoff = 1.f;
    st->nb_channels = nb_channels;
    st->in_stride = 1;
    st->out_stride = 1;
    
    st->buffer_size = 160;
    
    /* Per channel data */
-   if (!(st->last_sample = (spx_int32_t*)speex_alloc(nb_channels*sizeof(spx_int32_t))))
-      goto fail;
-   if (!(st->magic_samples = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t))))
-      goto fail;
-   if (!(st->samp_frac_num = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t))))
-      goto fail;
+   st->last_sample = (spx_int32_t*)speex_alloc(nb_channels*sizeof(spx_int32_t));
+   st->magic_samples = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t));
+   st->samp_frac_num = (spx_uint32_t*)speex_alloc(nb_channels*sizeof(spx_uint32_t));
    for (i=0;i<nb_channels;i++)
    {
       st->last_sample[i] = 0;
       st->magic_samples[i] = 0;
       st->samp_frac_num[i] = 0;
    }
 
    speex_resampler_set_quality(st, quality);
@@ -861,22 +852,16 @@ EXPORT SpeexResamplerState *speex_resamp
    } else {
       speex_resampler_destroy(st);
       st = NULL;
    }
    if (err)
       *err = filter_err;
 
    return st;
-
-fail:
-   if (err)
-      *err = RESAMPLER_ERR_ALLOC_FAILED;
-   speex_resampler_destroy(st);
-   return NULL;
 }
 
 EXPORT void speex_resampler_destroy(SpeexResamplerState *st)
 {
    speex_free(st->mem);
    speex_free(st->sinc_table);
    speex_free(st->last_sample);
    speex_free(st->magic_samples);
--- a/media/libspeex_resampler/update.sh
+++ b/media/libspeex_resampler/update.sh
@@ -20,9 +20,8 @@ cp $1/AUTHORS .
 cp $1/COPYING .
 
 # apply outstanding local patches
 patch -p3 < outside-speex.patch
 patch -p3 < simd-detect-runtime.patch
 patch -p3 < set-skip-frac.patch
 patch -p3 < hugemem.patch
 patch -p3 < remove-empty-asm-clobber.patch
-patch -p3 < handle-memory-error.patch