Backed out 13 changesets (bug 875277, bug 897092, bug 856361) for webaudio busta=ge
author: Phil Ringnalda <philringnalda@gmail.com>
Sat, 10 Aug 2013 17:55:46 -0700
changeset 149002 4d652113720b02df038cd87fdd51160e8191caa9
parent 149001 64559423b523999065c220a32765c454c6ef65cc
child 149003 d3e60eab3da57de72cb3d4d89cc8cfb738ff77f6
push id: 4110
push user: philringnalda@gmail.com
push date: Sun, 11 Aug 2013 00:56:10 +0000
treeherder: mozilla-aurora@4d652113720b
bugs: 875277, 897092, 856361
milestone: 25.0a2
backs out: 2d110609f4d396b96aa3ea0f9df69ee4ec9fe418
60e60393b1a4339d28554721a9a0e24af2813f95
09966c9bef144047228e6c83fa59cc2c3e85942b
e72860243ab119b5bfa9bf4a4239383a4371b117
f666c107b7717feedaae6fbe96eec53e81cec40a
e6ac6390736db06ea0d77ae72a0709e49f0b27fe
3c904aad516b2de72b77e752345f7dd8a83c8db5
340f3aea16694933676d6ec84fceeb237cbdcbc7
1b9e20fef61fbfaf446b746956a4380f9aba6c80
74a10390578c847d9df8b4648abf7be56dd45dec
5d16db2ebb377a8d3146a99b7fe3907589641df7
32aab56d0aa5ec741a2f16fc3dd4615abf1ee14e
109bf9967b3d3b6e872fcb0b2db5345e04aa89f9
Backed out 13 changesets (bug 875277, bug 897092, bug 856361) for webaudio busta=ge

Backed out changeset 2d110609f4d3 (bug 897092)
Backed out changeset 60e60393b1a4 (bug 856361)
Backed out changeset 09966c9bef14 (bug 856361)
Backed out changeset e72860243ab1 (bug 856361)
Backed out changeset f666c107b771 (bug 856361)
Backed out changeset e6ac6390736d (bug 856361)
Backed out changeset 3c904aad516b (bug 856361)
Backed out changeset 340f3aea1669 (bug 856361)
Backed out changeset 1b9e20fef61f (bug 856361)
Backed out changeset 74a10390578c (bug 856361)
Backed out changeset 5d16db2ebb37 (bug 856361)
Backed out changeset 32aab56d0aa5 (bug 875277)
Backed out changeset 109bf9967b3d (bug 875277)
content/html/content/src/HTMLMediaElement.cpp
content/media/AudioEventTimeline.h
content/media/AudioNodeExternalInputStream.cpp
content/media/AudioNodeExternalInputStream.h
content/media/AudioNodeStream.cpp
content/media/AudioNodeStream.h
content/media/DOMMediaStream.cpp
content/media/DOMMediaStream.h
content/media/MediaDecoder.cpp
content/media/MediaDecoder.h
content/media/MediaDecoderStateMachine.cpp
content/media/MediaStreamGraph.cpp
content/media/MediaStreamGraph.h
content/media/TrackUnionStream.h
content/media/dash/DASHDecoder.cpp
content/media/dash/DASHDecoder.h
content/media/dash/DASHRepDecoder.cpp
content/media/dash/DASHRepDecoder.h
content/media/moz.build
content/media/webaudio/AudioContext.cpp
content/media/webaudio/AudioContext.h
content/media/webaudio/AudioParamTimeline.h
content/media/webaudio/MediaStreamAudioSourceNode.cpp
content/media/webaudio/MediaStreamAudioSourceNode.h
content/media/webaudio/WaveShaperNode.cpp
content/media/webaudio/WaveShaperNode.h
content/media/webaudio/compiledtest/TestAudioEventTimeline.cpp
content/media/webaudio/moz.build
content/media/webaudio/test/Makefile.in
content/media/webaudio/test/test_audioParamSetTargetAtTime.html
content/media/webaudio/test/test_mediaStreamAudioSourceNode.html
content/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
content/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html
content/media/webaudio/test/webaudio.js
content/media/webspeech/recognition/SpeechStreamListener.cpp
content/media/webspeech/recognition/SpeechStreamListener.h
dom/webidl/AudioContext.webidl
dom/webidl/MediaStreamAudioSourceNode.webidl
dom/webidl/WaveShaperNode.webidl
dom/webidl/WebIDL.mk
--- a/content/html/content/src/HTMLMediaElement.cpp
+++ b/content/html/content/src/HTMLMediaElement.cpp
@@ -2509,50 +2509,42 @@ nsresult HTMLMediaElement::FinishDecoder
 
   // Force a same-origin check before allowing events for this media resource.
   mMediaSecurityVerified = false;
 
   // The new stream has not been suspended by us.
   mPausedForInactiveDocumentOrChannel = false;
   mEventDeliveryPaused = false;
   mPendingEvents.Clear();
-  // Set mDecoder now so if methods like GetCurrentSrc get called between
-  // here and Load(), they work.
-  mDecoder = aDecoder;
-
-  // Tell aDecoder about its MediaResource now so things like principals are
-  // available immediately.
-  aDecoder->SetResource(aStream);
+
   aDecoder->SetAudioChannelType(mAudioChannelType);
   aDecoder->SetAudioCaptured(mAudioCaptured);
   aDecoder->SetVolume(mMuted ? 0.0 : mVolume);
   aDecoder->SetPreservesPitch(mPreservesPitch);
   aDecoder->SetPlaybackRate(mPlaybackRate);
-  // Update decoder principal before we start decoding, since it
-  // can affect how we feed data to MediaStreams
-  NotifyDecoderPrincipalChanged();
 
   for (uint32_t i = 0; i < mOutputStreams.Length(); ++i) {
     OutputMediaStream* ms = &mOutputStreams[i];
     aDecoder->AddOutputStream(ms->mStream->GetStream()->AsProcessedStream(),
         ms->mFinishWhenEnded);
   }
 
-  nsresult rv = aDecoder->Load(aListener, aCloneDonor);
+  nsresult rv = aDecoder->Load(aStream, aListener, aCloneDonor);
   if (NS_FAILED(rv)) {
-    mDecoder = nullptr;
     LOG(PR_LOG_DEBUG, ("%p Failed to load for decoder %p", this, aDecoder));
     return rv;
   }
 
   // Decoder successfully created, the decoder now owns the MediaResource
   // which owns the channel.
   mChannel = nullptr;
 
+  mDecoder = aDecoder;
   AddMediaElementToURITable();
+  NotifyDecoderPrincipalChanged();
 
   // We may want to suspend the new stream now.
   // This will also do an AddRemoveSelfReference.
   NotifyOwnerDocumentActivityChanged();
 
   if (!mPaused) {
     SetPlayedOrSeeked(true);
     if (!mPausedForInactiveDocumentOrChannel) {
@@ -3256,24 +3248,19 @@ already_AddRefed<nsIPrincipal> HTMLMedia
     nsRefPtr<nsIPrincipal> principal = mSrcStream->GetPrincipal();
     return principal.forget();
   }
   return nullptr;
 }
 
 void HTMLMediaElement::NotifyDecoderPrincipalChanged()
 {
-  nsRefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
-
-  bool subsumes;
-  mDecoder->UpdateSameOriginStatus(
-    NS_SUCCEEDED(NodePrincipal()->Subsumes(principal, &subsumes)) && subsumes);
-
   for (uint32_t i = 0; i < mOutputStreams.Length(); ++i) {
     OutputMediaStream* ms = &mOutputStreams[i];
+    nsRefPtr<nsIPrincipal> principal = GetCurrentPrincipal();
     ms->mStream->CombineWithPrincipal(principal);
   }
 }
 
 void HTMLMediaElement::UpdateMediaSize(nsIntSize size)
 {
   mMediaSize = size;
 }
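
The lines removed from NotifyDecoderPrincipalChanged() above gated decoder output on a same-origin principal check. For reference, a minimal standalone sketch of the subsumption idiom those lines used (not this file's exact code):

    // Returns true when aNodePrincipal subsumes aMediaPrincipal, i.e. the
    // media is same-origin with the element; a Subsumes() failure counts
    // as "not same-origin".
    static bool IsSameOriginSketch(nsIPrincipal* aNodePrincipal,
                                   nsIPrincipal* aMediaPrincipal)
    {
      bool subsumes = false;
      return NS_SUCCEEDED(aNodePrincipal->Subsumes(aMediaPrincipal, &subsumes)) &&
             subsumes;
    }
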
--- a/content/media/AudioEventTimeline.h
+++ b/content/media/AudioEventTimeline.h
@@ -155,19 +155,17 @@ inline int64_t AudioTimelineEvent::Time<
  * ErrorResult is a type which satisfies the following:
  *  - Implements a Throw() method taking an nsresult argument, representing an error code.
  */
 template <class ErrorResult>
 class AudioEventTimeline
 {
 public:
   explicit AudioEventTimeline(float aDefaultValue)
-    : mValue(aDefaultValue),
-      mComputedValue(aDefaultValue),
-      mLastComputedValue(aDefaultValue)
+    : mValue(aDefaultValue)
   {
   }
 
   bool HasSimpleValue() const
   {
     return mEvents.IsEmpty();
   }
 
@@ -183,17 +181,17 @@ public:
     // TODO: Return the current value based on the timeline of the AudioContext
     return mValue;
   }
 
   void SetValue(float aValue)
   {
     // Silently don't change anything if there are any events
     if (mEvents.IsEmpty()) {
-      mLastComputedValue = mComputedValue = mValue = aValue;
+      mValue = aValue;
     }
   }
 
   void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
   {
     InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetValue, aStartTime, aValue), aRv);
   }
 
@@ -234,69 +232,47 @@ public:
     }
   }
 
   void CancelAllEvents()
   {
     mEvents.Clear();
   }
 
-  static bool TimesEqual(int64_t aLhs, int64_t aRhs)
-  {
-    return aLhs == aRhs;
-  }
-
-  // Since we are going to accumulate error by adding 0.01 multiple times in a
-  // loop, we want to fuzz the equality check in GetValueAtTime.
-  static bool TimesEqual(double aLhs, double aRhs)
-  {
-    const float kEpsilon = 0.0000000001f;
-    return fabs(aLhs - aRhs) < kEpsilon;
-  }
-
-  template<class TimeType>
-  float GetValueAtTime(TimeType aTime)
-  {
-    mComputedValue = GetValueAtTimeHelper(aTime);
-    return mComputedValue;
-  }
-
   // This method computes the AudioParam value at a given time based on the event timeline
   template<class TimeType>
-  float GetValueAtTimeHelper(TimeType aTime)
+  float GetValueAtTime(TimeType aTime) const
   {
     const AudioTimelineEvent* previous = nullptr;
     const AudioTimelineEvent* next = nullptr;
 
     bool bailOut = false;
     for (unsigned i = 0; !bailOut && i < mEvents.Length(); ++i) {
       switch (mEvents[i].mType) {
       case AudioTimelineEvent::SetValue:
       case AudioTimelineEvent::SetTarget:
       case AudioTimelineEvent::LinearRamp:
       case AudioTimelineEvent::ExponentialRamp:
       case AudioTimelineEvent::SetValueCurve:
-        if (TimesEqual(aTime, mEvents[i].template Time<TimeType>())) {
-          mLastComputedValue = mComputedValue;
+        if (aTime == mEvents[i].template Time<TimeType>()) {
           // Find the last event with the same time
           do {
             ++i;
           } while (i < mEvents.Length() &&
                    aTime == mEvents[i].template Time<TimeType>());
 
           // SetTarget nodes can be handled no matter what their next node is (if they have one)
           if (mEvents[i - 1].mType == AudioTimelineEvent::SetTarget) {
-            // Follow the curve, without regard to the next event, starting at
-            // the last value of the last event.
+            // Follow the curve, without regard to the next node
             return ExponentialApproach(mEvents[i - 1].template Time<TimeType>(),
-                                       mLastComputedValue, mEvents[i - 1].mValue,
+                                       mValue, mEvents[i - 1].mValue,
                                        mEvents[i - 1].mTimeConstant, aTime);
           }
 
-          // SetValueCurve events can be handled no matter what their event node is (if they have one)
+          // SetValueCurve events can be handled no matter what their next node is (if they have one)
           if (mEvents[i - 1].mType == AudioTimelineEvent::SetValueCurve) {
             return ExtractValueFromCurve(mEvents[i - 1].template Time<TimeType>(),
                                          mEvents[i - 1].mCurve,
                                          mEvents[i - 1].mCurveLength,
                                          mEvents[i - 1].mDuration, aTime);
           }
 
           // For other event types
@@ -325,18 +301,18 @@ public:
 
     // If the requested time is before all of the existing events
     if (!previous) {
       return mValue;
     }
 
     // SetTarget nodes can be handled no matter what their next node is (if they have one)
     if (previous->mType == AudioTimelineEvent::SetTarget) {
-      return ExponentialApproach(previous->template Time<TimeType>(),
-                                 mLastComputedValue, previous->mValue,
+      // Follow the curve, without regard to the next node
+      return ExponentialApproach(previous->template Time<TimeType>(), mValue, previous->mValue,
                                  previous->mTimeConstant, aTime);
     }
 
     // SetValueCurve events can be handled no matter what their next node is (if they have one)
     if (previous->mType == AudioTimelineEvent::SetValueCurve) {
       return ExtractValueFromCurve(previous->template Time<TimeType>(),
                                    previous->mCurve, previous->mCurveLength,
                                    previous->mDuration, aTime);
@@ -559,19 +535,15 @@ private:
 private:
   // This is a sorted array of the events in the timeline.  Queries of this
   // data structure should probably be more frequent than modifications to it,
   // and that is the reason why we're using a simple array as the data structure.
   // We can optimize this in the future if the performance of the array ends up
   // being a bottleneck.
   nsTArray<AudioTimelineEvent> mEvents;
   float mValue;
-  // This is the value of this AudioParam we computed at the last call.
-  float mComputedValue;
-  // This is the value of this AudioParam at the last tick of the previous event.
-  float mLastComputedValue;
 };
 
 }
 }
 
 #endif
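
Both versions above feed a starting value into ExponentialApproach() for SetTarget events (mLastComputedValue before this backout, mValue after it). A hedged sketch of the curve that helper is assumed to compute, the standard setTargetAtTime form v(t) = v1 + (v0 - v1) * exp(-(t - t0) / tau):

    #include <cmath>

    // aV0 is the value the curve starts from at time aT0, aV1 the target
    // value, and aTimeConstant the tau of the exponential decay.
    static float ExponentialApproachSketch(double aT0, double aV0, float aV1,
                                           double aTimeConstant, double aT)
    {
      return float(aV1 + (aV0 - aV1) * std::exp(-(aT - aT0) / aTimeConstant));
    }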
 
deleted file mode 100644
--- a/content/media/AudioNodeExternalInputStream.cpp
+++ /dev/null
@@ -1,492 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "MediaStreamGraphImpl.h"
-#include "AudioNodeEngine.h"
-#include "AudioNodeExternalInputStream.h"
-#include "speex/speex_resampler.h"
-
-using namespace mozilla::dom;
-
-namespace mozilla {
-
-AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
-  : AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate)
-  , mCurrentOutputPosition(0)
-{
-  MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
-}
-
-AudioNodeExternalInputStream::~AudioNodeExternalInputStream()
-{
-  MOZ_COUNT_DTOR(AudioNodeExternalInputStream);
-}
-
-AudioNodeExternalInputStream::TrackMapEntry::~TrackMapEntry()
-{
-  if (mResampler) {
-    speex_resampler_destroy(mResampler);
-  }
-}
-
-uint32_t
-AudioNodeExternalInputStream::GetTrackMapEntry(const StreamBuffer::Track& aTrack,
-                                               GraphTime aFrom)
-{
-  AudioSegment* segment = aTrack.Get<AudioSegment>();
-
-  // Check the map for an existing entry corresponding to the input track.
-  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
-    TrackMapEntry* map = &mTrackMap[i];
-    if (map->mTrackID == aTrack.GetID()) {
-      return i;
-    }
-  }
-
-  // Determine channel count by finding the first entry with non-silent data.
-  AudioSegment::ChunkIterator ci(*segment);
-  while (!ci.IsEnded() && ci->IsNull()) {
-    ci.Next();
-  }
-  if (ci.IsEnded()) {
-    // The track is entirely silence so far, we can ignore it for now.
-    return nsTArray<TrackMapEntry>::NoIndex;
-  }
-
-  // Create a speex resampler with the same sample rate and number of channels
-  // as the track.
-  SpeexResamplerState* resampler = nullptr;
-  uint32_t channelCount = (*ci).mChannelData.Length();
-  if (aTrack.GetRate() != mSampleRate) {
-    resampler = speex_resampler_init(channelCount,
-      aTrack.GetRate(), mSampleRate, SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr);
-    speex_resampler_skip_zeros(resampler);
-  }
-
-  TrackMapEntry* map = mTrackMap.AppendElement();
-  map->mEndOfConsumedInputTicks = 0;
-  map->mEndOfLastInputIntervalInInputStream = -1;
-  map->mEndOfLastInputIntervalInOutputStream = -1;
-  map->mSamplesPassedToResampler =
-    TimeToTicksRoundUp(aTrack.GetRate(), GraphTimeToStreamTime(aFrom));
-  map->mResampler = resampler;
-  map->mResamplerChannelCount = channelCount;
-  map->mTrackID = aTrack.GetID();
-  return mTrackMap.Length() - 1;
-}
-
-static const uint32_t SPEEX_RESAMPLER_PROCESS_MAX_OUTPUT = 1000;
-
-template <typename T> static int
-SpeexResamplerProcess(SpeexResamplerState* aResampler,
-                      uint32_t aChannel,
-                      const T* aInput, uint32_t* aIn,
-                      float* aOutput, uint32_t* aOut);
-
-template <> int
-SpeexResamplerProcess<float>(SpeexResamplerState* aResampler,
-                             uint32_t aChannel,
-                             const float* aInput, uint32_t* aIn,
-                             float* aOutput, uint32_t* aOut)
-{
-  NS_ASSERTION(*aOut <= SPEEX_RESAMPLER_PROCESS_MAX_OUTPUT, "Bad aOut");
-  return speex_resampler_process_float(aResampler, aChannel, aInput, aIn, aOutput, aOut);
-}
-
-template <> int
-SpeexResamplerProcess<int16_t>(SpeexResamplerState* aResampler,
-                               uint32_t aChannel,
-                               const int16_t* aInput, uint32_t* aIn,
-                               float* aOutput, uint32_t* aOut)
-{
-  NS_ASSERTION(*aOut <= SPEEX_RESAMPLER_PROCESS_MAX_OUTPUT, "Bad aOut");
-  int16_t tmp[SPEEX_RESAMPLER_PROCESS_MAX_OUTPUT];
-  int result = speex_resampler_process_int(aResampler, aChannel, aInput, aIn, tmp, aOut);
-  if (result == RESAMPLER_ERR_SUCCESS) {
-    for (uint32_t i = 0; i < *aOut; ++i) {
-      aOutput[i] = AudioSampleToFloat(tmp[i]);
-    }
-  }
-  return result;
-}
-
-template <typename T> static void
-ResampleChannelBuffer(SpeexResamplerState* aResampler, uint32_t aChannel,
-                      const T* aInput, uint32_t aInputDuration,
-                      nsTArray<float>* aOutput)
-{
-  if (!aResampler) {
-    float* out = aOutput->AppendElements(aInputDuration);
-    for (uint32_t i = 0; i < aInputDuration; ++i) {
-      out[i] = AudioSampleToFloat(aInput[i]);
-    }
-    return;
-  }
-
-  uint32_t processed = 0;
-  while (processed < aInputDuration) {
-    uint32_t prevLength = aOutput->Length();
-    float* output = aOutput->AppendElements(SPEEX_RESAMPLER_PROCESS_MAX_OUTPUT);
-    uint32_t in = aInputDuration - processed;
-    uint32_t out = aOutput->Length() - prevLength;
-    SpeexResamplerProcess(aResampler, aChannel,
-                          aInput + processed, &in,
-                          output, &out);
-    processed += in;
-    aOutput->SetLength(prevLength + out);
-  }
-}
-
-class SharedChannelArrayBuffer : public ThreadSharedObject {
-public:
-  SharedChannelArrayBuffer(nsTArray<nsTArray<float> >* aBuffers)
-  {
-    mBuffers.SwapElements(*aBuffers);
-  }
-  nsTArray<nsTArray<float> > mBuffers;
-};
-
-void
-AudioNodeExternalInputStream::TrackMapEntry::ResampleChannels(const nsTArray<const void*>& aBuffers,
-                                                              uint32_t aInputDuration,
-                                                              AudioSampleFormat aFormat,
-                                                              float aVolume)
-{
-  NS_ASSERTION(aBuffers.Length() == mResamplerChannelCount,
-               "Channel count must be correct here");
-
-  nsAutoTArray<nsTArray<float>,2> resampledBuffers;
-  resampledBuffers.SetLength(aBuffers.Length());
-  nsTArray<float> samplesAdjustedForVolume;
-  nsAutoTArray<const float*,2> bufferPtrs;
-  bufferPtrs.SetLength(aBuffers.Length());
-
-  for (uint32_t i = 0; i < aBuffers.Length(); ++i) {
-    AudioSampleFormat format = aFormat;
-    const void* buffer = aBuffers[i];
-
-    if (aVolume != 1.0f) {
-      format = AUDIO_FORMAT_FLOAT32;
-      samplesAdjustedForVolume.SetLength(aInputDuration);
-      switch (aFormat) {
-      case AUDIO_FORMAT_FLOAT32:
-        ConvertAudioSamplesWithScale(static_cast<const float*>(buffer),
-                                     samplesAdjustedForVolume.Elements(),
-                                     aInputDuration, aVolume);
-        break;
-      case AUDIO_FORMAT_S16:
-        ConvertAudioSamplesWithScale(static_cast<const int16_t*>(buffer),
-                                     samplesAdjustedForVolume.Elements(),
-                                     aInputDuration, aVolume);
-        break;
-      default:
-        MOZ_ASSERT(false);
-        return;
-      }
-      buffer = samplesAdjustedForVolume.Elements();
-    }
-
-    switch (format) {
-    case AUDIO_FORMAT_FLOAT32:
-      ResampleChannelBuffer(mResampler, i,
-                            static_cast<const float*>(buffer),
-                            aInputDuration, &resampledBuffers[i]);
-      break;
-    case AUDIO_FORMAT_S16:
-      ResampleChannelBuffer(mResampler, i,
-                            static_cast<const int16_t*>(buffer),
-                            aInputDuration, &resampledBuffers[i]);
-      break;
-    default:
-      MOZ_ASSERT(false);
-      return;
-    }
-    bufferPtrs[i] = resampledBuffers[i].Elements();
-    NS_ASSERTION(i == 0 ||
-                 resampledBuffers[i].Length() == resampledBuffers[0].Length(),
-                 "Resampler made different decisions for different channels!");
-  }
-
-  uint32_t length = resampledBuffers[0].Length();
-  nsRefPtr<ThreadSharedObject> buf = new SharedChannelArrayBuffer(&resampledBuffers);
-  mResampledData.AppendFrames(buf.forget(), bufferPtrs, length);
-}
-
-void
-AudioNodeExternalInputStream::TrackMapEntry::ResampleInputData(AudioSegment* aSegment)
-{
-  AudioSegment::ChunkIterator ci(*aSegment);
-  while (!ci.IsEnded()) {
-    const AudioChunk& chunk = *ci;
-    nsAutoTArray<const void*,2> channels;
-    if (chunk.GetDuration() > UINT32_MAX) {
-      // This will cause us to OOM or overflow below. So let's just bail.
-      NS_ERROR("Chunk duration out of bounds");
-      return;
-    }
-    uint32_t duration = uint32_t(chunk.GetDuration());
-
-    if (chunk.IsNull()) {
-      nsAutoTArray<AudioDataValue,1024> silence;
-      silence.SetLength(duration);
-      PodZero(silence.Elements(), silence.Length());
-      channels.SetLength(mResamplerChannelCount);
-      for (uint32_t i = 0; i < channels.Length(); ++i) {
-        channels[i] = silence.Elements();
-      }
-      ResampleChannels(channels, duration, AUDIO_OUTPUT_FORMAT, 0.0f);
-    } else if (chunk.mChannelData.Length() == mResamplerChannelCount) {
-      // Common case, since mResamplerChannelCount is set to the first chunk's
-      // number of channels.
-      channels.AppendElements(chunk.mChannelData);
-      ResampleChannels(channels, duration, chunk.mBufferFormat, chunk.mVolume);
-    } else {
-      // Uncommon case. Since downmixing requires channels to be floats,
-      // convert everything to floats now.
-      uint32_t upChannels = GetAudioChannelsSuperset(chunk.mChannelData.Length(), mResamplerChannelCount);
-      nsTArray<float> buffer;
-      if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
-        channels.AppendElements(chunk.mChannelData);
-      } else {
-        NS_ASSERTION(chunk.mBufferFormat == AUDIO_FORMAT_S16, "Unknown format");
-        if (duration > UINT32_MAX/chunk.mChannelData.Length()) {
-          NS_ERROR("Chunk duration out of bounds");
-          return;
-        }
-        buffer.SetLength(chunk.mChannelData.Length()*duration);
-        for (uint32_t i = 0; i < chunk.mChannelData.Length(); ++i) {
-          const int16_t* samples = static_cast<const int16_t*>(chunk.mChannelData[i]);
-          float* converted = &buffer[i*duration];
-          for (uint32_t j = 0; j < duration; ++j) {
-            converted[j] = AudioSampleToFloat(samples[j]);
-          }
-          channels.AppendElement(converted);
-        }
-      }
-      nsTArray<float> zeroes;
-      if (channels.Length() < upChannels) {
-        zeroes.SetLength(duration);
-        PodZero(zeroes.Elements(), zeroes.Length());
-        AudioChannelsUpMix(&channels, upChannels, zeroes.Elements());
-      }
-      if (channels.Length() == mResamplerChannelCount) {
-        ResampleChannels(channels, duration, AUDIO_FORMAT_FLOAT32, chunk.mVolume);
-      } else {
-        nsTArray<float> output;
-        if (duration > UINT32_MAX/mResamplerChannelCount) {
-          NS_ERROR("Chunk duration out of bounds");
-          return;
-        }
-        output.SetLength(duration*mResamplerChannelCount);
-        nsAutoTArray<float*,2> outputPtrs;
-        nsAutoTArray<const void*,2> outputPtrsConst;
-        for (uint32_t i = 0; i < mResamplerChannelCount; ++i) {
-          outputPtrs.AppendElement(output.Elements() + i*duration);
-          outputPtrsConst.AppendElement(outputPtrs[i]);
-        }
-        AudioChannelsDownMix(channels, outputPtrs.Elements(), outputPtrs.Length(), duration);
-        ResampleChannels(outputPtrsConst, duration, AUDIO_FORMAT_FLOAT32, chunk.mVolume);
-      }
-    }
-    ci.Next();
-  }
-}
-
-/**
- * Copies the data in aInput to aOffsetInBlock within aBlock. All samples must
- * be float. Both chunks must have the same number of channels (or else
- * aInput is null). aBlock must have been allocated with AllocateInputBlock.
- */
-static void
-CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock, uint32_t aOffsetInBlock)
-{
-  uint32_t d = aInput.GetDuration();
-  for (uint32_t i = 0; i < aBlock->mChannelData.Length(); ++i) {
-    float* out = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[i])) +
-      aOffsetInBlock;
-    if (aInput.IsNull()) {
-      PodZero(out, d);
-    } else {
-      const float* in = static_cast<const float*>(aInput.mChannelData[i]);
-      ConvertAudioSamplesWithScale(in, out, d, aInput.mVolume);
-    }
-  }
-}
-
-/**
- * Converts the data in aSegment to a single chunk aChunk. Every chunk in
- * aSegment must have the same number of channels (or be null). aSegment must have
- * duration WEBAUDIO_BLOCK_SIZE. Every chunk in aSegment must be in float format.
- */
-static void
-ConvertSegmentToAudioBlock(AudioSegment* aSegment, AudioChunk* aBlock)
-{
-  NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration");
-
-  {
-    AudioSegment::ChunkIterator ci(*aSegment);
-    NS_ASSERTION(!ci.IsEnded(), "Segment must have at least one chunk");
-    AudioChunk& firstChunk = *ci;
-    ci.Next();
-    if (ci.IsEnded()) {
-      *aBlock = firstChunk;
-      return;
-    }
-
-    while (ci->IsNull() && !ci.IsEnded()) {
-      ci.Next();
-    }
-    if (ci.IsEnded()) {
-      // All null.
-      aBlock->SetNull(WEBAUDIO_BLOCK_SIZE);
-      return;
-    }
-
-    AllocateAudioBlock(ci->mChannelData.Length(), aBlock);
-  }
-
-  AudioSegment::ChunkIterator ci(*aSegment);
-  uint32_t duration = 0;
-  while (!ci.IsEnded()) {
-    CopyChunkToBlock(*ci, aBlock, duration);
-    duration += ci->GetDuration();
-    ci.Next();
-  }
-}
-
-void
-AudioNodeExternalInputStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
-{
-  // According to spec, number of outputs is always 1.
-  mLastChunks.SetLength(1);
-
-  // GC stuff can result in our input stream being destroyed before this stream.
-  // Handle that.
-  if (mInputs.IsEmpty()) {
-    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
-    AdvanceOutputSegment();
-    return;
-  }
-
-  MOZ_ASSERT(mInputs.Length() == 1);
-
-  MediaStream* source = mInputs[0]->GetSource();
-  nsAutoTArray<AudioSegment,1> audioSegments;
-  nsAutoTArray<bool,1> trackMapEntriesUsed;
-  uint32_t inputChannels = 0;
-  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
-       !tracks.IsEnded(); tracks.Next()) {
-    const StreamBuffer::Track& inputTrack = *tracks;
-    // Create a TrackMapEntry if necessary.
-    uint32_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
-    // Maybe there's nothing in this track yet. If so, ignore it. (While the
-    // track is only playing silence, we may not be able to determine the
-    // correct number of channels to start resampling.)
-    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
-      continue;
-    }
-
-    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
-      trackMapEntriesUsed.AppendElement(false);
-    }
-    trackMapEntriesUsed[trackMapIndex] = true;
-
-    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
-    AudioSegment segment;
-    GraphTime next;
-    TrackRate inputTrackRate = inputTrack.GetRate();
-    for (GraphTime t = aFrom; t < aTo; t = next) {
-      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
-      interval.mEnd = std::min(interval.mEnd, aTo);
-      if (interval.mStart >= interval.mEnd)
-        break;
-      next = interval.mEnd;
-
-      // Ticks >= startTicks and < endTicks are in the interval
-      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
-      TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
-      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
-      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
-                   "Samples missing");
-      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
-      TrackTicks ticks = endTicks - startTicks;
-
-      if (interval.mInputIsBlocked) {
-        segment.AppendNullData(ticks);
-      } else {
-        // See comments in TrackUnionStream::CopyTrackData
-        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
-        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
-        TrackTicks inputTrackEndPoint =
-            inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;
-
-        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
-            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
-          // Start of a new series of intervals where neither stream is blocked.
-          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
-        }
-        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
-        TrackTicks inputEndTicks = inputStartTicks + ticks;
-        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
-        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
-        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;
-
-        if (inputStartTicks < 0) {
-          // Data before the start of the track is just null.
-          segment.AppendNullData(-inputStartTicks);
-          inputStartTicks = 0;
-        }
-        if (inputEndTicks > inputStartTicks) {
-          segment.AppendSlice(*inputTrack.GetSegment(),
-                              std::min(inputTrackEndPoint, inputStartTicks),
-                              std::min(inputTrackEndPoint, inputEndTicks));
-        }
-        // Pad if we're looking past the end of the track
-        segment.AppendNullData(std::max<TrackTicks>(0, inputEndTicks - inputTrackEndPoint));
-      }
-    }
-
-    trackMap->mSamplesPassedToResampler += segment.GetDuration();
-    trackMap->ResampleInputData(&segment);
-
-    if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
-      // We don't have enough data. Delay it.
-      trackMap->mResampledData.InsertNullDataAtStart(
-        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
-    }
-    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
-      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
-    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
-    inputChannels = GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
-  }
-
-  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
-    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
-      mTrackMap.RemoveElementAt(i);
-    }
-  }
-
-  uint32_t outputChannels = ComputeFinalOuputChannelCount(inputChannels);
-
-  if (outputChannels) {
-    AllocateAudioBlock(outputChannels, &mLastChunks[0]);
-    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
-    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
-      AudioChunk tmpChunk;
-      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
-      if (!tmpChunk.IsNull()) {
-        AccumulateInputChunk(i, tmpChunk, &mLastChunks[0], &downmixBuffer);
-      }
-    }
-  } else {
-    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
-  }
-  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;
-
-  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream graph along with null data.
-  AdvanceOutputSegment();
-}
-
-}
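
The deleted ResampleChannelBuffer() above drives libspeex's resampler one channel at a time. A minimal standalone sketch of that API usage pattern (mono, with illustrative names; a real caller loops until all input is consumed, as the deleted code does):

    #include "speex/speex_resampler.h"

    static void ResampleSketch(const float* aIn, uint32_t aInFrames,
                               float* aOut, uint32_t aOutCapacity,
                               uint32_t aInRate, uint32_t aOutRate)
    {
      int err;
      SpeexResamplerState* st = speex_resampler_init(
        1, aInRate, aOutRate, SPEEX_RESAMPLER_QUALITY_DEFAULT, &err);
      // Swallow the filter's initial zero-output latency, as the deleted
      // GetTrackMapEntry() did.
      speex_resampler_skip_zeros(st);
      uint32_t in = aInFrames;     // in: frames available; out: frames consumed
      uint32_t out = aOutCapacity; // in: capacity; out: frames produced
      speex_resampler_process_float(st, 0, aIn, &in, aOut, &out);
      speex_resampler_destroy(st);
    }
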
deleted file mode 100644
--- a/content/media/AudioNodeExternalInputStream.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MOZILLA_AUDIONODEEXTERNALINPUTSTREAM_H_
-#define MOZILLA_AUDIONODEEXTERNALINPUTSTREAM_H_
-
-#include "MediaStreamGraph.h"
-#include "AudioChannelFormat.h"
-#include "AudioNodeEngine.h"
-#include "AudioNodeStream.h"
-#include "mozilla/dom/AudioParam.h"
-#include <deque>
-
-#ifdef PR_LOGGING
-#define LOG(type, msg) PR_LOG(gMediaStreamGraphLog, type, msg)
-#else
-#define LOG(type, msg)
-#endif
-
-// Forward declaration for mResamplerMap
-typedef struct SpeexResamplerState_ SpeexResamplerState;
-
-namespace mozilla {
-
-/**
- * This is a MediaStream implementation that acts for a Web Audio node but
- * unlike other AudioNodeStreams, supports any kind of MediaStream as an
- * input --- handling any number of audio tracks, resampling them from whatever
- * sample rate they're using, and handling blocking of the input MediaStream.
- */
-class AudioNodeExternalInputStream : public AudioNodeStream {
-public:
-  AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate);
-  ~AudioNodeExternalInputStream();
-
-  virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo) MOZ_OVERRIDE;
-
-private:
-  // For storing pointers and data about input tracks, like the last TrackTick which
-  // was read, and the associated speex resampler.
-  struct TrackMapEntry {
-    ~TrackMapEntry();
-
-    /**
-     * Resamples data from all chunks in aIterator and following, using mResampler,
-     * adding the results to mResampledData.
-     */
-    void ResampleInputData(AudioSegment* aSegment);
-    /**
-     * Resamples a set of channel buffers using mResampler, adding the results
-     * to mResampledData.
-     */
-    void ResampleChannels(const nsTArray<const void*>& aBuffers,
-                          uint32_t aInputDuration,
-                          AudioSampleFormat aFormat,
-                          float aVolume);
-
-    // mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
-    // 0 if we haven't consumed any yet.
-    TrackTicks mEndOfConsumedInputTicks;
-    // mEndOfLastInputIntervalInInputStream is the timestamp for the end of the
-    // previous interval which was unblocked for both the input and output
-    // stream, in the input stream's timeline, or -1 if there wasn't one.
-    StreamTime mEndOfLastInputIntervalInInputStream;
-    // mEndOfLastInputIntervalInOutputStream is the timestamp for the end of the
-    // previous interval which was unblocked for both the input and output
-    // stream, in the output stream's timeline, or -1 if there wasn't one.
-    StreamTime mEndOfLastInputIntervalInOutputStream;
-    /**
-     * Number of samples passed to the resampler so far.
-     */
-    TrackTicks mSamplesPassedToResampler;
-    /**
-     * Resampler being applied to this track.
-     */
-    SpeexResamplerState* mResampler;
-    /**
-     * The track data that has been resampled to the rate of the
-     * AudioNodeExternalInputStream. All data in these chunks is in floats (or null),
-     * and has the number of channels given in mResamplerChannelCount.
-     * mResampledData starts at zero in the stream's output track (so generally
-     * it will consist of null data followed by actual data).
-     */
-    AudioSegment mResampledData;
-    /**
-     * Number of channels used to create mResampler.
-     */
-    uint32_t mResamplerChannelCount;
-    /**
-     * The ID for the track of the input stream this entry is for.
-     */
-    TrackID mTrackID;
-  };
-
-  nsTArray<TrackMapEntry> mTrackMap;
-  // Amount of track data produced so far. A multiple of WEBAUDIO_BLOCK_SIZE.
-  TrackTicks mCurrentOutputPosition;
-
-  /**
-   * Creates a TrackMapEntry for the track, if needed. Returns the index
-   * of the TrackMapEntry or NoIndex if no entry is needed yet.
-   */
-  uint32_t GetTrackMapEntry(const StreamBuffer::Track& aTrack,
-                            GraphTime aFrom);
-};
-
-}
-
-#endif /* MOZILLA_AUDIONODESTREAM_H_ */
--- a/content/media/AudioNodeStream.cpp
+++ b/content/media/AudioNodeStream.cpp
@@ -243,34 +243,16 @@ AudioNodeStream::AllInputsFinished() con
   for (uint32_t i = 0; i < inputCount; ++i) {
     if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) {
       return false;
     }
   }
   return !!inputCount;
 }
 
-uint32_t
-AudioNodeStream::ComputeFinalOuputChannelCount(uint32_t aInputChannelCount)
-{
-  switch (mChannelCountMode) {
-  case ChannelCountMode::Explicit:
-    // Disregard the channel count we've calculated from inputs, and just use
-    // mNumberOfInputChannels.
-    return mNumberOfInputChannels;
-  case ChannelCountMode::Clamped_max:
-    // Clamp the computed output channel count to mNumberOfInputChannels.
-    return std::min(aInputChannelCount, mNumberOfInputChannels);
-  default:
-  case ChannelCountMode::Max:
-    // Nothing to do here, just shut up the compiler warning.
-    return aInputChannelCount;
-  }
-}
-
 void
 AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
 {
   uint32_t inputCount = mInputs.Length();
   uint32_t outputChannelCount = 1;
   nsAutoTArray<AudioChunk*,250> inputChunks;
   for (uint32_t i = 0; i < inputCount; ++i) {
     if (aPortIndex != mInputs[i]->InputNumber()) {
@@ -290,17 +272,30 @@ AudioNodeStream::ObtainInputBlock(AudioC
       continue;
     }
 
     inputChunks.AppendElement(chunk);
     outputChannelCount =
       GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
   }
 
-  outputChannelCount = ComputeFinalOuputChannelCount(outputChannelCount);
+  switch (mChannelCountMode) {
+  case ChannelCountMode::Explicit:
+    // Disregard the output channel count that we've calculated, and just use
+    // mNumberOfInputChannels.
+    outputChannelCount = mNumberOfInputChannels;
+    break;
+  case ChannelCountMode::Clamped_max:
+    // Clamp the computed output channel count to mNumberOfInputChannels.
+    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
+    break;
+  case ChannelCountMode::Max:
+    // Nothing to do here, just shut up the compiler warning.
+    break;
+  }
 
   uint32_t inputChunkCount = inputChunks.Length();
   if (inputChunkCount == 0 ||
       (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
     aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
@@ -311,106 +306,91 @@ AudioNodeStream::ObtainInputBlock(AudioC
   }
 
   if (outputChannelCount == 0) {
     aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
     return;
   }
 
   AllocateAudioBlock(outputChannelCount, &aTmpChunk);
+  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
   // The static storage here should be 1KB, so it's fine
   nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
 
   for (uint32_t i = 0; i < inputChunkCount; ++i) {
-    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
-  }
-}
-
-void
-AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
-                                      AudioChunk* aBlock,
-                                      nsTArray<float>* aDownmixBuffer)
-{
-  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
-  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels, *aDownmixBuffer);
+    AudioChunk* chunk = inputChunks[i];
+    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
+    channels.AppendElements(chunk->mChannelData);
+    if (channels.Length() < outputChannelCount) {
+      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
+        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
+        NS_ASSERTION(outputChannelCount == channels.Length(),
+                     "We called GetAudioChannelsSuperset to avoid this");
+      } else {
+        // Fill up the remaining channels by zeros
+        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
+          channels.AppendElement(silenceChannel);
+        }
+      }
+    } else if (channels.Length() > outputChannelCount) {
+      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
+        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
+        outputChannels.SetLength(outputChannelCount);
+        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
+        for (uint32_t j = 0; j < outputChannelCount; ++j) {
+          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
+        }
 
-  for (uint32_t c = 0; c < channels.Length(); ++c) {
-    const float* inputData = static_cast<const float*>(channels[c]);
-    float* outputData = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c]));
-    if (inputData) {
-      if (aInputIndex == 0) {
-        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
+        AudioChannelsDownMix(channels, outputChannels.Elements(),
+                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);
+
+        channels.SetLength(outputChannelCount);
+        for (uint32_t j = 0; j < channels.Length(); ++j) {
+          channels[j] = outputChannels[j];
+        }
       } else {
-        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
-      }
-    } else {
-      if (aInputIndex == 0) {
-        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
+        // Drop the remaining channels
+        channels.RemoveElementsAt(outputChannelCount,
+                                  channels.Length() - outputChannelCount);
       }
     }
-  }
-}
 
-void
-AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
-                                   uint32_t aOutputChannelCount,
-                                   nsTArray<const void*>& aOutputChannels,
-                                   nsTArray<float>& aDownmixBuffer)
-{
-  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
-
-  aOutputChannels.AppendElements(aChunk->mChannelData);
-  if (aOutputChannels.Length() < aOutputChannelCount) {
-    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
-      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, nullptr);
-      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
-                   "We called GetAudioChannelsSuperset to avoid this");
-    } else {
-      // Fill up the remaining aOutputChannels by zeros
-      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
-        aOutputChannels.AppendElement(silenceChannel);
+    for (uint32_t c = 0; c < channels.Length(); ++c) {
+      const float* inputData = static_cast<const float*>(channels[c]);
+      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
+      if (inputData) {
+        if (i == 0) {
+          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
+        } else {
+          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
+        }
+      } else {
+        if (i == 0) {
+          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
+        }
       }
     }
-  } else if (aOutputChannels.Length() > aOutputChannelCount) {
-    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
-      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
-      outputChannels.SetLength(aOutputChannelCount);
-      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
-      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
-        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
-      }
-
-      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
-                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);
-
-      aOutputChannels.SetLength(aOutputChannelCount);
-      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
-        aOutputChannels[j] = outputChannels[j];
-      }
-    } else {
-      // Drop the remaining aOutputChannels
-      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
-        aOutputChannels.Length() - aOutputChannelCount);
-    }
   }
 }
 
 // The MediaStreamGraph guarantees that this is actually one block, for
 // AudioNodeStreams.
 void
 AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
 {
   if (mMarkAsFinishedAfterThisBlock) {
     // This stream was finished the last time that we looked at it, and all
     // of the depending streams have finished their output as well, so now
     // it's time to mark this stream as finished.
     FinishOutput();
   }
 
-  EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
+  StreamBuffer::Track* track = EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
+
+  AudioSegment* segment = track->Get<AudioSegment>();
 
   uint16_t outputCount = std::max(uint16_t(1), mEngine->OutputCount());
   mLastChunks.SetLength(outputCount);
 
   if (mInCycle) {
     // XXX DelayNode not supported yet so just produce silence
     for (uint16_t i = 0; i < outputCount; ++i) {
       mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
@@ -439,25 +419,16 @@ AudioNodeStream::ProduceOutput(GraphTime
   }
 
   if (mDisabledTrackIDs.Contains(AUDIO_NODE_STREAM_TRACK_ID)) {
     for (uint32_t i = 0; i < mLastChunks.Length(); ++i) {
       mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
     }
   }
 
-  AdvanceOutputSegment();
-}
-
-void
-AudioNodeStream::AdvanceOutputSegment()
-{
-  StreamBuffer::Track* track = EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
-  AudioSegment* segment = track->Get<AudioSegment>();
-
   if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
     segment->AppendAndConsumeChunk(&mLastChunks[0]);
   } else {
     segment->AppendNullData(mLastChunks[0].GetDuration());
   }
 
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
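
The factored-out AccumulateInputChunk() and the restored inline loop implement the same mixing rule: the first contributing chunk initializes each output channel (scaled by its volume) and every later chunk is summed in, with null (silent) chunks only needing to zero-fill on the first pass. A minimal standalone sketch of that rule:

    // aIndex is the chunk's position among the inputs; aIn may be null for
    // a silent chunk.
    static void AccumulateSketch(uint32_t aIndex, const float* aIn,
                                 float aVolume, float* aOut, size_t aFrames)
    {
      for (size_t i = 0; i < aFrames; ++i) {
        float sample = aIn ? aIn[i] * aVolume : 0.0f;
        aOut[i] = (aIndex == 0) ? sample : aOut[i] + sample;
      }
    }
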
--- a/content/media/AudioNodeStream.h
+++ b/content/media/AudioNodeStream.h
@@ -109,36 +109,24 @@ public:
     return mLastChunks;
   }
   virtual bool MainThreadNeedsUpdates() const MOZ_OVERRIDE
   {
     // Only source and external streams need updates on the main thread.
     return (mKind == MediaStreamGraph::SOURCE_STREAM && mFinished) ||
            mKind == MediaStreamGraph::EXTERNAL_STREAM;
   }
-  virtual bool IsIntrinsicallyConsumed() const MOZ_OVERRIDE
-  {
-    return true;
-  }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
   TrackRate SampleRate() const { return mSampleRate; }
 
 protected:
-  void AdvanceOutputSegment();
   void FinishOutput();
-  void AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
-                            AudioChunk* aBlock,
-                            nsTArray<float>* aDownmixBuffer);
-  void UpMixDownMixChunk(const AudioChunk* aChunk, uint32_t aOutputChannelCount,
-                         nsTArray<const void*>& aOutputChannels,
-                         nsTArray<float>& aDownmixBuffer);
 
-  uint32_t ComputeFinalOuputChannelCount(uint32_t aInputChannelCount);
   void ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex);
 
   // The engine that will generate output for this node.
   nsAutoPtr<AudioNodeEngine> mEngine;
   // The last block produced by this node.
   OutputChunks mLastChunks;
   // The stream's sampling rate
   const TrackRate mSampleRate;
--- a/content/media/DOMMediaStream.cpp
+++ b/content/media/DOMMediaStream.cpp
@@ -26,23 +26,21 @@ NS_IMPL_CYCLE_COLLECTING_ADDREF(DOMMedia
 NS_IMPL_CYCLE_COLLECTING_RELEASE(DOMMediaStream)
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(DOMMediaStream)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(DOMMediaStream)
   tmp->Destroy();
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mWindow)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mTracks)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mConsumersToKeepAlive)
   NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(DOMMediaStream)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mWindow)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mTracks)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mConsumersToKeepAlive)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 NS_IMPL_CYCLE_COLLECTION_TRACE_WRAPPERCACHE(DOMMediaStream)
 
 NS_IMPL_ISUPPORTS_INHERITED1(DOMLocalMediaStream, DOMMediaStream,
                              nsIDOMLocalMediaStream)
 
 NS_IMPL_CYCLE_COLLECTION_INHERITED_1(DOMAudioNodeMediaStream, DOMMediaStream,
@@ -285,26 +283,16 @@ DOMMediaStream::GetDOMTrackFor(TrackID a
 
 void
 DOMMediaStream::NotifyMediaStreamGraphShutdown()
 {
   // No more tracks will ever be added, so just clear these callbacks now
   // to prevent leaks.
   mNotifiedOfMediaStreamGraphShutdown = true;
   mRunOnTracksAvailable.Clear();
-
-  mConsumersToKeepAlive.Clear();
-}
-
-void
-DOMMediaStream::NotifyStreamStateChanged()
-{
-  if (IsFinished()) {
-    mConsumersToKeepAlive.Clear();
-  }
 }
 
 void
 DOMMediaStream::OnTracksAvailable(OnTracksAvailableCallback* aRunnable)
 {
   if (mNotifiedOfMediaStreamGraphShutdown) {
     // No more tracks will ever be added, so just delete the callback now.
     delete aRunnable;
--- a/content/media/DOMMediaStream.h
+++ b/content/media/DOMMediaStream.h
@@ -63,22 +63,20 @@ public:
   {
     return mWindow;
   }
   virtual JSObject* WrapObject(JSContext* aCx,
                                JS::Handle<JSObject*> aScope) MOZ_OVERRIDE;
 
   // WebIDL
   double CurrentTime();
-
   void GetAudioTracks(nsTArray<nsRefPtr<AudioStreamTrack> >& aTracks);
   void GetVideoTracks(nsTArray<nsRefPtr<VideoStreamTrack> >& aTracks);
 
-  MediaStream* GetStream() const { return mStream; }
-
+  MediaStream* GetStream() { return mStream; }
   bool IsFinished();
   /**
    * Returns a principal indicating who may access this stream. The stream contents
    * can only be accessed by principals subsuming this principal.
    */
   nsIPrincipal* GetPrincipal() { return mPrincipal; }
 
   /**
@@ -90,20 +88,16 @@ public:
   bool CombineWithPrincipal(nsIPrincipal* aPrincipal);
 
   /**
    * Called when this stream's MediaStreamGraph has been shut down. Normally
    * MSGs are only shut down when all streams have been removed, so this
    * will only be called during a forced shutdown due to application exit.
    */
   void NotifyMediaStreamGraphShutdown();
-  /**
-   * Called when the main-thread state of the MediaStream changed.
-   */
-  void NotifyStreamStateChanged();
 
   // Indicate what track types we eventually expect to add to this stream
   enum {
     HINT_CONTENTS_AUDIO = 1 << 0,
     HINT_CONTENTS_VIDEO = 1 << 1
   };
   TrackTypeHints GetHintContents() const { return mHintContents; }
   void SetHintContents(TrackTypeHints aHintContents) { mHintContents = aHintContents; }
@@ -146,27 +140,16 @@ public:
   // It is allowed to do anything, including run script.
   // aCallback may run immediately during this call if tracks are already
   // available!
   // We only care about track additions, we'll fire the notification even if
   // some of the tracks have been removed.
   // Takes ownership of aCallback.
   void OnTracksAvailable(OnTracksAvailableCallback* aCallback);
 
-  /**
-   * Add an nsISupports object that this stream will keep alive as long as
-   * the stream is not finished.
-   */
-  void AddConsumerToKeepAlive(nsISupports* aConsumer)
-  {
-    if (!IsFinished() && !mNotifiedOfMediaStreamGraphShutdown) {
-      mConsumersToKeepAlive.AppendElement(aConsumer);
-    }
-  }
-
 protected:
   void Destroy();
   void InitSourceStream(nsIDOMWindow* aWindow, TrackTypeHints aHintContents);
   void InitTrackUnionStream(nsIDOMWindow* aWindow, TrackTypeHints aHintContents);
   void InitStreamCommon(MediaStream* aStream);
   void CheckTracksAvailable();
 
   class StreamListener;
@@ -185,19 +168,16 @@ protected:
   // If null, this stream can be used by anyone because it has no content yet.
   nsCOMPtr<nsIPrincipal> mPrincipal;
 
   nsAutoTArray<nsRefPtr<MediaStreamTrack>,2> mTracks;
   nsRefPtr<StreamListener> mListener;
 
   nsTArray<nsAutoPtr<OnTracksAvailableCallback> > mRunOnTracksAvailable;
 
-  // Keep these alive until the stream finishes
-  nsTArray<nsCOMPtr<nsISupports> > mConsumersToKeepAlive;
-
   // Indicate what track types we eventually expect to add to this stream
   uint8_t mHintContents;
   // Indicate what track types have been added to this stream
   uint8_t mTrackTypesAvailable;
   bool mNotifiedOfMediaStreamGraphShutdown;
 };
 
 class DOMLocalMediaStream : public DOMMediaStream,
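
AddConsumerToKeepAlive(), removed above, let an object tie its lifetime to the stream's. A hedged sketch of the intended call pattern, where consumer stands for any hypothetical cycle-collected object wrapping the stream:

    // The stream holds a strong nsISupports reference until it finishes or
    // its graph shuts down; the call is a no-op on an already-finished stream.
    nsCOMPtr<nsISupports> supports = do_QueryInterface(consumer);
    stream->AddConsumerToKeepAlive(supports);
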
--- a/content/media/MediaDecoder.cpp
+++ b/content/media/MediaDecoder.cpp
@@ -363,17 +363,16 @@ MediaDecoder::MediaDecoder() :
   mCurrentTime(0.0),
   mInitialVolume(0.0),
   mInitialPlaybackRate(1.0),
   mInitialPreservesPitch(true),
   mRequestedSeekTime(-1.0),
   mDuration(-1),
   mTransportSeekable(true),
   mMediaSeekable(true),
-  mSameOriginMedia(false),
   mReentrantMonitor("media.decoder"),
   mIsDormant(false),
   mPlayState(PLAY_STATE_PAUSED),
   mNextState(PLAY_STATE_PAUSED),
   mCalledResourceLoaded(false),
   mIgnoreProgressData(false),
   mInfiniteStream(false),
   mTriggerPlaybackEndedWhenSourceStreamFinishes(false),
@@ -441,44 +440,48 @@ void MediaDecoder::Shutdown()
 MediaDecoder::~MediaDecoder()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MediaMemoryReporter::RemoveMediaDecoder(this);
   UnpinForSeek();
   MOZ_COUNT_DTOR(MediaDecoder);
 }
 
-nsresult MediaDecoder::OpenResource(nsIStreamListener** aStreamListener)
+nsresult MediaDecoder::OpenResource(MediaResource* aResource,
+                                    nsIStreamListener** aStreamListener)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (aStreamListener) {
     *aStreamListener = nullptr;
   }
 
   {
     // Hold the lock while we do this to set proper lock ordering
     // expectations for dynamic deadlock detectors: decoder lock(s)
     // should be grabbed before the cache lock
     ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
 
-    nsresult rv = mResource->Open(aStreamListener);
+    nsresult rv = aResource->Open(aStreamListener);
     if (NS_FAILED(rv)) {
       LOG(PR_LOG_DEBUG, ("%p Failed to open stream!", this));
       return rv;
     }
+
+    mResource = aResource;
   }
   return NS_OK;
 }
 
-nsresult MediaDecoder::Load(nsIStreamListener** aStreamListener,
-                            MediaDecoder* aCloneDonor)
+nsresult MediaDecoder::Load(MediaResource* aResource,
+                            nsIStreamListener** aStreamListener,
+                            MediaDecoder* aCloneDonor)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
-  nsresult rv = OpenResource(aStreamListener);
+  nsresult rv = OpenResource(aResource, aStreamListener);
   NS_ENSURE_SUCCESS(rv, rv);
 
   mDecoderStateMachine = CreateStateMachine();
   if (!mDecoderStateMachine) {
     LOG(PR_LOG_DEBUG, ("%p Failed to create state machine!", this));
     return NS_ERROR_FAILURE;
   }
 
@@ -834,28 +837,16 @@ void MediaDecoder::DecodeError()
     return;
 
   if (mOwner)
     mOwner->DecodeError();
 
   Shutdown();
 }
 
-void MediaDecoder::UpdateSameOriginStatus(bool aSameOrigin)
-{
-  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
-  mSameOriginMedia = aSameOrigin;
-}
-
-bool MediaDecoder::IsSameOriginMedia()
-{
-  GetReentrantMonitor().AssertCurrentThreadIn();
-  return mSameOriginMedia;
-}
-
 bool MediaDecoder::IsSeeking() const
 {
   MOZ_ASSERT(NS_IsMainThread());
   return mPlayState == PLAY_STATE_SEEKING;
 }
 
 bool MediaDecoder::IsEnded() const
 {
--- a/content/media/MediaDecoder.h
+++ b/content/media/MediaDecoder.h
@@ -272,22 +272,26 @@ public:
   virtual bool Init(MediaDecoderOwner* aOwner);
 
   // Cleanup internal data structures. Must be called on the main
   // thread by the owning object before that object disposes of this object.
   virtual void Shutdown();
 
   // Start downloading the media. Decode the downloaded data up to the
   // point of the first frame of data.
+  // aResource is the media stream to use. Ownership of aResource passes to
+  // the decoder, even if Load returns an error.
   // This is called at most once per decoder, after Init().
-  virtual nsresult Load(nsIStreamListener** aListener,
+  virtual nsresult Load(MediaResource* aResource,
+                        nsIStreamListener** aListener,
                         MediaDecoder* aCloneDonor);
 
-  // Called in |Load| to open mResource.
-  nsresult OpenResource(nsIStreamListener** aStreamListener);
+  // Called in |Load| to open the media resource.
+  nsresult OpenResource(MediaResource* aResource,
+                        nsIStreamListener** aStreamListener);
 
   // Called when the video file has completed downloading.
   virtual void ResourceLoaded();
 
   // Called if the media file encounters a network error.
   virtual void NetworkError();
 
   // Get the current MediaResource being used. Its URI will be returned
@@ -297,21 +301,16 @@ public:
   // refcounting, *unless* you need to store and use the reference after the
   // MediaDecoder has been destroyed. You might need to do this if you're
   // wrapping the MediaResource in some kind of byte stream interface to be
   // passed to a platform decoder.
   MediaResource* GetResource() const MOZ_FINAL MOZ_OVERRIDE
   {
     return mResource;
   }
-  void SetResource(MediaResource* aResource)
-  {
-    NS_ASSERTION(NS_IsMainThread(), "Should only be called on main thread");
-    mResource = aResource;
-  }
 
   // Return the principal of the current URI being played or downloaded.
   virtual already_AddRefed<nsIPrincipal> GetCurrentPrincipal();
 
   // Return the time position in the video stream being
   // played measured in seconds.
   virtual double GetCurrentTime();
 
@@ -636,20 +635,16 @@ public:
   virtual void NotifyPlaybackStopped() {
     GetReentrantMonitor().AssertCurrentThreadIn();
     mPlaybackStatistics.Stop();
   }
 
   // The actual playback rate computation. The monitor must be held.
   virtual double ComputePlaybackRate(bool* aReliable);
 
-  // Return true when the media is same-origin with the element. The monitor
-  // must be held.
-  bool IsSameOriginMedia();
-
   // Returns true if we can play the entire media through without stopping
   // to buffer, given the current download and playback rates.
   bool CanPlayThrough();
 
   // Make the decoder state machine update the playback position. Called by
   // the reader on the decoder thread (Assertions for this checked by
   // mDecoderStateMachine). This must be called with the decode monitor
   // held.
@@ -736,19 +731,16 @@ public:
   // Called when a "MozAudioAvailable" event listener is added. This enables
   // the decoder to only dispatch "MozAudioAvailable" events when a
   // handler exists, reducing overhead. Called on the main thread.
   virtual void NotifyAudioAvailableListener();
 
   // Notifies the element that decoding has failed.
   virtual void DecodeError();
 
-  // Indicate whether the media is same-origin with the element.
-  void UpdateSameOriginStatus(bool aSameOrigin);
-
   MediaDecoderOwner* GetOwner() MOZ_OVERRIDE;
 
 #ifdef MOZ_RAW
   static bool IsRawEnabled();
 #endif
 
 #ifdef MOZ_OGG
   static bool IsOggEnabled();
@@ -961,20 +953,16 @@ public:
 
   // True if the resource is seekable at a transport level (server supports byte
   // range requests, local file, etc.).
   bool mTransportSeekable;
 
   // True if the media is seekable (i.e. supports random access).
   bool mMediaSeekable;
 
-  // True if the media is same-origin with the element. Data can only be
-  // passed to MediaStreams when this is true.
-  bool mSameOriginMedia;
-
   /******
    * The following member variables can be accessed from any thread.
    ******/
 
   // The state machine object for handling the decoding. It is safe to
   // call methods of this object from other threads. Its internal data
   // is synchronised on a monitor. The lifetime of this object is
   // after mPlayState is LOADING and before mPlayState is SHUTDOWN. It
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -614,20 +614,16 @@ void MediaDecoderStateMachine::SendStrea
 
   DecodedStreamData* stream = mDecoder->GetDecodedStream();
   if (!stream)
     return;
 
   if (mState == DECODER_STATE_DECODING_METADATA)
     return;
 
-  if (!mDecoder->IsSameOriginMedia()) {
-    return;
-  }
-
   // If there's still an audio thread alive, then we can't send any stream
   // data yet since both SendStreamData and the audio thread want to be in
   // charge of popping the audio queue. We're waiting for the audio thread
   // to die before sending anything to our stream.
   if (mAudioThread)
     return;
 
   int64_t minLastAudioPacketTime = INT64_MAX;
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -17,17 +17,16 @@
 #include "prlog.h"
 #include "VideoUtils.h"
 #include "mozilla/Attributes.h"
 #include "TrackUnionStream.h"
 #include "ImageContainer.h"
 #include "AudioChannelCommon.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
-#include "AudioNodeExternalInputStream.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 
 namespace mozilla {
@@ -516,17 +515,17 @@ MediaStreamGraphImpl::UpdateStreamOrder(
     if (ps) {
       ps->mInCycle = false;
     }
   }
 
   mozilla::LinkedList<MediaStream> stack;
   for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
     nsRefPtr<MediaStream>& s = mOldStreams[i];
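+    // A stream with direct audio or video outputs is consumed even when no
+    // MediaInputPort is reading from it.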
-    if (s->IsIntrinsicallyConsumed()) {
+    if (!s->mAudioOutputs.IsEmpty() || !s->mVideoOutputs.IsEmpty()) {
       MarkConsumed(s);
     }
     if (!s->mHasBeenOrdered) {
       UpdateStreamOrderForStream(&stack, s.forget());
     }
   }
 }
 
@@ -1227,19 +1226,16 @@ MediaStreamGraphImpl::ApplyStreamUpdate(
   mMonitor.AssertCurrentThreadOwns();
 
   MediaStream* stream = aUpdate->mStream;
   if (!stream)
     return;
   stream->mMainThreadCurrentTime = aUpdate->mNextMainThreadCurrentTime;
   stream->mMainThreadFinished = aUpdate->mNextMainThreadFinished;
 
-  if (stream->mWrapper) {
-    stream->mWrapper->NotifyStreamStateChanged();
-  }
   for (int32_t i = stream->mMainThreadListeners.Length() - 1; i >= 0; --i) {
     stream->mMainThreadListeners[i]->NotifyMainThreadStateChanged();
   }
 }
 
 void
 MediaStreamGraphImpl::ShutdownThreads()
 {
@@ -2315,31 +2311,16 @@ MediaStreamGraph::CreateTrackUnionStream
   TrackUnionStream* stream = new TrackUnionStream(aWrapper);
   NS_ADDREF(stream);
   MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
   stream->SetGraphImpl(graph);
   graph->AppendMessage(new CreateMessage(stream));
   return stream;
 }
 
-AudioNodeExternalInputStream*
-MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  if (!aSampleRate) {
-    aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
-  }
-  AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(aEngine, aSampleRate);
-  NS_ADDREF(stream);
-  MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
-  stream->SetGraphImpl(graph);
-  graph->AppendMessage(new CreateMessage(stream));
-  return stream;
-}
-
 AudioNodeStream*
 MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
                                         AudioNodeStreamKind aKind,
                                         TrackRate aSampleRate)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (!aSampleRate) {
     aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
--- a/content/media/MediaStreamGraph.h
+++ b/content/media/MediaStreamGraph.h
@@ -186,19 +186,18 @@ class MainThreadMediaStreamListener {
 public:
   virtual void NotifyMainThreadStateChanged() = 0;
 };
 
 class MediaStreamGraphImpl;
 class SourceMediaStream;
 class ProcessedMediaStream;
 class MediaInputPort;
+class AudioNodeStream;
 class AudioNodeEngine;
-class AudioNodeExternalInputStream;
-class AudioNodeStream;
 struct AudioChunk;
 
 /**
  * A stream of synchronized audio and video data. All (not blocked) streams
  * progress at the same rate --- "real time". Streams cannot seek. The only
  * operation readers can perform on a stream is to read the next data.
  *
  * Consumers of a stream can be reading from it at different offsets, but that
@@ -356,17 +355,16 @@ public:
   bool IsDestroyed()
   {
     NS_ASSERTION(NS_IsMainThread(), "Call only on main thread");
     return mMainThreadDestroyed;
   }
 
   friend class MediaStreamGraphImpl;
   friend class MediaInputPort;
-  friend class AudioNodeExternalInputStream;
 
   virtual SourceMediaStream* AsSourceStream() { return nullptr; }
   virtual ProcessedMediaStream* AsProcessedStream() { return nullptr; }
   virtual AudioNodeStream* AsAudioNodeStream() { return nullptr; }
 
   // media graph thread only
   void Init();
   // These Impl methods perform the core functionality of the control methods
@@ -397,26 +395,16 @@ public:
   void ChangeExplicitBlockerCountImpl(GraphTime aTime, int32_t aDelta)
   {
     mExplicitBlockerCount.SetAtAndAfter(aTime, mExplicitBlockerCount.GetAt(aTime) + aDelta);
   }
   void AddListenerImpl(already_AddRefed<MediaStreamListener> aListener);
   void RemoveListenerImpl(MediaStreamListener* aListener);
   void RemoveAllListenersImpl();
   void SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled);
-  /**
-   * Returns true when this stream requires the contents of its inputs even if
-   * its own outputs are not being consumed. This is used to signal inputs to
-   * this stream that they are being consumed; when they're not being consumed,
-   * we make some optimizations.
-   */
-  virtual bool IsIntrinsicallyConsumed() const
-  {
-    return !mAudioOutputs.IsEmpty() || !mVideoOutputs.IsEmpty();
-  }
 
   void AddConsumer(MediaInputPort* aPort)
   {
     mConsumers.AppendElement(aPort);
   }
   void RemoveConsumer(MediaInputPort* aPort)
   {
     mConsumers.RemoveElement(aPort);
@@ -771,20 +759,20 @@ public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaInputPort)
 
   /**
    * The FLAG_BLOCK_INPUT and FLAG_BLOCK_OUTPUT flags can be used to control
    * exactly how the blocking statuses of the input and output streams affect
    * each other.
    */
   enum {
-    // When set, blocking on the output stream forces blocking on the input
+    // When set, blocking on the input stream forces blocking on the output
     // stream.
     FLAG_BLOCK_INPUT = 0x01,
-    // When set, blocking on the input stream forces blocking on the output
+    // When set, blocking on the output stream forces blocking on the input
     // stream.
     FLAG_BLOCK_OUTPUT = 0x02
   };
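+  // For example, a processed stream wires up a source with
+  //   outputStream->AllocateInputPort(aSource, MediaInputPort::FLAG_BLOCK_INPUT);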
   ~MediaInputPort()
   {
     MOZ_COUNT_DTOR(MediaInputPort);
   }
 
@@ -965,21 +953,16 @@ public:
    * Create a stream that will process audio for an AudioNode.
    * Takes ownership of aEngine.  aSampleRate is the sampling rate used
    * for the stream.  If 0 is passed, the sampling rate of the engine's
    * node will get used.
    */
   AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine,
                                          AudioNodeStreamKind aKind,
                                          TrackRate aSampleRate = 0);
-
-  AudioNodeExternalInputStream*
-  CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine,
-                                     TrackRate aSampleRate = 0);
-
   /**
    * Returns the number of graph updates sent. This can be used to track
    * whether a given update has been processed by the graph thread and reflected
    * in main-thread stream state.
    */
   int64_t GetCurrentGraphUpdateIndex() { return mGraphUpdatesSent; }
   /**
    * Start processing non-realtime for a specific number of ticks.
--- a/content/media/TrackUnionStream.h
+++ b/content/media/TrackUnionStream.h
@@ -115,27 +115,16 @@ public:
     mFilterCallback = aCallback;
   }
 
 protected:
   TrackIDFilterCallback mFilterCallback;
 
   // Only non-ended tracks are allowed to persist in this map.
   struct TrackMapEntry {
-    // mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
-    // 0 if we haven't consumed any yet.
-    TrackTicks mEndOfConsumedInputTicks;
-    // mEndOfLastInputIntervalInInputStream is the timestamp for the end of the
-    // previous interval which was unblocked for both the input and output
-    // stream, in the input stream's timeline, or -1 if there wasn't one.
-    StreamTime mEndOfLastInputIntervalInInputStream;
-    // mEndOfLastInputIntervalInOutputStream is the timestamp for the end of the
-    // previous interval which was unblocked for both the input and output
-    // stream, in the output stream's timeline, or -1 if there wasn't one.
-    StreamTime mEndOfLastInputIntervalInOutputStream;
     MediaInputPort* mInputPort;
     // We keep track IDs instead of track pointers because
     // tracks can be removed without us being notified (e.g.
     // when a finished track is forgotten.) When we need a Track*,
     // we call StreamBuffer::FindTrack, which will return null if
     // the track has been deleted.
     TrackID mInputTrackID;
     TrackID mOutputTrackID;
@@ -167,19 +156,16 @@ protected:
     segment->AppendNullData(outputStart);
     StreamBuffer::Track* track =
       &mBuffer.AddTrack(id, rate, outputStart, segment.forget());
     LOG(PR_LOG_DEBUG, ("TrackUnionStream %p adding track %d for input stream %p track %d, start ticks %lld",
                        this, id, aPort->GetSource(), aTrack->GetID(),
                        (long long)outputStart));
 
     TrackMapEntry* map = mTrackMap.AppendElement();
-    map->mEndOfConsumedInputTicks = 0;
-    map->mEndOfLastInputIntervalInInputStream = -1;
-    map->mEndOfLastInputIntervalInOutputStream = -1;
     map->mInputPort = aPort;
     map->mInputTrackID = aTrack->GetID();
     map->mOutputTrackID = track->GetID();
     map->mSegment = aTrack->GetSegment()->CreateEmptyClone();
     return mTrackMap.Length() - 1;
   }
   void EndTrack(uint32_t aIndex)
   {
@@ -217,22 +203,24 @@ protected:
       interval.mEnd = std::min(interval.mEnd, aTo);
       if (interval.mStart >= interval.mEnd)
         break;
       next = interval.mEnd;
 
       // Ticks >= startTicks and < endTicks are in the interval
       StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
       TrackTicks startTicks = outputTrack->GetEnd();
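+      // outputStart is only used by the assertion below; computing it in
+      // DEBUG builds only avoids an unused-variable warning in release.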
+#ifdef DEBUG
       StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
+#endif
       NS_ASSERTION(startTicks == TimeToTicksRoundUp(rate, outputStart),
                    "Samples missing");
       TrackTicks endTicks = TimeToTicksRoundUp(rate, outputEnd);
       TrackTicks ticks = endTicks - startTicks;
-      StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
+      // StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
       StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
       TrackTicks inputTrackEndPoint = TRACK_TICKS_MAX;
 
       if (aInputTrack->IsEnded()) {
         TrackTicks inputEndTicks = aInputTrack->TimeToTicksRoundDown(inputEnd);
         if (aInputTrack->GetEnd() <= inputEndTicks) {
           inputTrackEndPoint = aInputTrack->GetEnd();
           *aOutputTrackFinished = true;
@@ -246,76 +234,22 @@ protected:
             this, (long long)ticks, outputTrack->GetID()));
       } else {
         // Figuring out which samples to use from the input stream is tricky
         // because its start time and our start time may differ by a fraction
         // of a tick. Assuming the input track hasn't ended, we have to ensure
         // that 'ticks' samples are gathered, even though a tick boundary may
         // occur between outputStart and outputEnd but not between inputStart
         // and inputEnd.
-        // These are the properties we need to ensure:
-        // 1) Exactly 'ticks' ticks of output are produced, i.e.
-        // inputEndTicks - inputStartTicks = ticks.
-        // 2) inputEndTicks <= aInputTrack->GetSegment()->GetDuration().
-        // 3) In any sequence of intervals where neither stream is blocked,
-        // the content of the input track we use is a contiguous sequence of
-        // ticks with no gaps or overlaps.
-        if (map->mEndOfLastInputIntervalInInputStream != inputStart ||
-            map->mEndOfLastInputIntervalInOutputStream != outputStart) {
-          // Start of a new series of intervals where neither stream is blocked.
-          map->mEndOfConsumedInputTicks = TimeToTicksRoundDown(rate, inputStart) - 1;
-        }
-        TrackTicks inputStartTicks = map->mEndOfConsumedInputTicks;
-        TrackTicks inputEndTicks = inputStartTicks + ticks;
-        map->mEndOfConsumedInputTicks = inputEndTicks;
-        map->mEndOfLastInputIntervalInInputStream = inputEnd;
-        map->mEndOfLastInputIntervalInOutputStream = outputEnd;
-        // Now we prove that the above properties hold:
-        // Property #1: trivial by construction.
-        // Property #3: trivial by construction. Between every two
-        // intervals where both streams are not blocked, the above if condition
-        // is false and mEndOfConsumedInputTicks advances exactly to match
-        // the ticks that were consumed.
-        // Property #2:
-        // Let originalOutputStart be the value of outputStart and originalInputStart
-        // be the value of inputStart when the body of the "if" block was last
-        // executed.
-        // Let allTicks be the sum of the values of 'ticks' computed since then.
-        // The interval [originalInputStart/rate, inputEnd/rate) is the
-        // same length as the interval [originalOutputStart/rate, outputEnd/rate),
-        // so the latter interval can have at most one more integer in it. Thus
-        // TimeToTicksRoundUp(rate, outputEnd) - TimeToTicksRoundUp(rate, originalOutputStart)
-        //   <= TimeToTicksRoundDown(rate, inputEnd) - TimeToTicksRoundDown(rate, originalInputStart) + 1
-        // Then
-        // inputEndTicks = TimeToTicksRoundDown(rate, originalInputStart) - 1 + allTicks
-        //   = TimeToTicksRoundDown(rate, originalInputStart) - 1 + TimeToTicksRoundUp(rate, outputEnd) - TimeToTicksRoundUp(rate, originalOutputStart)
-        //   <= TimeToTicksRoundDown(rate, originalInputStart) - 1 + TimeToTicksRoundDown(rate, inputEnd) - TimeToTicksRoundDown(rate, originalInputStart) + 1
-        //   = TimeToTicksRoundDown(rate, inputEnd)
-        //   <= inputEnd/rate
-        // (now using the fact that inputEnd <= track->GetEndTimeRoundDown() for a non-ended track)
-        //   <= TicksToTimeRoundDown(rate, aInputTrack->GetSegment()->GetDuration())/rate
-        //   <= rate*aInputTrack->GetSegment()->GetDuration()/rate
-        //   = aInputTrack->GetSegment()->GetDuration()
-        // as required.
-
-        if (inputStartTicks < 0) {
-          // Data before the start of the track is just null.
-          // We have to add a small amount of delay to ensure that there is
-          // always a sample available if we see an interval that contains a
-          // tick boundary on the output stream's timeline but does not contain
-          // a tick boundary on the input stream's timeline. 1 tick delay is
-          // necessary and sufficient.
-          segment->AppendNullData(-inputStartTicks);
-          inputStartTicks = 0;
-        }
-        if (inputEndTicks > inputStartTicks) {
-          segment->AppendSlice(*aInputTrack->GetSegment(),
-                               std::min(inputTrackEndPoint, inputStartTicks),
-                               std::min(inputTrackEndPoint, inputEndTicks));
-        }
+        // We'll take the latest samples we can.
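+        // Rounding inputEnd up to a whole tick and stepping back 'ticks'
+        // samples yields exactly 'ticks' ticks of input for this interval.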
+        TrackTicks inputEndTicks = TimeToTicksRoundUp(rate, inputEnd);
+        TrackTicks inputStartTicks = inputEndTicks - ticks;
+        segment->AppendSlice(*aInputTrack->GetSegment(),
+                             std::min(inputTrackEndPoint, inputStartTicks),
+                             std::min(inputTrackEndPoint, inputEndTicks));
         LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of input data to track %d",
             this, (long long)(std::min(inputTrackEndPoint, inputEndTicks) - std::min(inputTrackEndPoint, inputStartTicks)),
             outputTrack->GetID()));
       }
       ApplyTrackDisabling(outputTrack->GetID(), segment);
       for (uint32_t j = 0; j < mListeners.Length(); ++j) {
         MediaStreamListener* l = mListeners[j];
         l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
--- a/content/media/dash/DASHDecoder.cpp
+++ b/content/media/dash/DASHDecoder.cpp
@@ -190,24 +190,25 @@ DASHDecoder::ReleaseStateMachine()
     mAudioRepDecoders[i]->ReleaseStateMachine();
   }
   for (uint i = 0; i < mVideoRepDecoders.Length(); i++) {
     mVideoRepDecoders[i]->ReleaseStateMachine();
   }
 }
 
 nsresult
-DASHDecoder::Load(nsIStreamListener** aStreamListener,
+DASHDecoder::Load(MediaResource* aResource,
+                  nsIStreamListener** aStreamListener,
                   MediaDecoder* aCloneDonor)
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   mDASHReader = new DASHReader(this);
 
-  nsresult rv = OpenResource(aStreamListener);
+  nsresult rv = OpenResource(aResource, aStreamListener);
   NS_ENSURE_SUCCESS(rv, rv);
 
   mDecoderStateMachine = CreateStateMachine();
   if (!mDecoderStateMachine) {
     LOG1("Failed to create state machine!");
     return NS_ERROR_FAILURE;
   }
   return NS_OK;
--- a/content/media/dash/DASHDecoder.h
+++ b/content/media/dash/DASHDecoder.h
@@ -52,18 +52,19 @@ public:
   }
 
   // Creates a single state machine for all stream decoders.
   // Called from Load on the main thread only.
   MediaDecoderStateMachine* CreateStateMachine();
 
   // Loads the MPD from the network and subsequently loads the media streams.
   // Called from the main thread only.
-  virtual nsresult Load(nsIStreamListener** aListener,
-                        MediaDecoder* aCloneDonor) MOZ_OVERRIDE;
+  virtual nsresult Load(MediaResource* aResource,
+                        nsIStreamListener** aListener,
+                        MediaDecoder* aCloneDonor) MOZ_OVERRIDE;
 
   // Notifies download of MPD file has ended.
   // Called on the main thread only.
   void NotifyDownloadEnded(nsresult aStatus);
 
   // Notification from |DASHReader| that a seek has occurred in
   // |aSubsegmentIdx|. Passes notification onto subdecoder which downloaded
   // the subsegment already, if download is in the past. Otherwise, it returns.
--- a/content/media/dash/DASHRepDecoder.cpp
+++ b/content/media/dash/DASHRepDecoder.cpp
@@ -68,17 +68,18 @@ DASHRepDecoder::SetMPDRepresentation(Rep
 void
 DASHRepDecoder::SetReader(WebMReader* aReader)
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   mReader = aReader;
 }
 
 nsresult
-DASHRepDecoder::Load(nsIStreamListener** aListener,
+DASHRepDecoder::Load(MediaResource* aResource,
+                     nsIStreamListener** aListener,
                      MediaDecoder* aCloneDonor)
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   NS_ENSURE_TRUE(mMPDRepresentation, NS_ERROR_NOT_INITIALIZED);
 
   // Get init range and index range from MPD.
   SegmentBase const * segmentBase = mMPDRepresentation->GetSegmentBase();
   NS_ENSURE_TRUE(segmentBase, NS_ERROR_NULL_POINTER);
--- a/content/media/dash/DASHRepDecoder.h
+++ b/content/media/dash/DASHRepDecoder.h
@@ -68,18 +68,19 @@ public:
   // for this decoder's |Representation|. Called on the main thread only.
   void SetResource(MediaResource* aResource);
 
   // Sets the |Representation| object for this decoder. Called on the main
   // thread.
   void SetMPDRepresentation(Representation const * aRep);
 
   // Called from DASHDecoder on main thread; Starts media stream download.
-  virtual nsresult Load(nsIStreamListener** aListener = nullptr,
-                        MediaDecoder* aCloneDonor = nullptr) MOZ_OVERRIDE;
+  virtual nsresult Load(MediaResource* aResource = nullptr,
+                        nsIStreamListener** aListener = nullptr,
+                        MediaDecoder* aCloneDonor = nullptr) MOZ_OVERRIDE;
 
   // Loads the next byte range (or first one on first call). Called on the main
   // thread only.
   void LoadNextByteRange();
 
   // Returns true if the subsegment is already in the media cache.
   bool IsSubsegmentCached(int32_t aSubsegmentIdx);
 
--- a/content/media/moz.build
+++ b/content/media/moz.build
@@ -48,17 +48,16 @@ TEST_DIRS += ['test']
 MODULE = 'content'
 
 EXPORTS += [
     'AbstractMediaDecoder.h',
     'AudioAvailableEventManager.h',
     'AudioChannelFormat.h',
     'AudioEventTimeline.h',
     'AudioNodeEngine.h',
-    'AudioNodeExternalInputStream.h',
     'AudioNodeStream.h',
     'AudioSampleFormat.h',
     'AudioSegment.h',
     'AudioStream.h',
     'BufferMediaResource.h',
     'DOMMediaStream.h',
     'DecoderTraits.h',
     'EncodedBufferCache.h',
@@ -93,17 +92,16 @@ EXPORTS.mozilla.dom += [
     'VideoPlaybackQuality.h',
     'VideoStreamTrack.h',
 ]
 
 CPP_SOURCES += [
     'AudioAvailableEventManager.cpp',
     'AudioChannelFormat.cpp',
     'AudioNodeEngine.cpp',
-    'AudioNodeExternalInputStream.cpp',
     'AudioNodeStream.cpp',
     'AudioSegment.cpp',
     'AudioStream.cpp',
     'AudioStreamTrack.cpp',
     'DOMMediaStream.cpp',
     'DecoderTraits.cpp',
     'EncodedBufferCache.cpp',
     'FileBlockCache.cpp',
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -11,17 +11,16 @@
 #include "mozilla/dom/AudioContextBinding.h"
 #include "mozilla/dom/OfflineAudioContextBinding.h"
 #include "MediaStreamGraph.h"
 #include "mozilla/dom/AnalyserNode.h"
 #include "AudioDestinationNode.h"
 #include "AudioBufferSourceNode.h"
 #include "AudioBuffer.h"
 #include "GainNode.h"
-#include "MediaStreamAudioSourceNode.h"
 #include "DelayNode.h"
 #include "PannerNode.h"
 #include "AudioListener.h"
 #include "DynamicsCompressorNode.h"
 #include "BiquadFilterNode.h"
 #include "ScriptProcessorNode.h"
 #include "ChannelMergerNode.h"
 #include "ChannelSplitterNode.h"
@@ -249,28 +248,16 @@ AudioContext::CreateScriptProcessor(uint
 
 already_AddRefed<AnalyserNode>
 AudioContext::CreateAnalyser()
 {
   nsRefPtr<AnalyserNode> analyserNode = new AnalyserNode(this);
   return analyserNode.forget();
 }
 
-already_AddRefed<MediaStreamAudioSourceNode>
-AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
-                                      ErrorResult& aRv)
-{
-  if (mIsOffline) {
-    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
-    return nullptr;
-  }
-  nsRefPtr<MediaStreamAudioSourceNode> mediaStreamAudioSourceNode = new MediaStreamAudioSourceNode(this, &aMediaStream);
-  return mediaStreamAudioSourceNode.forget();
-}
-
 already_AddRefed<GainNode>
 AudioContext::CreateGain()
 {
   nsRefPtr<GainNode> gainNode = new GainNode(this);
   return gainNode.forget();
 }
 
 already_AddRefed<WaveShaperNode>
--- a/content/media/webaudio/AudioContext.h
+++ b/content/media/webaudio/AudioContext.h
@@ -48,17 +48,16 @@ class BiquadFilterNode;
 class ChannelMergerNode;
 class ChannelSplitterNode;
 class ConvolverNode;
 class DelayNode;
 class DynamicsCompressorNode;
 class GainNode;
 class GlobalObject;
 class MediaStreamAudioDestinationNode;
-class MediaStreamAudioSourceNode;
 class OfflineRenderSuccessCallback;
 class PannerNode;
 class ScriptProcessorNode;
 class WaveShaperNode;
 class PeriodicWave;
 
 class AudioContext MOZ_FINAL : public nsDOMEventTargetHelper,
                                public EnableWebAudioCheck
@@ -157,19 +156,16 @@ public:
   CreateWaveShaper();
 
   already_AddRefed<GainNode>
   CreateGainNode()
   {
     return CreateGain();
   }
 
-  already_AddRefed<MediaStreamAudioSourceNode>
-  CreateMediaStreamSource(DOMMediaStream& aMediaStream, ErrorResult& aRv);
-
   already_AddRefed<DelayNode>
   CreateDelay(double aMaxDelayTime, ErrorResult& aRv);
 
   already_AddRefed<DelayNode>
   CreateDelayNode(double aMaxDelayTime, ErrorResult& aRv)
   {
     return CreateDelay(aMaxDelayTime, aRv);
   }
--- a/content/media/webaudio/AudioParamTimeline.h
+++ b/content/media/webaudio/AudioParamTimeline.h
@@ -45,17 +45,17 @@ public:
   }
 
   // Get the value of the AudioParam at time aTime + aCounter.
   // aCounter is an offset added to aTime when querying the value in ticks;
   // otherwise it must be zero.  aCounter is meant to be used when
   // getting the value of an a-rate AudioParam for each tick inside an
   // AudioNodeEngine implementation.
   template<class TimeType>
-  float GetValueAtTime(TimeType aTime, size_t aCounter = 0)
+  float GetValueAtTime(TimeType aTime, size_t aCounter = 0) const
   {
     MOZ_ASSERT(aCounter < WEBAUDIO_BLOCK_SIZE);
     MOZ_ASSERT(!aCounter || !HasSimpleValue());
 
     // Mix the value of the AudioParam itself with that of the AudioNode inputs.
     return BaseClass::GetValueAtTime(static_cast<TimeType>(aTime + aCounter)) +
            (mStream ? AudioNodeInputValue(aCounter) : 0.0f);
   }
deleted file mode 100644
--- a/content/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "MediaStreamAudioSourceNode.h"
-#include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h"
-#include "AudioNodeEngine.h"
-#include "AudioNodeExternalInputStream.h"
-#include "DOMMediaStream.h"
-
-namespace mozilla {
-namespace dom {
-
-NS_IMPL_CYCLE_COLLECTION_CLASS(MediaStreamAudioSourceNode)
-
-NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaStreamAudioSourceNode)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mInputStream)
-NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode)
-
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(MediaStreamAudioSourceNode, AudioNode)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mInputStream)
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
-
-NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(MediaStreamAudioSourceNode)
-NS_INTERFACE_MAP_END_INHERITING(AudioNode)
-
-NS_IMPL_ADDREF_INHERITED(MediaStreamAudioSourceNode, AudioNode)
-NS_IMPL_RELEASE_INHERITED(MediaStreamAudioSourceNode, AudioNode)
-
-MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* aContext,
-                                                       DOMMediaStream* aMediaStream)
-  : AudioNode(aContext,
-              2,
-              ChannelCountMode::Max,
-              ChannelInterpretation::Speakers),
-    mInputStream(aMediaStream)
-{
-  AudioNodeEngine* engine = new AudioNodeEngine(this);
-  mStream = aContext->Graph()->CreateAudioNodeExternalInputStream(engine);
-  ProcessedMediaStream* outputStream = static_cast<ProcessedMediaStream*>(mStream.get());
-  mInputPort = outputStream->AllocateInputPort(aMediaStream->GetStream(),
-                                               MediaInputPort::FLAG_BLOCK_INPUT);
-  mInputStream->AddConsumerToKeepAlive(this);
-}
-
-MediaStreamAudioSourceNode::~MediaStreamAudioSourceNode()
-{
-}
-
-void
-MediaStreamAudioSourceNode::DestroyMediaStream()
-{
-  if (mInputPort) {
-    mInputPort->Destroy();
-    mInputPort = nullptr;
-  }
-  AudioNode::DestroyMediaStream();
-}
-
-JSObject*
-MediaStreamAudioSourceNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
-{
-  return MediaStreamAudioSourceNodeBinding::Wrap(aCx, aScope, this);
-}
-
-}
-}
-
deleted file mode 100644
--- a/content/media/webaudio/MediaStreamAudioSourceNode.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MediaStreamAudioSourceNode_h_
-#define MediaStreamAudioSourceNode_h_
-
-#include "AudioNode.h"
-
-namespace mozilla {
-
-class DOMMediaStream;
-
-namespace dom {
-
-class MediaStreamAudioSourceNode : public AudioNode
-{
-public:
-  MediaStreamAudioSourceNode(AudioContext* aContext, DOMMediaStream* aMediaStream);
-  // Define constructor out-of-line so we can forward-declare DOMMediaStream
-  virtual ~MediaStreamAudioSourceNode();
-
-  NS_DECL_ISUPPORTS_INHERITED
-  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaStreamAudioSourceNode, AudioNode)
-
-  virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope) MOZ_OVERRIDE;
-
-  virtual void DestroyMediaStream() MOZ_OVERRIDE;
-
-  virtual uint16_t NumberOfInputs() const MOZ_OVERRIDE { return 0; }
-
-private:
-  nsRefPtr<MediaInputPort> mInputPort;
-  nsRefPtr<DOMMediaStream> mInputStream;
-};
-
-}
-}
-
-#endif
--- a/content/media/webaudio/WaveShaperNode.cpp
+++ b/content/media/webaudio/WaveShaperNode.cpp
@@ -5,17 +5,16 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "WaveShaperNode.h"
 #include "mozilla/dom/WaveShaperNodeBinding.h"
 #include "AudioNode.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "mozilla/PodOperations.h"
-#include "speex/speex_resampler.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(WaveShaperNode)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(WaveShaperNode, AudioNode)
   NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
@@ -32,181 +31,29 @@ NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(Wav
 NS_IMPL_CYCLE_COLLECTION_TRACE_END
 
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(WaveShaperNode)
 NS_INTERFACE_MAP_END_INHERITING(AudioNode)
 
 NS_IMPL_ADDREF_INHERITED(WaveShaperNode, AudioNode)
 NS_IMPL_RELEASE_INHERITED(WaveShaperNode, AudioNode)
 
-static uint32_t ValueOf(OverSampleType aType)
-{
-  switch (aType) {
-  case OverSampleType::None: return 1;
-  case OverSampleType::_2x:  return 2;
-  case OverSampleType::_4x:  return 4;
-  default:
-    NS_NOTREACHED("We should never reach here");
-    return 1;
-  }
-}
-
-class Resampler
-{
-public:
-  Resampler()
-    : mType(OverSampleType::None)
-    , mUpSampler(nullptr)
-    , mDownSampler(nullptr)
-    , mChannels(0)
-    , mSampleRate(0)
-  {
-  }
-
-  ~Resampler()
-  {
-    Destroy();
-  }
-
-  void Reset(uint32_t aChannels, TrackRate aSampleRate, OverSampleType aType)
-  {
-    if (aChannels == mChannels &&
-        aSampleRate == mSampleRate &&
-        aType == mType) {
-      return;
-    }
-
-    mChannels = aChannels;
-    mSampleRate = aSampleRate;
-    mType = aType;
-
-    Destroy();
-
-    if (aType == OverSampleType::None) {
-      mBuffer.Clear();
-      return;
-    }
-
-    mUpSampler = speex_resampler_init(aChannels,
-                                      aSampleRate,
-                                      aSampleRate * ValueOf(aType),
-                                      SPEEX_RESAMPLER_QUALITY_DEFAULT,
-                                      nullptr);
-    mDownSampler = speex_resampler_init(aChannels,
-                                        aSampleRate * ValueOf(aType),
-                                        aSampleRate,
-                                        SPEEX_RESAMPLER_QUALITY_DEFAULT,
-                                        nullptr);
-    mBuffer.SetLength(WEBAUDIO_BLOCK_SIZE*ValueOf(aType));
-  }
-
-  float* UpSample(uint32_t aChannel, const float* aInputData, uint32_t aBlocks)
-  {
-    uint32_t inSamples = WEBAUDIO_BLOCK_SIZE;
-    uint32_t outSamples = WEBAUDIO_BLOCK_SIZE*aBlocks;
-    float* outputData = mBuffer.Elements();
-
-    MOZ_ASSERT(mBuffer.Length() == outSamples);
-
-    speex_resampler_process_float(mUpSampler, aChannel,
-                                  aInputData, &inSamples,
-                                  outputData, &outSamples);
-
-    MOZ_ASSERT(inSamples == WEBAUDIO_BLOCK_SIZE && outSamples == WEBAUDIO_BLOCK_SIZE*aBlocks);
-
-    return outputData;
-  }
-
-  void DownSample(uint32_t aChannel, float* aOutputData, uint32_t aBlocks)
-  {
-    uint32_t inSamples = WEBAUDIO_BLOCK_SIZE*aBlocks;
-    uint32_t outSamples = WEBAUDIO_BLOCK_SIZE;
-    const float* inputData = mBuffer.Elements();
-
-    MOZ_ASSERT(mBuffer.Length() == inSamples);
-
-    speex_resampler_process_float(mDownSampler, aChannel,
-                                  inputData, &inSamples,
-                                  aOutputData, &outSamples);
-
-    MOZ_ASSERT(inSamples == WEBAUDIO_BLOCK_SIZE*aBlocks && outSamples == WEBAUDIO_BLOCK_SIZE);
-  }
-
-private:
-  void Destroy()
-  {
-    if (mUpSampler) {
-      speex_resampler_destroy(mUpSampler);
-      mUpSampler = nullptr;
-    }
-    if (mDownSampler) {
-      speex_resampler_destroy(mDownSampler);
-      mDownSampler = nullptr;
-    }
-  }
-
-private:
-  OverSampleType mType;
-  SpeexResamplerState* mUpSampler;
-  SpeexResamplerState* mDownSampler;
-  uint32_t mChannels;
-  TrackRate mSampleRate;
-  nsTArray<float> mBuffer;
-};
-
 class WaveShaperNodeEngine : public AudioNodeEngine
 {
 public:
   explicit WaveShaperNodeEngine(AudioNode* aNode)
     : AudioNodeEngine(aNode)
-    , mType(OverSampleType::None)
   {
   }
 
-  enum Parameteres {
-    TYPE
-  };
-
   virtual void SetRawArrayData(nsTArray<float>& aCurve) MOZ_OVERRIDE
   {
     mCurve.SwapElements(aCurve);
   }
 
-  virtual void SetInt32Parameter(uint32_t aIndex, int32_t aValue) MOZ_OVERRIDE
-  {
-    switch (aIndex) {
-    case TYPE:
-      mType = static_cast<OverSampleType>(aValue);
-      break;
-    default:
-      NS_ERROR("Bad WaveShaperNode Int32Parameter");
-    }
-  }
-
-  template <uint32_t blocks>
-  void ProcessCurve(const float* aInputBuffer, float* aOutputBuffer)
-  {
-    for (uint32_t j = 0; j < WEBAUDIO_BLOCK_SIZE*blocks; ++j) {
-      // Index into the curve array based on the amplitude of the
-      // incoming signal by clamping the amplitude to [-1, 1] and
-      // performing a linear interpolation of the neighbor values.
-      float index = std::max(0.0f, std::min(float(mCurve.Length() - 1),
-                                            mCurve.Length() * (aInputBuffer[j] + 1) / 2));
-      uint32_t indexLower = uint32_t(index);
-      uint32_t indexHigher = uint32_t(index + 1.0f);
-      if (indexHigher == mCurve.Length()) {
-        aOutputBuffer[j] = mCurve[indexLower];
-      } else {
-        float interpolationFactor = index - indexLower;
-        aOutputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] +
-                                   interpolationFactor  * mCurve[indexHigher];
-      }
-    }
-  }
-
   virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                  const AudioChunk& aInput,
                                  AudioChunk* aOutput,
                                  bool* aFinished)
   {
     uint32_t channelCount = aInput.mChannelData.Length();
     if (!mCurve.Length() || !channelCount) {
       // Optimize the case where we don't have a curve buffer,
@@ -214,54 +61,45 @@ public:
       *aOutput = aInput;
       return;
     }
 
     AllocateAudioBlock(channelCount, aOutput);
     for (uint32_t i = 0; i < channelCount; ++i) {
       const float* inputBuffer = static_cast<const float*>(aInput.mChannelData[i]);
       float* outputBuffer = const_cast<float*> (static_cast<const float*>(aOutput->mChannelData[i]));
-      float* sampleBuffer;
-
-      switch (mType) {
-      case OverSampleType::None:
-        mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::None);
-        ProcessCurve<1>(inputBuffer, outputBuffer);
-        break;
-      case OverSampleType::_2x:
-        mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_2x);
-        sampleBuffer = mResampler.UpSample(i, inputBuffer, 2);
-        ProcessCurve<2>(sampleBuffer, sampleBuffer);
-        mResampler.DownSample(i, outputBuffer, 2);
-        break;
-      case OverSampleType::_4x:
-        mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_4x);
-        sampleBuffer = mResampler.UpSample(i, inputBuffer, 4);
-        ProcessCurve<4>(sampleBuffer, sampleBuffer);
-        mResampler.DownSample(i, outputBuffer, 4);
-        break;
-      default:
-        NS_NOTREACHED("We should never reach here");
+      for (uint32_t j = 0; j < WEBAUDIO_BLOCK_SIZE; ++j) {
+        // Index into the curve array based on the amplitude of the
+        // incoming signal by clamping the amplitude to [-1, 1] and
+        // performing a linear interpolation of the neighbor values.
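+        // For example, with a 3-point curve, inputs -1, 0 and +1 map to
+        // indices 0, 1.5 (interpolated between points 1 and 2) and 2.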
+        float index = std::max(0.0f, std::min(float(mCurve.Length() - 1),
+                                              mCurve.Length() * (inputBuffer[j] + 1) / 2));
+        uint32_t indexLower = uint32_t(index);
+        uint32_t indexHigher = uint32_t(index + 1.0f);
+        if (indexHigher == mCurve.Length()) {
+          outputBuffer[j] = mCurve[indexLower];
+        } else {
+          float interpolationFactor = index - indexLower;
+          outputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] +
+                                    interpolationFactor  * mCurve[indexHigher];
+        }
       }
     }
   }
 
 private:
   nsTArray<float> mCurve;
-  OverSampleType mType;
-  Resampler mResampler;
 };
 
 WaveShaperNode::WaveShaperNode(AudioContext* aContext)
   : AudioNode(aContext,
               2,
               ChannelCountMode::Max,
               ChannelInterpretation::Speakers)
   , mCurve(nullptr)
-  , mType(OverSampleType::None)
 {
   NS_HOLD_JS_OBJECTS(this, WaveShaperNode);
 
   WaveShaperNodeEngine* engine = new WaveShaperNodeEngine(this);
   mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
 }
 
 WaveShaperNode::~WaveShaperNode()
@@ -295,17 +133,10 @@ WaveShaperNode::SetCurve(const Float32Ar
     mCurve = nullptr;
   }
 
   AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
   MOZ_ASSERT(ns, "Why don't we have a stream here?");
   ns->SetRawArrayData(curve);
 }
 
-void
-WaveShaperNode::SetOversample(OverSampleType aType)
-{
-  mType = aType;
-  SendInt32ParameterToStream(WaveShaperNodeEngine::TYPE, static_cast<int32_t>(aType));
-}
-
 }
 }
--- a/content/media/webaudio/WaveShaperNode.h
+++ b/content/media/webaudio/WaveShaperNode.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WaveShaperNode_h_
 #define WaveShaperNode_h_
 
 #include "AudioNode.h"
 #include "AudioParam.h"
-#include "mozilla/dom/WaveShaperNodeBinding.h"
 
 namespace mozilla {
 namespace dom {
 
 class AudioContext;
 
 class WaveShaperNode : public AudioNode
 {
@@ -29,26 +28,19 @@ public:
                                JS::Handle<JSObject*> aScope) MOZ_OVERRIDE;
 
   JSObject* GetCurve(JSContext* aCx) const
   {
     return mCurve;
   }
   void SetCurve(const Float32Array* aData);
 
-  OverSampleType Oversample() const
-  {
-    return mType;
-  }
-  void SetOversample(OverSampleType aType);
-
 private:
   void ClearCurve();
 
 private:
   JS::Heap<JSObject*> mCurve;
-  OverSampleType mType;
 };
 
 }
 }
 
 #endif
--- a/content/media/webaudio/compiledtest/TestAudioEventTimeline.cpp
+++ b/content/media/webaudio/compiledtest/TestAudioEventTimeline.cpp
@@ -309,25 +309,16 @@ void TestAfterLastTargetValueEvent()
 void TestAfterLastTargetValueEventWithValueSet()
 {
   Timeline timeline(10.0f);
 
   ErrorResultMock rv;
 
   timeline.SetValue(50.f);
   timeline.SetTargetAtTime(20.0f, 1.0, 5.0, rv);
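+  // The target curve decays exponentially from the set value towards the
+  // target: v(t) = 20 + (50 - 20) * exp(-(t - 1) / 5), hence exp(-9/5) at t = 10.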
-
-  // When using SetTargetValueAtTime, Timeline become stateful: the value for
-  // time t may depend on the time t-1, so we can't just query the value at a
-  // time and get the right value. We have to call GetValueAtTime for the
-  // previous times.
-  for (double i = 0.0; i < 9.99; i+=0.01) {
-    timeline.GetValueAtTime(i);
-  }
-
   is(timeline.GetValueAtTime(10.), (20.f + (50.f - 20.f) * expf(-9.0f / 5.0f)), "Return the value after SetValue and the last SetTarget event based on the curve");
 }
 
 void TestValue()
 {
   Timeline timeline(10.0f);
 
   ErrorResultMock rv;
--- a/content/media/webaudio/moz.build
+++ b/content/media/webaudio/moz.build
@@ -35,17 +35,16 @@ EXPORTS.mozilla.dom += [
     'ChannelMergerNode.h',
     'ChannelSplitterNode.h',
     'ConvolverNode.h',
     'DelayNode.h',
     'DynamicsCompressorNode.h',
     'EnableWebAudioCheck.h',
     'GainNode.h',
     'MediaStreamAudioDestinationNode.h',
-    'MediaStreamAudioSourceNode.h',
     'OfflineAudioCompletionEvent.h',
     'PannerNode.h',
     'PeriodicWave.h',
     'ScriptProcessorNode.h',
     'WaveShaperNode.h',
 ]
 
 CPP_SOURCES += [
@@ -63,17 +62,16 @@ CPP_SOURCES += [
     'ChannelSplitterNode.cpp',
     'ConvolverNode.cpp',
     'DelayNode.cpp',
     'DynamicsCompressorNode.cpp',
     'EnableWebAudioCheck.cpp',
     'GainNode.cpp',
     'MediaBufferDecoder.cpp',
     'MediaStreamAudioDestinationNode.cpp',
-    'MediaStreamAudioSourceNode.cpp',
     'OfflineAudioCompletionEvent.cpp',
     'PannerNode.cpp',
     'PeriodicWave.cpp',
     'ScriptProcessorNode.cpp',
     'ThreeDPoint.cpp',
     'WaveShaperNode.cpp',
     'WebAudioUtils.cpp',
 ]
--- a/content/media/webaudio/test/Makefile.in
+++ b/content/media/webaudio/test/Makefile.in
@@ -63,19 +63,16 @@ MOCHITEST_FILES := \
   test_delayNodeSmallMaxDelay.html \
   test_delayNodeWithGain.html \
   test_dynamicsCompressorNode.html \
   test_gainNode.html \
   test_gainNodeInLoop.html \
   test_maxChannelCount.html \
   test_mediaDecoding.html \
   test_mediaStreamAudioDestinationNode.html \
-  test_mediaStreamAudioSourceNode.html \
-  test_mediaStreamAudioSourceNodeCrossOrigin.html \
-  test_mediaStreamAudioSourceNodeResampling.html \
   test_mixingRules.html \
   test_nodeToParamConnection.html \
   test_OfflineAudioContext.html \
   test_offlineDestinationChannelCountLess.html \
   test_offlineDestinationChannelCountMore.html \
   test_pannerNode.html \
   test_pannerNode_equalPower.html \
   test_periodicWave.html \
--- a/content/media/webaudio/test/test_audioParamSetTargetAtTime.html
+++ b/content/media/webaudio/test/test_audioParamSetTargetAtTime.html
@@ -5,18 +5,18 @@
   <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
   <script type="text/javascript" src="webaudio.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
 </head>
 <body>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-var V0 = 0.9;
-var V1 = 0.1;
+var V0 = 0.1;
+var V1 = 0.9;
 var T0 = 0;
 var TimeConstant = 10;
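+// setTargetAtTime approaches the target exponentially:
+//   v(t) = target + (start - target) * exp(-(t - T0) / TimeConstant)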
 
 var gTest = {
   length: 2048,
   numberOfChannels: 1,
   createGraph: function(context) {
     var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
deleted file mode 100644
--- a/content/media/webaudio/test/test_mediaStreamAudioSourceNode.html
+++ /dev/null
@@ -1,47 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<meta charset="utf-8">
-<head>
-  <title>Test MediaStreamAudioSourceNode processing is correct</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="text/javascript" src="webaudio.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-
-function createBuffer(context, delay) {
-  var buffer = context.createBuffer(2, 2048, context.sampleRate);
-  for (var i = 0; i < 2048 - delay; ++i) {
-    buffer.getChannelData(0)[i + delay] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
-    buffer.getChannelData(1)[i + delay] = buffer.getChannelData(0)[i + delay];
-  }
-  return buffer;
-}
-
-var gTest = {
-  length: 2048,
-  skipOfflineContextTests: true,
-  createGraph: function(context) {
-    var sourceGraph = new AudioContext();
-    var source = sourceGraph.createBufferSource();
-    source.buffer = createBuffer(context, 0);
-    var dest = sourceGraph.createMediaStreamDestination();
-    source.connect(dest);
-    source.start(0);
-
-    var mediaStreamSource = context.createMediaStreamSource(dest.stream);
-    return mediaStreamSource;
-  },
-  createExpectedBuffers: function(context) {
-    return createBuffer(context, 1);
-  },
-};
-
-runTest();
-
-</script>
-</pre>
-</body>
-</html>
deleted file mode 100644
--- a/content/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
+++ /dev/null
@@ -1,57 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<meta charset="utf-8">
-<head>
-  <title>Test MediaStreamAudioSourceNode doesn't get data from cross-origin media resources</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-SimpleTest.waitForExplicitFinish();
-
-var audio = new Audio("http://example.org:80/tests/content/media/webaudio/test/small-shot.ogg");
-var context = new AudioContext();
-var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
-var sp = context.createScriptProcessor(2048, 1);
-node.connect(sp);
-var nonzeroSampleCount = 0;
-var complete = false;
-var iterationCount = 0;
-
-// This test ensures we receive at least expectedSampleCount nonzero samples
-function processSamples(e) {
-  if (complete) {
-    return;
-  }
-
-  if (iterationCount == 0) {
-    // Don't start playing the audio until the AudioContext stuff is connected
-    // and running.
-    audio.play();
-  }
-  ++iterationCount;
-
-  var buf = e.inputBuffer.getChannelData(0);
-  var nonzeroSamplesThisBuffer = 0;
-  for (var i = 0; i < buf.length; ++i) {
-    if (buf[i] != 0) {
-      ++nonzeroSamplesThisBuffer;
-    }
-  }
-  is(nonzeroSamplesThisBuffer, 0,
-     "Checking all samples are zero");
-  if (iterationCount >= 20) {
-    SimpleTest.finish();
-    complete = true;
-  }
-}
-
-audio.oncanplaythrough = function() {
-  sp.onaudioprocess = processSamples;
-};
-</script>
-</pre>
-</body>
-</html>
deleted file mode 100644
--- a/content/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html
+++ /dev/null
@@ -1,69 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<meta charset="utf-8">
-<head>
-  <title>Test MediaStreamAudioSourceNode processing is correct</title>
-  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<pre id="test">
-<script class="testbody" type="text/javascript">
-SimpleTest.waitForExplicitFinish();
-
-var audio = new Audio("small-shot.ogg");
-var context = new AudioContext();
-var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
-var sp = context.createScriptProcessor(2048, 1);
-node.connect(sp);
-var expectedMinNonzeroSampleCount;
-var expectedMaxNonzeroSampleCount;
-var nonzeroSampleCount = 0;
-var complete = false;
-var iterationCount = 0;
-
-// This test ensures we receive at least expectedSampleCount nonzero samples
-function processSamples(e) {
-  if (complete) {
-    return;
-  }
-
-  if (iterationCount == 0) {
-    // Don't start playing the audio until the AudioContext stuff is connected
-    // and running.
-    audio.play();
-  }
-  ++iterationCount;
-
-  var buf = e.inputBuffer.getChannelData(0);
-  var nonzeroSamplesThisBuffer = 0;
-  for (var i = 0; i < buf.length; ++i) {
-    if (buf[i] != 0) {
-      ++nonzeroSamplesThisBuffer;
-    }
-  }
-  nonzeroSampleCount += nonzeroSamplesThisBuffer;
-  is(e.inputBuffer.numberOfChannels, 1,
-     "Checking data channel count (nonzeroSamplesThisBuffer=" +
-     nonzeroSamplesThisBuffer + ")");
-  ok(nonzeroSampleCount <= expectedMaxNonzeroSampleCount,
-     "Too many nonzero samples (got " + nonzeroSampleCount + ", expected max " + expectedMaxNonzeroSampleCount + ")");
-  if (nonzeroSampleCount >= expectedMinNonzeroSampleCount &&
-      nonzeroSamplesThisBuffer == 0) {
-    ok(true,
-     "Check received enough nonzero samples (got " + nonzeroSampleCount + ", expected min " + expectedMinNonzeroSampleCount + ")");
-    SimpleTest.finish();
-    complete = true;
-  }
-}
-
-audio.oncanplaythrough = function() {
-  // Use a fuzz factor of 100 to account for samples that just happen to be zero
-  expectedMinNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) - 100;
-  expectedMaxNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) + 500;
-  sp.onaudioprocess = processSamples;
-};
-</script>
-</pre>
-</body>
-</html>
--- a/content/media/webaudio/test/webaudio.js
+++ b/content/media/webaudio/test/webaudio.js
@@ -51,17 +51,17 @@ function compareBuffers(buf1, buf2,
       if (firstBadIndex == -1) {
         firstBadIndex = i;
       }
     }
   };
 
   is(difference, 0, "Found " + difference + " different samples, maxDifference: " +
      maxDifference + ", first bad index: " + firstBadIndex +
-     " with source offset " + sourceOffset + " and destination offset " +
+     " with source offset " + sourceOffset + " and desitnation offset " +
      destOffset);
 }
 
 function getEmptyBuffer(context, length) {
   return context.createBuffer(gTest.numberOfChannels, length, context.sampleRate);
 }
 
 /**
@@ -82,27 +82,25 @@ function getEmptyBuffer(context, length)
  *                     or createGraph must be provided.
  * + createExpectedBuffers: optional method which takes a context object and
  *                          returns either one expected buffer or an array of
  *                          them, designating what is expected to be observed
  *                          in the output.  If omitted, the output is expected
  *                          to be silence.  The sum of the length of the expected
  *                          buffers should be equal to gTest.length.  This
  *                          function is guaranteed to be called before createGraph.
- * + skipOfflineContextTests: optional. when true, skips running tests on an offline
- *                            context by circumventing testOnOfflineContext.
  */
 function runTest()
 {
   function done() {
     SimpleTest.finish();
   }
 
   SimpleTest.waitForExplicitFinish();
-  function runTestFunction () {
+  addLoadEvent(function() {
     if (!gTest.numberOfChannels) {
       gTest.numberOfChannels = 2; // default
     }
 
     var testLength;
 
     function runTestOnContext(context, callback, testOutput) {
       if (!gTest.createExpectedBuffers) {
@@ -174,30 +172,19 @@ function runTest()
                              true);
             }
             samplesSeen += expectedBuffer.length;
           }
           callback();
         };
         context.startRendering();
       }
-
       var context = new OfflineAudioContext(gTest.numberOfChannels, testLength, sampleRate);
       runTestOnContext(context, callback, testOutput);
     }
 
     testOnNormalContext(function() {
-      if (!gTest.skipOfflineContextTests) {
-        testOnOfflineContext(function() {
-          testOnOfflineContext(done, 44100);
-        }, 48000);
-      } else {
-        done();
-      }
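+      // Re-run the whole test on OfflineAudioContext, at 48 kHz then 44.1 kHz.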
+      testOnOfflineContext(function() {
+        testOnOfflineContext(done, 44100);
+      }, 48000);
     });
-  };
-
-  if (document.readyState !== 'complete') {
-    addLoadEvent(runTestFunction);
-  } else {
-    runTestFunction();
-  }
+  });
 }
--- a/content/media/webspeech/recognition/SpeechStreamListener.cpp
+++ b/content/media/webspeech/recognition/SpeechStreamListener.cpp
@@ -35,56 +35,45 @@ SpeechStreamListener::NotifyQueuedTrackC
                                                uint32_t aTrackEvents,
                                                const MediaSegment& aQueuedMedia)
 {
   AudioSegment* audio = const_cast<AudioSegment*>(
     static_cast<const AudioSegment*>(&aQueuedMedia));
 
   AudioSegment::ChunkIterator iterator(*audio);
   while (!iterator.IsEnded()) {
-    // Skip over-large chunks so we don't crash!
-    if (iterator->GetDuration() > INT_MAX) {
-      continue;
-    }
-    int duration = int(iterator->GetDuration());
+    AudioSampleFormat format = iterator->mBufferFormat;
+
+    MOZ_ASSERT(format == AUDIO_FORMAT_S16 || format == AUDIO_FORMAT_FLOAT32);
 
-    if (iterator->IsNull()) {
-      nsTArray<int16_t> nullData;
-      PodZero(nullData.AppendElements(duration), duration);
-      ConvertAndDispatchAudioChunk(duration, iterator->mVolume, nullData.Elements());
-    } else {
-      AudioSampleFormat format = iterator->mBufferFormat;
-
-      MOZ_ASSERT(format == AUDIO_FORMAT_S16 || format == AUDIO_FORMAT_FLOAT32);
-
-      if (format == AUDIO_FORMAT_S16) {
-        ConvertAndDispatchAudioChunk(duration, iterator->mVolume,
-                                     static_cast<const int16_t*>(iterator->mChannelData[0]));
-      } else if (format == AUDIO_FORMAT_FLOAT32) {
-        ConvertAndDispatchAudioChunk(duration, iterator->mVolume,
-                                     static_cast<const float*>(iterator->mChannelData[0]));
-      }
+    if (format == AUDIO_FORMAT_S16) {
+      ConvertAndDispatchAudioChunk<int16_t>(*iterator);
+    } else if (format == AUDIO_FORMAT_FLOAT32) {
+      ConvertAndDispatchAudioChunk<float>(*iterator);
     }
 
     iterator.Next();
   }
 }
 
 template<typename SampleFormatType> void
-SpeechStreamListener::ConvertAndDispatchAudioChunk(int aDuration, float aVolume,
-                                                   SampleFormatType* aData)
+SpeechStreamListener::ConvertAndDispatchAudioChunk(AudioChunk& aChunk)
 {
-  nsRefPtr<SharedBuffer> samples(SharedBuffer::Create(aDuration *
+  nsRefPtr<SharedBuffer> samples(SharedBuffer::Create(aChunk.mDuration *
                                                       1 * // channel
                                                       sizeof(int16_t)));
 
+  const SampleFormatType* from =
+    static_cast<const SampleFormatType*>(aChunk.mChannelData[0]);
+
   int16_t* to = static_cast<int16_t*>(samples->Data());
-  ConvertAudioSamplesWithScale(aData, to, aDuration, aVolume);
+  ConvertAudioSamplesWithScale(from, to, aChunk.mDuration, aChunk.mVolume);
 
-  mRecognition->FeedAudioData(samples.forget(), aDuration, this);
+  mRecognition->FeedAudioData(samples.forget(), aChunk.mDuration, this);
 }
 
 void
 SpeechStreamListener::NotifyFinished(MediaStreamGraph* aGraph)
 {
   // TODO dispatch SpeechEnd event so services can be informed
 }
 
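The restored ConvertAndDispatchAudioChunk is selected at runtime from the chunk's mBufferFormat and reads duration, volume and channel data off the AudioChunk itself rather than taking them as separate arguments. Below is a minimal self-contained sketch of that dispatch pattern, assuming a mono chunk; Chunk, ScaleSample and ConvertChunk are hypothetical stand-ins for AudioChunk, ConvertAudioSamplesWithScale and ConvertAndDispatchAudioChunk, not the tree's code:

// Standalone sketch of dispatching a conversion template on a runtime
// sample-format tag, mirroring the restored listener code. Chunk and
// ScaleSample are hypothetical stand-ins, not Gecko types.
#include <cstdint>
#include <cstdio>
#include <vector>

enum SampleFormat { FORMAT_S16, FORMAT_FLOAT32 };

// Stand-in for an AudioChunk: frame count, volume scale, format tag,
// and one channel of raw sample data.
struct Chunk {
  int duration;
  float volume;
  SampleFormat format;
  const void* channelData;
};

// Scalers standing in for ConvertAudioSamplesWithScale: int16 samples
// take a float multiply; float32 samples are clamped into int16 range.
static int16_t ScaleSample(int16_t s, float v) {
  return static_cast<int16_t>(s * v);
}
static int16_t ScaleSample(float s, float v) {
  float scaled = s * v * 32767.0f;
  if (scaled > 32767.0f) scaled = 32767.0f;
  if (scaled < -32768.0f) scaled = -32768.0f;
  return static_cast<int16_t>(scaled);
}

// The template reads everything it needs off the chunk, so callers no
// longer pass (duration, volume, data) separately.
template <typename SampleFormatType>
std::vector<int16_t> ConvertChunk(const Chunk& aChunk) {
  const SampleFormatType* from =
      static_cast<const SampleFormatType*>(aChunk.channelData);
  std::vector<int16_t> to(aChunk.duration);
  for (int i = 0; i < aChunk.duration; ++i) {
    to[i] = ScaleSample(from[i], aChunk.volume);
  }
  return to;
}

int main() {
  const float samples[] = {0.5f, -0.25f, 1.0f};
  Chunk chunk{3, 1.0f, FORMAT_FLOAT32, samples};

  // The runtime tag picks the instantiation, as in NotifyQueuedTrackChanges.
  std::vector<int16_t> out = chunk.format == FORMAT_S16
      ? ConvertChunk<int16_t>(chunk)
      : ConvertChunk<float>(chunk);

  for (int16_t s : out) std::printf("%d\n", s);
}

Keeping the format switch outside the template means each instantiation is fully typed, so the per-sample loop contains no branching.
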
--- a/content/media/webspeech/recognition/SpeechStreamListener.h
+++ b/content/media/webspeech/recognition/SpeechStreamListener.h
@@ -27,15 +27,14 @@ public:
                                 TrackRate aTrackRate,
                                 TrackTicks aTrackOffset,
                                 uint32_t aTrackEvents,
                                 const MediaSegment& aQueuedMedia) MOZ_OVERRIDE;
 
   void NotifyFinished(MediaStreamGraph* aGraph) MOZ_OVERRIDE;
 
 private:
-  template<typename SampleFormatType>
-  void ConvertAndDispatchAudioChunk(int aDuration, float aVolume, SampleFormatType* aData);
+  template<typename SampleFormatType> void ConvertAndDispatchAudioChunk(AudioChunk& aChunk);
   nsRefPtr<SpeechRecognition> mRecognition;
 };
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/webidl/AudioContext.webidl
+++ b/dom/webidl/AudioContext.webidl
@@ -37,18 +37,16 @@ interface AudioContext : EventTarget {
 
     [Creator, Throws]
     ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
                                               optional unsigned long numberOfInputChannels = 2,
                                               optional unsigned long numberOfOutputChannels = 2);
 
     [Creator]
     AnalyserNode createAnalyser();
-    [Creator, Throws]
-    MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
     [Creator]
     GainNode createGain();
     [Creator, Throws]
     DelayNode createDelay(optional double maxDelayTime = 1);
     [Creator]
     BiquadFilterNode createBiquadFilter();
     [Creator]
     WaveShaperNode createWaveShaper();
deleted file mode 100644
--- a/dom/webidl/MediaStreamAudioSourceNode.webidl
+++ /dev/null
@@ -1,16 +0,0 @@
-/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/.
- *
- * The origin of this IDL file is
- * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
- *
- * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
- * liability, trademark and document use rules apply.
- */
-
-interface MediaStreamAudioSourceNode : AudioNode {
-
-};
-
--- a/dom/webidl/WaveShaperNode.webidl
+++ b/dom/webidl/WaveShaperNode.webidl
@@ -5,22 +5,15 @@
  *
  * The origin of this IDL file is
  * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
  *
  * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
  * liability, trademark and document use rules apply.
  */
 
-enum OverSampleType {
-  "none",
-  "2x",
-  "4x"
-};
-
 [PrefControlled]
 interface WaveShaperNode : AudioNode {
 
       attribute Float32Array? curve;
-      attribute OverSampleType oversample;
 
 };
 
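After the backout, WaveShaperNode exposes only the nullable curve attribute; the oversample attribute and its OverSampleType enum are gone until the patches reland. As a rough illustration of what a shaping curve does, here is a hedged standalone sketch, not Gecko's implementation: each input sample in [-1, 1] picks a position in the curve array, and linear interpolation between the two nearest entries yields the output. The edge handling (clamping out-of-range input to the curve ends) is an assumption of the sketch.

// Hedged sketch of a WaveShaperNode-style curve lookup. This is the
// commonly described behaviour, not Gecko's actual implementation.
#include <cstddef>
#include <cstdio>
#include <initializer_list>
#include <vector>

static float ApplyCurve(const std::vector<float>& curve, float input) {
  const size_t n = curve.size();
  if (n == 0) return input;   // no curve set: pass the signal through
  if (n == 1) return curve[0];
  // Map [-1, 1] onto [0, n - 1] and clamp out-of-range input.
  float pos = (input + 1.0f) * 0.5f * static_cast<float>(n - 1);
  if (pos <= 0.0f) return curve.front();
  if (pos >= static_cast<float>(n - 1)) return curve.back();
  size_t i = static_cast<size_t>(pos);
  float frac = pos - static_cast<float>(i);
  return curve[i] * (1.0f - frac) + curve[i + 1] * frac;
}

int main() {
  // A three-point straight-line curve that attenuates by half.
  std::vector<float> curve = {-0.5f, 0.0f, 0.5f};
  for (float x : {-1.0f, -0.5f, 0.0f, 0.5f, 1.0f}) {
    std::printf("%+.2f -> %+.2f\n", x, ApplyCurve(curve, x));
  }
}

With a straight-line curve such as {-0.5, 0, 0.5} this is plain attenuation; bending the end points inward gives the familiar soft-clipping distortion.
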
--- a/dom/webidl/WebIDL.mk
+++ b/dom/webidl/WebIDL.mk
@@ -183,17 +183,16 @@ webidl_files = \
   LinkStyle.webidl \
   LocalMediaStream.webidl \
   Location.webidl \
   MediaError.webidl \
   MediaRecorder.webidl \
   MediaSource.webidl \
   MediaStream.webidl \
   MediaStreamAudioDestinationNode.webidl \
-  MediaStreamAudioSourceNode.webidl \
   MediaStreamEvent.webidl \
   MediaStreamTrack.webidl \
   MessageEvent.webidl \
   MimeType.webidl \
   MimeTypeArray.webidl \
   MobileMessageManager.webidl \
   MouseEvent.webidl \
   MouseScrollEvent.webidl \