Backout 40f09f7bc670 & fc262e3c635f (bug 818670) for frequent fedora64 mochitest-3 leaks on a CLOSED TREE
author Ed Morley <emorley@mozilla.com>
Wed, 30 Jan 2013 10:32:11 +0000
changeset 130225 054718506d8ce2bc6a536da0d0df82df7c3e2aad
parent 130224 8da4794af39407524d5278c1abf79fb8303f3a4b
child 130226 bfe496ca5c4094a4ec653a0e1d7dbfd615d0d67e
push id 2323
push user bbajaj@mozilla.com
push date Mon, 01 Apr 2013 19:47:02 +0000
treeherder mozilla-beta@7712be144d91 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
bugs 818670
milestone 21.0a1
backs out 40f09f7bc67013617382fb42a829bcd4011afea3
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backout 40f09f7bc670 & fc262e3c635f (bug 818670) for frequent fedora64 mochitest-3 leaks on a CLOSED TREE
content/media/webrtc/MediaEngine.h
content/media/webrtc/MediaEngineDefault.h
content/media/webrtc/MediaEngineWebRTC.h
content/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/MediaManager.cpp
dom/media/MediaManager.h
media/mtransport/build/Makefile.in
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
media/webrtc/signaling/test/mediaconduit_unittests.cpp
media/webrtc/signaling/test/mediapipeline_unittest.cpp
media/webrtc/webrtc_config.gypi
modules/libpref/src/init/all.js
--- a/content/media/webrtc/MediaEngine.h
+++ b/content/media/webrtc/MediaEngine.h
@@ -84,21 +84,16 @@ public:
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime) = 0;
 
   /* Stop the device and release the corresponding MediaStream */
   virtual nsresult Stop(SourceMediaStream *aSource, TrackID aID) = 0;
 
-  /* Change device configuration.  */
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) = 0;
-
   /* Return false if device is currently allocated or started */
   bool IsAvailable() {
     if (mState == kAllocated || mState == kStarted) {
       return false;
     } else {
       return true;
     }
   }
--- a/content/media/webrtc/MediaEngineDefault.h
+++ b/content/media/webrtc/MediaEngineDefault.h
@@ -39,19 +39,16 @@ public:
   virtual void GetUUID(nsAString&);
 
   virtual const MediaEngineVideoOptions *GetOptions();
   virtual nsresult Allocate();
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime);
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime) {}
 
   NS_DECL_ISUPPORTS
@@ -84,19 +81,16 @@ public:
   virtual void GetName(nsAString&);
   virtual void GetUUID(nsAString&);
 
   virtual nsresult Allocate();
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime);
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime) {}
 
   NS_DECL_ISUPPORTS
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -30,17 +30,16 @@
 // Audio Engine
 #include "voice_engine/include/voe_base.h"
 #include "voice_engine/include/voe_codec.h"
 #include "voice_engine/include/voe_hardware.h"
 #include "voice_engine/include/voe_network.h"
 #include "voice_engine/include/voe_audio_processing.h"
 #include "voice_engine/include/voe_volume_control.h"
 #include "voice_engine/include/voe_external_media.h"
-#include "voice_engine/include/voe_audio_processing.h"
 
 // Video Engine
 #include "video_engine/include/vie_base.h"
 #include "video_engine/include/vie_codec.h"
 #include "video_engine/include/vie_render.h"
 #include "video_engine/include/vie_capture.h"
 #include "video_engine/include/vie_file.h"
 
@@ -86,19 +85,16 @@ public:
   virtual void GetName(nsAString&);
   virtual void GetUUID(nsAString&);
   virtual const MediaEngineVideoOptions *GetOptions();
   virtual nsresult Allocate();
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime);
 
   NS_DECL_ISUPPORTS
 
@@ -176,20 +172,16 @@ class MediaEngineWebRTCAudioSource : pub
 public:
   MediaEngineWebRTCAudioSource(webrtc::VoiceEngine* aVoiceEnginePtr, int aIndex,
     const char* name, const char* uuid)
     : mVoiceEngine(aVoiceEnginePtr)
     , mMonitor("WebRTCMic.Monitor")
     , mCapIndex(aIndex)
     , mChannel(-1)
     , mInitDone(false)
-    , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
-    , mEchoCancel(webrtc::kEcDefault)
-    , mAGC(webrtc::kAgcDefault)
-    , mNoiseSuppress(webrtc::kNsDefault)
     , mNullTransport(nullptr) {
     MOZ_ASSERT(aVoiceEnginePtr);
     mState = kReleased;
     mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
     mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
     Init();
   }
   ~MediaEngineWebRTCAudioSource() { Shutdown(); }
@@ -197,20 +189,16 @@ public:
   virtual void GetName(nsAString&);
   virtual void GetUUID(nsAString&);
 
   virtual nsresult Allocate();
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise);
-
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime);
 
   // VoEMediaProcess.
   void Process(const int channel, const webrtc::ProcessingTypes type,
@@ -225,37 +213,31 @@ private:
 
   void Init();
   void Shutdown();
 
   webrtc::VoiceEngine* mVoiceEngine;
   webrtc::VoEBase* mVoEBase;
   webrtc::VoEExternalMedia* mVoERender;
   webrtc::VoENetwork*  mVoENetwork;
-  webrtc::VoEAudioProcessing *mVoEProcessing;
 
   // mMonitor protects mSources[] access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack()).
   // mSources[] is accessed from webrtc threads.
   mozilla::ReentrantMonitor mMonitor;
   nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
 
   int mCapIndex;
   int mChannel;
   TrackID mTrackID;
   bool mInitDone;
 
   nsString mDeviceName;
   nsString mDeviceUUID;
 
-  bool mEchoOn, mAgcOn, mNoiseOn;
-  webrtc::EcModes  mEchoCancel;
-  webrtc::AgcModes mAGC;
-  webrtc::NsModes  mNoiseSuppress;
-
   NullTransport *mNullTransport;
 };
 
 class MediaEngineWebRTC : public MediaEngine
 {
 public:
   MediaEngineWebRTC()
   : mMutex("mozilla::MediaEngineWebRTC")
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -42,70 +42,16 @@ MediaEngineWebRTCAudioSource::GetUUID(ns
   if (mInitDone) {
     aUUID.Assign(mDeviceUUID);
   }
 
   return;
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
-                                     bool aAgcOn, uint32_t aAGC,
-                                     bool aNoiseOn, uint32_t aNoise)
-{
-  LOG(("Audio config: aec: %d, agc: %d, noise: %d",
-       aEchoOn ? aEcho : -1,
-       aAgcOn ? aAGC : -1,
-       aNoiseOn ? aNoise : -1));
-
-  bool update_agc = (mAgcOn == aAgcOn);
-  bool update_noise = (mNoiseOn == aNoiseOn);
-  mAgcOn = aAgcOn;
-  mNoiseOn = aNoiseOn;
-
-  if ((webrtc::AgcModes) aAGC != webrtc::kAgcUnchanged) {
-    if (mAGC != (webrtc::AgcModes) aAGC) {
-      update_agc = true;
-      mAGC = (webrtc::AgcModes) aAGC;
-    }
-  }
-  if ((webrtc::NsModes) aNoise != webrtc::kNsUnchanged) {
-    if (mNoiseSuppress != (webrtc::NsModes) aNoise) {
-      update_noise = true;
-      mNoiseSuppress = (webrtc::NsModes) aNoise;
-    }
-  }
-
-  if (mInitDone) {
-    int error;
-#if 0
-    // Until we can support feeding our full output audio from the browser
-    // through the MediaStream, this won't work.  Or we need to move AEC to
-    // below audio input and output, perhaps invoked from here.
-    mEchoOn = aEchoOn;
-    if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged)
-      mEchoCancel = (webrtc::EcModes) aEcho;
-    mVoEProcessing->SetEcStatus(mEchoOn, aEcho);
-#else
-    (void) aEcho; (void) aEchoOn; // suppress warnings
-#endif
-
-    if (update_agc &&
-      0 != (error = mVoEProcessing->SetAgcStatus(mAgcOn, (webrtc::AgcModes) aAGC))) {
-      LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
-    }
-    if (update_noise &&
-      0 != (error = mVoEProcessing->SetNsStatus(mNoiseOn, (webrtc::NsModes) aNoise))) {
-      LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
-    }
-  }
-  return NS_OK;
-}
-
-nsresult
 MediaEngineWebRTCAudioSource::Allocate()
 {
   if (mState == kReleased && mInitDone) {
     webrtc::VoEHardware* ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
     int res = ptrVoEHw->SetRecordingDevice(mCapIndex);
     ptrVoEHw->Release();
     if (res) {
       return NS_ERROR_FAILURE;
@@ -155,21 +101,16 @@ MediaEngineWebRTCAudioSource::Start(Sour
   LOG(("Initial audio"));
   mTrackID = aID;
 
   if (mState == kStarted) {
     return NS_OK;
   }
   mState = kStarted;
 
-  // Configure audio processing in webrtc code
-  Config(mEchoOn, webrtc::kEcUnchanged,
-         mAgcOn, webrtc::kAgcUnchanged,
-         mNoiseOn, webrtc::kNsUnchanged);
-
   if (mVoEBase->StartReceive(mChannel)) {
     return NS_ERROR_FAILURE;
   }
   if (mVoEBase->StartSend(mChannel)) {
     return NS_ERROR_FAILURE;
   }
 
   // Attach external media processor, so this::Process will be called.
@@ -246,21 +187,16 @@ MediaEngineWebRTCAudioSource::Init()
   if (!mVoERender) {
     return;
   }
   mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
   if (!mVoENetwork) {
     return;
   }
 
-  mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
-  if (!mVoEProcessing) {
-    return;
-  }
-
   mChannel = mVoEBase->CreateChannel();
   if (mChannel < 0) {
     return;
   }
   mNullTransport = new NullTransport();
   if (mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
     return;
   }
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -6,18 +6,16 @@
 
 #include "MediaStreamGraph.h"
 #include "nsIDOMFile.h"
 #include "nsIEventTarget.h"
 #include "nsIUUIDGenerator.h"
 #include "nsIScriptGlobalObject.h"
 #include "nsIPopupWindowManager.h"
 #include "nsISupportsArray.h"
-#include "nsIPrefService.h"
-#include "nsIPrefBranch.h"
 
 // For PR_snprintf
 #include "prprf.h"
 
 #include "nsJSUtils.h"
 #include "nsDOMFile.h"
 #include "nsGlobalWindow.h"
 
@@ -379,43 +377,16 @@ public:
     // Dispatch to the media thread to ask it to start the sources,
     // because that can take a while
     nsIThread *mediaThread = MediaManager::GetThread();
     nsRefPtr<MediaOperationRunnable> runnable(
       new MediaOperationRunnable(MEDIA_START, mListener,
                                  mAudioSource, mVideoSource, false));
     mediaThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
 
-#ifdef MOZ_WEBRTC
-    // Right now these configs are only of use if webrtc is available
-    nsresult rv;
-    nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
-    if (NS_SUCCEEDED(rv)) {
-      nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
-
-      if (branch) {
-        int32_t aec = (int32_t) webrtc::kEcUnchanged;
-        int32_t agc = (int32_t) webrtc::kAgcUnchanged;
-        int32_t noise = (int32_t) webrtc::kNsUnchanged;
-        bool aec_on = false, agc_on = false, noise_on = false;
-
-        branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on);
-        branch->GetIntPref("media.peerconnection.aec", &aec);
-        branch->GetBoolPref("media.peerconnection.agc_enabled", &agc_on);
-        branch->GetIntPref("media.peerconnection.agc", &agc);
-        branch->GetBoolPref("media.peerconnection.noise_enabled", &noise_on);
-        branch->GetIntPref("media.peerconnection.noise", &noise);
-
-        mListener->AudioConfig(aec_on, (uint32_t) aec,
-                               agc_on, (uint32_t) agc,
-                               noise_on, (uint32_t) noise);
-      }
-    }
-#endif
-
     // We're in the main thread, so no worries here either.
     nsCOMPtr<nsIDOMGetUserMediaSuccessCallback> success(mSuccess);
     nsCOMPtr<nsIDOMGetUserMediaErrorCallback> error(mError);
 
     if (!(mManager->IsWindowStillActive(mWindowID))) {
       return NS_OK;
     }
     // This is safe since we're on main-thread, and the windowlist can only
--- a/dom/media/MediaManager.h
+++ b/dom/media/MediaManager.h
@@ -16,20 +16,16 @@
 
 #include "nsPIDOMWindow.h"
 #include "nsIDOMNavigatorUserMedia.h"
 #include "nsXULAppAPI.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/StaticPtr.h"
 #include "prlog.h"
 
-#ifdef MOZ_WEBRTC
-#include "mtransport/runnable_utils.h"
-#endif
-
 namespace mozilla {
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define MM_LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
 #else
 #define MM_LOG(msg)
 #endif
@@ -128,33 +124,16 @@ public:
     return mStream->AsSourceStream();
   }
 
   // implement in .cpp to avoid circular dependency with MediaOperationRunnable
   // Can be invoked from EITHER MainThread or MSG thread
   void Invalidate();
 
   void
-  AudioConfig(bool aEchoOn, uint32_t aEcho,
-              bool aAgcOn, uint32_t aAGC,
-              bool aNoiseOn, uint32_t aNoise)
-  {
-    if (mAudioSource) {
-#ifdef MOZ_WEBRTC
-      // Right now these configs are only of use if webrtc is available
-      RUN_ON_THREAD(mMediaThread,
-                    WrapRunnable(nsRefPtr<MediaEngineSource>(mAudioSource), // threadsafe
-                                 &MediaEngineSource::Config,
-                                 aEchoOn, aEcho, aAgcOn, aAGC, aNoiseOn, aNoise),
-                    NS_DISPATCH_NORMAL);
-#endif
-    }
-  }
-
-  void
   Remove()
   {
     NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
     // allow calling even if inactive (!mStream) for easier cleanup
     // Caller holds strong reference to us, so no death grip required
     MutexAutoLock lock(mLock); // protect access to mRemoved
     if (mStream && !mRemoved) {
       MM_LOG(("Listener removed on purpose, mFinished = %d", (int) mFinished));
--- a/media/mtransport/build/Makefile.in
+++ b/media/mtransport/build/Makefile.in
@@ -32,17 +32,16 @@ EXPORTS_mtransport = \
   ../transportlayer.h \
   ../transportlayerdtls.h \
   ../transportlayerice.h \
   ../transportlayerlog.h \
   ../transportlayerloopback.h \
   ../transportlayerprsock.h \
   ../m_cpp_utils.h \
   ../runnable_utils.h \
-  ../runnable_utils_generated.h \
   ../sigslot.h \
   $(NULL)
 
 CPPSRCS = \
 	$(MTRANSPORT_LCPPSRCS) \
 	$(NULL)
 
 include $(srcdir)/../objs.mk
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -1,159 +1,116 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioConduit.h"
-#include "nsCOMPtr.h"
-#include "mozilla/Services.h"
-#include "nsServiceManagerUtils.h"
-#include "nsIPrefService.h"
-#include "nsIPrefBranch.h"
-#include "nsThreadUtils.h"
-
 #include "CSFLog.h"
 #include "voice_engine/include/voe_errors.h"
 
 
 namespace mozilla {
 
 static const char* logTag ="WebrtcAudioSessionConduit";
 
 // 32 bytes is what WebRTC CodecInst expects
 const unsigned int WebrtcAudioConduit::CODEC_PLNAME_SIZE = 32;
 
 /**
  * Factory Method for AudioConduit
  */
-mozilla::RefPtr<AudioSessionConduit> AudioSessionConduit::Create(AudioSessionConduit *aOther)
+mozilla::RefPtr<AudioSessionConduit> AudioSessionConduit::Create()
 {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
-  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
-
   WebrtcAudioConduit* obj = new WebrtcAudioConduit();
-  if(obj->Init(static_cast<WebrtcAudioConduit*>(aOther)) != kMediaConduitNoError)
+  if(obj->Init() != kMediaConduitNoError)
   {
     CSFLogError(logTag,  "%s AudioConduit Init Failed ", __FUNCTION__);
     delete obj;
     return NULL;
   }
   CSFLogDebug(logTag,  "%s Successfully created AudioConduit ", __FUNCTION__);
   return obj;
 }
 
 /**
  * Destruction defines for our super-classes
  */
 WebrtcAudioConduit::~WebrtcAudioConduit()
 {
-  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
-
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
   for(std::vector<AudioCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
   {
     delete mRecvCodecList[i];
   }
 
   delete mCurSendCodecConfig;
 
-  // The first one of a pair to be deleted shuts down media for both
   if(mPtrVoEXmedia)
   {
-    if (!mShutDown) {
-      mPtrVoEXmedia->SetExternalRecordingStatus(false);
-      mPtrVoEXmedia->SetExternalPlayoutStatus(false);
-    }
+    mPtrVoEXmedia->SetExternalRecordingStatus(false);
+    mPtrVoEXmedia->SetExternalPlayoutStatus(false);
     mPtrVoEXmedia->Release();
   }
 
-  if(mPtrVoEProcessing)
-  {
-    mPtrVoEProcessing->Release();
-  }
-
   //Deal with the transport
   if(mPtrVoENetwork)
   {
-    if (!mShutDown) {
-      mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
-    }
+    mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
     mPtrVoENetwork->Release();
   }
 
   if(mPtrVoECodec)
   {
     mPtrVoECodec->Release();
   }
 
   if(mPtrVoEBase)
   {
-    if (!mShutDown) {
-      mPtrVoEBase->StopPlayout(mChannel);
-      mPtrVoEBase->StopSend(mChannel);
-      mPtrVoEBase->StopReceive(mChannel);
-      mPtrVoEBase->DeleteChannel(mChannel);
-      mPtrVoEBase->Terminate();
-    }
+    mPtrVoEBase->StopPlayout(mChannel);
+    mPtrVoEBase->StopSend(mChannel);
+    mPtrVoEBase->StopReceive(mChannel);
+    mPtrVoEBase->DeleteChannel(mChannel);
+    mPtrVoEBase->Terminate();
     mPtrVoEBase->Release();
   }
 
-  if (mOtherDirection)
+  if(mVoiceEngine)
   {
-    // mOtherDirection owns these now!
-    mOtherDirection->mOtherDirection = NULL;
-    // let other side we terminated the channel
-    mOtherDirection->mShutDown = true;
-    mVoiceEngine = nullptr;
-  } else {
-    // only one opener can call Delete.  Have it be the last to close.
-    if(mVoiceEngine)
-    {
-      webrtc::VoiceEngine::Delete(mVoiceEngine);
-    }
+    webrtc::VoiceEngine::Delete(mVoiceEngine);
   }
 }
 
 /*
  * WebRTCAudioConduit Implementation
  */
-MediaConduitErrorCode WebrtcAudioConduit::Init(WebrtcAudioConduit *other)
+MediaConduitErrorCode WebrtcAudioConduit::Init()
 {
-  CSFLogDebug(logTag,  "%s this=%p other=%p", __FUNCTION__, this, other);
+  CSFLogDebug(logTag,  "%s ", __FUNCTION__);
 
-  if (other) {
-    MOZ_ASSERT(!other->mOtherDirection);
-    other->mOtherDirection = this;
-    mOtherDirection = other;
+  //Per WebRTC APIs below function calls return NULL on failure
+  if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
+  {
+    CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
 
-    // only one can call ::Create()/GetVoiceEngine()
-    MOZ_ASSERT(other->mVoiceEngine);
-    mVoiceEngine = other->mVoiceEngine;
-  } else {
-    //Per WebRTC APIs below function calls return NULL on failure
-    if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
-    {
-      CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
-      return kMediaConduitSessionNotInited;
-    }
+  PRLogModuleInfo *logs = GetWebRTCLogInfo();
+  if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
+    // no need to a critical section or lock here
+    gWebrtcTraceLoggingOn = 1;
 
-    PRLogModuleInfo *logs = GetWebRTCLogInfo();
-    if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
-      // no need to a critical section or lock here
-      gWebrtcTraceLoggingOn = 1;
-
-      const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
-      if (!file) {
-        file = "WebRTC.log";
-      }
-      CSFLogDebug(logTag,  "%s Logging webrtc to %s level %d", __FUNCTION__,
-                  file, logs->level);
-      mVoiceEngine->SetTraceFilter(logs->level);
-      mVoiceEngine->SetTraceFile(file);
+    const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
+    if (!file) {
+      file = "WebRTC.log";
     }
+    CSFLogDebug(logTag,  "%s Logging webrtc to %s level %d", __FUNCTION__,
+                file, logs->level);
+    mVoiceEngine->SetTraceFilter(logs->level);
+    mVoiceEngine->SetTraceFile(file);
   }
 
   if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine)))
   {
     CSFLogError(logTag, "%s Unable to initialize VoEBase", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
@@ -164,70 +121,61 @@ MediaConduitErrorCode WebrtcAudioConduit
   }
 
   if(!(mPtrVoECodec = VoECodec::GetInterface(mVoiceEngine)))
   {
     CSFLogError(logTag, "%s Unable to initialize VoEBCodec", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-  if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine)))
-  {
-    CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__);
-    return kMediaConduitSessionNotInited;
-  }
-
   if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine)))
   {
     CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-  if (other) {
-    mChannel = other->mChannel;
-  } else {
-    // init the engine with our audio device layer
-    if(mPtrVoEBase->Init() == -1)
-    {
-      CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
-      return kMediaConduitSessionNotInited;
-    }
+  // init the engine with our audio device layer
+  if(mPtrVoEBase->Init() == -1)
+  {
+    CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
 
-    if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
-    {
-      CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
-      return kMediaConduitChannelError;
-    }
+  if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
+  {
+    CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
+    return kMediaConduitChannelError;
+  }
 
-    CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);
+  CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);
 
-    if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
-    {
-      CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
-      return kMediaConduitTransportRegistrationFail;
-    }
+  if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
+  {
+    CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
+    return kMediaConduitTransportRegistrationFail;
+  }
 
-    if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
-    {
-      CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
-                  mPtrVoEBase->LastError());
-      return kMediaConduitExternalPlayoutError;
-    }
+  if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
+  {
+    CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
+                                                      mPtrVoEBase->LastError());
+    return kMediaConduitExternalPlayoutError;
+  }
 
-    if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
-    {
-      CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
-                  mPtrVoEBase->LastError());
-      return kMediaConduitExternalRecordingError;
-    }
-    CSFLogDebug(logTag ,  "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
+  if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
+  {
+    CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
+                                                     mPtrVoEBase->LastError());
+    return kMediaConduitExternalRecordingError;
   }
+  CSFLogDebug(logTag ,  "%s AudioSessionConduit Initialization Done",__FUNCTION__);
   return kMediaConduitNoError;
 }
 
+
 // AudioSessionConduit Implementation
 MediaConduitErrorCode
 WebrtcAudioConduit::AttachTransport(mozilla::RefPtr<TransportInterface> aTransport)
 {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
 
   if(!aTransport)
   {
@@ -282,43 +230,16 @@ WebrtcAudioConduit::ConfigureSendMediaCo
     if(error ==  VE_CANNOT_SET_SEND_CODEC || error == VE_CODEC_ERROR)
     {
       return kMediaConduitInvalidSendCodec;
     }
 
     return kMediaConduitUnknownError;
   }
 
-  // TEMPORARY - see bug 694814 comment 2
-  nsresult rv;
-  nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
-  if (NS_SUCCEEDED(rv)) {
-    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
-
-    if (branch) {
-      int32_t aec = 0; // 0 == unchanged
-      bool aec_on = false;
-
-      branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on);
-      branch->GetIntPref("media.peerconnection.aec", &aec);
-
-      CSFLogDebug(logTag,"Audio config: aec: %d", aec_on ? aec : -1);
-      mEchoOn = aec_on;
-      if (static_cast<webrtc::EcModes>(aec) != webrtc::kEcUnchanged)
-        mEchoCancel = static_cast<webrtc::EcModes>(aec);
-
-      branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay);
-    }
-  }
-
-  if (0 != (error = mPtrVoEProcessing->SetEcStatus(mEchoOn, mEchoCancel))) {
-    CSFLogError(logTag,"%s Error setting EVStatus: %d ",__FUNCTION__, error);
-    return kMediaConduitUnknownError;
-  }
-
   //Let's Send Transport State-machine on the Engine
   if(mPtrVoEBase->StartSend(mChannel) == -1)
   {
     error = mPtrVoEBase->LastError();
     CSFLogError(logTag, "%s StartSend failed %d", __FUNCTION__, error);
     return kMediaConduitUnknownError;
   }
 
@@ -478,17 +399,17 @@ WebrtcAudioConduit::SendAudioFrame(const
 
   // if transmission is not started .. conduit cannot insert frames
   if(!mEngineTransmitting)
   {
     CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-  capture_delay = mCaptureDelay;
+
   //Insert the samples
   if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data,
                                                 lengthSamples,
                                                 samplingFreqHz,
                                                 capture_delay) == -1)
   {
     int error = mPtrVoEBase->LastError();
     CSFLogError(logTag,  "%s Inserting audio data Failed %d", __FUNCTION__, error);
@@ -618,63 +539,44 @@ WebrtcAudioConduit::ReceivedRTCPPacket(c
   //good here
   return kMediaConduitNoError;
 }
 
 //WebRTC::RTP Callback Implementation
 
 int WebrtcAudioConduit::SendPacket(int channel, const void* data, int len)
 {
-  CSFLogDebug(logTag,  "%s : channel %d %s",__FUNCTION__,channel,
-              (mEngineReceiving && mOtherDirection) ? "(using mOtherDirection)" : "");
+  CSFLogDebug(logTag,  "%s : channel %d",__FUNCTION__,channel);
 
-  if (mEngineReceiving)
-  {
-    if (mOtherDirection)
-    {
-      return mOtherDirection->SendPacket(channel, data, len);
-    }
-    CSFLogDebug(logTag,  "%s : Asked to send RTP without an RTP sender",
-                __FUNCTION__, channel);
-    return -1;
-  } else {
-    if(mTransport && (mTransport->SendRtpPacket(data, len) == NS_OK))
-    {
+   if(mTransport && (mTransport->SendRtpPacket(data, len) == NS_OK))
+   {
       CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__);
       return len;
-    } else {
-      CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
-      return -1;
-    }
-  }
+   } else {
+     CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
+     return -1;
+   }
+
 }
 
 int WebrtcAudioConduit::SendRTCPPacket(int channel, const void* data, int len)
 {
   CSFLogDebug(logTag,  "%s : channel %d", __FUNCTION__, channel);
 
-  if (mEngineTransmitting)
+  // can't enable this assertion, because we do.  Suppress it
+  // NS_ASSERTION(mEngineReceiving,"We shouldn't send RTCP on the receiver side");
+  if(mEngineReceiving && mTransport && mTransport->SendRtcpPacket(data, len) == NS_OK)
   {
-    if (mOtherDirection)
-    {
-      return mOtherDirection->SendRTCPPacket(channel, data, len);
-    }
-    CSFLogDebug(logTag,  "%s : Asked to send RTCP without an RTP receiver",
-                __FUNCTION__, channel);
-    return -1;
+    CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__);
+    return len;
   } else {
-    if(mTransport && mTransport->SendRtcpPacket(data, len) == NS_OK)
-    {
-      CSFLogDebug(logTag, "%s Sent RTCP Packet ", __FUNCTION__);
-      return len;
-    } else {
-      CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__);
-      return -1;
-    }
+    CSFLogError(logTag, "%s RTCP Packet Send Failed ", __FUNCTION__);
+    return -1;
   }
+
 }
 
 /**
  * Converts between CodecConfig to WebRTC Codec Structure.
  */
 
 bool
 WebrtcAudioConduit::CodecConfigToWebRTCCodec(const AudioCodecConfig* codecInfo,
@@ -841,8 +743,9 @@ WebrtcAudioConduit::DumpCodecDB() const
       CSFLogDebug(logTag,"Payload Type: %d", mRecvCodecList[i]->mType);
       CSFLogDebug(logTag,"Payload Frequency: %d", mRecvCodecList[i]->mFreq);
       CSFLogDebug(logTag,"Payload PacketSize: %d", mRecvCodecList[i]->mPacSize);
       CSFLogDebug(logTag,"Payload Channels: %d", mRecvCodecList[i]->mChannels);
       CSFLogDebug(logTag,"Payload Sampling Rate: %d", mRecvCodecList[i]->mRate);
     }
  }
 }// end namespace
+
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -13,24 +13,22 @@
 // Audio Engine Includes
 #include "common_types.h"
 #include "voice_engine/include/voe_base.h"
 #include "voice_engine/include/voe_volume_control.h"
 #include "voice_engine/include/voe_codec.h"
 #include "voice_engine/include/voe_file.h"
 #include "voice_engine/include/voe_network.h"
 #include "voice_engine/include/voe_external_media.h"
-#include "voice_engine/include/voe_audio_processing.h"
 
 //Some WebRTC types for short notations
  using webrtc::VoEBase;
  using webrtc::VoENetwork;
  using webrtc::VoECodec;
  using webrtc::VoEExternalMedia;
- using webrtc::VoEAudioProcessing;
 
 /** This file hosts several structures identifying different aspects
  * of a RTP Session.
  */
 
 namespace mozilla {
 
 /**
@@ -138,33 +136,28 @@ public:
    * Webrtc transport implementation to send and receive RTCP packet.
    * AudioConduit registers itself as ExternalTransport to the VoiceEngine
    */
   virtual int SendRTCPPacket(int channel, const void *data, int len) ;
 
 
 
   WebrtcAudioConduit():
-                      mOtherDirection(NULL),
-                      mShutDown(false),
                       mVoiceEngine(NULL),
                       mTransport(NULL),
                       mEngineTransmitting(false),
                       mEngineReceiving(false),
                       mChannel(-1),
-                      mCurSendCodecConfig(NULL),
-                      mCaptureDelay(150),
-                      mEchoOn(true),
-                      mEchoCancel(webrtc::kEcAec)
+                      mCurSendCodecConfig(NULL)
   {
   }
 
   virtual ~WebrtcAudioConduit();
 
-  MediaConduitErrorCode Init(WebrtcAudioConduit *other);
+  MediaConduitErrorCode Init();
 
 private:
   WebrtcAudioConduit(const WebrtcAudioConduit& other) MOZ_DELETE;
   void operator=(const WebrtcAudioConduit& other) MOZ_DELETE;
 
   //Local database of currently applied receive codecs
   typedef std::vector<AudioCodecConfig* > RecvCodecList;
 
@@ -187,41 +180,28 @@ private:
   bool CheckCodecsForMatch(const AudioCodecConfig* curCodecConfig,
                            const AudioCodecConfig* codecInfo) const;
   //Checks the codec to be applied
   MediaConduitErrorCode ValidateCodecConfig(const AudioCodecConfig* codecInfo, bool send) const;
 
   //Utility function to dump recv codec database
   void DumpCodecDB() const;
 
-  WebrtcAudioConduit*  mOtherDirection;
-  // Other side has shut down our channel and related items already
-  bool mShutDown;
-
-  // These are shared by both directions.  They're released by the last
-  // conduit to die
   webrtc::VoiceEngine* mVoiceEngine;
   mozilla::RefPtr<TransportInterface> mTransport;
   webrtc::VoENetwork*  mPtrVoENetwork;
   webrtc::VoEBase*     mPtrVoEBase;
   webrtc::VoECodec*    mPtrVoECodec;
   webrtc::VoEExternalMedia* mPtrVoEXmedia;
-  webrtc::VoEAudioProcessing* mPtrVoEProcessing;
 
   //engine states of our interets
   bool mEngineTransmitting; // If true => VoiceEngine Send-subsystem is up
   bool mEngineReceiving;    // If true => VoiceEngine Receive-subsystem is up
                             // and playout is enabled
 
   int mChannel;
   RecvCodecList    mRecvCodecList;
   AudioCodecConfig* mCurSendCodecConfig;
-
-  // Current "capture" delay (really output plus input delay)
-  int32_t mCaptureDelay;
-
-  bool mEchoOn;
-  webrtc::EcModes  mEchoCancel;
 };
 
 } // end namespace
 
 #endif
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -221,17 +221,17 @@ class AudioSessionConduit : public Media
 {
 public:
 
    /**
     * Factory function to create and initialize a Video Conduit Session
     * return: Concrete VideoSessionConduitObject or NULL in the case
     *         of failure
     */
-  static mozilla::RefPtr<AudioSessionConduit> Create(AudioSessionConduit *aOther);
+  static mozilla::RefPtr<AudioSessionConduit> Create();
 
   virtual ~AudioSessionConduit() {}
 
   virtual Type type() const { return AUDIO; }
 
 
   /**
    * Function to deliver externally captured audio sample for encoding and transport
--- a/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
+++ b/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
@@ -1298,28 +1298,22 @@ static int vcmRxStartICE_m(cc_mcapid_t m
                              fingerprint_alg, fingerprint);
   if (!rtcp_flow) {
       CSFLogError( logTag, "Could not create RTCP flow");
       return VCM_ERROR;
   }
 
   if (CC_IS_AUDIO(mcap_id)) {
     std::vector<mozilla::AudioCodecConfig *> configs;
-
     // Instantiate an appropriate conduit
-    mozilla::RefPtr<mozilla::AudioSessionConduit> tx_conduit =
-      pc.impl()->media()->GetConduit(level, false);
-
     mozilla::RefPtr<mozilla::AudioSessionConduit> conduit =
-                    mozilla::AudioSessionConduit::Create(tx_conduit);
+                    mozilla::AudioSessionConduit::Create();
     if(!conduit)
       return VCM_ERROR;
 
-    pc.impl()->media()->AddConduit(level, true, conduit);
-
     mozilla::AudioCodecConfig *config_raw;
 
     for(int i=0; i <num_payloads ; i++)
     {
       config_raw = new mozilla::AudioCodecConfig(
         payloads[i].remote_rtp_pt,
         ccsdpCodecName(payloads[i].codec_type),
         payloads[i].audio.frequency,
@@ -1954,27 +1948,22 @@ static int vcmTxStartICE_m(cc_mcapid_t m
       payload->audio.packet_size,
       payload->audio.channels,
       payload->audio.bitrate);
 
     // Take possession of this pointer
     mozilla::ScopedDeletePtr<mozilla::AudioCodecConfig> config(config_raw);
 
     // Instantiate an appropriate conduit
-    mozilla::RefPtr<mozilla::AudioSessionConduit> rx_conduit =
-      pc.impl()->media()->GetConduit(level, true);
-
     mozilla::RefPtr<mozilla::AudioSessionConduit> conduit =
-      mozilla::AudioSessionConduit::Create(rx_conduit);
+      mozilla::AudioSessionConduit::Create();
 
     if (!conduit || conduit->ConfigureSendMediaCodec(config))
       return VCM_ERROR;
 
-    pc.impl()->media()->AddConduit(level, false, conduit);
-
     mozilla::RefPtr<mozilla::MediaPipeline> pipeline =
         new mozilla::MediaPipelineTransmit(
             pc.impl()->GetHandle(),
             pc.impl()->GetMainThread().get(),
             pc.impl()->GetSTSThread(),
             stream->GetMediaStream()->GetStream(),
             pc_track_id,
             conduit, rtp_flow, rtcp_flow);
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
@@ -288,38 +288,19 @@ class PeerConnectionMedia : public sigsl
     return mTransportFlows[index_inner];
   }
 
   // Add a transport flow
   void AddTransportFlow(int aIndex, bool aRtcp,
                         mozilla::RefPtr<mozilla::TransportFlow> aFlow) {
     int index_inner = aIndex * 2 + (aRtcp ? 1 : 0);
 
-    MOZ_ASSERT(!mTransportFlows[index_inner]);
     mTransportFlows[index_inner] = aFlow;
   }
 
-  mozilla::RefPtr<mozilla::AudioSessionConduit> GetConduit(int aStreamIndex, bool aReceive) {
-    int index_inner = aStreamIndex * 2 + (aReceive ? 0 : 1);
-
-    if (mAudioConduits.find(index_inner) == mAudioConduits.end())
-      return NULL;
-
-    return mAudioConduits[index_inner];
-  }
-
-  // Add a conduit
-  void AddConduit(int aIndex, bool aReceive,
-                  const mozilla::RefPtr<mozilla::AudioSessionConduit> &aConduit) {
-    int index_inner = aIndex * 2 + (aReceive ? 0 : 1);
-
-    MOZ_ASSERT(!mAudioConduits[index_inner]);
-    mAudioConduits[index_inner] = aConduit;
-  }
-
   // ICE state signals
   sigslot::signal1<mozilla::NrIceCtx *> SignalIceGatheringCompleted;  // Done gathering
   sigslot::signal1<mozilla::NrIceCtx *> SignalIceCompleted;  // Done handshaking
 
  private:
   // Disconnect the media streams. Must be called on the
   // main thread.
   void DisconnectMediaStreams();
@@ -345,17 +326,13 @@ class PeerConnectionMedia : public sigsl
 
   // ICE objects
   mozilla::RefPtr<mozilla::NrIceCtx> mIceCtx;
   std::vector<mozilla::RefPtr<mozilla::NrIceMediaStream> > mIceStreams;
 
   // Transport flows: even is RTP, odd is RTCP
   std::map<int, mozilla::RefPtr<mozilla::TransportFlow> > mTransportFlows;
 
-  // Conduits: even is receive, odd is transmit (for easier correlation with
-  // flows)
-  std::map<int, mozilla::RefPtr<mozilla::AudioSessionConduit> > mAudioConduits;
-
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PeerConnectionMedia)
 };
 
 }  // namespace sipcc
 #endif
--- a/media/webrtc/signaling/test/mediaconduit_unittests.cpp
+++ b/media/webrtc/signaling/test/mediaconduit_unittests.cpp
@@ -485,21 +485,21 @@ class TransportConduitTest : public ::te
   {
   }
 
   //1. Dump audio samples to dummy external transport
   void TestDummyAudioAndTransport()
   {
     //get pointer to AudioSessionConduit
     int err=0;
-    mAudioSession = mozilla::AudioSessionConduit::Create(NULL);
+    mAudioSession = mozilla::AudioSessionConduit::Create();
     if( !mAudioSession )
       ASSERT_NE(mAudioSession, (void*)NULL);
 
-    mAudioSession2 = mozilla::AudioSessionConduit::Create(NULL);
+    mAudioSession2 = mozilla::AudioSessionConduit::Create();
     if( !mAudioSession2 )
       ASSERT_NE(mAudioSession2, (void*)NULL);
 
     FakeMediaTransport* xport = new FakeMediaTransport();
     ASSERT_NE(xport, (void*)NULL);
     xport->SetAudioSession(mAudioSession, mAudioSession2);
     mAudioTransport = xport;
 
--- a/media/webrtc/signaling/test/mediapipeline_unittest.cpp
+++ b/media/webrtc/signaling/test/mediapipeline_unittest.cpp
@@ -43,17 +43,17 @@ MtransportTestUtils *test_utils;
 namespace {
 class TestAgent {
  public:
   TestAgent() :
       audio_flow_(new TransportFlow()),
       audio_prsock_(new TransportLayerPrsock()),
       audio_dtls_(new TransportLayerDtls()),
       audio_config_(109, "opus", 48000, 480, 1, 64000),
-      audio_conduit_(mozilla::AudioSessionConduit::Create(NULL)),
+      audio_conduit_(mozilla::AudioSessionConduit::Create()),
       audio_(),
       audio_pipeline_(),
       video_flow_(new TransportFlow()),
       video_prsock_(new TransportLayerPrsock()),
       video_config_(120, "VP8", 640, 480),
       video_conduit_(mozilla::VideoSessionConduit::Create()),
       video_(),
       video_pipeline_() {
--- a/media/webrtc/webrtc_config.gypi
+++ b/media/webrtc/webrtc_config.gypi
@@ -1,28 +1,23 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # definitions to control what gets built in webrtc
-# NOTE!!! if you change something here, due to .gyp files not
-# being reprocessed on .gypi changes, run this before building:
-# "find . -name '*.gyp' | xargs touch"
 {
   'variables': {
     # basic stuff for everything
     'include_internal_video_render': 0,
     'clang_use_chrome_plugins': 0,
     'enable_protobuf': 0,
     'include_pulse_audio': 0,
     'include_tests': 0,
     'use_system_libjpeg': 1,
     'use_system_libvpx': 1,
-# Creates AEC internal sample dump files in current directory
-#    'aec_debug_dump': 1,
 
     # codec enable/disables:
     # Note: if you change one here, you must modify shared_libs.mk!
     'codec_g711_enable': 1,
     'codec_opus_enable': 1,
     'codec_g722_enable': 0,
     'codec_ilbc_enable': 0,
     'codec_isac_enable': 0,
--- a/modules/libpref/src/init/all.js
+++ b/modules/libpref/src/init/all.js
@@ -173,25 +173,16 @@ pref("media.dash.enabled", false);
 #endif
 #ifdef MOZ_GSTREAMER
 pref("media.gstreamer.enabled", true);
 #endif
 #ifdef MOZ_WEBRTC
 pref("media.navigator.enabled", true);
 pref("media.peerconnection.enabled", false);
 pref("media.navigator.permission.disabled", false);
-// These values (aec, agc, and noice) are from media/webrtc/trunk/webrtc/common_types.h
-// kXxxUnchanged = 0, kXxxDefault = 1, and higher values are specific to each 
-// setting (for Xxx = Ec, Agc, or Ns).  Defaults are all set to kXxxDefault here.
-pref("media.peerconnection.aec_enabled", true);
-pref("media.peerconnection.aec", 1);
-pref("media.peerconnection.agc_enabled", false);
-pref("media.peerconnection.agc", 1);
-pref("media.peerconnection.noise_enabled", false);
-pref("media.peerconnection.noise", 1);
 #else
 #ifdef ANDROID
 pref("media.navigator.enabled", true);
 #endif
 #endif
 
 // Whether to enable Web Audio support
 pref("media.webaudio.enabled", false);