Bug 932112: Rollup of changes previously applied to media/webrtc/trunk/webrtc rs=jesup
author: Randell Jesup <rjesup@jesup.org>
date: Thu, 07 Nov 2013 20:07:47 -0500
changeset 168645 bd8f1571937ffe786828b17c3a6963323d046157
parent 168644 7aca035355aefd9694132f0c913227016358ded1
child 168646 5b8a157a6ed57864d9e7090506d90d0e763c8e85
push id: 3224
push user: lsblakk@mozilla.com
push date: Tue, 04 Feb 2014 01:06:49 +0000
treeherder: mozilla-beta@60c04d0987f1
reviewers: jesup
bugs: 932112
milestone: 28.0a1
Add AndroidAudioManager to the moz.build files.
content/media/webrtc/MediaEngineWebRTC.cpp
content/media/webrtc/MediaEngineWebRTC.h
content/media/webrtc/MediaEngineWebRTCVideo.cpp
layout/media/webrtc/Makefile.in
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.h
media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
media/webrtc/trunk/webrtc/build/arm_neon.gypi
media/webrtc/trunk/webrtc/build/common.gypi
media/webrtc/trunk/webrtc/build/merge_libs.gyp
media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
media/webrtc/trunk/webrtc/common_types.h
media/webrtc/trunk/webrtc/engine_configurations.h
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.cc
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.h
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.cc
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.h
media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.h
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_internal.h
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_sse2.c
media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation.c
media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation_internal.h
media/webrtc/trunk/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.h
media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
media/webrtc/trunk/webrtc/modules/modules.gyp
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.h
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/video_capture_mac.mm
media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.h
media/webrtc/trunk/webrtc/modules/video_processing/main/source/video_processing.gypi
media/webrtc/trunk/webrtc/system_wrappers/interface/asm_defines.h
media/webrtc/trunk/webrtc/system_wrappers/interface/tick_util.h
media/webrtc/trunk/webrtc/system_wrappers/source/atomic32_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_info.cc
media/webrtc/trunk/webrtc/system_wrappers/source/rw_lock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers.gyp
media/webrtc/trunk/webrtc/system_wrappers/source/thread_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/tick_util.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_posix.cc
media/webrtc/trunk/webrtc/test/channel_transport/udp_transport_impl.cc
media/webrtc/trunk/webrtc/typedefs.h
media/webrtc/trunk/webrtc/video_engine/stream_synchronization.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel.cc
media/webrtc/trunk/webrtc/video_engine/vie_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
media/webrtc/trunk/webrtc/voice_engine/output_mixer_unittest.cc
media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voice_engine.gyp
media/webrtc/trunk/webrtc/voice_engine/voice_engine_defines.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine_impl.cc
mobile/android/base/moz.build
widget/android/AndroidJNIWrapper.cpp
widget/android/AndroidJNIWrapper.h
--- a/content/media/webrtc/MediaEngineWebRTC.cpp
+++ b/content/media/webrtc/MediaEngineWebRTC.cpp
@@ -249,17 +249,17 @@ MediaEngineWebRTC::EnumerateAudioDevices
   jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
 
   // get the JVM
   JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM();
 
   JNIEnv *env;
   jvm->AttachCurrentThread(&env, nullptr);
 
-  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
+  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, env, (void*)context) != 0) {
     LOG(("VoiceEngine:SetAndroidObjects Failed"));
     return;
   }
 
   env->DeleteGlobalRef(context);
 #endif
 
   if (!mVoiceEngine) {
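
Note on the hunk above: VoiceEngine::SetAndroidObjects() now takes the caller's JNIEnv as an explicit second argument (the same change is applied in AudioConduit.cpp below). A minimal sketch of the new calling convention, assuming only what the hunks show; the helper name is hypothetical:

#include "webrtc/voice_engine/include/voe_base.h"

static bool InitAndroidVoiceEngineObjects(JavaVM* jvm, jobject context) {
  JNIEnv* env = nullptr;
  // Attach the current thread so a valid JNIEnv can be passed along.
  if (jvm->AttachCurrentThread(&env, nullptr) != 0) {
    return false;
  }
  // New three-argument form: (JavaVM*, JNIEnv*, void* context); 0 == success.
  return webrtc::VoiceEngine::SetAndroidObjects(jvm, env, (void*)context) == 0;
}
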
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -40,17 +40,16 @@
 #include "webrtc/voice_engine/include/voe_external_media.h"
 #include "webrtc/voice_engine/include/voe_audio_processing.h"
 
 // Video Engine
 #include "webrtc/video_engine/include/vie_base.h"
 #include "webrtc/video_engine/include/vie_codec.h"
 #include "webrtc/video_engine/include/vie_render.h"
 #include "webrtc/video_engine/include/vie_capture.h"
-#include "webrtc/video_engine/include/vie_file.h"
 #ifdef MOZ_B2G_CAMERA
 #include "CameraPreviewMediaStream.h"
 #include "DOMCameraManager.h"
 #include "GonkCameraControl.h"
 #include "ImageContainer.h"
 #include "nsGlobalWindow.h"
 #include "prprf.h"
 #endif
@@ -113,17 +112,24 @@ public:
   {
     mState = kReleased;
     NS_NewNamedThread("CameraThread", getter_AddRefs(mCameraThread));
     Init();
   }
 #else
   // ViEExternalRenderer.
   virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
-  virtual int DeliverFrame(unsigned char*, int, uint32_t, int64_t);
+  virtual int DeliverFrame(unsigned char*, int, uint32_t, int64_t,
+                           void *handle);
+  /**
+   * Does DeliverFrame() support a null buffer and non-null handle
+   * (video texture)?
+   * XXX Investigate!  Especially for Android/B2G
+   */
+  virtual bool IsTextureSupported() { return false; }
 
   MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex)
     : mVideoEngine(aVideoEnginePtr)
     , mCaptureIndex(aIndex)
     , mFps(-1)
     , mMinFps(-1)
     , mMonitor("WebRTCCamera.Monitor")
     , mWidth(0)
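
Note on the interface change above (mirrored in VideoConduit.h below): DeliverFrame() gains a trailing void* handle for texture-backed frames, and implementers must now answer IsTextureSupported(). A minimal sketch of a buffer-only webrtc::ExternalRenderer implementer, assuming only the signatures shown in this patch; the class name is hypothetical:

class BufferOnlyRenderer : public webrtc::ExternalRenderer {
public:
  virtual int FrameSizeChange(unsigned int width, unsigned int height,
                              unsigned int number_of_streams) {
    return 0;  // accept any frame size
  }
  virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
                           uint32_t time_stamp, int64_t render_time,
                           void* handle) {
    if (!buffer) {
      return -1;  // no texture support declared, so a raw buffer is required
    }
    // ... consume the raw I420 frame ...
    return 0;
  }
  // False: frames must arrive as raw I420 buffers, never as texture handles.
  virtual bool IsTextureSupported() { return false; }
};
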
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -35,17 +35,18 @@ MediaEngineWebRTCVideoSource::FrameSizeC
   mHeight = h;
   LOG(("Video FrameSizeChange: %ux%u", w, h));
   return 0;
 }
 
 // ViEExternalRenderer Callback. Process every incoming frame here.
 int
 MediaEngineWebRTCVideoSource::DeliverFrame(
-   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time)
+   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
+   void *handle)
 {
   // mInSnapshotMode can only be set before the camera is turned on and
   // the renderer is started, so this amounts to a 1-shot
   if (mInSnapshotMode) {
     // Set the condition variable to false and notify Snapshot().
     MonitorAutoLock lock(mMonitor);
     mInSnapshotMode = false;
     lock.Notify();
@@ -394,136 +395,17 @@ MediaEngineWebRTCVideoSource::Stop(Sourc
 #endif
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
 {
-  /**
-   * To get a Snapshot we do the following:
-   * - Set a condition variable (mInSnapshotMode) to true
-   * - Attach the external renderer and start the camera
-   * - Wait for the condition variable to change to false
-   *
-   * Starting the camera has the effect of invoking DeliverFrame() when
-   * the first frame arrives from the camera. We only need one frame for
-   * GetCaptureDeviceSnapshot to work, so we immediately set the condition
-   * variable to false and notify this method.
-   *
-   * This causes the current thread to continue (PR_CondWaitVar will return),
-   * at which point we can grab a snapshot, convert it to a file and
-   * return from this function after cleaning up the temporary stream object
-   * and caling Stop() on the media source.
-   */
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-  *aFile = nullptr;
-  if (!mInitDone || mState != kAllocated) {
-    return NS_ERROR_FAILURE;
-  }
-#ifdef MOZ_B2G_CAMERA
-  mLastCapture = nullptr;
-
-  NS_DispatchToMainThread(WrapRunnable(this,
-                                       &MediaEngineWebRTCVideoSource::StartImpl,
-                                       mCapability));
-  mCallbackMonitor.Wait();
-  if (mState != kStarted) {
-    return NS_ERROR_FAILURE;
-  }
-
-  NS_DispatchToMainThread(WrapRunnable(this,
-                                       &MediaEngineWebRTCVideoSource::SnapshotImpl));
-  mCallbackMonitor.Wait();
-  if (mLastCapture == nullptr)
-    return NS_ERROR_FAILURE;
-
-  mState = kStopped;
-  NS_DispatchToMainThread(WrapRunnable(this,
-                                       &MediaEngineWebRTCVideoSource::StopImpl));
-
-  // The camera return nsDOMMemoryFile indeed, and the inheritance tree is:
-  // nsIDOMBlob <- nsIDOMFile <- nsDOMFileBase <- nsDOMFile <- nsDOMMemoryFile
-  *aFile = mLastCapture.get();
-  return NS_OK;
-#else
-  {
-    MonitorAutoLock lock(mMonitor);
-    mInSnapshotMode = true;
-  }
-
-  // Start the rendering (equivalent to calling Start(), but without a track).
-  int error = 0;
-  if (!mInitDone || mState != kAllocated) {
-    return NS_ERROR_FAILURE;
-  }
-  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
-  if (error == -1) {
-    return NS_ERROR_FAILURE;
-  }
-  error = mViERender->StartRender(mCaptureIndex);
-  if (error == -1) {
-    return NS_ERROR_FAILURE;
-  }
-
-  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
-    return NS_ERROR_FAILURE;
-  }
-
-  // Wait for the condition variable, will be set in DeliverFrame.
-  // We use a while loop, because even if Wait() returns, it's not
-  // guaranteed that the condition variable changed.
-  // FIX: we need need a way to cancel this and to bail if it appears to not be working
-  // Perhaps a maximum time, though some cameras can take seconds to start.  10 seconds?
-  {
-    MonitorAutoLock lock(mMonitor);
-    while (mInSnapshotMode) {
-      lock.Wait();
-    }
-  }
-
-  // If we get here, DeliverFrame received at least one frame.
-  webrtc::ViEFile* vieFile = webrtc::ViEFile::GetInterface(mVideoEngine);
-  if (!vieFile) {
-    return NS_ERROR_FAILURE;
-  }
-
-  // Create a temporary file on the main thread and put the snapshot in it.
-  // See Run() in MediaEngineWebRTCVideo.h (sets mSnapshotPath).
-  NS_DispatchToMainThread(this, NS_DISPATCH_SYNC);
-
-  if (!mSnapshotPath) {
-    return NS_ERROR_FAILURE;
-  }
-
-  NS_ConvertUTF16toUTF8 path(*mSnapshotPath);
-  if (vieFile->GetCaptureDeviceSnapshot(mCaptureIndex, path.get()) < 0) {
-    delete mSnapshotPath;
-    mSnapshotPath = nullptr;
-    return NS_ERROR_FAILURE;
-  }
-
-  // Stop the camera.
-  mViERender->StopRender(mCaptureIndex);
-  mViERender->RemoveRenderer(mCaptureIndex);
-
-  nsCOMPtr<nsIFile> file;
-  nsresult rv = NS_NewLocalFile(*mSnapshotPath, false, getter_AddRefs(file));
-
-  delete mSnapshotPath;
-  mSnapshotPath = nullptr;
-
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  NS_ADDREF(*aFile = new nsDOMFileFile(file));
-#endif
-  return NS_OK;
+  return NS_ERROR_NOT_IMPLEMENTED;
 }
 
 /**
  * Initialization and Shutdown functions for the video source, called by the
  * constructor and destructor respectively.
  */
 
 void
--- a/layout/media/webrtc/Makefile.in
+++ b/layout/media/webrtc/Makefile.in
@@ -4,33 +4,36 @@
 
 # shared libs for webrtc
 SHARED_LIBRARY_LIBS = \
   $(call EXPAND_LIBNAME_PATH,common_video,$(DEPTH)/media/webrtc/trunk/webrtc/common_video/common_video_common_video) \
   $(call EXPAND_LIBNAME_PATH,common_audio,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_common_audio) \
   $(call EXPAND_LIBNAME_PATH,video_capture_module,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_video_capture_module) \
   $(call EXPAND_LIBNAME_PATH,webrtc_utility,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_utility) \
   $(call EXPAND_LIBNAME_PATH,audio_coding_module,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_coding_module) \
+  $(call EXPAND_LIBNAME_PATH,acm2,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_acm2) \
   $(call EXPAND_LIBNAME_PATH,CNG,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_CNG) \
   $(call EXPAND_LIBNAME_PATH,G711,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_G711) \
   $(call EXPAND_LIBNAME_PATH,PCM16B,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_PCM16B) \
   $(call EXPAND_LIBNAME_PATH,NetEq,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_NetEq) \
+  $(call EXPAND_LIBNAME_PATH,NetEq4,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_NetEq4) \
   $(call EXPAND_LIBNAME_PATH,system_wrappers,$(DEPTH)/media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers_system_wrappers) \
   $(call EXPAND_LIBNAME_PATH,webrtc_video_coding,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_video_coding) \
   $(call EXPAND_LIBNAME_PATH,video_coding_utility,$(DEPTH)/media/webrtc/trunk/webrtc/modules/video_coding/utility/video_coding_utility_video_coding_utility) \
   $(call EXPAND_LIBNAME_PATH,webrtc_i420,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_i420) \
   $(call EXPAND_LIBNAME_PATH,webrtc_vp8,$(DEPTH)/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_webrtc_vp8) \
   $(call EXPAND_LIBNAME_PATH,webrtc_opus,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_opus) \
   $(call EXPAND_LIBNAME_PATH,video_render_module,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_video_render_module) \
   $(call EXPAND_LIBNAME_PATH,video_engine_core,$(DEPTH)/media/webrtc/trunk/webrtc/video_engine/video_engine_video_engine_core) \
   $(call EXPAND_LIBNAME_PATH,voice_engine,$(DEPTH)/media/webrtc/trunk/webrtc/voice_engine/voice_engine_voice_engine) \
   $(call EXPAND_LIBNAME_PATH,media_file,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_media_file) \
   $(call EXPAND_LIBNAME_PATH,rtp_rtcp,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_rtp_rtcp) \
   $(call EXPAND_LIBNAME_PATH,bitrate_controller,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_bitrate_controller) \
   $(call EXPAND_LIBNAME_PATH,remote_bitrate_estimator,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_remote_bitrate_estimator) \
+  $(call EXPAND_LIBNAME_PATH,rbe_components,$(DEPTH)/media/webrtc/trunk/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_components_rbe_components) \
   $(call EXPAND_LIBNAME_PATH,paced_sender,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_paced_sender) \
   $(call EXPAND_LIBNAME_PATH,video_processing,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_video_processing) \
   $(call EXPAND_LIBNAME_PATH,audio_conference_mixer,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_conference_mixer) \
   $(call EXPAND_LIBNAME_PATH,audio_device,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_device) \
   $(call EXPAND_LIBNAME_PATH,audio_processing,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_processing) \
   $(call EXPAND_LIBNAME_PATH,yuv,$(DEPTH)/media/webrtc/trunk/third_party/libyuv/libyuv_libyuv) \
   $(call EXPAND_LIBNAME_PATH,nicer,$(DEPTH)/media/mtransport/third_party/nICEr/nicer_nicer) \
   $(call EXPAND_LIBNAME_PATH,nrappkit,$(DEPTH)/media/mtransport/third_party/nrappkit/nrappkit_nrappkit) \
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -148,18 +148,19 @@ MediaConduitErrorCode WebrtcAudioConduit
     MOZ_ASSERT(other->mVoiceEngine);
     mVoiceEngine = other->mVoiceEngine;
   } else {
 #ifdef MOZ_WIDGET_ANDROID
       jobject context = jsjni_GetGlobalContextRef();
 
       // get the JVM
       JavaVM *jvm = jsjni_GetVM();
+      JNIEnv* jenv = jsjni_GetJNIForThread();
 
-      if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
+      if (webrtc::VoiceEngine::SetAndroidObjects(jvm, jenv, (void*)context) != 0) {
         CSFLogError(logTag, "%s Unable to set Android objects", __FUNCTION__);
         return kMediaConduitSessionNotInited;
       }
 #endif
 
     //Per WebRTC APIs below function calls return NULL on failure
     if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
     {
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
@@ -967,17 +967,18 @@ WebrtcVideoConduit::FrameSizeChange(unsi
   CSFLogError(logTag,  "%s Renderer is NULL ", __FUNCTION__);
   return -1;
 }
 
 int
 WebrtcVideoConduit::DeliverFrame(unsigned char* buffer,
                                  int buffer_size,
                                  uint32_t time_stamp,
-                                 int64_t render_time)
+                                 int64_t render_time,
+                                 void *handle)
 {
   CSFLogDebug(logTag,  "%s Buffer Size %d", __FUNCTION__, buffer_size);
 
   if(mRenderer)
   {
     mRenderer->RenderVideoFrame(buffer, buffer_size, time_stamp, render_time);
     return 0;
   }
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -11,17 +11,16 @@
 
 // Video Engine Includes
 #include "webrtc/common_types.h"
 #include "webrtc/video_engine/include/vie_base.h"
 #include "webrtc/video_engine/include/vie_capture.h"
 #include "webrtc/video_engine/include/vie_codec.h"
 #include "webrtc/video_engine/include/vie_render.h"
 #include "webrtc/video_engine/include/vie_network.h"
-#include "webrtc/video_engine/include/vie_file.h"
 #include "webrtc/video_engine/include/vie_rtp_rtcp.h"
 
 /** This file hosts several structures identifying different aspects
  * of a RTP Session.
  */
 
  using  webrtc::ViEBase;
  using  webrtc::ViENetwork;
@@ -143,17 +142,25 @@ public:
 
 
   /**
    * Webrtc External Renderer Implementation APIs.
   * Raw I420 Frames are delivered to the VideoConduit by the VideoEngine
    */
   virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
 
-  virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t);
+  virtual int DeliverFrame(unsigned char*, int, uint32_t, int64_t,
+                           void *handle);
+
+  /**
+   * Does DeliverFrame() support a null buffer and non-null handle
+   * (video texture)?
+   * XXX Investigate!  Especially for Android/B2G
+   */
+  virtual bool IsTextureSupported() { return false; }
 
   unsigned short SendingWidth() {
     return mSendingWidth;
   }
 
   unsigned short SendingHeight() {
     return mSendingHeight;
   }
--- a/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
+++ b/media/webrtc/signaling/src/media/VcmSIPCCBinding.cpp
@@ -583,17 +583,17 @@ static short vcmRxAllocICE_s(TemporaryRe
 
   // Set the opaque so we can correlate events.
   stream->SetOpaque(new VcmIceOpaque(stream_id, call_handle, level));
 
   // Attach ourself to the candidate signal.
   VcmSIPCCBinding::connectCandidateSignal(stream);
 
   std::vector<std::string> candidates = stream->GetCandidates();
-  CSFLogDebug( logTag, "%s: Got %lu candidates", __FUNCTION__, candidates.size());
+  CSFLogDebug( logTag, "%s: Got %lu candidates", __FUNCTION__, (unsigned long) candidates.size());
 
   std::string default_addr;
   int default_port;
 
   nsresult res = stream->GetDefaultCandidate(1, &default_addr, &default_port);
   MOZ_ASSERT(NS_SUCCEEDED(res));
   if (!NS_SUCCEEDED(res)) {
     return VCM_ERROR;
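
Note on the cast above: std::vector<T>::size() returns size_t, whose width varies across the platforms this tree targets, while "%lu" requires exactly an unsigned long, so the explicit cast keeps the format string portable. Illustrative snippet, not from the tree:

#include <cstdio>
#include <string>
#include <vector>

void LogCandidateCount(const std::vector<std::string>& candidates) {
  // size_t may be 32- or 64-bit; the cast always matches "%lu".
  printf("Got %lu candidates\n", (unsigned long)candidates.size());
}
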
--- a/media/webrtc/trunk/webrtc/build/arm_neon.gypi
+++ b/media/webrtc/trunk/webrtc/build/arm_neon.gypi
@@ -18,13 +18,35 @@
 #   ],
 #   'includes': ['path/to/this/gypi/file'],
 # }
 
 {
   'cflags!': [
     '-mfpu=vfpv3-d16',
   ],
+  'cflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
   'cflags': [
     '-mfpu=neon',
     '-flax-vector-conversions',
   ],
+  'cflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+
 }
--- a/media/webrtc/trunk/webrtc/build/common.gypi
+++ b/media/webrtc/trunk/webrtc/build/common.gypi
@@ -40,26 +40,38 @@
       'build_with_libjingle%': '<(build_with_libjingle)',
       'webrtc_root%': '<(webrtc_root)',
       'apk_tests_path%': '<(apk_tests_path)',
       'import_isolate_path%': '<(import_isolate_path)',
       'modules_java_gyp_path%': '<(modules_java_gyp_path)',
 
       'webrtc_vp8_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp8',
       'rbe_components_path%': '<(webrtc_root)/modules/remote_bitrate_estimator',
+      'include_g711%': 1,
+      'include_g722%': 1,
+      'include_ilbc%': 1,
       'include_opus%': 1,
+      'include_isac%': 1,
+      'include_pcm16b%': 1,
     },
     'build_with_chromium%': '<(build_with_chromium)',
     'build_with_libjingle%': '<(build_with_libjingle)',
     'webrtc_root%': '<(webrtc_root)',
     'apk_tests_path%': '<(apk_tests_path)',
     'import_isolate_path%': '<(import_isolate_path)',
     'modules_java_gyp_path%': '<(modules_java_gyp_path)',
     'webrtc_vp8_dir%': '<(webrtc_vp8_dir)',
+
+    'include_g711%': '<(include_g711)',
+    'include_g722%': '<(include_g722)',
+    'include_ilbc%': '<(include_ilbc)',
     'include_opus%': '<(include_opus)',
+    'include_isac%': '<(include_isac)',
+    'include_pcm16b%': '<(include_pcm16b)',
+
     'rbe_components_path%': '<(rbe_components_path)',
 
     # The Chromium common.gypi we use treats all gyp files without
     # chromium_code==1 as third party code. This disables many of the
     # preferred warning settings.
     #
     # We can set this here to have WebRTC code treated as Chromium code. Our
     # third party code will still have the reduced warning settings.
@@ -108,16 +120,19 @@
         # Exclude internal VCM in Chromium build.
         'include_internal_video_capture%': 0,
 
         # Exclude internal video render module in Chromium build.
         'include_internal_video_render%': 0,
 
         # Include ndk cpu features in Chromium build.
         'include_ndk_cpu_features%': 1,
+
+        # lazily allocate the ~4MB of trace message buffers if set
+        'enable_lazy_trace_alloc%': 0,
       }, {  # Settings for the standalone (not-in-Chromium) build.
         # TODO(andrew): For now, disable the Chrome plugins, which causes a
         # flood of chromium-style warnings. Investigate enabling them:
         # http://code.google.com/p/webrtc/issues/detail?id=163
         'clang_use_chrome_plugins%': 0,
 
         'include_pulse_audio%': 1,
         'include_internal_audio_device%': 1,
@@ -131,16 +146,31 @@
         'enable_android_opensl%': 0,
       }, {
         'include_tests%': 1,
         'enable_tracing%': 1,
         # Switch between Android audio device OpenSL ES implementation
         # and Java Implementation
         'enable_android_opensl%': 0,
       }],
+      ['OS=="linux"', {
+        'include_alsa_audio%': 1,
+      }, {
+        'include_alsa_audio%': 0,
+      }],
+      ['OS=="solaris" or os_bsd==1', {
+        'include_pulse_audio%': 1,
+      }, {
+        'include_pulse_audio%': 0,
+      }],
+      ['OS=="linux" or OS=="solaris" or os_bsd==1', {
+        'include_v4l2_video_capture%': 1,
+      }, {
+        'include_v4l2_video_capture%': 0,
+      }],
       ['OS=="ios"', {
         'build_libjpeg%': 0,
         'enable_protobuf%': 0,
         'include_tests%': 0,
       }],
       ['target_arch=="arm" or target_arch=="armv7"', {
         'prefer_fixed_point%': 1,
       }],
@@ -155,20 +185,25 @@
       '../..',
       # To include the top-level directory when building in Chrome, so we can
       # use full paths (e.g. headers inside testing/ or third_party/).
       '<(DEPTH)',
     ],
     'defines': [
       # TODO(leozwang): Run this as a gclient hook rather than at build-time:
       # http://code.google.com/p/webrtc/issues/detail?id=687
-      'WEBRTC_SVNREVISION="Unavailable(issue687)"',
+      'WEBRTC_SVNREVISION="\\\"Unavailable_issue687\\\""',
       #'WEBRTC_SVNREVISION="<!(python <(webrtc_root)/build/version.py)"',
     ],
     'conditions': [
+      ['moz_widget_toolkit_gonk==1', {
+        'defines' : [
+          'WEBRTC_GONK',
+        ],
+      }],
       ['enable_tracing==1', {
         'defines': ['WEBRTC_LOGGING',],
       }],
       ['build_with_mozilla==1', {
         'defines': [
           # Changes settings for Mozilla build.
           'WEBRTC_MOZILLA_BUILD',
          ],
@@ -196,27 +231,41 @@
         ],
       }],
       ['target_arch=="arm" or target_arch=="armv7"', {
         'defines': [
           'WEBRTC_ARCH_ARM',
         ],
         'conditions': [
           ['armv7==1', {
-            'defines': ['WEBRTC_ARCH_ARM_V7',],
+            'defines': ['WEBRTC_ARCH_ARM_V7',
+                        'WEBRTC_BUILD_NEON_LIBS'],
             'conditions': [
               ['arm_neon==1', {
                 'defines': ['WEBRTC_ARCH_ARM_NEON',],
               }, {
                 'defines': ['WEBRTC_DETECT_ARM_NEON',],
               }],
             ],
           }],
         ],
       }],
+      ['os_bsd==1', {
+        'defines': [
+          'WEBRTC_BSD',
+          'WEBRTC_THREAD_RR',
+        ],
+      }],
+      ['OS=="dragonfly" or OS=="netbsd"', {
+        'defines': [
+          # doesn't support pthread_condattr_setclock
+          'WEBRTC_CLOCK_TYPE_REALTIME',
+        ],
+      }],
+      # Mozilla: if we support Mozilla on MIPS, we'll need to mod the cflags entries here
       ['target_arch=="mipsel"', {
         'defines': [
           'MIPS32_LE',
         ],
         'conditions': [
           ['mips_fpu==1', {
             'defines': [
               'MIPS_FPU_LE',
@@ -267,16 +316,23 @@
       }],
       ['OS=="ios"', {
         'defines': [
           'WEBRTC_MAC',
           'WEBRTC_IOS',
         ],
       }],
       ['OS=="linux"', {
+#        'conditions': [
+#          ['have_clock_monotonic==1', {
+#            'defines': [
+#              'WEBRTC_CLOCK_TYPE_REALTIME',
+#            ],
+#          }],
+#        ],
         'defines': [
           'WEBRTC_LINUX',
         ],
       }],
       ['OS=="mac"', {
         'defines': [
           'WEBRTC_MAC',
         ],
@@ -290,27 +346,28 @@
         # http://code.google.com/p/webrtc/issues/detail?id=261 is solved.
         'msvs_disabled_warnings': [
           4373,  # legacy warning for ignoring const / volatile in signatures.
           4389,  # Signed/unsigned mismatch.
         ],
         # Re-enable some warnings that Chromium disables.
         'msvs_disabled_warnings!': [4189,],
       }],
+      # used on GONK as well
+      ['enable_android_opensl==1 and (OS=="android" or moz_widget_toolkit_gonk==1)', {
+        'defines': [
+          'WEBRTC_ANDROID_OPENSLES',
+        ],
+      }],
       ['OS=="android"', {
         'defines': [
           'WEBRTC_LINUX',
           'WEBRTC_ANDROID',
          ],
          'conditions': [
-           ['enable_android_opensl==1', {
-             'defines': [
-               'WEBRTC_ANDROID_OPENSLES',
-             ],
-           }],
            ['clang!=1', {
              # The Android NDK doesn't provide optimized versions of these
              # functions. Ensure they are disabled for all compilers.
              'cflags': [
                '-fno-builtin-cos',
                '-fno-builtin-sin',
                '-fno-builtin-cosf',
                '-fno-builtin-sinf',
--- a/media/webrtc/trunk/webrtc/build/merge_libs.gyp
+++ b/media/webrtc/trunk/webrtc/build/merge_libs.gyp
@@ -39,10 +39,12 @@
           'outputs': ['<(output_lib)'],
           'action': ['python',
                      'merge_libs.py',
                      '<(PRODUCT_DIR)',
                      '<(output_lib)',],
         },
       ],
     },
+#      }],
+#    ],
   ],
 }
--- a/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
+++ b/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
@@ -150,16 +150,17 @@
       'targets': [
         {
           'target_name': 'common_audio_sse2',
           'type': 'static_library',
           'sources': [
             'resampler/sinc_resampler_sse.cc',
           ],
           'cflags': ['-msse2',],
+          'cflags_mozilla': ['-msse2',],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],  # targets
     }],
     ['(target_arch=="arm" and armv7==1) or target_arch=="armv7"', {
       'targets': [
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
@@ -12,105 +12,54 @@
 /*
  * A wrapper for resampling a numerous amount of sampling combinations.
  */
 
 #ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
 #define WEBRTC_RESAMPLER_RESAMPLER_H_
 
 #include "webrtc/typedefs.h"
+#include "speex/speex_resampler.h"
 
 namespace webrtc
 {
 
-// TODO(andrew): the implementation depends on the exact values of this enum.
-// It should be rewritten in a less fragile way.
+#define FIXED_RATE_RESAMPLER 0x10
 enum ResamplerType
 {
-    // 4 MSB = Number of channels
-    // 4 LSB = Synchronous or asynchronous
-
-    kResamplerSynchronous = 0x10,
-    kResamplerAsynchronous = 0x11,
-    kResamplerSynchronousStereo = 0x20,
-    kResamplerAsynchronousStereo = 0x21,
-    kResamplerInvalid = 0xff
-};
-
-// TODO(andrew): doesn't need to be part of the interface.
-enum ResamplerMode
-{
-    kResamplerMode1To1,
-    kResamplerMode1To2,
-    kResamplerMode1To3,
-    kResamplerMode1To4,
-    kResamplerMode1To6,
-    kResamplerMode1To12,
-    kResamplerMode2To3,
-    kResamplerMode2To11,
-    kResamplerMode4To11,
-    kResamplerMode8To11,
-    kResamplerMode11To16,
-    kResamplerMode11To32,
-    kResamplerMode2To1,
-    kResamplerMode3To1,
-    kResamplerMode4To1,
-    kResamplerMode6To1,
-    kResamplerMode12To1,
-    kResamplerMode3To2,
-    kResamplerMode11To2,
-    kResamplerMode11To4,
-    kResamplerMode11To8
+    kResamplerSynchronous            = 0x00,
+    kResamplerSynchronousStereo      = 0x01,
+    kResamplerFixedSynchronous       = 0x00 | FIXED_RATE_RESAMPLER,
+    kResamplerFixedSynchronousStereo = 0x01 | FIXED_RATE_RESAMPLER,
 };
 
 class Resampler
 {
-
 public:
     Resampler();
     // TODO(andrew): use an init function instead.
-    Resampler(int inFreq, int outFreq, ResamplerType type);
+    Resampler(int in_freq, int out_freq, ResamplerType type);
     ~Resampler();
 
     // Reset all states
-    int Reset(int inFreq, int outFreq, ResamplerType type);
+    int Reset(int in_freq, int out_freq, ResamplerType type);
 
     // Reset all states if any parameter has changed
-    int ResetIfNeeded(int inFreq, int outFreq, ResamplerType type);
+    int ResetIfNeeded(int in_freq, int out_freq, ResamplerType type);
 
     // Synchronous resampling, all output samples are written to samplesOut
-    int Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
-             int maxLen, int &outLen);
-
-    // Asynchronous resampling, input
-    int Insert(int16_t* samplesIn, int lengthIn);
-
-    // Asynchronous resampling output, remaining samples are buffered
-    int Pull(int16_t* samplesOut, int desiredLen, int &outLen);
+    int Push(const int16_t* samples_in, int length_in,
+             int16_t* samples_out, int max_len, int &out_len);
 
 private:
-    // Generic pointers since we don't know what states we'll need
-    void* state1_;
-    void* state2_;
-    void* state3_;
+    bool IsFixedRate() { return !!(type_ & FIXED_RATE_RESAMPLER); }
+
+    SpeexResamplerState* state_;
 
-    // Storage if needed
-    int16_t* in_buffer_;
-    int16_t* out_buffer_;
-    int in_buffer_size_;
-    int out_buffer_size_;
-    int in_buffer_size_max_;
-    int out_buffer_size_max_;
-
-    // State
-    int my_in_frequency_khz_;
-    int my_out_frequency_khz_;
-    ResamplerMode my_mode_;
-    ResamplerType my_type_;
-
-    // Extra instance for stereo
-    Resampler* slave_left_;
-    Resampler* slave_right_;
+    int in_freq_;
+    int out_freq_;
+    int channels_;
+    ResamplerType type_;
 };
 
 }  // namespace webrtc
 
 #endif // WEBRTC_RESAMPLER_RESAMPLER_H_
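
Note on the rewrite above: only the synchronous mono/stereo modes survive, and the fixed mode table is replaced by a single SpeexResamplerState, so arbitrary input/output rate pairs now work. A minimal usage sketch, assuming only the public interface shown in this header; the function name and rates are illustrative:

#include "webrtc/common_audio/resampler/include/resampler.h"

int Upsample16kTo48kMono(const int16_t* in, int in_len,
                         int16_t* out, int out_max) {
  webrtc::Resampler resampler(16000, 48000, webrtc::kResamplerSynchronous);
  int out_len = 0;
  // Push() writes every output sample to `out` and reports the count.
  // Per the new implementation, out_max must also be >= in_len.
  if (resampler.Push(in, in_len, out, out_max, out_len) != 0) {
    return -1;  // e.g. output buffer too small
  }
  return out_len;
}
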
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
@@ -8,17 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/common_audio/resampler/include/push_resampler.h"
 
 #include <string.h>
 
 #include "webrtc/common_audio/include/audio_util.h"
-#include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/resampler/push_sinc_resampler.h"
 
 namespace webrtc {
 
 PushResampler::PushResampler()
     : sinc_resampler_(NULL),
       sinc_resampler_right_(NULL),
       src_sample_rate_hz_(0),
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
@@ -10,1075 +10,126 @@
 
 
 /*
  * A wrapper for resampling a numerous amount of sampling combinations.
  */
 
 #include <stdlib.h>
 #include <string.h>
+#include <assert.h>
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
+// TODO(jesup): adjust quality per platform capability
+// Note: if these are changed (higher), you may need to change the
+// KernelDelay values in the unit tests here and in output_mixer.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK)
+#define RESAMPLER_QUALITY 2
+#else
+#define RESAMPLER_QUALITY 3
+#endif
 
 namespace webrtc
 {
 
-Resampler::Resampler()
+Resampler::Resampler() : state_(NULL), type_(kResamplerSynchronous)
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
+  // Note: Push will fail until Reset() is called
 }
 
-Resampler::Resampler(int inFreq, int outFreq, ResamplerType type)
+Resampler::Resampler(int in_freq, int out_freq, ResamplerType type) :
+  state_(NULL) // all others get initialized in reset
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
-
-    Reset(inFreq, outFreq, type);
+  Reset(in_freq, out_freq, type);
 }
 
 Resampler::~Resampler()
 {
-    if (state1_)
-    {
-        free(state1_);
-    }
-    if (state2_)
-    {
-        free(state2_);
-    }
-    if (state3_)
-    {
-        free(state3_);
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-    }
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+  }
 }
 
-int Resampler::ResetIfNeeded(int inFreq, int outFreq, ResamplerType type)
+int Resampler::ResetIfNeeded(int in_freq, int out_freq, ResamplerType type)
 {
-    int tmpInFreq_kHz = inFreq / 1000;
-    int tmpOutFreq_kHz = outFreq / 1000;
-
-    if ((tmpInFreq_kHz != my_in_frequency_khz_) || (tmpOutFreq_kHz != my_out_frequency_khz_)
-            || (type != my_type_))
-    {
-        return Reset(inFreq, outFreq, type);
-    } else
-    {
-        return 0;
-    }
+  if (!state_ || type != type_ ||
+      in_freq != in_freq_ || out_freq != out_freq_)
+  {
+    // Note that fixed-rate resamplers where input == output rate will
+    // have state_ == NULL, and will call Reset() here - but reset won't
+    // do anything beyond overwrite the member vars unless it needs a
+    // real resampler.
+    return Reset(in_freq, out_freq, type);
+  } else {
+    return 0;
+  }
 }
 
-int Resampler::Reset(int inFreq, int outFreq, ResamplerType type)
+int Resampler::Reset(int in_freq, int out_freq, ResamplerType type)
 {
-
-    if (state1_)
-    {
-        free(state1_);
-        state1_ = NULL;
-    }
-    if (state2_)
-    {
-        free(state2_);
-        state2_ = NULL;
-    }
-    if (state3_)
-    {
-        free(state3_);
-        state3_ = NULL;
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-        in_buffer_ = NULL;
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-        out_buffer_ = NULL;
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-        slave_left_ = NULL;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-        slave_right_ = NULL;
-    }
-
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-
-    // This might be overridden if parameters are not accepted.
-    my_type_ = type;
-
-    // Start with a math exercise, Euclid's algorithm to find the gcd:
-
-    int a = inFreq;
-    int b = outFreq;
-    int c = a % b;
-    while (c != 0)
-    {
-        a = b;
-        b = c;
-        c = a % b;
-    }
-    // b is now the gcd;
-
-    // We need to track what domain we're in.
-    my_in_frequency_khz_ = inFreq / 1000;
-    my_out_frequency_khz_ = outFreq / 1000;
-
-    // Scale with GCD
-    inFreq = inFreq / b;
-    outFreq = outFreq / b;
-
-    // Do we need stereo?
-    if ((my_type_ & 0xf0) == 0x20)
-    {
-        // Change type to mono
-        type = static_cast<ResamplerType>(
-            ((static_cast<int>(type) & 0x0f) + 0x10));
-        slave_left_ = new Resampler(inFreq, outFreq, type);
-        slave_right_ = new Resampler(inFreq, outFreq, type);
-    }
+  uint32_t channels = (type == kResamplerSynchronousStereo ||
+                       type == kResamplerFixedSynchronousStereo) ? 2 : 1;
 
-    if (inFreq == outFreq)
-    {
-        my_mode_ = kResamplerMode1To1;
-    } else if (inFreq == 1)
-    {
-        switch (outFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode1To2;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode1To3;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode1To4;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode1To6;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode1To12;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if (outFreq == 1)
-    {
-        switch (inFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode2To1;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode3To1;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode4To1;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode6To1;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode12To1;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if ((inFreq == 2) && (outFreq == 3))
-    {
-        my_mode_ = kResamplerMode2To3;
-    } else if ((inFreq == 2) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode2To11;
-    } else if ((inFreq == 4) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode4To11;
-    } else if ((inFreq == 8) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode8To11;
-    } else if ((inFreq == 3) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode3To2;
-    } else if ((inFreq == 11) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode11To2;
-    } else if ((inFreq == 11) && (outFreq == 4))
-    {
-        my_mode_ = kResamplerMode11To4;
-    } else if ((inFreq == 11) && (outFreq == 16))
-    {
-        my_mode_ = kResamplerMode11To16;
-    } else if ((inFreq == 11) && (outFreq == 32))
-    {
-        my_mode_ = kResamplerMode11To32;
-    } else if ((inFreq == 11) && (outFreq == 8))
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+    state_ = NULL;
+  }
+  type_ = type;
+  channels_ = channels;
+  in_freq_ = in_freq;
+  out_freq_ = out_freq;
+
+  // For fixed-rate, same-rate resamples we just memcpy and so don't spin up a resampler
+  if (in_freq != out_freq || !IsFixedRate())
+  {
+    state_ = speex_resampler_init(channels, in_freq, out_freq, RESAMPLER_QUALITY, NULL);
+    if (!state_)
     {
-        my_mode_ = kResamplerMode11To8;
-    } else
-    {
-        my_type_ = kResamplerInvalid;
-        return -1;
+      return -1;
     }
-
-    // Now create the states we need
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            // No state needed;
-            break;
-        case kResamplerMode1To2:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To3:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            break;
-        case kResamplerMode1To4:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To6:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:6
-            state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state2_);
-            break;
-        case kResamplerMode1To12:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 4:12
-            state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz(
-                (WebRtcSpl_State16khzTo48khz*) state3_);
-            break;
-        case kResamplerMode2To3:
-            // 2:6
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            // 6:3
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode2To11:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state2_);
-            break;
-        case kResamplerMode4To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state1_);
-            break;
-        case kResamplerMode8To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo22khz));
-            WebRtcSpl_ResetResample16khzTo22khz((WebRtcSpl_State16khzTo22khz *)state1_);
-            break;
-        case kResamplerMode11To16:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To32:
-            // 11 -> 22
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            // 22 -> 16
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-
-            // 16 -> 32
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode2To1:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To1:
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            break;
-        case kResamplerMode4To1:
-            // 4:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode6To1:
-            // 6:2
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode12To1:
-            // 12:4
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz(
-                (WebRtcSpl_State48khzTo16khz*) state1_);
-            // 4:2
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To2:
-            // 3:6
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 6:2
-            state2_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To2:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode11To4:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-            break;
-        case kResamplerMode11To8:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state1_);
-            break;
-
-    }
-
-    return 0;
+  }
+  return 0;
 }
 
-// Synchronous resampling, all output samples are written to samplesOut
-int Resampler::Push(const int16_t * samplesIn, int lengthIn, int16_t* samplesOut,
-                    int maxLen, int &outLen)
+// Synchronous resampling, all output samples are written to samples_out
+// TODO(jesup) Change to take samples-per-channel in and out
+int Resampler::Push(const int16_t* samples_in, int length_in,
+                    int16_t* samples_out, int max_len, int &out_len)
 {
-    // Check that the resampler is not in asynchronous mode
-    if (my_type_ & 0x0f)
-    {
-        return -1;
-    }
-
-    // Do we have a stereo signal?
-    if ((my_type_ & 0xf0) == 0x20)
+  if (max_len < length_in)
+  {
+    return -1;
+  }
+  if (!state_)
+  {
+    if (!IsFixedRate() || in_freq_ != out_freq_)
     {
-
-        // Split up the signal and call the slave object for each channel
-
-        int16_t* left = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* right = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* out_left = (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int16_t* out_right =
-                (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int res = 0;
-        for (int i = 0; i < lengthIn; i += 2)
-        {
-            left[i >> 1] = samplesIn[i];
-            right[i >> 1] = samplesIn[i + 1];
-        }
-
-        // It's OK to overwrite the local parameter, since it's just a copy
-        lengthIn = lengthIn / 2;
-
-        int actualOutLen_left = 0;
-        int actualOutLen_right = 0;
-        // Do resampling for right channel
-        res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, actualOutLen_left);
-        res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, actualOutLen_right);
-        if (res || (actualOutLen_left != actualOutLen_right))
-        {
-            free(left);
-            free(right);
-            free(out_left);
-            free(out_right);
-            return -1;
-        }
-
-        // Reassemble the signal
-        for (int i = 0; i < actualOutLen_left; i++)
-        {
-            samplesOut[i * 2] = out_left[i];
-            samplesOut[i * 2 + 1] = out_right[i];
-        }
-        outLen = 2 * actualOutLen_left;
-
-        free(left);
-        free(right);
-        free(out_left);
-        free(out_right);
-
-        return 0;
+      // Since we initialize to a non-Fixed type, Push() will fail
+      // until Reset() is called
+      return -1;
     }
 
-    // Containers for temp samples
-    int16_t* tmp;
-    int16_t* tmp_2;
-    // tmp data for resampling routines
-    int32_t* tmp_mem;
-
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            memcpy(samplesOut, samplesIn, lengthIn * sizeof(int16_t));
-            outLen = lengthIn;
-            break;
-        case kResamplerMode1To2:
-            if (maxLen < (lengthIn * 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-            return 0;
-        case kResamplerMode1To3:
-
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn * 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode1To4:
-            if (maxLen < (lengthIn * 4))
-            {
-                return -1;
-            }
-
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:4
-            WebRtcSpl_UpsampleBy2(tmp, lengthIn * 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn * 4;
-            free(tmp);
-            return 0;
-        case kResamplerMode1To6:
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 6))
-            {
-                return -1;
-            }
-
-            //1:2
-
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-
-            for (int i = 0; i < outLen; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode1To12:
-            // We can only handle blocks of 40 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 40) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn * 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*) malloc(sizeof(int16_t) * 4 * lengthIn);
-            //1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
-                                  (int32_t*) state1_);
-            outLen = lengthIn * 2;
-            //2:4
-            WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (int32_t*) state2_);
-            outLen = outLen * 2;
-            // 4:12
-            for (int i = 0; i < outLen; i += 160) {
-              // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
-              // as input and outputs a resampled block of 480 samples. The
-              // data is now actually in 32 kHz sampling rate, despite the
-              // function name, and with a resampling factor of three becomes
-              // 96 kHz.
-              WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                             (WebRtcSpl_State16khzTo48khz*) state3_,
-                                             tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode2To3:
-            if (maxLen < (lengthIn * 3 / 2))
-            {
-                return -1;
-            }
-            // 2:6
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, tmp + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            lengthIn = lengthIn * 3;
-            // 6:3
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode2To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 2))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(tmp + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state2_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode4To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 4))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode8To11:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 8))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(88 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 8,
-                                               (WebRtcSpl_State16khzTo22khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 8;
-            free(tmp_mem);
-            return 0;
-
-        case kResamplerMode11To16:
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 16) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(tmp + i, samplesOut + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            outLen = (lengthIn * 16) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode11To32:
-
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 32) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            // 11 -> 22 kHz in samplesOut
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-
-            // 22 -> 16 in tmp
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesOut + i, tmp + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            // 16 -> 32 in samplesOut
-            WebRtcSpl_UpsampleBy2(tmp, (lengthIn * 16) / 11, samplesOut,
-                                  (int32_t*)state3_);
-
-            outLen = (lengthIn * 32) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode2To1:
-            if (maxLen < (lengthIn / 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn / 2;
-            return 0;
-        case kResamplerMode3To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode4To1:
-            if (maxLen < (lengthIn / 4))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * lengthIn / 2);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn / 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 4;
-            free(tmp);
-            return 0;
-
-        case kResamplerMode6To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 6))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn) / 3);
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            WebRtcSpl_DownsampleBy2(tmp, outLen, samplesOut, (int32_t*)state2_);
-            free(tmp);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode12To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn / 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 3);
-            tmp_2 = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 6);
-            // 12:4
-            for (int i = 0; i < lengthIn; i += 480) {
-              // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
-              // as input and outputs a resampled block of 160 samples. The
-              // data is now actually in 96 kHz sampling rate, despite the
-              // function name, and with a resampling factor of 1/3 becomes
-              // 32 kHz.
-              WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                             (WebRtcSpl_State48khzTo16khz*) state1_,
-                                             tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2,
-                                    (int32_t*) state2_);
-            outLen = outLen / 2;
-            free(tmp);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
-                                    (int32_t*) state3_);
-            free(tmp_2);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode3To2:
-            if (maxLen < (lengthIn * 2 / 3))
-            {
-                return -1;
-            }
-            // 3:6
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 2));
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-            // 6:2
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                free(tmp);
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(tmp + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To2:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 2) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((lengthIn * 4) / 11 * sizeof(int16_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, tmp + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            lengthIn = (lengthIn * 4) / 11;
-
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode11To4:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 4) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, samplesOut + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 4) / 11;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To8:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 8) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesIn + i, samplesOut + (i * 8) / 11,
-                                               (WebRtcSpl_State22khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 8) / 11;
-            free(tmp_mem);
-            return 0;
-            break;
-
-    }
+    // Fixed-rate, same-freq "resample" - use memcpy, which avoids
+    // filtering and delay.  For non-fixed rates, where we might tweak
+    // from 48000->48000 to 48000->48001 for drift, we need to resample
+    // (and filter) all the time to avoid glitches on rate changes.
+    memcpy(samples_out, samples_in, length_in*sizeof(*samples_in));
+    out_len = length_in;
     return 0;
-}
-
-// Asynchronous resampling, input
-int Resampler::Insert(int16_t * samplesIn, int lengthIn)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-    int sizeNeeded, tenMsblock;
-
-    // Determine need for size of outBuffer
-    sizeNeeded = out_buffer_size_ + ((lengthIn + in_buffer_size_) * my_out_frequency_khz_)
-            / my_in_frequency_khz_;
-    if (sizeNeeded > out_buffer_size_max_)
-    {
-        // Round the value upwards to complete 10 ms blocks
-        tenMsblock = my_out_frequency_khz_ * 10;
-        sizeNeeded = (sizeNeeded / tenMsblock + 1) * tenMsblock;
-        out_buffer_ = (int16_t*)realloc(out_buffer_, sizeNeeded * sizeof(int16_t));
-        out_buffer_size_max_ = sizeNeeded;
-    }
-
-    // If we need to use inBuffer, make sure all input data fits there.
-
-    tenMsblock = my_in_frequency_khz_ * 10;
-    if (in_buffer_size_ || (lengthIn % tenMsblock))
-    {
-        // Check if input buffer size is enough
-        if ((in_buffer_size_ + lengthIn) > in_buffer_size_max_)
-        {
-            // Round the value upwards to complete 10 ms blocks
-            sizeNeeded = ((in_buffer_size_ + lengthIn) / tenMsblock + 1) * tenMsblock;
-            in_buffer_ = (int16_t*)realloc(in_buffer_,
-                                           sizeNeeded * sizeof(int16_t));
-            in_buffer_size_max_ = sizeNeeded;
-        }
-        // Copy in data to input buffer
-        memcpy(in_buffer_ + in_buffer_size_, samplesIn, lengthIn * sizeof(int16_t));
-
-        // Resample all available 10 ms blocks
-        int lenOut;
-        int dataLenToResample = (in_buffer_size_ / tenMsblock) * tenMsblock;
-        Push(in_buffer_, dataLenToResample, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-
-        // Save the rest
-        memmove(in_buffer_, in_buffer_ + dataLenToResample,
-                (in_buffer_size_ - dataLenToResample) * sizeof(int16_t));
-        in_buffer_size_ -= dataLenToResample;
-    } else
-    {
-        // Just resample
-        int lenOut;
-        Push(in_buffer_, lengthIn, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-    }
-
-    return 0;
-}
-
-// Asynchronous resampling output, remaining samples are buffered
-int Resampler::Pull(int16_t* samplesOut, int desiredLen, int &outLen)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-
-    // Check that we have enough data
-    if (desiredLen <= out_buffer_size_)
-    {
-        // Give out the date
-        memcpy(samplesOut, out_buffer_, desiredLen * sizeof(int32_t));
-
-        // Shuffle down remaining
-        memmove(out_buffer_, out_buffer_ + desiredLen,
-                (out_buffer_size_ - desiredLen) * sizeof(int16_t));
-
-        // Update remaining size
-        out_buffer_size_ -= desiredLen;
-
-        return 0;
-    } else
-    {
-        return -1;
-    }
+  }
+  assert(channels_ == 1 || channels_ == 2);
+  // Convert the interleaved totals to per-channel sample counts, which is
+  // what the speex API expects; length_in is updated for the check below.
+  spx_uint32_t len = length_in = (length_in >> (channels_ - 1));
+  spx_uint32_t out = (spx_uint32_t) (max_len >> (channels_ - 1));
+  if ((speex_resampler_process_interleaved_int(state_, samples_in, &len,
+                             samples_out, &out) != RESAMPLER_ERR_SUCCESS) ||
+      len != (spx_uint32_t) length_in)
+  {
+    return -1;
+  }
+  out_len = (int) (channels_ * out);
+  return 0;
 }
 
 }  // namespace webrtc
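
For reference, the rewritten Push() above drives the speex resampler's
interleaved integer API. A minimal standalone sketch of that API follows;
the rates, quality level, and buffer sizes are illustrative only and not
part of the patch:

    #include <speex/speex_resampler.h>

    int ResampleOnce() {
      int err = 0;
      // Stereo, 44.1 kHz -> 48 kHz, quality 3 (all values illustrative).
      SpeexResamplerState* st = speex_resampler_init(2, 44100, 48000, 3, &err);
      if (!st || err != RESAMPLER_ERR_SUCCESS)
        return -1;
      spx_int16_t in[441 * 2] = {0};   // 10 ms of interleaved stereo input
      spx_int16_t out[480 * 2];        // room for 10 ms of output
      spx_uint32_t in_len = 441;       // samples per channel available
      spx_uint32_t out_len = 480;      // samples per channel of capacity
      int res = speex_resampler_process_interleaved_int(st, in, &in_len,
                                                        out, &out_len);
      // On return, in_len holds frames consumed and out_len frames produced.
      speex_resampler_destroy(st);
      return res == RESAMPLER_ERR_SUCCESS ? 0 : -1;
    }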
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
@@ -3,67 +3,59 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <math.h>
+
+#include <algorithm>  // std::min, used by RunResampleTest below
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 
 // TODO(andrew): this is a work-in-progress. Many more tests are needed.
 
 namespace webrtc {
 namespace {
 const ResamplerType kTypes[] = {
   kResamplerSynchronous,
-  kResamplerAsynchronous,
   kResamplerSynchronousStereo,
-  kResamplerAsynchronousStereo
-  // kResamplerInvalid excluded
 };
 const size_t kTypesSize = sizeof(kTypes) / sizeof(*kTypes);
 
 // Rates we must support.
 const int kMaxRate = 96000;
 const int kRates[] = {
   8000,
   16000,
   32000,
-  44000,
+  44100,
   48000,
   kMaxRate
 };
 const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
 const int kMaxChannels = 2;
 const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
 
-// TODO(andrew): should we be supporting these combinations?
-bool ValidRates(int in_rate, int out_rate) {
-  // Not the most compact notation, for clarity.
-  if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
-      (out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
-    return false;
-  }
-
-  return true;
-}
-
 class ResamplerTest : public testing::Test {
  protected:
   ResamplerTest();
   virtual void SetUp();
   virtual void TearDown();
+  void RunResampleTest(int channels,
+                       int src_sample_rate_hz,
+                       int dst_sample_rate_hz);
 
   Resampler rs_;
   int16_t data_in_[kDataSize];
   int16_t data_out_[kDataSize];
+  int16_t data_reference_[kDataSize];
 };
 
 ResamplerTest::ResamplerTest() {}
 
 void ResamplerTest::SetUp() {
   // Initialize input data with anything. The tests are content independent.
   memset(data_in_, 1, sizeof(data_in_));
 }
@@ -78,66 +70,141 @@ TEST_F(ResamplerTest, Reset) {
   // Check that all required combinations are supported.
   for (size_t i = 0; i < kRatesSize; ++i) {
     for (size_t j = 0; j < kRatesSize; ++j) {
       for (size_t k = 0; k < kTypesSize; ++k) {
         std::ostringstream ss;
         ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
             << ", type: " << kTypes[k];
         SCOPED_TRACE(ss.str());
-        if (ValidRates(kRates[i], kRates[j]))
-          EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
-        else
-          EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
       }
     }
   }
 }
 
-// TODO(tlegrand): Replace code inside the two tests below with a function
-// with number of channels and ResamplerType as input.
-TEST_F(ResamplerTest, Synchronous) {
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(int16_t* buffer, float data, int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i] = data * i;
+  }
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(int16_t* buffer, float left, float right,
+                    int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i * 2] = left * i;
+    buffer[i * 2 + 1] = right * i;
+  }
+}
+
+// Computes the best SNR based on the error between |reference| and
+// |test|. It allows for a sample delay between the signals to
+// compensate for the resampling delay.
+float ComputeSNR(const int16_t* reference, const int16_t* test,
+                 int sample_rate_hz, int channels, int max_delay) {
+  float best_snr = 0;
+  int best_delay = 0;
+  int samples_per_channel = sample_rate_hz/100;
+  for (int delay = 0; delay < max_delay; delay++) {
+    float mse = 0;
+    float variance = 0;
+    for (int i = 0; i < samples_per_channel * channels - delay; i++) {
+      int error = reference[i] - test[i + delay];
+      mse += error * error;
+      variance += reference[i] * reference[i];
+    }
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  return best_snr;
+}
 
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-      }
+void ResamplerTest::RunResampleTest(int channels,
+                                    int src_sample_rate_hz,
+                                    int dst_sample_rate_hz) {
+  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 30;
+  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float kDstLeft = kResamplingFactor * kSrcLeft;
+  const float kDstRight = kResamplingFactor * kSrcRight;
+  if (channels == 1)
+    SetMonoFrame(data_in_, kSrcLeft, src_sample_rate_hz);
+  else
+    SetStereoFrame(data_in_, kSrcLeft, kSrcRight, src_sample_rate_hz);
+
+  if (channels == 1) {
+    SetMonoFrame(data_out_, 0, dst_sample_rate_hz);
+    SetMonoFrame(data_reference_, kDstLeft, dst_sample_rate_hz);
+  } else {
+    SetStereoFrame(data_out_, 0, 0, dst_sample_rate_hz);
+    SetStereoFrame(data_reference_, kDstLeft, kDstRight, dst_sample_rate_hz);
+  }
+
+  // The speex resampler has a known delay dependent on quality and rates,
+  // which we approximate here. Multiplying by two gives us a crude maximum
+  // for any resampling, as the old resampler typically (but not always)
+  // has lower delay.  The actual delay is calculated internally based on the
+  // filter length in the QualityMap.
+  static const int kInputKernelDelaySamples = 16*3;
+  const int max_delay = std::min(1.0f, 1/kResamplingFactor) *
+                        kInputKernelDelaySamples * channels * 2;
+  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
+      channels, src_sample_rate_hz, channels, dst_sample_rate_hz);
+
+  int in_length = channels * src_sample_rate_hz / 100;
+  int out_length = 0;
+  EXPECT_EQ(0, rs_.Reset(src_sample_rate_hz, dst_sample_rate_hz,
+                         (channels == 1 ?
+                          kResamplerSynchronous :
+                          kResamplerSynchronousStereo)));
+  EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                        out_length));
+  EXPECT_EQ(channels * dst_sample_rate_hz / 100, out_length);
+
+  EXPECT_GT(ComputeSNR(data_reference_, data_out_, dst_sample_rate_hz,
+                       channels, max_delay), 40.0f);
+}
+
+TEST_F(ResamplerTest, Synchronous) {
+  // Number of channels is 1, mono mode.
+  const int kChannels = 1;
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 
 TEST_F(ResamplerTest, SynchronousStereo) {
   // Number of channels is 2, stereo mode.
   const int kChannels = 2;
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
-
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kChannels * kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
-                               kResamplerSynchronousStereo));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
-                                kResamplerSynchronousStereo));
-      }
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 }  // namespace
 }  // namespace webrtc
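
The ComputeSNR() helper above scores the resampled output against an ideal
reference, searching over a small delay range because the resampling filter
shifts the signal in time. A toy standalone sketch of that search, using
synthetic 8-sample buffers rather than anything from the test:

    #include <math.h>
    #include <stdio.h>

    int main() {
      const int n = 8;
      short ref[n]  = {0, 10, 20, 30, 40, 50, 60, 70};
      short test[n] = {0,  0, 10, 20, 30, 40, 50, 60};  // ref delayed by one
      for (int delay = 0; delay < 3; delay++) {
        float mse = 0, variance = 0;
        for (int i = 0; i < n - delay; i++) {
          int e = ref[i] - test[i + delay];
          mse += e * e;
          variance += ref[i] * ref[i];
        }
        float snr = mse > 0 ? 10 * log10(variance / mse) : 100;  // 100 dB cap
        printf("delay=%d snr=%.1f dB\n", delay, snr);  // peaks at delay=1
      }
      return 0;
    }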
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
@@ -21,21 +21,21 @@ float SincResampler::Convolve_NEON(const
                                    const float* k2,
                                    double kernel_interpolation_factor) {
   float32x4_t m_input;
   float32x4_t m_sums1 = vmovq_n_f32(0);
   float32x4_t m_sums2 = vmovq_n_f32(0);
 
   const float* upper = input_ptr + kKernelSize;
   for (; input_ptr < upper; ) {
-    m_input = vld1q_f32(input_ptr);
+    m_input = vld1q_f32((const float32_t *) input_ptr);
     input_ptr += 4;
-    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
+    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32((const float32_t *) k1));
     k1 += 4;
-    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
+    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32((const float32_t *) k2));
     k2 += 4;
   }
 
   // Linearly interpolate the two "convolutions".
   m_sums1 = vmlaq_f32(
       vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
       m_sums2, vmovq_n_f32(kernel_interpolation_factor));
 
--- a/media/webrtc/trunk/webrtc/common_types.h
+++ b/media/webrtc/trunk/webrtc/common_types.h
@@ -336,17 +336,17 @@ typedef struct        // All levels are 
 enum NsModes    // type of Noise Suppression
 {
     kNsUnchanged = 0,   // previously set mode
     kNsDefault,         // platform default
     kNsConference,      // conferencing default
     kNsLowSuppression,  // lowest suppression
     kNsModerateSuppression,
     kNsHighSuppression,
-    kNsVeryHighSuppression,     // highest suppression
+    kNsVeryHighSuppression     // highest suppression
 };
 
 enum AgcModes                  // type of Automatic Gain Control
 {
     kAgcUnchanged = 0,        // previously set mode
     kAgcDefault,              // platform default
     // adaptive mode for use when analog volume control exists (e.g. for
     // PC softphone)
@@ -361,17 +361,17 @@ enum AgcModes                  // type o
 
 // EC modes
 enum EcModes                   // type of Echo Control
 {
     kEcUnchanged = 0,          // previously set mode
     kEcDefault,                // platform default
     kEcConference,             // conferencing default (aggressive AEC)
     kEcAec,                    // Acoustic Echo Cancellation
-    kEcAecm,                   // AEC mobile
+    kEcAecm                    // AEC mobile
 };
 
 // AECM modes
 enum AecmModes                 // mode of AECM
 {
     kAecmQuietEarpieceOrHeadset = 0,
                                // Quiet earpiece or headset use
     kAecmEarpiece,             // most earpiece use
@@ -413,31 +413,31 @@ enum NetEqModes             // NetEQ pla
     // Improved jitter robustness at the cost of increased delay. Can be
     // used in one-way communication.
     kNetEqStreaming = 1,
     // Optimized for decodability of fax signals rather than for perceived audio
     // quality.
     kNetEqFax = 2,
     // Minimal buffer management. Inserts zeros for lost packets and during
     // buffer increases.
-    kNetEqOff = 3,
+    kNetEqOff = 3
 };
 
 enum OnHoldModes            // On Hold direction
 {
     kHoldSendAndPlay = 0,    // Put both sending and playing in on-hold state.
     kHoldSendOnly,           // Put only sending in on-hold state.
     kHoldPlayOnly            // Put only playing in on-hold state.
 };
 
 enum AmrMode
 {
     kRfc3267BwEfficient = 0,
     kRfc3267OctetAligned = 1,
-    kRfc3267FileStorage = 2,
+    kRfc3267FileStorage = 2
 };
 
 // ==================================================================
 // Video specific types
 // ==================================================================
 
 // Raw video types
 enum RawVideoType
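
The trailing-comma removals in the enums above are a portability fix: a comma
after the final enumerator is only standard as of C++11, and older compilers
(or -pedantic builds) warn on it. A minimal illustration:

    enum Example {
      kFirst,
      kSecond   // no trailing comma: compiles cleanly as strict C++03
    };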
--- a/media/webrtc/trunk/webrtc/engine_configurations.h
+++ b/media/webrtc/trunk/webrtc/engine_configurations.h
@@ -30,17 +30,19 @@
 #endif  // WEBRTC_ARCH_ARM
 #endif  // !WEBRTC_MOZILLA_BUILD
 
 // AVT is included in all builds, along with G.711, NetEQ and CNG
 // (which are mandatory and don't have any defines).
 #define WEBRTC_CODEC_AVT
 
 // PCM16 is useful for testing and incurs only a small binary size cost.
+#ifndef WEBRTC_CODEC_PCM16
 #define WEBRTC_CODEC_PCM16
+#endif
 
 // iLBC, G.722, and Redundancy coding are excluded from Chromium and Mozilla
 // builds to reduce binary size.
 #if !defined(WEBRTC_CHROMIUM_BUILD) && !defined(WEBRTC_MOZILLA_BUILD)
 #define WEBRTC_CODEC_ILBC
 #define WEBRTC_CODEC_G722
 #define WEBRTC_CODEC_RED
 #endif  // !WEBRTC_CHROMIUM_BUILD && !WEBRTC_MOZILLA_BUILD
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -10,32 +10,28 @@
 
 
 #include "pcm16b.h"
 
 #include <stdlib.h>
 
 #include "typedefs.h"
 
-#ifdef WEBRTC_BIG_ENDIAN
-#include "signal_processing_library.h"
-#endif
-
 #define HIGHEND 0xFF00
 #define LOWEND    0xFF
 
 
 
 /* Encoder with int16_t Output */
 int16_t WebRtcPcm16b_EncodeW16(int16_t *speechIn16b,
                                int16_t len,
                                int16_t *speechOut16b)
 {
 #ifdef WEBRTC_BIG_ENDIAN
-    WEBRTC_SPL_MEMCPY_W16(speechOut16b, speechIn16b, len);
+    memcpy(speechOut16b, speechIn16b, len * sizeof(int16_t));
 #else
     int i;
     for (i=0;i<len;i++) {
         speechOut16b[i]=(((uint16_t)speechIn16b[i])>>8)|((((uint16_t)speechIn16b[i])<<8)&0xFF00);
     }
 #endif
     return(len<<1);
 }
@@ -64,17 +60,17 @@ int16_t WebRtcPcm16b_Encode(int16_t *spe
 /* Decoder with int16_t Input instead of char when the int16_t Encoder is used */
 int16_t WebRtcPcm16b_DecodeW16(void *inst,
                                int16_t *speechIn16b,
                                int16_t len,
                                int16_t *speechOut16b,
                                int16_t* speechType)
 {
 #ifdef WEBRTC_BIG_ENDIAN
-    WEBRTC_SPL_MEMCPY_W8(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
+    memcpy(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
 #else
     int i;
     int samples=len>>1;
 
     for (i=0;i<samples;i++) {
         speechOut16b[i]=(((uint16_t)speechIn16b[i])>>8)|(((uint16_t)(speechIn16b[i]&0xFF))<<8);
     }
 #endif
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
@@ -39,35 +39,24 @@
         'acm_celt.h',
         'acm_cng.cc',
         'acm_cng.h',
         'acm_codec_database.cc',
         'acm_codec_database.h',
         'acm_common_defs.h',
         'acm_dtmf_playout.cc',
         'acm_dtmf_playout.h',
-        'acm_g722.cc',
-        'acm_g722.h',
-        'acm_g7221.cc',
-        'acm_g7221.h',
-        'acm_g7221c.cc',
-        'acm_g7221c.h',
         'acm_g729.cc',
         'acm_g729.h',
         'acm_g7291.cc',
         'acm_g7291.h',
         'acm_generic_codec.cc',
         'acm_generic_codec.h',
         'acm_gsmfr.cc',
         'acm_gsmfr.h',
-        'acm_ilbc.cc',
-        'acm_ilbc.h',
-        'acm_isac.cc',
-        'acm_isac.h',
-        'acm_isac_macros.h',
         'acm_opus.cc',
         'acm_opus.h',
         'acm_speex.cc',
         'acm_speex.h',
         'acm_pcm16b.cc',
         'acm_pcm16b.h',
         'acm_pcma.cc',
         'acm_pcma.h',
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
@@ -5,31 +5,76 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'audio_coding_dependencies': [
       'CNG',
-      'G711',
-      'G722',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
-      'PCM16B',
       'NetEq',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
     ],
     'audio_coding_defines': [],
     'conditions': [
       ['include_opus==1', {
         'audio_coding_dependencies': ['webrtc_opus',],
         'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
+        'audio_coding_sources': [
+          'acm_opus.cc',
+          'acm_opus.h',
+        ],
+      }],
+      ['include_g711==1', {
+        'audio_coding_dependencies': ['G711',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G711',],
+        'audio_coding_sources': [
+          'acm_pcma.cc',
+          'acm_pcma.h',
+          'acm_pcmu.cc',
+          'acm_pcmu.h',
+        ],
+      }],
+      ['include_g722==1', {
+        'audio_coding_dependencies': ['G722',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G722',],
+        'audio_coding_sources': [
+          'acm_g722.cc',
+          'acm_g722.h',
+          'acm_g7221.cc',
+          'acm_g7221.h',
+          'acm_g7221c.cc',
+          'acm_g7221c.h',
+        ],
+      }],
+      ['include_ilbc==1', {
+        'audio_coding_dependencies': ['iLBC',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ILBC',],
+        'audio_coding_sources': [
+          'acm_ilbc.cc',
+          'acm_ilbc.h',
+        ],
+      }],
+      ['include_isac==1', {
+        'audio_coding_dependencies': ['iSAC', 'iSACFix',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFX',],
+        'audio_coding_sources': [
+          'acm_isac.cc',
+          'acm_isac.h',
+          'acm_isac_macros.h',
+        ],
+      }],
+      ['include_pcm16b==1', {
+        'audio_coding_dependencies': ['PCM16B',],
+        'audio_coding_defines': ['WEBRTC_CODEC_PCM16',],
+        'audio_coding_sources': [
+          'acm_pcm16b.cc',
+          'acm_pcm16b.h',
+        ],
       }],
     ],
   },
   'targets': [
     {
       'target_name': 'audio_coding_module',
       'type': 'static_library',
       'defines': [
@@ -45,57 +90,34 @@
       ],
       'direct_dependent_settings': {
         'include_dirs': [
           '../interface',
           '../../../interface',
         ],
       },
       'sources': [
+#        '<@(audio_coding_sources)',
         '../interface/audio_coding_module.h',
         '../interface/audio_coding_module_typedefs.h',
-        'acm_amr.cc',
-        'acm_amr.h',
-        'acm_amrwb.cc',
-        'acm_amrwb.h',
-        'acm_celt.cc',
-        'acm_celt.h',
         'acm_cng.cc',
         'acm_cng.h',
         'acm_codec_database.cc',
         'acm_codec_database.h',
         'acm_dtmf_detection.cc',
         'acm_dtmf_detection.h',
         'acm_dtmf_playout.cc',
         'acm_dtmf_playout.h',
-        'acm_g722.cc',
-        'acm_g722.h',
-        'acm_g7221.cc',
-        'acm_g7221.h',
-        'acm_g7221c.cc',
-        'acm_g7221c.h',
-        'acm_g729.cc',
-        'acm_g729.h',
-        'acm_g7291.cc',
-        'acm_g7291.h',
         'acm_generic_codec.cc',
         'acm_generic_codec.h',
-        'acm_gsmfr.cc',
-        'acm_gsmfr.h',
-        'acm_ilbc.cc',
-        'acm_ilbc.h',
-        'acm_isac.cc',
-        'acm_isac.h',
-        'acm_isac_macros.h',
         'acm_neteq.cc',
         'acm_neteq.h',
+# cheat until I get audio_coding_sources to work
         'acm_opus.cc',
         'acm_opus.h',
-        'acm_speex.cc',
-        'acm_speex.h',
         'acm_pcm16b.cc',
         'acm_pcm16b.h',
         'acm_pcma.cc',
         'acm_pcma.h',
         'acm_pcmu.cc',
         'acm_pcmu.h',
         'acm_red.cc',
         'acm_red.h',
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
@@ -64,16 +64,18 @@
  * NETEQ_ISAC_CODEC               Enable iSAC
  *
  * NETEQ_ISAC_SWB_CODEC           Enable iSAC-SWB
  *
  * Note that the decoder of iSAC full-band operates at 32 kHz, that is the
  * decoded signal is at 32 kHz.
  * NETEQ_ISAC_FB_CODEC            Enable iSAC-FB
  *
+ * NETEQ_OPUS_CODEC               Enable Opus
+ *
  * NETEQ_G722_CODEC               Enable G.722
  *
  * NETEQ_G729_CODEC               Enable G.729
  *
  * NETEQ_G729_1_CODEC             Enable G.729.1
  *
  * NETEQ_G726_CODEC               Enable G.726
  *
@@ -316,39 +318,46 @@
     #define NETEQ_RED_CODEC
     #define NETEQ_VAD
     #define NETEQ_ARBITRARY_CODEC
 
     /* Narrowband codecs */
     #define NETEQ_PCM16B_CODEC
     #define NETEQ_G711_CODEC
     #define NETEQ_ILBC_CODEC
+    #define NETEQ_OPUS_CODEC
     #define NETEQ_G729_CODEC
     #define NETEQ_G726_CODEC
     #define NETEQ_GSMFR_CODEC
     #define NETEQ_AMR_CODEC
 
     /* Wideband codecs */
     #define NETEQ_WIDEBAND
     #define NETEQ_ISAC_CODEC
+    /* #define NETEQ_OPUS_CODEC -- already defined above; define only once */
     #define NETEQ_G722_CODEC
     #define NETEQ_G722_1_CODEC
     #define NETEQ_G729_1_CODEC
     #define NETEQ_SPEEX_CODEC
     #define NETEQ_AMRWB_CODEC
 
     /* Super wideband 32kHz codecs */
     #define NETEQ_ISAC_SWB_CODEC
+    /*#define NETEQ_OPUS_CODEC*/
     #define NETEQ_32KHZ_WIDEBAND
     #define NETEQ_G722_1C_CODEC
     #define NETEQ_CELT_CODEC
+    /*#define NETEQ_OPUS_CODEC*/
+
+    /* hack in 48 kHz support */
+    #define NETEQ_48KHZ_WIDEBAND
 
     /* Super wideband 48kHz codecs */
     #define NETEQ_48KHZ_WIDEBAND
-    #define NETEQ_OPUS_CODEC
+    /*#define NETEQ_OPUS_CODEC*/
     #define NETEQ_ISAC_FB
 #endif
 
 /* Max output size from decoding one frame */
 #if defined(NETEQ_48KHZ_WIDEBAND)
     #define NETEQ_MAX_FRAME_SIZE 5760  /* 120 ms super wideband */
     #define NETEQ_MAX_OUTPUT_SIZE 6480  /* 120+15 ms super wideband (120 ms
                                          * decoded + 15 ms for merge overlap) */
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
@@ -673,16 +673,21 @@ int WebRtcNetEQ_GetDefaultCodecSettings(
             codecBytes = 1560; /* 240ms @ 52kbps (30ms frames) */
             codecBuffers = 8;
         }
         else if (codecID[i] == kDecoderOpus)
         {
             codecBytes = 15300; /* 240ms @ 510kbps (60ms frames) */
             codecBuffers = 30;  /* Replicating the value for PCMu/a */
         }
         else if ((codecID[i] == kDecoderPCM16B) ||
             (codecID[i] == kDecoderPCM16B_2ch))
         {
             codecBytes = 3360; /* 210ms */
             codecBuffers = 15;
         }
         else if ((codecID[i] == kDecoderPCM16Bwb) ||
             (codecID[i] == kDecoderPCM16Bwb_2ch))
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.cc
@@ -21,91 +21,92 @@
 #include "webrtc/modules/audio_device/android/audio_device_jni_android.h"
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#include "AndroidJNIWrapper.h"
+
 namespace webrtc
 {
 // TODO(leozwang): Refactor jni and the following global variables, a
 // good example is jni_helper in Chromium.
 JavaVM* AudioDeviceAndroidJni::globalJvm = NULL;
-JNIEnv* AudioDeviceAndroidJni::globalJNIEnv = NULL;
 jobject AudioDeviceAndroidJni::globalContext = NULL;
 jclass AudioDeviceAndroidJni::globalScClass = NULL;
 
 // ----------------------------------------------------------------------------
 //  SetAndroidAudioDeviceObjects
 //
 //  Global function for setting Java pointers and creating Java
 //  objects that are global to all instances of VoiceEngine used
 //  by the same Java application.
 // ----------------------------------------------------------------------------
 
 int32_t AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
     void* javaVM,
-    void* env,
     void* context) {
-  __android_log_print(ANDROID_LOG_DEBUG, "WEBRTC", "JNI:%s", __FUNCTION__);
+  return SetAndroidAudioDeviceObjects(javaVM, NULL, context);
+}
+
+int32_t AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
+    void* javaVM,
+    void* null_env,
+    void* context) {
+  WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, -1,
+               "%s called", __FUNCTION__);
 
   // TODO(leozwang): Make this function thread-safe.
   globalJvm = reinterpret_cast<JavaVM*>(javaVM);
 
-  if (env) {
-    globalJNIEnv = reinterpret_cast<JNIEnv*>(env);
+  JNIEnv* env = NULL;
+
+  // Check if we already got a reference
+  if (globalJvm && !globalScClass) {
+    if (globalJvm->GetEnv((void**)&env, JNI_VERSION_1_4) != JNI_OK) {
+      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioDevice, -1,
+                   "%s: could not get Java environment", __FUNCTION__);
+      return -1;
+    }
+    globalJvm->AttachCurrentThread(&env, NULL);
+
     // Get java class type (note path to class packet).
-    jclass javaScClassLocal = globalJNIEnv->FindClass(
-        "org/webrtc/voiceengine/WebRTCAudioDevice");
-    if (!javaScClassLocal) {
+    globalScClass = jsjni_GetGlobalClassRef(AudioCaptureClass);
+    if (!globalScClass) {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                    "%s: could not find java class", __FUNCTION__);
       return -1; // exception thrown
     }
 
-    // Create a global reference to the class (to tell JNI that we are
-    // referencing it after this function has returned).
-    globalScClass = reinterpret_cast<jclass> (
-        globalJNIEnv->NewGlobalRef(javaScClassLocal));
-    if (!globalScClass) {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                   "%s: could not create reference", __FUNCTION__);
-      return -1;
-    }
-
-    globalContext = globalJNIEnv->NewGlobalRef(
+    globalContext = env->NewGlobalRef(
         reinterpret_cast<jobject>(context));
     if (!globalContext) {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                    "%s: could not create context reference", __FUNCTION__);
       return -1;
     }
-
-    // Delete local class ref, we only use the global ref
-    globalJNIEnv->DeleteLocalRef(javaScClassLocal);
   }
   else { // User is resetting the env variable
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
                  "%s: env is NULL, assuming deinit", __FUNCTION__);
 
-    if (!globalJNIEnv) {
+    if (!env) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                    "%s: saved env already NULL", __FUNCTION__);
       return 0;
     }
 
-    globalJNIEnv->DeleteGlobalRef(globalScClass);
+    env->DeleteGlobalRef(globalScClass);
     globalScClass = reinterpret_cast<jclass>(NULL);
 
-    globalJNIEnv->DeleteGlobalRef(globalContext);
+    env->DeleteGlobalRef(globalContext);
     globalContext = reinterpret_cast<jobject>(NULL);
-
-    globalJNIEnv = reinterpret_cast<JNIEnv*>(NULL);
   }
 
   return 0;
 }
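
The rewritten setup above uses the standard JNI recipe for code that may run
on threads the VM has not seen: ask GetEnv() for the thread's JNIEnv and
attach if necessary. A minimal sketch of that recipe (the helper name is
illustrative, not part of the patch):

    #include <jni.h>

    // Returns a JNIEnv* valid on the calling thread, attaching the thread to
    // the VM if needed; *attached tells the caller to DetachCurrentThread()
    // once it is done.
    JNIEnv* GetEnvForThread(JavaVM* jvm, bool* attached) {
      JNIEnv* env = NULL;
      *attached = false;
      if (jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_4)
          == JNI_EDETACHED) {
        if (jvm->AttachCurrentThread(&env, NULL) != JNI_OK)
          return NULL;
        *attached = true;
      }
      return env;
    }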
 
 // ============================================================================
 //                            Construction & Destruction
 // ============================================================================
@@ -135,18 +136,18 @@ AudioDeviceAndroidJni::AudioDeviceAndroi
             _playoutDeviceIsSpecified(false), _initialized(false),
             _recording(false), _playing(false), _recIsInitialized(false),
             _playIsInitialized(false), _micIsInitialized(false),
             _speakerIsInitialized(false), _startRec(false),
             _startPlay(false), _playWarning(0),
             _playError(0), _recWarning(0), _recError(0), _delayPlayout(0),
             _delayRecording(0),
             _AGC(false),
-            _samplingFreqIn((N_REC_SAMPLES_PER_SEC/1000)),
-            _samplingFreqOut((N_PLAY_SAMPLES_PER_SEC/1000)),
+            _samplingFreqIn((N_REC_SAMPLES_PER_SEC)),
+            _samplingFreqOut((N_PLAY_SAMPLES_PER_SEC)),
             _maxSpeakerVolume(0),
             _loudSpeakerOn(false),
             _recAudioSource(1), // 1 is AudioSource.MIC which is our default
             _javaVM(NULL), _jniEnvPlay(NULL),
             _jniEnvRec(NULL), _javaScClass(0), _javaScObj(0),
             _javaPlayBuffer(0), _javaRecBuffer(0), _javaDirectPlayBuffer(NULL),
             _javaDirectRecBuffer(NULL), _javaMidPlayAudio(0),
             _javaMidRecAudio(0)
@@ -1380,36 +1381,29 @@ int32_t AudioDeviceAndroidJni::InitPlayo
             return -1;
         }
         isAttached = true;
     }
 
     // get the method ID
     jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                                 "(I)I");
-
-    int samplingFreq = 44100;
-    if (_samplingFreqOut != 44)
-    {
-        samplingFreq = _samplingFreqOut * 1000;
-    }
-
     int retVal = -1;
 
     // Call java sc object method
-    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq);
+    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, _samplingFreqOut);
     if (res < 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "InitPlayback failed (%d)", res);
     }
     else
     {
         // Set the audio device buffer sampling rate
-        _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut * 1000);
+        _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut);
         _playIsInitialized = true;
         retVal = 0;
     }
 
     // Detach this thread if it was attached
     if (isAttached)
     {
         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
@@ -1485,40 +1479,33 @@ int32_t AudioDeviceAndroidJni::InitRecor
             return -1;
         }
         isAttached = true;
     }
 
     // get the method ID
     jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                  "(II)I");
-
-    int samplingFreq = 44100;
-    if (_samplingFreqIn != 44)
-    {
-        samplingFreq = _samplingFreqIn * 1000;
-    }
-
     int retVal = -1;
 
     // call java sc object method
     jint res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
-                                  samplingFreq);
+                                  _samplingFreqIn);
     if (res < 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "InitRecording failed (%d)", res);
     }
     else
     {
         // Set the audio device buffer sampling rate
-        _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn * 1000);
+        _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn);
 
         // the init rec function returns a fixed delay
-        _delayRecording = res / _samplingFreqIn;
+        _delayRecording = (res * 1000) / _samplingFreqIn;
 
         _recIsInitialized = true;
         retVal = 0;
     }
 
     // Detach this thread if it was attached
     if (isAttached)
     {
@@ -2026,24 +2013,17 @@ int32_t AudioDeviceAndroidJni::SetRecord
     if (samplesPerSec > 48000 || samplesPerSec < 8000)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "  Invalid sample rate");
         return -1;
     }
 
     // set the recording sample rate to use
-    if (samplesPerSec == 44100)
-    {
-        _samplingFreqIn = 44;
-    }
-    else
-    {
-        _samplingFreqIn = samplesPerSec / 1000;
-    }
+    _samplingFreqIn = samplesPerSec;
 
     // Update the AudioDeviceBuffer
     _ptrAudioBuffer->SetRecordingSampleRate(samplesPerSec);
 
     return 0;
 }
 
 // ----------------------------------------------------------------------------
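
With the sampling frequencies now stored in Hz rather than kHz, delay
bookkeeping converts sample counts to milliseconds explicitly, as in the
InitRecording() hunk above. A quick check of that arithmetic (values
illustrative):

    int res = 441;                 // fixed delay reported in samples
    int sampling_freq_in = 44100;  // recording rate in Hz
    int delay_ms = (res * 1000) / sampling_freq_in;  // 441000/44100 == 10 ms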
@@ -2057,24 +2037,17 @@ int32_t AudioDeviceAndroidJni::SetPlayou
     if (samplesPerSec > 48000 || samplesPerSec < 8000)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "  Invalid sample rate");
         return -1;
     }
 
     // set the playout sample rate to use
-    if (samplesPerSec == 44100)
-    {
-        _samplingFreqOut = 44;
-    }
-    else
-    {
-        _samplingFreqOut = samplesPerSec / 1000;
-    }
+    _samplingFreqOut = samplesPerSec;
 
     // Update the AudioDeviceBuffer
     _ptrAudioBuffer->SetPlayoutSampleRate(samplesPerSec);
 
     return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -2206,17 +2179,17 @@ int32_t AudioDeviceAndroidJni::InitJavaR
     if (cid == NULL)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "%s: could not get constructor ID", __FUNCTION__);
         return -1; /* exception thrown */
     }
 
     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                 "construct object", __FUNCTION__);
+                 "%s: construct object", __FUNCTION__);
 
     // construct the object
     jobject javaScObjLocal = env->NewObject(_javaScClass, cid);
     if (!javaScObjLocal)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                      "%s: could not create Java sc object", __FUNCTION__);
         return -1;
@@ -2418,21 +2391,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
             return -1;
         }
         isAttached = true;
     }
 
     if (_samplingFreqIn > 0)
     {
         // read the configured sampling rate
-        samplingFreq = 44100;
-        if (_samplingFreqIn != 44)
-        {
-            samplingFreq = _samplingFreqIn * 1000;
-        }
+        samplingFreq = _samplingFreqIn;
         WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                      "  Trying configured recording sampling rate %d",
                      samplingFreq);
     }
 
     // get the method ID
     jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                  "(II)I");
@@ -2463,24 +2432,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
         }
         else
         {
             keepTrying = false;
         }
     }
 
     // set the recording sample rate to use
-    if (samplingFreq == 44100)
-    {
-        _samplingFreqIn = 44;
-    }
-    else
-    {
-        _samplingFreqIn = samplingFreq / 1000;
-    }
+    _samplingFreqIn = samplingFreq;
 
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "Recording sample rate set to (%d)", _samplingFreqIn);
 
     // get the method ID
     jmethodID stopRecordingID = env->GetMethodID(_javaScClass, "StopRecording",
                                                  "()I");
 
@@ -2494,21 +2456,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
 
     // get the method ID
     jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                                 "(I)I");
 
     if (_samplingFreqOut > 0)
     {
         // read the configured sampling rate
-        samplingFreq = 44100;
-        if (_samplingFreqOut != 44)
-        {
-            samplingFreq = _samplingFreqOut * 1000;
-        }
+        samplingFreq = _samplingFreqOut;
         WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                      "  Trying configured playback sampling rate %d",
                      samplingFreq);
     }
     else
     {
         // set the preferred sampling frequency
         if (samplingFreq == 8000)
@@ -2552,25 +2510,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
     if (_maxSpeakerVolume < 1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                      "  Did not get valid max speaker volume value (%d)",
                      _maxSpeakerVolume);
     }
 
     // set the playback sample rate to use
-    if (samplingFreq == 44100)
-    {
-        _samplingFreqOut = 44;
-    }
-    else
-    {
-        _samplingFreqOut = samplingFreq / 1000;
-    }
-
+    _samplingFreqOut = samplingFreq;
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "Playback sample rate set to (%d)", _samplingFreqOut);
 
     // get the method ID
     jmethodID stopPlaybackID = env->GetMethodID(_javaScClass, "StopPlayback",
                                                 "()I");
 
     // Call java sc object method
@@ -2673,17 +2623,17 @@ bool AudioDeviceAndroidJni::PlayThreadPr
         _playStartStopEvent.Set();
         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                      "Sent signal");
     }
 
     if (_playing)
     {
         int8_t playBuffer[2 * 480]; // Max 10 ms @ 48 kHz / 16 bit
-        uint32_t samplesToPlay = _samplingFreqOut * 10;
+        uint32_t samplesToPlay = _samplingFreqOut / 100;
 
         // ask for new PCM data to be played out using the AudioDeviceBuffer
         // ensure that this callback is executed without taking the
         // audio-thread lock
         UnLock();
         uint32_t nSamples =
                 _ptrAudioBuffer->RequestPlayoutData(samplesToPlay);
         Lock();
@@ -2718,17 +2668,17 @@ bool AudioDeviceAndroidJni::PlayThreadPr
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "PlayAudio failed (%d)", res);
             _playWarning = 1;
         }
         else if (res > 0)
         {
             // we are not recording and have got a delay value from playback
-            _delayPlayout = res / _samplingFreqOut;
+            _delayPlayout = (res * 1000) / _samplingFreqOut;
         }
         // If 0 is returned we are recording and then play delay is updated
         // in RecordProcess
 
         Lock();
 
     }  // _playing
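
Under the Hz representation a 10 ms block is rate / 100 samples, so samplesToPlay above peaks at 480 samples at 48 kHz, exactly filling the 2 * 480 byte 16-bit playBuffer (samplingRateHz below is an illustrative name):

    // 10 ms of mono 16-bit audio at the supported rates:
    //    8000 Hz ->  80 samples (160 bytes)
    //   44100 Hz -> 441 samples (882 bytes)
    //   48000 Hz -> 480 samples (960 bytes) == sizeof(playBuffer)
    uint32_t samplesPer10Ms = samplingRateHz / 100;
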
 
@@ -2816,34 +2766,34 @@ bool AudioDeviceAndroidJni::RecThreadPro
         _recording = true;
         _recWarning = 0;
         _recError = 0;
         _recStartStopEvent.Set();
     }
 
     if (_recording)
     {
-        uint32_t samplesToRec = _samplingFreqIn * 10;
+        uint32_t samplesToRec = _samplingFreqIn / 100;
 
         // Call java sc object method to record data to direct buffer
         // Will block until data has been recorded (see java sc class),
         // therefore we must release the lock
         UnLock();
         jint playDelayInSamples = _jniEnvRec->CallIntMethod(_javaScObj,
                                                             _javaMidRecAudio,
                                                             2 * samplesToRec);
         if (playDelayInSamples < 0)
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "RecordAudio failed");
             _recWarning = 1;
         }
         else
         {
-            _delayPlayout = playDelayInSamples / _samplingFreqOut;
+            _delayPlayout = (playDelayInSamples * 1000) / _samplingFreqOut;
         }
         Lock();
 
         // Check again since recording may have stopped during Java call
         if (_recording)
         {
 //            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
 //                         "total delay is %d", msPlayDelay + _delayRecording);
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.h
@@ -15,37 +15,33 @@
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_JNI_ANDROID_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_JNI_ANDROID_H
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
 #include <jni.h> // For accessing AudioDeviceAndroid java class
 
+#define AudioCaptureClass "org/webrtc/voiceengine/WebRTCAudioDevice"
+
 namespace webrtc
 {
 class EventWrapper;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-
-const uint32_t N_REC_CHANNELS = 1; // default is mono recording
-const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
-
-const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
-
-
 class ThreadWrapper;
 
 class AudioDeviceAndroidJni : public AudioDeviceGeneric {
  public:
   AudioDeviceAndroidJni(const int32_t id);
   ~AudioDeviceAndroidJni();
 
   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
+                                              void* context);
+
+  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
                                               void* env,
                                               void* context);
 
   virtual int32_t ActiveAudioLayer(
       AudioDeviceModule::AudioLayer& audioLayer) const;
 
   virtual int32_t Init();
   virtual int32_t Terminate();
@@ -153,16 +149,24 @@ class AudioDeviceAndroidJni : public Aud
   virtual int32_t SetRecordingSampleRate(
       const uint32_t samplesPerSec);
   virtual int32_t SetPlayoutSampleRate(
       const uint32_t samplesPerSec);
 
   virtual int32_t SetLoudspeakerStatus(bool enable);
   virtual int32_t GetLoudspeakerStatus(bool& enable) const;
 
+  static const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
+  static const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
+
+  static const uint32_t N_REC_CHANNELS = 1; // default is mono recording
+  static const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
+
+  static const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
+
  private:
   // Lock
   void Lock() {
     _critSect.Enter();
   };
   void UnLock() {
     _critSect.Leave();
   };
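
The new two-argument SetAndroidAudioDeviceObjects overload drops the JNIEnv parameter, so callers holding only the JavaVM need not supply an env themselves. Its body is not shown in this hunk; one plausible forwarding implementation, assuming the calling thread is already attached to the VM, would be:

    // Hypothetical sketch; the real body lives in audio_device_jni_android.cc.
    int32_t AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(void* javaVM,
                                                                void* context) {
      JavaVM* jvm = reinterpret_cast<JavaVM*>(javaVM);
      JNIEnv* env = NULL;
      if (!jvm || jvm->GetEnv(reinterpret_cast<void**>(&env),
                              JNI_VERSION_1_4) != JNI_OK) {
        return -1;  // not attached; AttachCurrentThread would be needed
      }
      return SetAndroidAudioDeviceObjects(javaVM, env, context);
    }
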
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
@@ -31,16 +31,17 @@ class AudioManagerJni {
   // called once.
   // This function must be called by a Java thread as calling it from a thread
   // created by the native application will prevent FindClass from working. See
   // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
   // for more details.
   // It has to be called for this class' APIs to be successful. Calling
   // ClearAndroidAudioDeviceObjects will prevent this class' APIs to be called
   // successfully if SetAndroidAudioDeviceObjects is not called after it.
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
                                            void* context);
   // This function must be called when the AudioManagerJni class is no
   // longer needed. It frees up the global references acquired in
   // SetAndroidAudioDeviceObjects.
   static void ClearAndroidAudioDeviceObjects();
 
   bool low_latency_supported() { return low_latency_supported_; }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/opensles_input.h"
 
 #include <assert.h>
+#include <dlfcn.h>
 
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 using webrtc_opensl::kDefaultSampleRate;
@@ -60,34 +61,61 @@ OpenSlesInput::OpenSlesInput(
       sles_engine_itf_(NULL),
       sles_recorder_(NULL),
       sles_recorder_itf_(NULL),
       sles_recorder_sbq_itf_(NULL),
       audio_buffer_(NULL),
       active_queue_(0),
       rec_sampling_rate_(0),
       agc_enabled_(false),
-      recording_delay_(0) {
+      recording_delay_(0),
+      opensles_lib_(NULL) {
 }
 
 OpenSlesInput::~OpenSlesInput() {
 }
 
 int32_t OpenSlesInput::Init() {
   assert(!initialized_);
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to dlopen OpenSLES library");
+      return -1;
+  }
+
+  f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_RECORD_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_RECORD");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_RECORD_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to find OpenSLES function");
+      return -1;
+  }
+
   // Set up OpenSL engine.
-  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
+  OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
                                           NULL, NULL),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                     SL_BOOLEAN_FALSE),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
-                                                         SL_IID_ENGINE,
+                                                         SL_IID_ENGINE_,
                                                          &sles_engine_itf_),
                            -1);
 
   if (InitSampleRate() != 0) {
     return -1;
   }
   AllocateBuffers();
   initialized_ = true;
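
Note that each assignment above dereferences the dlsym() result before the combined NULL test runs, so a missing SL_IID_* symbol would fault at the dereference rather than reach the error path. A more defensive variant (a sketch, not the landed code):

    // Resolve one SLInterfaceID, validating the symbol before dereferencing.
    static bool LoadIid(void* lib, const char* name, SLInterfaceID* out) {
      void* sym = dlsym(lib, name);
      if (!sym)
        return false;  // symbol absent from this libOpenSLES.so
      *out = *reinterpret_cast<SLInterfaceID*>(sym);
      return true;
    }
    // e.g.: if (!LoadIid(opensles_lib_, "SL_IID_ENGINE", &SL_IID_ENGINE_))
    //           return -1;
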
@@ -96,16 +124,17 @@ int32_t OpenSlesInput::Init() {
 
 int32_t OpenSlesInput::Terminate() {
   // It is assumed that the caller has stopped recording before terminating.
   assert(!recording_);
   (*sles_engine_)->Destroy(sles_engine_);
   initialized_ = false;
   mic_initialized_ = false;
   rec_initialized_ = false;
+  dlclose(opensles_lib_);
   return 0;
 }
 
 int32_t OpenSlesInput::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
   assert(index == 0);
   // Empty strings.
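
Terminate() now calls dlclose(opensles_lib_) unconditionally; if the dlopen() in Init() failed, the stored handle is NULL, and dlclose(NULL) is not portable. A guarded form (illustrative only):

    if (opensles_lib_) {
      dlclose(opensles_lib_);
      opensles_lib_ = NULL;
    }
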
@@ -265,18 +294,22 @@ void OpenSlesInput::UpdateRecordingDelay
   // TODO(hellner): Add accurate delay estimate.
   // On average half the current buffer will have been filled with audio.
   int outstanding_samples =
       (TotalBuffersUsed() - 0.5) * buffer_size_samples();
   recording_delay_ = outstanding_samples / (rec_sampling_rate_ / 1000);
 }
 
 void OpenSlesInput::UpdateSampleRate() {
+#if !defined(WEBRTC_GONK)
   rec_sampling_rate_ = audio_manager_.low_latency_supported() ?
       audio_manager_.native_output_sample_rate() : kDefaultSampleRate;
+#else
+  rec_sampling_rate_ = kDefaultSampleRate;
+#endif
 }
 
 void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
   // Buffer size is 10ms of data.
   num_fifo_buffers_needed_ = kNum10MsToBuffer;
 }
 
 void OpenSlesInput::AllocateBuffers() {
@@ -340,17 +373,17 @@ bool OpenSlesInput::CreateAudioRecorder(
   SLDataFormat_PCM configuration =
       webrtc_opensl::CreatePcmConfiguration(rec_sampling_rate_);
   SLDataSink audio_sink = { &simple_buf_queue, &configuration };
 
   // Interfaces for recording android audio data and Android are needed.
   // Note the interfaces still need to be initialized. This only tells OpenSl
   // that the interfaces will be needed at some point.
   const SLInterfaceID id[kNumInterfaces] = {
-    SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_ANDROIDSIMPLEBUFFERQUEUE_, SL_IID_ANDROIDCONFIGURATION_ };
   const SLboolean req[kNumInterfaces] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
                                                &sles_recorder_,
                                                &audio_source,
                                                &audio_sink,
                                                kNumInterfaces,
@@ -358,23 +391,23 @@ bool OpenSlesInput::CreateAudioRecorder(
                                                req),
       false);
 
   // Realize the recorder in synchronous mode.
   OPENSL_RETURN_ON_FAILURE((*sles_recorder_)->Realize(sles_recorder_,
                                                       SL_BOOLEAN_FALSE),
                            false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
+      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD_,
                                       static_cast<void*>(&sles_recorder_itf_)),
       false);
   OPENSL_RETURN_ON_FAILURE(
       (*sles_recorder_)->GetInterface(
           sles_recorder_,
-          SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+          SL_IID_ANDROIDSIMPLEBUFFERQUEUE_,
           static_cast<void*>(&sles_recorder_sbq_itf_)),
       false);
   return true;
 }
 
 void OpenSlesInput::DestroyAudioRecorder() {
   event_.Stop();
   if (sles_recorder_sbq_itf_) {
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
@@ -10,17 +10,19 @@
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
+#if !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#endif
 #include "webrtc/modules/audio_device/android/low_latency_event.h"
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
 
 namespace webrtc {
 
@@ -160,18 +162,20 @@ class OpenSlesInput {
   bool StartCbThreads();
   void StopCbThreads();
   static bool CbThread(void* context);
   // This function must be protected against data race with threads calling this
   // class' public functions. It is a requirement for this class to be
   // Thread-compatible.
   bool CbThreadImpl();
 
+#if !defined(WEBRTC_GONK)
   // Java API handle
   AudioManagerJni audio_manager_;
+#endif
 
   int id_;
   webrtc_opensl::PlayoutDelayProvider* delay_provider_;
   bool initialized_;
   bool mic_initialized_;
   bool rec_initialized_;
 
   // Members that are read/write accessed concurrently by the process thread and
@@ -207,13 +211,28 @@ class OpenSlesInput {
   int active_queue_;
 
   // Audio settings
   uint32_t rec_sampling_rate_;
   bool agc_enabled_;
 
   // Audio status
   uint16_t recording_delay_;
+
+  // dlopen for OpenSLES
+  void *opensles_lib_;
+  typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
+                                       SLuint32,
+                                       const SLEngineOption *,
+                                       SLuint32,
+                                       const SLInterfaceID *,
+                                       const SLboolean *);
+  slCreateEngine_t f_slCreateEngine;
+  SLInterfaceID SL_IID_ENGINE_;
+  SLInterfaceID SL_IID_BUFFERQUEUE_;
+  SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
+  SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
+  SLInterfaceID SL_IID_RECORD_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
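
The trailing underscore on the new SL_IID_*_ members keeps them from colliding with the identically named extern SLInterfaceID globals that the OpenSL ES headers declare, since this class now resolves those values at runtime instead of taking them from the linker:

    // Declared (approximately) by <SLES/OpenSLES.h>, no longer linked against:
    //   extern const SLInterfaceID SL_IID_ENGINE;
    // Runtime-resolved copy filled in by Init() via dlsym():
    SLInterfaceID SL_IID_ENGINE_;
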
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/opensles_output.h"
 
 #include <assert.h>
+#include <dlfcn.h>
 
 #include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
@@ -60,34 +61,63 @@ OpenSlesOutput::OpenSlesOutput(const int
       sles_player_itf_(NULL),
       sles_player_sbq_itf_(NULL),
       sles_output_mixer_(NULL),
       audio_buffer_(NULL),
       active_queue_(0),
       speaker_sampling_rate_(kDefaultSampleRate),
       buffer_size_samples_(0),
       buffer_size_bytes_(0),
-      playout_delay_(0) {
+      playout_delay_(0),
+      opensles_lib_(NULL) {
 }
 
 OpenSlesOutput::~OpenSlesOutput() {
 }
 
 int32_t OpenSlesOutput::Init() {
   assert(!initialized_);
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to dlopen OpenSLES library");
+      return -1;
+  }
+
+  f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_PLAY_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_PLAY");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_VOLUME_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_VOLUME");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_PLAY_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_VOLUME_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to find OpenSLES function");
+      return -1;
+  }
+
   // Set up OpenSl engine.
-  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
+  OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
                                           NULL, NULL),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                     SL_BOOLEAN_FALSE),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
-                                                         SL_IID_ENGINE,
+                                                         SL_IID_ENGINE_,
                                                          &sles_engine_itf_),
                            -1);
   // Set up OpenSl output mix.
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_,
                                            &sles_output_mixer_,
                                            0,
                                            NULL,
@@ -109,16 +139,17 @@ int32_t OpenSlesOutput::Init() {
 int32_t OpenSlesOutput::Terminate() {
   // It is assumed that the caller has stopped recording before terminating.
   assert(!playing_);
   (*sles_output_mixer_)->Destroy(sles_output_mixer_);
   (*sles_engine_)->Destroy(sles_engine_);
   initialized_ = false;
   speaker_initialized_ = false;
   play_initialized_ = false;
+  dlclose(opensles_lib_);
   return 0;
 }
 
 int32_t OpenSlesOutput::PlayoutDeviceName(uint16_t index,
                                           char name[kAdmMaxDeviceNameSize],
                                           char guid[kAdmMaxGuidSize]) {
   assert(index == 0);
   // Empty strings.
@@ -297,24 +328,28 @@ bool OpenSlesOutput::InitSampleRate() {
 void OpenSlesOutput::UpdatePlayoutDelay() {
   // TODO(hellner): Add accurate delay estimate.
   // On average half the current buffer will have been played out.
   int outstanding_samples = (TotalBuffersUsed() - 0.5) * buffer_size_samples_;
   playout_delay_ = outstanding_samples / (speaker_sampling_rate_ / 1000);
 }
 
 bool OpenSlesOutput::SetLowLatency() {
+#if !defined(WEBRTC_GONK)
   if (!audio_manager_.low_latency_supported()) {
     return false;
   }
   buffer_size_samples_ = audio_manager_.native_buffer_size();
   assert(buffer_size_samples_ > 0);
   speaker_sampling_rate_ = audio_manager_.native_output_sample_rate();
   assert(speaker_sampling_rate_ > 0);
   return true;
+#else
+  return false;
+#endif
 }
 
 void OpenSlesOutput::CalculateNumFifoBuffersNeeded() {
   int number_of_bytes_needed =
       (speaker_sampling_rate_ * kNumChannels * sizeof(int16_t)) * 10 / 1000;
 
   // Ceiling of integer division: 1 + ((x - 1) / y)
   int buffers_per_10_ms =
@@ -390,34 +425,34 @@ bool OpenSlesOutput::CreateAudioPlayer()
   locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
   locator_outputmix.outputMix = sles_output_mixer_;
   SLDataSink audio_sink = { &locator_outputmix, NULL };
 
   // Interfaces for streaming audio data, setting volume and Android are needed.
   // Note the interfaces still need to be initialized. This only tells OpenSl
   // that the interfaces will be needed at some point.
   SLInterfaceID ids[kNumInterfaces] = {
-    SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_BUFFERQUEUE_, SL_IID_VOLUME_, SL_IID_ANDROIDCONFIGURATION_ };
   SLboolean req[kNumInterfaces] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, &sles_player_,
                                              &audio_source, &audio_sink,
                                              kNumInterfaces, ids, req),
       false);
   // Realize the player in synchronous mode.
   OPENSL_RETURN_ON_FAILURE((*sles_player_)->Realize(sles_player_,
                                                     SL_BOOLEAN_FALSE),
                            false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY,
+      (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY_,
                                     &sles_player_itf_),
       false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE,
+      (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE_,
                                     &sles_player_sbq_itf_),
       false);
   return true;
 }
 
 void OpenSlesOutput::DestroyAudioPlayer() {
   SLAndroidSimpleBufferQueueItf sles_player_sbq_itf = sles_player_sbq_itf_;
   {
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
@@ -10,17 +10,19 @@
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
+#if !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#endif
 #include "webrtc/modules/audio_device/android/low_latency_event.h"
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
 
 namespace webrtc {
 
@@ -176,18 +178,20 @@ class OpenSlesOutput : public webrtc_ope
   bool StartCbThreads();
   void StopCbThreads();
   static bool CbThread(void* context);
   // This function must be protected against data race with threads calling this
   // class' public functions. It is a requirement for this class to be
   // Thread-compatible.
   bool CbThreadImpl();
 
+#if !defined(WEBRTC_GONK)
   // Java API handle
   AudioManagerJni audio_manager_;
+#endif
 
   int id_;
   bool initialized_;
   bool speaker_initialized_;
   bool play_initialized_;
 
   // Members that are read/write accessed concurrently by the process thread and
   // threads calling public functions of this class.
@@ -224,13 +228,29 @@ class OpenSlesOutput : public webrtc_ope
 
   // Audio settings
   uint32_t speaker_sampling_rate_;
   int buffer_size_samples_;
   int buffer_size_bytes_;
 
   // Audio status
   uint16_t playout_delay_;
+
+  // dlopen for OpenSLES
+  void *opensles_lib_;
+  typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
+                                       SLuint32,
+                                       const SLEngineOption *,
+                                       SLuint32,
+                                       const SLInterfaceID *,
+                                       const SLboolean *);
+  slCreateEngine_t f_slCreateEngine;
+  SLInterfaceID SL_IID_ENGINE_;
+  SLInterfaceID SL_IID_BUFFERQUEUE_;
+  SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
+  SLInterfaceID SL_IID_PLAY_;
+  SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
+  SLInterfaceID SL_IID_VOLUME_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
@@ -41,21 +41,29 @@
         'audio_device_impl.h',
         'audio_device_config.h',
         'dummy/audio_device_dummy.cc',
         'dummy/audio_device_dummy.h',
         'dummy/audio_device_utility_dummy.cc',
         'dummy/audio_device_utility_dummy.h',
       ],
       'conditions': [
-        ['OS=="linux"', {
+        ['build_with_mozilla==1', {
+          'include_dirs': [
+            '$(DIST)/include',
+          ],
+          'cflags_mozilla': [
+            '$(NSPR_CFLAGS)',
+          ],
+        }],
+        ['OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1', {
           'include_dirs': [
             'linux',
           ],
-        }], # OS==linux
+        }], # OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1
         ['OS=="ios"', {
           'include_dirs': [
             'ios',
           ],
         }], # OS==ios
         ['OS=="mac"', {
           'include_dirs': [
             'mac',
@@ -63,34 +71,41 @@
         }], # OS==mac
         ['OS=="win"', {
           'include_dirs': [
             'win',
           ],
         }],
         ['OS=="android"', {
           'include_dirs': [
+            '$(topsrcdir)/widget/android',
             'android',
           ],
         }], # OS==android
+        ['moz_widget_toolkit_gonk==1', {
+          'include_dirs': [
+            '$(ANDROID_SOURCE)/frameworks/wilhelm/include',
+            '$(ANDROID_SOURCE)/system/media/wilhelm/include',
+            'android',
+          ],
+        }], # moz_widget_toolkit_gonk==1
+        ['enable_android_opensl==1', {
+          'include_dirs': [
+            'opensl',
+          ],
+        }], # enable_android_opensl
         ['include_internal_audio_device==0', {
           'defines': [
             'WEBRTC_DUMMY_AUDIO_BUILD',
           ],
         }],
         ['include_internal_audio_device==1', {
           'sources': [
-            'linux/alsasymboltable_linux.cc',
-            'linux/alsasymboltable_linux.h',
-            'linux/audio_device_alsa_linux.cc',
-            'linux/audio_device_alsa_linux.h',
             'linux/audio_device_utility_linux.cc',
             'linux/audio_device_utility_linux.h',
-            'linux/audio_mixer_manager_alsa_linux.cc',
-            'linux/audio_mixer_manager_alsa_linux.h',
             'linux/latebindingsymboltable_linux.cc',
             'linux/latebindingsymboltable_linux.h',
             'ios/audio_device_ios.cc',
             'ios/audio_device_ios.h',
             'ios/audio_device_utility_ios.cc',
             'ios/audio_device_utility_ios.h',
             'mac/audio_device_mac.cc',
             'mac/audio_device_mac.h',
@@ -104,78 +119,100 @@
             'win/audio_device_core_win.cc',
             'win/audio_device_core_win.h',
             'win/audio_device_wave_win.cc',
             'win/audio_device_wave_win.h',
             'win/audio_device_utility_win.cc',
             'win/audio_device_utility_win.h',
             'win/audio_mixer_manager_win.cc',
             'win/audio_mixer_manager_win.h',
-            'android/audio_device_utility_android.cc',
-            'android/audio_device_utility_android.h',
           ],
           'conditions': [
             ['OS=="android"', {
+              'sources': [
+                'opensl/audio_manager_jni.cc',
+                'opensl/audio_manager_jni.h',
+                'android/audio_device_jni_android.cc',
+                'android/audio_device_jni_android.h',
+              ],
+            }],
+            ['OS=="android" or moz_widget_toolkit_gonk==1', {
               'link_settings': {
                 'libraries': [
                   '-llog',
                   '-lOpenSLES',
                 ],
               },
               'conditions': [
                 ['enable_android_opensl==1', {
                   'sources': [
-                    'android/audio_device_opensles_android.cc',
-                    'android/audio_device_opensles_android.h',
-                    'android/audio_manager_jni.cc',
-                    'android/audio_manager_jni.h',
-                    'android/fine_audio_buffer.cc',
-                    'android/fine_audio_buffer.h',
-                    'android/low_latency_event_posix.cc',
-                    'android/low_latency_event.h',
-                    'android/opensles_common.cc',
-                    'android/opensles_common.h',
-                    'android/opensles_input.cc',
-                    'android/opensles_input.h',
-                    'android/opensles_output.cc',
-                    'android/opensles_output.h',
-                    'android/single_rw_fifo.cc',
-                    'android/single_rw_fifo.h',
+                    'opensl/audio_device_opensles.cc',
+                    'opensl/audio_device_opensles.h',
+                    'opensl/fine_audio_buffer.cc',
+                    'opensl/fine_audio_buffer.h',
+                    'opensl/low_latency_event_posix.cc',
+                    'opensl/low_latency_event.h',
+                    'opensl/opensles_common.cc',
+                    'opensl/opensles_common.h',
+                    'opensl/opensles_input.cc',
+                    'opensl/opensles_input.h',
+                    'opensl/opensles_output.cc',
+                    'opensl/opensles_output.h',
+                    'opensl/single_rw_fifo.cc',
+                    'opensl/single_rw_fifo.h',
+                    'shared/audio_device_utility_shared.cc',
+                    'shared/audio_device_utility_shared.h',
                   ],
                 }, {
                   'sources': [
-                    'android/audio_device_jni_android.cc',
-                    'android/audio_device_jni_android.h',
+                    'shared/audio_device_utility_shared.cc',
+                    'shared/audio_device_utility_shared.h',
+                    'android/audio_device_jni_android.cc',
+                    'android/audio_device_jni_android.h',
                   ],
                 }],
               ],
             }],
             ['OS=="linux"', {
-              'defines': [
-                'LINUX_ALSA',
-              ],
               'link_settings': {
                 'libraries': [
                   '-ldl','-lX11',
                 ],
               },
-              'conditions': [
-                ['include_pulse_audio==1', {
-                  'defines': [
-                    'LINUX_PULSE',
-                  ],
-                  'sources': [
-                    'linux/audio_device_pulse_linux.cc',
-                    'linux/audio_device_pulse_linux.h',
-                    'linux/audio_mixer_manager_pulse_linux.cc',
-                    'linux/audio_mixer_manager_pulse_linux.h',
-                    'linux/pulseaudiosymboltable_linux.cc',
-                    'linux/pulseaudiosymboltable_linux.h',
-                  ],
-                }],
+            }],
+            ['include_alsa_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_ALSA_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_ALSA',
+              ],
+              'sources': [
+                'linux/alsasymboltable_linux.cc',
+                'linux/alsasymboltable_linux.h',
+                'linux/audio_device_alsa_linux.cc',
+                'linux/audio_device_alsa_linux.h',
+                'linux/audio_mixer_manager_alsa_linux.cc',
+                'linux/audio_mixer_manager_alsa_linux.h',
+              ],
+            }],
+            ['include_pulse_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_PULSEAUDIO_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_PULSE',
+              ],
+              'sources': [
+                'linux/audio_device_pulse_linux.cc',
+                'linux/audio_device_pulse_linux.h',
+                'linux/audio_mixer_manager_pulse_linux.cc',
+                'linux/audio_mixer_manager_pulse_linux.h',
+                'linux/pulseaudiosymboltable_linux.cc',
+                'linux/pulseaudiosymboltable_linux.h',
               ],
             }],
             ['OS=="mac" or OS=="ios"', {
               'link_settings': {
                 'libraries': [
                   '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
                   '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
                 ],
@@ -274,9 +311,8 @@
               ],
             },
           ],
         }],
       ],
     }], # include_tests
   ],
 }
-
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
@@ -11,36 +11,44 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/audio_device_impl.h"
 #include "webrtc/system_wrappers/interface/ref_count.h"
 
 #include <assert.h>
 #include <string.h>
 
-#if defined(_WIN32)
+#if defined(WEBRTC_DUMMY_AUDIO_BUILD)
+// do not include platform-specific headers
+#elif defined(_WIN32)
     #include "audio_device_utility_win.h"
     #include "audio_device_wave_win.h"
  #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
     #include "audio_device_core_win.h"
  #endif
 #elif defined(WEBRTC_ANDROID_OPENSLES)
+// ANDROID and GONK
     #include <stdlib.h>
+    #include <dlfcn.h>
     #include "audio_device_utility_android.h"
     #include "audio_device_opensles_android.h"
+#if !defined(WEBRTC_GONK)
+    #include "audio_device_jni_android.h"
+#endif
 #elif defined(WEBRTC_ANDROID)
+// Gonk supports only OpenSL ES; Android can use either OpenSL ES or JNI
     #include <stdlib.h>
     #include "audio_device_utility_android.h"
     #include "audio_device_jni_android.h"
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     #include "audio_device_utility_linux.h"
- #if defined(LINUX_ALSA)
+#if defined(LINUX_ALSA)
     #include "audio_device_alsa_linux.h"
- #endif
- #if defined(LINUX_PULSE)
+#endif
+#if defined(LINUX_PULSE)
     #include "audio_device_pulse_linux.h"
  #endif
 #elif defined(WEBRTC_IOS)
     #include "audio_device_utility_ios.h"
     #include "audio_device_ios.h"
 #elif defined(WEBRTC_MAC)
     #include "audio_device_utility_mac.h"
     #include "audio_device_mac.h"
@@ -154,17 +162,17 @@ int32_t AudioDeviceModuleImpl::CheckPlat
     PlatformType platform(kPlatformNotSupported);
 
 #if defined(_WIN32)
     platform = kPlatformWin32;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is WIN32");
 #elif defined(WEBRTC_ANDROID)
     platform = kPlatformAndroid;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is ANDROID");
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     platform = kPlatformLinux;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is LINUX");
 #elif defined(WEBRTC_IOS)
     platform = kPlatformIOS;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is IOS");
 #elif defined(WEBRTC_MAC)
     platform = kPlatformMac;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is MAC");
@@ -254,51 +262,51 @@ int32_t AudioDeviceModuleImpl::CreatePla
         //
         ptrAudioDeviceUtility = new AudioDeviceUtilityWindows(Id());
     }
 #endif  // #if defined(_WIN32)
 
     // Create the *Android OpenSLES* implementation of the Audio Device
     //
 #if defined(WEBRTC_ANDROID_OPENSLES)
-    if (audioLayer == kPlatformDefaultAudio)
-    {
-        // Create *Android OpenELSE Audio* implementation
-        ptrAudioDevice = new AudioDeviceAndroidOpenSLES(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android OpenSLES Audio APIs will be utilized");
+    // Check if the OpenSLES library is available before going further.
+    void* opensles_lib = dlopen("libOpenSLES.so", RTLD_LAZY);
+    if (opensles_lib) {
+        // That worked, close for now and proceed normally.
+        dlclose(opensles_lib);
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+            // Create *Android OpenSLES Audio* implementation
+            ptrAudioDevice = new AudioDeviceAndroidOpenSLES(Id());
+            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                         "Android OpenSLES Audio APIs will be utilized");
+        }
+    }
+
+#if !defined(WEBRTC_GONK)
+    // Fall back to the JNI device if OpenSLES is not available (e.g. Android 2.2).
+    if (ptrAudioDevice == NULL) {
+        // Create the *Android Java* implementation of the Audio Device
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+            // Create *Android JNI Audio* implementation
+            ptrAudioDevice = new AudioDeviceAndroidJni(Id());
+            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Android JNI Audio APIs will be utilized");
+        }
     }
 
     if (ptrAudioDevice != NULL)
     {
-        // Create the Android implementation of the Device Utility.
-        ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
+      // Create the Android implementation of the Device Utility.
+      ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
     }
-    // END #if defined(WEBRTC_ANDROID_OPENSLES)
-
-    // Create the *Android Java* implementation of the Audio Device
-    //
-#elif defined(WEBRTC_ANDROID)
-    if (audioLayer == kPlatformDefaultAudio)
-    {
-        // Create *Android JNI Audio* implementation
-        ptrAudioDevice = new AudioDeviceAndroidJni(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Android JNI Audio APIs will be utilized");
-    }
-
-    if (ptrAudioDevice != NULL)
-    {
-        // Create the Android implementation of the Device Utility.
-        ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
-    }
-    // END #if defined(WEBRTC_ANDROID)
-
+#endif
     // Create the *Linux* implementation of the Audio Device
     //
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     if ((audioLayer == kLinuxPulseAudio) || (audioLayer == kPlatformDefaultAudio))
     {
 #if defined(LINUX_PULSE)
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "attempting to use the Linux PulseAudio APIs...");
 
         if (AudioDeviceLinuxPulse::PulseAudioIsSupported())
         {
             // create *Linux PulseAudio* implementation
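
The probe above lets one binary run both on devices that ship libOpenSLES.so and on ones that do not (e.g. Android 2.2): the library is opened only to test for its presence, closed again, and the JNI device serves as the fallback. Stripped of tracing and the audioLayer check, the selection reduces to:

    // Condensed view of the backend selection (illustrative):
    void* probe = dlopen("libOpenSLES.so", RTLD_LAZY);
    if (probe) {
      dlclose(probe);
      ptrAudioDevice = new AudioDeviceAndroidOpenSLES(Id());
    }
    #if !defined(WEBRTC_GONK)
    if (ptrAudioDevice == NULL)  // no OpenSL ES: fall back to the JNI device
      ptrAudioDevice = new AudioDeviceAndroidJni(Id());
    #endif
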
@@ -334,17 +342,17 @@ int32_t AudioDeviceModuleImpl::CreatePla
     if (ptrAudioDevice != NULL)
     {
         // Create the Linux implementation of the Device Utility.
         // This class is independent of the selected audio layer
         // for Linux.
         //
         ptrAudioDeviceUtility = new AudioDeviceUtilityLinux(Id());
     }
-#endif  // #if defined(WEBRTC_LINUX)
+#endif  // #if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 
     // Create the *iPhone* implementation of the Audio Device
     //
 #if defined(WEBRTC_IOS)
     if (audioLayer == kPlatformDefaultAudio)
     {
         // Create *iPhone Audio* implementation
         ptrAudioDevice = new AudioDeviceIPhone(Id());
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
@@ -41,17 +41,17 @@ bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2,
     const uint32_t length)
 {
 	return ((_strnicmp(str1, str2, length) == 0) ? true : false);
 }
 
 }  // namespace webrtc
 
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 // ============================================================================
 //                                 Linux & Mac
 // ============================================================================
 
 #include <stdio.h>      // getchar
 #include <string.h>     // strncasecmp
 #include <sys/time.h>   // gettimeofday
@@ -104,9 +104,9 @@ uint32_t AudioDeviceUtility::GetTimeInMS
 bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2, const uint32_t length)
 {
     return (strncasecmp(str1, str2, length) == 0)?true: false;
 }
 
 }  // namespace webrtc
 
-#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
--- a/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
@@ -1327,17 +1327,17 @@ int32_t AudioDeviceIPhone::InitPlayOrRec
                  playoutDesc.mSampleRate);
 
     playoutDesc.mSampleRate = sampleRate;
 
     // Store the sampling frequency to use towards the Audio Device Buffer
     // todo: Add 48 kHz (increase buffer sizes). Other fs?
     if ((playoutDesc.mSampleRate > 44090.0)
         && (playoutDesc.mSampleRate < 44110.0)) {
-        _adbSampFreq = 44000;
+        _adbSampFreq = 44100;
     } else if ((playoutDesc.mSampleRate > 15990.0)
                && (playoutDesc.mSampleRate < 16010.0)) {
         _adbSampFreq = 16000;
     } else if ((playoutDesc.mSampleRate > 7990.0)
                && (playoutDesc.mSampleRate < 8010.0)) {
         _adbSampFreq = 8000;
     } else {
         _adbSampFreq = 0;
--- a/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -14,18 +14,18 @@
 #include <AudioUnit/AudioUnit.h>
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
 namespace webrtc {
 class ThreadWrapper;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
+const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
+const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
 
 const uint32_t N_REC_CHANNELS = 1;  // default is mono recording
 const uint32_t N_PLAY_CHANNELS = 1;  // default is mono playout
 const uint32_t N_DEVICE_CHANNELS = 8;
 
 const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
 const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -14,16 +14,23 @@
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 #include "webrtc/modules/audio_device/linux/audio_device_alsa_linux.h"
 
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/sleep.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#include "Latency.h"
+
+#define LOG_FIRST_CAPTURE(x) LogTime(AsyncLatencyLogger::AudioCaptureBase, \
+                                     reinterpret_cast<uint64_t>(x), 0)
+#define LOG_CAPTURE_FRAMES(x, frames) LogLatency(AsyncLatencyLogger::AudioCapture, \
+                                                 reinterpret_cast<uint64_t>(x), frames)
+
 webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
 
 // Accesses ALSA functions through our late-binding symbol table instead of
 // directly. This way we don't have to link to libasound, which means our binary
 // will work on systems that don't have it.
 #define LATE(sym) \
   LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
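
Latency.h comes from the Mozilla tree rather than upstream WebRTC; the two macros key their log entries on the device instance pointer. With x == this they expand to:

    LogTime(AsyncLatencyLogger::AudioCaptureBase,
            reinterpret_cast<uint64_t>(this), 0);          // first capture mark
    LogLatency(AsyncLatencyLogger::AudioCapture,
               reinterpret_cast<uint64_t>(this), frames);  // per-callback frames
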
 
@@ -91,16 +98,17 @@ AudioDeviceLinuxALSA::AudioDeviceLinuxAL
     _playChannels(ALSA_PLAYOUT_CH),
     _recordingBuffer(NULL),
     _playoutBuffer(NULL),
     _recordingFramesLeft(0),
     _playoutFramesLeft(0),
     _playBufType(AudioDeviceModule::kFixedBufferSize),
     _initialized(false),
     _recording(false),
+    _firstRecord(true),
     _playing(false),
     _recIsInitialized(false),
     _playIsInitialized(false),
     _AGC(false),
     _recordingDelay(0),
     _playoutDelay(0),
     _playWarning(0),
     _playError(0),
@@ -981,17 +989,18 @@ int32_t AudioDeviceLinuxALSA::RecordingD
 
     memset(name, 0, kAdmMaxDeviceNameSize);
 
     if (guid != NULL)
     {
         memset(guid, 0, kAdmMaxGuidSize);
     }
     
-    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize,
+                          guid, kAdmMaxGuidSize);
 }
 
 int16_t AudioDeviceLinuxALSA::RecordingDevices()
 {
 
     return (int16_t)GetDevicesInfo(0, false);
 }
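
RecordingDeviceName() now also fills the guid out-parameter with the raw ALSA hint name (the string usable with snd_pcm_open(); the "default" device gets a zeroed id). A usage sketch, assuming adm points at an initialized AudioDeviceLinuxALSA and that device 1 exists:

    char name[webrtc::kAdmMaxDeviceNameSize] = {0};
    char guid[webrtc::kAdmMaxGuidSize] = {0};
    adm->RecordingDeviceName(1, name, guid);
    // name: human-readable description; guid: ALSA device string, e.g. a hw: id
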
 
@@ -1443,16 +1452,17 @@ int32_t AudioDeviceLinuxALSA::StartRecor
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "   failed to alloc recording buffer");
         _recording = false;
         return -1;
     }
     // RECORDING
     const char* threadName = "webrtc_audio_module_capture_thread";
+    _firstRecord = true;
     _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc,
                                                 this,
                                                 kRealtimePriority,
                                                 threadName);
     if (_ptrThreadRec == NULL)
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to create the rec audio thread");
@@ -1629,40 +1639,41 @@ int32_t AudioDeviceLinuxALSA::StartPlayo
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "    failed to create the play audio thread");
         _playing = false;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
 
+    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+                     "     playout snd_pcm_prepare failed (%s)\n",
+                     LATE(snd_strerror)(errVal));
+        // Just log the error here; a failed snd_pcm_open would already
+        // have returned -1 earlier.
+    }
+
+
     unsigned int threadID(0);
     if (!_ptrThreadPlay->Start(threadID))
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to start the play audio thread");
         _playing = false;
         delete _ptrThreadPlay;
         _ptrThreadPlay = NULL;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
     _playThreadID = threadID;
 
-    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
-    if (errVal < 0)
-    {
-        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                     "     playout snd_pcm_prepare failed (%s)\n",
-                     LATE(snd_strerror)(errVal));
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
-
     return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::StopPlayout()
 {
 
     {
         CriticalSectionScoped lock(&_critSect);
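
Note the reordering in StartPlayout above: snd_pcm_prepare() now runs before the playout thread is started, so the thread can no longer reach snd_pcm_writei() on an unprepared handle; with the old order there was a brief window in which the first writes could fail.
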
@@ -1824,17 +1835,19 @@ void AudioDeviceLinuxALSA::ClearRecordin
 //                                 Private Methods
 // ============================================================================
 
 int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
     const int32_t function,
     const bool playback,
     const int32_t enumDeviceNo,
     char* enumDeviceName,
-    const int32_t ednLen) const
+    const int32_t ednLen,
+    char* enumDeviceId,
+    const int32_t ediLen) const
 {
     
     // Device enumeration based on libjingle implementation
     // by Tristan Schmelcher at Google Inc.
 
     const char *type = playback ? "Output" : "Input";
     // dmix and dsnoop are only for playback and capture, respectively, but ALSA
     // stupidly includes them in both lists.
@@ -1863,16 +1876,18 @@ int32_t AudioDeviceLinuxALSA::GetDevices
             return -1;
         }
 
         enumCount++; // default is 0
         if ((function == FUNC_GET_DEVICE_NAME ||
             function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
         {
             strcpy(enumDeviceName, "default");
+            if (enumDeviceId)
+                memset(enumDeviceId, 0, ediLen);
 
             err = LATE(snd_device_name_free_hint)(hints);
             if (err != 0)
             {
                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                              "GetDevicesInfo - device name free hint error: %s",
                              LATE(snd_strerror)(err));
             }
@@ -1925,28 +1940,38 @@ int32_t AudioDeviceLinuxALSA::GetDevices
 
                 }
                 if ((FUNC_GET_DEVICE_NAME == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, desc, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                     // Replace '\n' with '-'.
                     char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
                     if (pret)
                         *pret = '-';
                 }
                 if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, name, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                 }
 
                 if (keepSearching)
                     ++enumCount;
 
                 if (desc != name)
                     free(desc);
@@ -1961,17 +1986,17 @@ int32_t AudioDeviceLinuxALSA::GetDevices
         err = LATE(snd_device_name_free_hint)(hints);
         if (err != 0)
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "GetDevicesInfo - device name free hint error: %s",
                          LATE(snd_strerror)(err));
             // Continue and return true anyway, since we did get the whole list.
         }
-    }
+      }
 
     if (FUNC_GET_NUM_OF_DEVICE == function)
     {
         if (enumCount == 1) // only default?
             enumCount = 0;
         return enumCount; // Normal return point for function 0
     }
 
@@ -2246,16 +2271,21 @@ bool AudioDeviceLinuxALSA::RecThreadProc
         memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
                buffer, size);
         _recordingFramesLeft -= frames;
 
         if (!_recordingFramesLeft)
         { // buf is full
             _recordingFramesLeft = _recordingFramesIn10MS;
 
+            if (_firstRecord) {
+              LOG_FIRST_CAPTURE(this);
+              _firstRecord = false;
+            }
+            LOG_CAPTURE_FRAMES(this, _recordingFramesIn10MS);
             // store the recorded buffer (no action will be taken if the
             // #recorded samples is not a full buffer)
             _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
                                                _recordingFramesIn10MS);
 
             uint32_t currentMicLevel = 0;
             uint32_t newMicLevel = 0;
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -162,17 +162,19 @@ public:
 public:
     virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) OVERRIDE;
 
 private:
     int32_t GetDevicesInfo(const int32_t function,
                            const bool playback,
                            const int32_t enumDeviceNo = 0,
                            char* enumDeviceName = NULL,
-                           const int32_t ednLen = 0) const;
+                           const int32_t ednLen = 0,
+                           char* enumDeviceId = NULL,
+                           const int32_t ediLen = 0) const;
     int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);
 
 private:
     bool KeyPressed() const;
 
 private:
     void Lock() { _critSect.Enter(); };
     void UnLock() { _critSect.Leave(); };
@@ -228,16 +230,17 @@ private:
     uint32_t _recordingFramesLeft;
     uint32_t _playoutFramesLeft;
 
     AudioDeviceModule::BufferType _playBufType;
 
 private:
     bool _initialized;
     bool _recording;
+    bool _firstRecord;
     bool _playing;
     bool _recIsInitialized;
     bool _playIsInitialized;
     bool _AGC;
 
     snd_pcm_sframes_t _recordingDelay;
     snd_pcm_sframes_t _playoutDelay;
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -22,68 +22,68 @@
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h"
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 #include <dlfcn.h>
 #endif
 
 // TODO(grunell): Either put inside webrtc namespace or use webrtc:: instead.
 using namespace webrtc;
 
 namespace webrtc_adm_linux {
 
 inline static const char *GetDllError() {
-#ifdef WEBRTC_LINUX
-  char *err = dlerror();
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
+  const char *err = dlerror();
   if (err) {
     return err;
   } else {
     return "No error";
   }
 #else
 #error Not implemented
 #endif
 }
 
 DllHandle InternalLoadDll(const char dll_name[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   DllHandle handle = dlopen(dll_name, RTLD_NOW);
 #else
 #error Not implemented
 #endif
   if (handle == kInvalidDllHandle) {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                "Can't load %s : %s", dll_name, GetDllError());
   }
   return handle;
 }
 
 void InternalUnloadDll(DllHandle handle) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   if (dlclose(handle) != 0) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "%s", GetDllError());
   }
 #else
 #error Not implemented
 #endif
 }
 
 static bool LoadSymbol(DllHandle handle,
                        const char *symbol_name,
                        void **symbol) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   *symbol = dlsym(handle, symbol_name);
-  char *err = dlerror();
+  const char *err = dlerror();
   if (err) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "Error loading symbol %s : %d", symbol_name, err);
     return false;
   } else if (!*symbol) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "Symbol %s is NULL", symbol_name);
     return false;
@@ -96,17 +96,17 @@ static bool LoadSymbol(DllHandle handle,
 
 // This routine MUST assign SOME value for every symbol, even if that value is
 // NULL, or else some symbols may be left with uninitialized data that the
 // caller may later interpret as a valid address.
 bool InternalLoadSymbols(DllHandle handle,
                          int num_symbols,
                          const char *const symbol_names[],
                          void *symbols[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   // Clear any old errors.
   dlerror();
 #endif
   for (int i = 0; i < num_symbols; ++i) {
     if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
       return false;
     }
   }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -37,17 +37,17 @@
 
 // This file provides macros for creating "symbol table" classes to simplify the
 // dynamic loading of symbols from DLLs. Currently the implementation only
 // supports Linux and pure C symbols.
 // See talk/sound/pulseaudiosymboltable.(h|cc) for an example.
 
 namespace webrtc_adm_linux {
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 typedef void *DllHandle;
 
 const DllHandle kInvalidDllHandle = NULL;
 #else
 #error Not implemented
 #endif
 
 // These are helpers for use only by the class below.
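These helpers now cover the BSDs as well, and the dlerror() result is held in a const char* since the string must not be modified. For reference, a self-contained sketch of the dlopen/dlsym/dlerror protocol they wrap ("libfoo.so" and "foo" are placeholders, not names from this tree); note that dlerror() must be cleared before dlsym() and checked after, because a symbol can legitimately resolve to NULL:

    #include <dlfcn.h>
    #include <stdio.h>

    typedef int (*foo_fn)(int);

    int main() {
      void* handle = dlopen("libfoo.so", RTLD_NOW);
      if (!handle) {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
      }
      dlerror();  // Clear any stale error before the lookup.
      foo_fn fn = (foo_fn)dlsym(handle, "foo");
      const char* err = dlerror();  // Non-NULL only if the lookup failed.
      if (err != NULL) {
        fprintf(stderr, "dlsym failed: %s\n", err);
        dlclose(handle);
        return 1;
      }
      fn(42);
      dlclose(handle);
      return 0;
    }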
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
@@ -24,16 +24,20 @@
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h"
 
 namespace webrtc_adm_linux_pulse {
 
+#if defined(__OpenBSD__) || defined(WEBRTC_GONK)
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so")
+#else
 LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
+#endif
 #define X(sym) \
     LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
 PULSE_AUDIO_SYMBOLS_LIST
 #undef X
 LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
 
 }  // namespace webrtc_adm_linux_pulse
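On OpenBSD and B2G (WEBRTC_GONK), PulseAudio is apparently installed only under its unversioned name, so the symbol table is pointed at libpulse.so there instead of the Linux-conventional libpulse.so.0. An alternative, illustration-only design would fall back at runtime rather than choosing at compile time (the helper below is hypothetical; only the library names are real):

    #include <dlfcn.h>
    #include <cstddef>

    static void* OpenPulse() {
      // Prefer the versioned runtime name; fall back to the unversioned one.
      void* handle = dlopen("libpulse.so.0", RTLD_NOW);
      if (handle == NULL)
        handle = dlopen("libpulse.so", RTLD_NOW);
      return handle;
    }

The compile-time #if keeps the common path to a single dlopen() and avoids silently masking a genuinely missing libpulse.so.0 on desktop Linux.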
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_opensles_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_opensles_android.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_manager_jni.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.h
@@ -0,0 +1,6 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_manager_jni.h"
+
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/fine_audio_buffer.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/fine_audio_buffer.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/low_latency_event_posix.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/low_latency_event_posix.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_common.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_common.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_input.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_input.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_output.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_output.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/single_rw_fifo.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/single_rw_fifo.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_jni_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_jni_android.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_utility_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_utility_android.h"
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -205,17 +205,17 @@ class AudioDeviceAPITest: public testing
                 kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kLinuxAlsaAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kLinuxPulseAudio)) == NULL);
     // Create default implementation instance
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kWindowsWaveAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
     // create default implementation instance
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
     audio_device_->AddRef();
@@ -1698,17 +1698,17 @@ TEST_F(AudioDeviceAPITest, CPULoad) {
   EXPECT_EQ(0, load);
 #else
   EXPECT_EQ(-1, audio_device_->CPULoad(&load));
 #endif
 }
 
 // TODO(kjellander): Fix flakiness causing failures on Windows.
 // TODO(phoglund):  Fix flakiness causing failures on Linux.
-#if !defined(_WIN32) && !defined(WEBRTC_LINUX)
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_BSD)
 TEST_F(AudioDeviceAPITest, StartAndStopRawOutputFileRecording) {
   // NOTE: this API is better tested in a functional test
   CheckInitialPlayoutStates();
 
   // fail tests
   EXPECT_EQ(-1, audio_device_->StartRawOutputFileRecording(NULL));
 
   // bulk tests
@@ -1767,50 +1767,50 @@ TEST_F(AudioDeviceAPITest, StartAndStopR
       GetFilename("raw_input_not_recording.pcm")));
   EXPECT_EQ(0, audio_device_->StopRawInputFileRecording());
 
   // results after this test:
   //
   // - size of raw_input_not_recording.pcm shall be 0
   // - size of raw_input_recording.pcm shall be > 0
 }
-#endif  // !WIN32 && !WEBRTC_LINUX
+#endif  // !WIN32 && !WEBRTC_LINUX && !WEBRTC_BSD
 
 TEST_F(AudioDeviceAPITest, RecordingSampleRate) {
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->RecordingSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 
   // @TODO(xians) - add tests for all platforms here...
 }
 
 TEST_F(AudioDeviceAPITest, PlayoutSampleRate) {
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->PlayoutSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 }
 
 TEST_F(AudioDeviceAPITest, ResetAudioDevice) {
   CheckInitialPlayoutStates();
   CheckInitialRecordingStates();
   EXPECT_EQ(0, audio_device_->SetPlayoutDevice(MACRO_DEFAULT_DEVICE));
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -343,22 +343,16 @@ int32_t AudioTransportImpl::NeedMorePlay
                 const uint8_t nChannelsIn = packet->nChannels;
                 const uint32_t samplesPerSecIn = packet->samplesPerSec;
                 const uint16_t nBytesPerSampleIn =
                     packet->nBytesPerSample;
 
                 int32_t fsInHz(samplesPerSecIn);
                 int32_t fsOutHz(samplesPerSec);
 
-                if (fsInHz == 44100)
-                    fsInHz = 44000;
-
-                if (fsOutHz == 44100)
-                    fsOutHz = 44000;
-
                 if (nChannelsIn == 2 && nBytesPerSampleIn == 4)
                 {
                     // input is stereo => we will resample in stereo
                     ret = _resampler.ResetIfNeeded(fsInHz, fsOutHz,
                                                    kResamplerSynchronousStereo);
                     if (ret == 0)
                     {
                         if (nChannels == 2)
@@ -1253,17 +1247,17 @@ int32_t FuncTestManager::TestAudioTransp
 
         EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
 
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (samplesPerSec == 48000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (samplesPerSec == 44100 || samplesPerSec == 44000) {
+        } else if (samplesPerSec == 44100) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1486,17 +1480,17 @@ int32_t FuncTestManager::TestSpeakerVolu
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (44100 == samplesPerSec || samplesPerSec == 44000) {
+        } else if (44100 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1587,17 +1581,17 @@ int32_t FuncTestManager::TestSpeakerMute
     EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile48.c_str());
-        else if (44100 == samplesPerSec || 44000 == samplesPerSec)
+        else if (44100 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile44.c_str());
         else
         {
             TEST_LOG("\nERROR: Sample rate (%d) is not supported!\n \n",
                      samplesPerSec);
             return -1;
         }
         EXPECT_EQ(0, audioDevice->StartPlayout());
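The 44000 Hz special cases removed above existed to appease a resampler that only handled a fixed set of integer rate ratios, so 44.1 kHz hardware was approximated as 44 kHz. With the resampler now accepting 44100 Hz directly, the tests compare against the true hardware rate and the remapping is dropped. A minimal sketch of the now-valid reset call, using the same Resampler API as the surrounding test code (the 48000 -> 44100 pair is arbitrary):

    webrtc::Resampler resampler;
    // 44100 no longer needs to be rounded down to 44000 before this call.
    int ret = resampler.ResetIfNeeded(48000, 44100,
                                      webrtc::kResamplerSynchronous);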
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
@@ -104,17 +104,27 @@ const float WebRtcAec_overDriveCurve[65]
     1.8660f, 1.8750f, 1.8839f, 1.8927f, 1.9014f, 1.9100f,
     1.9186f, 1.9270f, 1.9354f, 1.9437f, 1.9520f, 1.9601f,
     1.9682f, 1.9763f, 1.9843f, 1.9922f, 2.0000f
 };
 
 // Target suppression levels for nlp modes.
 // log{0.001, 0.00001, 0.00000001}
 static const float kTargetSupp[3] = { -6.9f, -11.5f, -18.4f };
-static const float kMinOverDrive[3] = { 1.0f, 2.0f, 5.0f };
+
+// Two sets of parameters, one for the extended filter mode.
+static const float kExtendedMinOverDrive[3] = { 3.0f, 6.0f, 15.0f };
+static const float kNormalMinOverDrive[3] = { 1.0f, 2.0f, 5.0f };
+static const float kExtendedSmoothingCoefficients[2][2] =
+    { { 0.9f, 0.1f }, { 0.92f, 0.08f } };
+static const float kNormalSmoothingCoefficients[2][2] =
+    { { 0.9f, 0.1f }, { 0.93f, 0.07f } };
+
+// Number of partitions forming the NLP's "preferred" bands.
+enum { kPrefBandSize = 24 };
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
 extern int webrtc_aec_instance_count;
 #endif
 
 // "Private" function prototypes.
 static void ProcessBlock(AecCore* aec);
 
@@ -276,89 +286,92 @@ int WebRtcAec_FreeAec(AecCore* aec)
 
     free(aec);
     return 0;
 }
 
 static void FilterFar(AecCore* aec, float yf[2][PART_LEN1])
 {
   int i;
-  for (i = 0; i < NR_PART; i++) {
+  for (i = 0; i < aec->num_partitions; i++) {
     int j;
     int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
     int pos = i * PART_LEN1;
     // Check for wrap
-    if (i + aec->xfBufBlockPos >= NR_PART) {
-      xPos -= NR_PART*(PART_LEN1);
+    if (i + aec->xfBufBlockPos >= aec->num_partitions) {
+      xPos -= aec->num_partitions*(PART_LEN1);
     }
 
     for (j = 0; j < PART_LEN1; j++) {
       yf[0][j] += MulRe(aec->xfBuf[0][xPos + j], aec->xfBuf[1][xPos + j],
                         aec->wfBuf[0][ pos + j], aec->wfBuf[1][ pos + j]);
       yf[1][j] += MulIm(aec->xfBuf[0][xPos + j], aec->xfBuf[1][xPos + j],
                         aec->wfBuf[0][ pos + j], aec->wfBuf[1][ pos + j]);
     }
   }
 }
 
 static void ScaleErrorSignal(AecCore* aec, float ef[2][PART_LEN1])
 {
+  const float mu = aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
+  const float error_threshold = aec->extended_filter_enabled ?
+      kExtendedErrorThreshold : aec->normal_error_threshold;
   int i;
-  float absEf;
+  float abs_ef;
   for (i = 0; i < (PART_LEN1); i++) {
     ef[0][i] /= (aec->xPow[i] + 1e-10f);
     ef[1][i] /= (aec->xPow[i] + 1e-10f);
-    absEf = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
+    abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
 
-    if (absEf > aec->errThresh) {
-      absEf = aec->errThresh / (absEf + 1e-10f);
-      ef[0][i] *= absEf;
-      ef[1][i] *= absEf;
+    if (abs_ef > error_threshold) {
+      abs_ef = error_threshold / (abs_ef + 1e-10f);
+      ef[0][i] *= abs_ef;
+      ef[1][i] *= abs_ef;
     }
 
     // Stepsize factor
-    ef[0][i] *= aec->mu;
-    ef[1][i] *= aec->mu;
+    ef[0][i] *= mu;
+    ef[1][i] *= mu;
   }
 }
 
 // Time-unconstrained filter adaptation.
 // TODO(andrew): consider for a low-complexity mode.
 //static void FilterAdaptationUnconstrained(AecCore* aec, float *fft,
 //                                          float ef[2][PART_LEN1]) {
 //  int i, j;
-//  for (i = 0; i < NR_PART; i++) {
+//  for (i = 0; i < aec->num_partitions; i++) {
 //    int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
 //    int pos;
 //    // Check for wrap
-//    if (i + aec->xfBufBlockPos >= NR_PART) {
-//      xPos -= NR_PART * PART_LEN1;
+//    if (i + aec->xfBufBlockPos >= aec->num_partitions) {
+//      xPos -= aec->num_partitions * PART_LEN1;
 //    }
 //
 //    pos = i * PART_LEN1;
 //
 //    for (j = 0; j < PART_LEN1; j++) {
-//      aec->wfBuf[pos + j][0] += MulRe(aec->xfBuf[xPos + j][0],
-//                                      -aec->xfBuf[xPos + j][1],
-//                                      ef[j][0], ef[j][1]);
-//      aec->wfBuf[pos + j][1] += MulIm(aec->xfBuf[xPos + j][0],
-//                                      -aec->xfBuf[xPos + j][1],
-//                                      ef[j][0], ef[j][1]);
+//      aec->wfBuf[0][pos + j] += MulRe(aec->xfBuf[0][xPos + j],
+//                                      -aec->xfBuf[1][xPos + j],
+//                                      ef[0][j], ef[1][j]);
+//      aec->wfBuf[1][pos + j] += MulIm(aec->xfBuf[0][xPos + j],
+//                                      -aec->xfBuf[1][xPos + j],
+//                                      ef[0][j], ef[1][j]);
 //    }
 //  }
 //}
 
 static void FilterAdaptation(AecCore* aec, float *fft, float ef[2][PART_LEN1]) {
   int i, j;
-  for (i = 0; i < NR_PART; i++) {
+  for (i = 0; i < aec->num_partitions; i++) {
     int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
     int pos;
     // Check for wrap
-    if (i + aec->xfBufBlockPos >= NR_PART) {
-      xPos -= NR_PART * PART_LEN1;
+    if (i + aec->xfBufBlockPos >= aec->num_partitions) {
+      xPos -= aec->num_partitions * PART_LEN1;
     }
 
     pos = i * PART_LEN1;
 
     for (j = 0; j < PART_LEN; j++) {
 
       fft[2 * j] = MulRe(aec->xfBuf[0][xPos + j],
                          -aec->xfBuf[1][xPos + j],
@@ -422,22 +435,22 @@ WebRtcAec_OverdriveAndSuppress_t WebRtcA
 
 int WebRtcAec_InitAec(AecCore* aec, int sampFreq)
 {
     int i;
 
     aec->sampFreq = sampFreq;
 
     if (sampFreq == 8000) {
-        aec->mu = 0.6f;
-        aec->errThresh = 2e-6f;
+        aec->normal_mu = 0.6f;
+        aec->normal_error_threshold = 2e-6f;
     }
     else {
-        aec->mu = 0.5f;
-        aec->errThresh = 1.5e-6f;
+        aec->normal_mu = 0.5f;
+        aec->normal_error_threshold = 1.5e-6f;
     }
 
     if (WebRtc_InitBuffer(aec->nearFrBuf) == -1) {
         return -1;
     }
 
     if (WebRtc_InitBuffer(aec->outFrBuf) == -1) {
         return -1;
@@ -469,26 +482,29 @@ int WebRtcAec_InitAec(AecCore* aec, int 
       return -1;
     }
     if (WebRtc_InitDelayEstimator(aec->delay_estimator) != 0) {
       return -1;
     }
     aec->delay_logging_enabled = 0;
     memset(aec->delay_histogram, 0, sizeof(aec->delay_histogram));
 
+    aec->extended_filter_enabled = 0;
+    aec->num_partitions = kNormalNumPartitions;
+
     // Default target suppression mode.
     aec->nlp_mode = 1;
 
     // Sampling frequency multiplier
     // SWB is processed as 160 frame size
     if (aec->sampFreq == 32000) {
       aec->mult = (short)aec->sampFreq / 16000;
     }
     else {
-        aec->mult = (short)aec->sampFreq / 8000;
+      aec->mult = (short)aec->sampFreq / 8000;
     }
 
     aec->farBufWritePos = 0;
     aec->farBufReadPos = 0;
 
     aec->inSamples = 0;
     aec->outSamples = 0;
     aec->knownDelay = 0;
@@ -509,21 +525,24 @@ int WebRtcAec_InitAec(AecCore* aec, int 
     for (i = 0; i < PART_LEN1; i++) {
         aec->dMinPow[i] = 1.0e6f;
     }
 
     // Holds the last block written to
     aec->xfBufBlockPos = 0;
     // TODO: Investigate need for these initializations. Deleting them doesn't
     //       change the output at all and yields 0.4% overall speedup.
-    memset(aec->xfBuf, 0, sizeof(complex_t) * NR_PART * PART_LEN1);
-    memset(aec->wfBuf, 0, sizeof(complex_t) * NR_PART * PART_LEN1);
+    memset(aec->xfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions *
+        PART_LEN1);
+    memset(aec->wfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions *
+        PART_LEN1);
     memset(aec->sde, 0, sizeof(complex_t) * PART_LEN1);
     memset(aec->sxd, 0, sizeof(complex_t) * PART_LEN1);
-    memset(aec->xfwBuf, 0, sizeof(complex_t) * NR_PART * PART_LEN1);
+    memset(aec->xfwBuf, 0, sizeof(complex_t) * kExtendedNumPartitions *
+        PART_LEN1);
     memset(aec->se, 0, sizeof(float) * PART_LEN1);
 
     // To prevent numerical instability in the first block.
     for (i = 0; i < PART_LEN1; i++) {
         aec->sd[i] = 1;
     }
     for (i = 0; i < PART_LEN1; i++) {
         aec->sx[i] = 1;
@@ -729,60 +748,63 @@ int WebRtcAec_GetDelayMetricsCore(AecCor
 
   // Reset histogram.
   memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
 
   return 0;
 }
 
 int WebRtcAec_echo_state(AecCore* self) {
-  assert(self != NULL);
   return self->echoState;
 }
 
 void WebRtcAec_GetEchoStats(AecCore* self, Stats* erl, Stats* erle,
                             Stats* a_nlp) {
-  assert(self != NULL);
   assert(erl != NULL);
   assert(erle != NULL);
   assert(a_nlp != NULL);
   *erl = self->erl;
   *erle = self->erle;
   *a_nlp = self->aNlp;
 }
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
 void* WebRtcAec_far_time_buf(AecCore* self) {
-  assert(self != NULL);
   return self->far_time_buf;
 }
 #endif
 
 void WebRtcAec_SetConfigCore(AecCore* self, int nlp_mode, int metrics_mode,
                              int delay_logging) {
-  assert(self != NULL);
   assert(nlp_mode >= 0 && nlp_mode < 3);
   self->nlp_mode = nlp_mode;
   self->metricsMode = metrics_mode;
   if (self->metricsMode) {
     InitMetrics(self);
   }
   self->delay_logging_enabled = delay_logging;
   if (self->delay_logging_enabled) {
     memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
   }
 }
 
+void WebRtcAec_enable_delay_correction(AecCore* self, int enable) {
+  self->extended_filter_enabled = enable;
+  self->num_partitions = enable ? kExtendedNumPartitions : kNormalNumPartitions;
+}
+
+int WebRtcAec_delay_correction_enabled(AecCore* self) {
+  return self->extended_filter_enabled;
+}
+
 int WebRtcAec_system_delay(AecCore* self) {
-  assert(self != NULL);
   return self->system_delay;
 }
 
 void WebRtcAec_SetSystemDelay(AecCore* self, int delay) {
-  assert(self != NULL);
   assert(delay >= 0);
   self->system_delay = delay;
 }
 
 static void ProcessBlock(AecCore* aec) {
     int i;
     float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
     float scale;
@@ -848,17 +870,18 @@ static void ProcessBlock(AecCore* aec) {
     // Near fft
     memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
     TimeToFrequency(fft, df, 0);
 
     // Power smoothing
     for (i = 0; i < PART_LEN1; i++) {
       far_spectrum = (xf_ptr[i] * xf_ptr[i]) +
           (xf_ptr[PART_LEN1 + i] * xf_ptr[PART_LEN1 + i]);
-      aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * NR_PART * far_spectrum;
+      aec->xPow[i] = gPow[0] * aec->xPow[i] + gPow[1] * aec->num_partitions *
+          far_spectrum;
       // Calculate absolute spectra
       abs_far_spectrum[i] = sqrtf(far_spectrum);
 
       near_spectrum = df[0][i] * df[0][i] + df[1][i] * df[1][i];
       aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
       // Calculate absolute spectra
       abs_near_spectrum[i] = sqrtf(near_spectrum);
     }
@@ -908,17 +931,17 @@ static void ProcessBlock(AecCore* aec) {
           aec->delay_histogram[delay_estimate]++;
         }
       }
     }
 
     // Update the xfBuf block position.
     aec->xfBufBlockPos--;
     if (aec->xfBufBlockPos == -1) {
-        aec->xfBufBlockPos = NR_PART - 1;
+        aec->xfBufBlockPos = aec->num_partitions - 1;
     }
 
     // Buffer xf
     memcpy(aec->xfBuf[0] + aec->xfBufBlockPos * PART_LEN1, xf_ptr,
            sizeof(float) * PART_LEN1);
     memcpy(aec->xfBuf[1] + aec->xfBufBlockPos * PART_LEN1, &xf_ptr[PART_LEN1],
            sizeof(float) * PART_LEN1);
 
@@ -1009,28 +1032,31 @@ static void NonLinearProcessing(AecCore*
     float scale, dtmp;
     float nlpGainHband;
     int i, j, pos;
 
     // Coherence and non-linear filter
     float cohde[PART_LEN1], cohxd[PART_LEN1];
     float hNlDeAvg, hNlXdAvg;
     float hNl[PART_LEN1];
-    float hNlPref[PREF_BAND_SIZE];
+    float hNlPref[kPrefBandSize];
     float hNlFb = 0, hNlFbLow = 0;
     const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f;
-    const int prefBandSize = PREF_BAND_SIZE / aec->mult;
+    const int prefBandSize = kPrefBandSize / aec->mult;
     const int minPrefBand = 4 / aec->mult;
 
     // Near and error power sums
     float sdSum = 0, seSum = 0;
 
-    // Power estimate smoothing coefficients
-    const float gCoh[2][2] = {{0.9f, 0.1f}, {0.93f, 0.07f}};
-    const float *ptrGCoh = gCoh[aec->mult - 1];
+    // Power estimate smoothing coefficients.
+    const float *ptrGCoh = aec->extended_filter_enabled ?
+        kExtendedSmoothingCoefficients[aec->mult - 1] :
+        kNormalSmoothingCoefficients[aec->mult - 1];
+    const float* min_overdrive = aec->extended_filter_enabled ?
+        kExtendedMinOverDrive : kNormalMinOverDrive;
 
     // Filter energy
     float wfEnMax = 0, wfEn = 0;
     const int delayEstInterval = 10 * aec->mult;
 
     float* xfw_ptr = NULL;
 
     aec->delayEstCtr++;
@@ -1043,17 +1069,17 @@ static void NonLinearProcessing(AecCore*
     nlpGainHband = (float)0.0;
     dtmp = (float)0.0;
 
     // Measure energy in each filter partition to determine delay.
     // TODO: Spread by computing one partition per block?
     if (aec->delayEstCtr == 0) {
         wfEnMax = 0;
         aec->delayIdx = 0;
-        for (i = 0; i < NR_PART; i++) {
+        for (i = 0; i < aec->num_partitions; i++) {
             pos = i * PART_LEN1;
             wfEn = 0;
             for (j = 0; j < PART_LEN1; j++) {
                 wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
                     aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
             }
 
             if (wfEn > wfEnMax) {
@@ -1184,17 +1210,17 @@ static void NonLinearProcessing(AecCore*
         aec->stNearState = 1;
     }
     else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) {
         aec->stNearState = 0;
     }
 
     if (aec->hNlXdAvgMin == 1) {
         aec->echoState = 0;
-        aec->overDrive = kMinOverDrive[aec->nlp_mode];
+        aec->overDrive = min_overdrive[aec->nlp_mode];
 
         if (aec->stNearState == 1) {
             memcpy(hNl, cohde, sizeof(hNl));
             hNlFb = hNlDeAvg;
             hNlFbLow = hNlDeAvg;
         }
         else {
             for (i = 0; i < PART_LEN1; i++) {
@@ -1240,17 +1266,17 @@ static void NonLinearProcessing(AecCore*
     if (aec->hNlNewMin == 1) {
         aec->hNlMinCtr++;
     }
     if (aec->hNlMinCtr == 2) {
         aec->hNlNewMin = 0;
         aec->hNlMinCtr = 0;
         aec->overDrive = WEBRTC_SPL_MAX(kTargetSupp[aec->nlp_mode] /
             ((float)log(aec->hNlFbMin + 1e-10f) + 1e-10f),
-            kMinOverDrive[aec->nlp_mode]);
+            min_overdrive[aec->nlp_mode]);
     }
 
     // Smooth the overdrive.
     if (aec->overDrive < aec->overDriveSm) {
       aec->overDriveSm = 0.99f * aec->overDriveSm + 0.01f * aec->overDrive;
     }
     else {
       aec->overDriveSm = 0.9f * aec->overDriveSm + 0.1f * aec->overDrive;
@@ -1460,17 +1486,16 @@ static void InitStats(Stats* stats) {
   stats->sum = 0;
   stats->hisum = 0;
   stats->himean = kOffsetLevel;
   stats->counter = 0;
   stats->hicounter = 0;
 }
 
 static void InitMetrics(AecCore* self) {
-  assert(self != NULL);
   self->stateCounter = 0;
   InitLevel(&self->farlevel);
   InitLevel(&self->nearlevel);
   InitLevel(&self->linoutlevel);
   InitLevel(&self->nlpoutlevel);
 
   InitStats(&self->erl);
   InitStats(&self->erle);
@@ -1682,8 +1707,9 @@ static void TimeToFrequency(float time_d
   freq_data[1][PART_LEN] = 0;
   freq_data[0][0] = time_data[0];
   freq_data[0][PART_LEN] = time_data[1];
   for (i = 1; i < PART_LEN; i++) {
     freq_data[0][i] = time_data[2 * i];
     freq_data[1][i] = time_data[2 * i + 1];
   }
 }
+
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.h
@@ -65,31 +65,46 @@ void WebRtcAec_ProcessFrame(AecCore* aec
                             int knownDelay,
                             int16_t* out,
                             int16_t* outH);
 
 // A helper function to call WebRtc_MoveReadPtr() for all far-end buffers.
 // Returns the number of elements moved, and adjusts |system_delay| by the
 // corresponding amount in ms.
 int WebRtcAec_MoveFarReadPtr(AecCore* aec, int elements);
+
 // Calculates the median and standard deviation among the delay estimates
 // collected since the last call to this function.
 int WebRtcAec_GetDelayMetricsCore(AecCore* self, int* median, int* std);
+
 // Returns the echo state (1: echo, 0: no echo).
 int WebRtcAec_echo_state(AecCore* self);
+
 // Gets statistics of the echo metrics ERL, ERLE, A_NLP.
 void WebRtcAec_GetEchoStats(AecCore* self, Stats* erl, Stats* erle,
                             Stats* a_nlp);
 #ifdef WEBRTC_AEC_DEBUG_DUMP
 void* WebRtcAec_far_time_buf(AecCore* self);
 #endif
+
 // Sets local configuration modes.
 void WebRtcAec_SetConfigCore(AecCore* self, int nlp_mode, int metrics_mode,
                              int delay_logging);
+
+// We now interpret delay correction to mean an extended filter length feature.
+// We reuse the delay correction infrastructure to avoid changes through to
+// libjingle. See details along with |DelayCorrection| in
+// echo_cancellation_impl.h. Non-zero enables, zero disables.
+void WebRtcAec_enable_delay_correction(AecCore* self, int enable);
+
+// Returns non-zero if delay correction is enabled and zero if disabled.
+int WebRtcAec_delay_correction_enabled(AecCore* self);
+
 // Returns the current |system_delay|, i.e., the buffered difference between
 // far-end and near-end.
 int WebRtcAec_system_delay(AecCore* self);
+
 // Sets the |system_delay| to |value|.  Note that if the value is changed
 // improperly, there can be a performance regression.  So it should be used with
 // care.
 void WebRtcAec_SetSystemDelay(AecCore* self, int delay);
 
 #endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
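A minimal usage sketch of the two new accessors, assuming an already-created AecCore* (error handling elided; the partition counts come from aec_core_internal.h):

    #include <assert.h>
    #include "webrtc/modules/audio_processing/aec/aec_core.h"

    static void ConfigureExtendedFilter(AecCore* core, int enable) {
      // Non-zero selects the 32-partition extended filter; zero restores
      // the normal 12-partition filter.
      WebRtcAec_enable_delay_correction(core, enable);
      assert(WebRtcAec_delay_correction_enabled(core) == enable);
    }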
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_internal.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_internal.h
@@ -17,16 +17,26 @@
 
 #include "webrtc/modules/audio_processing/aec/aec_core.h"
 #include "webrtc/modules/audio_processing/utility/ring_buffer.h"
 #include "webrtc/typedefs.h"
 
 #define NR_PART 12  // Number of partitions in filter.
 #define PREF_BAND_SIZE 24
 
+// Number of partitions for the extended filter mode. The first one is an enum
+// to be used in array declarations, as it represents the maximum filter length.
+enum { kExtendedNumPartitions = 32 };
+static const int kNormalNumPartitions = 12;
+
+// Extended filter adaptation parameters.
+// TODO(ajm): No narrowband tuning yet.
+static const float kExtendedMu = 0.4f;
+static const float kExtendedErrorThreshold = 1.0e-6f;
+
 typedef struct PowerLevel {
   float sfrsum;
   int sfrcounter;
   float framelevel;
   float frsum;
   int frcounter;
   float minlevel;
   float averagelevel;
@@ -51,21 +61,22 @@ struct AecCore {
   float dBufH[PART_LEN2];  // nearend
 
   float xPow[PART_LEN1];
   float dPow[PART_LEN1];
   float dMinPow[PART_LEN1];
   float dInitMinPow[PART_LEN1];
   float *noisePow;
 
-  float xfBuf[2][NR_PART * PART_LEN1];  // farend fft buffer
-  float wfBuf[2][NR_PART * PART_LEN1];  // filter fft
+  float xfBuf[2][kExtendedNumPartitions * PART_LEN1];  // farend fft buffer
+  float wfBuf[2][kExtendedNumPartitions * PART_LEN1];  // filter fft
   complex_t sde[PART_LEN1];  // cross-psd of nearend and error
   complex_t sxd[PART_LEN1];  // cross-psd of farend and nearend
-  complex_t xfwBuf[NR_PART * PART_LEN1];  // farend windowed fft buffer
+  // Farend windowed fft buffer.
+  complex_t xfwBuf[kExtendedNumPartitions * PART_LEN1];
 
   float sx[PART_LEN1], sd[PART_LEN1], se[PART_LEN1];  // far, near, error psd
   float hNs[PART_LEN1];
   float hNlFbMin, hNlFbLocalMin;
   float hNlXdAvgMin;
   int hNlNewMin, hNlMinCtr;
   float overDrive, overDriveSm;
   int nlp_mode;
@@ -80,18 +91,18 @@ struct AecCore {
   RingBuffer* far_buf;
   RingBuffer* far_buf_windowed;
   int system_delay;  // Current system delay buffered in AEC.
 
   int mult;  // sampling frequency multiple
   int sampFreq;
   uint32_t seed;
 
-  float mu;  // stepsize
-  float errThresh;  // error threshold
+  float normal_mu;  // stepsize
+  float normal_error_threshold;  // error threshold
 
   int noiseEstCtr;
 
   PowerLevel farlevel;
   PowerLevel nearlevel;
   PowerLevel linoutlevel;
   PowerLevel nlpoutlevel;
 
@@ -107,16 +118,21 @@ struct AecCore {
   int flag_Hband_cn;  // for comfort noise
   float cn_scale_Hband;  // scale for comfort noise in H band
 
   int delay_histogram[kHistorySizeBlocks];
   int delay_logging_enabled;
   void* delay_estimator_farend;
   void* delay_estimator;
 
+  // 1 = extended filter mode enabled, 0 = disabled.
+  int extended_filter_enabled;
+  // Runtime selection of number of filter partitions.
+  int num_partitions;
+
 #ifdef WEBRTC_AEC_DEBUG_DUMP
   RingBuffer* far_time_buf;
   FILE *farFile;
   FILE *nearFile;
   FILE *outFile;
   FILE *outLinearFile;
 #endif
 };
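Sizing the FFT buffers for kExtendedNumPartitions up front trades a larger static footprint for the ability to switch filter lengths at runtime without reallocating. A back-of-the-envelope check, assuming PART_LEN is 64 (so PART_LEN1 == 65) as in the upstream sources:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kPartLen1 = 65;                // PART_LEN + 1 (assumed)
      const size_t kExtended = 32, kNormal = 12;  // partition counts
      // xfBuf/wfBuf are float[2][N * PART_LEN1]; xfwBuf holds complex_t
      // (two floats) per bin, i.e. the same footprint per partition.
      const size_t extended = 2 * kExtended * kPartLen1 * sizeof(float);
      const size_t normal   = 2 * kNormal   * kPartLen1 * sizeof(float);
      std::printf("%zu bytes vs %zu bytes per buffer\n", extended, normal);
      return 0;  // Prints 16640 vs 6240: roughly 10 KB extra per buffer.
    }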
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_sse2.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_sse2.c
@@ -29,23 +29,24 @@
 __inline static float MulIm(float aRe, float aIm, float bRe, float bIm)
 {
   return aRe * bIm + aIm * bRe;
 }
 
 static void FilterFarSSE2(AecCore* aec, float yf[2][PART_LEN1])
 {
   int i;
-  for (i = 0; i < NR_PART; i++) {
+  const int num_partitions = aec->num_partitions;
+  for (i = 0; i < num_partitions; i++) {
     int j;
     int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
     int pos = i * PART_LEN1;
     // Check for wrap
-    if (i + aec->xfBufBlockPos >= NR_PART) {
-      xPos -= NR_PART*(PART_LEN1);
+    if (i + aec->xfBufBlockPos >= num_partitions) {
+      xPos -= num_partitions*(PART_LEN1);
     }
 
     // vectorized code (four at once)
     for (j = 0; j + 3 < PART_LEN1; j += 4) {
       const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
       const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
       const __m128 wfBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
       const __m128 wfBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
@@ -70,18 +71,21 @@ static void FilterFarSSE2(AecCore* aec, 
                         aec->wfBuf[0][ pos + j], aec->wfBuf[1][ pos + j]);
     }
   }
 }
 
 static void ScaleErrorSignalSSE2(AecCore* aec, float ef[2][PART_LEN1])
 {
   const __m128 k1e_10f = _mm_set1_ps(1e-10f);
-  const __m128 kThresh = _mm_set1_ps(aec->errThresh);
-  const __m128 kMu = _mm_set1_ps(aec->mu);
+  const __m128 kMu = aec->extended_filter_enabled ?
+      _mm_set1_ps(kExtendedMu) : _mm_set1_ps(aec->normal_mu);
+  const __m128 kThresh = aec->extended_filter_enabled ?
+      _mm_set1_ps(kExtendedErrorThreshold) :
+      _mm_set1_ps(aec->normal_error_threshold);
 
   int i;
   // vectorized code (four at once)
   for (i = 0; i + 3 < PART_LEN1; i += 4) {
     const __m128 xPow = _mm_loadu_ps(&aec->xPow[i]);
     const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
     const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);
 
@@ -105,42 +109,49 @@ static void ScaleErrorSignalSSE2(AecCore
     ef_im = _mm_or_ps(ef_im, ef_im_if);
     ef_re = _mm_mul_ps(ef_re, kMu);
     ef_im = _mm_mul_ps(ef_im, kMu);
 
     _mm_storeu_ps(&ef[0][i], ef_re);
     _mm_storeu_ps(&ef[1][i], ef_im);
   }
   // scalar code for the remaining items.
-  for (; i < (PART_LEN1); i++) {
-    float absEf;
-    ef[0][i] /= (aec->xPow[i] + 1e-10f);
-    ef[1][i] /= (aec->xPow[i] + 1e-10f);
-    absEf = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
+  {
+    const float mu = aec->extended_filter_enabled ?
+        kExtendedMu : aec->normal_mu;
+    const float error_threshold = aec->extended_filter_enabled ?
+        kExtendedErrorThreshold : aec->normal_error_threshold;
+    for (; i < (PART_LEN1); i++) {
+      float abs_ef;
+      ef[0][i] /= (aec->xPow[i] + 1e-10f);
+      ef[1][i] /= (aec->xPow[i] + 1e-10f);
+      abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
 
-    if (absEf > aec->errThresh) {
-      absEf = aec->errThresh / (absEf + 1e-10f);
-      ef[0][i] *= absEf;
-      ef[1][i] *= absEf;
+      if (abs_ef > error_threshold) {
+        abs_ef = error_threshold / (abs_ef + 1e-10f);
+        ef[0][i] *= abs_ef;
+        ef[1][i] *= abs_ef;
+      }
+
+      // Stepsize factor
+      ef[0][i] *= mu;
+      ef[1][i] *= mu;
     }
-
-    // Stepsize factor
-    ef[0][i] *= aec->mu;
-    ef[1][i] *= aec->mu;
   }
 }
 
 static void FilterAdaptationSSE2(AecCore* aec, float *fft, float ef[2][PART_LEN1]) {
   int i, j;
-  for (i = 0; i < NR_PART; i++) {
+  const int num_partitions = aec->num_partitions;
+  for (i = 0; i < num_partitions; i++) {
     int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
     int pos = i * PART_LEN1;
     // Check for wrap
-    if (i + aec->xfBufBlockPos >= NR_PART) {
-      xPos -= NR_PART * PART_LEN1;
+    if (i + aec->xfBufBlockPos >= num_partitions) {
+      xPos -= num_partitions * PART_LEN1;
     }
 
     // Process the whole array...
     for (j = 0; j < PART_LEN; j+= 4) {
       // Load xfBuf and ef.
       const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
       const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
       const __m128 ef_re = _mm_loadu_ps(&ef[0][j]);
@@ -408,8 +419,9 @@ static void OverdriveAndSuppressSSE2(Aec
 }
 
 void WebRtcAec_InitAec_SSE2(void) {
   WebRtcAec_FilterFar = FilterFarSSE2;
   WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
   WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
   WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
 }
+
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation.c
@@ -22,33 +22,95 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/aec/aec_core.h"
 #include "webrtc/modules/audio_processing/aec/aec_resampler.h"
 #include "webrtc/modules/audio_processing/aec/echo_cancellation_internal.h"
 #include "webrtc/modules/audio_processing/utility/ring_buffer.h"
 #include "webrtc/typedefs.h"
 
+// Measured delays [ms]
+// Device                Chrome  GTP
+// MacBook Air           10
+// MacBook Retina        10      100
+// MacPro                30?
+//
+// Win7 Desktop          70      80?
+// Win7 T430s            110
+// Win8 T420s            70
+//
+// Daisy                 50
+// Pixel (w/ preproc?)           240
+// Pixel (w/o preproc?)  110     110
+
+// The extended filter mode gives us the flexibility to ignore the system's
+// reported delays. We do this for platforms which we believe provide results
+// which are incompatible with the AEC's expectations. Based on measurements
+// (some provided above) we set a conservative (i.e. lower than measured)
+// fixed delay.
+//
+// WEBRTC_UNTRUSTED_DELAY will only have an impact when |extended_filter_mode|
+// is enabled. See the note along with |DelayCorrection| in
+// echo_cancellation_impl.h for more details on the mode.
+//
+// Justification:
+// Chromium/Mac: Here, the true latency is so low (~10-20 ms), that it plays
+// havoc with the AEC's buffering. To avoid this, we set a fixed delay of 20 ms
+// and then compensate by rewinding by 10 ms (in wideband) through
+// kDelayDiffOffsetSamples. This trick does not seem to work for larger rewind
+// values, but fortunately this is sufficient.
+//
+// Chromium/Linux(ChromeOS): The values we get on this platform don't correspond
+// well to reality. The variance doesn't match the AEC's buffer changes, and the
+// bulk values tend to be too low. However, the range across different hardware
+// appears to be too large to choose a single value.
+//
+// GTP/Linux(ChromeOS): TBD, but for the moment we will trust the values.
+#if defined(WEBRTC_CHROMIUM_BUILD) && defined(WEBRTC_MAC)
+#define WEBRTC_UNTRUSTED_DELAY
+#endif
+
+#if defined(WEBRTC_MAC)
+static const int kFixedDelayMs = 20;
+static const int kDelayDiffOffsetSamples = -160;
+#elif defined(WEBRTC_WIN)
+static const int kFixedDelayMs = 50;
+static const int kDelayDiffOffsetSamples = 0;
+#else
+// Essentially ChromeOS.
+static const int kFixedDelayMs = 50;
+static const int kDelayDiffOffsetSamples = 0;
+#endif
+static const int kMinTrustedDelayMs = 20;
+static const int kMaxTrustedDelayMs = 500;
+
 // Maximum length of resampled signal. Must be an integer multiple of frames
 // (ceil(1/(1 + MIN_SKEW)*2) + 1)*FRAME_LEN
 // The factor of 2 handles wb, and the + 1 is a safety margin
 // TODO(bjornv): Replace with kResamplerBufferSize
 #define MAX_RESAMP_LEN (5 * FRAME_LEN)
 
 static const int kMaxBufSizeStart = 62;  // In partitions
 static const int sampMsNb = 8; // samples per ms in nb
 static const int initCheck = 42;
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
 int webrtc_aec_instance_count = 0;
 #endif
 
 // Estimates delay to set the position of the far-end buffer read pointer
 // (controlled by knownDelay)
-static int EstBufDelay(aecpc_t *aecInst);
+static void EstBufDelayNormal(aecpc_t *aecInst);
+static void EstBufDelayExtended(aecpc_t *aecInst);
+static int ProcessNormal(aecpc_t* self, const int16_t* near,
+    const int16_t* near_high, int16_t* out, int16_t* out_high,
+    int16_t num_samples, int16_t reported_delay_ms, int32_t skew);
+static void ProcessExtended(aecpc_t* self, const int16_t* near,
+    const int16_t* near_high, int16_t* out, int16_t* out_high,
+    int16_t num_samples, int16_t reported_delay_ms, int32_t skew);
 
 int32_t WebRtcAec_Create(void **aecInst)
 {
     aecpc_t *aecpc;
     if (aecInst == NULL) {
         return -1;
     }
 
@@ -130,20 +192,16 @@ int32_t WebRtcAec_Free(void *aecInst)
     return 0;
 }
 
 int32_t WebRtcAec_Init(void *aecInst, int32_t sampFreq, int32_t scSampFreq)
 {
     aecpc_t *aecpc = aecInst;
     AecConfig aecConfig;
 
-    if (aecpc == NULL) {
-        return -1;
-    }
-
     if (sampFreq != 8000 && sampFreq != 16000  && sampFreq != 32000) {
         aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
         return -1;
     }
     aecpc->sampFreq = sampFreq;
 
     if (scSampFreq < 1 || scSampFreq > 96000) {
         aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
@@ -172,41 +230,41 @@ int32_t WebRtcAec_Init(void *aecInst, in
 
     if (aecpc->sampFreq == 32000) {
         aecpc->splitSampFreq = 16000;
     }
     else {
         aecpc->splitSampFreq = sampFreq;
     }
 
-    aecpc->skewFrCtr = 0;
-    aecpc->activity = 0;
-
     aecpc->delayCtr = 0;
+    aecpc->sampFactor = (aecpc->scSampFreq * 1.0f) / aecpc->splitSampFreq;
+    // Sampling frequency multiplier (SWB is processed as 160 frame size).
+    aecpc->rate_factor = aecpc->splitSampFreq / 8000;
 
     aecpc->sum = 0;
     aecpc->counter = 0;
     aecpc->checkBuffSize = 1;
     aecpc->firstVal = 0;
 
-    aecpc->ECstartup = 1;
+    aecpc->startup_phase = 1;
     aecpc->bufSizeStart = 0;
     aecpc->checkBufSizeCtr = 0;
-    aecpc->filtDelay = 0;
+    aecpc->msInSndCardBuf = 0;
+    aecpc->filtDelay = -1;  // -1 indicates an uninitialized state.
     aecpc->timeForDelayChange = 0;
     aecpc->knownDelay = 0;
     aecpc->lastDelayDiff = 0;
 
-    aecpc->skew = 0;
+    aecpc->skewFrCtr = 0;
     aecpc->resample = kAecFalse;
     aecpc->highSkewCtr = 0;
-    aecpc->sampFactor = (aecpc->scSampFreq * 1.0f) / aecpc->splitSampFreq;
+    aecpc->skew = 0;
 
-    // Sampling frequency multiplier (SWB is processed as 160 frame size).
-    aecpc->rate_factor = aecpc->splitSampFreq / 8000;
+    aecpc->farend_started = 0;
 
     // Default settings.
     aecConfig.nlpMode = kAecNlpModerate;
     aecConfig.skewMode = kAecFalse;
     aecConfig.metricsMode = kAecFalse;
     aecConfig.delay_logging = kAecFalse;
 
     if (WebRtcAec_set_config(aecpc, aecConfig) == -1) {
@@ -234,20 +292,16 @@ int32_t WebRtcAec_BufferFarend(void *aec
     int newNrOfSamples = (int) nrOfSamples;
     short newFarend[MAX_RESAMP_LEN];
     const int16_t* farend_ptr = farend;
     float tmp_farend[MAX_RESAMP_LEN];
     const float* farend_float = tmp_farend;
     float skew;
     int i = 0;
 
-    if (aecpc == NULL) {
-        return -1;
-    }
-
     if (farend == NULL) {
         aecpc->lastError = AEC_NULL_POINTER_ERROR;
         return -1;
     }
 
     if (aecpc->initFlag != initCheck) {
         aecpc->lastError = AEC_UNINITIALIZED_ERROR;
         return -1;
@@ -263,16 +317,17 @@ int32_t WebRtcAec_BufferFarend(void *aec
 
     if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
         // Resample and get a new number of samples
         WebRtcAec_ResampleLinear(aecpc->resampler, farend, nrOfSamples, skew,
                                  newFarend, &newNrOfSamples);
         farend_ptr = (const int16_t*) newFarend;
     }
 
+    aecpc->farend_started = 1;
     WebRtcAec_SetSystemDelay(aecpc->aec, WebRtcAec_system_delay(aecpc->aec) +
                              newNrOfSamples);
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
     WebRtc_WriteBuffer(aecpc->far_pre_buf_s16, farend_ptr,
                        (size_t) newNrOfSamples);
 #endif
     // Cast to float and write the time-domain data to |far_pre_buf|.
@@ -306,27 +361,16 @@ int32_t WebRtcAec_BufferFarend(void *aec
 
 int32_t WebRtcAec_Process(void *aecInst, const int16_t *nearend,
                           const int16_t *nearendH, int16_t *out, int16_t *outH,
                           int16_t nrOfSamples, int16_t msInSndCardBuf,
                           int32_t skew)
 {
     aecpc_t *aecpc = aecInst;
     int32_t retVal = 0;
-    short i;
-    short nBlocks10ms;
-    short nFrames;
-    // Limit resampling to doubling/halving of signal
-    const float minSkewEst = -0.5f;
-    const float maxSkewEst = 1.0f;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
     if (nearend == NULL) {
         aecpc->lastError = AEC_NULL_POINTER_ERROR;
         return -1;
     }
 
     if (out == NULL) {
         aecpc->lastError = AEC_NULL_POINTER_ERROR;
         return -1;
@@ -349,154 +393,31 @@ int32_t WebRtcAec_Process(void *aecInst,
        return -1;
     }
 
     if (msInSndCardBuf < 0) {
         msInSndCardBuf = 0;
         aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
         retVal = -1;
     }
-    else if (msInSndCardBuf > 500) {
-        msInSndCardBuf = 500;
+    else if (msInSndCardBuf > kMaxTrustedDelayMs) {
+        // The clamping is now done in ProcessExtended/Normal().
         aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
         retVal = -1;
     }
-    // TODO(andrew): we need to investigate if this +10 is really wanted.
-    msInSndCardBuf += 10;
-    aecpc->msInSndCardBuf = msInSndCardBuf;
 
-    if (aecpc->skewMode == kAecTrue) {
-        if (aecpc->skewFrCtr < 25) {
-            aecpc->skewFrCtr++;
-        }
-        else {
-            retVal = WebRtcAec_GetSkew(aecpc->resampler, skew, &aecpc->skew);
-            if (retVal == -1) {
-                aecpc->skew = 0;
-                aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
-            }
-
-            aecpc->skew /= aecpc->sampFactor*nrOfSamples;
-
-            if (aecpc->skew < 1.0e-3 && aecpc->skew > -1.0e-3) {
-                aecpc->resample = kAecFalse;
-            }
-            else {
-                aecpc->resample = kAecTrue;
-            }
-
-            if (aecpc->skew < minSkewEst) {
-                aecpc->skew = minSkewEst;
-            }
-            else if (aecpc->skew > maxSkewEst) {
-                aecpc->skew = maxSkewEst;
-            }
-
-#ifdef WEBRTC_AEC_DEBUG_DUMP
-            (void)fwrite(&aecpc->skew, sizeof(aecpc->skew), 1, aecpc->skewFile);
-#endif
-        }
-    }
-
-    nFrames = nrOfSamples / FRAME_LEN;
-    nBlocks10ms = nFrames / aecpc->rate_factor;
-
-    if (aecpc->ECstartup) {
-        if (nearend != out) {
-            // Only needed if they don't already point to the same place.
-            memcpy(out, nearend, sizeof(short) * nrOfSamples);
-        }
-
-        // The AEC is in the start up mode
-        // AEC is disabled until the system delay is OK
-
-        // Mechanism to ensure that the system delay is reasonably stable.
-        if (aecpc->checkBuffSize) {
-            aecpc->checkBufSizeCtr++;
-            // Before we fill up the far-end buffer we require the system delay
-            // to be stable (+/-8 ms) compared to the first value. This
-            // comparison is made during the following 6 consecutive 10 ms
-            // blocks. If it seems to be stable then we start to fill up the
-            // far-end buffer.
-            if (aecpc->counter == 0) {
-                aecpc->firstVal = aecpc->msInSndCardBuf;
-                aecpc->sum = 0;
-            }
-
-            if (abs(aecpc->firstVal - aecpc->msInSndCardBuf) <
-                WEBRTC_SPL_MAX(0.2 * aecpc->msInSndCardBuf, sampMsNb)) {
-                aecpc->sum += aecpc->msInSndCardBuf;
-                aecpc->counter++;
-            }
-            else {
-                aecpc->counter = 0;
-            }
-
-            if (aecpc->counter * nBlocks10ms >= 6) {
-                // The far-end buffer size is determined in partitions of
-                // PART_LEN samples. Use 75% of the average value of the system
-                // delay as buffer size to start with.
-                aecpc->bufSizeStart = WEBRTC_SPL_MIN((3 * aecpc->sum *
-                  aecpc->rate_factor * 8) / (4 * aecpc->counter * PART_LEN),
-                  kMaxBufSizeStart);
-                // Buffer size has now been determined.
-                aecpc->checkBuffSize = 0;
-            }
-
-            if (aecpc->checkBufSizeCtr * nBlocks10ms > 50) {
-                // For really bad systems, don't disable the echo canceller for
-                // more than 0.5 sec.
-                aecpc->bufSizeStart = WEBRTC_SPL_MIN((aecpc->msInSndCardBuf *
-                    aecpc->rate_factor * 3) / 40, kMaxBufSizeStart);
-                aecpc->checkBuffSize = 0;
-            }
-        }
-
-        // If |checkBuffSize| changed in the if-statement above.
-        if (!aecpc->checkBuffSize) {
-            // The system delay is now reasonably stable (or has been unstable
-            // for too long). When the far-end buffer is filled with
-            // approximately the same amount of data as reported by the system
-            // we end the startup phase.
-            int overhead_elements =
-                WebRtcAec_system_delay(aecpc->aec) / PART_LEN -
-                aecpc->bufSizeStart;
-            if (overhead_elements == 0) {
-                // Enable the AEC
-                aecpc->ECstartup = 0;
-            } else if (overhead_elements > 0) {
-                // TODO(bjornv): Do we need a check on how much we actually
-                // moved the read pointer? It should always be possible to move
-                // the pointer |overhead_elements| since we have only added data
-                // to the buffer and no delay compensation nor AEC processing
-                // has been done.
-                WebRtcAec_MoveFarReadPtr(aecpc->aec, overhead_elements);
-
-                // Enable the AEC
-                aecpc->ECstartup = 0;
-            }
-        }
+    // This returns the value of aec->extended_filter_enabled.
+    if (WebRtcAec_delay_correction_enabled(aecpc->aec)) {
+      ProcessExtended(aecpc, nearend, nearendH, out, outH, nrOfSamples,
+                      msInSndCardBuf, skew);
     } else {
-        // AEC is enabled.
-
-        EstBufDelay(aecpc);
-
-        // Note that 1 frame is supported for NB and 2 frames for WB.
-        for (i = 0; i < nFrames; i++) {
-            // Call the AEC.
-            WebRtcAec_ProcessFrame(aecpc->aec,
-                                   &nearend[FRAME_LEN * i],
-                                   &nearendH[FRAME_LEN * i],
-                                   aecpc->knownDelay,
-                                   &out[FRAME_LEN * i],
-                                   &outH[FRAME_LEN * i]);
-            // TODO(bjornv): Re-structure such that we don't have to pass
-            // |aecpc->knownDelay| as input. Change name to something like
-            // |system_buffer_diff|.
-        }
+      if (ProcessNormal(aecpc, nearend, nearendH, out, outH, nrOfSamples,
+                        msInSndCardBuf, skew) != 0) {
+        retVal = -1;
+      }
     }
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
     {
         int16_t far_buf_size_ms = (int16_t)(WebRtcAec_system_delay(aecpc->aec) /
             (sampMsNb * aecpc->rate_factor));
         (void)fwrite(&far_buf_size_ms, 2, 1, aecpc->bufFile);
         (void)fwrite(&aecpc->knownDelay, sizeof(aecpc->knownDelay), 1,
@@ -504,21 +425,16 @@ int32_t WebRtcAec_Process(void *aecInst,
     }
 #endif
 
     return retVal;
 }
 
 int WebRtcAec_set_config(void* handle, AecConfig config) {
   aecpc_t* self = (aecpc_t*)handle;
-
-  if (handle == NULL ) {
-    return -1;
-  }
-
   if (self->initFlag != initCheck) {
     self->lastError = AEC_UNINITIALIZED_ERROR;
     return -1;
   }
 
   if (config.skewMode != kAecFalse && config.skewMode != kAecTrue) {
     self->lastError = AEC_BAD_PARAMETER_ERROR;
     return -1;
@@ -543,20 +459,16 @@ int WebRtcAec_set_config(void* handle, A
 
   WebRtcAec_SetConfigCore(self->aec, config.nlpMode, config.metricsMode,
                           config.delay_logging);
   return 0;
 }
 
 int WebRtcAec_get_echo_status(void* handle, int* status) {
   aecpc_t* self = (aecpc_t*)handle;
-
-  if (handle == NULL ) {
-    return -1;
-  }
   if (status == NULL ) {
     self->lastError = AEC_NULL_POINTER_ERROR;
     return -1;
   }
   if (self->initFlag != initCheck) {
     self->lastError = AEC_UNINITIALIZED_ERROR;
     return -1;
   }
@@ -660,20 +572,16 @@ int WebRtcAec_GetMetrics(void* handle, A
     metrics->aNlp.min = kOffsetLevel;
   }
 
   return 0;
 }
 
 int WebRtcAec_GetDelayMetrics(void* handle, int* median, int* std) {
   aecpc_t* self = handle;
-
-  if (handle == NULL) {
-    return -1;
-  }
   if (median == NULL) {
     self->lastError = AEC_NULL_POINTER_ERROR;
     return -1;
   }
   if (std == NULL) {
     self->lastError = AEC_NULL_POINTER_ERROR;
     return -1;
   }
@@ -688,32 +596,245 @@ int WebRtcAec_GetDelayMetrics(void* hand
   }
 
   return 0;
 }
 
 int32_t WebRtcAec_get_error_code(void *aecInst)
 {
     aecpc_t *aecpc = aecInst;
-
-    if (aecpc == NULL) {
-        return -1;
-    }
-
     return aecpc->lastError;
 }
 
 AecCore* WebRtcAec_aec_core(void* handle) {
   if (!handle) {
     return NULL;
   }
   return ((aecpc_t*) handle)->aec;
 }
 
-static int EstBufDelay(aecpc_t* aecpc) {
+static int ProcessNormal(aecpc_t *aecpc, const int16_t *nearend,
+                         const int16_t *nearendH, int16_t *out, int16_t *outH,
+                         int16_t nrOfSamples, int16_t msInSndCardBuf,
+                         int32_t skew) {
+  int retVal = 0;
+  short i;
+  short nBlocks10ms;
+  short nFrames;
+  // Limit resampling to doubling/halving of signal
+  const float minSkewEst = -0.5f;
+  const float maxSkewEst = 1.0f;
+
+  msInSndCardBuf = msInSndCardBuf > kMaxTrustedDelayMs ?
+      kMaxTrustedDelayMs : msInSndCardBuf;
+  // TODO(andrew): we need to investigate if this +10 is really wanted.
+  msInSndCardBuf += 10;
+  aecpc->msInSndCardBuf = msInSndCardBuf;
+
+  if (aecpc->skewMode == kAecTrue) {
+    if (aecpc->skewFrCtr < 25) {
+      aecpc->skewFrCtr++;
+    }
+    else {
+      retVal = WebRtcAec_GetSkew(aecpc->resampler, skew, &aecpc->skew);
+      if (retVal == -1) {
+        aecpc->skew = 0;
+        aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
+      }
+
+      aecpc->skew /= aecpc->sampFactor*nrOfSamples;
+
+      if (aecpc->skew < 1.0e-3 && aecpc->skew > -1.0e-3) {
+        aecpc->resample = kAecFalse;
+      }
+      else {
+        aecpc->resample = kAecTrue;
+      }
+
+      if (aecpc->skew < minSkewEst) {
+        aecpc->skew = minSkewEst;
+      }
+      else if (aecpc->skew > maxSkewEst) {
+        aecpc->skew = maxSkewEst;
+      }
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+      (void)fwrite(&aecpc->skew, sizeof(aecpc->skew), 1, aecpc->skewFile);
+#endif
+    }
+  }
+
+  nFrames = nrOfSamples / FRAME_LEN;
+  nBlocks10ms = nFrames / aecpc->rate_factor;
+
+  if (aecpc->startup_phase) {
+    // Only needed if they don't already point to the same place.
+    if (nearend != out) {
+      memcpy(out, nearend, sizeof(short) * nrOfSamples);
+    }
+    if (nearendH != outH) {
+      memcpy(outH, nearendH, sizeof(short) * nrOfSamples);
+    }
+
+    // The AEC is in startup mode: it stays disabled until the system delay
+    // has stabilized.
+
+    // Mechanism to ensure that the system delay is reasonably stable.
+    if (aecpc->checkBuffSize) {
+      aecpc->checkBufSizeCtr++;
+      // Before we fill up the far-end buffer we require the system delay
+      // to be stable (+/-8 ms) compared to the first value. This
+      // comparison is made during the following 6 consecutive 10 ms
+      // blocks. If it seems to be stable then we start to fill up the
+      // far-end buffer.
+      if (aecpc->counter == 0) {
+        aecpc->firstVal = aecpc->msInSndCardBuf;
+        aecpc->sum = 0;
+      }
+
+      if (abs(aecpc->firstVal - aecpc->msInSndCardBuf) <
+          WEBRTC_SPL_MAX(0.2 * aecpc->msInSndCardBuf, sampMsNb)) {
+        aecpc->sum += aecpc->msInSndCardBuf;
+        aecpc->counter++;
+      }
+      else {
+        aecpc->counter = 0;
+      }
+
+      if (aecpc->counter * nBlocks10ms >= 6) {
+        // The far-end buffer size is determined in partitions of
+        // PART_LEN samples. Use 75% of the average value of the system
+        // delay as buffer size to start with.
+        aecpc->bufSizeStart = WEBRTC_SPL_MIN((3 * aecpc->sum *
+            aecpc->rate_factor * 8) / (4 * aecpc->counter * PART_LEN),
+            kMaxBufSizeStart);
+        // Buffer size has now been determined.
+        aecpc->checkBuffSize = 0;
+      }
+
+      if (aecpc->checkBufSizeCtr * nBlocks10ms > 50) {
+        // For really bad systems, don't disable the echo canceller for
+        // more than 0.5 sec.
+        aecpc->bufSizeStart = WEBRTC_SPL_MIN((aecpc->msInSndCardBuf *
+            aecpc->rate_factor * 3) / 40, kMaxBufSizeStart);
+        aecpc->checkBuffSize = 0;
+      }
+    }
+
+    // If |checkBuffSize| changed in the if-statement above.
+    if (!aecpc->checkBuffSize) {
+      // The system delay is now reasonably stable (or has been unstable
+      // for too long). When the far-end buffer is filled with
+      // approximately the same amount of data as reported by the system
+      // we end the startup phase.
+      int overhead_elements =
+          WebRtcAec_system_delay(aecpc->aec) / PART_LEN - aecpc->bufSizeStart;
+      if (overhead_elements == 0) {
+        // Enable the AEC
+        aecpc->startup_phase = 0;
+      } else if (overhead_elements > 0) {
+        // TODO(bjornv): Do we need a check on how much we actually
+        // moved the read pointer? It should always be possible to move
+        // the pointer |overhead_elements| since we have only added data
+        // to the buffer and no delay compensation nor AEC processing
+        // has been done.
+        WebRtcAec_MoveFarReadPtr(aecpc->aec, overhead_elements);
+
+        // Enable the AEC
+        aecpc->startup_phase = 0;
+      }
+    }
+  } else {
+    // AEC is enabled.
+    EstBufDelayNormal(aecpc);
+
+    // Note that 1 frame is supported for NB and 2 frames for WB.
+    for (i = 0; i < nFrames; i++) {
+      // Call the AEC.
+      WebRtcAec_ProcessFrame(aecpc->aec,
+                             &nearend[FRAME_LEN * i],
+                             &nearendH[FRAME_LEN * i],
+                             aecpc->knownDelay,
+                             &out[FRAME_LEN * i],
+                             &outH[FRAME_LEN * i]);
+      // TODO(bjornv): Re-structure such that we don't have to pass
+      // |aecpc->knownDelay| as input. Change name to something like
+      // |system_buffer_diff|.
+    }
+  }
+
+  return retVal;
+}
+
+static void ProcessExtended(aecpc_t* self, const int16_t* near,
+    const int16_t* near_high, int16_t* out, int16_t* out_high,
+    int16_t num_samples, int16_t reported_delay_ms, int32_t skew) {
+  int i;
+  const int num_frames = num_samples / FRAME_LEN;
+#if defined(WEBRTC_UNTRUSTED_DELAY)
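+  // The delays reported on this platform are deemed too unreliable to use;
+  // ignore them and rely on fixed, measured values instead (see the
+  // DelayCorrection notes in echo_cancellation_impl.h).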
+  const int delay_diff_offset = kDelayDiffOffsetSamples;
+  reported_delay_ms = kFixedDelayMs;
+#else
+  // This is the usual mode where we trust the reported system delay values.
+  const int delay_diff_offset = 0;
+  // Due to the longer filter, we no longer add 10 ms to the reported delay
+  // to reduce the chance of non-causality. Instead we apply a minimum here to
+  // avoid issues with the read pointer jumping around needlessly.
+  reported_delay_ms = reported_delay_ms < kMinTrustedDelayMs ?
+      kMinTrustedDelayMs : reported_delay_ms;
+  // If the reported delay appears to be bogus, we attempt to recover by using
+  // the measured fixed delay values. We use >= here because higher layers
+  // may already clamp to this maximum value, and we would otherwise not
+  // detect it here.
+  reported_delay_ms = reported_delay_ms >= kMaxTrustedDelayMs ?
+      kFixedDelayMs : reported_delay_ms;
+#endif
+  self->msInSndCardBuf = reported_delay_ms;
+
+  if (!self->farend_started) {
+    // Only needed if they don't already point to the same place.
+    if (near != out) {
+      memcpy(out, near, sizeof(short) * num_samples);
+    }
+    if (near_high != out_high) {
+      memcpy(out_high, near_high, sizeof(short) * num_samples);
+    }
+    return;
+  }
+  if (self->startup_phase) {
+    // In the extended mode, there isn't a startup "phase", just a special
+    // action on the first frame. In the trusted delay case, we'll take the
+    // current reported delay, unless it's less then our conservative
+    // measurement.
+    int startup_size_ms = reported_delay_ms < kFixedDelayMs ?
+        kFixedDelayMs : reported_delay_ms;
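+    // Move the far-end read pointer so that roughly half of
+    // |startup_size_ms| stays buffered, converted to samples at the split
+    // rate (8 * rate_factor samples per ms).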
+    int overhead_elements = (WebRtcAec_system_delay(self->aec) -
+        startup_size_ms / 2 * self->rate_factor * 8) / PART_LEN;
+    WebRtcAec_MoveFarReadPtr(self->aec, overhead_elements);
+    self->startup_phase = 0;
+  }
+
+  EstBufDelayExtended(self);
+
+  {
+    // |delay_diff_offset| gives us the option to manually rewind the delay on
+    // very low delay platforms, where the correction can't be expressed
+    // purely through |reported_delay_ms|.
+    const int adjusted_known_delay =
+        WEBRTC_SPL_MAX(0, self->knownDelay + delay_diff_offset);
+
+    for (i = 0; i < num_frames; ++i) {
+      WebRtcAec_ProcessFrame(self->aec, &near[FRAME_LEN * i],
+          &near_high[FRAME_LEN * i], adjusted_known_delay,
+          &out[FRAME_LEN * i], &out_high[FRAME_LEN * i]);
+    }
+  }
+}
+
+static void EstBufDelayNormal(aecpc_t* aecpc) {
   int nSampSndCard = aecpc->msInSndCardBuf * sampMsNb * aecpc->rate_factor;
   int current_delay = nSampSndCard - WebRtcAec_system_delay(aecpc->aec);
   int delay_difference = 0;
 
   // Before we proceed with the delay estimate filtering we:
   // 1) Compensate for the frame that will be read.
   // 2) Compensate for drift resampling.
   // 3) Compensate for non-causality if needed, since the estimated delay can't
@@ -727,18 +848,21 @@ static int EstBufDelay(aecpc_t* aecpc) {
     current_delay -= kResamplingDelay;
   }
 
   // 3) Compensate for non-causality, if needed, by flushing one block.
   if (current_delay < PART_LEN) {
     current_delay += WebRtcAec_MoveFarReadPtr(aecpc->aec, 1) * PART_LEN;
   }
 
+  // We use -1 to signal an uninitialized state in the "extended"
+  // implementation; compensate for that here.
+  aecpc->filtDelay = aecpc->filtDelay < 0 ? 0 : aecpc->filtDelay;
   aecpc->filtDelay = WEBRTC_SPL_MAX(0, (short) (0.8 * aecpc->filtDelay +
-          0.2 * current_delay));
+      0.2 * current_delay));
 
   delay_difference = aecpc->filtDelay - aecpc->knownDelay;
   if (delay_difference > 224) {
     if (aecpc->lastDelayDiff < 96) {
       aecpc->timeForDelayChange = 0;
     } else {
       aecpc->timeForDelayChange++;
     }
@@ -751,11 +875,63 @@ static int EstBufDelay(aecpc_t* aecpc) {
   } else {
     aecpc->timeForDelayChange = 0;
   }
   aecpc->lastDelayDiff = delay_difference;
 
   if (aecpc->timeForDelayChange > 25) {
     aecpc->knownDelay = WEBRTC_SPL_MAX((int) aecpc->filtDelay - 160, 0);
   }
+}
 
-  return 0;
+static void EstBufDelayExtended(aecpc_t* self) {
+  int reported_delay = self->msInSndCardBuf * sampMsNb * self->rate_factor;
+  int current_delay = reported_delay - WebRtcAec_system_delay(self->aec);
+  int delay_difference = 0;
+
+  // Before we proceed with the delay estimate filtering we:
+  // 1) Compensate for the frame that will be read.
+  // 2) Compensate for drift resampling.
+  // 3) Compensate for non-causality if needed, since the estimated delay can't
+  //    be negative.
+
+  // 1) Compensating for the frame(s) that will be read/processed.
+  current_delay += FRAME_LEN * self->rate_factor;
+
+  // 2) Account for resampling frame delay.
+  if (self->skewMode == kAecTrue && self->resample == kAecTrue) {
+    current_delay -= kResamplingDelay;
+  }
+
+  // 3) Compensate for non-causality, if needed, by flushing two blocks.
+  if (current_delay < PART_LEN) {
+    current_delay += WebRtcAec_MoveFarReadPtr(self->aec, 2) * PART_LEN;
+  }
+
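+  // The delay estimate is smoothed much more heavily here (0.95/0.05) than in
+  // EstBufDelayNormal() (0.8/0.2), since read pointer adjustments force the
+  // filter to readapt and should be avoided except when really necessary.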
+  if (self->filtDelay == -1) {
+    self->filtDelay = WEBRTC_SPL_MAX(0, 0.5 * current_delay);
+  } else {
+    self->filtDelay = WEBRTC_SPL_MAX(0, (short) (0.95 * self->filtDelay +
+        0.05 * current_delay));
+  }
+
+  delay_difference = self->filtDelay - self->knownDelay;
+  if (delay_difference > 384) {
+    if (self->lastDelayDiff < 128) {
+      self->timeForDelayChange = 0;
+    } else {
+      self->timeForDelayChange++;
+    }
+  } else if (delay_difference < 128 && self->knownDelay > 0) {
+    if (self->lastDelayDiff > 384) {
+      self->timeForDelayChange = 0;
+    } else {
+      self->timeForDelayChange++;
+    }
+  } else {
+    self->timeForDelayChange = 0;
+  }
+  self->lastDelayDiff = delay_difference;
+
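+  // Back off further from the filtered estimate (256 samples) than
+  // EstBufDelayNormal() does (160), again to keep read pointer adjustments
+  // rare.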
+  if (self->timeForDelayChange > 25) {
+    self->knownDelay = WEBRTC_SPL_MAX((int) self->filtDelay - 256, 0);
+  }
 }
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation_internal.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation_internal.h
@@ -15,18 +15,16 @@
 #include "webrtc/modules/audio_processing/utility/ring_buffer.h"
 
 typedef struct {
   int delayCtr;
   int sampFreq;
   int splitSampFreq;
   int scSampFreq;
   float sampFactor;  // scSampRate / sampFreq
-  short autoOnOff;
-  short activity;
   short skewMode;
   int bufSizeStart;
   int knownDelay;
   int rate_factor;
 
   short initFlag;  // indicates if AEC has been initialized
 
   // Variables used for averaging far end buffer size
@@ -34,17 +32,17 @@ typedef struct {
   int sum;
   short firstVal;
   short checkBufSizeCtr;
 
   // Variables used for delay shifts
   short msInSndCardBuf;
   short filtDelay;  // Filtered delay estimate.
   int timeForDelayChange;
-  int ECstartup;
+  int startup_phase;
   int checkBuffSize;
   short lastDelayDiff;
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
   RingBuffer* far_pre_buf_s16;  // Time domain far-end pre-buffer in int16_t.
   FILE* bufFile;
   FILE* delayFile;
   FILE* skewFile;
@@ -57,12 +55,14 @@ typedef struct {
   int resample;  // if the skew is small enough we don't resample
   int highSkewCtr;
   float skew;
 
   RingBuffer* far_pre_buf;  // Time domain far-end pre-buffer.
 
   int lastError;
 
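+  // Set by WebRtcAec_BufferFarend() once far-end audio has been buffered;
+  // ProcessExtended() passes the near-end straight through until then.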
+  int farend_started;
+
   AecCore* aec;
 } aecpc_t;
 
 #endif  // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_INTERNAL_H_
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
@@ -123,17 +123,17 @@ void SystemDelayTest::RunStableStartup()
   // Process().
   int buffer_size = BufferFillUp();
   // A stable device should be accepted and put in a regular process mode within
   // |kStableConvergenceMs|.
   int process_time_ms = 0;
   for (; process_time_ms < kStableConvergenceMs; process_time_ms += 10) {
     RenderAndCapture(kDeviceBufMs);
     buffer_size += samples_per_frame_;
-    if (self_->ECstartup == 0) {
+    if (self_->startup_phase == 0) {
       // We have left the startup phase.
       break;
     }
   }
   // Verify convergence time.
   EXPECT_GT(kStableConvergenceMs, process_time_ms);
   // Verify that the buffer has been flushed.
   EXPECT_GE(buffer_size, WebRtcAec_system_delay(self_->aec));
@@ -217,17 +217,17 @@ TEST_F(SystemDelayTest, CorrectDelayAfte
     int buffer_offset_ms = 25;
     int reported_delay_ms = 0;
     int process_time_ms = 0;
     for (; process_time_ms <= kMaxConvergenceMs; process_time_ms += 10) {
       reported_delay_ms = kDeviceBufMs + buffer_offset_ms;
       RenderAndCapture(reported_delay_ms);
       buffer_size += samples_per_frame_;
       buffer_offset_ms = -buffer_offset_ms;
-      if (self_->ECstartup == 0) {
+      if (self_->startup_phase == 0) {
         // We have left the startup phase.
         break;
       }
     }
     // Verify convergence time.
     EXPECT_GE(kMaxConvergenceMs, process_time_ms);
     // Verify that the buffer has been flushed.
     EXPECT_GE(buffer_size, WebRtcAec_system_delay(self_->aec));
@@ -263,17 +263,17 @@ TEST_F(SystemDelayTest, CorrectDelayAfte
     // We now have established the required buffer size. Let us verify that we
     // fill up before leaving the startup phase for normal processing.
     int buffer_size = 0;
     int target_buffer_size = kDeviceBufMs * samples_per_frame_ / 10 * 3 / 4;
     process_time_ms = 0;
     for (; process_time_ms <= kMaxConvergenceMs; process_time_ms += 10) {
       RenderAndCapture(kDeviceBufMs);
       buffer_size += samples_per_frame_;
-      if (self_->ECstartup == 0) {
+      if (self_->startup_phase == 0) {
         // We have left the startup phase.
         break;
       }
     }
     // Verify convergence time.
     EXPECT_GT(kMaxConvergenceMs, process_time_ms);
     // Verify that the buffer has reached the desired size.
     EXPECT_LE(target_buffer_size, WebRtcAec_system_delay(self_->aec));
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
@@ -150,16 +150,17 @@
         {
           'target_name': 'audio_processing_sse2',
           'type': 'static_library',
           'sources': [
             'aec/aec_core_sse2.c',
             'aec/aec_rdft_sse2.c',
           ],
           'cflags': ['-msse2',],
+          'cflags_mozilla': ['-msse2',],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],
     }],
     ['(target_arch=="arm" and armv7==1) or target_arch=="armv7"', {
       'targets': [{
@@ -173,21 +174,24 @@
           'aecm/aecm_core_neon.c',
           'ns/nsx_core_neon.c',
         ],
         'conditions': [
           ['OS=="android" or OS=="ios"', {
             'dependencies': [
               'audio_processing_offsets',
             ],
-            'sources': [
+            # Disable the assembly sources, because our gyp-to-Makefile
+            # translator does not support the build steps needed to generate
+            # the asm offsets.
+            'sources!': [
               'aecm/aecm_core_neon.S',
               'ns/nsx_core_neon.S',
             ],
-            'sources!': [
+            'sources': [
               'aecm/aecm_core_neon.c',
               'ns/nsx_core_neon.c',
             ],
             'includes!': ['../../build/arm_neon.gypi',],
           }],
         ],
       }],
       'conditions': [
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -8,22 +8,24 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
 
 #include <assert.h>
 #include <string.h>
 
+extern "C" {
+#include "webrtc/modules/audio_processing/aec/aec_core.h"
+}
+#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
 #include "webrtc/modules/audio_processing/audio_buffer.h"
 #include "webrtc/modules/audio_processing/audio_processing_impl.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
-#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
-
 namespace webrtc {
 
 typedef void Handle;
 
 namespace {
 int16_t MapSetting(EchoCancellation::SuppressionLevel level) {
   switch (level) {
     case EchoCancellation::kLowSuppression:
@@ -64,17 +66,18 @@ EchoCancellationImpl::EchoCancellationIm
     apm_(apm),
     drift_compensation_enabled_(false),
     metrics_enabled_(false),
     suppression_level_(kModerateSuppression),
     device_sample_rate_hz_(48000),
     stream_drift_samples_(0),
     was_stream_drift_set_(false),
     stream_has_echo_(false),
-    delay_logging_enabled_(false) {}
+    delay_logging_enabled_(false),
+    delay_correction_enabled_(true) {}
 
 EchoCancellationImpl::~EchoCancellationImpl() {}
 
 int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
   if (!is_component_enabled()) {
     return apm_->kNoError;
   }
 
@@ -333,16 +336,23 @@ int EchoCancellationImpl::Initialize() {
     return err;
   }
 
   was_stream_drift_set_ = false;
 
   return apm_->kNoError;
 }
 
+#if 0
+void EchoCancellationImpl::SetExtraOptions(const Config& config) {
+  delay_correction_enabled_ = config.Get<DelayCorrection>().enabled;
+  Configure();
+}
+#endif
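+// NB: with SetExtraOptions() compiled out, |delay_correction_enabled_| keeps
+// the value set in the constructor.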
+
 void* EchoCancellationImpl::CreateHandle() const {
   Handle* handle = NULL;
   if (WebRtcAec_Create(&handle) != apm_->kNoError) {
     handle = NULL;
   } else {
     assert(handle != NULL);
   }
 
@@ -364,16 +374,18 @@ int EchoCancellationImpl::InitializeHand
 int EchoCancellationImpl::ConfigureHandle(void* handle) const {
   assert(handle != NULL);
   AecConfig config;
   config.metricsMode = metrics_enabled_;
   config.nlpMode = MapSetting(suppression_level_);
   config.skewMode = drift_compensation_enabled_;
   config.delay_logging = delay_logging_enabled_;
 
+  WebRtcAec_enable_delay_correction(WebRtcAec_aec_core(
+      static_cast<Handle*>(handle)), delay_correction_enabled_ ? 1 : 0);
   return WebRtcAec_set_config(static_cast<Handle*>(handle), config);
 }
 
 int EchoCancellationImpl::num_handles_required() const {
   return apm_->num_output_channels() *
          apm_->num_reverse_channels();
 }
 
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.h
@@ -9,16 +9,39 @@
  */
 
 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
 #define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
 
 #include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
 
 namespace webrtc {
+// Use to enable the delay correction feature. This now engages an extended
+// filter mode in the AEC, along with robustness measures around the reported
+// system delays. It comes with a significant increase in AEC complexity, but is
+// much more robust to unreliable reported delays.
+//
+// Detailed changes to the algorithm:
+// - The filter length is changed from 48 to 128 ms. This comes with tuning of
+//   several parameters: i) filter adaptation stepsize and error threshold;
+//   ii) non-linear processing smoothing and overdrive.
+// - Option to ignore the reported delays on platforms which we deem
+//   sufficiently unreliable. See WEBRTC_UNTRUSTED_DELAY in echo_cancellation.c.
+// - Faster startup times by removing the excessive "startup phase" processing
+//   of reported delays.
+// - Much more conservative adjustments to the far-end read pointer. We smooth
+//   the delay difference more heavily, and back off from the difference more.
+//   Adjustments force a readaptation of the filter, so they should be avoided
+//   except when really necessary.
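+//
+// A usage sketch (this mirrors echo_cancellation_impl_unittest.cc below):
+//   Config config;
+//   config.Set<DelayCorrection>(new DelayCorrection(true));
+//   apm->SetExtraOptions(config);  // |apm| is an AudioProcessing instance.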
+struct DelayCorrection {
+  DelayCorrection() : enabled(false) {}
+  explicit DelayCorrection(bool enabled) : enabled(enabled) {}
+
+  bool enabled;
+};
 
 class AudioProcessingImpl;
 class AudioBuffer;
 
 class EchoCancellationImpl : public EchoCancellationImplWrapper {
  public:
   explicit EchoCancellationImpl(const AudioProcessingImpl* apm);
   virtual ~EchoCancellationImpl();
@@ -29,16 +52,17 @@ class EchoCancellationImpl : public Echo
 
   // EchoCancellation implementation.
   virtual bool is_enabled() const OVERRIDE;
   virtual int device_sample_rate_hz() const OVERRIDE;
   virtual int stream_drift_samples() const OVERRIDE;
 
   // ProcessingComponent implementation.
   virtual int Initialize() OVERRIDE;
+  //  virtual void SetExtraOptions(const Config& config) OVERRIDE;
 
  private:
   // EchoCancellation implementation.
   virtual int Enable(bool enable) OVERRIDE;
   virtual int enable_drift_compensation(bool enable) OVERRIDE;
   virtual bool is_drift_compensation_enabled() const OVERRIDE;
   virtual int set_device_sample_rate_hz(int rate) OVERRIDE;
   virtual void set_stream_drift_samples(int drift) OVERRIDE;
@@ -65,13 +89,14 @@ class EchoCancellationImpl : public Echo
   bool drift_compensation_enabled_;
   bool metrics_enabled_;
   SuppressionLevel suppression_level_;
   int device_sample_rate_hz_;
   int stream_drift_samples_;
   bool was_stream_drift_set_;
   bool stream_has_echo_;
   bool delay_logging_enabled_;
+  bool delay_correction_enabled_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+extern "C" {
+#include "webrtc/modules/audio_processing/aec/aec_core.h"
+}
+#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+TEST(EchoCancellationInternalTest, DelayCorrection) {
+  scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(0));
+  EXPECT_TRUE(ap->echo_cancellation()->aec_core() == NULL);
+
+  EXPECT_EQ(ap->kNoError, ap->echo_cancellation()->Enable(true));
+  EXPECT_TRUE(ap->echo_cancellation()->is_enabled());
+
+  AecCore* aec_core = ap->echo_cancellation()->aec_core();
+  ASSERT_TRUE(aec_core != NULL);
+  // Disabled by default.
+  EXPECT_EQ(0, WebRtcAec_delay_correction_enabled(aec_core));
+
+  Config config;
+  config.Set<DelayCorrection>(new DelayCorrection(true));
+  ap->SetExtraOptions(config);
+  EXPECT_EQ(1, WebRtcAec_delay_correction_enabled(aec_core));
+
+  // Retains setting after initialization.
+  EXPECT_EQ(ap->kNoError, ap->Initialize());
+  EXPECT_EQ(1, WebRtcAec_delay_correction_enabled(aec_core));
+
+  config.Set<DelayCorrection>(new DelayCorrection(false));
+  ap->SetExtraOptions(config);
+  EXPECT_EQ(0, WebRtcAec_delay_correction_enabled(aec_core));
+
+  // Retains setting after initialization.
+  EXPECT_EQ(ap->kNoError, ap->Initialize());
+  EXPECT_EQ(0, WebRtcAec_delay_correction_enabled(aec_core));
+}
+
+}  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
@@ -604,23 +604,23 @@ int32_t ModuleFileUtility::ReadWavHeader
     // Calculate the number of bytes that 10 ms of audio data correspond to.
     if(_wavFormatObj.formatTag == kWaveFormatPcm)
     {
         // TODO (hellner): integer division for 22050 and 11025 would yield
         //                 the same result as the else statement. Remove those
         //                 special cases?
         if(_wavFormatObj.nSamplesPerSec == 44100)
         {
-            _readSizeBytes = 440 * _wavFormatObj.nChannels *
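+            // 10 ms at 44.1 kHz is exactly 441 samples (44100 / 100).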
+            _readSizeBytes = 441 * _wavFormatObj.nChannels *
                 (_wavFormatObj.nBitsPerSample / 8);
         } else if(_wavFormatObj.nSamplesPerSec == 22050) {
-            _readSizeBytes = 220 * _wavFormatObj.nChannels *
+            _readSizeBytes = 220 * _wavFormatObj.nChannels * // XXX inexact!
                 (_wavFormatObj.nBitsPerSample / 8);
         } else if(_wavFormatObj.nSamplesPerSec == 11025) {
-            _readSizeBytes = 110 * _wavFormatObj.nChannels *
+            _readSizeBytes = 110 * _wavFormatObj.nChannels * // XXX inexact!
                 (_wavFormatObj.nBitsPerSample / 8);
         } else {
             _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
               _wavFormatObj.nChannels * (_wavFormatObj.nBitsPerSample / 8);
         }
 
     } else {
         _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
@@ -672,32 +672,32 @@ int32_t ModuleFileUtility::InitWavCodec(
             _codecId = kCodecL16_32Khz;
         }
         // Set the packet size for "odd" sampling frequencies so that it
         // properly corresponds to _readSizeBytes.
         else if(samplesPerSec == 11025)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 110;
-            codec_info_.plfreq = 11000;
+            codec_info_.pacsize = 110; // XXX inexact!
+            codec_info_.plfreq = 11000; // XXX inexact!
         }
         else if(samplesPerSec == 22050)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 220;
-            codec_info_.plfreq = 22000;
+            codec_info_.pacsize = 220; // XXX inexact!
+            codec_info_.plfreq = 22000; // XXX inexact!
         }
         else if(samplesPerSec == 44100)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 440;
-            codec_info_.plfreq = 44000;
+            codec_info_.pacsize = 441;
+            codec_info_.plfreq = 44100;
         }
         else if(samplesPerSec == 48000)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
             codec_info_.pacsize = 480;
             codec_info_.plfreq = 48000;
         }
@@ -1120,18 +1120,16 @@ int32_t ModuleFileUtility::WriteWavHeade
     const uint32_t freq,
     const uint32_t bytesPerSample,
     const uint32_t channels,
     const uint32_t format,
     const uint32_t lengthInBytes)
 {
 
     // Frame size in bytes for 10 ms of audio.
-    // TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
-    //                 be taken into consideration here!
     int32_t frameSize = (freq / 100) * bytesPerSample * channels;
 
     // Calculate the number of full frames that the wave file contain.
     const int32_t dataLengthInBytes = frameSize *
         (lengthInBytes / frameSize);
 
     int8_t tmpStr[4];
     int8_t tmpChar;
--- a/media/webrtc/trunk/webrtc/modules/modules.gyp
+++ b/media/webrtc/trunk/webrtc/modules/modules.gyp
@@ -140,16 +140,17 @@
             'audio_coding/neteq4/mock/mock_delay_peak_detector.h',
             'audio_coding/neteq4/mock/mock_dtmf_buffer.h',
             'audio_coding/neteq4/mock/mock_dtmf_tone_generator.h',
             'audio_coding/neteq4/mock/mock_external_decoder_pcm16b.h',
             'audio_coding/neteq4/mock/mock_packet_buffer.h',
             'audio_coding/neteq4/mock/mock_payload_splitter.h',
             'audio_processing/aec/system_delay_unittest.cc',
             'audio_processing/aec/echo_cancellation_unittest.cc',
+            'audio_processing/echo_cancellation_impl_unittest.cc',
             'audio_processing/test/audio_processing_unittest.cc',
             'audio_processing/utility/delay_estimator_unittest.cc',
             'audio_processing/utility/ring_buffer_unittest.cc',
             'bitrate_controller/bitrate_controller_unittest.cc',
             'desktop_capture/desktop_region_unittest.cc',
             'desktop_capture/differ_block_unittest.cc',
             'desktop_capture/differ_unittest.cc',
             'desktop_capture/screen_capturer_helper_unittest.cc',
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
 
 #include <assert.h>
+#include <cstdlib>  // for abs()
 #include <string.h>
 
 #include <algorithm>
 #include <iterator>
 
 #include "webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
 #include "webrtc/system_wrappers/interface/trace.h"
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
@@ -16,17 +16,17 @@
 
 #if defined(_WIN32)
 // Order for these headers are important
 #include <Windows.h>  // FILETIME
 
 #include <WinSock.h>  // timeval
 
 #include <MMSystem.h>  // timeGetTime
-#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_MAC))
+#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_BSD) || (defined WEBRTC_MAC))
 #include <sys/time.h>  // gettimeofday
 #include <time.h>
 #endif
 #if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
 #include <stdio.h>
 #endif
 
 #include "webrtc/system_wrappers/interface/tick_util.h"
@@ -91,19 +91,19 @@ uint32_t GetCurrentRTP(Clock* clock, uin
   local_clock->CurrentNtp(secs, frac);
   if (use_global_clock) {
     delete local_clock;
   }
   return ConvertNTPTimeToRTP(secs, frac, freq);
 }
 
 uint32_t ConvertNTPTimeToRTP(uint32_t NTPsec, uint32_t NTPfrac, uint32_t freq) {
   float ftemp = (float)NTPfrac / (float)NTP_FRAC;
   uint32_t tmp = (uint32_t)(ftemp * freq);
   return NTPsec * freq + tmp;
 }
 
 uint32_t ConvertNTPTimeToMS(uint32_t NTPsec, uint32_t NTPfrac) {
   int freq = 1000;
   float ftemp = (float)NTPfrac / (float)NTP_FRAC;
   uint32_t tmp = (uint32_t)(ftemp * freq);
   uint32_t MStime = NTPsec * freq + tmp;
   return MStime;
@@ -113,17 +113,17 @@ uint32_t ConvertNTPTimeToMS(uint32_t NTP
  * Misc utility routines
  */
 
 #if defined(_WIN32)
 bool StringCompare(const char* str1, const char* str2,
                    const uint32_t length) {
   return (_strnicmp(str1, str2, length) == 0) ? true : false;
 }
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 bool StringCompare(const char* str1, const char* str2,
                    const uint32_t length) {
   return (strncasecmp(str1, str2, length) == 0) ? true : false;
 }
 #endif
 
 #if !defined(WEBRTC_LITTLE_ENDIAN) && !defined(WEBRTC_BIG_ENDIAN)
 #error Either WEBRTC_LITTLE_ENDIAN or WEBRTC_BIG_ENDIAN must be defined
@@ -153,17 +153,17 @@ void AssignUWord24ToBuffer(uint8_t* data
 #else
   dataBuffer[0] = static_cast<uint8_t>(value);
   dataBuffer[1] = static_cast<uint8_t>(value >> 8);
   dataBuffer[2] = static_cast<uint8_t>(value >> 16);
 #endif
 }
 
 void AssignUWord16ToBuffer(uint8_t* dataBuffer, uint16_t value) {
 #if defined(WEBRTC_LITTLE_ENDIAN)
   dataBuffer[0] = static_cast<uint8_t>(value >> 8);
   dataBuffer[1] = static_cast<uint8_t>(value);
 #else
   uint16_t* ptr = reinterpret_cast<uint16_t*>(dataBuffer);
   ptr[0] = value;
 #endif
 }
 
--- a/media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
@@ -82,17 +82,17 @@ int32_t FilePlayerImpl::Frequency() cons
     if(_codec.plfreq == 11000)
     {
         return 16000;
     }
     else if(_codec.plfreq == 22000)
     {
         return 32000;
     }
-    else if(_codec.plfreq == 44000)
+    else if(_codec.plfreq == 44100 || _codec.plfreq == 44000) // XXX just 44100?
     {
         return 32000;
     }
     else if(_codec.plfreq == 48000)
     {
         return 32000;
     }
     else
--- a/media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
@@ -14,17 +14,17 @@
 #include <stdio.h>
 
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 #if defined(_WIN32)
 #include <Windows.h>
 #include <mmsystem.h>
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC) || defined(WEBRTC_BSD)
 #include <string.h>
 #include <sys/time.h>
 #include <time.h>
 #endif
 
 #if (defined(_DEBUG) && defined(_WIN32))
 #define DEBUG_PRINT(expr)   OutputDebugString(##expr)
 #define DEBUG_PRINTP(expr, p)   \
@@ -232,17 +232,17 @@ bool RtpDumpImpl::RTCP(const uint8_t* pa
     return is_rtcp;
 }
 
 // TODO (hellner): why is TickUtil not used here?
 inline uint32_t RtpDumpImpl::GetTimeInMS() const
 {
 #if defined(_WIN32)
     return timeGetTime();
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
     struct timeval tv;
     struct timezone tz;
     unsigned long val;
 
     gettimeofday(&tv, &tz);
     val = tv.tv_sec * 1000 + tv.tv_usec / 1000;
     return val;
 #else
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
@@ -11,16 +11,18 @@
 #include "webrtc/modules/video_capture/android/device_info_android.h"
 
 #include <stdio.h>
 
 #include "webrtc/modules/video_capture/android/video_capture_android.h"
 #include "webrtc/system_wrappers/interface/ref_count.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#include "AndroidJNIWrapper.h"
+
 namespace webrtc
 {
 
 namespace videocapturemodule
 {
 
 static jclass g_capabilityClass = NULL;
 
@@ -47,67 +49,60 @@ DeviceInfoAndroid::DeviceInfoAndroid(con
 int32_t DeviceInfoAndroid::Init() {
   return 0;
 }
 
 DeviceInfoAndroid::~DeviceInfoAndroid() {
 }
 
 uint32_t DeviceInfoAndroid::NumberOfDevices() {
-  JNIEnv *env;
-  jclass javaCmDevInfoClass;
-  jobject javaCmDevInfoObject;
-  bool attached = false;
-  if (VideoCaptureAndroid::AttachAndUseAndroidDeviceInfoObjects(
-          env,
-          javaCmDevInfoClass,
-          javaCmDevInfoObject,
-          attached) != 0)
-    return 0;
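+  // AutoLocalJNIFrame (from AndroidJNIWrapper.h) supplies the JNIEnv and the
+  // cached device-info class/object, replacing the manual
+  // AttachAndUseAndroidDeviceInfoObjects()/ReleaseAndroidDeviceInfoObjects()
+  // pairs.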
+  AutoLocalJNIFrame jniFrame;
+  JNIEnv* env = jniFrame.GetEnv();
+  if (!env)
+      return 0;
+
+  jclass javaCmDevInfoClass = jniFrame.GetCmDevInfoClass();
+  jobject javaCmDevInfoObject = jniFrame.GetCmDevInfoObject();
 
   WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, _id,
                "%s GetMethodId", __FUNCTION__);
   // get the method ID for the Android Java GetDeviceUniqueName name.
   jmethodID cid = env->GetMethodID(javaCmDevInfoClass,
                                    "NumberOfDevices",
                                    "()I");
 
   jint numberOfDevices = 0;
   if (cid != NULL) {
     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, _id,
                  "%s Calling Number of devices", __FUNCTION__);
     numberOfDevices = env->CallIntMethod(javaCmDevInfoObject, cid);
   }
-  VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
 
   if (numberOfDevices > 0)
     return numberOfDevices;
   return 0;
 }
 
 int32_t DeviceInfoAndroid::GetDeviceName(
     uint32_t deviceNumber,
     char* deviceNameUTF8,
     uint32_t deviceNameLength,
     char* deviceUniqueIdUTF8,
     uint32_t deviceUniqueIdUTF8Length,
     char* /*productUniqueIdUTF8*/,
     uint32_t /*productUniqueIdUTF8Length*/) {
 
-  JNIEnv *env;
-  jclass javaCmDevInfoClass;
-  jobject javaCmDevInfoObject;
   int32_t result = 0;
-  bool attached = false;
-  if (VideoCaptureAndroid::AttachAndUseAndroidDeviceInfoObjects(
-          env,
-          javaCmDevInfoClass,
-          javaCmDevInfoObject,
-          attached)!= 0)
-    return -1;
+  AutoLocalJNIFrame jniFrame;
+  JNIEnv* env = jniFrame.GetEnv();
+  if (!env)
+      return -1;
+
+  jclass javaCmDevInfoClass = jniFrame.GetCmDevInfoClass();
+  jobject javaCmDevInfoObject = jniFrame.GetCmDevInfoObject();
 
   // get the method ID for the Android Java GetDeviceUniqueName name.
   jmethodID cid = env->GetMethodID(javaCmDevInfoClass, "GetDeviceUniqueName",
                                    "(I)Ljava/lang/String;");
   if (cid != NULL) {
     jobject javaDeviceNameObj = env->CallObjectMethod(javaCmDevInfoObject,
                                                       cid, deviceNumber);
     if (javaDeviceNameObj == NULL) {
@@ -146,91 +141,81 @@ int32_t DeviceInfoAndroid::GetDeviceName
   }
   else {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                  "%s: Failed to find GetDeviceUniqueName function id",
                  __FUNCTION__);
     result = -1;
   }
 
-  VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
-
   WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                "%s: result %d", __FUNCTION__, (int) result);
   return result;
 
 }
 
 int32_t DeviceInfoAndroid::CreateCapabilityMap(
     const char* deviceUniqueIdUTF8) {
   for (std::map<int, VideoCaptureCapability*>::iterator it =
            _captureCapabilities.begin();
        it != _captureCapabilities.end();
        ++it)
     delete it->second;
   _captureCapabilities.clear();
 
-  JNIEnv *env;
-  jclass javaCmDevInfoClass;
-  jobject javaCmDevInfoObject;
-  bool attached = false;
-  if (VideoCaptureAndroid::AttachAndUseAndroidDeviceInfoObjects(
-          env,
-          javaCmDevInfoClass,
-          javaCmDevInfoObject,
-          attached) != 0)
-    return -1;
+  AutoLocalJNIFrame jniFrame;
+  JNIEnv* env = jniFrame.GetEnv();
+  if (!env)
+      return -1;
+
+  jclass javaCmDevInfoClass = jniFrame.GetCmDevInfoClass();
+  jobject javaCmDevInfoObject = jniFrame.GetCmDevInfoObject();
 
   // Find the capability class
-  jclass javaCapClass = g_capabilityClass;
+  jclass javaCapClass = jsjni_GetGlobalClassRef(AndroidJavaCaptureCapabilityClass);
   if (javaCapClass == NULL) {
-    VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: SetAndroidCaptureClasses must be called first!",
                  __FUNCTION__);
     return -1;
   }
 
   // get the method ID for the Android Java GetCapabilityArray .
   jmethodID cid = env->GetMethodID(
       javaCmDevInfoClass,
       "GetCapabilityArray",
       "(Ljava/lang/String;)[Lorg/webrtc/videoengine/CaptureCapabilityAndroid;");
   if (cid == NULL) {
-    VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Can't find method GetCapabilityArray.", __FUNCTION__);
     return -1;
   }
   // Create a jstring so we can pass the deviceUniquName to the java method.
   jstring capureIdString = env->NewStringUTF((char*) deviceUniqueIdUTF8);
 
   if (capureIdString == NULL) {
-    VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Can't create string for  method GetCapabilityArray.",
                  __FUNCTION__);
     return -1;
   }
   // Call the java class and get an array with capabilities back.
   jobject javaCapabilitiesObj = env->CallObjectMethod(javaCmDevInfoObject,
                                                       cid, capureIdString);
   if (!javaCapabilitiesObj) {
-    VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Failed to call java GetCapabilityArray.",
                  __FUNCTION__);
     return -1;
   }
 
   jfieldID widthField = env->GetFieldID(javaCapClass, "width", "I");
   jfieldID heigtField = env->GetFieldID(javaCapClass, "height", "I");
   jfieldID maxFpsField = env->GetFieldID(javaCapClass, "maxFPS", "I");
   if (widthField == NULL || heigtField == NULL || maxFpsField == NULL) {
-    VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Failed to get field Id.", __FUNCTION__);
     return -1;
   }
 
   const jsize numberOfCapabilities =
       env->GetArrayLength((jarray) javaCapabilitiesObj);
 
@@ -253,59 +238,54 @@ int32_t DeviceInfoAndroid::CreateCapabil
 
   _lastUsedDeviceNameLength = strlen((char*) deviceUniqueIdUTF8);
   _lastUsedDeviceName = (char*) realloc(_lastUsedDeviceName,
                                         _lastUsedDeviceNameLength + 1);
   memcpy(_lastUsedDeviceName,
          deviceUniqueIdUTF8,
          _lastUsedDeviceNameLength + 1);
 
-  VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
+  env->DeleteGlobalRef(javaCapClass);
+
   WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _id,
                "CreateCapabilityMap %d", _captureCapabilities.size());
 
   return _captureCapabilities.size();
 }
 
 int32_t DeviceInfoAndroid::GetOrientation(
     const char* deviceUniqueIdUTF8,
     VideoCaptureRotation& orientation) {
-  JNIEnv *env;
-  jclass javaCmDevInfoClass;
-  jobject javaCmDevInfoObject;
-  bool attached = false;
-  if (VideoCaptureAndroid::AttachAndUseAndroidDeviceInfoObjects(
-          env,
-          javaCmDevInfoClass,
-          javaCmDevInfoObject,
-          attached) != 0)
-    return -1;
+  AutoLocalJNIFrame jniFrame;
+  JNIEnv* env = jniFrame.GetEnv();
+  if (!env)
+      return -1;
+
+  jclass javaCmDevInfoClass = jniFrame.GetCmDevInfoClass();
+  jobject javaCmDevInfoObject = jniFrame.GetCmDevInfoObject();
 
   // get the method ID for the Android Java GetOrientation .
   jmethodID cid = env->GetMethodID(javaCmDevInfoClass, "GetOrientation",
                                    "(Ljava/lang/String;)I");
   if (cid == NULL) {
-    VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Can't find method GetOrientation.", __FUNCTION__);
     return -1;
   }
   // Create a jstring so we can pass the deviceUniquName to the java method.
   jstring capureIdString = env->NewStringUTF((char*) deviceUniqueIdUTF8);
   if (capureIdString == NULL) {
-    VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Can't create string for  method GetCapabilityArray.",
                  __FUNCTION__);
     return -1;
   }
   // Call the java class and get the orientation.
   jint jorientation = env->CallIntMethod(javaCmDevInfoObject, cid,
                                          capureIdString);
-  VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
 
   int32_t retValue = 0;
   switch (jorientation) {
     case -1: // Error
       orientation = kCameraRotate0;
       retValue = -1;
       break;
     case 0:
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
@@ -11,16 +11,19 @@
 #ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
 #define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
 
 #include <jni.h>
 
 #include "webrtc/modules/video_capture/device_info_impl.h"
 #include "webrtc/modules/video_capture/video_capture_impl.h"
 
+#define AndroidJavaCaptureDeviceInfoClass "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid"
+#define AndroidJavaCaptureCapabilityClass "org/webrtc/videoengine/CaptureCapabilityAndroid"
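+// These class paths are resolved at runtime, e.g. via
+// jsjni_GetGlobalClassRef() in DeviceInfoAndroid::CreateCapabilityMap().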
+
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 // Android logging, uncomment to print trace to
 // logcat instead of trace file/callback
 // #include <android/log.h>
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
@@ -20,248 +20,385 @@ import org.webrtc.videoengine.VideoCaptu
 import android.graphics.ImageFormat;
 import android.graphics.PixelFormat;
 import android.graphics.Rect;
 import android.graphics.SurfaceTexture;
 import android.graphics.YuvImage;
 import android.hardware.Camera;
 import android.hardware.Camera.PreviewCallback;
 import android.util.Log;
+import android.view.Surface;
 import android.view.SurfaceHolder;
 import android.view.SurfaceHolder.Callback;
+import android.view.SurfaceView;
+import android.view.TextureView;
+import android.view.TextureView.SurfaceTextureListener;
+import android.view.View;
+
+import org.mozilla.gecko.GeckoApp;
+import org.mozilla.gecko.GeckoAppShell;
+import org.mozilla.gecko.GeckoAppShell.AppStateListener;
+import org.mozilla.gecko.util.ThreadUtils;
 
 public class VideoCaptureAndroid implements PreviewCallback, Callback {
 
     private final static String TAG = "WEBRTC-JC";
 
     private Camera camera;
+    private int cameraId;
     private AndroidVideoCaptureDevice currentDevice = null;
     public ReentrantLock previewBufferLock = new ReentrantLock();
     // This lock takes sync with StartCapture and SurfaceChanged
     private ReentrantLock captureLock = new ReentrantLock();
     private int PIXEL_FORMAT = ImageFormat.NV21;
     PixelFormat pixelFormat = new PixelFormat();
     // True when the C++ layer has ordered the camera to be started.
     private boolean isCaptureStarted = false;
     private boolean isCaptureRunning = false;
     private boolean isSurfaceReady = false;
+    private SurfaceHolder surfaceHolder = null;
+    private SurfaceTexture surfaceTexture = null;
+    private SurfaceTexture dummySurfaceTexture = null;
 
     private final int numCaptureBuffers = 3;
     private int expectedFrameSize = 0;
     private int orientation = 0;
     private int id = 0;
     // C++ callback context variable.
     private long context = 0;
     private SurfaceHolder localPreview = null;
-    private SurfaceTexture dummySurfaceTexture = null;
     // True if this class owns the preview video buffers.
     private boolean ownsBuffers = false;
 
     private int mCaptureWidth = -1;
     private int mCaptureHeight = -1;
     private int mCaptureFPS = -1;
 
+    private int mCaptureRotation = 0;
+
+    private AppStateListener mAppStateListener = null;
+
+    public class MySurfaceTextureListener implements TextureView.SurfaceTextureListener {
+        public void onSurfaceTextureAvailable(SurfaceTexture surface, int width, int height) {
+            Log.d(TAG, "VideoCaptureAndroid::onSurfaceTextureAvailable");
+
+            captureLock.lock();
+            isSurfaceReady = true;
+            surfaceTexture = surface;
+
+            tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
+            captureLock.unlock();
+        }
+
+        public void onSurfaceTextureSizeChanged(SurfaceTexture surface,
+                                                int width, int height) {
+            // Ignored; Camera does all the work for us.
+            // Note that for a TextureView we start in onSurfaceTextureAvailable,
+            // for a SurfaceView we start in surfaceChanged. TextureView
+            // will not emit onSurfaceTextureSizeChanged during creation.
+        }
+
+        public boolean onSurfaceTextureDestroyed(SurfaceTexture surface) {
+            Log.d(TAG, "VideoCaptureAndroid::onSurfaceTextureDestroyed");
+            isSurfaceReady = false;
+            DetachCamera();
+            return true;
+        }
+
+        public void onSurfaceTextureUpdated(SurfaceTexture surface) {
+            // Invoked every time there's a new Camera preview frame
+        }
+    }
+
     public static
     void DeleteVideoCaptureAndroid(VideoCaptureAndroid captureAndroid) {
         Log.d(TAG, "DeleteVideoCaptureAndroid");
-        if (captureAndroid.camera == null) {
-            return;
-        }
+
+        GeckoAppShell.getGeckoInterface().removeAppStateListener(captureAndroid.mAppStateListener);
 
         captureAndroid.StopCapture();
-        captureAndroid.camera.release();
-        captureAndroid.camera = null;
+        if (captureAndroid.camera != null) {
+            captureAndroid.camera.release();
+            captureAndroid.camera = null;
+        }
         captureAndroid.context = 0;
+
+        View cameraView = GeckoAppShell.getGeckoInterface().getCameraView();
+        if (cameraView instanceof SurfaceView) {
+            ((SurfaceView)cameraView).getHolder().removeCallback(captureAndroid);
+        } else if (cameraView instanceof TextureView) {
+            // No need to explicitly remove the listener:
+            // i.e. ((TextureView)cameraView).setSurfaceTextureListener(null);
+        }
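+        // disableCameraView touches the view hierarchy, so post it to the
+        // UI thread; the camera itself was already released above.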
+        ThreadUtils.getUiHandler().post(new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    GeckoAppShell.getGeckoInterface().disableCameraView();
+                } catch (Exception e) {
+                    Log.e(TAG,
+                          "VideoCaptureAndroid disableCameraView exception: " +
+                          e.getLocalizedMessage());
+                }
+            }
+        });
     }
 
     public VideoCaptureAndroid(int in_id, long in_context, Camera in_camera,
-            AndroidVideoCaptureDevice in_device) {
+                               AndroidVideoCaptureDevice in_device,
+                               int in_cameraId) {
         id = in_id;
         context = in_context;
         camera = in_camera;
+        cameraId = in_cameraId;
         currentDevice = in_device;
+        mCaptureRotation = GetRotateAmount();
+
+        try {
+            View cameraView = GeckoAppShell.getGeckoInterface().getCameraView();
+            if (cameraView instanceof SurfaceView) {
+                ((SurfaceView)cameraView).getHolder().addCallback(this);
+            } else if (cameraView instanceof TextureView) {
+                MySurfaceTextureListener listener = new MySurfaceTextureListener();
+                ((TextureView)cameraView).setSurfaceTextureListener(listener);
+            }
+            ThreadUtils.getUiHandler().post(new Runnable() {
+                @Override
+                public void run() {
+                    try {
+                        GeckoAppShell.getGeckoInterface().enableCameraView();
+                    } catch (Exception e) {
+                        Log.e(TAG,
+                              "VideoCaptureAndroid enableCameraView exception: "
+                               + e.getLocalizedMessage());
+                    }
+                }
+            });
+        } catch (Exception ex) {
+            Log.e(TAG, "VideoCaptureAndroid constructor exception: " +
+                  ex.getLocalizedMessage());
+        }
+
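+        // Release the camera on pause so other applications can use it; on
+        // resume, reopen the same cameraId (the default camera on pre-2.3
+        // devices) and restart capture with the cached width/height/fps.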
+        mAppStateListener = new AppStateListener() {
+            @Override
+            public void onPause() {
+                StopCapture();
+                if (camera != null) {
+                    camera.release();
+                    camera = null;
+                }
+            }
+            @Override
+            public void onResume() {
+                try {
+                    if (android.os.Build.VERSION.SDK_INT > 8) {
+                        camera = Camera.open(cameraId);
+                    } else {
+                        camera = Camera.open();
+                    }
+                } catch (Exception ex) {
+                    Log.e(TAG, "Error reopening the camera: " + ex.getMessage());
+                }
+                captureLock.lock();
+                isCaptureStarted = true;
+                tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
+                captureLock.unlock();
+            }
+            @Override
+            public void onOrientationChanged() {
+                mCaptureRotation = GetRotateAmount();
+            }
+        };
+
+        GeckoAppShell.getGeckoInterface().addAppStateListener(mAppStateListener);
+    }
+
+    public int GetRotateAmount() {
+        int rotation = GeckoAppShell.getGeckoInterface().getActivity().getWindowManager().getDefaultDisplay().getRotation();
+        int degrees = 0;
+        switch (rotation) {
+            case Surface.ROTATION_0: degrees = 0; break;
+            case Surface.ROTATION_90: degrees = 90; break;
+            case Surface.ROTATION_180: degrees = 180; break;
+            case Surface.ROTATION_270: degrees = 270; break;
+        }
+        if (android.os.Build.VERSION.SDK_INT > 8) {
+            android.hardware.Camera.CameraInfo info =
+                new android.hardware.Camera.CameraInfo();
+            android.hardware.Camera.getCameraInfo(cameraId, info);
+            int result;
+            if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
+                result = (info.orientation + degrees) % 360;
+            } else {  // back-facing
+                result = (info.orientation - degrees + 360) % 360;
+            }
+            return result;
+        } else {
+            // Assume 90deg orientation for Froyo devices.
+            // Only back-facing cameras are supported in Froyo.
+            int orientation = 90;
+            int result = (orientation - degrees + 360) % 360;
+            return result;
+        }
     }
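+
+    // Worked example of the arithmetic above, assuming a display rotated
+    // 90 degrees: a back-facing sensor mounted at 90 degrees needs
+    // (90 - 90 + 360) % 360 == 0 extra rotation, while a front-facing sensor
+    // mounted at 270 degrees needs (270 + 90) % 360 == 0; the sum (rather
+    // than the difference) accounts for the mirrored front preview.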
 
     private int tryStartCapture(int width, int height, int frameRate) {
         if (camera == null) {
             Log.e(TAG, "Camera not initialized %d" + id);
             return -1;
         }
 
-        Log.d(TAG, "tryStartCapture: " + width +
-            "x" + height +", frameRate: " + frameRate +
-            ", isCaptureRunning: " + isCaptureRunning +
-            ", isSurfaceReady: " + isSurfaceReady +
-            ", isCaptureStarted: " + isCaptureStarted);
+        Log.d(TAG, "tryStartCapture width " + width +
+                " height " + height + " frame rate " + frameRate +
+                " isCaptureRunning " + isCaptureRunning +
+                " isSurfaceReady " + isSurfaceReady +
+                " isCaptureStarted " + isCaptureStarted);
 
-        if (isCaptureRunning || !isCaptureStarted) {
+        if (isCaptureRunning || !isSurfaceReady || !isCaptureStarted) {
             return 0;
         }
 
-        CaptureCapabilityAndroid currentCapability =
-                new CaptureCapabilityAndroid();
-        currentCapability.width = width;
-        currentCapability.height = height;
-        currentCapability.maxFPS = frameRate;
-        PixelFormat.getPixelFormatInfo(PIXEL_FORMAT, pixelFormat);
+        try {
+            if (surfaceHolder != null)
+                camera.setPreviewDisplay(surfaceHolder);
+            if (surfaceTexture != null)
+                camera.setPreviewTexture(surfaceTexture);
+            if (surfaceHolder == null && surfaceTexture == null) {
+                // No local renderer.  Camera won't capture without
+                // setPreview{Texture,Display}, so we create a dummy SurfaceTexture
+                // and hand it over to Camera, but never listen for frame-ready
+                // callbacks, and never call updateTexImage on it.
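+                // The texture name passed to SurfaceTexture below (42) is an
+                // arbitrary GL texture id; nothing is ever bound to it or
+                // read back from it.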
+                try {
+                    dummySurfaceTexture = new SurfaceTexture(42);
+                    camera.setPreviewTexture(dummySurfaceTexture);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+
+            CaptureCapabilityAndroid currentCapability =
+                    new CaptureCapabilityAndroid();
+            currentCapability.width = width;
+            currentCapability.height = height;
+            currentCapability.maxFPS = frameRate;
+            PixelFormat.getPixelFormatInfo(PIXEL_FORMAT, pixelFormat);
+
 
-        Camera.Parameters parameters = camera.getParameters();
-        parameters.setPreviewSize(currentCapability.width,
-                currentCapability.height);
-        parameters.setPreviewFormat(PIXEL_FORMAT);
-        parameters.setPreviewFrameRate(currentCapability.maxFPS);
-        try {
+            Camera.Parameters parameters = camera.getParameters();
+            parameters.setPreviewSize(currentCapability.width,
+                    currentCapability.height);
+            parameters.setPreviewFormat(PIXEL_FORMAT);
+            parameters.setPreviewFrameRate(currentCapability.maxFPS);
             camera.setParameters(parameters);
-        } catch (RuntimeException e) {
-            Log.e(TAG, "setParameters failed", e);
+
+            int bufSize = width * height * pixelFormat.bitsPerPixel / 8;
+            byte[] buffer = null;
+            for (int i = 0; i < numCaptureBuffers; i++) {
+                buffer = new byte[bufSize];
+                camera.addCallbackBuffer(buffer);
+            }
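+            // With NV21 (12 bits per pixel) a 640x480 frame needs
+            // 640 * 480 * 12 / 8 = 460800 bytes; queuing numCaptureBuffers
+            // buffers lets the camera keep filling one while another is
+            // delivered to the C++ layer.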
+            camera.setPreviewCallbackWithBuffer(this);
+            ownsBuffers = true;
+
+            camera.startPreview();
+            previewBufferLock.lock();
+            expectedFrameSize = bufSize;
+            isCaptureRunning = true;
+            previewBufferLock.unlock();
+
+        }
+        catch (Exception ex) {
+            Log.e(TAG, "Failed to start camera: " + ex.getMessage());
             return -1;
         }
 
-        int bufSize = width * height * pixelFormat.bitsPerPixel / 8;
-        byte[] buffer = null;
-        for (int i = 0; i < numCaptureBuffers; i++) {
-            buffer = new byte[bufSize];
-            camera.addCallbackBuffer(buffer);
-        }
-        camera.setPreviewCallbackWithBuffer(this);
-        ownsBuffers = true;
-
-        camera.startPreview();
-        previewBufferLock.lock();
-        expectedFrameSize = bufSize;
-        isCaptureRunning = true;
-        previewBufferLock.unlock();
-
         return 0;
     }
 
     public int StartCapture(int width, int height, int frameRate) {
         Log.d(TAG, "StartCapture width " + width +
                 " height " + height +" frame rate " + frameRate);
-        // Get the local preview SurfaceHolder from the static render class
-        localPreview = ViERenderer.GetLocalRenderer();
-        if (localPreview != null) {
-            if (localPreview.getSurface() != null &&
-                localPreview.getSurface().isValid()) {
-                surfaceCreated(localPreview);
-            }
-            localPreview.addCallback(this);
-        } else {
-          // No local renderer.  Camera won't capture without
-          // setPreview{Texture,Display}, so we create a dummy SurfaceTexture
-          // and hand it over to Camera, but never listen for frame-ready
-          // callbacks, and never call updateTexImage on it.
-          captureLock.lock();
-          try {
-            dummySurfaceTexture = new SurfaceTexture(42);
-            camera.setPreviewTexture(dummySurfaceTexture);
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
-          captureLock.unlock();
-        }
-
         captureLock.lock();
         isCaptureStarted = true;
         mCaptureWidth = width;
         mCaptureHeight = height;
         mCaptureFPS = frameRate;
 
         int res = tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
 
         captureLock.unlock();
         return res;
     }
 
-    public int StopCapture() {
-        Log.d(TAG, "StopCapture");
+    public int DetachCamera() {
         try {
             previewBufferLock.lock();
             isCaptureRunning = false;
             previewBufferLock.unlock();
-            camera.stopPreview();
-            camera.setPreviewCallbackWithBuffer(null);
-        } catch (RuntimeException e) {
-            Log.e(TAG, "Failed to stop camera", e);
+            if (camera != null) {
+                camera.setPreviewCallbackWithBuffer(null);
+                camera.stopPreview();
+            }
+        } catch (Exception ex) {
+            Log.e(TAG, "Failed to stop camera: " + ex.getMessage());
             return -1;
         }
-
-        isCaptureStarted = false;
         return 0;
     }
 
-    native void ProvideCameraFrame(byte[] data, int length, long captureObject);
+    public int StopCapture() {
+        Log.d(TAG, "StopCapture");
+        isCaptureStarted = false;
+        return DetachCamera();
+    }
+
+    native void ProvideCameraFrame(byte[] data, int length, int rotation,
+                                   long captureObject);
 
     public void onPreviewFrame(byte[] data, Camera camera) {
         previewBufferLock.lock();
 
+        String dataLengthStr = "does not exist";
+        if (data != null) {
+          dataLengthStr = Integer.toString(data.length);
+        }
+
         // The following line is for debug only
-        // Log.v(TAG, "preview frame length " + data.length +
-        //            " context" + context);
+        Log.v(TAG, "preview frame length " + dataLengthStr +
+              " context " + context);
         if (isCaptureRunning) {
             // If StartCapture has been called but not StopCapture
             // Call the C++ layer with the captured frame
-            if (data.length == expectedFrameSize) {
-                ProvideCameraFrame(data, expectedFrameSize, context);
+            if (data != null && data.length == expectedFrameSize) {
+                ProvideCameraFrame(data, expectedFrameSize, mCaptureRotation,
+                                   context);
                 if (ownsBuffers) {
                     // Give the video buffer to the camera service again.
                     camera.addCallbackBuffer(data);
                 }
             }
         }
         previewBufferLock.unlock();
     }
 
-    // Sets the rotation of the preview render window.
-    // Does not affect the captured video image.
-    public void SetPreviewRotation(int rotation) {
-        Log.v(TAG, "SetPreviewRotation:" + rotation);
-
-        if (camera == null) {
-            return;
-        }
-
-        int resultRotation = 0;
-        if (currentDevice.frontCameraType ==
-            VideoCaptureDeviceInfoAndroid.FrontFacingCameraType.Android23) {
-            // this is a 2.3 or later front facing camera.
-            // SetDisplayOrientation will flip the image horizontally
-            // before doing the rotation.
-            resultRotation = ( 360 - rotation ) % 360; // compensate the mirror
-        }
-        else {
-            // Back facing or 2.2 or previous front camera
-            resultRotation = rotation;
-        }
-        camera.setDisplayOrientation(resultRotation);
-    }
-
     public void surfaceChanged(SurfaceHolder holder,
                                int format, int width, int height) {
         Log.d(TAG, "VideoCaptureAndroid::surfaceChanged");
+
+        captureLock.lock();
+        isSurfaceReady = true;
+        surfaceHolder = holder;
+
+        tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
+        captureLock.unlock();
     }
 
     public void surfaceCreated(SurfaceHolder holder) {
         Log.d(TAG, "VideoCaptureAndroid::surfaceCreated");
-        captureLock.lock();
-        try {
-          if (camera != null) {
-              camera.setPreviewDisplay(holder);
-          }
-        } catch (IOException e) {
-            Log.e(TAG, "Failed to set preview surface!", e);
-        }
-        captureLock.unlock();
     }
 
     public void surfaceDestroyed(SurfaceHolder holder) {
         Log.d(TAG, "VideoCaptureAndroid::surfaceDestroyed");
-        captureLock.lock();
-        try {
-            if (camera != null) {
-                camera.setPreviewDisplay(null);
-            }
-        } catch (IOException e) {
-            Log.e(TAG, "Failed to clear preview surface!", e);
-        }
-        captureLock.unlock();
+        isSurfaceReady = false;
+        DetachCamera();
     }
 }
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
@@ -247,44 +247,47 @@ public class VideoCaptureDeviceInfoAndro
 
     // Returns an instance of VideoCaptureAndroid.
     public VideoCaptureAndroid AllocateCamera(int id, long context,
             String deviceUniqueId) {
         try {
             Log.d(TAG, "AllocateCamera " + deviceUniqueId);
 
             Camera camera = null;
+            int cameraId = 0;
             AndroidVideoCaptureDevice deviceToUse = null;
             for (AndroidVideoCaptureDevice device: deviceList) {
                 if(device.deviceUniqueName.equals(deviceUniqueId)) {
                     // Found the wanted camera
                     deviceToUse = device;
                     switch(device.frontCameraType) {
                         case GalaxyS:
                             camera = AllocateGalaxySFrontCamera();
                             break;
                         case HTCEvo:
                             camera = AllocateEVOFrontFacingCamera();
                             break;
                         default:
                             // From Android 2.3 and onwards)
-                            if(android.os.Build.VERSION.SDK_INT>8)
-                                camera=Camera.open(device.index);
-                            else
-                                camera=Camera.open(); // Default camera
+                            if (android.os.Build.VERSION.SDK_INT > 8) {
+                                cameraId = device.index;
+                                camera = Camera.open(device.index);
+                            } else {
+                                camera = Camera.open(); // Default camera
+                            }
                     }
                 }
             }
 
             if(camera == null) {
                 return null;
             }
             Log.v(TAG, "AllocateCamera - creating VideoCaptureAndroid");
 
-            return new VideoCaptureAndroid(id, context, camera, deviceToUse);
+            return new VideoCaptureAndroid(id, context, camera, deviceToUse, cameraId);
         } catch (NoSuchMethodException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
         } catch (ClassNotFoundException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
         } catch (InvocationTargetException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
         } catch (IllegalAccessException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
@@ -11,16 +11,19 @@
 #include "webrtc/modules/video_capture/android/video_capture_android.h"
 
 #include <stdio.h>
 
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/ref_count.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#include "AndroidJNIWrapper.h"
+#include "mozilla/Assertions.h"
+
 namespace webrtc
 {
 #if defined(WEBRTC_ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
 // TODO(leozwang) These SetAndroidVM apis will be refactored, thus we only
 // keep and reference java vm.
 int32_t SetCaptureAndroidVM(void* javaVM, void* javaContext) {
   return videocapturemodule::VideoCaptureAndroid::SetAndroidObjects(
       javaVM,
@@ -40,260 +43,218 @@ VideoCaptureModule* VideoCaptureImpl::Cr
 
   if (!implementation || implementation->Init(id, deviceUniqueIdUTF8) != 0) {
     delete implementation;
     implementation = NULL;
   }
   return implementation;
 }
 
+#ifdef DEBUG
 // Android logging, uncomment to print trace to
 // logcat instead of trace file/callback
-// #include <android/log.h>
+#include <android/log.h>
 // #undef WEBRTC_TRACE
 // #define WEBRTC_TRACE(a,b,c,...)
 // __android_log_print(ANDROID_LOG_DEBUG, "*WEBRTCN*", __VA_ARGS__)
+// Some functions are called before the WebRTC logging can be brought up;
+// log those to the Android log instead.
+#define EARLY_WEBRTC_TRACE(a,b,c,...) __android_log_print(ANDROID_LOG_DEBUG, "*WEBRTC-VCA", __VA_ARGS__)
+#else
+#define EARLY_WEBRTC_TRACE(a,b,c,...)
+#endif
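+
+// Note: EARLY_WEBRTC_TRACE keeps WEBRTC_TRACE's argument shape
+// (level, module, id, format, ...), so call sites can be switched back to
+// WEBRTC_TRACE unchanged once tracing is initialized.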
 
 JavaVM* VideoCaptureAndroid::g_jvm = NULL;
 //VideoCaptureAndroid.java
 jclass VideoCaptureAndroid::g_javaCmClass = NULL;
 //VideoCaptureDeviceInfoAndroid.java
 jclass VideoCaptureAndroid::g_javaCmDevInfoClass = NULL;
 //static instance of VideoCaptureDeviceInfoAndroid.java
 jobject VideoCaptureAndroid::g_javaCmDevInfoObject = NULL;
-jobject VideoCaptureAndroid::g_javaContext = NULL;
 
 /*
  * Register references to Java Capture class.
  */
 int32_t VideoCaptureAndroid::SetAndroidObjects(void* javaVM,
                                                void* javaContext) {
 
+  MOZ_ASSERT(javaVM != nullptr || g_javaCmDevInfoClass != nullptr);
+  EARLY_WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
+               "%s: running", __FUNCTION__);
+
   g_jvm = static_cast<JavaVM*> (javaVM);
-  g_javaContext = static_cast<jobject> (javaContext);
 
   if (javaVM) {
+    // Already done? Exit early.
+    if (g_javaCmClass != NULL
+        && g_javaCmDevInfoClass != NULL
+        && g_javaCmDevInfoObject != NULL) {
+        EARLY_WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
+                     "%s: early exit", __FUNCTION__);
+        return 0;
+    }
+
     JNIEnv* env = NULL;
     if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: could not get Java environment", __FUNCTION__);
       return -1;
     }
     // get java capture class type (note path to class packet)
-    jclass javaCmClassLocal = env->FindClass(AndroidJavaCaptureClass);
-    if (!javaCmClassLocal) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+    g_javaCmClass = jsjni_GetGlobalClassRef(AndroidJavaCaptureClass);
+    if (!g_javaCmClass) {
+      EARLY_WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: could not find java class", __FUNCTION__);
       return -1;
     }
-    // create a global reference to the class
-    // (to tell JNI that we are referencing it
-    // after this function has returned)
-    g_javaCmClass = static_cast<jclass>
-        (env->NewGlobalRef(javaCmClassLocal));
-    if (!g_javaCmClass) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: InitVideoEngineJava(): could not create"
-                   " Java Camera class reference",
-                   __FUNCTION__);
-      return -1;
-    }
-    // Delete local class ref, we only use the global ref
-    env->DeleteLocalRef(javaCmClassLocal);
     JNINativeMethod nativeFunctions =
-        { "ProvideCameraFrame", "([BIJ)V",
+        { "ProvideCameraFrame", "([BIIJ)V",
           (void*) &VideoCaptureAndroid::ProvideCameraFrame };
     if (env->RegisterNatives(g_javaCmClass, &nativeFunctions, 1) == 0) {
-      WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
                    "%s: Registered native functions", __FUNCTION__);
     }
     else {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: Failed to register native functions",
                    __FUNCTION__);
       return -1;
     }
 
-    jclass capabilityClassLocal = env->FindClass(
-        "org/webrtc/videoengine/CaptureCapabilityAndroid");
-    if (!capabilityClassLocal) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: could not find java class", __FUNCTION__);
-      return -1;
-    }
-    jclass capabilityClassGlobal = reinterpret_cast<jclass>(env->NewGlobalRef(
-        capabilityClassLocal));
-    DeviceInfoAndroid::SetAndroidCaptureClasses(capabilityClassGlobal);
-
     // get java capture class type (note path to class packet)
-    jclass javaCmDevInfoClassLocal = env->FindClass(
-        "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid");
-    if (!javaCmDevInfoClassLocal) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+    g_javaCmDevInfoClass = jsjni_GetGlobalClassRef(
+                 AndroidJavaCaptureDeviceInfoClass);
+    if (!g_javaCmDevInfoClass) {
+      EARLY_WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: could not find java class", __FUNCTION__);
       return -1;
     }
 
-    // create a global reference to the class
-    // (to tell JNI that we are referencing it
-    // after this function has returned)
-    g_javaCmDevInfoClass = static_cast<jclass>
-        (env->NewGlobalRef(javaCmDevInfoClassLocal));
-    if (!g_javaCmDevInfoClass) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: InitVideoEngineJava(): could not create Java "
-                   "Camera Device info class reference",
-                   __FUNCTION__);
-      return -1;
-    }
-    // Delete local class ref, we only use the global ref
-    env->DeleteLocalRef(javaCmDevInfoClassLocal);
-
-    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
+    EARLY_WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
                  "VideoCaptureDeviceInfoAndroid get method id");
 
     // get the method ID for the Android Java CaptureClass static
     //CreateVideoCaptureAndroid factory method.
     jmethodID cid = env->GetStaticMethodID(
         g_javaCmDevInfoClass,
         "CreateVideoCaptureDeviceInfoAndroid",
         "(ILandroid/content/Context;)"
         "Lorg/webrtc/videoengine/VideoCaptureDeviceInfoAndroid;");
     if (cid == NULL) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: could not get java"
                    "VideoCaptureDeviceInfoAndroid constructor ID",
                    __FUNCTION__);
       return -1;
     }
 
-    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
+    EARLY_WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
                  "%s: construct static java device object", __FUNCTION__);
 
     // construct the object by calling the static constructor object
     jobject javaCameraDeviceInfoObjLocal =
         env->CallStaticObjectMethod(g_javaCmDevInfoClass,
                                     cid, (int) -1,
-                                    g_javaContext);
+                                    javaContext);
     if (!javaCameraDeviceInfoObjLocal) {
-      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, -1,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, -1,
                    "%s: could not create Java Capture Device info object",
                    __FUNCTION__);
       return -1;
     }
     // create a reference to the object (to tell JNI that
     // we are referencing it after this function has returned)
     g_javaCmDevInfoObject = env->NewGlobalRef(javaCameraDeviceInfoObjLocal);
     if (!g_javaCmDevInfoObject) {
-      WEBRTC_TRACE(webrtc::kTraceError,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceError,
                    webrtc::kTraceAudioDevice,
                    -1,
                    "%s: could not create Java"
                    "cameradevinceinfo object reference",
                    __FUNCTION__);
       return -1;
     }
     // Delete local object ref, we only use the global ref
     env->DeleteLocalRef(javaCameraDeviceInfoObjLocal);
+
+    EARLY_WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
+                 "%s: success", __FUNCTION__);
     return 0;
   }
   else {
-    WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
+    EARLY_WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                  "%s: JVM is NULL, assuming deinit", __FUNCTION__);
     if (!g_jvm) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: SetAndroidObjects not called with a valid JVM.",
                    __FUNCTION__);
       return -1;
     }
     JNIEnv* env = NULL;
     bool attached = false;
     if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
       // try to attach the thread and get the env
       // Attach this thread to JVM
       jint res = g_jvm->AttachCurrentThread(&env, NULL);
       if ((res < 0) || !env) {
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture,
+        EARLY_WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture,
                      -1, "%s: Could not attach thread to JVM (%d, %p)",
                      __FUNCTION__, res, env);
         return -1;
       }
       attached = true;
     }
     env->DeleteGlobalRef(g_javaCmDevInfoObject);
     env->DeleteGlobalRef(g_javaCmDevInfoClass);
     env->DeleteGlobalRef(g_javaCmClass);
     if (attached && g_jvm->DetachCurrentThread() < 0) {
-      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, -1,
+      EARLY_WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, -1,
                    "%s: Could not detach thread from JVM", __FUNCTION__);
       return -1;
     }
     return 0;
     env = (JNIEnv *) NULL;
   }
   return 0;
 }
 
-int32_t VideoCaptureAndroid::AttachAndUseAndroidDeviceInfoObjects(
-    JNIEnv*& env,
-    jclass& javaCmDevInfoClass,
-    jobject& javaCmDevInfoObject,
-    bool& attached) {
-  // get the JNI env for this thread
-  if (!g_jvm) {
-    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                 "%s: SetAndroidObjects not called with a valid JVM.",
-                 __FUNCTION__);
-    return -1;
-  }
-  attached = false;
-  if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = g_jvm->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: Could not attach thread to JVM (%d, %p)",
-                   __FUNCTION__, res, env);
-      return -1;
-    }
-    attached = true;
-  }
-  javaCmDevInfoClass = g_javaCmDevInfoClass;
-  javaCmDevInfoObject = g_javaCmDevInfoObject;
-  return 0;
-
-}
-
-int32_t VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(
-    bool attached) {
-  if (attached && g_jvm->DetachCurrentThread() < 0) {
-    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, -1,
-                 "%s: Could not detach thread from JVM", __FUNCTION__);
-    return -1;
-  }
-  return 0;
-}
-
 /*
  * JNI callback from Java class. Called
  * when the camera has a new frame to deliver
  * Class:     org_webrtc_capturemodule_VideoCaptureAndroid
  * Method:    ProvideCameraFrame
- * Signature: ([BIJ)V
+ * Signature: ([BIIJ)V
  */
 void JNICALL VideoCaptureAndroid::ProvideCameraFrame(JNIEnv * env,
                                                      jobject,
                                                      jbyteArray javaCameraFrame,
                                                      jint length,
+                                                     jint rotation,
                                                      jlong context) {
   VideoCaptureAndroid* captureModule =
       reinterpret_cast<VideoCaptureAndroid*>(context);
   WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture,
                -1, "%s: IncomingFrame %d", __FUNCTION__,length);
+
+  switch (rotation) {
+    case 90:
+      captureModule->SetCaptureRotation(kCameraRotate90);
+      break;
+    case 180:
+      captureModule->SetCaptureRotation(kCameraRotate180);
+      break;
+    case 270:
+      captureModule->SetCaptureRotation(kCameraRotate270);
+      break;
+    case 0:
+    default:
+      captureModule->SetCaptureRotation(kCameraRotate0);
+      break;
+  }
+
   jbyte* cameraFrame= env->GetByteArrayElements(javaCameraFrame,NULL);
   captureModule->IncomingFrame((uint8_t*) cameraFrame,
                                length,captureModule->_frameInfo,0);
   env->ReleaseByteArrayElements(javaCameraFrame,cameraFrame,JNI_ABORT);
 }
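+
+// JNI signature note: the "([BIIJ)V" registered in SetAndroidObjects decodes
+// as byte[] ([B), int (I), int (I), long (J) returning void (V), matching
+// the Java-side declaration:
+//   native void ProvideCameraFrame(byte[] data, int length, int rotation,
+//                                  long captureObject);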
 
 
 
@@ -306,17 +267,17 @@ VideoCaptureAndroid::VideoCaptureAndroid
 
 // ----------------------------------------------------------------------------
 //  Init
 //
 //  Initializes needed Java resources like the JNI interface to
 //  VideoCaptureAndroid.java
 // ----------------------------------------------------------------------------
 int32_t VideoCaptureAndroid::Init(const int32_t id,
                                   const char* deviceUniqueIdUTF8) {
   const int nameLength = strlen(deviceUniqueIdUTF8);
   if (nameLength >= kVideoCaptureUniqueNameLength) {
     return -1;
   }
 
   // Store the device name
   _deviceUniqueId = new char[nameLength + 1];
   memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
@@ -333,53 +294,45 @@ int32_t VideoCaptureAndroid::Init(const 
   WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1, "%s:",
                __FUNCTION__);
   // use the jvm that has been set
   if (!g_jvm) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Not a valid Java VM pointer", __FUNCTION__);
     return -1;
   }
-  // get the JNI env for this thread
-  JNIEnv *env;
-  bool isAttached = false;
 
-  // get the JNI env for this thread
-  if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = g_jvm->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
-                   "%s: Could not attach thread to JVM (%d, %p)",
-                   __FUNCTION__, res, env);
+  AutoLocalJNIFrame jniFrame;
+  JNIEnv* env = jniFrame.GetEnv();
+  if (!env)
       return -1;
-    }
-    isAttached = true;
-  }
+
+  jclass javaCmDevInfoClass = jniFrame.GetCmDevInfoClass();
+  jobject javaCmDevInfoObject = jniFrame.GetCmDevInfoObject();
+
+  int32_t rotation = 0;
 
   WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, _id,
                "get method id");
-
   // get the method ID for the Android Java
   // CaptureDeviceInfoClass AllocateCamera factory method.
   char signature[256];
   sprintf(signature, "(IJLjava/lang/String;)L%s;", AndroidJavaCaptureClass);
 
-  jmethodID cid = env->GetMethodID(g_javaCmDevInfoClass, "AllocateCamera",
+  jmethodID cid = env->GetMethodID(javaCmDevInfoClass, "AllocateCamera",
                                    signature);
   if (cid == NULL) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: could not get constructor ID", __FUNCTION__);
     return -1; /* exception thrown */
   }
 
   jstring capureIdString = env->NewStringUTF((char*) deviceUniqueIdUTF8);
   // construct the object by calling the static constructor object
-  jobject javaCameraObjLocal = env->CallObjectMethod(g_javaCmDevInfoObject,
+  jobject javaCameraObjLocal = env->CallObjectMethod(javaCmDevInfoObject,
                                                      cid, (jint) id,
                                                      (jlong) this,
                                                      capureIdString);
   if (!javaCameraObjLocal) {
     WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, _id,
                  "%s: could not create Java Capture object", __FUNCTION__);
     return -1;
   }
@@ -389,113 +342,68 @@ int32_t VideoCaptureAndroid::Init(const 
   _javaCaptureObj = env->NewGlobalRef(javaCameraObjLocal);
   if (!_javaCaptureObj) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioDevice, _id,
                  "%s: could not create Java camera object reference",
                  __FUNCTION__);
     return -1;
   }
 
-  // Delete local object ref, we only use the global ref
-  env->DeleteLocalRef(javaCameraObjLocal);
-
-  // Detach this thread if it was attached
-  if (isAttached) {
-    if (g_jvm->DetachCurrentThread() < 0) {
-      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice, _id,
-                   "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
-
   return 0;
 }
 
 VideoCaptureAndroid::~VideoCaptureAndroid() {
   WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1, "%s:",
                __FUNCTION__);
   if (_javaCaptureObj == NULL || g_jvm == NULL) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                  "%s: Nothing to clean", __FUNCTION__);
   }
   else {
-    bool isAttached = false;
-    // get the JNI env for this thread
-    JNIEnv *env;
-    if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-      // try to attach the thread and get the env
-      // Attach this thread to JVM
-      jint res = g_jvm->AttachCurrentThread(&env, NULL);
-      if ((res < 0) || !env) {
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture,
-                     _id,
-                     "%s: Could not attach thread to JVM (%d, %p)",
-                     __FUNCTION__, res, env);
-      }
-      else {
-        isAttached = true;
-      }
-    }
+    AutoLocalJNIFrame jniFrame;
+    JNIEnv* env = jniFrame.GetEnv();
+    if (!env)
+        return;
 
     // get the method ID for the Android Java CaptureClass static
     // DeleteVideoCaptureAndroid  method. Call this to release the camera so
     // another application can use it.
-    jmethodID cid = env->GetStaticMethodID(
-        g_javaCmClass,
-        "DeleteVideoCaptureAndroid",
-        "(Lorg/webrtc/videoengine/VideoCaptureAndroid;)V");
+    jmethodID cid = env->GetStaticMethodID(g_javaCmClass,
+                                           "DeleteVideoCaptureAndroid",
+                                           "(Lorg/webrtc/videoengine/VideoCaptureAndroid;)V");
     if (cid != NULL) {
       WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
                    "%s: Call DeleteVideoCaptureAndroid", __FUNCTION__);
       // Close the camera by calling the static destruct function.
       env->CallStaticVoidMethod(g_javaCmClass, cid, _javaCaptureObj);
 
       // Delete global object ref to the camera.
       env->DeleteGlobalRef(_javaCaptureObj);
       _javaCaptureObj = NULL;
-    }
-    else {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: Failed to find DeleteVideoCaptureAndroid id",
-                   __FUNCTION__);
-    }
-
-    // Detach this thread if it was attached
-    if (isAttached) {
-      if (g_jvm->DetachCurrentThread() < 0) {
-        WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice,
-                     _id, "%s: Could not detach thread from JVM",
+    } else {
+        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+                     "%s: Failed to find DeleteVideoCaptureAndroid id",
                      __FUNCTION__);
-      }
     }
   }
 }
 
 int32_t VideoCaptureAndroid::StartCapture(
     const VideoCaptureCapability& capability) {
   CriticalSectionScoped cs(&_apiCs);
   WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                "%s: ", __FUNCTION__);
 
-  bool isAttached = false;
   int32_t result = 0;
-  // get the JNI env for this thread
-  JNIEnv *env;
-  if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = g_jvm->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
-                   "%s: Could not attach thread to JVM (%d, %p)",
-                   __FUNCTION__, res, env);
-    }
-    else {
-      isAttached = true;
-    }
-  }
+  int32_t rotation = 0;
+
+  AutoLocalJNIFrame jniFrame;
+  JNIEnv* env = jniFrame.GetEnv();
+  if (!env)
+      return -1;
 
   if (_capInfo.GetBestMatchedCapability(_deviceUniqueId, capability,
                                         _frameInfo) < 0) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                  "%s: GetBestMatchedCapability failed. Req cap w%d h%d",
                  __FUNCTION__, capability.width, capability.height);
     return -1;
   }
@@ -517,54 +425,36 @@ int32_t VideoCaptureAndroid::StartCaptur
     result = env->CallIntMethod(_javaCaptureObj, cid, _frameInfo.width,
                                 _frameInfo.height, _frameInfo.maxFPS);
   }
   else {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                  "%s: Failed to find StartCapture id", __FUNCTION__);
   }
 
-  // Detach this thread if it was attached
-  if (isAttached) {
-    if (g_jvm->DetachCurrentThread() < 0) {
-      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice, _id,
-                   "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
   if (result == 0) {
     _requestedCapability = capability;
     _captureStarted = true;
   }
   WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                "%s: result %d", __FUNCTION__, result);
   return result;
 }
 
 int32_t VideoCaptureAndroid::StopCapture() {
   CriticalSectionScoped cs(&_apiCs);
   WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                "%s: ", __FUNCTION__);
 
-  bool isAttached = false;
   int32_t result = 0;
-  // get the JNI env for this thread
-  JNIEnv *env = NULL;
-  if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-    // try to attach the thread and get the env
-    // Attach this thread to JVM
-    jint res = g_jvm->AttachCurrentThread(&env, NULL);
-    if ((res < 0) || !env) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
-                   "%s: Could not attach thread to JVM (%d, %p)",
-                   __FUNCTION__, res, env);
-    }
-    else {
-      isAttached = true;
-    }
-  }
+
+  AutoLocalJNIFrame jniFrame;
+  JNIEnv* env = jniFrame.GetEnv();
+  if (!env)
+      return -1;
 
   memset(&_requestedCapability, 0, sizeof(_requestedCapability));
   memset(&_frameInfo, 0, sizeof(_frameInfo));
 
   // get the method ID for the Android Java CaptureClass StopCapture  method.
   jmethodID cid = env->GetMethodID(g_javaCmClass, "StopCapture", "()I");
   if (cid != NULL) {
     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
@@ -572,23 +462,16 @@ int32_t VideoCaptureAndroid::StopCapture
     // Close the camera by calling the static destruct function.
     result = env->CallIntMethod(_javaCaptureObj, cid);
   }
   else {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                  "%s: Failed to find StopCapture id", __FUNCTION__);
   }
 
-  // Detach this thread if it was attached
-  if (isAttached) {
-    if (g_jvm->DetachCurrentThread() < 0) {
-      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice, _id,
-                   "%s: Could not detach thread from JVM", __FUNCTION__);
-    }
-  }
   _captureStarted = false;
 
   WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                "%s: result %d", __FUNCTION__, result);
   return result;
 }
 
 bool VideoCaptureAndroid::CaptureStarted() {
@@ -605,71 +488,13 @@ int32_t VideoCaptureAndroid::CaptureSett
                "%s: ", __FUNCTION__);
   settings = _requestedCapability;
   return 0;
 }
 
 int32_t VideoCaptureAndroid::SetCaptureRotation(
     VideoCaptureRotation rotation) {
   CriticalSectionScoped cs(&_apiCs);
-  if (VideoCaptureImpl::SetCaptureRotation(rotation) == 0) {
-    if (!g_jvm)
-      return -1;
-
-    // get the JNI env for this thread
-    JNIEnv *env;
-    bool isAttached = false;
-
-    // get the JNI env for this thread
-    if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-      // try to attach the thread and get the env
-      // Attach this thread to JVM
-      jint res = g_jvm->AttachCurrentThread(&env, NULL);
-      if ((res < 0) || !env) {
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture,
-                     _id,
-                     "%s: Could not attach thread to JVM (%d, %p)",
-                     __FUNCTION__, res, env);
-        return -1;
-      }
-      isAttached = true;
-    }
-
-    jmethodID cid = env->GetMethodID(g_javaCmClass, "SetPreviewRotation",
-                                     "(I)V");
-    if (cid == NULL) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: could not get java SetPreviewRotation ID",
-                   __FUNCTION__);
-      return -1;
-    }
-    jint rotateFrame = 0;
-    switch (rotation) {
-      case kCameraRotate0:
-        rotateFrame = 0;
-        break;
-      case kCameraRotate90:
-        rotateFrame = 90;
-        break;
-      case kCameraRotate180:
-        rotateFrame = 180;
-        break;
-      case kCameraRotate270:
-        rotateFrame = 270;
-        break;
-    }
-    env->CallVoidMethod(_javaCaptureObj, cid, rotateFrame);
-
-    // Detach this thread if it was attached
-    if (isAttached) {
-      if (g_jvm->DetachCurrentThread() < 0) {
-        WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice,
-                     _id, "%s: Could not detach thread from JVM",
-                     __FUNCTION__);
-      }
-    }
-
-  }
-  return 0;
+  return VideoCaptureImpl::SetCaptureRotation(rotation);
 }
 
 }  // namespace videocapturemodule
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.h
@@ -7,60 +7,171 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
 #define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
 
 #include <jni.h>
+#include <assert.h>
+#include "trace.h"
 
 #include "webrtc/modules/video_capture/android/device_info_android.h"
 #include "webrtc/modules/video_capture/video_capture_impl.h"
 
 #define AndroidJavaCaptureClass "org/webrtc/videoengine/VideoCaptureAndroid"
 
 namespace webrtc {
 namespace videocapturemodule {
 
 class VideoCaptureAndroid : public VideoCaptureImpl {
  public:
   static int32_t SetAndroidObjects(void* javaVM, void* javaContext);
-  static int32_t AttachAndUseAndroidDeviceInfoObjects(
-      JNIEnv*& env,
-      jclass& javaCmDevInfoClass,
-      jobject& javaCmDevInfoObject,
-      bool& attached);
-  static int32_t ReleaseAndroidDeviceInfoObjects(bool attached);
-
   VideoCaptureAndroid(const int32_t id);
   virtual int32_t Init(const int32_t id, const char* deviceUniqueIdUTF8);
 
-
   virtual int32_t StartCapture(
       const VideoCaptureCapability& capability);
   virtual int32_t StopCapture();
   virtual bool CaptureStarted();
   virtual int32_t CaptureSettings(VideoCaptureCapability& settings);
   virtual int32_t SetCaptureRotation(VideoCaptureRotation rotation);
 
+  friend class AutoLocalJNIFrame;
+
  protected:
   virtual ~VideoCaptureAndroid();
   static void JNICALL ProvideCameraFrame (JNIEnv * env,
                                           jobject,
                                           jbyteArray javaCameraFrame,
-                                          jint length, jlong context);
+                                          jint length,
+                                          jint rotation,
+                                          jlong context);
   DeviceInfoAndroid _capInfo;
   jobject _javaCaptureObj; // Java Camera object.
   VideoCaptureCapability _frameInfo;
   bool _captureStarted;
 
   static JavaVM* g_jvm;
   static jclass g_javaCmClass;
   static jclass g_javaCmDevInfoClass;
   //Static java object implementing the needed device info functions;
   static jobject g_javaCmDevInfoObject;
-  static jobject g_javaContext; // Java Application context
+};
+
+// Reworked version of what is available in AndroidBridge,
+// can attach/detach in addition to push/pop frames.
+class AutoLocalJNIFrame {
+public:
+    AutoLocalJNIFrame(int nEntries = 128)
+        : mEntries(nEntries), mHasFrameBeenPushed(false), mAttached(false)
+    {
+        mJNIEnv = InitJNIEnv();
+        Push();
+    }
+
+    JNIEnv* GetEnv() {
+        return mJNIEnv;
+    }
+
+    jclass GetCmDevInfoClass() {
+        assert(VideoCaptureAndroid::g_javaCmDevInfoClass != nullptr);
+        return VideoCaptureAndroid::g_javaCmDevInfoClass;
+    }
+
+    jobject GetCmDevInfoObject() {
+        assert(VideoCaptureAndroid::g_javaCmDevInfoObject != nullptr);
+        return VideoCaptureAndroid::g_javaCmDevInfoObject;
+    }
+
+    bool CheckForException() {
+        if (mJNIEnv->ExceptionCheck()) {
+            mJNIEnv->ExceptionDescribe();
+            mJNIEnv->ExceptionClear();
+            return true;
+        }
+
+        return false;
+    }
+
+    ~AutoLocalJNIFrame() {
+        if (!mJNIEnv)
+            return;
+
+        CheckForException();
+
+        if (mHasFrameBeenPushed)
+            mJNIEnv->PopLocalFrame(NULL);
+
+        if (mAttached) {
+            int res = VideoCaptureAndroid::g_jvm->DetachCurrentThread();
+            if (res < 0) {
+                WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+                         "%s: JVM Detach failed.", __FUNCTION__);
+            }
+        }
+    }
+
+private:
+    void Push() {
+        if (!mJNIEnv)
+            return;
+
+        // Make sure there is enough space to store a local ref to the
+        // exception. This may not be strictly needed, but it does not hurt.
+        jint ret = mJNIEnv->PushLocalFrame(mEntries + 1);
+        assert(ret == 0);
+        if (ret < 0)
+            CheckForException();
+        else
+            mHasFrameBeenPushed = true;
+    }
+
+    JNIEnv* InitJNIEnv()
+    {
+        JNIEnv* env = nullptr;
+
+        // Get the JNI env for this thread.
+        if (!VideoCaptureAndroid::g_jvm) {
+            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+                         "%s: SetAndroidObjects not called with a valid JVM.",
+                         __FUNCTION__);
+            return nullptr;
+        }
+
+        jint res = VideoCaptureAndroid::g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4);
+        if (res == JNI_EDETACHED) {
+            // Try to attach this thread to the JVM and get the env.
+            res = VideoCaptureAndroid::g_jvm->AttachCurrentThread(&env, NULL);
+            if ((res < 0) || !env) {
+                // Attaching failed, error out.
+                WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+                             "%s: Could not attach thread to JVM (%d, %p)",
+                             __FUNCTION__, res, env);
+                return nullptr;
+            }
+            mAttached = true;
+            WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
+                         "%s: attach success", __FUNCTION__);
+        } else if (res == JNI_OK) {
+            // Already attached, GetEnv succeeded.
+            WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
+                         "%s: did not attach because JVM Env already present",
+                         __FUNCTION__);
+        } else {
+            // Non-recoverable error in GetEnv.
+            return nullptr;
+        }
+
+        return env;
+    }
+
+    int mEntries;
+    JNIEnv* mJNIEnv;
+    bool mHasFrameBeenPushed;
+    bool mAttached;
 };
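+
+// Typical use (see Init/StartCapture/StopCapture above): construct the frame
+// on entry and bail out if GetEnv() returns null; the destructor pops the
+// local frame and detaches the thread on every return path. Sketch:
+//
+//   AutoLocalJNIFrame jniFrame;
+//   JNIEnv* env = jniFrame.GetEnv();
+//   if (!env)
+//     return -1;
+//   // ... JNI calls; local refs are released when jniFrame goes out of scope.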
 
 }  // namespace videocapturemodule
 }  // namespace webrtc
 #endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
--- a/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
@@ -50,17 +50,17 @@ int32_t DeviceInfoImpl::NumberOfCapabili
     if (!deviceUniqueIdUTF8)
         return -1;
 
     _apiLock.AcquireLockShared();
 
     if (_lastUsedDeviceNameLength == strlen((char*) deviceUniqueIdUTF8))
     {
         // Is it the same device that is asked for again.
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         if(strncasecmp((char*)_lastUsedDeviceName,
                        (char*) deviceUniqueIdUTF8,
                        _lastUsedDeviceNameLength)==0)
 #else
         if (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) == 0)
 #endif
@@ -87,17 +87,17 @@ int32_t DeviceInfoImpl::GetCapability(co
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                    "deviceUniqueIdUTF8 parameter not set in call to GetCapability");
         return -1;
     }
     ReadLockScoped cs(_apiLock);
 
     if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         || (strncasecmp((char*)_lastUsedDeviceName,
                         (char*) deviceUniqueIdUTF8,
                         _lastUsedDeviceNameLength)!=0))
 #else
         || (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #endif
@@ -151,17 +151,17 @@ int32_t DeviceInfoImpl::GetBestMatchedCa
 {
 
 
     if (!deviceUniqueIdUTF8)
         return -1;
 
     ReadLockScoped cs(_apiLock);
     if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         || (strncasecmp((char*)_lastUsedDeviceName,
                         (char*) deviceUniqueIdUTF8,
                         _lastUsedDeviceNameLength)!=0))
 #else
         || (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #endif
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
@@ -88,19 +88,20 @@ int32_t DeviceInfoLinux::GetDeviceName(
 {
     WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCapture, _id, "%s", __FUNCTION__);
 
     // Travel through /dev/video [0-63]
     uint32_t count = 0;
     char device[20];
     int fd = -1;
     bool found = false;
-    for (int n = 0; n < 64; n++)
+    int device_index;
+    for (device_index = 0; device_index < 64; device_index++)
     {
-        sprintf(device, "/dev/video%d", n);
+        sprintf(device, "/dev/video%d", device_index);
         if ((fd = open(device, O_RDONLY)) != -1)
         {
             if (count == deviceNumber) {
                 // Found the device
                 found = true;
                 break;
             } else {
                 close(fd);
@@ -149,73 +150,84 @@ int32_t DeviceInfoLinux::GetDeviceName(
                    strlen((const char*) cap.bus_info));
         }
         else
         {
             WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                        "buffer passed is too small");
             return -1;
         }
+    } else {
+        // If there is no bus info to use for the unique id, invent one; it
+        // must be repeatable across calls.
+        if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%d", device_index) >=
+            deviceUniqueIdUTF8Length)
+        {
+            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
+                       "buffer passed is too small");
+            return -1;
+        }
     }
 
     return 0;
 }
 
 int32_t DeviceInfoLinux::CreateCapabilityMap(
                                         const char* deviceUniqueIdUTF8)
 {
     int fd;
     char device[32];
     bool found = false;
+    int device_index;
 
     const int32_t deviceUniqueIdUTF8Length =
                             (int32_t) strlen((char*) deviceUniqueIdUTF8);
     if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id, "Device name too long");
         return -1;
     }
     WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _id,
                "CreateCapabilityMap called for device %s", deviceUniqueIdUTF8);
 
-    /* detect /dev/video [0-63] entries */
+    /* a synthetic "fake_N" unique id maps straight to /dev/videoN */
-    for (int n = 0; n < 64; ++n)
+    if (sscanf(deviceUniqueIdUTF8,"fake_%d",&device_index) == 1)
     {
-        sprintf(device, "/dev/video%d", n);
+        sprintf(device, "/dev/video%d", device_index);
         fd = open(device, O_RDONLY);
-        if (fd == -1)
-          continue;
+        if (fd != -1) {
+            found = true;
+        }
+    } else {
+        /* detect /dev/video [0-63] entries */
+        for (int n = 0; n < 64; ++n)
+        {