Bug 1109248: Rollup of changes previously applied to media/webrtc/trunk/webrtc rs=jesup
author: Randell Jesup <rjesup@jesup.org>
Thu, 29 Jan 2015 18:33:36 -0500
changeset 253833 55c60f012e8fb51715f908ac01f6df2218607938
parent 253832 f5a4769477bf7880c88ab688ef4d7f94d8219369
child 253834 aa4120c46b704e323eea79d9372cac91b4985617
push id: 4610
push user: jlund@mozilla.com
push date: Mon, 30 Mar 2015 18:32:55 +0000
reviewers: jesup
bugs: 1109248
milestone: 38.0a1
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCVideo.cpp
media/webrtc/trunk/webrtc/build/arm_neon.gypi
media/webrtc/trunk/webrtc/build/common.gypi
media/webrtc/trunk/webrtc/build/merge_libs.gyp
media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
media/webrtc/trunk/webrtc/common_types.h
media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
media/webrtc/trunk/webrtc/engine_configurations.h
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq.gypi
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/mac/audio_device_mac.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.cc
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.h
media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_internal.h
media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation.c
media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.h
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_mac.mm
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_null.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_unittest.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture.gypi
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture_types.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info_null.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/differ_block.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/mac/desktop_device_info_mac.h
media/webrtc/trunk/webrtc/modules/desktop_capture/mac/desktop_device_info_mac.mm
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capture_utils.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/win_shared.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/win_shared.h
media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.h
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.h
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.h
media/webrtc/trunk/webrtc/modules/interface/module_common_types.h
media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
media/webrtc/trunk/webrtc/modules/modules.gyp
media/webrtc/trunk/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.h
media/webrtc/trunk/webrtc/modules/video_capture/include/video_capture.h
media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.h
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
media/webrtc/trunk/webrtc/modules/video_capture/windows/video_capture_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/video_capture_ds.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/h264/include/h264.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/interface/video_coding.h
media/webrtc/trunk/webrtc/modules/video_coding/main/interface/video_coding_defines.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/content_metrics_processing.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/content_metrics_processing.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/decoding_state.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_decoder.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/media_optimization.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/media_optimization.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/packet.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/qm_select.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/qm_select.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/receiver.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/receiver.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_coding_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_coding_impl.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_receiver.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_sender.cc
media/webrtc/trunk/webrtc/modules/video_processing/main/source/content_analysis.cc
media/webrtc/trunk/webrtc/modules/video_processing/main/source/content_analysis.h
media/webrtc/trunk/webrtc/modules/video_processing/main/source/content_analysis_sse2.cc
media/webrtc/trunk/webrtc/modules/video_processing/main/source/video_processing.gypi
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViEAndroidGLES20.java
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViERenderer.java
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java
media/webrtc/trunk/webrtc/system_wrappers/interface/asm_defines.h
media/webrtc/trunk/webrtc/system_wrappers/interface/scoped_ptr.h
media/webrtc/trunk/webrtc/system_wrappers/interface/thread_wrapper.h
media/webrtc/trunk/webrtc/system_wrappers/interface/tick_util.h
media/webrtc/trunk/webrtc/system_wrappers/interface/trace.h
media/webrtc/trunk/webrtc/system_wrappers/source/atomic32_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/clock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_features.cc
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_features_android.c
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_info.cc
media/webrtc/trunk/webrtc/system_wrappers/source/droid-cpu-features.c
media/webrtc/trunk/webrtc/system_wrappers/source/droid-cpu-features.h
media/webrtc/trunk/webrtc/system_wrappers/source/rw_lock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/spreadsortlib/spreadsort.hpp
media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers.gyp
media/webrtc/trunk/webrtc/system_wrappers/source/thread.cc
media/webrtc/trunk/webrtc/system_wrappers/source/thread_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/thread_win.cc
media/webrtc/trunk/webrtc/system_wrappers/source/thread_win.h
media/webrtc/trunk/webrtc/system_wrappers/source/tick_util.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.h
media/webrtc/trunk/webrtc/system_wrappers/source/trace_posix.cc
media/webrtc/trunk/webrtc/test/channel_transport/udp_transport_impl.cc
media/webrtc/trunk/webrtc/typedefs.h
media/webrtc/trunk/webrtc/video/receive_statistics_proxy.cc
media/webrtc/trunk/webrtc/video/receive_statistics_proxy.h
media/webrtc/trunk/webrtc/video_engine/browser_capture_impl.h
media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.h
media/webrtc/trunk/webrtc/video_engine/include/vie_base.h
media/webrtc/trunk/webrtc/video_engine/include/vie_capture.h
media/webrtc/trunk/webrtc/video_engine/include/vie_codec.h
media/webrtc/trunk/webrtc/video_engine/include/vie_rtp_rtcp.h
media/webrtc/trunk/webrtc/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
media/webrtc/trunk/webrtc/video_engine/vie_base_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_base_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_capturer.cc
media/webrtc/trunk/webrtc/video_engine/vie_capturer.h
media/webrtc/trunk/webrtc/video_engine/vie_channel.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel.h
media/webrtc/trunk/webrtc/video_engine/vie_channel_manager.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel_manager.h
media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_encoder.cc
media/webrtc/trunk/webrtc/video_engine/vie_encoder.h
media/webrtc/trunk/webrtc/video_engine/vie_input_manager.cc
media/webrtc/trunk/webrtc/video_engine/vie_input_manager.h
media/webrtc/trunk/webrtc/video_engine/vie_receiver.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.h
media/webrtc/trunk/webrtc/video_engine/vie_rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_shared_data.cc
media/webrtc/trunk/webrtc/video_engine/vie_shared_data.h
media/webrtc/trunk/webrtc/video_engine/vie_sync_module.cc
media/webrtc/trunk/webrtc/voice_engine/channel.cc
media/webrtc/trunk/webrtc/voice_engine/channel.h
media/webrtc/trunk/webrtc/voice_engine/include/mock/fake_voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_rtp_rtcp.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_video_sync.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/output_mixer.h
media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/video_sync_test.cc
media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/utility_unittest.cc
media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/voice_engine/voe_video_sync_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_video_sync_impl.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine.gyp
media/webrtc/trunk/webrtc/voice_engine/voice_engine_defines.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine_impl.cc
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -64,16 +64,17 @@ class MediaEngineWebRTCVideoSource : pub
 public:
   NS_DECL_THREADSAFE_ISUPPORTS
 
   // ViEExternalRenderer.
   virtual int FrameSizeChange(unsigned int w, unsigned int h, unsigned int streams) MOZ_OVERRIDE;
   virtual int DeliverFrame(unsigned char* buffer,
                            int size,
                            uint32_t time_stamp,
+                           int64_t ntp_time_ms,
                            int64_t render_time,
                            void *handle) MOZ_OVERRIDE;
   /**
    * Does DeliverFrame() support a null buffer and non-null handle
    * (video texture)?
    * XXX Investigate!  Especially for Android/B2G
    */
   virtual bool IsTextureSupported() MOZ_OVERRIDE { return false; }
--- a/dom/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -41,18 +41,18 @@ MediaEngineWebRTCVideoSource::FrameSizeC
   mHeight = h;
   LOG(("Video FrameSizeChange: %ux%u", w, h));
   return 0;
 }
 
 // ViEExternalRenderer Callback. Process every incoming frame here.
 int
 MediaEngineWebRTCVideoSource::DeliverFrame(
-   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
-   void *handle)
+   unsigned char* buffer, int size, uint32_t time_stamp,
+   int64_t ntp_time_ms, int64_t render_time, void *handle)
 {
   // Check for proper state.
   if (mState != kStarted) {
     LOG(("DeliverFrame: video not started"));
     return 0;
   }
 
   if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
@@ -82,18 +82,18 @@ MediaEngineWebRTCVideoSource::DeliverFra
   data.mPicY = 0;
   data.mPicSize = IntSize(mWidth, mHeight);
   data.mStereoMode = StereoMode::MONO;
 
   videoImage->SetData(data);
 
 #ifdef DEBUG
   static uint32_t frame_num = 0;
-  LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
-            mWidth, mHeight, time_stamp, render_time));
+  LOGFRAME(("frame %d (%dx%d); timestamp %u, ntp_time %lu, render_time %lu", frame_num++,
+            mWidth, mHeight, time_stamp, ntp_time_ms, render_time));
 #endif
 
  // we don't touch anything in 'this' until here (except for snapshot,
  // which has its own lock)
   MonitorAutoLock lock(mMonitor);
 
   // implicitly releases last image
   mImage = image.forget();
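
The size check above guards DeliverFrame() against buffers that are not a complete I420 (YUV 4:2:0) frame: one full-resolution Y plane plus two chroma planes at half resolution in each dimension, with odd sizes rounded up. A minimal standalone sketch of the same arithmetic (I420Size and the sample dimensions are illustrative, not part of the patch):

    #include <cstdio>

    // Expected byte count of an I420 frame, mirroring the check in
    // MediaEngineWebRTCVideoSource::DeliverFrame(): one w*h Y plane plus
    // two ((w+1)/2) x ((h+1)/2) chroma planes.
    static int I420Size(int w, int h) {
      return w * h + 2 * (((w + 1) / 2) * ((h + 1) / 2));
    }

    int main() {
      printf("%d\n", I420Size(640, 480));  // 307200 + 2*76800 = 460800
      printf("%d\n", I420Size(641, 481));  // odd sizes round chroma up: 463043
      return 0;
    }
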
--- a/media/webrtc/trunk/webrtc/build/arm_neon.gypi
+++ b/media/webrtc/trunk/webrtc/build/arm_neon.gypi
@@ -18,13 +18,35 @@
 #   ],
 #   'includes': ['path/to/this/gypi/file'],
 # }
 
 {
   'cflags!': [
     '-mfpu=vfpv3-d16',
   ],
+  'cflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
   'cflags': [
     '-mfpu=neon',
     '-flax-vector-conversions',
   ],
+  'cflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+
 }
--- a/media/webrtc/trunk/webrtc/build/common.gypi
+++ b/media/webrtc/trunk/webrtc/build/common.gypi
@@ -38,28 +38,42 @@
       'build_with_chromium%': '<(build_with_chromium)',
       'build_with_libjingle%': '<(build_with_libjingle)',
       'webrtc_root%': '<(webrtc_root)',
       'apk_tests_path%': '<(apk_tests_path)',
       'modules_java_gyp_path%': '<(modules_java_gyp_path)',
       'gen_core_neon_offsets_gyp%': '<(gen_core_neon_offsets_gyp)',
       'webrtc_vp8_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp8',
       'webrtc_vp9_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp9',
+      'webrtc_h264_dir%': '<(webrtc_root)/modules/video_coding/codecs/h264',
       'rbe_components_path%': '<(webrtc_root)/modules/remote_bitrate_estimator',
+      'include_g711%': 1,
+      'include_g722%': 1,
+      'include_ilbc%': 1,
       'include_opus%': 1,
+      'include_isac%': 1,
+      'include_pcm16b%': 1,
     },
     'build_with_chromium%': '<(build_with_chromium)',
     'build_with_libjingle%': '<(build_with_libjingle)',
     'webrtc_root%': '<(webrtc_root)',
     'apk_tests_path%': '<(apk_tests_path)',
     'modules_java_gyp_path%': '<(modules_java_gyp_path)',
     'gen_core_neon_offsets_gyp%': '<(gen_core_neon_offsets_gyp)',
     'webrtc_vp8_dir%': '<(webrtc_vp8_dir)',
     'webrtc_vp9_dir%': '<(webrtc_vp9_dir)',
+    'webrtc_h264_dir%': '<(webrtc_h264_dir)',
+
+    'include_g711%': '<(include_g711)',
+    'include_g722%': '<(include_g722)',
+    'include_ilbc%': '<(include_ilbc)',
     'include_opus%': '<(include_opus)',
+    'include_isac%': '<(include_isac)',
+    'include_pcm16b%': '<(include_pcm16b)',
+
     'rtc_relative_path%': 1,
     'rbe_components_path%': '<(rbe_components_path)',
     'external_libraries%': '0',
     'json_root%': '<(DEPTH)/third_party/jsoncpp/source/include/',
    # openssl needs to be defined or gyp will complain. It is only used
    # when providing external libraries so just use current directory as a
     # placeholder.
     'ssl_root%': '.',
@@ -128,32 +142,52 @@
       ['build_with_chromium==1', {
         # Exclude pulse audio on Chromium since its prerequisites don't require
         # pulse audio.
         'include_pulse_audio%': 0,
 
         # Exclude internal ADM since Chromium uses its own IO handling.
         'include_internal_audio_device%': 0,
 
+        # lazily allocate the ~4MB of trace message buffers if set
+        'enable_lazy_trace_alloc%': 0,
+
+        'include_ndk_cpu_features%': 0,
       }, {  # Settings for the standalone (not-in-Chromium) build.
         # TODO(andrew): For now, disable the Chrome plugins, which causes a
         # flood of chromium-style warnings. Investigate enabling them:
         # http://code.google.com/p/webrtc/issues/detail?id=163
         'clang_use_chrome_plugins%': 0,
 
         'include_pulse_audio%': 1,
         'include_internal_audio_device%': 1,
+        'include_ndk_cpu_features%': 0,
       }],
       ['build_with_libjingle==1', {
         'include_tests%': 0,
         'restrict_webrtc_logging%': 1,
       }, {
         'include_tests%': 1,
         'restrict_webrtc_logging%': 0,
       }],
+      ['OS=="linux"', {
+        'include_alsa_audio%': 1,
+      }, {
+        'include_alsa_audio%': 0,
+      }],
+      ['OS=="solaris" or os_bsd==1', {
+        'include_pulse_audio%': 1,
+      }, {
+        'include_pulse_audio%': 0,
+      }],
+      ['OS=="linux" or OS=="solaris" or os_bsd==1', {
+        'include_v4l2_video_capture%': 1,
+      }, {
+        'include_v4l2_video_capture%': 0,
+      }],
       ['OS=="ios"', {
         'build_libjpeg%': 0,
         'enable_protobuf%': 0,
       }],
       ['target_arch=="arm" or target_arch=="armv7"', {
         'prefer_fixed_point%': 1,
       }],
       ['OS!="ios" and (target_arch!="arm" or arm_version>=7)', {
@@ -165,16 +199,21 @@
   },
   'target_defaults': {
     'include_dirs': [
       # To include the top-level directory when building in Chrome, so we can
       # use full paths (e.g. headers inside testing/ or third_party/).
       '<(DEPTH)',
     ],
     'conditions': [
+      ['moz_widget_toolkit_gonk==1', {
+        'defines' : [
+          'WEBRTC_GONK',
+        ],
+      }],
       ['restrict_webrtc_logging==1', {
         'defines': ['WEBRTC_RESTRICT_LOGGING',],
       }],
       ['build_with_mozilla==1', {
         'defines': [
           # Changes settings for Mozilla build.
           'WEBRTC_MOZILLA_BUILD',
          ],
@@ -257,27 +296,41 @@
         ],
       }],
       ['target_arch=="arm" or target_arch=="armv7"', {
         'defines': [
           'WEBRTC_ARCH_ARM',
         ],
         'conditions': [
           ['arm_version==7', {
-            'defines': ['WEBRTC_ARCH_ARM_V7',],
+            'defines': ['WEBRTC_ARCH_ARM_V7',
+                        'WEBRTC_BUILD_NEON_LIBS'],
             'conditions': [
               ['arm_neon==1', {
                 'defines': ['WEBRTC_ARCH_ARM_NEON',],
               }, {
                 'defines': ['WEBRTC_DETECT_ARM_NEON',],
               }],
             ],
           }],
         ],
       }],
+      ['os_bsd==1', {
+        'defines': [
+          'WEBRTC_BSD',
+          'WEBRTC_THREAD_RR',
+        ],
+      }],
+      ['OS=="dragonfly" or OS=="netbsd"', {
+        'defines': [
+          # doesn't support pthread_condattr_setclock
+          'WEBRTC_CLOCK_TYPE_REALTIME',
+        ],
+      }],
+      # Mozilla: if we support Mozilla on MIPS, we'll need to mod the cflags entries here
       ['target_arch=="mipsel" and mips_arch_variant!="r6" and android_webview_build==0', {
         'defines': [
           'MIPS32_LE',
         ],
         'conditions': [
           ['mips_fpu==1', {
             'defines': [
               'MIPS_FPU_LE',
@@ -340,16 +393,23 @@
       }],
       ['OS=="ios"', {
         'defines': [
           'WEBRTC_MAC',
           'WEBRTC_IOS',
         ],
       }],
       ['OS=="linux"', {
+#        'conditions': [
+#          ['have_clock_monotonic==1', {
+#            'defines': [
+#              'WEBRTC_CLOCK_TYPE_REALTIME',
+#            ],
+#          }],
+#        ],
         'defines': [
           'WEBRTC_LINUX',
         ],
       }],
       ['OS=="mac"', {
         'defines': [
           'WEBRTC_MAC',
         ],
@@ -363,27 +423,33 @@
         # http://code.google.com/p/webrtc/issues/detail?id=261 is solved.
         'msvs_disabled_warnings': [
           4373,  # legacy warning for ignoring const / volatile in signatures.
           4389,  # Signed/unsigned mismatch.
         ],
         # Re-enable some warnings that Chromium disables.
         'msvs_disabled_warnings!': [4189,],
       }],
+      # used on GONK as well
+      ['enable_android_opensl==1 and (OS=="android" or moz_widget_toolkit_gonk==1)', {
+        'defines': [
+          'WEBRTC_ANDROID_OPENSLES',
+        ],
+      }],
+      ['moz_webrtc_omx==1', {
+        'defines' : [
+          'MOZ_WEBRTC_OMX'
+        ],
+      }],
       ['OS=="android"', {
         'defines': [
           'WEBRTC_LINUX',
           'WEBRTC_ANDROID',
          ],
          'conditions': [
-           ['enable_android_opensl==1', {
-             'defines': [
-               'WEBRTC_ANDROID_OPENSLES',
-             ],
-           }],
            ['clang!=1', {
              # The Android NDK doesn't provide optimized versions of these
              # functions. Ensure they are disabled for all compilers.
              'cflags': [
                '-fno-builtin-cos',
                '-fno-builtin-sin',
                '-fno-builtin-cosf',
                '-fno-builtin-sinf',
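
The WEBRTC_ARCH_ARM_V7 / WEBRTC_ARCH_ARM_NEON / WEBRTC_DETECT_ARM_NEON defines set up the usual compile-time versus run-time NEON dispatch. A rough sketch of how consuming code typically branches on them (the WebRtc_GetCPUFeaturesARM()/kCPUFeatureNEON names come from system_wrappers' cpu_features_wrapper.h and should be treated as an assumption here):

    #if defined(WEBRTC_DETECT_ARM_NEON)
    #include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"
    #endif

    // WEBRTC_ARCH_ARM_NEON: NEON guaranteed at build time (-mfpu=neon).
    // WEBRTC_DETECT_ARM_NEON: built for ARMv7, so probe the CPU at run time.
    static bool UseNeonPath() {
    #if defined(WEBRTC_ARCH_ARM_NEON)
      return true;
    #elif defined(WEBRTC_DETECT_ARM_NEON)
      return (WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0;
    #else
      return false;
    #endif
    }
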
--- a/media/webrtc/trunk/webrtc/build/merge_libs.gyp
+++ b/media/webrtc/trunk/webrtc/build/merge_libs.gyp
@@ -43,10 +43,12 @@
           'outputs': ['<(output_lib)'],
           'action': ['python',
                      'merge_libs.py',
                      '<(PRODUCT_DIR)',
                      '<(output_lib)',],
         },
       ],
     },
+#      }],
+#    ],
   ],
 }
--- a/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
+++ b/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
@@ -177,16 +177,21 @@
         {
           'target_name': 'common_audio_sse2',
           'type': 'static_library',
           'sources': [
             'fir_filter_sse.cc',
             'resampler/sinc_resampler_sse.cc',
           ],
           'cflags': ['-msse2',],
+          'conditions': [
+            [ 'os_posix == 1', {
+              'cflags_mozilla': ['-msse2',],
+            }],
+          ],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],  # targets
     }],
     ['(target_arch=="arm" and arm_version==7) or target_arch=="armv7"', {
       'targets': [
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
@@ -12,105 +12,54 @@
 /*
 * A wrapper for resampling numerous sampling rate combinations.
  */
 
 #ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
 #define WEBRTC_RESAMPLER_RESAMPLER_H_
 
 #include "webrtc/typedefs.h"
+#include <speex/speex_resampler.h>
 
 namespace webrtc
 {
 
-// TODO(andrew): the implementation depends on the exact values of this enum.
-// It should be rewritten in a less fragile way.
+#define FIXED_RATE_RESAMPLER 0x10
 enum ResamplerType
 {
-    // 4 MSB = Number of channels
-    // 4 LSB = Synchronous or asynchronous
-
-    kResamplerSynchronous = 0x10,
-    kResamplerAsynchronous = 0x11,
-    kResamplerSynchronousStereo = 0x20,
-    kResamplerAsynchronousStereo = 0x21,
-    kResamplerInvalid = 0xff
-};
-
-// TODO(andrew): doesn't need to be part of the interface.
-enum ResamplerMode
-{
-    kResamplerMode1To1,
-    kResamplerMode1To2,
-    kResamplerMode1To3,
-    kResamplerMode1To4,
-    kResamplerMode1To6,
-    kResamplerMode1To12,
-    kResamplerMode2To3,
-    kResamplerMode2To11,
-    kResamplerMode4To11,
-    kResamplerMode8To11,
-    kResamplerMode11To16,
-    kResamplerMode11To32,
-    kResamplerMode2To1,
-    kResamplerMode3To1,
-    kResamplerMode4To1,
-    kResamplerMode6To1,
-    kResamplerMode12To1,
-    kResamplerMode3To2,
-    kResamplerMode11To2,
-    kResamplerMode11To4,
-    kResamplerMode11To8
+    kResamplerSynchronous            = 0x00,
+    kResamplerSynchronousStereo      = 0x01,
+    kResamplerFixedSynchronous       = 0x00 | FIXED_RATE_RESAMPLER,
+    kResamplerFixedSynchronousStereo = 0x01 | FIXED_RATE_RESAMPLER,
 };
 
 class Resampler
 {
-
 public:
     Resampler();
     // TODO(andrew): use an init function instead.
-    Resampler(int inFreq, int outFreq, ResamplerType type);
+    Resampler(int in_freq, int out_freq, ResamplerType type);
     ~Resampler();
 
     // Reset all states
-    int Reset(int inFreq, int outFreq, ResamplerType type);
+    int Reset(int in_freq, int out_freq, ResamplerType type);
 
     // Reset all states if any parameter has changed
-    int ResetIfNeeded(int inFreq, int outFreq, ResamplerType type);
+    int ResetIfNeeded(int in_freq, int out_freq, ResamplerType type);
 
     // Synchronous resampling, all output samples are written to samplesOut
-    int Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
-             int maxLen, int &outLen);
-
-    // Asynchronous resampling, input
-    int Insert(int16_t* samplesIn, int lengthIn);
-
-    // Asynchronous resampling output, remaining samples are buffered
-    int Pull(int16_t* samplesOut, int desiredLen, int &outLen);
+    int Push(const int16_t* samples_in, int length_in,
+             int16_t* samples_out, int max_len, int &out_len);
 
 private:
-    // Generic pointers since we don't know what states we'll need
-    void* state1_;
-    void* state2_;
-    void* state3_;
+    bool IsFixedRate() { return !!(type_ & FIXED_RATE_RESAMPLER); }
+
+    SpeexResamplerState* state_;
 
-    // Storage if needed
-    int16_t* in_buffer_;
-    int16_t* out_buffer_;
-    int in_buffer_size_;
-    int out_buffer_size_;
-    int in_buffer_size_max_;
-    int out_buffer_size_max_;
-
-    // State
-    int my_in_frequency_khz_;
-    int my_out_frequency_khz_;
-    ResamplerMode my_mode_;
-    ResamplerType my_type_;
-
-    // Extra instance for stereo
-    Resampler* slave_left_;
-    Resampler* slave_right_;
+    int in_freq_;
+    int out_freq_;
+    int channels_;
+    ResamplerType type_;
 };
 
 }  // namespace webrtc
 
 #endif // WEBRTC_RESAMPLER_RESAMPLER_H_
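
After this rewrite the class is a thin wrapper over Speex: a single SpeexResamplerState plus rate/channel/type bookkeeping, with the old asynchronous Insert()/Pull() pair removed. A minimal usage sketch against the new header (the 48 kHz to 16 kHz rates and 10 ms frame sizes are illustrative):

    #include "webrtc/common_audio/resampler/include/resampler.h"

    // Resample 10 ms of mono 48 kHz audio (480 samples) down to 16 kHz.
    // Push() is synchronous; it returns -1 if max_len < length_in, or if a
    // default-constructed Resampler has not been Reset() yet.
    int Downsample10ms(const int16_t in_48k[480], int16_t out_16k[480]) {
      webrtc::Resampler resampler(48000, 16000, webrtc::kResamplerSynchronous);
      int out_len = 0;
      if (resampler.Push(in_48k, 480, out_16k, 480, out_len) != 0)
        return -1;
      return out_len;  // ~160 samples: 10 ms at 16 kHz
    }
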
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
@@ -8,17 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/common_audio/resampler/include/push_resampler.h"
 
 #include <string.h>
 
 #include "webrtc/common_audio/include/audio_util.h"
-#include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/resampler/push_sinc_resampler.h"
 
 namespace webrtc {
 
 template <typename T>
 PushResampler<T>::PushResampler()
     : src_sample_rate_hz_(0),
       dst_sample_rate_hz_(0),
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
@@ -10,1075 +10,126 @@
 
 
 /*
 * A wrapper for resampling numerous sampling rate combinations.
  */
 
 #include <stdlib.h>
 #include <string.h>
+#include <assert.h>
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
+// TODO(jesup) better adjust per platform ability
+// Note: if these are changed (higher), you may need to change the
+// KernelDelay values in the unit tests here and in output_mixer.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK)
+#define RESAMPLER_QUALITY 2
+#else
+#define RESAMPLER_QUALITY 3
+#endif
 
 namespace webrtc
 {
 
-Resampler::Resampler()
+Resampler::Resampler() : state_(NULL), type_(kResamplerSynchronous)
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
+  // Note: Push will fail until Reset() is called
 }
 
-Resampler::Resampler(int inFreq, int outFreq, ResamplerType type)
+Resampler::Resampler(int in_freq, int out_freq, ResamplerType type) :
+  state_(NULL) // all others get initialized in reset
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
-
-    Reset(inFreq, outFreq, type);
+  Reset(in_freq, out_freq, type);
 }
 
 Resampler::~Resampler()
 {
-    if (state1_)
-    {
-        free(state1_);
-    }
-    if (state2_)
-    {
-        free(state2_);
-    }
-    if (state3_)
-    {
-        free(state3_);
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-    }
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+  }
 }
 
-int Resampler::ResetIfNeeded(int inFreq, int outFreq, ResamplerType type)
+int Resampler::ResetIfNeeded(int in_freq, int out_freq, ResamplerType type)
 {
-    int tmpInFreq_kHz = inFreq / 1000;
-    int tmpOutFreq_kHz = outFreq / 1000;
-
-    if ((tmpInFreq_kHz != my_in_frequency_khz_) || (tmpOutFreq_kHz != my_out_frequency_khz_)
-            || (type != my_type_))
-    {
-        return Reset(inFreq, outFreq, type);
-    } else
-    {
-        return 0;
-    }
+  if (!state_ || type != type_ ||
+      in_freq != in_freq_ || out_freq != out_freq_)
+  {
+    // Note that fixed-rate resamplers where input == output rate will
+    // have state_ == NULL, and will call Reset() here - but reset won't
+    // do anything beyond overwrite the member vars unless it needs a
+    // real resampler.
+    return Reset(in_freq, out_freq, type);
+  } else {
+    return 0;
+  }
 }
 
-int Resampler::Reset(int inFreq, int outFreq, ResamplerType type)
+int Resampler::Reset(int in_freq, int out_freq, ResamplerType type)
 {
-
-    if (state1_)
-    {
-        free(state1_);
-        state1_ = NULL;
-    }
-    if (state2_)
-    {
-        free(state2_);
-        state2_ = NULL;
-    }
-    if (state3_)
-    {
-        free(state3_);
-        state3_ = NULL;
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-        in_buffer_ = NULL;
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-        out_buffer_ = NULL;
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-        slave_left_ = NULL;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-        slave_right_ = NULL;
-    }
-
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-
-    // This might be overridden if parameters are not accepted.
-    my_type_ = type;
-
-    // Start with a math exercise, Euclid's algorithm to find the gcd:
-
-    int a = inFreq;
-    int b = outFreq;
-    int c = a % b;
-    while (c != 0)
-    {
-        a = b;
-        b = c;
-        c = a % b;
-    }
-    // b is now the gcd;
-
-    // We need to track what domain we're in.
-    my_in_frequency_khz_ = inFreq / 1000;
-    my_out_frequency_khz_ = outFreq / 1000;
-
-    // Scale with GCD
-    inFreq = inFreq / b;
-    outFreq = outFreq / b;
-
-    // Do we need stereo?
-    if ((my_type_ & 0xf0) == 0x20)
-    {
-        // Change type to mono
-        type = static_cast<ResamplerType>(
-            ((static_cast<int>(type) & 0x0f) + 0x10));
-        slave_left_ = new Resampler(inFreq, outFreq, type);
-        slave_right_ = new Resampler(inFreq, outFreq, type);
-    }
+  uint32_t channels = (type == kResamplerSynchronousStereo ||
+                       type == kResamplerFixedSynchronousStereo) ? 2 : 1;
 
-    if (inFreq == outFreq)
-    {
-        my_mode_ = kResamplerMode1To1;
-    } else if (inFreq == 1)
-    {
-        switch (outFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode1To2;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode1To3;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode1To4;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode1To6;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode1To12;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if (outFreq == 1)
-    {
-        switch (inFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode2To1;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode3To1;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode4To1;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode6To1;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode12To1;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if ((inFreq == 2) && (outFreq == 3))
-    {
-        my_mode_ = kResamplerMode2To3;
-    } else if ((inFreq == 2) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode2To11;
-    } else if ((inFreq == 4) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode4To11;
-    } else if ((inFreq == 8) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode8To11;
-    } else if ((inFreq == 3) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode3To2;
-    } else if ((inFreq == 11) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode11To2;
-    } else if ((inFreq == 11) && (outFreq == 4))
-    {
-        my_mode_ = kResamplerMode11To4;
-    } else if ((inFreq == 11) && (outFreq == 16))
-    {
-        my_mode_ = kResamplerMode11To16;
-    } else if ((inFreq == 11) && (outFreq == 32))
-    {
-        my_mode_ = kResamplerMode11To32;
-    } else if ((inFreq == 11) && (outFreq == 8))
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+    state_ = NULL;
+  }
+  type_ = type;
+  channels_ = channels;
+  in_freq_ = in_freq;
+  out_freq_ = out_freq;
+
+  // For fixed-rate, same-rate resamples we just memcpy and so don't spin up a resampler
+  if (in_freq != out_freq || !IsFixedRate())
+  {
+    state_ = speex_resampler_init(channels, in_freq, out_freq, RESAMPLER_QUALITY, NULL);
+    if (!state_)
     {
-        my_mode_ = kResamplerMode11To8;
-    } else
-    {
-        my_type_ = kResamplerInvalid;
-        return -1;
+      return -1;
     }
-
-    // Now create the states we need
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            // No state needed;
-            break;
-        case kResamplerMode1To2:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To3:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            break;
-        case kResamplerMode1To4:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To6:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:6
-            state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state2_);
-            break;
-        case kResamplerMode1To12:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 4:12
-            state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz(
-                (WebRtcSpl_State16khzTo48khz*) state3_);
-            break;
-        case kResamplerMode2To3:
-            // 2:6
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            // 6:3
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode2To11:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state2_);
-            break;
-        case kResamplerMode4To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state1_);
-            break;
-        case kResamplerMode8To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo22khz));
-            WebRtcSpl_ResetResample16khzTo22khz((WebRtcSpl_State16khzTo22khz *)state1_);
-            break;
-        case kResamplerMode11To16:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To32:
-            // 11 -> 22
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            // 22 -> 16
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-
-            // 16 -> 32
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode2To1:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To1:
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            break;
-        case kResamplerMode4To1:
-            // 4:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode6To1:
-            // 6:2
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode12To1:
-            // 12:4
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz(
-                (WebRtcSpl_State48khzTo16khz*) state1_);
-            // 4:2
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To2:
-            // 3:6
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 6:2
-            state2_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To2:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode11To4:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-            break;
-        case kResamplerMode11To8:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state1_);
-            break;
-
-    }
-
-    return 0;
+  }
+  return 0;
 }
 
-// Synchronous resampling, all output samples are written to samplesOut
-int Resampler::Push(const int16_t * samplesIn, int lengthIn, int16_t* samplesOut,
-                    int maxLen, int &outLen)
+// Synchronous resampling, all output samples are written to samples_out
+// TODO(jesup) Change to take samples-per-channel in and out
+int Resampler::Push(const int16_t* samples_in, int length_in,
+                    int16_t* samples_out, int max_len, int &out_len)
 {
-    // Check that the resampler is not in asynchronous mode
-    if (my_type_ & 0x0f)
-    {
-        return -1;
-    }
-
-    // Do we have a stereo signal?
-    if ((my_type_ & 0xf0) == 0x20)
+  if (max_len < length_in)
+  {
+    return -1;
+  }
+  if (!state_)
+  {
+    if (!IsFixedRate() || in_freq_ != out_freq_)
     {
-
-        // Split up the signal and call the slave object for each channel
-
-        int16_t* left = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* right = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* out_left = (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int16_t* out_right =
-                (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int res = 0;
-        for (int i = 0; i < lengthIn; i += 2)
-        {
-            left[i >> 1] = samplesIn[i];
-            right[i >> 1] = samplesIn[i + 1];
-        }
-
-        // It's OK to overwrite the local parameter, since it's just a copy
-        lengthIn = lengthIn / 2;
-
-        int actualOutLen_left = 0;
-        int actualOutLen_right = 0;
-        // Do resampling for right channel
-        res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, actualOutLen_left);
-        res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, actualOutLen_right);
-        if (res || (actualOutLen_left != actualOutLen_right))
-        {
-            free(left);
-            free(right);
-            free(out_left);
-            free(out_right);
-            return -1;
-        }
-
-        // Reassemble the signal
-        for (int i = 0; i < actualOutLen_left; i++)
-        {
-            samplesOut[i * 2] = out_left[i];
-            samplesOut[i * 2 + 1] = out_right[i];
-        }
-        outLen = 2 * actualOutLen_left;
-
-        free(left);
-        free(right);
-        free(out_left);
-        free(out_right);
-
-        return 0;
+      // Since we initialize to a non-Fixed type, Push() will fail
+      // until Reset() is called
+      return -1;
     }
 
-    // Containers for temp samples
-    int16_t* tmp;
-    int16_t* tmp_2;
-    // tmp data for resampling routines
-    int32_t* tmp_mem;
-
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            memcpy(samplesOut, samplesIn, lengthIn * sizeof(int16_t));
-            outLen = lengthIn;
-            break;
-        case kResamplerMode1To2:
-            if (maxLen < (lengthIn * 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-            return 0;
-        case kResamplerMode1To3:
-
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn * 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode1To4:
-            if (maxLen < (lengthIn * 4))
-            {
-                return -1;
-            }
-
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:4
-            WebRtcSpl_UpsampleBy2(tmp, lengthIn * 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn * 4;
-            free(tmp);
-            return 0;
-        case kResamplerMode1To6:
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 6))
-            {
-                return -1;
-            }
-
-            //1:2
-
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-
-            for (int i = 0; i < outLen; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode1To12:
-            // We can only handle blocks of 40 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 40) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn * 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*) malloc(sizeof(int16_t) * 4 * lengthIn);
-            //1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
-                                  (int32_t*) state1_);
-            outLen = lengthIn * 2;
-            //2:4
-            WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (int32_t*) state2_);
-            outLen = outLen * 2;
-            // 4:12
-            for (int i = 0; i < outLen; i += 160) {
-              // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
-              // as input and outputs a resampled block of 480 samples. The
-              // data is now actually in 32 kHz sampling rate, despite the
-              // function name, and with a resampling factor of three becomes
-              // 96 kHz.
-              WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                             (WebRtcSpl_State16khzTo48khz*) state3_,
-                                             tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode2To3:
-            if (maxLen < (lengthIn * 3 / 2))
-            {
-                return -1;
-            }
-            // 2:6
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, tmp + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            lengthIn = lengthIn * 3;
-            // 6:3
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode2To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 2))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(tmp + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state2_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode4To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 4))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode8To11:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 8))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(88 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 8,
-                                               (WebRtcSpl_State16khzTo22khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 8;
-            free(tmp_mem);
-            return 0;
-
-        case kResamplerMode11To16:
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 16) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(tmp + i, samplesOut + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            outLen = (lengthIn * 16) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode11To32:
-
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 32) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            // 11 -> 22 kHz in samplesOut
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-
-            // 22 -> 16 in tmp
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesOut + i, tmp + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            // 16 -> 32 in samplesOut
-            WebRtcSpl_UpsampleBy2(tmp, (lengthIn * 16) / 11, samplesOut,
-                                  (int32_t*)state3_);
-
-            outLen = (lengthIn * 32) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode2To1:
-            if (maxLen < (lengthIn / 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn / 2;
-            return 0;
-        case kResamplerMode3To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode4To1:
-            if (maxLen < (lengthIn / 4))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * lengthIn / 2);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn / 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 4;
-            free(tmp);
-            return 0;
-
-        case kResamplerMode6To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 6))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn) / 3);
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            WebRtcSpl_DownsampleBy2(tmp, outLen, samplesOut, (int32_t*)state2_);
-            free(tmp);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode12To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn / 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 3);
-            tmp_2 = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 6);
-            // 12:4
-            for (int i = 0; i < lengthIn; i += 480) {
-              // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
-              // as input and outputs a resampled block of 160 samples. The
-              // data is now actually in 96 kHz sampling rate, despite the
-              // function name, and with a resampling factor of 1/3 becomes
-              // 32 kHz.
-              WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                             (WebRtcSpl_State48khzTo16khz*) state1_,
-                                             tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2,
-                                    (int32_t*) state2_);
-            outLen = outLen / 2;
-            free(tmp);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
-                                    (int32_t*) state3_);
-            free(tmp_2);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode3To2:
-            if (maxLen < (lengthIn * 2 / 3))
-            {
-                return -1;
-            }
-            // 3:6
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 2));
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-            // 6:2
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                free(tmp);
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(tmp + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To2:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 2) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((lengthIn * 4) / 11 * sizeof(int16_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, tmp + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            lengthIn = (lengthIn * 4) / 11;
-
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode11To4:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 4) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, samplesOut + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 4) / 11;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To8:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 8) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesIn + i, samplesOut + (i * 8) / 11,
-                                               (WebRtcSpl_State22khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 8) / 11;
-            free(tmp_mem);
-            return 0;
-            break;
-
-    }
+    // Fixed-rate, same-freq "resample" - use memcpy, which avoids
+    // filtering and delay.  For non-fixed rates, where we might tweak
+    // from 48000->48000 to 48000->48001 for drift, we need to resample
+    // (and filter) all the time to avoid glitches on rate changes.
+    memcpy(samples_out, samples_in, length_in*sizeof(*samples_in));
+    out_len = length_in;
     return 0;
-}
-
-// Asynchronous resampling, input
-int Resampler::Insert(int16_t * samplesIn, int lengthIn)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-    int sizeNeeded, tenMsblock;
-
-    // Determine need for size of outBuffer
-    sizeNeeded = out_buffer_size_ + ((lengthIn + in_buffer_size_) * my_out_frequency_khz_)
-            / my_in_frequency_khz_;
-    if (sizeNeeded > out_buffer_size_max_)
-    {
-        // Round the value upwards to complete 10 ms blocks
-        tenMsblock = my_out_frequency_khz_ * 10;
-        sizeNeeded = (sizeNeeded / tenMsblock + 1) * tenMsblock;
-        out_buffer_ = (int16_t*)realloc(out_buffer_, sizeNeeded * sizeof(int16_t));
-        out_buffer_size_max_ = sizeNeeded;
-    }
-
-    // If we need to use inBuffer, make sure all input data fits there.
-
-    tenMsblock = my_in_frequency_khz_ * 10;
-    if (in_buffer_size_ || (lengthIn % tenMsblock))
-    {
-        // Check if input buffer size is enough
-        if ((in_buffer_size_ + lengthIn) > in_buffer_size_max_)
-        {
-            // Round the value upwards to complete 10 ms blocks
-            sizeNeeded = ((in_buffer_size_ + lengthIn) / tenMsblock + 1) * tenMsblock;
-            in_buffer_ = (int16_t*)realloc(in_buffer_,
-                                           sizeNeeded * sizeof(int16_t));
-            in_buffer_size_max_ = sizeNeeded;
-        }
-        // Copy in data to input buffer
-        memcpy(in_buffer_ + in_buffer_size_, samplesIn, lengthIn * sizeof(int16_t));
-
-        // Resample all available 10 ms blocks
-        int lenOut;
-        int dataLenToResample = (in_buffer_size_ / tenMsblock) * tenMsblock;
-        Push(in_buffer_, dataLenToResample, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-
-        // Save the rest
-        memmove(in_buffer_, in_buffer_ + dataLenToResample,
-                (in_buffer_size_ - dataLenToResample) * sizeof(int16_t));
-        in_buffer_size_ -= dataLenToResample;
-    } else
-    {
-        // Just resample
-        int lenOut;
-        Push(in_buffer_, lengthIn, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-    }
-
-    return 0;
-}
-
-// Asynchronous resampling output, remaining samples are buffered
-int Resampler::Pull(int16_t* samplesOut, int desiredLen, int &outLen)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-
-    // Check that we have enough data
-    if (desiredLen <= out_buffer_size_)
-    {
-        // Give out the date
-        memcpy(samplesOut, out_buffer_, desiredLen * sizeof(int32_t));
-
-        // Shuffle down remaining
-        memmove(out_buffer_, out_buffer_ + desiredLen,
-                (out_buffer_size_ - desiredLen) * sizeof(int16_t));
-
-        // Update remaining size
-        out_buffer_size_ -= desiredLen;
-
-        return 0;
-    } else
-    {
-        return -1;
-    }
+  }
+  assert(channels_ == 1 || channels_ == 2);
+  spx_uint32_t len = length_in = (length_in >> (channels_ - 1));
+  spx_uint32_t out = (spx_uint32_t) (max_len >> (channels_ - 1));
+  if ((speex_resampler_process_interleaved_int(state_, samples_in, &len,
+                             samples_out, &out) != RESAMPLER_ERR_SUCCESS) ||
+      len != (spx_uint32_t) length_in)
+  {
+    return -1;
+  }
+  out_len = (int) (channels_ * out);
+  return 0;
 }
 
 }  // namespace webrtc
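
For reference, the replacement Push() above has exactly two paths: a plain
memcpy when a fixed-rate resampler sees identical input and output
frequencies, and the speex path otherwise, where counts are converted from
total samples to per-channel frames (the >> (channels_ - 1) shift), since
speex counts interleaved frames rather than samples. A minimal standalone
sketch of that logic against the public speexdsp API; everything except the
speex types and calls is hypothetical:

    #include <assert.h>
    #include <string.h>
    #include <speex/speex_resampler.h>

    // Returns 0 on success, -1 on error, mirroring the convention above.
    int PushSketch(SpeexResamplerState* state, int channels,
                   bool fixed_same_rate,
                   const spx_int16_t* samples_in, int length_in,
                   spx_int16_t* samples_out, int max_len, int* out_len) {
      if (fixed_same_rate) {
        // Same-frequency fixed-rate case: copy through, no filter delay.
        memcpy(samples_out, samples_in, length_in * sizeof(*samples_in));
        *out_len = length_in;
        return 0;
      }
      assert(channels == 1 || channels == 2);
      // Halve the counts for stereo to get per-channel frame counts.
      spx_uint32_t in_frames = (spx_uint32_t)(length_in >> (channels - 1));
      spx_uint32_t out_frames = (spx_uint32_t)(max_len >> (channels - 1));
      if (speex_resampler_process_interleaved_int(state, samples_in,
                                                  &in_frames, samples_out,
                                                  &out_frames) !=
          RESAMPLER_ERR_SUCCESS) {
        return -1;
      }
      *out_len = (int)(channels * out_frames);  // back to total samples
      return 0;
    }

The real Push() above additionally fails if speex consumed fewer input
frames than it was given, which keeps the caller's timing assumptions intact.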
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
@@ -3,67 +3,59 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <math.h>
+#include <stdio.h>
+
+#include <algorithm>  // for std::min
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 
 // TODO(andrew): this is a work-in-progress. Many more tests are needed.
 
 namespace webrtc {
 namespace {
 const ResamplerType kTypes[] = {
   kResamplerSynchronous,
-  kResamplerAsynchronous,
   kResamplerSynchronousStereo,
-  kResamplerAsynchronousStereo
-  // kResamplerInvalid excluded
 };
 const size_t kTypesSize = sizeof(kTypes) / sizeof(*kTypes);
 
 // Rates we must support.
 const int kMaxRate = 96000;
 const int kRates[] = {
   8000,
   16000,
   32000,
-  44000,
+  44100,
   48000,
   kMaxRate
 };
 const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
 const int kMaxChannels = 2;
 const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
 
-// TODO(andrew): should we be supporting these combinations?
-bool ValidRates(int in_rate, int out_rate) {
-  // Not the most compact notation, for clarity.
-  if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
-      (out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
-    return false;
-  }
-
-  return true;
-}
-
 class ResamplerTest : public testing::Test {
  protected:
   ResamplerTest();
   virtual void SetUp();
   virtual void TearDown();
+  void RunResampleTest(int channels,
+                       int src_sample_rate_hz,
+                       int dst_sample_rate_hz);
 
   Resampler rs_;
   int16_t data_in_[kDataSize];
   int16_t data_out_[kDataSize];
+  int16_t data_reference_[kDataSize];
 };
 
 ResamplerTest::ResamplerTest() {}
 
 void ResamplerTest::SetUp() {
   // Initialize input data with anything. The tests are content independent.
   memset(data_in_, 1, sizeof(data_in_));
 }
@@ -78,66 +70,141 @@ TEST_F(ResamplerTest, Reset) {
   // Check that all required combinations are supported.
   for (size_t i = 0; i < kRatesSize; ++i) {
     for (size_t j = 0; j < kRatesSize; ++j) {
       for (size_t k = 0; k < kTypesSize; ++k) {
         std::ostringstream ss;
         ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
             << ", type: " << kTypes[k];
         SCOPED_TRACE(ss.str());
-        if (ValidRates(kRates[i], kRates[j]))
-          EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
-        else
-          EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
       }
     }
   }
 }
 
-// TODO(tlegrand): Replace code inside the two tests below with a function
-// with number of channels and ResamplerType as input.
-TEST_F(ResamplerTest, Synchronous) {
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(int16_t* buffer, float data, int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i] = data * i;
+  }
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(int16_t* buffer, float left, float right,
+                    int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i * 2] = left * i;
+    buffer[i * 2 + 1] = right * i;
+  }
+}
+
+// Computes the best SNR based on the error between |reference| and
+// |test|. It allows for a sample delay between the signals to
+// compensate for the resampling delay.
+float ComputeSNR(const int16_t* reference, const int16_t* test,
+                 int sample_rate_hz, int channels, int max_delay) {
+  float best_snr = 0;
+  int best_delay = 0;
+  int samples_per_channel = sample_rate_hz/100;
+  for (int delay = 0; delay < max_delay; delay++) {
+    float mse = 0;
+    float variance = 0;
+    for (int i = 0; i < samples_per_channel * channels - delay; i++) {
+      int error = reference[i] - test[i + delay];
+      mse += error * error;
+      variance += reference[i] * reference[i];
+    }
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  return best_snr;
+}
 
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-      }
+void ResamplerTest::RunResampleTest(int channels,
+                                    int src_sample_rate_hz,
+                                    int dst_sample_rate_hz) {
+  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 30;
+  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float kDstLeft = kResamplingFactor * kSrcLeft;
+  const float kDstRight = kResamplingFactor * kSrcRight;
+  if (channels == 1)
+    SetMonoFrame(data_in_, kSrcLeft, src_sample_rate_hz);
+  else
+    SetStereoFrame(data_in_, kSrcLeft, kSrcRight, src_sample_rate_hz);
+
+  if (channels == 1) {
+    SetMonoFrame(data_out_, 0, dst_sample_rate_hz);
+    SetMonoFrame(data_reference_, kDstLeft, dst_sample_rate_hz);
+  } else {
+    SetStereoFrame(data_out_, 0, 0, dst_sample_rate_hz);
+    SetStereoFrame(data_reference_, kDstLeft, kDstRight, dst_sample_rate_hz);
+  }
+
+  // The speex resampler has a known delay dependent on quality and rates,
+  // which we approximate here. Multiplying by two gives us a crude maximum
+  // for any resampling, as the old resampler typically (but not always)
+  // has lower delay.  The actual delay is calculated internally based on the
+  // filter length in the QualityMap.
+  static const int kInputKernelDelaySamples = 16*3;
+  const int max_delay = std::min(1.0f, 1/kResamplingFactor) *
+                        kInputKernelDelaySamples * channels * 2;
+  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
+      channels, src_sample_rate_hz, channels, dst_sample_rate_hz);
+
+  int in_length = channels * src_sample_rate_hz / 100;
+  int out_length = 0;
+  EXPECT_EQ(0, rs_.Reset(src_sample_rate_hz, dst_sample_rate_hz,
+                         (channels == 1 ?
+                          kResamplerSynchronous :
+                          kResamplerSynchronousStereo)));
+  EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                        out_length));
+  EXPECT_EQ(channels * dst_sample_rate_hz / 100, out_length);
+
+  EXPECT_GT(ComputeSNR(data_reference_, data_out_, dst_sample_rate_hz,
+                       channels, max_delay), 40.0f);
+}
+
+TEST_F(ResamplerTest, Synchronous) {
+  // Number of channels is 1, mono mode.
+  const int kChannels = 1;
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 
 TEST_F(ResamplerTest, SynchronousStereo) {
   // Number of channels is 2, stereo mode.
   const int kChannels = 2;
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
-
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kChannels * kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
-                               kResamplerSynchronousStereo));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
-                                kResamplerSynchronousStereo));
-      }
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 }  // namespace
 }  // namespace webrtc
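
The 40 dB bar in RunResampleTest() follows directly from the definition in
ComputeSNR(): SNR = 10 * log10(variance / mse), so passing requires the
residual error power to be at least 10^4 times smaller than the reference
signal power. A quick numeric check with made-up powers:

    #include <math.h>
    #include <stdio.h>

    int main() {
      // Hypothetical powers: signal variance 1e8, residual error 1e4.
      float variance = 1e8f;
      float mse = 1e4f;
      float snr = 10.0f * log10(variance / mse);  // 10 * log10(1e4)
      printf("SNR=%.1f dB\n", snr);               // prints: SNR=40.0 dB
      return 0;
    }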
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
@@ -21,21 +21,21 @@ float SincResampler::Convolve_NEON(const
                                    const float* k2,
                                    double kernel_interpolation_factor) {
   float32x4_t m_input;
   float32x4_t m_sums1 = vmovq_n_f32(0);
   float32x4_t m_sums2 = vmovq_n_f32(0);
 
   const float* upper = input_ptr + kKernelSize;
   for (; input_ptr < upper; ) {
-    m_input = vld1q_f32(input_ptr);
+    m_input = vld1q_f32((const float32_t *) input_ptr);
     input_ptr += 4;
-    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
+    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32((const float32_t *) k1));
     k1 += 4;
-    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
+    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32((const float32_t *) k2));
     k2 += 4;
   }
 
   // Linearly interpolate the two "convolutions".
   m_sums1 = vmlaq_f32(
       vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
       m_sums2, vmovq_n_f32(kernel_interpolation_factor));
 
--- a/media/webrtc/trunk/webrtc/common_types.h
+++ b/media/webrtc/trunk/webrtc/common_types.h
@@ -425,17 +425,17 @@ typedef struct        // All levels are 
 enum NsModes    // type of Noise Suppression
 {
     kNsUnchanged = 0,   // previously set mode
     kNsDefault,         // platform default
     kNsConference,      // conferencing default
     kNsLowSuppression,  // lowest suppression
     kNsModerateSuppression,
     kNsHighSuppression,
-    kNsVeryHighSuppression,     // highest suppression
+    kNsVeryHighSuppression     // highest suppression
 };
 
 enum AgcModes                  // type of Automatic Gain Control
 {
     kAgcUnchanged = 0,        // previously set mode
     kAgcDefault,              // platform default
     // adaptive mode for use when analog volume control exists (e.g. for
     // PC softphone)
@@ -450,17 +450,17 @@ enum AgcModes                  // type o
 
 // EC modes
 enum EcModes                   // type of Echo Control
 {
     kEcUnchanged = 0,          // previously set mode
     kEcDefault,                // platform default
     kEcConference,             // conferencing default (aggressive AEC)
     kEcAec,                    // Acoustic Echo Cancellation
-    kEcAecm,                   // AEC mobile
+    kEcAecm                    // AEC mobile
 };
 
 // AECM modes
 enum AecmModes                 // mode of AECM
 {
     kAecmQuietEarpieceOrHeadset = 0,
                                // Quiet earpiece or headset use
     kAecmEarpiece,             // most earpiece use
@@ -503,33 +503,33 @@ enum NetEqModes             // NetEQ pla
     // Improved jitter robustness at the cost of increased delay. Can be
     // used in one-way communication.
     kNetEqStreaming = 1,
     // Optimized for decodability of fax signals rather than for perceived audio
     // quality.
     kNetEqFax = 2,
     // Minimal buffer management. Inserts zeros for lost packets and during
     // buffer increases.
-    kNetEqOff = 3,
+    kNetEqOff = 3
 };
 
 // TODO(henrika): to be removed.
 enum OnHoldModes            // On Hold direction
 {
     kHoldSendAndPlay = 0,    // Put both sending and playing in on-hold state.
     kHoldSendOnly,           // Put only sending in on-hold state.
     kHoldPlayOnly            // Put only playing in on-hold state.
 };
 
 // TODO(henrika): to be removed.
 enum AmrMode
 {
     kRfc3267BwEfficient = 0,
     kRfc3267OctetAligned = 1,
-    kRfc3267FileStorage = 2,
+    kRfc3267FileStorage = 2
 };
 
 // ==================================================================
 // Video specific types
 // ==================================================================
 
 // Raw video types
 enum RawVideoType
@@ -546,16 +546,26 @@ enum RawVideoType
     kVideoARGB1555 = 9,
     kVideoMJPEG    = 10,
     kVideoNV12     = 11,
     kVideoNV21     = 12,
     kVideoBGRA     = 13,
     kVideoUnknown  = 99
 };
 
+enum VideoReceiveState
+{
+  kReceiveStateInitial,            // No video decoded yet
+  kReceiveStateNormal,
+  kReceiveStatePreemptiveNACK,     // NACK sent for missing packet, no decode stall/fail yet
+  kReceiveStateWaitingKey,         // Decoding stalled, waiting for keyframe or NACK
+  kReceiveStateDecodingWithErrors, // Decoding with errors, waiting for keyframe or NACK
+  kReceiveStateNoIncoming          // No errors, but no incoming video since last decode
+};
+
 // Video codec
 enum { kConfigParameterSize = 128};
 enum { kPayloadNameSize = 32};
 enum { kMaxSimulcastStreams = 4};
 enum { kMaxTemporalStreams = 4};
 
 enum VideoCodecComplexity
 {
@@ -622,16 +632,20 @@ struct VideoCodecVP9 {
   bool                 frameDroppingOn;
   int                  keyFrameInterval;
   bool                 adaptiveQpMode;
 };
 
 // H264 specific.
 struct VideoCodecH264 {
-  VideoCodecProfile profile;
+  uint8_t        profile;
+  uint8_t        constraints;
+  uint8_t        level;
+  uint8_t        packetizationMode; // 0 or 1
   bool           frameDroppingOn;
   int            keyFrameInterval;
   // These are NULL/0 if not externally negotiated.
   const uint8_t* spsData;
   size_t         spsLen;
   const uint8_t* ppsData;
   size_t         ppsLen;
 };
@@ -689,16 +703,18 @@ enum VideoCodecMode {
 // Common video codec properties
 struct VideoCodec {
   VideoCodecType      codecType;
   char                plName[kPayloadNameSize];
   unsigned char       plType;
 
   unsigned short      width;
   unsigned short      height;
+  // width & height modulo resolution_divisor must be 0
+  unsigned char       resolution_divisor;
 
   unsigned int        startBitrate;  // kilobits/sec.
   unsigned int        maxBitrate;  // kilobits/sec.
   unsigned int        minBitrate;  // kilobits/sec.
   unsigned int        targetBitrate;  // kilobits/sec.
 
   unsigned char       maxFramerate;
 
@@ -765,16 +781,35 @@ struct OverUseDetectorOptions {
   double initial_offset;
   double initial_e[2][2];
   double initial_process_noise[2];
   double initial_avg_noise;
   double initial_var_noise;
   double initial_threshold;
 };
 
+enum CPULoadState {
+  kLoadRelaxed,
+  kLoadNormal,
+  kLoadStressed
+};
+
+class CPULoadStateObserver {
+public:
+  virtual void onLoadStateChanged(CPULoadState aNewState) = 0;
+  virtual ~CPULoadStateObserver() {}
+};
+
+class CPULoadStateCallbackInvoker {
+public:
+  virtual void AddObserver(CPULoadStateObserver* aObserver) = 0;
+  virtual void RemoveObserver(CPULoadStateObserver* aObserver) = 0;
+  virtual ~CPULoadStateCallbackInvoker() {}
+};
+
 // This structure will have the information about when packet is actually
 // received by socket.
 struct PacketTime {
   PacketTime() : timestamp(-1), not_before(-1) {}
   PacketTime(int64_t timestamp, int64_t not_before)
       : timestamp(timestamp), not_before(not_before) {
   }
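
The CPULoadState additions above are a plain observer pattern: a
CPULoadStateCallbackInvoker owns the observer list and fires
onLoadStateChanged() as the load moves between the three states. A
hypothetical observer, tying the state to the new
VideoCodec::resolution_divisor field purely for illustration (nothing below
exists in the tree):

    class LoadAwareEncoderConfig : public CPULoadStateObserver {
    public:
      LoadAwareEncoderConfig() : divisor_(1) {}
      virtual void onLoadStateChanged(CPULoadState aNewState) {
        // Under stress, halve width and height; otherwise run full size.
        divisor_ = (aNewState == kLoadStressed) ? 2 : 1;
      }
      // Candidate value for VideoCodec::resolution_divisor; width and
      // height must then be multiples of it.
      unsigned char resolution_divisor() const { return divisor_; }
    private:
      unsigned char divisor_;
    };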
 
--- a/media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
+++ b/media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
@@ -236,16 +236,38 @@ int ConvertToI420(VideoType src_video_ty
   int dst_width = dst_frame->width();
   int dst_height = dst_frame->height();
   // LibYuv expects pre-rotation values for dst.
   // Stride values should correspond to the destination values.
   if (rotation == kRotate90 || rotation == kRotate270) {
     dst_width = dst_frame->height();
     dst_height =dst_frame->width();
   }
+#ifdef WEBRTC_GONK
+  if (src_video_type == kYV12) {
+    // In a gralloc buffer, the YV12 format's Cb and Cr strides are aligned
+    // to a 16-byte boundary. See /system/core/include/system/graphics.h.
+    int stride_y = src_width;
+    int stride_uv = (((stride_y + 1) / 2) + 15) & ~0x0F;
+    return libyuv::I420Rotate(src_frame,
+                              stride_y,
+                              src_frame + (stride_y * src_height) + (stride_uv * ((src_height + 1) / 2)),
+                              stride_uv,
+                              src_frame + (stride_y * src_height),
+                              stride_uv,
+                              dst_frame->buffer(kYPlane),
+                              dst_frame->stride(kYPlane),
+                              dst_frame->buffer(kUPlane),
+                              dst_frame->stride(kUPlane),
+                              dst_frame->buffer(kVPlane),
+                              dst_frame->stride(kVPlane),
+                              src_width, src_height,
+                              ConvertRotationMode(rotation));
+  }
+#endif
   return libyuv::ConvertToI420(src_frame, sample_size,
                                dst_frame->buffer(kYPlane),
                                dst_frame->stride(kYPlane),
                                dst_frame->buffer(kUPlane),
                                dst_frame->stride(kUPlane),
                                dst_frame->buffer(kVPlane),
                                dst_frame->stride(kVPlane),
                                crop_x, crop_y,
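
The stride expression in the WEBRTC_GONK branch above rounds half of the
luma stride up to the next multiple of 16, matching gralloc's YV12 layout (a
Y plane followed by 16-byte-aligned chroma planes). A worked check of that
arithmetic:

    #include <stdio.h>

    // Mirrors the stride_uv expression above.
    static int ChromaStride(int stride_y) {
      return (((stride_y + 1) / 2) + 15) & ~0x0F;
    }

    int main() {
      printf("%d\n", ChromaStride(640));  // 320: already 16-byte aligned
      printf("%d\n", ChromaStride(642));  // half is 321, aligned up to 336
      return 0;
    }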
--- a/media/webrtc/trunk/webrtc/engine_configurations.h
+++ b/media/webrtc/trunk/webrtc/engine_configurations.h
@@ -31,17 +31,19 @@
 #define WEBRTC_CODEC_G722
 #endif  // !WEBRTC_MOZILLA_BUILD
 
 // AVT is included in all builds, along with G.711, NetEQ and CNG
 // (which are mandatory and don't have any defines).
 #define WEBRTC_CODEC_AVT
 
 // PCM16 is useful for testing and incurs only a small binary size cost.
+#ifndef WEBRTC_CODEC_PCM16
 #define WEBRTC_CODEC_PCM16
+#endif
 
 // iLBC and Redundancy coding are excluded from Chromium and Mozilla
 // builds to reduce binary size.
 #if !defined(WEBRTC_CHROMIUM_BUILD) && !defined(WEBRTC_MOZILLA_BUILD)
 #define WEBRTC_CODEC_ILBC
 #define WEBRTC_CODEC_RED
 #endif  // !WEBRTC_CHROMIUM_BUILD && !WEBRTC_MOZILLA_BUILD
 
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
@@ -2,35 +2,41 @@
 #
 # Use of this source code is governed by a BSD-style license
 # that can be found in the LICENSE file in the root of the source
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
+  'variables': {
+    'opus_complexity%': 0,
+  },
   'targets': [
     {
       'target_name': 'webrtc_opus',
       'type': 'static_library',
       'conditions': [
         ['build_with_mozilla==1', {
           # Mozilla provides its own build of the opus library.
           'include_dirs': [
-            '$(DIST)/include/opus',
+            '/media/libopus/include',
            ]
         }, {
           'dependencies': [
             '<(DEPTH)/third_party/opus/opus.gyp:opus'
           ],
         }],
       ],
       'include_dirs': [
         '<(webrtc_root)',
       ],
+      'defines': [
+        'OPUS_COMPLEXITY=<(opus_complexity)'
+      ],
       'sources': [
         'audio_encoder_opus.cc',
         'interface/audio_encoder_opus.h',
         'interface/opus_interface.h',
         'opus_inst.h',
         'opus_interface.c',
       ],
     },
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
@@ -83,16 +83,19 @@ int16_t WebRtcOpus_Encode(OpusEncInst* i
   if (res > 0) {
     return res;
   }
   return -1;
 }
 
 int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate) {
   if (inst) {
+#if defined(OPUS_COMPLEXITY) && (OPUS_COMPLEXITY != 0)
+    opus_encoder_ctl(inst->encoder, OPUS_SET_COMPLEXITY(OPUS_COMPLEXITY));
+#endif
     return opus_encoder_ctl(inst->encoder, OPUS_SET_BITRATE(rate));
   } else {
     return -1;
   }
 }
 
 int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate) {
   if (inst) {
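
With the new gyp variable, a build can bake a fixed encoder complexity into
the library: 'opus_complexity%': N becomes -DOPUS_COMPLEXITY=N, and the
guard above then issues the control call whenever the bitrate is set. A
sketch of the same effect against the public Opus API (complexity ranges
0-10; here it is applied only when the define is non-zero):

    #include <opus.h>

    static void ApplyBuildTimeComplexity(OpusEncoder* encoder) {
    #if defined(OPUS_COMPLEXITY) && (OPUS_COMPLEXITY != 0)
      // Lower complexity trades encode quality for CPU, which is the
      // point of setting it on constrained (e.g. mobile) targets.
      opus_encoder_ctl(encoder, OPUS_SET_COMPLEXITY(OPUS_COMPLEXITY));
    #endif
    }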
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
@@ -5,31 +5,45 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'audio_coding_dependencies': [
       'CNG',
-      'G711',
-      'G722',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
-      'PCM16B',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
     ],
     'audio_coding_defines': [],
     'conditions': [
       ['include_opus==1', {
         'audio_coding_dependencies': ['webrtc_opus',],
         'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
       }],
+      ['include_g711==1', {
+        'audio_coding_dependencies': ['G711',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G711',],
+      }],
+      ['include_g722==1', {
+        'audio_coding_dependencies': ['G722',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G722',],
+      }],
+      ['include_ilbc==1', {
+        'audio_coding_dependencies': ['iLBC',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ILBC',],
+      }],
+      ['include_isac==1', {
+        'audio_coding_dependencies': ['iSAC', 'iSACFix',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFX',],
+      }],
+      ['include_pcm16b==1', {
+        'audio_coding_dependencies': ['PCM16B',],
+        'audio_coding_defines': ['WEBRTC_CODEC_PCM16',],
+      }],
     ],
   },
   'targets': [
     {
       'target_name': 'audio_coding_module',
       'type': 'static_library',
       'defines': [
         '<@(audio_coding_defines)',
@@ -48,74 +62,79 @@
           '../interface',
           '../../../interface',
           '<(webrtc_root)',
         ],
       },
       'sources': [
         '../interface/audio_coding_module.h',
         '../interface/audio_coding_module_typedefs.h',
-        'acm_amr.cc',
-        'acm_amr.h',
-        'acm_amrwb.cc',
-        'acm_amrwb.h',
-        'acm_celt.cc',
-        'acm_celt.h',
         'acm_cng.cc',
         'acm_cng.h',
         'acm_codec_database.cc',
         'acm_codec_database.h',
         'acm_common_defs.h',
         'acm_dtmf_playout.cc',
         'acm_dtmf_playout.h',
-        'acm_g722.cc',
-        'acm_g722.h',
-        'acm_g7221.cc',
-        'acm_g7221.h',
-        'acm_g7221c.cc',
-        'acm_g7221c.h',
-        'acm_g729.cc',
-        'acm_g729.h',
-        'acm_g7291.cc',
-        'acm_g7291.h',
         'acm_generic_codec.cc',
         'acm_generic_codec.h',
-        'acm_gsmfr.cc',
-        'acm_gsmfr.h',
-        'acm_ilbc.cc',
-        'acm_ilbc.h',
-        'acm_isac.cc',
-        'acm_isac.h',
-        'acm_isac_macros.h',
-        'acm_opus.cc',
-        'acm_opus.h',
-        'acm_speex.cc',
-        'acm_speex.h',
-        'acm_pcm16b.cc',
-        'acm_pcm16b.h',
-        'acm_pcma.cc',
-        'acm_pcma.h',
-        'acm_pcmu.cc',
-        'acm_pcmu.h',
         'acm_red.cc',
         'acm_red.h',
         'acm_receiver.cc',
         'acm_receiver.h',
         'acm_resampler.cc',
         'acm_resampler.h',
         'audio_coding_module.cc',
         'audio_coding_module_impl.cc',
         'audio_coding_module_impl.h',
         'call_statistics.cc',
         'call_statistics.h',
         'initial_delay_manager.cc',
         'initial_delay_manager.h',
         'nack.cc',
         'nack.h',
       ],
+      'conditions': [
+        ['include_opus==1', {
+          'sources': [
+            'acm_opus.cc',
+            'acm_opus.h',
+          ],
+        }],
+        ['include_g711==1', {
+          'sources': [
+            'acm_pcma.cc',
+            'acm_pcma.h',
+            'acm_pcmu.cc',
+            'acm_pcmu.h',
+          ],
+        }],
+        ['include_g722==1', {
+          'sources': [
+            'acm_g722.cc',
+            'acm_g722.h',
+          ],
+        }],
+        ['include_ilbc==1', {
+          'sources': [
+#           FIX
+          ],
+        }],
+        ['include_isac==1', {
+          'sources': [
+#           FIX
+          ],
+        }],
+        ['include_pcm16b==1', {
+          'sources': [
+            'acm_pcm16b.cc',
+            'acm_pcm16b.h',
+          ],
+        }],
+      ],
     },
   ],
   'conditions': [
     ['include_tests==1', {
       'targets': [
         {
           'target_name': 'acm_receive_test',
           'type': 'static_library',
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq.gypi
@@ -5,25 +5,33 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'codecs': [
       'G711',
-      'G722',
       'PCM16B',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
       'CNG',
     ],
     'neteq_defines': [],
     'conditions': [
+      ['include_g722==1', {
+        'neteq_dependencies': ['G722'],
+        'neteq_defines': ['WEBRTC_CODEC_G722',],
+      }],
+      ['include_ilbc==1', {
+        'neteq_dependencies': ['iLBC'],
+        'neteq_defines': ['WEBRTC_CODEC_ILBC',],
+      }],
+      ['include_isac==1', {
+        'neteq_dependencies': ['iSAC', 'iSACFix',],
+        'neteq_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFX',],
+      }],
       ['include_opus==1', {
         'codecs': ['webrtc_opus',],
         'neteq_defines': ['WEBRTC_CODEC_OPUS',],
       }],
     ],
     'neteq_dependencies': [
       '<@(codecs)',
       '<(DEPTH)/third_party/opus/opus.gyp:opus',
@@ -136,16 +144,17 @@
           'type': '<(gtest_target_type)',
           'dependencies': [
             '<@(codecs)',
             'neteq_unittest_tools',
             '<(DEPTH)/testing/gtest.gyp:gtest',
             '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
             '<(webrtc_root)/test/test.gyp:test_support_main',
           ],
+# FIX for include_isac/etc
           'defines': [
             'AUDIO_DECODER_UNITTEST',
             'WEBRTC_CODEC_G722',
             'WEBRTC_CODEC_ILBC',
             'WEBRTC_CODEC_ISACFX',
             'WEBRTC_CODEC_ISAC',
             'WEBRTC_CODEC_PCM16',
             '<@(neteq_defines)',
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
@@ -7,16 +7,17 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
 
 #include <assert.h>
 
+#include "AndroidJNIWrapper.h"
 #include "webrtc/modules/utility/interface/helpers_android.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
 
 static JavaVM* g_jvm_ = NULL;
 static JNIEnv* g_jni_env_ = NULL;
 static jobject g_context_ = NULL;
@@ -46,30 +47,20 @@ void AudioManagerJni::SetAndroidAudioDev
   assert(env);
   assert(context);
 
   // Store global Java VM variables to be accessed by API calls.
   g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
   g_jni_env_ = reinterpret_cast<JNIEnv*>(env);
   g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));
 
-  // FindClass must be made in this function since this function's contract
-  // requires it to be called by a Java thread.
-  // See
-  // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
-  // as to why this is necessary.
-  // Get the AudioManagerAndroid class object.
-  jclass javaAmClassLocal = g_jni_env_->FindClass(
-      "org/webrtc/voiceengine/AudioManagerAndroid");
-  assert(javaAmClassLocal);
-
   // Create a global reference such that the class object is not recycled by
   // the garbage collector.
-  g_audio_manager_class_ = reinterpret_cast<jclass>(
-      g_jni_env_->NewGlobalRef(javaAmClassLocal));
+  g_audio_manager_class_ = jsjni_GetGlobalClassRef(
+    "org/webrtc/voiceengine/AudioManagerAndroid");
   assert(g_audio_manager_class_);
 }
 
 void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
   g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
   g_audio_manager_class_ = NULL;
   g_jni_env_->DeleteGlobalRef(g_context_);
   g_context_ = NULL;
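
jsjni_GetGlobalClassRef() comes from Mozilla's AndroidJNIWrapper and
replaces the FindClass-plus-NewGlobalRef dance, which only works on a thread
that can see the application ClassLoader. A sketch of the pattern it stands
in for (the helper below is illustrative, not the wrapper's actual
implementation):

    #include <jni.h>

    // Resolve a class on a Java-capable thread, then pin it with a
    // global reference so the GC cannot recycle it between calls.
    static jclass GetGlobalClassRef(JNIEnv* env, const char* name) {
      jclass local = env->FindClass(name);
      if (!local) {
        return NULL;  // a Java exception is now pending
      }
      jclass global = (jclass)env->NewGlobalRef(local);
      env->DeleteLocalRef(local);  // keep only the global reference
      return global;
    }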
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
@@ -29,16 +29,17 @@ class AudioManagerJni {
   // called once.
   // This function must be called by a Java thread as calling it from a thread
   // created by the native application will prevent FindClass from working. See
   // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
   // for more details.
   // It has to be called for this class' APIs to be successful. Calling
   // ClearAndroidAudioDeviceObjects will prevent this class' APIs to be called
   // successfully if SetAndroidAudioDeviceObjects is not called after it.
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
                                            void* context);
   // This function must be called when the AudioManagerJni class is no
   // longer needed. It frees up the global references acquired in
   // SetAndroidAudioDeviceObjects.
   static void ClearAndroidAudioDeviceObjects();
 
   bool low_latency_supported() const { return low_latency_supported_; }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -10,16 +10,17 @@
 
 /*
  *  Android audio device implementation (JNI/AudioRecord usage)
  */
 
 // TODO(xians): Break out attach and detach current thread to JVM to
 // separate functions.
 
+#include "AndroidJNIWrapper.h"
 #include "webrtc/modules/audio_device/android/audio_record_jni.h"
 
 #include <android/log.h>
 #include <stdlib.h>
 
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/audio_device_utility.h"
@@ -36,60 +37,49 @@ jobject AudioRecordJni::globalContext = 
 jclass AudioRecordJni::globalScClass = NULL;
 
 int32_t AudioRecordJni::SetAndroidAudioDeviceObjects(void* javaVM, void* env,
                                                      void* context) {
   assert(env);
   globalJvm = reinterpret_cast<JavaVM*>(javaVM);
   globalJNIEnv = reinterpret_cast<JNIEnv*>(env);
  // Get java class type (note path to class package).
-  jclass javaScClassLocal = globalJNIEnv->FindClass(
-      "org/webrtc/voiceengine/WebRtcAudioRecord");
-  if (!javaScClassLocal) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not find java class", __FUNCTION__);
-    return -1; // exception thrown
+  if (!globalScClass) {
+    globalScClass = jsjni_GetGlobalClassRef(
+        "org/webrtc/voiceengine/WebRtcAudioRecord");
+    if (!globalScClass) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
+                   "%s: could not find java class", __FUNCTION__);
+      return -1; // exception thrown
+    }
   }
 
-  // Create a global reference to the class (to tell JNI that we are
-  // referencing it after this function has returned).
-  globalScClass = reinterpret_cast<jclass> (
-      globalJNIEnv->NewGlobalRef(javaScClassLocal));
-  if (!globalScClass) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not create reference", __FUNCTION__);
-    return -1;
+  if (!globalContext) {
+    globalContext = jsjni_GetGlobalContextRef();
+    if (!globalContext) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
+                   "%s: could not create context reference", __FUNCTION__);
+      return -1;
+    }
   }
 
-  globalContext = globalJNIEnv->NewGlobalRef(
-      reinterpret_cast<jobject>(context));
-  if (!globalContext) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not create context reference", __FUNCTION__);
-    return -1;
-  }
-
-  // Delete local class ref, we only use the global ref
-  globalJNIEnv->DeleteLocalRef(javaScClassLocal);
-
   return 0;
 }
 
 void AudioRecordJni::ClearAndroidAudioDeviceObjects() {
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
                "%s: env is NULL, assuming deinit", __FUNCTION__);
 
-  globalJvm = NULL;;
+  globalJvm = NULL;
   if (!globalJNIEnv) {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                  "%s: saved env already NULL", __FUNCTION__);
     return;
   }
 
-  globalJNIEnv->DeleteGlobalRef(globalContext);
   globalContext = reinterpret_cast<jobject>(NULL);
 
   globalJNIEnv->DeleteGlobalRef(globalScClass);
   globalScClass = reinterpret_cast<jclass>(NULL);
 
   globalJNIEnv = reinterpret_cast<JNIEnv*>(NULL);
 }
 
@@ -117,17 +107,17 @@ AudioRecordJni::AudioRecordJni(
       _recording(false),
       _recIsInitialized(false),
       _micIsInitialized(false),
       _startRec(false),
       _recWarning(0),
       _recError(0),
       _delayRecording(0),
       _AGC(false),
-      _samplingFreqIn((N_REC_SAMPLES_PER_SEC/1000)),
+      _samplingFreqIn(N_REC_SAMPLES_PER_SEC),
       _recAudioSource(1) { // 1 is AudioSource.MIC which is our default
   memset(_recBuffer, 0, sizeof(_recBuffer));
 }
 
 AudioRecordJni::~AudioRecordJni() {
   WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                "%s destroyed", __FUNCTION__);
 
@@ -414,36 +404,30 @@ int32_t AudioRecordJni::InitRecording() 
     }
     isAttached = true;
   }
 
   // get the method ID
   jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                "(II)I");
 
-  int samplingFreq = 44100;
-  if (_samplingFreqIn != 44)
-  {
-    samplingFreq = _samplingFreqIn * 1000;
-  }
-
   int retVal = -1;
 
   // call java sc object method
   jint res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
-                                samplingFreq);
+                                _samplingFreqIn);
   if (res < 0)
   {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                  "InitRecording failed (%d)", res);
   }
   else
   {
     // Set the audio device buffer sampling rate
-    _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn * 1000);
+    _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn);
 
     // the init rec function returns a fixed delay
     _delayRecording = res / _samplingFreqIn;
 
     _recIsInitialized = true;
     retVal = 0;
   }
 
@@ -779,24 +763,17 @@ int32_t AudioRecordJni::SetRecordingSamp
   if (samplesPerSec > 48000 || samplesPerSec < 8000)
   {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                  "  Invalid sample rate");
     return -1;
   }
 
   // set the recording sample rate to use
-  if (samplesPerSec == 44100)
-  {
-    _samplingFreqIn = 44;
-  }
-  else
-  {
-    _samplingFreqIn = samplesPerSec / 1000;
-  }
+  _samplingFreqIn = samplesPerSec;
 
   // Update the AudioDeviceBuffer
   _ptrAudioBuffer->SetRecordingSampleRate(samplesPerSec);
 
   return 0;
 }
 
 int32_t AudioRecordJni::InitJavaResources() {
@@ -986,21 +963,17 @@ int32_t AudioRecordJni::InitSampleRate()
       return -1;
     }
     isAttached = true;
   }
 
   if (_samplingFreqIn > 0)
   {
     // read the configured sampling rate
-    samplingFreq = 44100;
-    if (_samplingFreqIn != 44)
-    {
-      samplingFreq = _samplingFreqIn * 1000;
-    }
+    samplingFreq = _samplingFreqIn;
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "  Trying configured recording sampling rate %d",
                  samplingFreq);
   }
 
   // get the method ID
   jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                "(II)I");
@@ -1031,24 +1004,17 @@ int32_t AudioRecordJni::InitSampleRate()
     }
     else
     {
       keepTrying = false;
     }
   }
 
   // set the recording sample rate to use
-  if (samplingFreq == 44100)
-  {
-    _samplingFreqIn = 44;
-  }
-  else
-  {
-    _samplingFreqIn = samplingFreq / 1000;
-  }
+  _samplingFreqIn = samplingFreq;
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                "Recording sample rate set to (%d)", _samplingFreqIn);
 
   // get the method ID
   jmethodID stopRecordingID = env->GetMethodID(_javaScClass, "StopRecording",
                                                "()I");
 
@@ -1130,34 +1096,34 @@ bool AudioRecordJni::RecThreadProcess()
     _recording = true;
     _recWarning = 0;
     _recError = 0;
     _recStartStopEvent.Set();
   }
 
   if (_recording)
   {
-    uint32_t samplesToRec = _samplingFreqIn * 10;
+    uint32_t samplesToRec = _samplingFreqIn / 100;
 
     // Call java sc object method to record data to direct buffer
     // Will block until data has been recorded (see java sc class),
     // therefore we must release the lock
     UnLock();
     jint recDelayInSamples = _jniEnvRec->CallIntMethod(_javaScObj,
                                                         _javaMidRecAudio,
                                                         2 * samplesToRec);
     if (recDelayInSamples < 0)
     {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "RecordAudio failed");
       _recWarning = 1;
     }
     else
     {
-      _delayRecording = recDelayInSamples / _samplingFreqIn;
+      _delayRecording = (recDelayInSamples * 1000) / _samplingFreqIn;
     }
     Lock();
 
     // Check again since recording may have stopped during Java call
     if (_recording)
     {
       //            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
       //                         "total delay is %d", msPlayDelay + _delayRecording);
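
The hunks above move _samplingFreqIn from a kHz count (with 44 standing in
for 44100) to plain Hz, so the derived quantities rescale: 10 ms of audio is
now rate/100 samples, and a delay in samples becomes milliseconds only after
multiplying by 1000. A minimal sketch of that arithmetic, with hypothetical
helper names:

    #include <stdint.h>

    // 10 ms worth of samples at an Hz-valued rate (cf. samplesToRec above).
    static inline uint32_t SamplesPer10Ms(uint32_t rate_hz) {
      return rate_hz / 100;                     // 44100 Hz -> 441 samples
    }

    // Delay in samples -> milliseconds; scale before dividing to keep
    // integer precision (441 samples @ 44100 Hz -> 10 ms, not 0).
    static inline uint32_t DelayMs(uint32_t delay_samples, uint32_t rate_hz) {
      return (delay_samples * 1000) / rate_hz;
    }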
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -18,24 +18,24 @@
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 
 namespace webrtc {
 
 class EventWrapper;
 class ThreadWrapper;
 class PlayoutDelayProvider;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-const uint32_t N_REC_CHANNELS = 1; // default is mono recording
-const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
-
 class AudioRecordJni {
  public:
   static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env,
                                               void* context);
+
+  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
+                                              void* context);
+
   static void ClearAndroidAudioDeviceObjects();
 
   AudioRecordJni(const int32_t id, PlayoutDelayProvider* delay_provider);
   ~AudioRecordJni();
 
  // Main initialization and termination
   int32_t Init();
   int32_t Terminate();
@@ -104,16 +104,20 @@ class AudioRecordJni {
   void ClearRecordingWarning();
   void ClearRecordingError();
 
   // Attach audio buffer
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
   int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
 
+  static const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
+  static const uint32_t N_REC_CHANNELS = 1; // default is mono recording
+  static const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
+
  private:
   void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
     _critSect.Enter();
   }
   void UnLock() UNLOCK_FUNCTION(_critSect) {
     _critSect.Leave();
   }
 
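
Hoisting N_REC_SAMPLES_PER_SEC and friends into the class scopes the defaults
to AudioRecordJni instead of leaving them loose in namespace webrtc, where the
record- and playout-side headers both drop similarly named constants. A sketch
of the idiom under the C++03 rules this tree targets (class name invented):

    #include <stdint.h>

    class AudioRecordJniSketch {
     public:
      // In-class initializers are legal for static const integral members;
      // an out-of-line definition is only needed if the member is odr-used.
      static const uint32_t N_REC_SAMPLES_PER_SEC = 16000;
    };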
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -22,72 +22,66 @@
 
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#include "AndroidJNIWrapper.h"
+
 namespace webrtc {
 
 JavaVM* AudioTrackJni::globalJvm = NULL;
 JNIEnv* AudioTrackJni::globalJNIEnv = NULL;
 jobject AudioTrackJni::globalContext = NULL;
 jclass AudioTrackJni::globalScClass = NULL;
 
 int32_t AudioTrackJni::SetAndroidAudioDeviceObjects(void* javaVM, void* env,
                                                     void* context) {
   assert(env);
   globalJvm = reinterpret_cast<JavaVM*>(javaVM);
   globalJNIEnv = reinterpret_cast<JNIEnv*>(env);
-  // Get java class type (note path to class packet).
-  jclass javaScClassLocal = globalJNIEnv->FindClass(
-      "org/webrtc/voiceengine/WebRtcAudioTrack");
-  if (!javaScClassLocal) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not find java class", __FUNCTION__);
-    return -1; // exception thrown
+
+  // Check if we already got a reference
+  if (!globalScClass) {
+    // Get java class type (note path to class packet).
+    globalScClass = jsjni_GetGlobalClassRef("org/webrtc/voiceengine/WebRtcAudioTrack");
+    if (!globalScClass) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
+                   "%s: could not find java class", __FUNCTION__);
+      return -1; // exception thrown
+    }
+  }
+  if (!globalContext) {
+    globalContext = jsjni_GetGlobalContextRef();
+    if (!globalContext) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
+                   "%s: could not create context reference", __FUNCTION__);
+      return -1;
+    }
   }
 
-  // Create a global reference to the class (to tell JNI that we are
-  // referencing it after this function has returned).
-  globalScClass = reinterpret_cast<jclass> (
-      globalJNIEnv->NewGlobalRef(javaScClassLocal));
-  if (!globalScClass) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not create reference", __FUNCTION__);
-    return -1;
-  }
-
-  globalContext = globalJNIEnv->NewGlobalRef(
-      reinterpret_cast<jobject>(context));
-  if (!globalContext) {
-    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                 "%s: could not create context reference", __FUNCTION__);
-    return -1;
-  }
-
-  // Delete local class ref, we only use the global ref
-  globalJNIEnv->DeleteLocalRef(javaScClassLocal);
   return 0;
 }
 
 void AudioTrackJni::ClearAndroidAudioDeviceObjects() {
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
                "%s: env is NULL, assuming deinit", __FUNCTION__);
 
   globalJvm = NULL;
   if (!globalJNIEnv) {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                  "%s: saved env already NULL", __FUNCTION__);
     return;
   }
 
-  globalJNIEnv->DeleteGlobalRef(globalContext);
+  // No need to delete the shared global context ref.
+  // globalJNIEnv->DeleteGlobalRef(globalContext);
   globalContext = reinterpret_cast<jobject>(NULL);
 
   globalJNIEnv->DeleteGlobalRef(globalScClass);
   globalScClass = reinterpret_cast<jclass>(NULL);
 
   globalJNIEnv = reinterpret_cast<JNIEnv*>(NULL);
 }
 
@@ -112,17 +106,17 @@ AudioTrackJni::AudioTrackJni(const int32
       _playoutDeviceIsSpecified(false),
       _playing(false),
       _playIsInitialized(false),
       _speakerIsInitialized(false),
       _startPlay(false),
       _playWarning(0),
       _playError(0),
       _delayPlayout(0),
-      _samplingFreqOut((N_PLAY_SAMPLES_PER_SEC/1000)),
+      _samplingFreqOut(N_PLAY_SAMPLES_PER_SEC),
       _maxSpeakerVolume(0) {
 }
 
 AudioTrackJni::~AudioTrackJni() {
   WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                "%s destroyed", __FUNCTION__);
 
   Terminate();
@@ -416,35 +410,29 @@ int32_t AudioTrackJni::InitPlayout() {
       }
       isAttached = true;
     }
 
     // get the method ID
     jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                                 "(I)I");
 
-    int samplingFreq = 44100;
-    if (_samplingFreqOut != 44)
-    {
-      samplingFreq = _samplingFreqOut * 1000;
-    }
-
     int retVal = -1;
 
     // Call java sc object method
-    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq);
+    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, _samplingFreqOut);
     if (res < 0)
     {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "InitPlayback failed (%d)", res);
     }
     else
     {
       // Set the audio device buffer sampling rate
-      _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut * 1000);
+      _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut);
       _playIsInitialized = true;
       retVal = 0;
     }
 
     // Detach this thread if it was attached
     if (isAttached)
     {
       WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
@@ -858,24 +846,17 @@ int32_t AudioTrackJni::SetPlayoutSampleR
   if (samplesPerSec > 48000 || samplesPerSec < 8000)
   {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                  "  Invalid sample rate");
     return -1;
     }
 
   // set the playout sample rate to use
-  if (samplesPerSec == 44100)
-  {
-    _samplingFreqOut = 44;
-  }
-  else
-  {
-    _samplingFreqOut = samplesPerSec / 1000;
-  }
+  _samplingFreqOut = samplesPerSec;
 
   // Update the AudioDeviceBuffer
   _ptrAudioBuffer->SetPlayoutSampleRate(samplesPerSec);
 
   return 0;
 }
 
 bool AudioTrackJni::PlayoutWarning() const {
@@ -1151,21 +1132,17 @@ int32_t AudioTrackJni::InitSampleRate() 
 
   // get the method ID
   jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                               "(I)I");
 
   if (_samplingFreqOut > 0)
   {
     // read the configured sampling rate
-    samplingFreq = 44100;
-    if (_samplingFreqOut != 44)
-    {
-      samplingFreq = _samplingFreqOut * 1000;
-    }
+    samplingFreq = _samplingFreqOut;
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "  Trying configured playback sampling rate %d",
                  samplingFreq);
   }
   else
   {
     // set the preferred sampling frequency
     if (samplingFreq == 8000)
@@ -1209,24 +1186,17 @@ int32_t AudioTrackJni::InitSampleRate() 
   if (_maxSpeakerVolume < 1)
   {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                  "  Did not get valid max speaker volume value (%d)",
                  _maxSpeakerVolume);
   }
 
   // set the playback sample rate to use
-  if (samplingFreq == 44100)
-  {
-    _samplingFreqOut = 44;
-  }
-  else
-  {
-    _samplingFreqOut = samplingFreq / 1000;
-  }
+  _samplingFreqOut = samplingFreq;
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                "Playback sample rate set to (%d)", _samplingFreqOut);
 
   // get the method ID
   jmethodID stopPlaybackID = env->GetMethodID(_javaScClass, "StopPlayback",
                                               "()I");
 
@@ -1355,17 +1325,17 @@ bool AudioTrackJni::PlayThreadProcess()
         {
           WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                        "PlayAudio failed (%d)", res);
             _playWarning = 1;
         }
         else if (res > 0)
         {
           // we are not recording and have got a delay value from playback
-          _delayPlayout = res / _samplingFreqOut;
+          _delayPlayout = (res * 1000) / _samplingFreqOut;
         }
         Lock();
 
   }  // _playing
 
   if (_shutdownPlayThread)
   {
     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
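
On both the record and track sides, SetAndroidAudioDeviceObjects no longer
builds a fresh FindClass/NewGlobalRef pair per call; it reuses process-wide
references handed out by Gecko's AndroidJNIWrapper (jsjni_GetGlobalClassRef,
jsjni_GetGlobalContextRef). A condensed sketch of the caching shape, with
hypothetical names:

    #include <jni.h>

    // Cache one global class ref for the life of the process; the local
    // ref is dropped immediately so only the global ref survives.
    static jclass g_clazz = NULL;

    static jclass GetClassRef(JNIEnv* env, const char* path) {
      if (!g_clazz) {
        jclass local = env->FindClass(path);
        if (!local) return NULL;  // exception pending in env
        g_clazz = static_cast<jclass>(env->NewGlobalRef(local));
        env->DeleteLocalRef(local);
      }
      return g_clazz;
    }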
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -18,23 +18,24 @@
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 
 namespace webrtc {
 
 class EventWrapper;
 class ThreadWrapper;
 
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
-
 class AudioTrackJni : public PlayoutDelayProvider {
  public:
   static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* env,
                                               void* context);
+
+  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
+                                              void* context);
+
   static void ClearAndroidAudioDeviceObjects();
   explicit AudioTrackJni(const int32_t id);
   virtual ~AudioTrackJni();
 
  // Main initialization and termination
   int32_t Init();
   int32_t Terminate();
   bool Initialized() const { return _initialized; }
@@ -101,16 +102,19 @@ class AudioTrackJni : public PlayoutDela
   bool PlayoutError() const;
   void ClearPlayoutWarning();
   void ClearPlayoutError();
 
   // Speaker audio routing
   int32_t SetLoudspeakerStatus(bool enable);
   int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT
 
+  static const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
+  static const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
+
  protected:
   virtual int PlayoutDelayMs() { return 0; }
 
  private:
   void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) {
     _critSect.Enter();
   }
   void UnLock() UNLOCK_FUNCTION(_critSect) {
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
@@ -11,17 +11,24 @@
 // The functions in this file are called from native code. They can still be
 // accessed even though they are declared private.
 
 package org.webrtc.voiceengine;
 
 import android.content.Context;
 import android.content.pm.PackageManager;
 import android.media.AudioManager;
+import android.util.Log;
 
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+import org.mozilla.gecko.mozglue.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class AudioManagerAndroid {
  // Most Google lead devices use 44.1 kHz as the default sampling rate; 44.1 kHz
  // is also widely used on other Android devices.
   private static final int DEFAULT_SAMPLING_RATE = 44100;
  // Arbitrarily picked frame size, close to the value returned on a Nexus 4.
   // Return this default value when
   // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
   private static final int DEFAULT_FRAMES_PER_BUFFER = 256;
@@ -33,40 +40,47 @@ class AudioManagerAndroid {
 
   @SuppressWarnings("unused")
   private AudioManagerAndroid(Context context) {
     AudioManager audioManager = (AudioManager)
         context.getSystemService(Context.AUDIO_SERVICE);
 
     mNativeOutputSampleRate = DEFAULT_SAMPLING_RATE;
     mAudioLowLatencyOutputFrameSize = DEFAULT_FRAMES_PER_BUFFER;
+    mAudioLowLatencySupported = context.getPackageManager().hasSystemFeature(
+      PackageManager.FEATURE_AUDIO_LOW_LATENCY);
     if (android.os.Build.VERSION.SDK_INT >=
-        android.os.Build.VERSION_CODES.JELLY_BEAN_MR1) {
-      String sampleRateString = audioManager.getProperty(
-          AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
-      if (sampleRateString != null) {
-        mNativeOutputSampleRate = Integer.parseInt(sampleRateString);
-      }
-      String framesPerBuffer = audioManager.getProperty(
-          AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
-      if (framesPerBuffer != null) {
+        17 /*android.os.Build.VERSION_CODES.JELLY_BEAN_MR1*/) {
+      try {
+        Method getProperty = AudioManager.class.getMethod("getProperty", String.class);
+        Field sampleRateField = AudioManager.class.getField("PROPERTY_OUTPUT_SAMPLE_RATE");
+        Field framePerBufferField = AudioManager.class.getField("PROPERTY_OUTPUT_FRAMES_PER_BUFFER");
+        String sampleRateKey = (String)sampleRateField.get(null);
+        String framePerBufferKey = (String)framePerBufferField.get(null);
+        String sampleRateString = (String)getProperty.invoke(audioManager, sampleRateKey);
+        if (sampleRateString != null) {
+          mNativeOutputSampleRate = Integer.parseInt(sampleRateString);
+        }
+        String framesPerBuffer = (String)getProperty.invoke(audioManager, framePerBufferKey);
+        if (framesPerBuffer != null) {
           mAudioLowLatencyOutputFrameSize = Integer.parseInt(framesPerBuffer);
+        }
+      } catch (Exception ex) {
+        Log.w("WebRTC", "error getting low latency params", ex);
       }
     }
-    mAudioLowLatencySupported = context.getPackageManager().hasSystemFeature(
-        PackageManager.FEATURE_AUDIO_LOW_LATENCY);
   }
 
     @SuppressWarnings("unused")
     private int getNativeOutputSampleRate() {
       return mNativeOutputSampleRate;
     }
 
     @SuppressWarnings("unused")
     private boolean isAudioLowLatencySupported() {
         return mAudioLowLatencySupported;
     }
 
     @SuppressWarnings("unused")
     private int getAudioLowLatencyOutputFrameSize() {
         return mAudioLowLatencyOutputFrameSize;
     }
-}
\ No newline at end of file
+}
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -15,45 +15,52 @@ import java.util.concurrent.locks.Reentr
 
 import android.content.Context;
 import android.media.AudioFormat;
 import android.media.AudioManager;
 import android.media.AudioRecord;
 import android.media.MediaRecorder.AudioSource;
 import android.util.Log;
 
+import org.mozilla.gecko.mozglue.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class WebRtcAudioRecord {
-    private AudioRecord _audioRecord = null;
+    private AudioRecord _audioRecord;
 
     private Context _context;
 
     private ByteBuffer _recBuffer;
     private byte[] _tempBufRec;
 
     private final ReentrantLock _recLock = new ReentrantLock();
 
     private boolean _doRecInit = true;
-    private boolean _isRecording = false;
+    private boolean _isRecording;
 
-    private int _bufferedRecSamples = 0;
+    private int _bufferedRecSamples;
 
     WebRtcAudioRecord() {
         try {
             _recBuffer = ByteBuffer.allocateDirect(2 * 480); // Max 10 ms @ 48
                                                              // kHz
         } catch (Exception e) {
             DoLog(e.getMessage());
         }
 
         _tempBufRec = new byte[2 * 480];
     }
 
     @SuppressWarnings("unused")
     private int InitRecording(int audioSource, int sampleRate) {
-        audioSource = AudioSource.VOICE_COMMUNICATION;
+        if (android.os.Build.VERSION.SDK_INT >= 11) {
+            audioSource = AudioSource.VOICE_COMMUNICATION;
+        } else {
+            audioSource = AudioSource.DEFAULT;
+        }
         // get the minimum buffer size that can be used
         int minRecBufSize = AudioRecord.getMinBufferSize(
             sampleRate,
             AudioFormat.CHANNEL_IN_MONO,
             AudioFormat.ENCODING_PCM_16BIT);
 
         // DoLog("min rec buf size is " + minRecBufSize);
 
@@ -180,17 +187,17 @@ class WebRtcAudioRecord {
             // Ensure we always unlock, both for success, exception or error
             // return.
             _recLock.unlock();
         }
 
         return _bufferedRecSamples;
     }
 
-    final String logTag = "WebRTC AD java";
+    final String logTag = "WebRTC AR java";
 
     private void DoLog(String msg) {
         Log.d(logTag, msg);
     }
 
     private void DoLogErr(String msg) {
         Log.e(logTag, msg);
     }
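
The InitRecording change gates AudioSource.VOICE_COMMUNICATION (added in
API 11) behind an SDK check so older devices fall back to DEFAULT. Mirrored
on the native side for illustration; the constant values are Android's
published AudioSource numbers, stated here as an assumption:

    // SDK-gated audio source selection (values assumed from the Android SDK:
    // MediaRecorder.AudioSource.DEFAULT == 0, VOICE_COMMUNICATION == 7).
    int ChooseAudioSource(int sdk_int) {
      const int kDefault = 0;
      const int kVoiceCommunication = 7;
      return (sdk_int >= 11) ? kVoiceCommunication : kDefault;
    }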
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -15,34 +15,37 @@ import java.util.concurrent.locks.Reentr
 
 import android.content.Context;
 import android.media.AudioFormat;
 import android.media.AudioManager;
 import android.media.AudioRecord;
 import android.media.AudioTrack;
 import android.util.Log;
 
+import org.mozilla.gecko.mozglue.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class WebRtcAudioTrack {
-    private AudioTrack _audioTrack = null;
+    private AudioTrack _audioTrack;
 
     private Context _context;
     private AudioManager _audioManager;
 
     private ByteBuffer _playBuffer;
     private byte[] _tempBufPlay;
 
     private final ReentrantLock _playLock = new ReentrantLock();
 
     private boolean _doPlayInit = true;
     private boolean _doRecInit = true;
-    private boolean _isRecording = false;
-    private boolean _isPlaying = false;
+    private boolean _isRecording;
+    private boolean _isPlaying;
 
-    private int _bufferedPlaySamples = 0;
-    private int _playPosition = 0;
+    private int _bufferedPlaySamples;
+    private int _playPosition;
 
     WebRtcAudioTrack() {
         try {
             _playBuffer = ByteBuffer.allocateDirect(2 * 480); // Max 10 ms @ 48
                                                               // kHz
         } catch (Exception e) {
             DoLog(e.getMessage());
         }
@@ -292,17 +295,17 @@ class WebRtcAudioTrack {
         if (_audioManager != null) {
             level = _audioManager.getStreamVolume(
                 AudioManager.STREAM_VOICE_CALL);
         }
 
         return level;
     }
 
-    final String logTag = "WebRTC AD java";
+    final String logTag = "WebRTC AT java";
 
     private void DoLog(String msg) {
         Log.d(logTag, msg);
     }
 
     private void DoLogErr(String msg) {
         Log.e(logTag, msg);
     }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
@@ -6,25 +6,34 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/opensles_input.h"
 
 #include <assert.h>
+#include <dlfcn.h>
 
+#include "OpenSLESProvider.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+#include <media/AudioSystem.h>
+#include <audio_effects/effect_aec.h>
+#include <audio_effects/effect_ns.h>
+#include <utils/Errors.h>
+#endif
+
 #define VOID_RETURN
 #define OPENSL_RETURN_ON_FAILURE(op, ret_val)                    \
   do {                                                           \
     SLresult err = (op);                                         \
     if (err != SL_RESULT_SUCCESS) {                              \
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,          \
                    "OpenSL error: %d", err);                     \
       assert(false);                                             \
@@ -58,61 +67,111 @@ OpenSlesInput::OpenSlesInput(
       sles_engine_itf_(NULL),
       sles_recorder_(NULL),
       sles_recorder_itf_(NULL),
       sles_recorder_sbq_itf_(NULL),
       audio_buffer_(NULL),
       active_queue_(0),
       rec_sampling_rate_(0),
       agc_enabled_(false),
-      recording_delay_(0) {
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+      aec_(NULL),
+      ns_(NULL),
+#endif
+      recording_delay_(0),
+      opensles_lib_(NULL) {
 }
 
 OpenSlesInput::~OpenSlesInput() {
 }
 
 int32_t OpenSlesInput::SetAndroidAudioDeviceObjects(void* javaVM,
                                                     void* env,
                                                     void* context) {
+#if !defined(WEBRTC_GONK)
+  AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, env, context);
+#endif
   return 0;
 }
 
 void OpenSlesInput::ClearAndroidAudioDeviceObjects() {
+#if !defined(WEBRTC_GONK)
+  AudioManagerJni::ClearAndroidAudioDeviceObjects();
+#endif
 }
 
 int32_t OpenSlesInput::Init() {
   assert(!initialized_);
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to dlopen OpenSLES library");
+      return -1;
+  }
+
+  f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_RECORD_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_RECORD");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_RECORD_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to find OpenSLES function");
+      return -1;
+  }
+
   // Set up OpenSL engine.
-  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
-                                          NULL, NULL),
+#ifndef MOZILLA_INTERNAL_API
+  OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
+                                            NULL, NULL),
                            -1);
+#else
+  OPENSL_RETURN_ON_FAILURE(mozilla_get_sles_engine(&sles_engine_, 1, kOption), -1);
+#endif
+#ifndef MOZILLA_INTERNAL_API
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                     SL_BOOLEAN_FALSE),
                            -1);
+#else
+  OPENSL_RETURN_ON_FAILURE(mozilla_realize_sles_engine(sles_engine_), -1);
+#endif
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
-                                                         SL_IID_ENGINE,
+                                                         SL_IID_ENGINE_,
                                                          &sles_engine_itf_),
                            -1);
 
   if (InitSampleRate() != 0) {
     return -1;
   }
   AllocateBuffers();
   initialized_ = true;
   return 0;
 }
 
 int32_t OpenSlesInput::Terminate() {
   // It is assumed that the caller has stopped recording before terminating.
   assert(!recording_);
+#ifndef MOZILLA_INTERNAL_API
   (*sles_engine_)->Destroy(sles_engine_);
+#else
+  mozilla_destroy_sles_engine(&sles_engine_);
+#endif
   initialized_ = false;
   mic_initialized_ = false;
   rec_initialized_ = false;
+  dlclose(opensles_lib_);
   return 0;
 }
 
 int32_t OpenSlesInput::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
   assert(index == 0);
   // Empty strings.
@@ -224,16 +283,24 @@ int32_t OpenSlesInput::MicrophoneBoost(b
   return -1;  // Not supported
 }
 
 int32_t OpenSlesInput::StereoRecordingIsAvailable(bool& available) {  // NOLINT
   available = false;  // Stereo recording not supported on Android.
   return 0;
 }
 
+int32_t OpenSlesInput::SetStereoRecording(bool enable) {  // NOLINT
+  if (enable) {
+    return -1;
+  } else {
+    return 0;
+  }
+}
+
 int32_t OpenSlesInput::StereoRecording(bool& enabled) const {  // NOLINT
   enabled = false;
   return 0;
 }
 
 int32_t OpenSlesInput::RecordingDelay(uint16_t& delayMS) const {  // NOLINT
   delayMS = recording_delay_;
   return 0;
@@ -267,18 +334,22 @@ void OpenSlesInput::UpdateRecordingDelay
   // TODO(hellner): Add accurate delay estimate.
   // On average half the current buffer will have been filled with audio.
   int outstanding_samples =
       (TotalBuffersUsed() - 0.5) * buffer_size_samples();
   recording_delay_ = outstanding_samples / (rec_sampling_rate_ / 1000);
 }
 
 void OpenSlesInput::UpdateSampleRate() {
+#if !defined(WEBRTC_GONK)
   rec_sampling_rate_ = audio_manager_.low_latency_supported() ?
       audio_manager_.native_output_sample_rate() : kDefaultSampleRate;
+#else
+  rec_sampling_rate_ = kDefaultSampleRate;
+#endif
 }
 
 void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
   // Buffer size is 10ms of data.
   num_fifo_buffers_needed_ = kNum10MsToBuffer;
 }
 
 void OpenSlesInput::AllocateBuffers() {
@@ -320,16 +391,114 @@ bool OpenSlesInput::EnqueueAllBuffers() 
   // |fifo_|.
   while (fifo_->size() != 0) {
     // Clear the fifo.
     fifo_->Pop();
   }
   return true;
 }
 
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+bool OpenSlesInput::CheckPlatformAEC() {
+  effect_descriptor_t fxDesc;
+  uint32_t numFx;
+
+  if (android::AudioEffect::queryNumberEffects(&numFx) != android::NO_ERROR) {
+    return false;
+  }
+
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "Platform has %d effects", numFx);
+
+  for (uint32_t i = 0; i < numFx; i++) {
+    if (android::AudioEffect::queryEffect(i, &fxDesc) != android::NO_ERROR) {
+      continue;
+    }
+    if (memcmp(&fxDesc.type, FX_IID_AEC, sizeof(fxDesc.type)) == 0) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void OpenSlesInput::SetupVoiceMode() {
+  SLAndroidConfigurationItf configItf;
+  SLresult res = (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_ANDROIDCONFIGURATION_,
+                                                 (void*)&configItf);
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL GetInterface: %d", res);
+
+  if (res == SL_RESULT_SUCCESS) {
+    SLuint32 voiceMode = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
+    SLuint32 voiceSize = sizeof(voiceMode);
+
+    res = (*configItf)->SetConfiguration(configItf,
+                                         SL_ANDROID_KEY_RECORDING_PRESET,
+                                         &voiceMode, voiceSize);
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL Set Voice mode res: %d", res);
+  }
+}
+
+void OpenSlesInput::SetupAECAndNS() {
+  bool hasAec = CheckPlatformAEC();
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "Platform has AEC: %d", hasAec);
+  // This code should not have been enabled if this fails, because it means the
+  // software AEC has will have been disabled as well. If you hit this, you need
+  // to fix your B2G config or fix the hardware AEC on your device.
+  assert(hasAec);
+
+  SLAndroidConfigurationItf configItf;
+  SLresult res = (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_ANDROIDCONFIGURATION_,
+                                                 (void*)&configItf);
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL GetInterface: %d", res);
+
+  if (res == SL_RESULT_SUCCESS) {
+    SLuint32 sessionId = 0;
+    SLuint32 idSize = sizeof(sessionId);
+    res = (*configItf)->GetConfiguration(configItf,
+                                         SL_ANDROID_KEY_RECORDING_SESSION_ID,
+                                         &idSize, &sessionId);
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL Get sessionId res: %d", res);
+
+    if (res == SL_RESULT_SUCCESS && idSize == sizeof(sessionId)) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL sessionId: %d", sessionId);
+
+      aec_ = new android::AudioEffect(FX_IID_AEC, NULL, 0, 0, 0, sessionId, 0);
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL aec: %p", aec_);
+
+      if (aec_) {
+        android::status_t status = aec_->initCheck();
+        if (status == android::NO_ERROR || status == android::ALREADY_EXISTS) {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL aec enabled");
+          aec_->setEnabled(true);
+        } else {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL aec disabled: %d", status);
+          delete aec_;
+          aec_ = NULL;
+        }
+      }
+
+      ns_ = new android::AudioEffect(FX_IID_NS, NULL, 0, 0, 0, sessionId, 0);
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL ns: %p", ns_);
+
+      if (ns_) {
+        android::status_t status = ns_->initCheck();
+        if (status == android::NO_ERROR || status == android::ALREADY_EXISTS) {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL ns enabled");
+          ns_->setEnabled(true);
+        } else {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, "OpenSL ns disabled: %d", status);
+          delete ns_;
+          ns_ = NULL;
+        }
+      }
+    }
+  }
+}
+#endif
+
 bool OpenSlesInput::CreateAudioRecorder() {
   if (!event_.Start()) {
     assert(false);
     return false;
   }
   SLDataLocator_IODevice micLocator = {
     SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
     SL_DEFAULTDEVICEID_AUDIOINPUT, NULL };
@@ -342,66 +511,83 @@ bool OpenSlesInput::CreateAudioRecorder(
   SLDataFormat_PCM configuration =
       webrtc_opensl::CreatePcmConfiguration(rec_sampling_rate_);
   SLDataSink audio_sink = { &simple_buf_queue, &configuration };
 
   // Interfaces for recording android audio data and Android are needed.
   // Note the interfaces still need to be initialized. This only tells OpenSl
   // that the interfaces will be needed at some point.
   const SLInterfaceID id[kNumInterfaces] = {
-    SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_ANDROIDSIMPLEBUFFERQUEUE_, SL_IID_ANDROIDCONFIGURATION_ };
   const SLboolean req[kNumInterfaces] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
                                                &sles_recorder_,
                                                &audio_source,
                                                &audio_sink,
                                                kNumInterfaces,
                                                id,
                                                req),
       false);
 
   SLAndroidConfigurationItf recorder_config;
   OPENSL_RETURN_ON_FAILURE(
       (*sles_recorder_)->GetInterface(sles_recorder_,
-                                      SL_IID_ANDROIDCONFIGURATION,
+                                      SL_IID_ANDROIDCONFIGURATION_,
                                       &recorder_config),
       false);
 
   // Set audio recorder configuration to
   // SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION which ensures that we
   // use the main microphone tuned for audio communications.
   SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
   OPENSL_RETURN_ON_FAILURE(
       (*recorder_config)->SetConfiguration(recorder_config,
                                            SL_ANDROID_KEY_RECORDING_PRESET,
                                            &stream_type,
                                            sizeof(SLint32)),
       false);
 
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+  SetupVoiceMode();
+#endif
+
   // Realize the recorder in synchronous mode.
   OPENSL_RETURN_ON_FAILURE((*sles_recorder_)->Realize(sles_recorder_,
                                                       SL_BOOLEAN_FALSE),
                            false);
+
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+  SetupAECAndNS();
+#endif
+
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
+      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD_,
                                       static_cast<void*>(&sles_recorder_itf_)),
       false);
   OPENSL_RETURN_ON_FAILURE(
       (*sles_recorder_)->GetInterface(
           sles_recorder_,
-          SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+          SL_IID_ANDROIDSIMPLEBUFFERQUEUE_,
           static_cast<void*>(&sles_recorder_sbq_itf_)),
       false);
   return true;
 }
 
 void OpenSlesInput::DestroyAudioRecorder() {
   event_.Stop();
+
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+  delete aec_;
+  delete ns_;
+  aec_ = NULL;
+  ns_ = NULL;
+#endif
+
   if (sles_recorder_sbq_itf_) {
     // Release all buffers currently queued up.
     OPENSL_RETURN_ON_FAILURE(
         (*sles_recorder_sbq_itf_)->Clear(sles_recorder_sbq_itf_),
         VOID_RETURN);
     sles_recorder_sbq_itf_ = NULL;
   }
   sles_recorder_itf_ = NULL;
@@ -528,16 +714,17 @@ bool OpenSlesInput::CbThreadImpl() {
   CriticalSectionScoped lock(crit_sect_.get());
   if (HandleOverrun(event_id, event_msg)) {
     return recording_;
   }
   // If the fifo_ has audio data process it.
   while (fifo_->size() > 0 && recording_) {
     int8_t* audio = fifo_->Pop();
     audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
-    audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
+    audio_buffer_->SetVQEData(delay_provider_ ?
+                              delay_provider_->PlayoutDelayMs() : 0,
                               recording_delay_, 0);
     audio_buffer_->DeliverRecordedData();
   }
   return recording_;
 }
 
 }  // namespace webrtc
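
Init() now binds libOpenSLES.so at runtime rather than linking against it,
so the module still loads on devices where the library or its exported
interface IDs are absent. The dlopen/dlsym dance, reduced to a sketch around
one symbol (helper name invented):

    #include <dlfcn.h>
    #include <SLES/OpenSLES.h>

    typedef SLresult (*slCreateEngine_t)(SLObjectItf*, SLuint32,
                                         const SLEngineOption*, SLuint32,
                                         const SLInterfaceID*,
                                         const SLboolean*);

    // Returns the engine factory, or NULL if the library or symbol is
    // missing; the caller keeps *lib_out and dlclose()s it in Terminate().
    static slCreateEngine_t BindCreateEngine(void** lib_out) {
      void* lib = dlopen("libOpenSLES.so", RTLD_LAZY);
      if (!lib) return NULL;
      *lib_out = lib;
      return (slCreateEngine_t)dlsym(lib, "slCreateEngine");
    }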
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
@@ -10,17 +10,21 @@
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
+#if !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#else
+#include "media/AudioEffect.h"
+#endif
 #include "webrtc/modules/audio_device/android/low_latency_event.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
 
 namespace webrtc {
 
 class AudioDeviceBuffer;
@@ -99,17 +103,17 @@ class OpenSlesInput {
 
   // Microphone boost control
   int32_t MicrophoneBoostIsAvailable(bool& available);  // NOLINT
   int32_t SetMicrophoneBoost(bool enable);
   int32_t MicrophoneBoost(bool& enabled) const;  // NOLINT
 
   // Stereo support
   int32_t StereoRecordingIsAvailable(bool& available);  // NOLINT
-  int32_t SetStereoRecording(bool enable) { return -1; }
+  int32_t SetStereoRecording(bool enable);
   int32_t StereoRecording(bool& enabled) const;  // NOLINT
 
   // Delay information and control
   int32_t RecordingDelay(uint16_t& delayMS) const;  // NOLINT
 
   bool RecordingWarning() const { return false; }
   bool RecordingError() const  { return false; }
   void ClearRecordingWarning() {}
@@ -119,32 +123,37 @@ class OpenSlesInput {
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
  private:
   enum {
     kNumInterfaces = 2,
     // Keep as few OpenSL buffers as possible to avoid wasting memory. 2 is
     // minimum for playout. Keep 2 for recording as well.
     kNumOpenSlBuffers = 2,
-    kNum10MsToBuffer = 3,
+    kNum10MsToBuffer = 8,
   };
 
   int InitSampleRate();
   int buffer_size_samples() const;
   int buffer_size_bytes() const;
   void UpdateRecordingDelay();
   void UpdateSampleRate();
   void CalculateNumFifoBuffersNeeded();
   void AllocateBuffers();
   int TotalBuffersUsed() const;
   bool EnqueueAllBuffers();
   // This function also configures the audio recorder, e.g. sample rate to use
   // etc, so it should be called when starting recording.
   bool CreateAudioRecorder();
   void DestroyAudioRecorder();
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+  void SetupAECAndNS();
+  void SetupVoiceMode();
+  bool CheckPlatformAEC();
+#endif
 
   // When overrun happens there will be more frames received from OpenSL than
   // the desired number of buffers. It is possible to expand the number of
   // buffers as you go but that would greatly increase the complexity of this
   // code. HandleOverrun gracefully handles the scenario by restarting playout,
   // throwing away all pending audio data. This will sound like a click. This
   // is also logged to identify these types of clicks.
   // This function returns true if there has been overrun. Further processing
@@ -165,18 +174,20 @@ class OpenSlesInput {
   bool StartCbThreads();
   void StopCbThreads();
   static bool CbThread(void* context);
  // This function must be protected against data races with threads calling this
   // class' public functions. It is a requirement for this class to be
   // Thread-compatible.
   bool CbThreadImpl();
 
+#if !defined(WEBRTC_GONK)
   // Java API handle
   AudioManagerJni audio_manager_;
+#endif
 
   int id_;
   PlayoutDelayProvider* delay_provider_;
   bool initialized_;
   bool mic_initialized_;
   bool rec_initialized_;
 
   // Members that are read/write accessed concurrently by the process thread and
@@ -210,15 +221,34 @@ class OpenSlesInput {
   // next time RecorderSimpleBufferQueueCallbackHandler is invoked.
   // Ready means buffer contains audio data from the device.
   int active_queue_;
 
   // Audio settings
   uint32_t rec_sampling_rate_;
   bool agc_enabled_;
 
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+  android::AudioEffect* aec_;
+  android::AudioEffect* ns_;
+#endif
   // Audio status
   uint16_t recording_delay_;
+
+  // dlopen for OpenSLES
+  void *opensles_lib_;
+  typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
+                                       SLuint32,
+                                       const SLEngineOption *,
+                                       SLuint32,
+                                       const SLInterfaceID *,
+                                       const SLboolean *);
+  slCreateEngine_t f_slCreateEngine;
+  SLInterfaceID SL_IID_ENGINE_;
+  SLInterfaceID SL_IID_BUFFERQUEUE_;
+  SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
+  SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
+  SLInterfaceID SL_IID_RECORD_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
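
Raising kNum10MsToBuffer from 3 to 8 deepens the recording FIFO from 30 ms to
80 ms of 10 ms slots, trading a little memory for slack against scheduling
jitter. The sizing math, as a sketch:

    #include <stdint.h>

    // Bytes held by the FIFO: slots * samples-per-10ms * bytes-per-sample
    // (mono 16-bit PCM, as used by this module).
    int FifoBytes(int rate_hz, int num_10ms_slots) {
      return num_10ms_slots * (rate_hz / 100) * (int)sizeof(int16_t);
    }
    // FifoBytes(44100, 8) == 8 * 441 * 2 == 7056 bytes, i.e. ~80 ms of audio.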
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
@@ -3,20 +3,24 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
+
 #include "webrtc/modules/audio_device/android/opensles_output.h"
 
 #include <assert.h>
+#include <dlfcn.h>
 
+#include "OpenSLESProvider.h"
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 #include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
@@ -58,17 +62,18 @@ OpenSlesOutput::OpenSlesOutput(const int
       sles_player_itf_(NULL),
       sles_player_sbq_itf_(NULL),
       sles_output_mixer_(NULL),
       audio_buffer_(NULL),
       active_queue_(0),
       speaker_sampling_rate_(kDefaultSampleRate),
       buffer_size_samples_(0),
       buffer_size_bytes_(0),
-      playout_delay_(0) {
+      playout_delay_(0),
+      opensles_lib_(NULL) {
 }
 
 OpenSlesOutput::~OpenSlesOutput() {
 }
 
 int32_t OpenSlesOutput::SetAndroidAudioDeviceObjects(void* javaVM,
                                                      void* env,
                                                      void* context) {
@@ -78,25 +83,61 @@ int32_t OpenSlesOutput::SetAndroidAudioD
 
 void OpenSlesOutput::ClearAndroidAudioDeviceObjects() {
   AudioManagerJni::ClearAndroidAudioDeviceObjects();
 }
 
 int32_t OpenSlesOutput::Init() {
   assert(!initialized_);
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to dlopen OpenSLES library");
+      return -1;
+  }
+
+  f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_PLAY_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_PLAY");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_VOLUME_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_VOLUME");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_PLAY_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_VOLUME_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to find OpenSLES function");
+      return -1;
+  }
+
   // Set up OpenSl engine.
-  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
-                                          NULL, NULL),
+#ifndef MOZILLA_INTERNAL_API
+  OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
+                                            NULL, NULL),
                            -1);
+#else
+  OPENSL_RETURN_ON_FAILURE(mozilla_get_sles_engine(&sles_engine_, 1, kOption), -1);
+#endif
+#ifndef MOZILLA_INTERNAL_API
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                     SL_BOOLEAN_FALSE),
                            -1);
+#else
+  OPENSL_RETURN_ON_FAILURE(mozilla_realize_sles_engine(sles_engine_), -1);
+#endif
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
-                                                         SL_IID_ENGINE,
+                                                         SL_IID_ENGINE_,
                                                          &sles_engine_itf_),
                            -1);
   // Set up OpenSl output mix.
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_,
                                            &sles_output_mixer_,
                                            0,
                                            NULL,
@@ -114,20 +155,25 @@ int32_t OpenSlesOutput::Init() {
   initialized_ = true;
   return 0;
 }
 
 int32_t OpenSlesOutput::Terminate() {
   // It is assumed that the caller has stopped recording before terminating.
   assert(!playing_);
   (*sles_output_mixer_)->Destroy(sles_output_mixer_);
+#ifndef MOZILLA_INTERNAL_API
   (*sles_engine_)->Destroy(sles_engine_);
+#else
+  mozilla_destroy_sles_engine(&sles_engine_);
+#endif
   initialized_ = false;
   speaker_initialized_ = false;
   play_initialized_ = false;
+  dlclose(opensles_lib_);
   return 0;
 }
 
 int32_t OpenSlesOutput::PlayoutDeviceName(uint16_t index,
                                           char name[kAdmMaxDeviceNameSize],
                                           char guid[kAdmMaxGuidSize]) {
   assert(index == 0);
   // Empty strings.
@@ -301,24 +347,28 @@ bool OpenSlesOutput::InitSampleRate() {
 void OpenSlesOutput::UpdatePlayoutDelay() {
   // TODO(hellner): Add accurate delay estimate.
   // On average half the current buffer will have been played out.
   int outstanding_samples = (TotalBuffersUsed() - 0.5) * buffer_size_samples_;
   playout_delay_ = outstanding_samples / (speaker_sampling_rate_ / 1000);
 }
 
 bool OpenSlesOutput::SetLowLatency() {
+#if !defined(WEBRTC_GONK)
   if (!audio_manager_.low_latency_supported()) {
     return false;
   }
   buffer_size_samples_ = audio_manager_.native_buffer_size();
   assert(buffer_size_samples_ > 0);
   speaker_sampling_rate_ = audio_manager_.native_output_sample_rate();
   assert(speaker_sampling_rate_ > 0);
   return true;
+#else
+  return false;
+#endif
 }
 
 void OpenSlesOutput::CalculateNumFifoBuffersNeeded() {
   int number_of_bytes_needed =
       (speaker_sampling_rate_ * kNumChannels * sizeof(int16_t)) * 10 / 1000;
 
   // Ceiling of integer division: 1 + ((x - 1) / y)
   int buffers_per_10_ms =
@@ -394,29 +444,29 @@ bool OpenSlesOutput::CreateAudioPlayer()
   locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
   locator_outputmix.outputMix = sles_output_mixer_;
   SLDataSink audio_sink = { &locator_outputmix, NULL };
 
   // Interfaces for streaming audio data, setting volume and Android are needed.
   // Note the interfaces still need to be initialized. This only tells OpenSl
   // that the interfaces will be needed at some point.
   SLInterfaceID ids[kNumInterfaces] = {
-    SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_BUFFERQUEUE_, SL_IID_VOLUME_, SL_IID_ANDROIDCONFIGURATION_ };
   SLboolean req[kNumInterfaces] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, &sles_player_,
                                              &audio_source, &audio_sink,
                                              kNumInterfaces, ids, req),
       false);
 
   SLAndroidConfigurationItf player_config;
   OPENSL_RETURN_ON_FAILURE(
       (*sles_player_)->GetInterface(sles_player_,
-                                    SL_IID_ANDROIDCONFIGURATION,
+                                    SL_IID_ANDROIDCONFIGURATION_,
                                     &player_config),
       false);
 
   // Set audio player configuration to SL_ANDROID_STREAM_VOICE which corresponds
   // to android.media.AudioManager.STREAM_VOICE_CALL.
   SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
   OPENSL_RETURN_ON_FAILURE(
       (*player_config)->SetConfiguration(player_config,
@@ -425,21 +475,21 @@ bool OpenSlesOutput::CreateAudioPlayer()
                                          sizeof(SLint32)),
       false);
 
   // Realize the player in synchronous mode.
   OPENSL_RETURN_ON_FAILURE((*sles_player_)->Realize(sles_player_,
                                                     SL_BOOLEAN_FALSE),
                            false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY,
+      (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY_,
                                     &sles_player_itf_),
       false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE,
+      (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE_,
                                     &sles_player_sbq_itf_),
       false);
   return true;
 }
 
 void OpenSlesOutput::DestroyAudioPlayer() {
   SLAndroidSimpleBufferQueueItf sles_player_sbq_itf = sles_player_sbq_itf_;
   {
@@ -576,8 +626,10 @@ bool OpenSlesOutput::CbThreadImpl() {
     fine_buffer_->GetBufferData(audio);
     fifo_->Push(audio);
     active_queue_ = (active_queue_ + 1) % TotalBuffersUsed();
   }
   return playing_;
 }
 
 }  // namespace webrtc
+
+#endif
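
Under MOZILLA_INTERNAL_API both the input and output sides fetch the engine
through OpenSLESProvider (mozilla_get_sles_engine and friends) instead of
each creating their own. A hypothetical refcounted shape for such a provider
(locking and the dlsym indirection omitted for brevity; names invented):

    #include <SLES/OpenSLES.h>
    #include <stddef.h>

    static SLObjectItf g_engine = NULL;
    static int g_engine_refs = 0;

    SLresult GetSharedEngine(SLObjectItf* out) {
      if (!g_engine) {
        SLresult res = slCreateEngine(&g_engine, 0, NULL, 0, NULL, NULL);
        if (res != SL_RESULT_SUCCESS) return res;
        res = (*g_engine)->Realize(g_engine, SL_BOOLEAN_FALSE);
        if (res != SL_RESULT_SUCCESS) {
          (*g_engine)->Destroy(g_engine);
          g_engine = NULL;
          return res;
        }
      }
      ++g_engine_refs;
      *out = g_engine;
      return SL_RESULT_SUCCESS;
    }

    void ReleaseSharedEngine(SLObjectItf* engine) {
      *engine = NULL;
      if (--g_engine_refs == 0 && g_engine) {
        (*g_engine)->Destroy(g_engine);  // last user tears the engine down
        g_engine = NULL;
      }
    }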
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
@@ -10,31 +10,36 @@
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
+#if !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#endif
 #include "webrtc/modules/audio_device/android/low_latency_event.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
 
 namespace webrtc {
 
 class AudioDeviceBuffer;
 class CriticalSectionWrapper;
 class FineAudioBuffer;
 class SingleRwFifo;
 class ThreadWrapper;
 
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
+// Allows the real implementation to be replaced with the dummy below.
+
// OpenSL implementation that facilitates playing PCM data to an Android device.
 // This class is Thread-compatible. I.e. Given an instance of this class, calls
 // to non-const methods require exclusive access to the object.
 class OpenSlesOutput : public PlayoutDelayProvider {
  public:
   explicit OpenSlesOutput(const int32_t id);
   virtual ~OpenSlesOutput();
 
@@ -183,18 +188,20 @@ class OpenSlesOutput : public PlayoutDel
   bool StartCbThreads();
   void StopCbThreads();
   static bool CbThread(void* context);
  // This function must be protected against data races with threads calling this
   // class' public functions. It is a requirement for this class to be
   // Thread-compatible.
   bool CbThreadImpl();
 
+#if !defined(WEBRTC_GONK)
   // Java API handle
   AudioManagerJni audio_manager_;
+#endif
 
   int id_;
   bool initialized_;
   bool speaker_initialized_;
   bool play_initialized_;
 
   // Members that are read/write accessed concurrently by the process thread and
   // threads calling public functions of this class.
@@ -231,13 +238,223 @@ class OpenSlesOutput : public PlayoutDel
 
   // Audio settings
   uint32_t speaker_sampling_rate_;
   int buffer_size_samples_;
   int buffer_size_bytes_;
 
   // Audio status
   uint16_t playout_delay_;
+
+  // dlopen for OpenSLES
+  void *opensles_lib_;
+  typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
+                                       SLuint32,
+                                       const SLEngineOption *,
+                                       SLuint32,
+                                       const SLInterfaceID *,
+                                       const SLboolean *);
+  slCreateEngine_t f_slCreateEngine;
+  SLInterfaceID SL_IID_ENGINE_;
+  SLInterfaceID SL_IID_BUFFERQUEUE_;
+  SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
+  SLInterfaceID SL_IID_PLAY_;
+  SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
+  SLInterfaceID SL_IID_VOLUME_;
 };
 
+#else
+
+// Dummy OpenSlesOutput
+class OpenSlesOutput : public PlayoutDelayProvider {
+ public:
+  explicit OpenSlesOutput(const int32_t id) :
+    initialized_(false), speaker_initialized_(false),
+    play_initialized_(false), playing_(false)
+  {}
+  virtual ~OpenSlesOutput() {}
+
+  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
+                                              void* env,
+                                              void* context) { return 0; }
+  static void ClearAndroidAudioDeviceObjects() {}
+
+  // Main initialization and termination
+  int32_t Init() { initialized_ = true; return 0; }
+  int32_t Terminate() { initialized_ = false; return 0; }
+  bool Initialized() const { return initialized_; }
+
+  // Device enumeration
+  int16_t PlayoutDevices() { return 1; }
+
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize])
+  {
+    assert(index == 0);
+    // Empty strings.
+    name[0] = '\0';
+    guid[0] = '\0';
+    return 0;
+  }
+
+  // Device selection
+  int32_t SetPlayoutDevice(uint16_t index)
+  {
+    assert(index == 0);
+    return 0;
+  }
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) { return 0; }
+
+  // No-op
+  int32_t SetPlayoutSampleRate(uint32_t sample_rate_hz) { return 0; }
+
+  // Audio transport initialization
+  int32_t PlayoutIsAvailable(bool& available)  // NOLINT
+  {
+    available = true;
+    return 0;
+  }
+  int32_t InitPlayout()
+  {
+    assert(initialized_);
+    play_initialized_ = true;
+    return 0;
+  }
+  bool PlayoutIsInitialized() const { return play_initialized_; }
+
+  // Audio transport control
+  int32_t StartPlayout()
+  {
+    assert(play_initialized_);
+    assert(!playing_);
+    playing_ = true;
+    return 0;
+  }
+
+  int32_t StopPlayout()
+  {
+    playing_ = false;
+    return 0;
+  }
+
+  bool Playing() const { return playing_; }
+
+  // Audio mixer initialization
+  int32_t SpeakerIsAvailable(bool& available)  // NOLINT
+  {
+    available = true;
+    return 0;
+  }
+  int32_t InitSpeaker()
+  {
+    assert(!playing_);
+    speaker_initialized_ = true;
+    return 0;
+  }
+  bool SpeakerIsInitialized() const { return speaker_initialized_; }
+
+  // Speaker volume controls
+  int32_t SpeakerVolumeIsAvailable(bool& available)  // NOLINT
+  {
+    available = true;
+    return 0;
+  }
+  int32_t SetSpeakerVolume(uint32_t volume)
+  {
+    assert(speaker_initialized_);
+    assert(initialized_);
+    return 0;
+  }
+  int32_t SpeakerVolume(uint32_t& volume) const { return 0; }  // NOLINT
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const  // NOLINT
+  {
+    assert(speaker_initialized_);
+    assert(initialized_);
+    maxVolume = 0;
+    return 0;
+  }
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const  // NOLINT
+  {
+    assert(speaker_initialized_);
+    assert(initialized_);
+    minVolume = 0;
+    return 0;
+  }
+  int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const  // NOLINT
+  {
+    assert(speaker_initialized_);
+    assert(initialized_);
+    stepSize = 0;
+    return 0;
+  }
+
+  // Speaker mute control
+  int32_t SpeakerMuteIsAvailable(bool& available)  // NOLINT
+  {
+    available = true;
+    return 0;
+  }
+  int32_t SetSpeakerMute(bool enable) { return -1; }
+  int32_t SpeakerMute(bool& enabled) const { return -1; }  // NOLINT
+
+
+  // Stereo support
+  int32_t StereoPlayoutIsAvailable(bool& available)  // NOLINT
+  {
+    available = true;
+    return 0;
+  }
+  int32_t SetStereoPlayout(bool enable)
+  {
+    return 0;
+  }
+  int32_t StereoPlayout(bool& enabled) const  // NOLINT
+  {
+    enabled = kNumChannels == 2;
+    return 0;
+  }
+
+  // Delay information and control
+  int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+                                   uint16_t sizeMS) { return -1; }
+  int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,  // NOLINT
+                        uint16_t& sizeMS) const
+  {
+    type = AudioDeviceModule::kAdaptiveBufferSize;
+    sizeMS = 40;
+    return 0;
+  }
+  int32_t PlayoutDelay(uint16_t& delayMS) const  // NOLINT
+  {
+    delayMS = 0;
+    return 0;
+  }
+
+
+  // Error and warning information
+  bool PlayoutWarning() const { return false; }
+  bool PlayoutError() const { return false; }
+  void ClearPlayoutWarning() {}
+  void ClearPlayoutError() {}
+
+  // Attach audio buffer
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {}
+
+  // Speaker audio routing
+  int32_t SetLoudspeakerStatus(bool enable) { return 0; }
+  int32_t GetLoudspeakerStatus(bool& enable) const { enable = true; return 0; }  // NOLINT
+
+ protected:
+  virtual int PlayoutDelayMs() { return 40; }
+
+ private:
+  bool initialized_;
+  bool speaker_initialized_;
+  bool play_initialized_;
+  bool playing_;
+};
+#endif
+
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
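
As an aside, the dlopen()/dlsym() members declared in the real OpenSlesOutput above are typically resolved with a pattern like the following. This is a hedged sketch for orientation only, not code from the patch; the local names are invented, and OpenSLES interface IDs are exported data symbols, hence the extra dereference.

    void* lib = dlopen("libOpenSLES.so", RTLD_LAZY);
    if (lib) {
      slCreateEngine_t create_engine =
          reinterpret_cast<slCreateEngine_t>(dlsym(lib, "slCreateEngine"));
      // Interface IDs are exported variables, not functions, so dlsym()
      // returns their address and one dereference yields the id itself.
      SLInterfaceID iid_engine =
          *reinterpret_cast<SLInterfaceID*>(dlsym(lib, "SL_IID_ENGINE"));
      // ... likewise for SL_IID_BUFFERQUEUE, SL_IID_PLAY, SL_IID_VOLUME.
    }
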
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc
@@ -3,29 +3,45 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#if defined(_MSC_VER)
+#include <windows.h>
+#endif
+
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 
 #include <assert.h>
 
 static int UpdatePos(int pos, int capacity) {
   return (pos + 1) % capacity;
 }
 
 namespace webrtc {
 
 namespace subtle {
 
-#if defined(__aarch64__)
+// Start with compiler support, then processor-specific hacks
+#if defined(__GNUC__) || defined(__clang__)
+// Available on GCC and Clang; other compilers may need their own barrier.
+inline void MemoryBarrier() {
+  __sync_synchronize();
+}
+
+#elif defined(_MSC_VER)
+inline void MemoryBarrier() {
+  ::MemoryBarrier();
+}
+
+#elif defined(__aarch64__)
 // From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm64_gcc.h
 inline void MemoryBarrier() {
   __asm__ __volatile__ ("dmb ish" ::: "memory");
 }
 
 #elif defined(__ARMEL__)
 // From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm_gcc.h
 inline void MemoryBarrier() {
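
For comparison, the compiler-level barrier preferred first in the chain above is a full fence; on a C++11 toolchain (which this tree does not yet assume) the equivalent would be a sequentially consistent atomic fence. Sketch for reference only:

    #include <atomic>

    // Illustrative C++11 equivalent of __sync_synchronize(); not used here.
    inline void MemoryBarrierCxx11() {
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
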
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
@@ -44,21 +44,31 @@
         'dummy/audio_device_dummy.cc',
         'dummy/audio_device_dummy.h',
         'dummy/audio_device_utility_dummy.cc',
         'dummy/audio_device_utility_dummy.h',
         'dummy/file_audio_device.cc',
         'dummy/file_audio_device.h',
       ],
       'conditions': [
-        ['OS=="linux"', {
+        ['build_with_mozilla==1', {
+          'cflags_mozilla': [
+            '$(NSPR_CFLAGS)',
+          ],
+        }],
+        ['hardware_aec_ns==1', {
+          'defines': [
+            'WEBRTC_HARDWARE_AEC_NS',
+          ],
+        }],
+        ['OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1', {
           'include_dirs': [
             'linux',
           ],
-        }], # OS==linux
+        }], # OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1
         ['OS=="ios"', {
           'include_dirs': [
             'ios',
           ],
         }], # OS==ios
         ['OS=="mac"', {
           'include_dirs': [
             'mac',
@@ -66,41 +76,53 @@
         }], # OS==mac
         ['OS=="win"', {
           'include_dirs': [
             'win',
           ],
         }],
         ['OS=="android"', {
           'include_dirs': [
+            '/widget/android',
             'android',
           ],
         }], # OS==android
+        ['moz_widget_toolkit_gonk==1', {
+          'cflags_mozilla': [
+            '-I$(ANDROID_SOURCE)/frameworks/wilhelm/include',
+            '-I$(ANDROID_SOURCE)/frameworks/av/include',
+            '-I$(ANDROID_SOURCE)/system/media/wilhelm/include',
+            '-I$(ANDROID_SOURCE)/system/media/audio_effects/include',
+            '-I$(ANDROID_SOURCE)/frameworks/native/include',
+          ],
+          'include_dirs': [
+            'android',
+          ],
+        }], # moz_widget_toolkit_gonk==1
+        ['enable_android_opensl==1', {
+          'include_dirs': [
+            'opensl',
+          ],
+        }], # enable_android_opensl
         ['include_internal_audio_device==0', {
           'defines': [
             'WEBRTC_DUMMY_AUDIO_BUILD',
           ],
         }],
         ['build_with_chromium==0', {
           'sources': [
             # Don't link these into Chrome since they contain static data.
             'dummy/file_audio_device_factory.cc',
             'dummy/file_audio_device_factory.h',
           ],
         }],
         ['include_internal_audio_device==1', {
           'sources': [
-            'linux/alsasymboltable_linux.cc',
-            'linux/alsasymboltable_linux.h',
-            'linux/audio_device_alsa_linux.cc',
-            'linux/audio_device_alsa_linux.h',
             'linux/audio_device_utility_linux.cc',
             'linux/audio_device_utility_linux.h',
-            'linux/audio_mixer_manager_alsa_linux.cc',
-            'linux/audio_mixer_manager_alsa_linux.h',
             'linux/latebindingsymboltable_linux.cc',
             'linux/latebindingsymboltable_linux.h',
             'ios/audio_device_ios.mm',
             'ios/audio_device_ios.h',
             'ios/audio_device_utility_ios.cc',
             'ios/audio_device_utility_ios.h',
             'mac/audio_device_mac.cc',
             'mac/audio_device_mac.h',
@@ -114,70 +136,103 @@
             'win/audio_device_core_win.cc',
             'win/audio_device_core_win.h',
             'win/audio_device_wave_win.cc',
             'win/audio_device_wave_win.h',
             'win/audio_device_utility_win.cc',
             'win/audio_device_utility_win.h',
             'win/audio_mixer_manager_win.cc',
             'win/audio_mixer_manager_win.h',
+            # used externally for getUserMedia
+            'opensl/single_rw_fifo.cc',
+            'opensl/single_rw_fifo.h',
             'android/audio_device_template.h',
-            'android/audio_device_utility_android.cc',
-            'android/audio_device_utility_android.h',
             'android/audio_manager_jni.cc',
             'android/audio_manager_jni.h',
             'android/audio_record_jni.cc',
             'android/audio_record_jni.h',
             'android/audio_track_jni.cc',
             'android/audio_track_jni.h',
-            'android/fine_audio_buffer.cc',
-            'android/fine_audio_buffer.h',
-            'android/low_latency_event_posix.cc',
-            'android/low_latency_event.h',
-            'android/opensles_common.cc',
-            'android/opensles_common.h',
-            'android/opensles_input.cc',
-            'android/opensles_input.h',
-            'android/opensles_output.cc',
-            'android/opensles_output.h',
-            'android/single_rw_fifo.cc',
-            'android/single_rw_fifo.h',
           ],
           'conditions': [
-            ['OS=="android"', {
+            ['OS=="android" or moz_widget_toolkit_gonk==1', {
               'link_settings': {
                 'libraries': [
                   '-llog',
                   '-lOpenSLES',
                 ],
               },
+              'conditions': [
+                ['enable_android_opensl==1', {
+                  'sources': [
+                    'opensl/fine_audio_buffer.cc',
+                    'opensl/fine_audio_buffer.h',
+                    'opensl/low_latency_event_posix.cc',
+                    'opensl/low_latency_event.h',
+                    'opensl/opensles_common.cc',
+                    'opensl/opensles_common.h',
+                    'opensl/opensles_input.cc',
+                    'opensl/opensles_input.h',
+                    'opensl/opensles_output.h',
+                    'shared/audio_device_utility_shared.cc',
+                    'shared/audio_device_utility_shared.h',
+                  ],
+                }, {
+                  'sources': [
+                    'shared/audio_device_utility_shared.cc',
+                    'shared/audio_device_utility_shared.h',
+                  ],
+                }],
+                ['enable_android_opensl_output==1', {
+                  'sources': [
+                    'opensl/opensles_output.cc'
+                  ],
+                  'defines': [
+                    'WEBRTC_ANDROID_OPENSLES_OUTPUT',
+                  ],
+                }],
+              ],
             }],
             ['OS=="linux"', {
-              'defines': [
-                'LINUX_ALSA',
-              ],
               'link_settings': {
                 'libraries': [
                   '-ldl','-lX11',
                 ],
               },
-              'conditions': [
-                ['include_pulse_audio==1', {
-                  'defines': [
-                    'LINUX_PULSE',
-                  ],
-                  'sources': [
-                    'linux/audio_device_pulse_linux.cc',
-                    'linux/audio_device_pulse_linux.h',
-                    'linux/audio_mixer_manager_pulse_linux.cc',
-                    'linux/audio_mixer_manager_pulse_linux.h',
-                    'linux/pulseaudiosymboltable_linux.cc',
-                    'linux/pulseaudiosymboltable_linux.h',
-                  ],
-                }],
+            }],
+            ['include_alsa_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_ALSA_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_ALSA',
+              ],
+              'sources': [
+                'linux/alsasymboltable_linux.cc',
+                'linux/alsasymboltable_linux.h',
+                'linux/audio_device_alsa_linux.cc',
+                'linux/audio_device_alsa_linux.h',
+                'linux/audio_mixer_manager_alsa_linux.cc',
+                'linux/audio_mixer_manager_alsa_linux.h',
+              ],
+            }],
+            ['include_pulse_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_PULSEAUDIO_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_PULSE',
+              ],
+              'sources': [
+                'linux/audio_device_pulse_linux.cc',
+                'linux/audio_device_pulse_linux.h',
+                'linux/audio_mixer_manager_pulse_linux.cc',
+                'linux/audio_mixer_manager_pulse_linux.h',
+                'linux/pulseaudiosymboltable_linux.cc',
+                'linux/pulseaudiosymboltable_linux.h',
               ],
             }],
             ['OS=="mac"', {
               'link_settings': {
                 'libraries': [
                   '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
                   '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
                 ],
@@ -288,9 +343,8 @@
               ],
             },
           ],
         }],
       ],
     }], # include_tests
   ],
 }
-
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
@@ -11,31 +11,38 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/audio_device_impl.h"
 #include "webrtc/system_wrappers/interface/ref_count.h"
 
 #include <assert.h>
 #include <string.h>
 
-#if defined(_WIN32)
+#if defined(WEBRTC_DUMMY_AUDIO_BUILD)
+// Do not include platform-specific headers.
+#elif defined(_WIN32)
     #include "audio_device_utility_win.h"
     #include "audio_device_wave_win.h"
  #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
     #include "audio_device_core_win.h"
  #endif
-#elif defined(WEBRTC_ANDROID)
+#elif defined(WEBRTC_ANDROID_OPENSLES)
+// Android and Gonk
     #include <stdlib.h>
+    #include <dlfcn.h>
     #include "audio_device_utility_android.h"
     #include "webrtc/modules/audio_device/android/audio_device_template.h"
+#if !defined(WEBRTC_GONK)
+// Gonk only supports OpenSLES; Android can use either OpenSLES or JNI.
     #include "webrtc/modules/audio_device/android/audio_record_jni.h"
     #include "webrtc/modules/audio_device/android/audio_track_jni.h"
+#endif
     #include "webrtc/modules/audio_device/android/opensles_input.h"
     #include "webrtc/modules/audio_device/android/opensles_output.h"
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     #include "audio_device_utility_linux.h"
  #if defined(LINUX_ALSA)
     #include "audio_device_alsa_linux.h"
  #endif
  #if defined(LINUX_PULSE)
     #include "audio_device_pulse_linux.h"
  #endif
 #elif defined(WEBRTC_IOS)
@@ -160,17 +167,17 @@ int32_t AudioDeviceModuleImpl::CheckPlat
     PlatformType platform(kPlatformNotSupported);
 
 #if defined(_WIN32)
     platform = kPlatformWin32;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is WIN32");
 #elif defined(WEBRTC_ANDROID)
     platform = kPlatformAndroid;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is ANDROID");
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     platform = kPlatformLinux;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is LINUX");
 #elif defined(WEBRTC_IOS)
     platform = kPlatformIOS;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is IOS");
 #elif defined(WEBRTC_MAC)
     platform = kPlatformMac;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is MAC");
@@ -267,41 +274,60 @@ int32_t AudioDeviceModuleImpl::CreatePla
         // for Windows.
         //
         ptrAudioDeviceUtility = new AudioDeviceUtilityWindows(Id());
     }
 #endif  // #if defined(_WIN32)
 
     // Create the *Android OpenSLES* implementation of the Audio Device
     //
-#if defined(WEBRTC_ANDROID)
+#if defined(WEBRTC_ANDROID) || defined (WEBRTC_GONK)
     if (audioLayer == kPlatformDefaultAudio)
     {
-        // AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
-#if defined(WEBRTC_ANDROID_OPENSLES)
-        ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android OpenSLES Audio APIs will be utilized");
-#else
-        ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android JNI Audio APIs will be utilized");
+      // AudioRecordJni provides hardware AEC; OpenSlesOutput provides low latency.
+#if defined (WEBRTC_ANDROID_OPENSLES)
+      // Android and Gonk
+      // Check if the OpenSLES library is available before going further.
+      void* opensles_lib = dlopen("libOpenSLES.so", RTLD_LAZY);
+      if (opensles_lib) {
+        // That worked; close it for now and proceed normally.
+        dlclose(opensles_lib);
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+          // Create *Android OpenSLES Audio* implementation
+          ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
+          WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                       "Android OpenSLES Audio APIs will be utilized");
+        }
+      }
+#endif
+#if !defined(WEBRTC_GONK)
+      // Fall back to this case on Android 2.2, or when OpenSLES is unavailable.
+      if (ptrAudioDevice == NULL) {
+        // Create the *Android Java* implementation of the Audio Device
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+          // Create *Android JNI Audio* implementation
+          ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
+          WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Android JNI Audio APIs will be utilized");
+        }
+      }
 #endif
     }
 
     if (ptrAudioDevice != NULL)
     {
         // Create the Android implementation of the Device Utility.
         ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
     }
-    // END #if defined(WEBRTC_ANDROID)
+    // END #if defined(WEBRTC_ANDROID_OPENSLES)
 
     // Create the *Linux* implementation of the Audio Device
     //
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     if ((audioLayer == kLinuxPulseAudio) || (audioLayer == kPlatformDefaultAudio))
     {
 #if defined(LINUX_PULSE)
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "attempting to use the Linux PulseAudio APIs...");
 
         // create *Linux PulseAudio* implementation
         AudioDeviceLinuxPulse* pulseDevice = new AudioDeviceLinuxPulse(Id());
         if (pulseDevice->Init() != -1)
@@ -339,17 +365,17 @@ int32_t AudioDeviceModuleImpl::CreatePla
     if (ptrAudioDevice != NULL)
     {
         // Create the Linux implementation of the Device Utility.
         // This class is independent of the selected audio layer
         // for Linux.
         //
         ptrAudioDeviceUtility = new AudioDeviceUtilityLinux(Id());
     }
-#endif  // #if defined(WEBRTC_LINUX)
+#endif  // #if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 
     // Create the *iPhone* implementation of the Audio Device
     //
 #if defined(WEBRTC_IOS)
     if (audioLayer == kPlatformDefaultAudio)
     {
         // Create iOS Audio Device implementation.
         ptrAudioDevice = new AudioDeviceIOS(Id());
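
The Android device selection added above reduces to a probe-then-fallback pattern; the following condenses the same flow for readability (no new behavior, names as in the hunk):

    // Probe for OpenSLES first; fall back to the JNI device if absent.
    void* opensles = dlopen("libOpenSLES.so", RTLD_LAZY);
    if (opensles != NULL) {
      dlclose(opensles);  // Present; close again and build the OpenSLES device.
      ptrAudioDevice =
          new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
    }
    #if !defined(WEBRTC_GONK)
    if (ptrAudioDevice == NULL)  // e.g. Android 2.2 without OpenSLES.
      ptrAudioDevice =
          new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
    #endif
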
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
@@ -41,17 +41,17 @@ bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2,
     const uint32_t length)
 {
 	return ((_strnicmp(str1, str2, length) == 0) ? true : false);
 }
 
 }  // namespace webrtc
 
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 // ============================================================================
 //                                 Linux & Mac
 // ============================================================================
 
 #include <stdio.h>      // getchar
 #include <string.h>     // strncasecmp
 #include <sys/time.h>   // gettimeofday
@@ -104,9 +104,9 @@ uint32_t AudioDeviceUtility::GetTimeInMS
 bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2, const uint32_t length)
 {
     return (strncasecmp(str1, str2, length) == 0)?true: false;
 }
 
 }  // namespace webrtc
 
-#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
--- a/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -14,18 +14,18 @@
 #include <AudioUnit/AudioUnit.h>
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
 namespace webrtc {
 class ThreadWrapper;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
+const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
+const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
 
 const uint32_t N_REC_CHANNELS = 1;  // default is mono recording
 const uint32_t N_PLAY_CHANNELS = 1;  // default is mono playout
 const uint32_t N_DEVICE_CHANNELS = 8;
 
 const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
 const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
 
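The rate correction above propagates into the derived constants just below it: at 44100 Hz the 10 ms engine buffers (N_REC_SAMPLES_PER_SEC / 100) come to 441 samples, where the old 44000 placeholder gave 440; the func_test_manager change later in this patch drops the matching 44100-to-44000 remapping.
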
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -14,16 +14,23 @@
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 #include "webrtc/modules/audio_device/linux/audio_device_alsa_linux.h"
 
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/sleep.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#include "Latency.h"
+
+#define LOG_FIRST_CAPTURE(x) LogTime(AsyncLatencyLogger::AudioCaptureBase, \
+                                     reinterpret_cast<uint64_t>(x), 0)
+#define LOG_CAPTURE_FRAMES(x, frames) LogLatency(AsyncLatencyLogger::AudioCapture, \
+                                                 reinterpret_cast<uint64_t>(x), frames)
+
 webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
 
 // Accesses ALSA functions through our late-binding symbol table instead of
 // directly. This way we don't have to link to libasound, which means our binary
 // will work on systems that don't have it.
 #define LATE(sym) \
   LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
 
@@ -90,16 +97,17 @@ AudioDeviceLinuxALSA::AudioDeviceLinuxAL
     _playChannels(ALSA_PLAYOUT_CH),
     _recordingBuffer(NULL),
     _playoutBuffer(NULL),
     _recordingFramesLeft(0),
     _playoutFramesLeft(0),
     _playBufType(AudioDeviceModule::kFixedBufferSize),
     _initialized(false),
     _recording(false),
+    _firstRecord(true),
     _playing(false),
     _recIsInitialized(false),
     _playIsInitialized(false),
     _AGC(false),
     _recordingDelay(0),
     _playoutDelay(0),
     _playWarning(0),
     _playError(0),
@@ -924,17 +932,18 @@ int32_t AudioDeviceLinuxALSA::RecordingD
 
     memset(name, 0, kAdmMaxDeviceNameSize);
 
     if (guid != NULL)
     {
         memset(guid, 0, kAdmMaxGuidSize);
     }
 
-    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize,
+                          guid, kAdmMaxGuidSize);
 }
 
 int16_t AudioDeviceLinuxALSA::RecordingDevices()
 {
 
     return (int16_t)GetDevicesInfo(0, false);
 }
 
@@ -1386,16 +1395,17 @@ int32_t AudioDeviceLinuxALSA::StartRecor
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "   failed to alloc recording buffer");
         _recording = false;
         return -1;
     }
     // RECORDING
     const char* threadName = "webrtc_audio_module_capture_thread";
+    _firstRecord = true;
     _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc,
                                                 this,
                                                 kRealtimePriority,
                                                 threadName);
     if (_ptrThreadRec == NULL)
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to create the rec audio thread");
@@ -1572,40 +1582,41 @@ int32_t AudioDeviceLinuxALSA::StartPlayo
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "    failed to create the play audio thread");
         _playing = false;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
 
+    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+                     "     playout snd_pcm_prepare failed (%s)\n",
+                     LATE(snd_strerror)(errVal));
+        // Just log the error; if snd_pcm_open fails, it will return -1.
+    }
+
+
     unsigned int threadID(0);
     if (!_ptrThreadPlay->Start(threadID))
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to start the play audio thread");
         _playing = false;
         delete _ptrThreadPlay;
         _ptrThreadPlay = NULL;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
     _playThreadID = threadID;
 
-    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
-    if (errVal < 0)
-    {
-        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                     "     playout snd_pcm_prepare failed (%s)\n",
-                     LATE(snd_strerror)(errVal));
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
-
     return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::StopPlayout()
 {
 
     {
         CriticalSectionScoped lock(&_critSect);
@@ -1775,17 +1786,19 @@ void AudioDeviceLinuxALSA::ClearRecordin
 //                                 Private Methods
 // ============================================================================
 
 int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
     const int32_t function,
     const bool playback,
     const int32_t enumDeviceNo,
     char* enumDeviceName,
-    const int32_t ednLen) const
+    const int32_t ednLen,
+    char* enumDeviceId,
+    const int32_t ediLen) const
 {
 
     // Device enumeration based on libjingle implementation
     // by Tristan Schmelcher at Google Inc.
 
     const char *type = playback ? "Output" : "Input";
     // dmix and dsnoop are only for playback and capture, respectively, but ALSA
     // stupidly includes them in both lists.
@@ -1814,16 +1827,18 @@ int32_t AudioDeviceLinuxALSA::GetDevices
             return -1;
         }
 
         enumCount++; // default is 0
         if ((function == FUNC_GET_DEVICE_NAME ||
             function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
         {
             strcpy(enumDeviceName, "default");
+            if (enumDeviceId)
+                memset(enumDeviceId, 0, ediLen);
 
             err = LATE(snd_device_name_free_hint)(hints);
             if (err != 0)
             {
                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                              "GetDevicesInfo - device name free hint error: %s",
                              LATE(snd_strerror)(err));
             }
@@ -1876,28 +1891,38 @@ int32_t AudioDeviceLinuxALSA::GetDevices
 
                 }
                 if ((FUNC_GET_DEVICE_NAME == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, desc, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                     // Replace '\n' with '-'.
                     char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
                     if (pret)
                         *pret = '-';
                 }
                 if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, name, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                 }
 
                 if (keepSearching)
                     ++enumCount;
 
                 if (desc != name)
                     free(desc);
@@ -1912,17 +1937,17 @@ int32_t AudioDeviceLinuxALSA::GetDevices
         err = LATE(snd_device_name_free_hint)(hints);
         if (err != 0)
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "GetDevicesInfo - device name free hint error: %s",
                          LATE(snd_strerror)(err));
             // Continue and return true anyway, since we did get the whole list.
         }
-    }
+      }
 
     if (FUNC_GET_NUM_OF_DEVICE == function)
     {
         if (enumCount == 1) // only default?
             enumCount = 0;
         return enumCount; // Normal return point for function 0
     }
 
@@ -2197,16 +2222,21 @@ bool AudioDeviceLinuxALSA::RecThreadProc
         memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
                buffer, size);
         _recordingFramesLeft -= frames;
 
         if (!_recordingFramesLeft)
         { // buf is full
             _recordingFramesLeft = _recordingFramesIn10MS;
 
+            if (_firstRecord) {
+              LOG_FIRST_CAPTURE(this);
+              _firstRecord = false;
+            }
+            LOG_CAPTURE_FRAMES(this, _recordingFramesIn10MS);
             // store the recorded buffer (no action will be taken if the
             // #recorded samples is not a full buffer)
             _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
                                                _recordingFramesIn10MS);
 
             uint32_t currentMicLevel = 0;
             uint32_t newMicLevel = 0;
 
@@ -2302,10 +2332,13 @@ bool AudioDeviceLinuxALSA::KeyPressed() 
     state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
 
   // Save old state
   memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
   return (state != 0);
 #else
   return false;
 #endif
+#else
+  return false;
+#endif
 }
 }  // namespace webrtc
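
The StartPlayout hunk above hoists snd_pcm_prepare() ahead of starting the play thread; condensed, the new sequence is the following (the race rationale is this note's reading of the change, not stated in the patch):

    /* 1. allocate _playoutBuffer
     * 2. create _ptrThreadPlay (not yet running)
     * 3. snd_pcm_prepare(_handlePlayout)   -- PCM handle prepared up front
     * 4. _ptrThreadPlay->Start(threadID)
     * Preparing before the thread runs keeps its first write from racing
     * against an unprepared device.
     */
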
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -161,17 +161,19 @@ public:
 public:
     virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) OVERRIDE;
 
 private:
     int32_t GetDevicesInfo(const int32_t function,
                            const bool playback,
                            const int32_t enumDeviceNo = 0,
                            char* enumDeviceName = NULL,
-                           const int32_t ednLen = 0) const;
+                           const int32_t ednLen = 0,
+                           char* enumDeviceID = NULL,
+                           const int32_t ediLen = 0) const;
     int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);
 
 private:
     bool KeyPressed() const;
 
 private:
     void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) { _critSect.Enter(); };
     void UnLock() UNLOCK_FUNCTION(_critSect) { _critSect.Leave(); };
@@ -227,16 +229,17 @@ private:
     uint32_t _recordingFramesLeft;
     uint32_t _playoutFramesLeft;
 
     AudioDeviceModule::BufferType _playBufType;
 
 private:
     bool _initialized;
     bool _recording;
+    bool _firstRecord;
     bool _playing;
     bool _recIsInitialized;
     bool _playIsInitialized;
     bool _AGC;
 
     snd_pcm_sframes_t _recordingDelay;
     snd_pcm_sframes_t _playoutDelay;
 
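A caller-side sketch of the widened enumeration API above; adm stands in for an initialized AudioDeviceLinuxALSA and is hypothetical, while the buffer sizes are the existing kAdmMax* constants:

    char name[kAdmMaxDeviceNameSize];
    char guid[kAdmMaxGuidSize];
    // With enumDeviceId threaded through GetDevicesInfo(), the guid buffer
    // now receives the ALSA device id alongside the descriptive name.
    if (adm->RecordingDeviceName(0, name, guid) == 0)
      printf("capture device 0: %s (id: %s)\n", name, guid);
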
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -197,23 +197,25 @@ int32_t AudioDeviceLinuxPulse::Init()
         return -1;
     }
 
     _playWarning = 0;
     _playError = 0;
     _recWarning = 0;
     _recError = 0;
 
+#ifdef USE_X11
     // Get X display handle for typing detection
     _XDisplay = XOpenDisplay(NULL);
     if (!_XDisplay)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
           "  failed to open X display, typing detection will not work");
     }
+#endif
 
     // RECORDING
     const char* threadName = "webrtc_audio_module_rec_thread";
     _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this,
                                                 kRealtimePriority, threadName);
     if (_ptrThreadRec == NULL)
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
@@ -318,21 +320,23 @@ int32_t AudioDeviceLinuxPulse::Terminate
     // Terminate PulseAudio
     if (TerminatePulseAudio() < 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "  failed to terminate PulseAudio");
         return -1;
     }
 
+#ifdef USE_X11
     if (_XDisplay)
     {
       XCloseDisplay(_XDisplay);
       _XDisplay = NULL;
     }
+#endif
 
     _initialized = false;
     _outputDeviceIsSpecified = false;
     _inputDeviceIsSpecified = false;
 
     return 0;
 }
 
@@ -2408,16 +2412,28 @@ void AudioDeviceLinuxPulse::PaStreamRead
     if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, &_tempSampleDataSize)
         != 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "  Can't read data!");
         return;
     }
 
+    // PulseAudio record streams can have holes (for reasons not entirely clear
+    // to the PA developers themselves). Since version 4 of PA, these are passed
+    // over to the application (us), signaled by a non-zero sample data size
+    // (the size of the hole) and a NULL sample data pointer.
+    // We handle stream holes as recommended by PulseAudio, i.e. by skipping
+    // them, which is done with a stream drop.
+    if (_tempSampleDataSize && !_tempSampleData) {
+        LATE(pa_stream_drop)(_recStream);
+        _tempSampleDataSize = 0; // reset
+        return;
+    }
+
     // Since we consume the data asynchronously on a different thread, we have
     // to temporarily disable the read callback or else Pulse will call it
     // continuously until we consume the data. We re-enable it below
     DisableReadCallback();
     _timeEventRec.Set();
 }
 
 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
@@ -3020,17 +3036,17 @@ bool AudioDeviceLinuxPulse::RecThreadPro
 
     }  // _recording
 
     UnLock();
     return true;
 }
 
 bool AudioDeviceLinuxPulse::KeyPressed() const{
-
+#ifdef USE_X11
   char szKey[32];
   unsigned int i = 0;
   char state = 0;
 
   if (!_XDisplay)
     return false;
 
   // Check key map status
@@ -3038,10 +3054,13 @@ bool AudioDeviceLinuxPulse::KeyPressed()
 
   // A bit change in keymap means a key is pressed
   for (i = 0; i < sizeof(szKey); i++)
     state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
 
   // Save old state
   memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
   return (state != 0);
+#else
+  return false;
+#endif
 }
 }
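
The hole handling added to PaStreamReadCallback above follows the pa_stream_peek() contract, summarized here for reference (a restatement, not new code):

    /* pa_stream_peek(s, &data, &nbytes) == 0 leaves three cases:
     *   data != NULL, nbytes > 0 : nbytes bytes of samples to consume
     *   data == NULL, nbytes > 0 : a hole of nbytes bytes (surfaced since
     *                              PA 4.0); pa_stream_drop(s) skips it
     *   nbytes == 0              : nothing buffered yet
     */
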
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -10,17 +10,19 @@
 
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
+#ifdef USE_X11
 #include <X11/Xlib.h>
+#endif
 #include <pulse/pulseaudio.h>
 
 // We define this flag if it's missing from our headers, because we want to be
 // able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY
 // if run against a recent version of the library.
 #ifndef PA_STREAM_ADJUST_LATENCY
 #define PA_STREAM_ADJUST_LATENCY 0x2000U
 #endif
@@ -368,14 +370,16 @@ private:
     pa_stream* _recStream;
     pa_stream* _playStream;
     uint32_t _recStreamFlags;
     uint32_t _playStreamFlags;
     pa_buffer_attr _playBufferAttr;
     pa_buffer_attr _recBufferAttr;
 
     char _oldKeyState[32];
+#ifdef USE_X11
     Display* _XDisplay;
+#endif
 };
 
 }
 
 #endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -5,53 +5,53 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h"
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 #include <dlfcn.h>
 #endif
 
 // TODO(grunell): Either put inside webrtc namespace or use webrtc:: instead.
 using namespace webrtc;
 
 namespace webrtc_adm_linux {
 
 inline static const char *GetDllError() {
-#ifdef WEBRTC_LINUX
-  char *err = dlerror();
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
+  const char *err = dlerror();
   if (err) {
     return err;
   } else {
     return "No error";
   }
 #else
 #error Not implemented
 #endif
 }
 
 DllHandle InternalLoadDll(const char dll_name[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   DllHandle handle = dlopen(dll_name, RTLD_NOW);
 #else
 #error Not implemented
 #endif
   if (handle == kInvalidDllHandle) {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                "Can't load %s : %s", dll_name, GetDllError());
   }
   return handle;
 }
 
 void InternalUnloadDll(DllHandle handle) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 // TODO(pbos): Remove this dlclose() exclusion when leaks and suppressions from
 // here are gone (or AddressSanitizer can display them properly).
 //
 // Skip dlclose() on AddressSanitizer as leaks including this module in the
 // stack trace gets displayed as <unknown module> instead of the actual library
 // -> it can not be suppressed.
 // https://code.google.com/p/address-sanitizer/issues/detail?id=89
 //
@@ -66,19 +66,19 @@ void InternalUnloadDll(DllHandle handle)
 #else
 #error Not implemented
 #endif
 }
 
 static bool LoadSymbol(DllHandle handle,
                        const char *symbol_name,
                        void **symbol) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   *symbol = dlsym(handle, symbol_name);
-  char *err = dlerror();
+  const char *err = dlerror();
   if (err) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "Error loading symbol %s : %d", symbol_name, err);
     return false;
   } else if (!*symbol) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "Symbol %s is NULL", symbol_name);
     return false;
@@ -91,17 +91,17 @@ static bool LoadSymbol(DllHandle handle,
 
 // This routine MUST assign SOME value for every symbol, even if that value is
 // NULL, or else some symbols may be left with uninitialized data that the
 // caller may later interpret as a valid address.
 bool InternalLoadSymbols(DllHandle handle,
                          int num_symbols,
                          const char *const symbol_names[],
                          void *symbols[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   // Clear any old errors.
   dlerror();
 #endif
   for (int i = 0; i < num_symbols; ++i) {
     if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
       return false;
     }
   }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -20,17 +20,17 @@
 
 // This file provides macros for creating "symbol table" classes to simplify the
 // dynamic loading of symbols from DLLs. Currently the implementation only
 // supports Linux and pure C symbols.
 // See talk/sound/pulseaudiosymboltable.(h|cc) for an example.
 
 namespace webrtc_adm_linux {
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 typedef void *DllHandle;
 
 const DllHandle kInvalidDllHandle = NULL;
 #else
 #error Not implemented
 #endif
 
 // These are helpers for use only by the class below.
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
@@ -24,16 +24,20 @@
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h"
 
 namespace webrtc_adm_linux_pulse {
 
+#if defined(__OpenBSD__) || defined(WEBRTC_GONK)
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so")
+#else
 LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
+#endif
 #define X(sym) \
     LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
 PULSE_AUDIO_SYMBOLS_LIST
 #undef X
 LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
 
 }  // namespace webrtc_adm_linux_pulse
--- a/media/webrtc/trunk/webrtc/modules/audio_device/mac/audio_device_mac.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/mac/audio_device_mac.cc
@@ -18,16 +18,19 @@
 #include "webrtc/system_wrappers/interface/trace.h"
 
 #include <ApplicationServices/ApplicationServices.h>
 #include <assert.h>
 #include <libkern/OSAtomic.h>   // OSAtomicCompareAndSwap()
 #include <mach/mach.h>          // mach_task_self()
 #include <sys/sysctl.h>         // sysctlbyname()
 
+#ifdef MOZILLA_INTERNAL_API
+#include <OSXRunLoopSingleton.h>
+#endif
 
 
 namespace webrtc
 {
 
 #define WEBRTC_CA_RETURN_ON_ERR(expr)                                   \
     do {                                                                \
         err = expr;                                                     \
@@ -379,20 +382,24 @@ int32_t AudioDeviceMac::Init()
     // Setting RunLoop to NULL here instructs HAL to manage its own thread for
     // notifications. This was the default behaviour on OS X 10.5 and earlier,
     // but now must be explicitly specified. HAL would otherwise try to use the
     // main thread to issue notifications.
     AudioObjectPropertyAddress propertyAddress = {
             kAudioHardwarePropertyRunLoop,
             kAudioObjectPropertyScopeGlobal,
             kAudioObjectPropertyElementMaster };
+#ifdef MOZILLA_INTERNAL_API
+    mozilla_set_coreaudio_notification_runloop_if_needed();
+#else
     CFRunLoopRef runLoop = NULL;
     UInt32 size = sizeof(CFRunLoopRef);
     WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(kAudioObjectSystemObject,
             &propertyAddress, 0, NULL, size, &runLoop));
+#endif
 
     // Listen for any device changes.
     propertyAddress.mSelector = kAudioHardwarePropertyDevices;
     WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(kAudioObjectSystemObject,
             &propertyAddress, &objectListenerProc, this));
 
     // Determine if this is a MacBook Pro
     _macBookPro = false;
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_opensles_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_opensles_android.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.h
@@ -0,0 +1,6 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_manager_jni.h"
+
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/fine_audio_buffer.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/fine_audio_buffer.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/low_latency_event_posix.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/low_latency_event_posix.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_common.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_common.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_input.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_input.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_output.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_output.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/single_rw_fifo.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/single_rw_fifo.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_utility_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_utility_android.h"
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -217,17 +217,17 @@ class AudioDeviceAPITest: public testing
                 kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kLinuxAlsaAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kLinuxPulseAudio)) == NULL);
     // Create default implementation instance
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kWindowsWaveAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
     // create default implementation instance
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
     audio_device_->AddRef();
@@ -1642,17 +1642,17 @@ TEST_F(AudioDeviceAPITest, CPULoad) {
   EXPECT_EQ(0, load);
 #else
   EXPECT_EQ(-1, audio_device_->CPULoad(&load));
 #endif
 }
 
 // TODO(kjellander): Fix flakiness causing failures on Windows.
 // TODO(phoglund):  Fix flakiness causing failures on Linux.
-#if !defined(_WIN32) && !defined(WEBRTC_LINUX)
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_BSD)
 TEST_F(AudioDeviceAPITest, StartAndStopRawOutputFileRecording) {
   // NOTE: this API is better tested in a functional test
   CheckInitialPlayoutStates();
 
   // fail tests
   EXPECT_EQ(-1, audio_device_->StartRawOutputFileRecording(NULL));
 
   // bulk tests
@@ -1711,50 +1711,50 @@ TEST_F(AudioDeviceAPITest, StartAndStopR
       GetFilename("raw_input_not_recording.pcm")));
   EXPECT_EQ(0, audio_device_->StopRawInputFileRecording());
 
   // results after this test:
   //
   // - size of raw_input_not_recording.pcm shall be 0
  // - size of raw_input_recording.pcm shall be > 0
 }
-#endif  // !WIN32 && !WEBRTC_LINUX
+#endif  // !WIN32 && !WEBRTC_LINUX && !WEBRTC_BSD
 
 TEST_F(AudioDeviceAPITest, RecordingSampleRate) {
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->RecordingSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 
   // @TODO(xians) - add tests for all platforms here...
 }
 
 TEST_F(AudioDeviceAPITest, PlayoutSampleRate) {
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->PlayoutSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 }
 
 TEST_F(AudioDeviceAPITest, ResetAudioDevice) {
   CheckInitialPlayoutStates();
   CheckInitialRecordingStates();
   EXPECT_EQ(0, audio_device_->SetPlayoutDevice(MACRO_DEFAULT_DEVICE));
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -318,22 +318,16 @@ int32_t AudioTransportImpl::NeedMorePlay
                 const uint8_t nChannelsIn = packet->nChannels;
                 const uint32_t samplesPerSecIn = packet->samplesPerSec;
                 const uint16_t nBytesPerSampleIn =
                     packet->nBytesPerSample;
 
                 int32_t fsInHz(samplesPerSecIn);
                 int32_t fsOutHz(samplesPerSec);
 
-                if (fsInHz == 44100)
-                    fsInHz = 44000;
-
-                if (fsOutHz == 44100)
-                    fsOutHz = 44000;
-
                 if (nChannelsIn == 2 && nBytesPerSampleIn == 4)
                 {
                     // input is stereo => we will resample in stereo
                     ret = _resampler.ResetIfNeeded(fsInHz, fsOutHz,
                                                    kResamplerSynchronousStereo);
                     if (ret == 0)
                     {
                         if (nChannels == 2)
@@ -1234,17 +1228,17 @@ int32_t FuncTestManager::TestAudioTransp
 
         EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
 
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (samplesPerSec == 48000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (samplesPerSec == 44100 || samplesPerSec == 44000) {
+        } else if (samplesPerSec == 44100) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1467,17 +1461,17 @@ int32_t FuncTestManager::TestSpeakerVolu
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (44100 == samplesPerSec || samplesPerSec == 44000) {
+        } else if (44100 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1568,17 +1562,17 @@ int32_t FuncTestManager::TestSpeakerMute
     EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile48.c_str());
-        else if (44100 == samplesPerSec || 44000 == samplesPerSec)
+        else if (44100 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile44.c_str());
         else
         {
             TEST_LOG("\nERROR: Sample rate (%d) is not supported!\n \n",
                      samplesPerSec);
             return -1;
         }
         EXPECT_EQ(0, audioDevice->StartPlayout());
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
@@ -28,16 +28,26 @@
 #include "webrtc/modules/audio_processing/aec/aec_common.h"
 #include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
 #include "webrtc/modules/audio_processing/aec/aec_rdft.h"
 #include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
 #include "webrtc/modules/audio_processing/utility/ring_buffer.h"
 #include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"
 #include "webrtc/typedefs.h"
 
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+extern int AECDebug(void);
+extern uint32_t AECDebugMaxSize(void);
+extern void AECDebugEnable(uint32_t enable);
+extern void AECDebugFilenameBase(char *buffer, size_t size);
+static void OpenCoreDebugFiles(AecCore* aec, int *instance_count);
+#endif
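+// The AECDebug* hooks above are Mozilla additions implemented elsewhere in
+// the tree; they gate the raw AEC dump files managed by OpenCoreDebugFiles().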
+
 // Buffer size (samples)
 static const size_t kBufSizePartitions = 250;  // 1 second of audio in 16 kHz.
 
 // Metrics
 static const int subCountLen = 4;
 static const int countLen = 50;
 
 // Quantities to control H band scaling for SWB input
@@ -785,16 +791,80 @@ static void TimeToFrequency(float time_d
   freq_data[0][0] = time_data[0];
   freq_data[0][PART_LEN] = time_data[1];
   for (i = 1; i < PART_LEN; i++) {
     freq_data[0][i] = time_data[2 * i];
     freq_data[1][i] = time_data[2 * i + 1];
   }
 }
 
+#ifdef WEBRTC_AEC_DEBUG_DUMP
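+// Open or close the raw dump files so they track the runtime AECDebug()
+// setting; called at creation time and on each processed block.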
+static void
+OpenCoreDebugFiles(AecCore* aec,
+                   int *instance_count)
+{
+  int error = 0;
+  // XXX  If this impacts performance (opening files here), move the file
+  // open to Trace::set_aec_debug() and just grab the handles here.
+  if (AECDebug() && !aec->farFile) {
+    char path[1024];
+    char *filename;
+    path[0] = '\0';
+    AECDebugFilenameBase(path, sizeof(path));
+    filename = path + strlen(path);
+    if (&path[sizeof(path)] - filename < 128) {
+      return; // avoid a lot of snprintf calls and checks below
+    }
+    if (filename > path) {
+#ifdef XP_WIN
+      if (*(filename-1) != '\\') {
+        *filename++ = '\\';
+      }
+#else
+      if (*(filename-1) != '/') {
+        *filename++ = '/';
+      }
+#endif
+    }
+    sprintf(filename, "aec_far%d.pcm", *instance_count);
+    aec->farFile = fopen(path, "wb");
+    sprintf(filename, "aec_near%d.pcm", *instance_count);
+    aec->nearFile = fopen(path, "wb");
+    sprintf(filename, "aec_out%d.pcm", *instance_count);
+    aec->outFile = fopen(path, "wb");
+    sprintf(filename, "aec_out_linear%d.pcm", *instance_count);
+    aec->outLinearFile = fopen(path, "wb");
+    aec->debugWritten = 0;
+    if (!aec->outLinearFile || !aec->outFile || !aec->nearFile ||
+        !aec->farFile) {
+      error = 1;
+    }
+  }
+  if (error ||
+      (!AECDebug() && aec->farFile)) {
+    if (aec->farFile) {
+      fclose(aec->farFile);
+    }
+    if (aec->nearFile) {
+      fclose(aec->nearFile);
+    }
+    if (aec->outFile) {
+      fclose(aec->outFile);
+    }
+    if (aec->outLinearFile) {
+      fclose(aec->outLinearFile);
+    }
+    aec->outLinearFile = aec->outFile = aec->nearFile = aec->farFile = NULL;
+    aec->debugWritten = 0;
+  }
+}
+#endif
+
 static void NonLinearProcessing(AecCore* aec, float* output, float* outputH) {
   float efw[2][PART_LEN1], xfw[2][PART_LEN1];
   complex_t comfortNoiseHband[PART_LEN1];
   float fft[PART_LEN2];
   float scale, dtmp;
   float nlpGainHband;
   int i;
 
@@ -1056,18 +1125,25 @@ static void ProcessBlock(AecCore* aec) {
 
   // ---------- Ooura fft ----------
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
   {
     float farend[PART_LEN];
     float* farend_ptr = NULL;
     WebRtc_ReadBuffer(aec->far_time_buf, (void**)&farend_ptr, farend, 1);
-    rtc_WavWriteSamples(aec->farFile, farend_ptr, PART_LEN);
-    rtc_WavWriteSamples(aec->nearFile, nearend_ptr, PART_LEN);
+    OpenCoreDebugFiles(aec, &webrtc_aec_instance_count);
+    if (aec->farFile) {
+      (void)fwrite(farend_ptr, sizeof(float), PART_LEN, aec->farFile);
+      (void)fwrite(nearend_ptr, sizeof(float), PART_LEN, aec->nearFile);
+      aec->debugWritten += sizeof(float) * PART_LEN;
+      if (aec->debugWritten >= AECDebugMaxSize()) {
+        AECDebugEnable(0);
+      }
+    }
   }
 #endif
 
   // We should always have at least one element stored in |far_buf|.
   assert(WebRtc_available_read(aec->far_buf) > 0);
   WebRtc_ReadBuffer(aec->far_buf, (void**)&xf_ptr, &xf[0][0], 1);
 
   // Near fft
@@ -1207,18 +1283,21 @@ static void ProcessBlock(AecCore* aec) {
   // Store the output block.
   WebRtc_WriteBuffer(aec->outFrBuf, output, PART_LEN);
   // For H band
   if (aec->sampFreq == 32000) {
     WebRtc_WriteBuffer(aec->outFrBufH, outputH, PART_LEN);
   }
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
-  rtc_WavWriteSamples(aec->outLinearFile, e, PART_LEN);
-  rtc_WavWriteSamples(aec->outFile, output, PART_LEN);
+  OpenCoreDebugFiles(aec, &webrtc_aec_instance_count);
+  if (aec->outLinearFile) {
+    (void)fwrite(e, sizeof(float), PART_LEN, aec->outLinearFile);
+    (void)fwrite(output, sizeof(float), PART_LEN, aec->outFile);
+  }
 #endif
 }
 
 int WebRtcAec_CreateAec(AecCore** aecInst) {
   AecCore* aec = malloc(sizeof(AecCore));
   *aecInst = aec;
   if (aec == NULL) {
     return -1;
@@ -1273,16 +1352,18 @@ int WebRtcAec_CreateAec(AecCore** aecIns
       WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * PART_LEN);
   if (!aec->far_time_buf) {
     WebRtcAec_FreeAec(aec);
     aec = NULL;
     return -1;
   }
   aec->farFile = aec->nearFile = aec->outFile = aec->outLinearFile = NULL;
   aec->debug_dump_count = 0;
+  aec->debugWritten = 0;
+  OpenCoreDebugFiles(aec, &webrtc_aec_instance_count);
 #endif
   aec->delay_estimator_farend =
       WebRtc_CreateDelayEstimatorFarend(PART_LEN1, kHistorySizeBlocks);
   if (aec->delay_estimator_farend == NULL) {
     WebRtcAec_FreeAec(aec);
     aec = NULL;
     return -1;
   }
@@ -1331,20 +1412,23 @@ int WebRtcAec_FreeAec(AecCore* aec) {
 
   WebRtc_FreeBuffer(aec->nearFrBufH);
   WebRtc_FreeBuffer(aec->outFrBufH);
 
   WebRtc_FreeBuffer(aec->far_buf);
   WebRtc_FreeBuffer(aec->far_buf_windowed);
 #ifdef WEBRTC_AEC_DEBUG_DUMP
   WebRtc_FreeBuffer(aec->far_time_buf);
-  rtc_WavClose(aec->farFile);
-  rtc_WavClose(aec->nearFile);
-  rtc_WavClose(aec->outFile);
-  rtc_WavClose(aec->outLinearFile);
+  if (aec->farFile) {
+    // OpenCoreDebugFiles never leaves one file open without the others.
+    fclose(aec->farFile);
+    fclose(aec->nearFile);
+    fclose(aec->outFile);
+    fclose(aec->outLinearFile);
+  }
 #endif
   WebRtc_FreeDelayEstimator(aec->delay_estimator);
   WebRtc_FreeDelayEstimatorFarend(aec->delay_estimator_farend);
 
   free(aec);
   return 0;
 }
 
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_internal.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_internal.h
@@ -146,16 +146,17 @@ struct AecCore {
   // each time.
   int debug_dump_count;
 
   RingBuffer* far_time_buf;
-  rtc_WavWriter* farFile;
-  rtc_WavWriter* nearFile;
-  rtc_WavWriter* outFile;
-  rtc_WavWriter* outLinearFile;
+  FILE* farFile;
+  FILE* nearFile;
+  FILE* outFile;
+  FILE* outLinearFile;
+  uint32_t debugWritten;
 #endif
 };
 
 typedef void (*WebRtcAec_FilterFar_t)(AecCore* aec, float yf[2][PART_LEN1]);
 extern WebRtcAec_FilterFar_t WebRtcAec_FilterFar;
 typedef void (*WebRtcAec_ScaleErrorSignal_t)(AecCore* aec,
                                              float ef[2][PART_LEN1]);
 extern WebRtcAec_ScaleErrorSignal_t WebRtcAec_ScaleErrorSignal;
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation.c
@@ -22,16 +22,26 @@
 
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_processing/aec/aec_core.h"
 #include "webrtc/modules/audio_processing/aec/aec_resampler.h"
 #include "webrtc/modules/audio_processing/aec/echo_cancellation_internal.h"
 #include "webrtc/modules/audio_processing/utility/ring_buffer.h"
 #include "webrtc/typedefs.h"
 
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+extern int AECDebug(void);
+extern uint32_t AECDebugMaxSize(void);
+extern void AECDebugEnable(uint32_t enable);
+extern void AECDebugFilenameBase(char *buffer, size_t size);
+static void OpenDebugFiles(aecpc_t* aecpc, int *instance_count);
+#endif
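+// The AECDebug* hooks above are Mozilla additions implemented elsewhere in
+// the tree; they gate the dump files managed by OpenDebugFiles() below.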
+
 // Measured delays [ms]
 // Device                Chrome  GTP
 // MacBook Air           10
 // MacBook Retina        10      100
 // MacPro                30?
 //
 // Win7 Desktop          70      80?
 // Win7 T430s            110
@@ -153,44 +159,39 @@ int32_t WebRtcAec_Create(void** aecInst)
     aecpc = NULL;
     return -1;
   }
 
   aecpc->initFlag = 0;
   aecpc->lastError = 0;
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
-  {
-    char filename[64];
-    sprintf(filename, "aec_buf%d.dat", webrtc_aec_instance_count);
-    aecpc->bufFile = fopen(filename, "wb");
-    sprintf(filename, "aec_skew%d.dat", webrtc_aec_instance_count);
-    aecpc->skewFile = fopen(filename, "wb");
-    sprintf(filename, "aec_delay%d.dat", webrtc_aec_instance_count);
-    aecpc->delayFile = fopen(filename, "wb");
-    webrtc_aec_instance_count++;
-  }
+  aecpc->bufFile = aecpc->skewFile = aecpc->delayFile = NULL;
+  OpenDebugFiles(aecpc, &webrtc_aec_instance_count);
 #endif
 
   return 0;
 }
 
 int32_t WebRtcAec_Free(void* aecInst) {
   aecpc_t* aecpc = aecInst;
 
   if (aecpc == NULL) {
     return -1;
   }
 
   WebRtc_FreeBuffer(aecpc->far_pre_buf);
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
-  fclose(aecpc->bufFile);
-  fclose(aecpc->skewFile);
-  fclose(aecpc->delayFile);
+  if (aecpc->bufFile) {
+    // OpenDebugFiles never leaves one file open without the others.
+    fclose(aecpc->bufFile);
+    fclose(aecpc->skewFile);
+    fclose(aecpc->delayFile);
+  }
 #endif
 
   WebRtcAec_FreeAec(aecpc->aec);
   WebRtcAec_FreeResampler(aecpc->resampler);
   free(aecpc);
 
   return 0;
 }
@@ -403,19 +404,22 @@ int32_t WebRtcAec_Process(void* aecInst,
       retVal = -1;
     }
   }
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
   {
     int16_t far_buf_size_ms = (int16_t)(WebRtcAec_system_delay(aecpc->aec) /
                                         (sampMsNb * aecpc->rate_factor));
-    (void)fwrite(&far_buf_size_ms, 2, 1, aecpc->bufFile);
-    (void)fwrite(
+    OpenDebugFiles(aecpc, &webrtc_aec_instance_count);
+    if (aecpc->bufFile) {
+      (void)fwrite(&far_buf_size_ms, 2, 1, aecpc->bufFile);
+      (void)fwrite(
         &aecpc->knownDelay, sizeof(aecpc->knownDelay), 1, aecpc->delayFile);
+    }
   }
 #endif
 
   return retVal;
 }
 
 int WebRtcAec_set_config(void* handle, AecConfig config) {
   aecpc_t* self = (aecpc_t*)handle;
@@ -642,17 +646,20 @@ static int ProcessNormal(aecpc_t* aecpc,
 
       if (aecpc->skew < minSkewEst) {
         aecpc->skew = minSkewEst;
       } else if (aecpc->skew > maxSkewEst) {
         aecpc->skew = maxSkewEst;
       }
 
 #ifdef WEBRTC_AEC_DEBUG_DUMP
-      (void)fwrite(&aecpc->skew, sizeof(aecpc->skew), 1, aecpc->skewFile);
+      OpenDebugFiles(aecpc, &webrtc_aec_instance_count);
+      if (aecpc->skewFile) {
+        (void)fwrite(&aecpc->skew, sizeof(aecpc->skew), 1, aecpc->skewFile);
+      }
 #endif
     }
   }
 
   nFrames = nrOfSamples / FRAME_LEN;
   nBlocks10ms = nFrames / aecpc->rate_factor;
 
   if (aecpc->startup_phase) {
@@ -935,8 +942,68 @@ static void EstBufDelayExtended(aecpc_t*
     self->timeForDelayChange = 0;
   }
   self->lastDelayDiff = delay_difference;
 
   if (self->timeForDelayChange > 25) {
     self->knownDelay = WEBRTC_SPL_MAX((int)self->filtDelay - 256, 0);
   }
 }
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
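+// Open or close the buf/skew/delay dump files so they track the runtime
+// AECDebug() setting; called from WebRtcAec_Create and the processing path.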
+static void
+OpenDebugFiles(aecpc_t* aecpc,
+               int *instance_count)
+{
+  int error = 0;
+  // XXX  If this impacts performance (opening files here), move the file
+  // open to Trace::set_aec_debug() and just grab the handles here.
+  if (AECDebug() && !aecpc->bufFile) {
+    char path[1024];
+    char *filename;
+    path[0] = '\0';
+    AECDebugFilenameBase(path, sizeof(path));
+    filename = path + strlen(path);
+    if (&path[sizeof(path)] - filename < 128) {
+      return; // avoid a lot of snprintf calls and checks below
+    }
+    if (filename > path) {
+#ifdef XP_WIN
+      if (*(filename-1) != '\\') {
+        *filename++ = '\\';
+      }
+#else
+      if (*(filename-1) != '/') {
+        *filename++ = '/';
+      }
+#endif
+    }
+    sprintf(filename, "aec_buf%d.dat", *instance_count);
+    aecpc->bufFile = fopen(path, "wb");
+    sprintf(filename, "aec_skew%d.dat", *instance_count);
+    aecpc->skewFile = fopen(path, "wb");
+    sprintf(filename, "aec_delay%d.dat", *instance_count);
+    aecpc->delayFile = fopen(path, "wb");
+
+    if (!aecpc->bufFile || !aecpc->skewFile || !aecpc->delayFile) {
+      error = 1;
+    } else {
+      (*instance_count)++;
+    }
+  }
+  if (error ||
+      (!AECDebug() && aecpc->bufFile)) {
+    if (aecpc->bufFile) {
+      fclose(aecpc->bufFile);
+    }
+    if (aecpc->skewFile) {
+      fclose(aecpc->skewFile);
+    }
+    if (aecpc->delayFile) {
+      fclose(aecpc->delayFile);
+    }
+    aecpc->bufFile = aecpc->skewFile = aecpc->delayFile = NULL;
+  }
+}
+
+#endif
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
@@ -184,16 +184,21 @@
         {
           'target_name': 'audio_processing_sse2',
           'type': 'static_library',
           'sources': [
             'aec/aec_core_sse2.c',
             'aec/aec_rdft_sse2.c',
           ],
           'cflags': ['-msse2',],
+          'conditions': [
+            [ 'os_posix == 1', {
+              'cflags_mozilla': ['-msse2',],
+            }],
+          ],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],
     }],
     ['(target_arch=="arm" and arm_version==7) or target_arch=="armv7"', {
       'targets': [{
@@ -206,27 +211,31 @@
         'sources': [
           'aec/aec_core_neon.c',
           'aec/aec_rdft_neon.c',
           'aecm/aecm_core_neon.c',
           'ns/nsx_core_neon.c',
         ],
         'conditions': [
           ['OS=="android" or OS=="ios"', {
-            'dependencies': [
-              '<(gen_core_neon_offsets_gyp):*',
-            ],
-            'sources': [
+            # This dependency also provokes gyp into processing gypi files
+            # in libvpx, so it stays disabled:
+            #'dependencies': [
+            #  '<(gen_core_neon_offsets_gyp):*',
+            #],
+            # We disable the ASM sources because our gyp->Makefile translator
+            # does not support the build steps that generate the asm offsets.
+            'sources!': [
               'aecm/aecm_core_neon.S',
               'ns/nsx_core_neon.S',
             ],
             'include_dirs': [
               '<(shared_generated_dir)',
             ],
-            'sources!': [
+            'sources': [
               'aecm/aecm_core_neon.c',
               'ns/nsx_core_neon.c',
             ],
             'includes!': ['../../build/arm_neon.gypi',],
           }],
           # Disable LTO in audio_processing_neon target due to compiler bug
           ['use_lto==1', {
             'cflags!': [
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -62,17 +62,17 @@ EchoCancellationImpl::EchoCancellationIm
     crit_(crit),
     drift_compensation_enabled_(false),
     metrics_enabled_(false),
     suppression_level_(kModerateSuppression),
     stream_drift_samples_(0),
     was_stream_drift_set_(false),
     stream_has_echo_(false),
     delay_logging_enabled_(false),
-    delay_correction_enabled_(false),
+    delay_correction_enabled_(true), // default to long AEC tail in Mozilla
     reported_delay_enabled_(true) {}
 
 EchoCancellationImpl::~EchoCancellationImpl() {}
 
 int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
   if (!is_component_enabled()) {
     return apm_->kNoError;
   }
@@ -317,17 +317,21 @@ int EchoCancellationImpl::Initialize() {
   if (err != apm_->kNoError || !is_component_enabled()) {
     return err;
   }
 
   return apm_->kNoError;
 }
 
 void EchoCancellationImpl::SetExtraOptions(const Config& config) {
+#if 0
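+  // Disabled by Mozilla so the delay-correction default set in the
+  // constructor (the long AEC tail) is not overridden via SetExtraOptions().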
   delay_correction_enabled_ = config.Get<DelayCorrection>().enabled;
+#endif
   reported_delay_enabled_ = config.Get<ReportedDelay>().enabled;
   Configure();
 }
 
 void* EchoCancellationImpl::CreateHandle() const {
   Handle* handle = NULL;
   if (WebRtcAec_Create(&handle) != apm_->kNoError) {
     handle = NULL;
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.cc
@@ -0,0 +1,21 @@
+/*
+*  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+*
+*  Use of this source code is governed by a BSD-style license
+*  that can be found in the LICENSE file in the root of the source
+*  tree. An additional intellectual property rights grant can be found
+*  in the file PATENTS.  All contributing project authors may
+*  be found in the AUTHORS file in the root of the source tree.
+*/
+
+#include "webrtc/modules/desktop_capture/app_capturer.h"
+#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
+
+namespace webrtc {
+
+// static
+AppCapturer* AppCapturer::Create() {
+  return Create(DesktopCaptureOptions::CreateDefault());
+}
+
+}  // namespace webrtc
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.h
@@ -0,0 +1,50 @@
+/*
+*  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+*
+*  Use of this source code is governed by a BSD-style license
+*  that can be found in the LICENSE file in the root of the source
+*  tree. An additional intellectual property rights grant can be found
+*  in the file PATENTS.  All contributing project authors may
+*  be found in the AUTHORS file in the root of the source tree.
+*/
+
+#ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_APP_CAPTURER_H_
+#define WEBRTC_MODULES_DESKTOP_CAPTURE_APP_CAPTURER_H_
+
+#include <vector>
+#include <string>
+
+#include "webrtc/modules/desktop_capture/desktop_capture_types.h"
+#include "webrtc/modules/desktop_capture/desktop_capturer.h"
+#include "webrtc/system_wrappers/interface/constructor_magic.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class DesktopCaptureOptions;
+
+class AppCapturer : public DesktopCapturer {
+public:
+    typedef webrtc::ProcessId ProcessId;
+    struct App {
+        ProcessId id;
+        // Application name, in UTF-8 encoding.
+        std::string title;
+    };
+    typedef std::vector<App> AppList;
+
+    static AppCapturer* Create(const DesktopCaptureOptions& options);
+    static AppCapturer* Create();
+
+    virtual ~AppCapturer() {}
+
+    // AppCapturer interface.
+    virtual bool GetAppList(AppList* apps) = 0;
+    virtual bool SelectApp(ProcessId id) = 0;
+    virtual bool BringAppToFront() = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_DESKTOP_CAPTURE_APP_CAPTURER_H_
+
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_mac.mm
@@ -0,0 +1,185 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <ApplicationServices/ApplicationServices.h>
+#include <Cocoa/Cocoa.h>
+#include <Carbon/Carbon.h>
+#include <CoreFoundation/CoreFoundation.h>
+#include <AppKit/AppKit.h>
+
+#include "webrtc/modules/desktop_capture/window_capturer.h"
+#include "webrtc/modules/desktop_capture/app_capturer.h"
+
+#include "webrtc/modules/desktop_capture/desktop_frame.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+class AppCapturerMac : public AppCapturer {
+ public:
+  AppCapturerMac();
+  virtual ~AppCapturerMac();
+
+  // AppCapturer interface.
+  virtual bool GetAppList(AppList* apps) OVERRIDE;
+  virtual bool SelectApp(ProcessId processId) OVERRIDE;
+  virtual bool BringAppToFront() OVERRIDE;
+
+  // DesktopCapturer interface.
+  virtual void Start(Callback* callback) OVERRIDE;
+  virtual void Capture(const DesktopRegion& region) OVERRIDE;
+
+ private:
+  Callback* callback_;
+  ProcessId process_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(AppCapturerMac);
+};
+
+AppCapturerMac::AppCapturerMac()
+  : callback_(NULL),
+    process_id_(0) {
+}
+
+AppCapturerMac::~AppCapturerMac() {
+}
+
+// AppCapturer interface.
+bool AppCapturerMac::GetAppList(AppList* apps) {
+  // handled by DesktopDeviceInfo
+  return true;
+}
+
+bool AppCapturerMac::SelectApp(ProcessId processId) {
+  process_id_ = processId;
+
+  return true;
+}
+
+bool AppCapturerMac::BringAppToFront() {
+  return true;
+}
+
+// DesktopCapturer interface.
+void AppCapturerMac::Start(Callback* callback) {
+  assert(!callback_);
+  assert(callback);
+
+  callback_ = callback;
+}
+
+void AppCapturerMac::Capture(const DesktopRegion& region) {
+  // Check that selected process exists
+  NSRunningApplication *ra = [NSRunningApplication runningApplicationWithProcessIdentifier:process_id_];
+  if (!ra) {
+    callback_->OnCaptureCompleted(NULL);
+    return;
+  }
+
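+  // CFArrayCreate below reads each array slot as a pointer-sized value, so
+  // the window-ID elements must be pointer-sized too (64-bit on LP64 builds).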
+#if defined(__LP64__)
+#define CaptureWindowID int64_t
+#else
+#define CaptureWindowID CGWindowID
+#endif
+
+  CFArrayRef windowInfos = CGWindowListCopyWindowInfo(
+      kCGWindowListOptionOnScreenOnly | kCGWindowListExcludeDesktopElements,
+      kCGNullWindowID);
+  CFIndex windowInfosCount = CFArrayGetCount(windowInfos);
+  CaptureWindowID *captureWindowList = new CaptureWindowID[windowInfosCount];
+  CFIndex captureWindowListCount = 0;
+  for (CFIndex idx = 0; idx < windowInfosCount; idx++) {
+    CFDictionaryRef info = reinterpret_cast<CFDictionaryRef>(
+        CFArrayGetValueAtIndex(windowInfos, idx));
+    CFNumberRef winOwner = reinterpret_cast<CFNumberRef>(
+        CFDictionaryGetValue(info, kCGWindowOwnerPID));
+    CFNumberRef winId = reinterpret_cast<CFNumberRef>(
+        CFDictionaryGetValue(info, kCGWindowNumber));
+
+    pid_t owner;
+    CFNumberGetValue(winOwner, kCFNumberIntType, &owner);
+    if (owner != process_id_) {
+      continue;
+    }
+
+    CGWindowID ident;
+    CFNumberGetValue(winId, kCFNumberIntType, &ident);
+
+    captureWindowList[captureWindowListCount++] = ident;
+  }
+  CFRelease(windowInfos);
+
+  // Check that window list is not empty
+  if (captureWindowListCount <= 0) {
+    delete [] captureWindowList;
+    callback_->OnCaptureCompleted(NULL);
+    return;
+  }
+
+  // Does not support multi-display; See bug 1037997.
+  CGRect rectCapturedDisplay = CGDisplayBounds(CGMainDisplayID());
+
+  // Capture all windows of selected process, bounded by desktop.
+  CFArrayRef windowIDsArray = CFArrayCreate(kCFAllocatorDefault,
+                                            (const void**)captureWindowList,
+                                            captureWindowListCount,
+                                            NULL);
+  CGImageRef app_image = CGWindowListCreateImageFromArray(rectCapturedDisplay,
+                                                          windowIDsArray,
+                                                          kCGWindowImageDefault);
+  CFRelease(windowIDsArray);
+  delete [] captureWindowList;
+
+  // Wrap the raw image data into a DesktopFrame.
+  if (!app_image) {
+    callback_->OnCaptureCompleted(NULL);
+    return;
+  }
+
+  int bits_per_pixel = CGImageGetBitsPerPixel(app_image);
+  if (bits_per_pixel != 32) {
+    LOG(LS_ERROR) << "Unsupported window image depth: " << bits_per_pixel;
+    CFRelease(app_image);
+    callback_->OnCaptureCompleted(NULL);
+    return;
+  }
+
+  int width = CGImageGetWidth(app_image);
+  int height = CGImageGetHeight(app_image);
+  DesktopFrame* frame = new BasicDesktopFrame(DesktopSize(width, height));
+
+  CGDataProviderRef provider = CGImageGetDataProvider(app_image);
+  CFDataRef cf_data = CGDataProviderCopyData(provider);
+  int src_stride = CGImageGetBytesPerRow(app_image);
+  const uint8_t* src_data = CFDataGetBytePtr(cf_data);
+  for (int y = 0; y < height; ++y) {
+    memcpy(frame->data() + frame->stride() * y, src_data + src_stride * y,
+            DesktopFrame::kBytesPerPixel * width);
+  }
+
+  CFRelease(cf_data);
+  CFRelease(app_image);
+
+  callback_->OnCaptureCompleted(frame);
+}
+
+}  // namespace
+
+// static
+AppCapturer* AppCapturer::Create(const DesktopCaptureOptions& options) {
+  return new AppCapturerMac();
+}
+
+}  // namespace webrtc
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_null.cc
@@ -0,0 +1,83 @@
+/*
+*  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+*
+*  Use of this source code is governed by a BSD-style license
+*  that can be found in the LICENSE file in the root of the source
+*  tree. An additional intellectual property rights grant can be found
+*  in the file PATENTS.  All contributing project authors may
+*  be found in the AUTHORS file in the root of the source tree.
+*/
+#include "webrtc/modules/desktop_capture/window_capturer.h"
+#include "webrtc/modules/desktop_capture/app_capturer.h"
+
+#include <assert.h>
+
+#include "webrtc/modules/desktop_capture/desktop_frame.h"
+
+namespace webrtc {
+
+namespace {
+
+class AppCapturerNull : public AppCapturer {
+public:
+  AppCapturerNull();
+  virtual ~AppCapturerNull();
+
+  // AppCapturer interface.
+  virtual bool GetAppList(AppList* apps) OVERRIDE;
+  virtual bool SelectApp(ProcessId id) OVERRIDE;
+  virtual bool BringAppToFront() OVERRIDE;
+
+  // DesktopCapturer interface.
+  virtual void Start(Callback* callback) OVERRIDE;
+  virtual void Capture(const DesktopRegion& region) OVERRIDE;
+
+private:
+  Callback* callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(AppCapturerNull);
+};
+
+AppCapturerNull::AppCapturerNull()
+  : callback_(NULL) {
+}
+
+AppCapturerNull::~AppCapturerNull() {
+}
+
+bool AppCapturerNull::GetAppList(AppList* apps) {
+  // Not implemented yet: See Bug 1036653
+  return false;
+}
+
+bool AppCapturerNull::SelectApp(ProcessId id) {
+  // Not implemented yet: See Bug 1036653
+  return false;
+}
+
+bool AppCapturerNull::BringAppToFront() {
+  // Not implemented yet: See Bug 1036653
+  return false;
+}
+
+// DesktopCapturer interface.
+void AppCapturerNull::Start(Callback* callback) {
+  assert(!callback_);
+  assert(callback);
+
+  callback_ = callback;
+}
+
+void AppCapturerNull::Capture(const DesktopRegion& region) {
+  // Not implemented yet: See Bug 1036653
+  callback_->OnCaptureCompleted(NULL);
+}
+
+}  // namespace
+
+// static
+AppCapturer* AppCapturer::Create(const DesktopCaptureOptions& options) {
+  return new AppCapturerNull();
+}
+
+}  // namespace webrtc
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_unittest.cc
@@ -0,0 +1,89 @@
+/*
+*  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+*
+*  Use of this source code is governed by a BSD-style license
+*  that can be found in the LICENSE file in the root of the source
+*  tree. An additional intellectual property rights grant can be found
+*  in the file PATENTS.  All contributing project authors may
+*  be found in the AUTHORS file in the root of the source tree.
+*/
+#include "webrtc/modules/desktop_capture/app_capturer.h"
+
+#include "gtest/gtest.h"
+#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
+#include "webrtc/modules/desktop_capture/desktop_frame.h"
+#include "webrtc/modules/desktop_capture/desktop_region.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+class AppCapturerTest : public testing::Test,
+                        public DesktopCapturer::Callback {
+public:
+  void SetUp() OVERRIDE {
+    capturer_.reset(
+      AppCapturer::Create(DesktopCaptureOptions::CreateDefault())
+    );
+  }
+
+  void TearDown() OVERRIDE {
+  }
+
+  // DesktopCapturer::Callback interface
+  virtual SharedMemory* CreateSharedMemory(size_t size) OVERRIDE {
+    return NULL;
+  }
+
+  virtual void OnCaptureCompleted(DesktopFrame* frame) OVERRIDE {
+    frame_.reset(frame);
+  }
+
+protected:
+  scoped_ptr<AppCapturer> capturer_;
+  scoped_ptr<DesktopFrame> frame_;
+};
+
+// Verify that we can enumerate applications.
+TEST_F(AppCapturerTest, Enumerate) {
+  AppCapturer::AppList apps;
+  EXPECT_TRUE(capturer_->GetAppList(&apps));
+
+  // Verify that window titles are set.
+  for (AppCapturer::AppList::iterator it = apps.begin();
+       it != apps.end(); ++it) {
+    EXPECT_FALSE(it->title.empty());
+  }
+}
+
+// Verify that we can capture an app.
+TEST_F(AppCapturerTest, Capture) {
+  AppCapturer::AppList apps;
+  capturer_->Start(this);
+  EXPECT_TRUE(capturer_->GetAppList(&apps));
+
+  // Verify that we can select and capture each app.
+  for (AppCapturer::AppList::iterator it = apps.begin();
+       it != apps.end(); ++it) {
+    frame_.reset();
+    if (capturer_->SelectApp(it->id)) {
+      capturer_->Capture(DesktopRegion());
+    }
+
+    // If we failed to capture an app, make sure it no longer exists.
+    if (!frame_.get()) {
+      AppCapturer::AppList new_list;
+      EXPECT_TRUE(capturer_->GetAppList(&new_list));
+      for (AppCapturer::AppList::iterator new_list_it = new_list.begin();
+           new_list_it != new_list.end(); ++new_list_it) {
+        EXPECT_FALSE(it->id == new_list_it->id);
+      }
+      continue;
+    }
+
+    EXPECT_GT(frame_->size().width(), 0);
+    EXPECT_GT(frame_->size().height(), 0);
+  }
+}
+
+}  // namespace webrtc
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_win.cc
@@ -0,0 +1,429 @@
+/*
+*  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+*
+*  Use of this source code is governed by a BSD-style license
+*  that can be found in the LICENSE file in the root of the source
+*  tree. An additional intellectual property rights grant can be found
+*  in the file PATENTS.  All contributing project authors may
+*  be found in the AUTHORS file in the root of the source tree.
+*/
+
+#include "webrtc/modules/desktop_capture/window_capturer.h"
+#include "webrtc/modules/desktop_capture/app_capturer.h"
+#include "webrtc/modules/desktop_capture/screen_capturer.h"
+#include "webrtc/modules/desktop_capture/shared_desktop_frame.h"
+#include "webrtc/modules/desktop_capture/win/win_shared.h"
+
+#include <windows.h>
+#include <vector>
+#include <cassert>
+
+#include "webrtc/modules/desktop_capture/desktop_frame_win.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+namespace {
+
+// Proxy over the WebRTC window capturer, to allow post-processing
+// of the frame to merge multiple window capture frames into a single frame
+class WindowsCapturerProxy : DesktopCapturer::Callback {
+public:
+  WindowsCapturerProxy() :
+      window_capturer_(WindowCapturer::Create()) {
+    window_capturer_->Start(this);
+  }
+  ~WindowsCapturerProxy() {}
+
+  void SelectWindow(HWND hwnd) { window_capturer_->SelectWindow(reinterpret_cast<WindowId>(hwnd)); }
+  scoped_ptr<DesktopFrame>& GetFrame() { return frame_; }
+  void Capture(const DesktopRegion& region) { window_capturer_->Capture(region); }
+
+  // Callback interface
+  virtual SharedMemory *CreateSharedMemory(size_t) OVERRIDE { return NULL; }
+  virtual void OnCaptureCompleted(DesktopFrame *frame) OVERRIDE { frame_.reset(frame); }
+private:
+  scoped_ptr<WindowCapturer> window_capturer_;
+  scoped_ptr<DesktopFrame> frame_;
+};
+
+// Proxy over the WebRTC screen capturer, to allow post-processing
+// of the frame to mask out non-application windows
+class ScreenCapturerProxy : DesktopCapturer::Callback {
+public:
+  ScreenCapturerProxy(const DesktopCaptureOptions& options) :
+      screen_capturer_(ScreenCapturer::Create(options)) {
+    screen_capturer_->SelectScreen(kFullDesktopScreenId);
+    screen_capturer_->Start(this);
+  }
+  void Capture(const DesktopRegion& region) { screen_capturer_->Capture(region); }
+  scoped_ptr<DesktopFrame>& GetFrame() { return frame_; }
+
+  // Callback interface
+  virtual SharedMemory *CreateSharedMemory(size_t) OVERRIDE { return NULL; }
+  virtual void OnCaptureCompleted(DesktopFrame *frame) OVERRIDE { frame_.reset(frame); }
+protected:
+  scoped_ptr<ScreenCapturer> screen_capturer_;
+  scoped_ptr<DesktopFrame> frame_;
+};
+
+class AppCapturerWin : public AppCapturer {
+public:
+  AppCapturerWin(const DesktopCaptureOptions& options);
+  virtual ~AppCapturerWin();
+
+  // AppCapturer interface.
+  virtual bool GetAppList(AppList* apps) OVERRIDE;
+  virtual bool SelectApp(ProcessId processId) OVERRIDE;
+  virtual bool BringAppToFront() OVERRIDE;
+
+  // DesktopCapturer interface.
+  virtual void Start(Callback* callback) OVERRIDE;
+  virtual void Capture(const DesktopRegion& region) OVERRIDE;
+
+  struct WindowItem {
+    HWND handle;
+    RECT bounds;
+    bool owned;
+  };
+
+  struct EnumWindowsCtx {
+    ProcessId process_id;
+    std::vector<WindowItem> windows;
+    bool list_all;
+  };
+
+  static BOOL CALLBACK EnumWindowsProc(HWND handle, LPARAM lParam);
+protected:
+  void CaptureByWebRTC(const DesktopRegion& region);
+  void CaptureBySample(const DesktopRegion& region);
+private:
+  Callback* callback_;
+
+  ProcessId processId_;
+
+  // Sample Mode
+  ScreenCapturerProxy screen_capturer_proxy_;
+  // Mask of foreground (non-app windows in front of selected)
+  HRGN hrgn_foreground_;
+  // Mask of background (desktop, non-app windows behind selected)
+  HRGN hrgn_background_;
+  // Region of selected windows
+  HRGN hrgn_visual_;
+
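+  // Recomputes the three regions above from the current window Z-order;
+  // called by CaptureBySample() before masking the sampled frame.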
+  void UpdateRegions();
+
+  // WebRTC Window mode
+  WindowsCapturerProxy window_capturer_proxy_;
+
+  DISALLOW_COPY_AND_ASSIGN(AppCapturerWin);
+};
+
+AppCapturerWin::AppCapturerWin(const DesktopCaptureOptions& options)
+  : callback_(NULL),
+    processId_(0),
+    screen_capturer_proxy_(options) {
+  // Initialize regions to zero
+  hrgn_foreground_ = CreateRectRgn(0, 0, 0, 0);
+  hrgn_background_ = CreateRectRgn(0, 0, 0, 0);
+  hrgn_visual_ = CreateRectRgn(0, 0, 0, 0);
+}
+
+AppCapturerWin::~AppCapturerWin() {
+  if (hrgn_foreground_) {
+    DeleteObject(hrgn_foreground_);
+  }
+  if (hrgn_background_) {
+    DeleteObject(hrgn_background_);
+  }
+  if (hrgn_visual_) {
+    DeleteObject(hrgn_visual_);
+  }
+}
+
+// AppCapturer interface.
+bool AppCapturerWin::GetAppList(AppList* apps) {
+  // Implemented via DesktopDeviceInfo
+  return true;
+}
+
+bool AppCapturerWin::SelectApp(ProcessId processId) {
+  processId_ = processId;
+  return true;
+}
+
+bool AppCapturerWin::BringAppToFront() {
+  // Not implemented yet: See Bug 1036653
+  return true;
+}
+
+// DesktopCapturer interface.
+void AppCapturerWin::Start(Callback* callback) {
+  assert(!callback_);
+  assert(callback);
+
+  callback_ = callback;
+}
+void AppCapturerWin::Capture(const DesktopRegion& region) {
+  assert(IsGUIThread(false));
+  CaptureBySample(region);
+}
+
+BOOL CALLBACK AppCapturerWin::EnumWindowsProc(HWND handle, LPARAM lParam) {
+  EnumWindowsCtx *pEnumWindowsCtx = reinterpret_cast<EnumWindowsCtx *>(lParam);
+  if (!pEnumWindowsCtx) {
+    return FALSE;
+  }
+
+  DWORD procId = 0;
+  GetWindowThreadProcessId(handle, &procId);
+  if (procId == pEnumWindowsCtx->process_id || pEnumWindowsCtx->list_all) {
+    WindowItem window_item;
+    window_item.handle = handle;
+
+    if (!IsWindowVisible(handle) || IsIconic(handle)) {
+      return TRUE;
+    }
+
+    GetWindowRect(handle, &window_item.bounds);
+    window_item.owned = (procId == pEnumWindowsCtx->process_id);
+    pEnumWindowsCtx->windows.push_back(window_item);
+  }
+
+  return TRUE;
+}
+
+void AppCapturerWin::CaptureByWebRTC(const DesktopRegion& region) {
+  assert(IsGUIThread(false));
+  // List Windows of selected application
+  EnumWindowsCtx lParamEnumWindows;
+  lParamEnumWindows.process_id = processId_;
+  lParamEnumWindows.list_all = false;
+  EnumWindows(EnumWindowsProc, (LPARAM)&lParamEnumWindows);
+
+  // Prepare capture dc context
+  // TODO: handle multi-monitor setups; see Bug 1037997
+  DesktopRect rcDesktop(DesktopRect::MakeXYWH(
+      GetSystemMetrics(SM_XVIRTUALSCREEN),
+      GetSystemMetrics(SM_YVIRTUALSCREEN),
+      GetSystemMetrics(SM_CXVIRTUALSCREEN),
+      GetSystemMetrics(SM_CYVIRTUALSCREEN)
+  ));
+
+  HDC dcScreen = GetDC(NULL);
+  HDC memDcCapture = CreateCompatibleDC(dcScreen);
+  if (dcScreen) {
+    ReleaseDC(NULL, dcScreen);
+  }
+
+  scoped_ptr<DesktopFrameWin> frameCapture(DesktopFrameWin::Create(
+      DesktopSize(rcDesktop.width(), rcDesktop.height()),
+      NULL, memDcCapture));
+  HBITMAP bmpOrigin = static_cast<HBITMAP>(SelectObject(memDcCapture, frameCapture->bitmap()));
+  BOOL bCaptureAppResult = false;
+  // Capture and Combine all windows into memDcCapture
+  std::vector<WindowItem>::reverse_iterator itItem;
+  for (itItem = lParamEnumWindows.windows.rbegin(); itItem != lParamEnumWindows.windows.rend(); itItem++) {
+    WindowItem window_item = *itItem;
+    HWND hWndCapturer = window_item.handle;
+    if (!IsWindow(hWndCapturer) || !IsWindowVisible(hWndCapturer) || IsIconic(hWndCapturer)) {
+      continue;
+    }
+
+    HDC memDcWin = NULL;
+    HBITMAP bmpOriginWin = NULL;
+    HBITMAP hBitmapFrame = NULL;
+    HDC dcWin = NULL;
+    RECT rcWin = window_item.bounds;
+    bool bCaptureResult = false;
+    scoped_ptr<DesktopFrameWin> frame;
+    do {
+      if (rcWin.left == rcWin.right || rcWin.top == rcWin.bottom) {
+        break;
+      }
+
+      dcWin = GetWindowDC(hWndCapturer);
+      if (!dcWin) {
+        break;
+      }
+      memDcWin = CreateCompatibleDC(dcWin);
+
+      // Capture
+      window_capturer_proxy_.SelectWindow(hWndCapturer);
+      window_capturer_proxy_.Capture(region);
+      if (window_capturer_proxy_.GetFrame() != NULL) {
+        DesktopFrameWin *pDesktopFrameWin = reinterpret_cast<DesktopFrameWin *>(
+            window_capturer_proxy_.GetFrame().get());
+        if (pDesktopFrameWin) {
+          hBitmapFrame = pDesktopFrameWin->bitmap();
+        }
+        if (GetObjectType(hBitmapFrame) != OBJ_BITMAP) {
+          hBitmapFrame = NULL;
+        }
+      }
+      if (!hBitmapFrame) {
+        break;
+      }
+      bmpOriginWin = static_cast<HBITMAP>(SelectObject(memDcWin, hBitmapFrame));
+    } while(0);
+
+    // bitblt to capture memDcCapture
+    if (bmpOriginWin) {
+      BitBlt(memDcCapture,
+          rcWin.left, rcWin.top, rcWin.right - rcWin.left, rcWin.bottom - rcWin.top,
+          memDcWin, 0, 0, SRCCOPY);
+      bCaptureAppResult = true;
+    }
+
+    // Clean resource
+    if (memDcWin) {
+      SelectObject(memDcWin, bmpOriginWin);
+      DeleteDC(memDcWin);
+    }
+    if (dcWin) {
+      ReleaseDC(hWndCapturer, dcWin);
+    }
+  }
+
+  // Clean resource
+  if (memDcCapture) {
+    SelectObject(memDcCapture, bmpOrigin);
+    DeleteDC(memDcCapture);
+  }
+
+  // trigger event
+  if (bCaptureAppResult) {
+    callback_->OnCaptureCompleted(frameCapture.release());
+  }
+}
+
+// Application Capturer by sample and region
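+// Strategy: capture the full screen, then paint the background region black
+// and the foreground region (windows covering the app) yellow, leaving only
+// the selected application's windows visible.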
+void AppCapturerWin::CaptureBySample(const DesktopRegion& region) {
+  assert(IsGUIThread(false));
+  // capture entire screen
+  screen_capturer_proxy_.Capture(region);
+
+  HBITMAP hBitmapFrame = NULL;
+  if (screen_capturer_proxy_.GetFrame() != NULL) {
+    SharedDesktopFrame* pSharedDesktopFrame = reinterpret_cast<SharedDesktopFrame*>(
+        screen_capturer_proxy_.GetFrame().get());
+    if (pSharedDesktopFrame) {
+      DesktopFrameWin* pDesktopFrameWin = reinterpret_cast<DesktopFrameWin*>(
+          pSharedDesktopFrame->GetUnderlyingFrame());
+      if (pDesktopFrameWin) {
+        hBitmapFrame = pDesktopFrameWin->bitmap();
+      }
+      if (GetObjectType(hBitmapFrame) != OBJ_BITMAP) {
+        hBitmapFrame = NULL;
+      }
+    }
+  }
+  if (hBitmapFrame) {
+    // calculate app visual/foreground region
+    UpdateRegions();
+
+    HDC dcScreen = GetDC(NULL);
+    HDC memDcCapture = CreateCompatibleDC(dcScreen);
+
+    RECT rcScreen = {0, 0,
+        screen_capturer_proxy_.GetFrame()->size().width(),
+        screen_capturer_proxy_.GetFrame()->size().height()
+    };
+
+    HBITMAP bmpOriginCapture = (HBITMAP)SelectObject(memDcCapture, hBitmapFrame);
+
+    // TODO: background/foreground mask colors should be configurable; see Bug 1054503
+    // fill background
+    SelectClipRgn(memDcCapture, hrgn_background_);
+    SelectObject(memDcCapture, GetStockObject(DC_BRUSH));
+    SetDCBrushColor(memDcCapture, RGB(0, 0, 0));
+    FillRect(memDcCapture, &rcScreen, (HBRUSH)GetStockObject(DC_BRUSH));
+
+    // fill foreground
+    SelectClipRgn(memDcCapture, hrgn_foreground_);
+    SelectObject(memDcCapture, GetStockObject(DC_BRUSH));
+    SetDCBrushColor(memDcCapture, RGB(0xff, 0xff, 0));
+    FillRect(memDcCapture, &rcScreen, (HBRUSH)GetStockObject(DC_BRUSH));
+
+    if (dcScreen) {
+      ReleaseDC(NULL, dcScreen);
+    }
+    SelectObject(memDcCapture, bmpOriginCapture);
+    DeleteDC(memDcCapture);
+  }
+
+  // trigger event
+  if (callback_) {
+    callback_->OnCaptureCompleted(screen_capturer_proxy_.GetFrame().release());
+  }
+}
+
+void AppCapturerWin::UpdateRegions() {
+  assert(IsGUIThread(false));
+  // List Windows of selected application
+  EnumWindowsCtx lParamEnumWindows;
+  lParamEnumWindows.process_id = processId_;
+  lParamEnumWindows.list_all = true;
+  EnumWindows(EnumWindowsProc, (LPARAM)&lParamEnumWindows);
+
+  SetRectRgn(hrgn_foreground_, 0, 0, 0, 0);
+  SetRectRgn(hrgn_visual_, 0, 0, 0, 0);
+  SetRectRgn(hrgn_background_, 0, 0, 0, 0);
+
+  HRGN hrgn_screen = CreateRectRgn(0, 0,
+      GetSystemMetrics(SM_CXVIRTUALSCREEN),
+      GetSystemMetrics(SM_CYVIRTUALSCREEN));
+
+  HRGN hrgn_window = CreateRectRgn(0, 0, 0, 0);
+  HRGN hrgn_intersect = CreateRectRgn(0, 0, 0, 0);
+  std::vector<WindowItem>::reverse_iterator itItem;
+  for (itItem = lParamEnumWindows.windows.rbegin(); itItem != lParamEnumWindows.windows.rend(); itItem++) {
+    WindowItem window_item = *itItem;
+    SetRectRgn(hrgn_window, 0, 0, 0, 0);
+    if (GetWindowRgn(window_item.handle, hrgn_window) == ERROR) {
+      SetRectRgn(hrgn_window, window_item.bounds.left,
+                 window_item.bounds.top,
+                 window_item.bounds.right,
+                 window_item.bounds.bottom);
+    }
+
+    if (window_item.owned) {
+      CombineRgn(hrgn_visual_, hrgn_visual_, hrgn_window, RGN_OR);
+      CombineRgn(hrgn_foreground_, hrgn_foreground_, hrgn_window, RGN_DIFF);
+    } else {
+      SetRectRgn(hrgn_intersect, 0, 0, 0, 0);
+      CombineRgn(hrgn_intersect, hrgn_visual_, hrgn_window, RGN_AND);
+
+      CombineRgn(hrgn_visual_, hrgn_visual_, hrgn_intersect, RGN_DIFF);
+
+      CombineRgn(hrgn_foreground_, hrgn_foreground_, hrgn_intersect, RGN_OR);
+    }
+  }
+  CombineRgn(hrgn_background_, hrgn_screen, hrgn_visual_, RGN_DIFF);
+
+  // CreateRectRgn allocations must be freed or they leak GDI handles.
+  if (hrgn_screen) {
+    DeleteObject(hrgn_screen);
+  }
+  if (hrgn_window) {
+    DeleteObject(hrgn_window);
+  }
+  if (hrgn_intersect) {
+    DeleteObject(hrgn_intersect);
+  }
+}
+
+}  // namespace
+
+// static
+AppCapturer* AppCapturer::Create(const DesktopCaptureOptions& options) {
+  return new AppCapturerWin(options);
+}
+
+}  // namespace webrtc
new file mode 100755
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_x11.cc
@@ -0,0 +1,369 @@
+/*
+*  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+*
+*  Use of this source code is governed by a BSD-style license
+*  that can be found in the LICENSE file in the root of the source
+*  tree. An additional intellectual property rights grant can be found
+*  in the file PATENTS.  All contributing project authors may
+*  be found in the AUTHORS file in the root of the source tree.
+*/
+#include "webrtc/modules/desktop_capture/window_capturer.h"
+#include "webrtc/modules/desktop_capture/app_capturer.h"
+#include "webrtc/modules/desktop_capture/screen_capturer.h"
+#include "webrtc/modules/desktop_capture/shared_desktop_frame.h"
+#include "webrtc/modules/desktop_capture/x11/shared_x_util.h"
+
+#include <assert.h>
+#include <string.h>
+#include <X11/Xatom.h>
+#include <X11/extensions/Xcomposite.h>
+#include <X11/extensions/Xrender.h>
+#include <X11/Xutil.h>
+#include <X11/Xregion.h>
+
+#include <algorithm>
+
+#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
+#include "webrtc/modules/desktop_capture/desktop_frame.h"
+#include "webrtc/modules/desktop_capture/x11/shared_x_display.h"
+#include "webrtc/modules/desktop_capture/x11/x_error_trap.h"
+#include "webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/scoped_refptr.h"
+
+namespace webrtc {
+
+namespace {
+
+class WindowsCapturerProxy : DesktopCapturer::Callback {
+public:
+  WindowsCapturerProxy() : window_capturer_(WindowCapturer::Create()) {
+    window_capturer_->Start(this);
+  }
+  ~WindowsCapturerProxy() {}
+  void SelectWindow(WindowId windowId) { window_capturer_->SelectWindow(windowId); }
+  scoped_ptr<DesktopFrame>& GetFrame() { return frame_; }
+  void Capture(const DesktopRegion& region) { window_capturer_->Capture(region); }
+
+  // Callback interface
+  virtual SharedMemory *CreateSharedMemory(size_t) OVERRIDE { return NULL; }
+  virtual void OnCaptureCompleted(DesktopFrame *frame) OVERRIDE { frame_.reset(frame); }
+
+private:
+  scoped_ptr<WindowCapturer> window_capturer_;
+  scoped_ptr<DesktopFrame> frame_;
+};
+
+class ScreenCapturerProxy : DesktopCapturer::Callback {
+public:
+  ScreenCapturerProxy() : screen_capturer_(ScreenCapturer::Create()) {
+    screen_capturer_->SelectScreen(kFullDesktopScreenId);
+    screen_capturer_->Start(this);
+  }
+  void Capture(const DesktopRegion& region) { screen_capturer_->Capture(region); }
+  scoped_ptr<DesktopFrame>& GetFrame() { return frame_; }
+
+  // Callback interface
+  virtual SharedMemory *CreateSharedMemory(size_t) OVERRIDE { return NULL; }
+  virtual void OnCaptureCompleted(DesktopFrame *frame) OVERRIDE { frame_.reset(frame); }
+protected:
+  scoped_ptr<ScreenCapturer> screen_capturer_;
+  scoped_ptr<DesktopFrame> frame_;
+};
+
+class AppCapturerLinux : public AppCapturer {
+public:
+  AppCapturerLinux(const DesktopCaptureOptions& options);
+  virtual ~AppCapturerLinux();
+
+  // AppCapturer interface.
+  virtual bool GetAppList(AppList* apps) OVERRIDE;
+  virtual bool SelectApp(ProcessId processId) OVERRIDE;
+  virtual bool BringAppToFront() OVERRIDE;
+
+  // DesktopCapturer interface.
+  virtual void Start(Callback* callback) OVERRIDE;
+  virtual void Capture(const DesktopRegion& region) OVERRIDE;
+
+protected:
+  Display* GetDisplay() { return x_display_->display(); }
+  bool UpdateRegions();
+
+  void CaptureWebRTC(const DesktopRegion& region);
+  void CaptureSample(const DesktopRegion& region);
+
+  void FillDesktopFrameRegionWithColor(DesktopFrame* pDesktopFrame, Region rgn, uint32_t color);
+private:
+  Callback* callback_;
+  ProcessId selected_process_;
+
+  // Sample Mode
+  ScreenCapturerProxy screen_capturer_proxy_;
+  // Mask of foreground (non-app windows in front of selected)
+  Region rgn_mask_;
+  // Region of selected windows
+  Region rgn_visual_;
+  // Mask of background (desktop, non-app windows behind selected)
+  Region rgn_background_;
+
+  // WebRtc Window mode
+  WindowsCapturerProxy window_capturer_proxy_;
+
+  scoped_refptr<SharedXDisplay> x_display_;
+  DISALLOW_COPY_AND_ASSIGN(AppCapturerLinux);
+};
+
+AppCapturerLinux::AppCapturerLinux(const DesktopCaptureOptions& options)
+    : callback_(NULL),
+      selected_process_(0),
+      x_display_(options.x_display()) {
+  rgn_mask_ = XCreateRegion();
+  rgn_visual_ = XCreateRegion();
+  rgn_background_ = XCreateRegion();
+}
+
+AppCapturerLinux::~AppCapturerLinux() {
+  if (rgn_mask_) {
+    XDestroyRegion(rgn_mask_);
+  }
+  if (rgn_visual_) {
+    XDestroyRegion(rgn_visual_);
+  }
+  if (rgn_background_) {
+    XDestroyRegion(rgn_background_);
+  }
+}
+
+// AppCapturer interface.
+bool AppCapturerLinux::GetAppList(AppList* apps) {
+  // Implemented in DesktopDeviceInfo
+  return true;
+}
+bool AppCapturerLinux::SelectApp(ProcessId processId) {
+  selected_process_ = processId;
+  return true;
+}
+bool AppCapturerLinux::BringAppToFront() {
+  // Not implemented yet: See Bug 1036653
+  return true;
+}
+
+// DesktopCapturer interface.
+void AppCapturerLinux::Start(Callback* callback) {
+  assert(!callback_);
+  assert(callback);
+
+  callback_ = callback;
+}
+
+void AppCapturerLinux::Capture(const DesktopRegion& region) {
+  CaptureSample(region);
+}
+
+void AppCapturerLinux::CaptureWebRTC(const DesktopRegion& region) {
+  XErrorTrap error_trap(GetDisplay());
+
+  int nScreenWidth = DisplayWidth(GetDisplay(), DefaultScreen(GetDisplay()));
+  int nScreenHeight = DisplayHeight(GetDisplay(), DefaultScreen(GetDisplay()));
+  scoped_ptr<DesktopFrame> frame(new BasicDesktopFrame(DesktopSize(nScreenWidth, nScreenHeight)));
+
+  WindowUtilX11 window_util_x11(x_display_);
+
+  ::Window root_window = XRootWindow(GetDisplay(), DefaultScreen(GetDisplay()));
+  ::Window parent;
+  ::Window root_return;
+  ::Window *children;
+  unsigned int num_children;
+  int status = XQueryTree(GetDisplay(), root_window, &root_return, &parent,
+      &children, &num_children);
+  if (status == 0) {
+    LOG(LS_ERROR) << "Failed to query for child windows for screen "
+        << DefaultScreen(GetDisplay());
+    return;
+  }
+
+  for (unsigned int i = 0; i < num_children; ++i) {
+    ::Window app_window = window_util_x11.GetApplicationWindow(children[i]);
+    if (!app_window) {
+      continue;
+    }
+
+    unsigned int processId = window_util_x11.GetWindowProcessID(app_window);
+    if (processId != 0 && processId == selected_process_) {
+      // capture
+      window_capturer_proxy_.SelectWindow(app_window);
+      window_capturer_proxy_.Capture(region);
+      DesktopFrame* frameWin = window_capturer_proxy_.GetFrame().get();
+      if (frameWin == NULL) {
+        continue;
+      }
+
+      XRectangle win_rect;
+      window_util_x11.GetWindowRect(app_window, win_rect, false);
+      if (win_rect.width <= 0 || win_rect.height <= 0) {
+        continue;
+      }
+
+      DesktopSize winFrameSize = frameWin->size();
+      DesktopRect target_rect = DesktopRect::MakeXYWH(win_rect.x,
+                                                      win_rect.y,
+                                                      winFrameSize.width(),
+                                                      winFrameSize.height());
+
+      // bitblt into background frame
+      frame->CopyPixelsFrom(*frameWin, DesktopVector(0, 0), target_rect);
+    }
+  }
+
+  if (children) {
+    XFree(children);
+  }
+
+  // trigger event
+  if (callback_) {
+    callback_->OnCaptureCompleted(error_trap.GetLastErrorAndDisable() != 0 ?
+                                  NULL :
+                                  frame.release());
+  }
+}
+
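+// Strategy: capture the full screen, then paint the background region black
+// and the masked foreground region yellow, leaving only the selected
+// application's windows visible.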
+void AppCapturerLinux::CaptureSample(const DesktopRegion& region) {
+  XErrorTrap error_trap(GetDisplay());
+
+  // Capture the entire screen (the root window).
+  screen_capturer_proxy_.Capture(region);
+  DesktopFrame* frame = screen_capturer_proxy_.GetFrame().get();
+  if (frame) {
+    // calculate app visual/foreground region
+    UpdateRegions();
+
+    // TODO: background/foreground mask colors should be configurable; see Bug 1054503
+    // fill background with black
+    FillDesktopFrameRegionWithColor(frame, rgn_background_, 0xFF000000);
+
+    // fill foreground with yellow
+    FillDesktopFrameRegionWithColor(frame, rgn_mask_, 0xFFFFFF00);
+  }
+
+  // trigger event
+  if (callback_) {
+    callback_->OnCaptureCompleted(error_trap.GetLastErrorAndDisable() != 0 ?
+                                  NULL :
+                                  screen_capturer_proxy_.GetFrame().release());
+  }
+}
+
+void AppCapturerLinux::FillDesktopFrameRegionWithColor(DesktopFrame* pDesktopFrame, Region rgn, uint32_t color) {
+  XErrorTrap error_trap(GetDisplay());
+
+  if (!pDesktopFrame) {
+    return;
+  }
+  if (XEmptyRegion(rgn)) {
+    return;
+  }
+
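+  // Region is opaque in Xlib; including X11/Xregion.h exposes the internal
+  // REGION layout so we can walk its rectangle list directly.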
+  REGION* st_rgn = (REGION*)rgn;
+  if (st_rgn && st_rgn->numRects > 0) {
+    for (short i = 0; i < st_rgn->numRects; i++) {
+      for (short j = st_rgn->rects[i].y1; j < st_rgn->rects[i].y2; j++) {
+        uint32_t* dst_pos = reinterpret_cast<uint32_t*>(pDesktopFrame->data() + pDesktopFrame->stride() * j);
+        for (short k = st_rgn->rects[i].x1; k < st_rgn->rects[i].x2; k++) {
+          dst_pos[k] = color;
+        }
+      }
+    }
+  }
+}
+
+bool AppCapturerLinux::UpdateRegions() {
+  XErrorTrap error_trap(GetDisplay());
+
+  XSubtractRegion(rgn_visual_, rgn_visual_, rgn_visual_);
+  XSubtractRegion(rgn_mask_, rgn_mask_, rgn_mask_);
+  WindowUtilX11 window_util_x11(x_display_);
+  int num_screens = XScreenCount(GetDisplay());
+  for (int screen = 0; screen < num_screens; ++screen) {
+    int nScreenCX = DisplayWidth(GetDisplay(), screen);
+    int nScreenCY = DisplayHeight(GetDisplay(), screen);
+
+    XRectangle screen_rect;
+    screen_rect.x = 0;
+    screen_rect.y = 0;
+    screen_rect.width = nScreenCX;
+    screen_rect.height = nScreenCY;
+
+    XUnionRectWithRegion(&screen_rect, rgn_background_, rgn_background_);
+    XXorRegion(rgn_mask_, rgn_mask_, rgn_mask_);
+    XXorRegion(rgn_visual_, rgn_visual_, rgn_visual_);
+
+    ::Window root_window = XRootWindow(GetDisplay(), screen);
+    ::Window parent;
+    ::Window root_return;
+    ::Window *children;
+    unsigned int num_children;
+    int status = XQueryTree(GetDisplay(), root_window, &root_return, &parent, &children, &num_children);
+    if (status == 0) {
+      LOG(LS_ERROR) << "Failed to query for child windows for screen " << screen;
+      continue;
+    }
+    for (unsigned int i = 0; i < num_children; ++i) {
+      ::Window app_window = window_util_x11.GetApplicationWindow(children[i]);
+      if (!app_window) {
+        continue;
+      }
+
+      // Get window region
+      XRectangle win_rect;
+      window_util_x11.GetWindowRect(app_window, win_rect, true);
+      if (win_rect.width <= 0 || win_rect.height <= 0) {
+        continue;
+      }
+
+      Region win_rgn = XCreateRegion();
+      XUnionRectWithRegion(&win_rect, win_rgn, win_rgn);
+      // update rgn_visual_ , rgn_mask_,
+      unsigned int processId = window_util_x11.GetWindowProcessID(app_window);
+      if (processId != 0 && processId == selected_process_) {
+        XUnionRegion(rgn_visual_, win_rgn, rgn_visual_);
+        XSubtractRegion(rgn_mask_, win_rgn, rgn_mask_);
+      } else {
+        Region win_rgn_intersect = XCreateRegion();
+        XIntersectRegion(rgn_visual_, win_rgn, win_rgn_intersect);
+
+        XSubtractRegion(rgn_visual_, win_rgn_intersect, rgn_visual_);
+        XUnionRegion(win_rgn_intersect, rgn_mask_, rgn_mask_);
+
+        if (win_rgn_intersect) {
+          XDestroyRegion(win_rgn_intersect);
+        }
+      }
+      if (win_rgn) {
+        XDestroyRegion(win_rgn);
+      }
+    }
+
+    if (children) {
+      XFree(children);
+    }
+  }
+
+  XSubtractRegion(rgn_background_, rgn_visual_, rgn_background_);
+
+  return true;
+}
+
+}  // namespace
+
+// static
+AppCapturer* AppCapturer::Create(const DesktopCaptureOptions& options) {
+  return new AppCapturerLinux(options);
+}
+
+}  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture.gypi
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture.gypi
@@ -2,134 +2,190 @@
 #
 # Use of this source code is governed by a BSD-style license
 # that can be found in the LICENSE file in the root of the source
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
+  'variables': {
+    'multi_monitor_screenshare%': 0,
+  },
   'targets': [
     {
       'target_name': 'desktop_capture',
       'type': 'static_library',
       'dependencies': [
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
         '<(webrtc_root)/base/base.gyp:rtc_base',
       ],
       'sources': [
         "desktop_and_cursor_composer.cc",
         "desktop_and_cursor_composer.h",
         "desktop_capture_types.h",
         "desktop_capturer.h",
         "desktop_frame.cc",
         "desktop_frame.h",
-        "desktop_frame_win.cc",
-        "desktop_frame_win.h",
         "desktop_geometry.cc",
         "desktop_geometry.h",
         "desktop_capture_options.h",
         "desktop_capture_options.cc",
         "desktop_capturer.h",
         "desktop_region.cc",
         "desktop_region.h",
         "differ.cc",
         "differ.h",
         "differ_block.cc",
         "differ_block.h",
-        "mac/desktop_configuration.h",
-        "mac/desktop_configuration.mm",
-        "mac/desktop_configuration_monitor.h",
-        "mac/desktop_configuration_monitor.cc",
-        "mac/full_screen_chrome_window_detector.cc",
-        "mac/full_screen_chrome_window_detector.h",
-        "mac/scoped_pixel_buffer_object.cc",
-        "mac/scoped_pixel_buffer_object.h",
-        "mac/window_list_utils.cc",
-        "mac/window_list_utils.h",
+#        "mac/full_screen_chrome_window_detector.cc",
+#        "mac/full_screen_chrome_window_detector.h",
+#        "mac/window_list_utils.cc",
+#        "mac/window_list_utils.h",
         "mouse_cursor.cc",
         "mouse_cursor.h",
         "mouse_cursor_monitor.h",
-        "mouse_cursor_monitor_mac.mm",
-        "mouse_cursor_monitor_win.cc",
-        "mouse_cursor_monitor_x11.cc",
         "screen_capture_frame_queue.cc",
         "screen_capture_frame_queue.h",
         "screen_capturer.cc",
         "screen_capturer.h",
         "screen_capturer_helper.cc",
         "screen_capturer_helper.h",
-        "screen_capturer_mac.mm",
-        "screen_capturer_win.cc",
-        "screen_capturer_x11.cc",
         "shared_desktop_frame.cc",
         "shared_desktop_frame