Bug 1198458: Rollup of changes previously applied to media/webrtc/trunk/webrtc and fixes to those rs=jesup r=froyd,jib,bwc,jesup,gcp,sotaro,pkerr,pehrsons
author: Randell Jesup <rjesup@jesup.org>
date: Wed, 18 Nov 2015 15:03:25 -0500
changeset 309817 ae32ad44ce1c048258a881dbaa7727def08c53a4
parent 309816 f62c9e49a44fb473b5c8701776d8f5ae3da22cbc
child 309818 d02f5e8ca0dc36845881530734049a064904c002
push id: 7649
push user: martin.thomson@gmail.com
push date: Thu, 19 Nov 2015 00:06:17 +0000
reviewers: jesup, froyd, jib, bwc, jesup, gcp, sotaro, pkerr, pehrsons
bugs: 1198458
milestone: 45.0a1
Bug 1198458: Rollup of changes previously applied to media/webrtc/trunk/webrtc and fixes to those rs=jesup r=froyd,jib,bwc,jesup,gcp,sotaro,pkerr,pehrsons

Landing as one rolled-up patch to avoid breaking regression tests, and in keeping with previous WebRTC imports. Broken-out parts that needed review are on the bug.
build/clang-plugin/clang-plugin.cpp
build/gyp.mozbuild
dom/media/moz.build
dom/media/omx/OMXCodecWrapper.cpp
dom/media/omx/OMXCodecWrapper.h
dom/media/systemservices/CamerasChild.cpp
dom/media/systemservices/CamerasChild.h
dom/media/systemservices/CamerasParent.cpp
dom/media/systemservices/CamerasParent.h
dom/media/systemservices/LoadManager.cpp
dom/media/systemservices/MediaUtils.h
dom/media/systemservices/PCameras.ipdl
dom/media/systemservices/moz.build
dom/media/tests/mochitest/mochitest.ini
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.h
dom/media/webrtc/MediaEngineWebRTC.cpp
dom/media/webrtc/moz.build
ipc/glue/moz.build
media/mtransport/nr_socket_prsock.cpp
media/webrtc/moz.build
media/webrtc/signaling/signaling.gyp
media/webrtc/signaling/src/common/NullTransport.h
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.h
media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp
media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h
media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp
media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h
media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp
media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
media/webrtc/signaling/test/mediaconduit_unittests.cpp
media/webrtc/signaling/test/mediapipeline_unittest.cpp
media/webrtc/trunk/build/android/cpufeatures.gypi
media/webrtc/trunk/peerconnection.gyp
media/webrtc/trunk/webrtc/base/base.gyp
media/webrtc/trunk/webrtc/base/base64.cc
media/webrtc/trunk/webrtc/base/checks.cc
media/webrtc/trunk/webrtc/base/macutils.cc
media/webrtc/trunk/webrtc/base/macutils.h
media/webrtc/trunk/webrtc/base/scoped_ptr.h
media/webrtc/trunk/webrtc/base/sigslot.h
media/webrtc/trunk/webrtc/base/stringutils.h
media/webrtc/trunk/webrtc/base/thread_checker_impl.cc
media/webrtc/trunk/webrtc/build/arm_neon.gypi
media/webrtc/trunk/webrtc/build/common.gypi
media/webrtc/trunk/webrtc/build/merge_libs.gyp
media/webrtc/trunk/webrtc/common_audio/audio_ring_buffer.h
media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
media/webrtc/trunk/webrtc/common_audio/wav_file.cc
media/webrtc/trunk/webrtc/common_audio/wav_header.cc
media/webrtc/trunk/webrtc/common_types.h
media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
media/webrtc/trunk/webrtc/common_video/plane.cc
media/webrtc/trunk/webrtc/engine_configurations.h
media/webrtc/trunk/webrtc/modules/audio_coding/audio_coding.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/audio_decoder.h
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq.gypi
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_template.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_utility_android.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.h
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
media/webrtc/trunk/webrtc/modules/audio_device/gonk/audio_manager.cc
media/webrtc/trunk/webrtc/modules/audio_device/gonk/audio_manager.h
media/webrtc/trunk/webrtc/modules/audio_device/include/audio_device.h
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/mac/audio_device_mac.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.cc
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.h
media/webrtc/trunk/webrtc/modules/audio_device/sndio/audio_device_sndio.cc
media/webrtc/trunk/webrtc/modules/audio_device/sndio/audio_device_sndio.h
media/webrtc/trunk/webrtc/modules/audio_device/sndio/audio_device_utility_sndio.cc
media/webrtc/trunk/webrtc/modules/audio_device/sndio/audio_device_utility_sndio.h
media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_internal.h
media/webrtc/trunk/webrtc/modules/audio_processing/aec/echo_cancellation.c
media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
media/webrtc/trunk/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.h
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_mac.mm
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_null.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_unittest.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture.gypi
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture_types.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info_null.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/differ_block.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/mac/desktop_device_info_mac.h
media/webrtc/trunk/webrtc/modules/desktop_capture/mac/desktop_device_info_mac.mm
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capture_utils.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/win_shared.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/win_shared.h
media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.h
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.h
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.h
media/webrtc/trunk/webrtc/modules/interface/module_common_types.h
media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
media/webrtc/trunk/webrtc/modules/utility/source/process_thread_impl.cc
media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/CaptureCapabilityAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.h
media/webrtc/trunk/webrtc/modules/video_capture/include/video_capture.h
media/webrtc/trunk/webrtc/modules/video_capture/include/video_capture_factory.h
media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/codecs/h264/include/h264.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/interface/video_coding.h
media/webrtc/trunk/webrtc/modules/video_coding/main/interface/video_coding_defines.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/content_metrics_processing.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/content_metrics_processing.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/decoding_state.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_decoder.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_estimator.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/media_optimization.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/media_optimization.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/packet.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/qm_select.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/qm_select.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/receiver.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/receiver.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_coding_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_coding_impl.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_receiver.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_sender.cc
media/webrtc/trunk/webrtc/modules/video_processing/main/source/content_analysis.cc
media/webrtc/trunk/webrtc/modules/video_processing/main/source/content_analysis.h
media/webrtc/trunk/webrtc/modules/video_processing/main/source/content_analysis_sse2.cc
media/webrtc/trunk/webrtc/modules/video_processing/video_processing.gypi
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViEAndroidGLES20.java
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViERenderer.java
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java
media/webrtc/trunk/webrtc/system_wrappers/cpu_features_webrtc.gyp
media/webrtc/trunk/webrtc/system_wrappers/interface/asm_defines.h
media/webrtc/trunk/webrtc/system_wrappers/interface/thread_wrapper.h
media/webrtc/trunk/webrtc/system_wrappers/interface/tick_util.h
media/webrtc/trunk/webrtc/system_wrappers/interface/trace.h
media/webrtc/trunk/webrtc/system_wrappers/source/atomic32_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/clock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_features.cc
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_features_android.c
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_info.cc
media/webrtc/trunk/webrtc/system_wrappers/source/droid-cpu-features.c
media/webrtc/trunk/webrtc/system_wrappers/source/droid-cpu-features.h
media/webrtc/trunk/webrtc/system_wrappers/source/event_win.cc
media/webrtc/trunk/webrtc/system_wrappers/source/rw_lock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/spreadsortlib/spreadsort.hpp
media/webrtc/trunk/webrtc/system_wrappers/source/thread.cc
media/webrtc/trunk/webrtc/system_wrappers/source/thread_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/thread_win.cc
media/webrtc/trunk/webrtc/system_wrappers/source/thread_win.h
media/webrtc/trunk/webrtc/system_wrappers/source/tick_util.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/system_wrappers.gyp
media/webrtc/trunk/webrtc/test/channel_transport/udp_transport_impl.cc
media/webrtc/trunk/webrtc/tools/loopback_test/adapter.js
media/webrtc/trunk/webrtc/typedefs.h
media/webrtc/trunk/webrtc/video/receive_statistics_proxy.cc
media/webrtc/trunk/webrtc/video/receive_statistics_proxy.h
media/webrtc/trunk/webrtc/video_engine/browser_capture_impl.h
media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.h
media/webrtc/trunk/webrtc/video_engine/include/vie_base.h
media/webrtc/trunk/webrtc/video_engine/include/vie_capture.h
media/webrtc/trunk/webrtc/video_engine/include/vie_codec.h
media/webrtc/trunk/webrtc/video_engine/include/vie_render.h
media/webrtc/trunk/webrtc/video_engine/include/vie_rtp_rtcp.h
media/webrtc/trunk/webrtc/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
media/webrtc/trunk/webrtc/video_engine/vie_base_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_base_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_capturer.cc
media/webrtc/trunk/webrtc/video_engine/vie_capturer.h
media/webrtc/trunk/webrtc/video_engine/vie_channel.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel.h
media/webrtc/trunk/webrtc/video_engine/vie_channel_group.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel_group.h
media/webrtc/trunk/webrtc/video_engine/vie_channel_manager.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel_manager.h
media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_defines.h
media/webrtc/trunk/webrtc/video_engine/vie_encoder.cc
media/webrtc/trunk/webrtc/video_engine/vie_encoder.h
media/webrtc/trunk/webrtc/video_engine/vie_frame_provider_base.cc
media/webrtc/trunk/webrtc/video_engine/vie_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_input_manager.cc
media/webrtc/trunk/webrtc/video_engine/vie_input_manager.h
media/webrtc/trunk/webrtc/video_engine/vie_receiver.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.h
media/webrtc/trunk/webrtc/video_engine/vie_rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_shared_data.cc
media/webrtc/trunk/webrtc/video_engine/vie_shared_data.h
media/webrtc/trunk/webrtc/video_engine/vie_sync_module.cc
media/webrtc/trunk/webrtc/voice_engine/channel.cc
media/webrtc/trunk/webrtc/voice_engine/channel.h
media/webrtc/trunk/webrtc/voice_engine/include/mock/fake_voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_hardware.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_rtp_rtcp.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_video_sync.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/output_mixer.h
media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/external_media_test.cc
media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/hardware_before_streaming_test.cc
media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/hardware_test.cc
media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/video_sync_test.cc
media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/utility_unittest.cc
media/webrtc/trunk/webrtc/voice_engine/voe_base_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_base_impl.h
media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.h
media/webrtc/trunk/webrtc/voice_engine/voe_hardware_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_hardware_impl.h
media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/voice_engine/voe_video_sync_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_video_sync_impl.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine.gyp
media/webrtc/trunk/webrtc/voice_engine/voice_engine_defines.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine_impl.cc
mobile/android/base/moz.build
--- a/build/clang-plugin/clang-plugin.cpp
+++ b/build/clang-plugin/clang-plugin.cpp
@@ -151,16 +151,17 @@ bool isInIgnoredNamespaceForImplicitCtor
   if (name == "") {
     return false;
   }
 
   return name == "std" ||               // standard C++ lib
          name == "__gnu_cxx" ||         // gnu C++ lib
          name == "boost" ||             // boost
          name == "webrtc" ||            // upstream webrtc
+         name == "rtc" ||               // upstream webrtc 'base' package
          name.substr(0, 4) == "icu_" || // icu
          name == "google" ||            // protobuf
          name == "google_breakpad" ||   // breakpad
          name == "soundtouch" ||        // libsoundtouch
          name == "stagefright" ||       // libstagefright
          name == "MacFileUtilities" ||  // MacFileUtilities
          name == "dwarf2reader" ||      // dwarf2reader
          name == "arm_ex_to_module" ||  // arm_ex_to_module
--- a/build/gyp.mozbuild
+++ b/build/gyp.mozbuild
@@ -1,15 +1,17 @@
 # -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 gyp_vars = {
+    'lsan': 0,
+    'asan': 0,
     'build_with_mozilla': 1,
     'build_with_chromium': 0,
     'use_official_google_api_keys': 0,
     'have_clock_monotonic': 1 if CONFIG['HAVE_CLOCK_MONOTONIC'] else 0,
     'have_ethtool_cmd_speed_hi': 1 if CONFIG['MOZ_WEBRTC_HAVE_ETHTOOL_SPEED_HI'] else 0,
     'include_alsa_audio': 1 if CONFIG['MOZ_ALSA'] else 0,
     'include_pulse_audio': 1 if CONFIG['MOZ_PULSEAUDIO'] else 0,
     # basic stuff for everything
@@ -22,24 +24,24 @@ gyp_vars = {
     # use_system_lib* still seems to be in use in trunk/build
     'use_system_libjpeg': 0,
     'use_system_libvpx': 0,
     'build_json': 0,
     'build_libjpeg': 0,
     'build_libyuv': 0,
     'build_libvpx': 0,
     'build_ssl': 0,
+    'build_icu': 0,
+    'build_opus': 0,
     'libyuv_dir': '/media/libyuv',
     'yuv_disable_avx2': 0 if CONFIG['HAVE_X86_AVX2'] else 1,
     # don't use openssl
     'use_openssl': 0,
 
-    # saves 4MB when webrtc_trace is off
-    'enable_lazy_trace_alloc': 1 if CONFIG['RELEASE_BUILD'] else 0,
-
     'use_x11': 1 if CONFIG['MOZ_X11'] else 0,
     'use_glib': 1 if CONFIG['GLIB_LIBS'] else 0,
 
      # turn off mandatory use of NEON and instead use NEON detection
     'arm_neon': 0,
     'arm_neon_optional': 1,
 
     'moz_widget_toolkit_gonk': 0,
@@ -58,17 +60,19 @@ gyp_vars = {
     # Enable and force use of hardware AEC
     'hardware_aec_ns': 1 if CONFIG['MOZ_WEBRTC_HARDWARE_AEC_NS'] else 0,
 
     # codec enable/disables:
     'include_g711': 1,
     'include_opus': 1,
     'include_g722': 1,
     'include_ilbc': 0,
-    'include_isac': 0,
+    # We turn on ISAC because the AGC uses parts of it, and depend on the
+    # linker to throw away unneeded bits.
+    'include_isac': 1,
     'include_pcm16b': 1,
 }
 
 os = CONFIG['OS_TARGET']
 
 if os == 'WINNT':
     gyp_vars.update(
         MSVS_VERSION=CONFIG['_MSVS_VERSION'],
--- a/dom/media/moz.build
+++ b/dom/media/moz.build
@@ -296,16 +296,21 @@ if CONFIG['MOZ_DIRECTSHOW']:
 if CONFIG['MOZ_WEBRTC']:
     LOCAL_INCLUDES += [
         '/media/webrtc/signaling/src/common',
         '/media/webrtc/trunk',
     ]
 
 DEFINES['MOZILLA_INTERNAL_API'] = True
 
+if CONFIG['OS_TARGET'] == 'WINNT':
+    DEFINES['WEBRTC_WIN'] = True
+else:
+    DEFINES['WEBRTC_POSIX'] = True
+
 if CONFIG['MOZ_OMX_DECODER']:
     DEFINES['MOZ_OMX_DECODER'] = True
 
 if CONFIG['ANDROID_VERSION'] > '15':
     DEFINES['MOZ_OMX_WEBM_DECODER'] = True
 
 if CONFIG['MOZ_GONK_MEDIACODEC']:
     DEFINES['MOZ_GONK_MEDIACODEC'] = True
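
These platform defines let Gecko code that includes WebRTC headers see the same configuration the library itself was built with. A minimal sketch of the gating pattern such headers use (illustrative, not a specific header from the tree):

    #if defined(WEBRTC_WIN)
    // Windows implementations (e.g. event and thread wrappers) get compiled.
    typedef void* PlatformHandle;
    #elif defined(WEBRTC_POSIX)
    // pthread-based implementations get compiled.
    typedef unsigned long PlatformHandle;
    #else
    #error "Either WEBRTC_WIN or WEBRTC_POSIX must be defined"
    #endif
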
--- a/dom/media/omx/OMXCodecWrapper.cpp
+++ b/dom/media/omx/OMXCodecWrapper.cpp
@@ -46,31 +46,42 @@ enum BufferState
   BUFFER_OK,
   BUFFER_FAIL,
   WAIT_FOR_NEW_BUFFER
 };
 
 bool
 OMXCodecReservation::ReserveOMXCodec()
 {
-  if (mClient) {
-    // Already tried reservation.
-    return false;
+  if (!mClient) {
+    mClient = new mozilla::MediaSystemResourceClient(mType);
+  } else {
+    if (mOwned) {
+      //CODEC_ERROR("OMX Reservation: (%d) already owned", (int) mType);
+      return true;
+    }
+    //CODEC_ERROR("OMX Reservation: (%d) already NOT owned", (int) mType);
   }
-  mClient = new mozilla::MediaSystemResourceClient(mType);
-  return mClient->AcquireSyncNoWait(); // don't wait if resrouce is not available
+  mOwned = mClient->AcquireSyncNoWait(); // don't wait if resource is not available
+  //CODEC_ERROR("OMX Reservation: (%d) Acquire was %s", (int) mType, mOwned ? "Successful" : "Failed");
+  return mOwned;
 }
 
 void
 OMXCodecReservation::ReleaseOMXCodec()
 {
   if (!mClient) {
     return;
   }
-  mClient->ReleaseResource();
+  //CODEC_ERROR("OMX Reservation: Releasing resource: (%d) %s", (int) mType, mOwned ? "Owned" : "Not owned");
+  if (mOwned) {
+    mClient->ReleaseResource();
+    mClient = nullptr;
+    mOwned = false;
+  }
 }
 
 OMXAudioEncoder*
 OMXCodecWrapper::CreateAACEncoder()
 {
   nsAutoPtr<OMXAudioEncoder> aac(new OMXAudioEncoder(CodecType::AAC_ENC));
   // Return the object only when media codec is valid.
   NS_ENSURE_TRUE(aac->IsValid(), nullptr);
--- a/dom/media/omx/OMXCodecWrapper.h
+++ b/dom/media/omx/OMXCodecWrapper.h
@@ -21,17 +21,17 @@
 #include <speex/speex_resampler.h>
 
 namespace android {
 
 // Wrapper class for managing HW codec reservations
 class OMXCodecReservation : public RefBase
 {
 public:
-  OMXCodecReservation(bool aEncoder)
+  OMXCodecReservation(bool aEncoder) : mOwned(false)
   {
     mType = aEncoder ? mozilla::MediaSystemResourceType::VIDEO_ENCODER :
             mozilla::MediaSystemResourceType::VIDEO_DECODER;
   }
 
   virtual ~OMXCodecReservation()
   {
     ReleaseOMXCodec();
@@ -40,16 +40,17 @@ public:
   /** Reserve the Encode or Decode resource for this instance */
   virtual bool ReserveOMXCodec();
 
   /** Release the Encode or Decode resource for this instance */
   virtual void ReleaseOMXCodec();
 
 private:
   mozilla::MediaSystemResourceType mType;
+  bool mOwned;  // We already own this resource
 
   RefPtr<mozilla::MediaSystemResourceClient> mClient;
 };
 
 
 class OMXAudioEncoder;
 class OMXVideoEncoder;
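
A hypothetical usage sketch of the new mOwned tracking: re-reserving is now a cheap success instead of a failure, and release is a no-op unless the reservation was actually acquired.

    android::sp<android::OMXCodecReservation> reservation =
        new android::OMXCodecReservation(true);  // encoder reservation
    if (reservation->ReserveOMXCodec()) {  // acquires VIDEO_ENCODER, sets mOwned
      reservation->ReserveOMXCodec();      // second call: returns true, no re-acquire
      // ... encode ...
      reservation->ReleaseOMXCodec();      // owned, so releases and clears state
    }
    reservation->ReleaseOMXCodec();        // not owned: safe no-op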
 
--- a/dom/media/systemservices/CamerasChild.cpp
+++ b/dom/media/systemservices/CamerasChild.cpp
@@ -642,17 +642,17 @@ CamerasChild::Shutdown()
   CamerasSingleton::Child() = nullptr;
   CamerasSingleton::Thread() = nullptr;
 }
 
 bool
 CamerasChild::RecvDeliverFrame(const int& capEngine,
                                const int& capId,
                                mozilla::ipc::Shmem&& shmem,
-                               const int& size,
+                               const size_t& size,
                                const uint32_t& time_stamp,
                                const int64_t& ntp_time,
                                const int64_t& render_time)
 {
   MutexAutoLock lock(mCallbackMutex);
   CaptureEngine capEng = static_cast<CaptureEngine>(capEngine);
   if (Callback(capEng, capId)) {
     unsigned char* image = shmem.get<unsigned char>();
--- a/dom/media/systemservices/CamerasChild.h
+++ b/dom/media/systemservices/CamerasChild.h
@@ -80,17 +80,17 @@ class CamerasChild final : public PCamer
 public:
   // We are owned by the PBackground thread only. CamerasSingleton
   // takes a non-owning reference.
   NS_INLINE_DECL_REFCOUNTING(CamerasChild)
 
   // IPC messages received on the PBackground thread
   // these are the actual callbacks with data
   virtual bool RecvDeliverFrame(const int&, const int&, mozilla::ipc::Shmem&&,
-                                const int&, const uint32_t&, const int64_t&,
+                                const size_t&, const uint32_t&, const int64_t&,
                                 const int64_t&) override;
   virtual bool RecvFrameSizeChange(const int&, const int&,
                                    const int& w, const int& h) override;
 
   // these are response messages to our outgoing requests
   virtual bool RecvReplyNumberOfCaptureDevices(const int&) override;
   virtual bool RecvReplyNumberOfCapabilities(const int&) override;
   virtual bool RecvReplyAllocateCaptureDevice(const int&) override;
@@ -154,19 +154,19 @@ private:
   // The monitor below isn't sufficient for this, as it will drop
   // the lock when Wait-ing for a response, allowing us to send a new
   // request. The Notify on receiving the response will then unblock
   // both waiters and one will be guaranteed to get the wrong result.
   // Take this one before taking mReplyMonitor.
   Mutex mRequestMutex;
   // Hold to wait for an async response to our calls
   Monitor mReplyMonitor;
-  // Async resposne valid?
+  // Async response valid?
   bool mReceivedReply;
-  // Aynsc reponses data contents;
+  // Async response data contents
   bool mReplySuccess;
   int mReplyInteger;
   webrtc::CaptureCapability mReplyCapability;
   nsCString mReplyDeviceName;
   nsCString mReplyDeviceID;
 };
 
 } // namespace camera
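
A minimal sketch (with a hypothetical request) of the locking discipline described above: mRequestMutex serializes whole request/reply exchanges, because Wait() on mReplyMonitor releases only the monitor, and without the outer mutex a second request could be sent while the first is waiting and consume its reply.

    int CamerasChild::SomeSyncRequest(int aArg)  // hypothetical method
    {
      MutexAutoLock requestLock(mRequestMutex);  // one exchange in flight at a time
      MonitorAutoLock replyLock(mReplyMonitor);
      mReceivedReply = false;
      SendSomeRequest(aArg);                     // hypothetical async IPC send
      while (!mReceivedReply) {
        mReplyMonitor.Wait();                    // drops mReplyMonitor, not mRequestMutex
      }
      return mReplySuccess ? mReplyInteger : -1;
    }
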
--- a/dom/media/systemservices/CamerasParent.cpp
+++ b/dom/media/systemservices/CamerasParent.cpp
@@ -11,16 +11,18 @@
 
 #include "mozilla/Assertions.h"
 #include "mozilla/unused.h"
 #include "mozilla/Logging.h"
 #include "mozilla/ipc/BackgroundParent.h"
 #include "nsThreadUtils.h"
 #include "nsXPCOM.h"
 
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+
 #undef LOG
 #undef LOG_ENABLED
 mozilla::LazyLogModule gCamerasParentLog("CamerasParent");
 #define LOG(args) MOZ_LOG(gCamerasParentLog, mozilla::LogLevel::Debug, args)
 #define LOG_ENABLED() MOZ_LOG_TEST(gCamerasParentLog, mozilla::LogLevel::Debug)
 
 namespace mozilla {
 namespace camera {
@@ -85,17 +87,17 @@ CallbackHelper::FrameSizeChange(unsigned
 
 class DeliverFrameRunnable : public nsRunnable {
 public:
   DeliverFrameRunnable(CamerasParent *aParent,
                        CaptureEngine engine,
                        int cap_id,
                        ShmemBuffer buffer,
                        unsigned char* altbuffer,
-                       int size,
+                       size_t size,
                        uint32_t time_stamp,
                        int64_t ntp_time,
                        int64_t render_time)
     : mParent(aParent), mCapEngine(engine), mCapId(cap_id), mBuffer(Move(buffer)),
       mSize(size), mTimeStamp(time_stamp), mNtpTime(ntp_time),
       mRenderTime(render_time) {
     // No ShmemBuffer (of the right size) was available, so make an
     // extra buffer here.  We have no idea when we are going to run and
@@ -131,17 +133,17 @@ public:
   }
 
 private:
   RefPtr<CamerasParent> mParent;
   CaptureEngine mCapEngine;
   int mCapId;
   ShmemBuffer mBuffer;
   mozilla::UniquePtr<unsigned char[]> mAlternateBuffer;
-  int mSize;
+  size_t mSize;
   uint32_t mTimeStamp;
   int64_t mNtpTime;
   int64_t mRenderTime;
   int mResult;
 };
 
 NS_IMPL_ISUPPORTS(CamerasParent, nsIObserver)
 
@@ -227,17 +229,17 @@ CamerasParent::StopVideoCapture()
   }
 }
 
 int
 CamerasParent::DeliverFrameOverIPC(CaptureEngine cap_engine,
                                    int cap_id,
                                    ShmemBuffer buffer,
                                    unsigned char* altbuffer,
-                                   int size,
+                                   size_t size,
                                    uint32_t time_stamp,
                                    int64_t ntp_time,
                                    int64_t render_time)
 {
   // No ShmemBuffers were available, so construct one now of the right size
   // and copy into it. That is an extra copy, but we expect this to be
   // the exceptional case, because we just assured the next call *will* have a
   // buffer of the right size.
@@ -276,17 +278,17 @@ CamerasParent::DeliverFrameOverIPC(Captu
 ShmemBuffer
 CamerasParent::GetBuffer(size_t aSize)
 {
   return mShmemPool.GetIfAvailable(aSize);
 }
 
 int
 CallbackHelper::DeliverFrame(unsigned char* buffer,
-                             int size,
+                             size_t size,
                              uint32_t time_stamp,
                              int64_t ntp_time,
                              int64_t render_time,
                              void *handle)
 {
   // Get a shared memory buffer to copy the frame data into
   ShmemBuffer shMemBuffer = mParent->GetBuffer(size);
   if (!shMemBuffer.Valid()) {
@@ -305,16 +307,27 @@ CallbackHelper::DeliverFrame(unsigned ch
                              Move(shMemBuffer), buffer, size, time_stamp,
                              ntp_time, render_time);
   MOZ_ASSERT(mParent);
   nsIThread* thread = mParent->GetBackgroundThread();
   MOZ_ASSERT(thread != nullptr);
   thread->Dispatch(runnable, NS_DISPATCH_NORMAL);
   return 0;
 }
+// XXX!!! FIX THIS -- we should move to pure DeliverI420Frame
+int
+CallbackHelper::DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame)
+{
+  return DeliverFrame(const_cast<uint8_t*>(webrtc_frame.buffer(webrtc::kYPlane)),
+                      CalcBufferSize(webrtc::kI420, webrtc_frame.width(), webrtc_frame.height()),
+                      webrtc_frame.timestamp(),
+                      webrtc_frame.ntp_time_ms(),
+                      webrtc_frame.render_time_ms(),
+                      (void*) webrtc_frame.native_handle());
+}
 
 bool
 CamerasParent::RecvReleaseFrame(mozilla::ipc::Shmem&& s) {
   mShmemPool.Put(ShmemBuffer(s));
   return true;
 }
 
 bool
--- a/dom/media/systemservices/CamerasParent.h
+++ b/dom/media/systemservices/CamerasParent.h
@@ -36,21 +36,22 @@ public:
   CallbackHelper(CaptureEngine aCapEng, int aCapId, CamerasParent *aParent)
     : mCapEngine(aCapEng), mCapturerId(aCapId), mParent(aParent) {};
 
   // ViEExternalRenderer implementation. These callbacks end up
   // running on the VideoCapture thread.
   virtual int FrameSizeChange(unsigned int w, unsigned int h,
                               unsigned int streams) override;
   virtual int DeliverFrame(unsigned char* buffer,
-                           int size,
+                           size_t size,
                            uint32_t time_stamp,
                            int64_t ntp_time,
                            int64_t render_time,
                            void *handle) override;
+  virtual int DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame) override;
   virtual bool IsTextureSupported() override { return false; };
 
   friend CamerasParent;
 
 private:
   CaptureEngine mCapEngine;
   int mCapturerId;
   CamerasParent *mParent;
@@ -103,17 +104,17 @@ public:
                               || !mWebRTCAlive; };
   ShmemBuffer GetBuffer(size_t aSize);
 
   // helper to forward to the PBackground thread
   int DeliverFrameOverIPC(CaptureEngine capEng,
                           int cap_id,
                           ShmemBuffer buffer,
                           unsigned char* altbuffer,
-                          int size,
+                          size_t size,
                           uint32_t time_stamp,
                           int64_t ntp_time,
                           int64_t render_time);
 
 
   CamerasParent();
 
 protected:
--- a/dom/media/systemservices/LoadManager.cpp
+++ b/dom/media/systemservices/LoadManager.cpp
@@ -165,24 +165,16 @@ LoadManagerSingleton::LoadHasChanged(web
 }
 
 void
 LoadManagerSingleton::AddObserver(webrtc::CPULoadStateObserver * aObserver)
 {
   LOG(("LoadManager - Adding Observer"));
   MutexAutoLock lock(mLock);
   mObservers.AppendElement(aObserver);
-  if (mObservers.Length() == 1) {
-    if (!mLoadMonitor) {
-      mLoadMonitor = new LoadMonitor(mLoadMeasurementInterval);
-      mLoadMonitor->Init(mLoadMonitor);
-      mLoadMonitor->SetLoadChangeCallback(this);
-      mLastStateChange = TimeStamp::Now();
-    }
-  }
 }
 
 void
 LoadManagerSingleton::RemoveObserver(webrtc::CPULoadStateObserver * aObserver)
 {
   LOG(("LoadManager - Removing Observer"));
   MutexAutoLock lock(mLock);
   if (!mObservers.RemoveElement(aObserver)) {
--- a/dom/media/systemservices/MediaUtils.h
+++ b/dom/media/systemservices/MediaUtils.h
@@ -5,16 +5,17 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_MediaUtils_h
 #define mozilla_MediaUtils_h
 
 #include "nsAutoPtr.h"
 #include "nsThreadUtils.h"
 #include "nsIAsyncShutdown.h"
+#include "base/task.h"
 
 namespace mozilla {
 namespace media {
 
 /*
  * media::Pledge - A promise-like pattern for c++ that takes lambda functions.
  *
  * Asynchronous APIs that proxy to another thread or to the chrome process and
--- a/dom/media/systemservices/PCameras.ipdl
+++ b/dom/media/systemservices/PCameras.ipdl
@@ -22,17 +22,17 @@ struct CaptureCapability
 async protocol PCameras
 {
   manager PBackground;
 
 child:
   async FrameSizeChange(int capEngine, int cap_id, int w, int h);
   // transfers ownership of |buffer| from parent to child
   async DeliverFrame(int capEngine, int cap_id,
-                     Shmem buffer, int size, uint32_t time_stamp,
+                     Shmem buffer, size_t size, uint32_t time_stamp,
                      int64_t ntp_time, int64_t render_time);
   async ReplyNumberOfCaptureDevices(int numdev);
   async ReplyNumberOfCapabilities(int numdev);
   async ReplyAllocateCaptureDevice(int numdev);
   async ReplyGetCaptureCapability(CaptureCapability cap);
   async ReplyGetCaptureDevice(nsCString device_name, nsCString device_id);
   async ReplyFailure();
   async ReplySuccess();
--- a/dom/media/systemservices/moz.build
+++ b/dom/media/systemservices/moz.build
@@ -21,16 +21,21 @@ if CONFIG['MOZ_WEBRTC']:
         'LoadManagerFactory.cpp',
         'LoadMonitor.cpp',
         'ShmemPool.cpp',
     ]
     LOCAL_INCLUDES += [
         '/media/webrtc/signaling',
         '/media/webrtc/trunk',
     ]
+if CONFIG['OS_TARGET'] == 'WINNT':
+    DEFINES['WEBRTC_WIN'] = True
+else:
+    DEFINES['WEBRTC_POSIX'] = True
+
 
 if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('android', 'gonk'):
     EXPORTS += [
         'OpenSLESProvider.h'
     ]
     UNIFIED_SOURCES += [
         'OpenSLESProvider.cpp',
     ]
--- a/dom/media/tests/mochitest/mochitest.ini
+++ b/dom/media/tests/mochitest/mochitest.ini
@@ -130,18 +130,18 @@ skip-if = toolkit == 'gonk' # B2G emulat
 skip-if = toolkit == 'gonk' # B2G emulator is too slow to handle a two-way audio call reliably
 [test_peerConnection_offerRequiresReceiveAudio.html]
 [test_peerConnection_offerRequiresReceiveVideo.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
 [test_peerConnection_offerRequiresReceiveVideoAudio.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' || (android_version == '18' && debug) # b2g(Bug 960442, video support for WebRTC is disabled on b2g), android(Bug 1189784, timeouts on 4.3 emulator)
 [test_peerConnection_promiseSendOnly.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' || (android_version == '18' && debug) # b2g(Bug 960442, video support for WebRTC is disabled on b2g), android(Bug 1189784, timeouts on 4.3 emulator)
-[test_peerConnection_relayOnly.html]
-skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
+#[test_peerConnection_relayOnly.html]
+#skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
 [test_peerConnection_callbacks.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' || (android_version == '18' && debug) # b2g(Bug 960442, video support for WebRTC is disabled on b2g), android(Bug 1189784, timeouts on 4.3 emulator)
 [test_peerConnection_replaceTrack.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' || android_version == '18' # b2g(Bug 960442, video support for WebRTC is disabled on b2g), android(Bug 1189784, timeouts on 4.3 emulator)
 [test_peerConnection_syncSetDescription.html]
 skip-if = toolkit == 'gonk' || buildapp == 'mulet' || (android_version == '18' && debug) # b2g(Bug 960442, video support for WebRTC is disabled on b2g), android(Bug 1189784, timeouts on 4.3 emulator)
 [test_peerConnection_setLocalAnswerInHaveLocalOffer.html]
 [test_peerConnection_setLocalAnswerInStable.html]
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -271,29 +271,29 @@ MediaEngineRemoteVideoSource::FrameSizeC
   mWidth = w;
   mHeight = h;
   LOG(("MediaEngineRemoteVideoSource Video FrameSizeChange: %ux%u", w, h));
   return 0;
 }
 
 int
 MediaEngineRemoteVideoSource::DeliverFrame(unsigned char* buffer,
-                                           int size,
+                                           size_t size,
                                            uint32_t time_stamp,
                                            int64_t ntp_time,
                                            int64_t render_time,
                                            void *handle)
 {
   // Check for proper state.
   if (mState != kStarted) {
     LOG(("DeliverFrame: video not started"));
     return 0;
   }
 
-  if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
+  if ((size_t) (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2))) != size) {
     MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
     return 0;
   }
 
   // Create a video frame and append it to the track.
   RefPtr<layers::PlanarYCbCrImage> image = mImageContainer->CreatePlanarYCbCrImage();
 
   uint8_t* frame = static_cast<uint8_t*> (buffer);
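
The size check above encodes the standard I420 layout: a full-resolution Y plane plus Cb and Cr planes at half resolution in each dimension, with odd dimensions rounded up. A self-contained sketch of the arithmetic:

    #include <cstddef>

    static size_t I420BufferSize(size_t w, size_t h) {
      // Y plane, then Cb and Cr at ceil(w/2) x ceil(h/2) each.
      return w * h + 2 * (((w + 1) / 2) * ((h + 1) / 2));
    }
    // e.g. I420BufferSize(640, 480) == 307200 + 2*76800 == 460800, i.e. w*h*3/2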
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.h
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.h
@@ -33,36 +33,42 @@
 // WebRTC library includes follow
 #include "webrtc/common.h"
 #include "webrtc/video_engine/include/vie_capture.h"
 #include "webrtc/video_engine/include/vie_render.h"
 #include "CamerasChild.h"
 
 #include "NullTransport.h"
 
+namespace webrtc {
+class I420VideoFrame;
+}
+
 namespace mozilla {
 
 /**
  * The WebRTC implementation of the MediaEngine interface.
  */
 class MediaEngineRemoteVideoSource : public MediaEngineCameraVideoSource,
                                      public webrtc::ExternalRenderer
 {
 public:
   NS_DECL_THREADSAFE_ISUPPORTS
 
   // ExternalRenderer
   virtual int FrameSizeChange(unsigned int w, unsigned int h,
                               unsigned int streams) override;
   virtual int DeliverFrame(unsigned char* buffer,
-                           int size,
+                           size_t size,
                            uint32_t time_stamp,
                            int64_t ntp_time,
                            int64_t render_time,
                            void *handle) override;
+  // XXX!!!! FIX THIS
+  virtual int DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame) override { return 0; };
   virtual bool IsTextureSupported() override { return false; };
 
   // MediaEngineCameraVideoSource
   MediaEngineRemoteVideoSource(int aIndex, mozilla::camera::CaptureEngine aCapEngine,
                                dom::MediaSourceEnum aMediaSource,
                                const char* aMonitorName = "RemoteVideo.Monitor");
 
   virtual nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
--- a/dom/media/webrtc/MediaEngineWebRTC.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTC.cpp
@@ -246,17 +246,17 @@ MediaEngineWebRTC::EnumerateAudioDevices
 #ifdef MOZ_WIDGET_ANDROID
   jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
 
   // get the JVM
   JavaVM* jvm;
   JNIEnv* const env = jni::GetEnvForThread();
   MOZ_ALWAYS_TRUE(!env->GetJavaVM(&jvm));
 
-  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, env, (void*)context) != 0) {
+  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
     LOG(("VoiceEngine:SetAndroidObjects Failed"));
     return;
   }
 #endif
 
   if (!mVoiceEngine) {
     mVoiceEngine = webrtc::VoiceEngine::Create();
     if (!mVoiceEngine) {
--- a/dom/media/webrtc/moz.build
+++ b/dom/media/webrtc/moz.build
@@ -15,16 +15,20 @@ XPIDL_MODULE = 'content_webrtc'
 EXPORTS += [
     'MediaEngine.h',
     'MediaEngineCameraVideoSource.h',
     'MediaEngineDefault.h',
     'MediaTrackConstraints.h',
 ]
 
 if CONFIG['MOZ_WEBRTC']:
+    if CONFIG['OS_TARGET'] == 'WINNT':
+        DEFINES['WEBRTC_WIN'] = True
+    else:
+        DEFINES['WEBRTC_POSIX'] = True
     EXPORTS += ['AudioOutputObserver.h',
                 'MediaEngineRemoteVideoSource.h',
                 'MediaEngineWebRTC.h']
     EXPORTS.mozilla.dom += [ 'RTCIdentityProviderRegistrar.h' ]
     UNIFIED_SOURCES += [
         'MediaEngineCameraVideoSource.cpp',
         'MediaEngineRemoteVideoSource.cpp',
         'MediaEngineTabVideoSource.cpp',
--- a/ipc/glue/moz.build
+++ b/ipc/glue/moz.build
@@ -34,25 +34,27 @@ EXPORTS.mozilla.ipc += [
     'SharedMemorySysV.h',
     'Shmem.h',
     'Transport.h',
     'URIUtils.h',
     'WindowsMessageLoop.h',
 ]
 
 if CONFIG['OS_ARCH'] == 'WINNT':
+    DEFINES['WEBRTC_WIN'] = True
     EXPORTS.mozilla.ipc += [
         'Transport_win.h',
     ]
     SOURCES += [
         'SharedMemory_windows.cpp',
         'Transport_win.cpp',
         'WindowsMessageLoop.cpp',
     ]
 else:
+    DEFINES['WEBRTC_POSIX'] = True
     EXPORTS.mozilla.ipc += [
         'Transport_posix.h',
     ]
     UNIFIED_SOURCES += [
         'SharedMemory_posix.cpp',
         'Transport_posix.cpp',
     ]
 
--- a/media/mtransport/nr_socket_prsock.cpp
+++ b/media/mtransport/nr_socket_prsock.cpp
@@ -170,26 +170,27 @@ namespace mozilla {
 
 #if defined(MOZILLA_INTERNAL_API) && !defined(MOZILLA_XPCOMRT_API)
 class SingletonThreadHolder final
 {
 private:
   ~SingletonThreadHolder()
   {
     r_log(LOG_GENERIC,LOG_DEBUG,"Deleting SingletonThreadHolder");
-    if (NS_WARN_IF(mThread)) {
+    MOZ_ASSERT(!mThread, "SingletonThreads should be Released and shut down before exit!");
+    if (mThread) {
       mThread->Shutdown();
       mThread = nullptr;
     }
   }
 
   DISALLOW_COPY_ASSIGN(SingletonThreadHolder);
 
 public:
-  // Must be threadsafe for ClearOnShutdown
+  // Must be threadsafe for StaticRefPtr/ClearOnShutdown
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SingletonThreadHolder)
 
   explicit SingletonThreadHolder(const nsCSubstring& aName)
     : mName(aName)
   {
     mParentThread = NS_GetCurrentThread();
   }
 
@@ -225,17 +226,18 @@ public:
     nsrefcnt count = --mUseCount;
     MOZ_ASSERT(int32_t(mUseCount) >= 0, "illegal refcnt");
     if (count == 0) {
       // in-use -> idle -- no one forcing it to remain instantiated
       r_log(LOG_GENERIC,LOG_DEBUG,"Shutting down wrapped SingletonThread %p",
             mThread.get());
       mThread->Shutdown();
       mThread = nullptr;
-      // It'd be nice to use a timer instead...
+      // It'd be nice to use a timer instead...  But be careful of
+      // xpcom-shutdown-threads in that case
     }
     r_log(LOG_GENERIC,LOG_DEBUG,"ReleaseUse: %lu", (unsigned long) count);
     return count;
   }
 
 private:
   nsCString mName;
   nsAutoRefCnt mUseCount;
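
A hypothetical sketch of the use-counting idiom around ReleaseUse(): callers bracket their work with a matching acquire/release pair, and the wrapped thread is shut down as soon as the count returns to zero, which is what lets the destructor above assert that no thread is left at exit.

    RefPtr<SingletonThreadHolder> holder = GetSocketThreadHolder();  // hypothetical accessor
    holder->AddUse();      // 0 -> 1: instantiate the wrapped singleton thread
    // ... dispatch socket work to the singleton thread ...
    holder->ReleaseUse();  // 1 -> 0: Shutdown() and drop the thread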
--- a/media/webrtc/moz.build
+++ b/media/webrtc/moz.build
@@ -8,37 +8,46 @@ include('/build/gyp.mozbuild')
 
 webrtc_non_unified_sources = [
     'trunk/webrtc/common_audio/vad/vad_core.c',                                  # Because of name clash in the kInitCheck variable
     'trunk/webrtc/common_audio/vad/webrtc_vad.c',                                # Because of name clash in the kInitCheck variable
     'trunk/webrtc/modules/audio_coding/codecs/g722/g722_decode.c',               # Because of name clash in the saturate function
     'trunk/webrtc/modules/audio_coding/codecs/g722/g722_encode.c',               # Because of name clash in the saturate function
     'trunk/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c',   # Because of name clash in the kDampFilter variable
     'trunk/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c', # Because of name clash in the kDampFilter variable
+    'trunk/webrtc/modules/audio_coding/main/acm2/codec_manager.cc',              # Because of duplicate IsCodecRED/etc
     'trunk/webrtc/modules/audio_coding/neteq/audio_vector.cc',                   # Because of explicit template specializations
+    'trunk/webrtc/modules/audio_device/android/audio_record_jni.cc',             # Because of commonly named module static vars
+    'trunk/webrtc/modules/audio_device/android/audio_track_jni.cc',              # Because of commonly named module static vars
     'trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc',       # Because of LATE()
     'trunk/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc',# Because of LATE()
     'trunk/webrtc/modules/audio_device/opensl/opensles_input.cc',                # Because of name clash in the kOption variable
     'trunk/webrtc/modules/audio_device/opensl/opensles_output.cc',               # Because of name clash in the kOption variable
     'trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc',                # Because of name clash with #define FF
     'trunk/webrtc/modules/audio_device/win/audio_device_core_win.cc',            # Because of ordering assumptions in strsafe.h
     'trunk/webrtc/modules/audio_processing/aec/aec_core.c',                      # Because of name clash in the ComfortNoise function
     'trunk/webrtc/modules/audio_processing/aecm/aecm_core.c',                    # Because of name clash in the ComfortNoise function
     'trunk/webrtc/modules/audio_processing/aecm/echo_control_mobile.c',          # Because of name clash in the kInitCheck variable
-    'trunk/webrtc/modules/audio_processing/agc/analog_agc.c',                    # Because of name clash in the kInitCheck variable
+    'trunk/webrtc/modules/audio_processing/agc/histogram.cc',                    # Because of duplicate definition of static consts with pitch_based_vad.cc
+    'trunk/webrtc/modules/audio_processing/agc/legacy/analog_agc.c',             # Because of name clash in the kInitCheck variable
+    'trunk/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc', # Because of needing to define _USE_MATH_DEFINES before including <cmath>
+    'trunk/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc',  # Because of needing to define _USE_MATH_DEFINES before including <cmath>
     'trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc',           # Because of name clash in the MapError function
     'trunk/webrtc/modules/audio_processing/echo_control_mobile_impl.cc',         # Because of name clash in the MapError function
     'trunk/webrtc/modules/audio_processing/gain_control_impl.cc',                # Because of name clash in the Handle typedef
     'trunk/webrtc/modules/audio_processing/high_pass_filter_impl.cc',            # Because of name clash in the Handle typedef
     'trunk/webrtc/modules/audio_processing/noise_suppression_impl.cc',           # Because of name clash in the Handle typedef
+    'trunk/webrtc/modules/remote_bitrate_estimator/mimd_rate_control.cc',        # Because of duplicate definitions of static consts against aimd_rate_control.cc
+    'trunk/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc', # Because of duplicate definitions of static consts against remote_bitrate_estimator_abs_send_time.cc
     'trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm',       # Because of name clash in the nsAutoreleasePool class
     'trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm',  # Because of name clash in the nsAutoreleasePool class
     'trunk/webrtc/modules/video_capture/windows/device_info_ds.cc',              # Because of the MEDIASUBTYPE_HDYC variable
     'trunk/webrtc/modules/video_capture/windows/help_functions_ds.cc',           # Because of initguid.h
     'trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc',              # Because of the MEDIASUBTYPE_HDYC variable and initguid.h
+    'trunk/webrtc/video_engine/overuse_frame_detector.cc',                       # Because of name clash with call_stats.cc on kWeightFactor
 ]
 
 GYP_DIRS += ['trunk']
 
 GYP_DIRS['trunk'].input = 'trunk/peerconnection.gyp'
 GYP_DIRS['trunk'].variables = gyp_vars
 # We allow warnings for third-party code that can be updated from upstream.
 GYP_DIRS['trunk'].sandbox_vars['ALLOW_COMPILER_WARNINGS'] = True
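
An illustration (hypothetical generated file) of the clashes these exclusions avoid: unified builds concatenate sources into one translation unit, so file-scope names that were private per file end up sharing a scope, as with the kInitCheck statics called out above.

    // vad_core.c:    static const int kInitCheck = 42;
    // webrtc_vad.c:  static const int kInitCheck = 1;
    //
    // Unified_c_0.c (generated, conceptually):
    //   #include "vad_core.c"
    //   #include "webrtc_vad.c"   // error: redefinition of 'kInitCheck'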
--- a/media/webrtc/signaling/signaling.gyp
+++ b/media/webrtc/signaling/signaling.gyp
@@ -324,16 +324,17 @@
           ],
         }],
         ['OS=="win"', {
           'include_dirs': [
           ],
           'defines': [
             'OS_WIN',
             'SIP_OS_WINDOWS',
+            'WEBRTC_WIN',
             'WIN32',
             'GIPS_VER=3480',
             'SIPCC_BUILD',
             'HAVE_WINSOCK2_H'
           ],
 
           'cflags_mozilla': [
           ],
--- a/media/webrtc/signaling/src/common/NullTransport.h
+++ b/media/webrtc/signaling/src/common/NullTransport.h
@@ -13,23 +13,23 @@
 namespace mozilla {
 
 /**
  * NullTransport is registered as ExternalTransport to throw away data
  */
 class NullTransport : public webrtc::Transport
 {
 public:
-  virtual int SendPacket(int channel, const void *data, int len)
+  virtual int SendPacket(int channel, const void *data, size_t len)
   {
     (void) channel; (void) data;
     return len;
   }
 
-  virtual int SendRTCPPacket(int channel, const void *data, int len)
+  virtual int SendRTCPPacket(int channel, const void *data, size_t len)
   {
     (void) channel; (void) data;
     return len;
   }
 
   NullTransport() {}
 
   virtual ~NullTransport() {}
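
As a usage sketch (assuming the VoENetwork external-transport API of this WebRTC snapshot; error handling omitted), a NullTransport can be registered to silently discard a channel's output:

    #include "webrtc/voice_engine/include/voe_network.h"
    #include "NullTransport.h"

    void DiscardChannelOutput(webrtc::VoENetwork* aNetwork, int aChannel)
    {
      static mozilla::NullTransport sNullTransport;
      // Every RTP/RTCP packet the engine emits on aChannel is dropped;
      // SendPacket/SendRTCPPacket just report the length as sent.
      aNetwork->RegisterExternalTransport(aChannel, sNullTransport);
    }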
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -218,22 +218,20 @@ bool WebrtcAudioConduit::GetRTCPSenderRe
  * WebRTCAudioConduit Implementation
  */
 MediaConduitErrorCode WebrtcAudioConduit::Init()
 {
   CSFLogDebug(logTag,  "%s this=%p", __FUNCTION__, this);
 
 #ifdef MOZ_WIDGET_ANDROID
     jobject context = jsjni_GetGlobalContextRef();
-
     // get the JVM
     JavaVM *jvm = jsjni_GetVM();
-    JNIEnv* jenv = jsjni_GetJNIForThread();
 
-    if (webrtc::VoiceEngine::SetAndroidObjects(jvm, jenv, (void*)context) != 0) {
+    if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
       CSFLogError(logTag, "%s Unable to set Android objects", __FUNCTION__);
       return kMediaConduitSessionNotInited;
     }
 #endif
 
   // Per WebRTC APIs below function calls return nullptr on failure
   if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
   {
@@ -834,17 +832,17 @@ WebrtcAudioConduit::StartReceiving()
     mEngineReceiving = true;
   }
 
   return kMediaConduitNoError;
 }
 
 //WebRTC::RTP Callback Implementation
 // Called on AudioGUM or MSG thread
-int WebrtcAudioConduit::SendPacket(int channel, const void* data, int len)
+int WebrtcAudioConduit::SendPacket(int channel, const void* data, size_t len)
 {
   CSFLogDebug(logTag,  "%s : channel %d", __FUNCTION__, channel);
 
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   if (MOZ_LOG_TEST(GetLatencyLog(), LogLevel::Debug)) {
     if (mProcessing.Length() > 0) {
       TimeStamp started = mProcessing[0].mTimeStamp;
       mProcessing.RemoveElementAt(0);
@@ -863,22 +861,22 @@ int WebrtcAudioConduit::SendPacket(int c
     return len;
   } else {
     CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
     return -1;
   }
 }
 
 // Called on WebRTC Process thread and perhaps others
-int WebrtcAudioConduit::SendRTCPPacket(int channel, const void* data, int len)
+int WebrtcAudioConduit::SendRTCPPacket(int channel, const void* data, size_t len)
 {
-  CSFLogDebug(logTag,  "%s : channel %d , len %d, first rtcp = %u ",
+  CSFLogDebug(logTag,  "%s : channel %d , len %lu, first rtcp = %u ",
               __FUNCTION__,
               channel,
-              len,
+              (unsigned long) len,
               static_cast<unsigned>(((uint8_t *) data)[1]));
 
   // We come here if we have only one pipeline/conduit setup,
   // such as for unidirectional streams.
   // We also end up here if we are receiving
   ReentrantMonitorAutoEnter enter(mTransportMonitor);
   if(mReceiverTransport &&
      mReceiverTransport->SendRtcpPacket(data, len) == NS_OK)
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -145,23 +145,23 @@ public:
                                               int32_t capture_delay,
                                               int& lengthSamples) override;
 
 
   /**
    * Webrtc transport implementation to send and receive RTP packet.
    * AudioConduit registers itself as ExternalTransport to the VoiceEngine
    */
-  virtual int SendPacket(int channel, const void *data, int len) override;
+  virtual int SendPacket(int channel, const void *data, size_t len) override;
 
   /**
    * Webrtc transport implementation to send and receive RTCP packet.
    * AudioConduit registers itself as ExternalTransport to the VoiceEngine
    */
-  virtual int SendRTCPPacket(int channel, const void *data, int len) override;
+  virtual int SendRTCPPacket(int channel, const void *data, size_t len) override;
 
 
   virtual uint64_t CodecPluginID() override { return 0; }
 
   WebrtcAudioConduit():
                       mVoiceEngine(nullptr),
                       mTransportMonitor("WebrtcAudioConduit"),
                       mTransmitterTransport(nullptr),
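
The int-to-size_t change has to land in the webrtc::Transport base and every implementer at once; the override keyword turns any stale signature into a compile error instead of a silently unrelated virtual. A self-contained illustration:

    struct Transport {
      virtual int SendPacket(int channel, const void* data, size_t len) = 0;
    };

    struct StaleImpl : Transport {
      // error: marked 'override' but does not override -- still 'int len'
      virtual int SendPacket(int channel, const void* data, int len) override;
    };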
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -11,16 +11,19 @@
 #include "mozilla/RefPtr.h"
 #include "CodecConfig.h"
 #include "VideoTypes.h"
 #include "MediaConduitErrors.h"
 
 #include "ImageContainer.h"
 
 #include "webrtc/common_types.h"
+namespace webrtc {
+class I420VideoFrame;
+}
 
 #include <vector>
 
 namespace mozilla {
 /**
  * Abstract Interface for transporting RTP packets - audio/video
  * The consumers of this interface are responsible for passing in
  * the RTPfied media packets
@@ -101,17 +104,24 @@ public:
    * of the frame if needed for time longer than scope of this callback.
    * Such implementations should be quick in processing the frames and return
    * immediately.
    * On the other hand, if decoded video frame is passed through handle, the
    * implementations should keep a reference to the (ref-counted) image object
    * inside until it's no longer needed.
    */
   virtual void RenderVideoFrame(const unsigned char* buffer,
-                                unsigned int buffer_size,
+                                size_t buffer_size,
+                                uint32_t time_stamp,
+                                int64_t render_time,
+                                const ImageHandle& handle) = 0;
+  virtual void RenderVideoFrame(const unsigned char* buffer,
+                                size_t buffer_size,
+                                uint32_t y_stride,
+                                uint32_t cbcr_stride,
                                 uint32_t time_stamp,
                                 int64_t render_time,
                                 const ImageHandle& handle) = 0;
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoRenderer)
 };
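
A hypothetical renderer sketch honoring the ownership rules spelled out above: buffer-backed frames must be copied before returning, while handle-backed (texture) frames should keep a reference to the ref-counted image. Member names are illustrative:

    class SketchRenderer : public mozilla::VideoRenderer {
    public:
      void RenderVideoFrame(const unsigned char* buffer, size_t buffer_size,
                            uint32_t time_stamp, int64_t render_time,
                            const ImageHandle& handle) override
      {
        if (buffer) {
          // |buffer| is only valid for the duration of this call: copy now.
          mPendingCopy.assign(buffer, buffer + buffer_size);
        }
        // else: keep a reference to the ref-counted layers::Image carried
        // by |handle| until compositing is done (accessor not shown).
      }

      void RenderVideoFrame(const unsigned char* buffer, size_t buffer_size,
                            uint32_t y_stride, uint32_t cbcr_stride,
                            uint32_t time_stamp, int64_t render_time,
                            const ImageHandle& handle) override
      {
        // Same rules; the explicit strides describe padded planes.
        RenderVideoFrame(buffer, buffer_size, time_stamp, render_time, handle);
      }

    private:
      std::vector<unsigned char> mPendingCopy;  // needs <vector>
    };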
 
 
@@ -295,16 +305,17 @@ public:
    *       This ensures the inserted video-frames can be transmitted by the conduit
    */
   virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
                                                unsigned int video_frame_length,
                                                unsigned short width,
                                                unsigned short height,
                                                VideoType video_type,
                                                uint64_t capture_time) = 0;
+  virtual MediaConduitErrorCode SendVideoFrame(webrtc::I420VideoFrame& frame) = 0;
 
   virtual MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) = 0;
   /**
    * Function to configure send codec for the video session
    * @param sendSessionConfig: CodecConfiguration
    * @result: On Success, the video engine is configured with passed in codec for send
    *          On failure, video engine transmit functionality is disabled.
  * NOTE: This API can be invoked multiple times. Invoking this API may involve restarting
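
Caller-side sketch of the new frame-based entry point (dimensions and the conduit/capture_time names are placeholders), mirroring the CreateEmptyFrame/set_timestamp calls used elsewhere in this patch:

    webrtc::I420VideoFrame frame;
    int stride_y = 640;
    int stride_uv = (640 + 1) / 2;
    frame.CreateEmptyFrame(640, 480, stride_y, stride_uv, stride_uv);
    // ... fill the Y/U/V planes ...
    frame.set_timestamp(capture_time);
    frame.set_render_time_ms(capture_time);
    conduit->SendVideoFrame(frame);  // no raw-buffer copy, no VideoType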
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
@@ -9,19 +9,21 @@
 #include "VideoConduit.h"
 #include "AudioConduit.h"
 #include "nsThreadUtils.h"
 #include "LoadManager.h"
 #include "YuvStamper.h"
 #include "nsServiceManagerUtils.h"
 #include "nsIPrefService.h"
 #include "nsIPrefBranch.h"
+#include "mozilla/media/MediaUtils.h"
 
 #include "webrtc/common_types.h"
 #include "webrtc/common_video/interface/native_handle.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/video_engine/include/vie_errors.h"
 #include "browser_logging/WebRtcLog.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidJNIWrapper.h"
 #endif
 
 // for ntohs
@@ -70,99 +72,42 @@ WebrtcVideoConduit::WebrtcVideoConduit()
   mReceiverTransport(nullptr),
   mRenderer(nullptr),
   mPtrExtCapture(nullptr),
   mEngineTransmitting(false),
   mEngineReceiving(false),
   mChannel(-1),
   mCapId(-1),
   mCodecMutex("VideoConduit codec db"),
+  mInReconfig(false),
+  mLastWidth(0), // forces a check for reconfig at start
+  mLastHeight(0),
   mSendingWidth(0),
   mSendingHeight(0),
   mReceivingWidth(640),
   mReceivingHeight(480),
   mSendingFramerate(DEFAULT_VIDEO_MAX_FRAMERATE),
   mLastFramerateTenths(DEFAULT_VIDEO_MAX_FRAMERATE*10),
   mNumReceivingStreams(1),
   mVideoLatencyTestEnable(false),
   mVideoLatencyAvg(0),
   mMinBitrate(200),
   mStartBitrate(300),
   mMaxBitrate(2000),
   mCodecMode(webrtc::kRealtimeVideo)
-{
-}
+{}
 
 WebrtcVideoConduit::~WebrtcVideoConduit()
 {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
 
-  for(std::vector<VideoCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
-  {
-    delete mRecvCodecList[i];
-  }
-
-  // The first one of a pair to be deleted shuts down media for both
-  //Deal with External Capturer
-  if(mPtrViECapture)
-  {
-    mPtrViECapture->DisconnectCaptureDevice(mCapId);
-    mPtrViECapture->ReleaseCaptureDevice(mCapId);
-    mPtrExtCapture = nullptr;
-  }
-
-   if (mPtrExtCodec) {
-     mPtrExtCodec->Release();
-     mPtrExtCodec = NULL;
-   }
-
-  //Deal with External Renderer
-  if(mPtrViERender)
-  {
-    if(mRenderer) {
-      mPtrViERender->StopRender(mChannel);
-    }
-    mPtrViERender->RemoveRenderer(mChannel);
-  }
-
-  //Deal with the transport
-  if(mPtrViENetwork)
-  {
-    mPtrViENetwork->DeregisterSendTransport(mChannel);
-  }
-
-  if(mPtrViEBase)
-  {
-    mPtrViEBase->StopSend(mChannel);
-    mPtrViEBase->StopReceive(mChannel);
-    SyncTo(nullptr);
-    mPtrViEBase->DeleteChannel(mChannel);
-  }
-
-  // mVideoCodecStat has a back-ptr to mPtrViECodec that must be released first
-  if (mVideoCodecStat) {
-    mVideoCodecStat->EndOfCallStats();
-  }
-  mVideoCodecStat = nullptr;
-  // We can't delete the VideoEngine until all these are released!
-  // And we can't use a Scoped ptr, since the order is arbitrary
-  mPtrViEBase = nullptr;
-  mPtrViECapture = nullptr;
-  mPtrViECodec = nullptr;
-  mPtrViENetwork = nullptr;
-  mPtrViERender = nullptr;
-  mPtrRTP = nullptr;
-  mPtrExtCodec = nullptr;
-
-  // only one opener can call Delete.  Have it be the last to close.
-  if(mVideoEngine)
-  {
-    webrtc::VideoEngine::Delete(mVideoEngine);
-  }
+  // Release AudioConduit first by dropping reference on MainThread, where it expects to be
+  SyncTo(nullptr);
+  Destroy();
 }
 
 bool WebrtcVideoConduit::SetLocalSSRC(unsigned int ssrc)
 {
   unsigned int oldSsrc;
   if (!GetLocalSSRC(&oldSsrc)) {
     MOZ_ASSERT(false, "GetLocalSSRC failed");
     return false;
@@ -231,17 +176,17 @@ bool WebrtcVideoConduit::GetVideoEncoder
   double framerate = mLastFramerateTenths/10.0; // fetch once
   if (std::abs(*framerateMean - framerate)/framerate > 0.1 &&
       *framerateMean >= 0.5) {
     // unchanged resolution, but adjust bandwidth limits to match camera fps
     CSFLogDebug(logTag, "Encoder frame rate changed from %f to %f",
                 (mLastFramerateTenths/10.0), *framerateMean);
     MutexAutoLock lock(mCodecMutex);
     mLastFramerateTenths = *framerateMean * 10;
-    SelectSendResolution(mSendingWidth, mSendingHeight);
+    SelectSendResolution(mSendingWidth, mSendingHeight, nullptr);
   }
   return true;
 }
 
 bool WebrtcVideoConduit::GetVideoDecoderStats(double* framerateMean,
                                               double* framerateStdDev,
                                               double* bitrateMean,
                                               double* bitrateStdDev,
@@ -262,17 +207,17 @@ bool WebrtcVideoConduit::GetAVStats(int3
                                     int32_t* avSyncOffsetMs) {
   return false;
 }
 
 bool WebrtcVideoConduit::GetRTPStats(unsigned int* jitterMs,
                                      unsigned int* cumulativeLost) {
   unsigned short fractionLost;
   unsigned extendedMax;
-  int rttMs;
+  int64_t rttMs;
   // GetReceivedRTCPStatistics is a poorly named GetRTPStatistics variant
   return !mPtrRTP->GetReceivedRTCPStatistics(mChannel, fractionLost,
                                              *cumulativeLost,
                                              extendedMax,
                                              *jitterMs,
                                              rttMs);
 }
 
@@ -306,24 +251,19 @@ bool WebrtcVideoConduit::GetRTCPSenderRe
     *timestamp = NTPtoDOMHighResTimeStamp(senderInfo.NTP_timestamp_high,
                                           senderInfo.NTP_timestamp_low);
     *packetsSent = senderInfo.sender_packet_count;
     *bytesSent = senderInfo.sender_octet_count;
   }
   return result;
 }
 
-/**
- * Performs initialization of the MANDATORY components of the Video Engine
- */
 MediaConduitErrorCode
-WebrtcVideoConduit::Init()
+WebrtcVideoConduit::InitMain()
 {
-  CSFLogDebug(logTag,  "%s this=%p", __FUNCTION__, this);
-
 #if defined(MOZILLA_INTERNAL_API) && !defined(MOZILLA_XPCOMRT_API)
   // already know we must be on MainThread barring unit test weirdness
   MOZ_ASSERT(NS_IsMainThread());
 
   nsresult rv;
   nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
   if (!NS_WARN_IF(NS_FAILED(rv)))
   {
@@ -347,38 +287,54 @@ WebrtcVideoConduit::Init()
       }
       bool use_loadmanager = false;
       (void) NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.navigator.load_adapt", &use_loadmanager)));
       if (use_loadmanager) {
         mLoadManager = LoadManagerBuild();
       }
     }
   }
-#endif
 
+  EnableWebRtcLog();
 #ifdef MOZ_WIDGET_ANDROID
   // get the JVM
   JavaVM *jvm = jsjni_GetVM();
 
   if (webrtc::VideoEngine::SetAndroidObjects(jvm) != 0) {
     CSFLogError(logTag,  "%s: could not set Android objects", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 #endif
+#endif
+  return kMediaConduitNoError;
+}
+
+/**
+ * Performs initialization of the MANDATORY components of the Video Engine
+ */
+MediaConduitErrorCode
+WebrtcVideoConduit::Init()
+{
+  CSFLogDebug(logTag,  "%s this=%p", __FUNCTION__, this);
+  MediaConduitErrorCode result;
+  // Run code that must run on MainThread first
+  MOZ_ASSERT(NS_IsMainThread());
+  result = InitMain();
+  if (result != kMediaConduitNoError) {
+    return result;
+  }
 
   // Per WebRTC APIs below function calls return nullptr on failure
   mVideoEngine = webrtc::VideoEngine::Create();
   if(!mVideoEngine)
   {
     CSFLogError(logTag, "%s Unable to create video engine ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-  EnableWebRtcLog();
-
   if( !(mPtrViEBase = ViEBase::GetInterface(mVideoEngine)))
   {
     CSFLogError(logTag, "%s Unable to get video base interface ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
   if( !(mPtrViECapture = ViECapture::GetInterface(mVideoEngine)))
   {
@@ -488,42 +444,111 @@ WebrtcVideoConduit::Init()
     mPtrViEBase->SetLoadManager(mLoadManager);
   }
 
   CSFLogError(logTag, "%s Initialization Done", __FUNCTION__);
   return kMediaConduitNoError;
 }
 
 void
+WebrtcVideoConduit::Destroy()
+{
+  for(std::vector<VideoCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
+  {
+    delete mRecvCodecList[i];
+  }
+
+  // The first one of a pair to be deleted shuts down media for both
+  //Deal with External Capturer
+  if(mPtrViECapture)
+  {
+    mPtrViECapture->DisconnectCaptureDevice(mCapId);
+    mPtrViECapture->ReleaseCaptureDevice(mCapId);
+    mPtrExtCapture = nullptr;
+  }
+
+  if (mPtrExtCodec) {
+    mPtrExtCodec->Release();
+    mPtrExtCodec = NULL;
+  }
+
+  //Deal with External Renderer
+  if(mPtrViERender)
+  {
+    if(mRenderer) {
+      mPtrViERender->StopRender(mChannel);
+    }
+    mPtrViERender->RemoveRenderer(mChannel);
+  }
+
+  //Deal with the transport
+  if(mPtrViENetwork)
+  {
+    mPtrViENetwork->DeregisterSendTransport(mChannel);
+  }
+
+  if(mPtrViEBase)
+  {
+    mPtrViEBase->StopSend(mChannel);
+    mPtrViEBase->StopReceive(mChannel);
+    mPtrViEBase->DeleteChannel(mChannel);
+  }
+
+  // mVideoCodecStat has a back-ptr to mPtrViECodec that must be released first
+  if (mVideoCodecStat) {
+    mVideoCodecStat->EndOfCallStats();
+  }
+  mVideoCodecStat = nullptr;
+  // We can't delete the VideoEngine until all these are released!
+  // And we can't use a Scoped ptr, since the order is arbitrary
+  mPtrViEBase = nullptr;
+  mPtrViECapture = nullptr;
+  mPtrViECodec = nullptr;
+  mPtrViENetwork = nullptr;
+  mPtrViERender = nullptr;
+  mPtrRTP = nullptr;
+  mPtrExtCodec = nullptr;
+
+  // only one opener can call Delete.  Have it be the last to close.
+  if(mVideoEngine)
+  {
+    webrtc::VideoEngine::Delete(mVideoEngine);
+  }
+}
+
+void
 WebrtcVideoConduit::SyncTo(WebrtcAudioConduit *aConduit)
 {
   CSFLogDebug(logTag, "%s Synced to %p", __FUNCTION__, aConduit);
 
   // SyncTo(value) syncs to the AudioConduit, and if already synced replaces
   // the current sync target.  SyncTo(nullptr) cancels any existing sync and
   // releases the strong ref to AudioConduit.
   if (aConduit) {
     mPtrViEBase->SetVoiceEngine(aConduit->GetVoiceEngine());
     mPtrViEBase->ConnectAudioChannel(mChannel, aConduit->GetChannel());
     // NOTE: this means the VideoConduit will keep the AudioConduit alive!
+  } else {
+    mPtrViEBase->DisconnectAudioChannel(mChannel);
+    mPtrViEBase->SetVoiceEngine(nullptr);
   }
 
   mSyncedTo = aConduit;
 }
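
Usage sketch (surrounding setup hypothetical): pair the receive-side video conduit with its audio conduit for A/V sync, and break the reference cycle on teardown:

    video_conduit->SyncTo(audio_conduit);  // video now keeps audio alive
    // ... call in progress ...
    video_conduit->SyncTo(nullptr);        // disconnect and drop the ref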
 
 MediaConduitErrorCode
 WebrtcVideoConduit::AttachRenderer(RefPtr<VideoRenderer> aVideoRenderer)
 {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
 
   //null renderer
   if(!aVideoRenderer)
   {
     CSFLogError(logTag, "%s NULL Renderer", __FUNCTION__);
-    MOZ_ASSERT(PR_FALSE);
+    MOZ_ASSERT(false);
     return kMediaConduitInvalidRenderer;
   }
 
   // This function is called only from main, so we only need to protect against
   // modifying mRenderer while any webrtc.org code is trying to use it.
   bool wasRendering;
   {
     ReentrantMonitorAutoEnter enter(mTransportMonitor);
@@ -890,17 +915,17 @@ WebrtcVideoConduit::ConfigureRecvMediaCo
       break;
     case webrtc::kViEKeyFrameRequestPliRtcp:
       mFrameRequestMethod = FrameRequestPli;
       break;
     case webrtc::kViEKeyFrameRequestFirRtcp:
       mFrameRequestMethod = FrameRequestFir;
       break;
     default:
-      MOZ_ASSERT(PR_FALSE);
+      MOZ_ASSERT(false);
       mFrameRequestMethod = FrameRequestUnknown;
   }
 
   if(use_nack_basic)
   {
     CSFLogDebug(logTag, "Enabling NACK (recv) for video stream\n");
     if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
     {
@@ -927,20 +952,21 @@ WebrtcVideoConduit::ConfigureRecvMediaCo
   }
 
   // by now we should have successfully started reception
   mPtrRTP->SetRembStatus(mChannel, false, true);
   DumpCodecDB();
   return kMediaConduitNoError;
 }
 
-void
-WebrtcVideoConduit::SelectBandwidth(webrtc::VideoCodec& vie_codec,
-                                    unsigned short width,
-                                    unsigned short height)
+static void
+SelectBandwidth(webrtc::VideoCodec& vie_codec,
+                unsigned short width,
+                unsigned short height,
+                mozilla::Atomic<int32_t, mozilla::Relaxed>& aLastFramerateTenths)
 {
   // max bandwidth should be proportional (not linearly!) to resolution, and
   // proportional (perhaps linearly, or close) to current frame rate.
   unsigned int fs, mb_width, mb_height;
 
   mb_width = (width + 15) >> 4;
   mb_height = (height + 15) >> 4;
   fs = mb_width * mb_height;
@@ -979,40 +1005,44 @@ WebrtcVideoConduit::SelectBandwidth(webr
     vie_codec.maxBitrate = 500;
   } else {
     // QCIF and below
     vie_codec.minBitrate = 40;
     vie_codec.maxBitrate = 250;
   }
 
   // mLastFramerateTenths is an atomic, and scaled by *10
-  double framerate = std::min((mLastFramerateTenths/10.),60.0);
+  double framerate = std::min((aLastFramerateTenths/10.),60.0);
   MOZ_ASSERT(framerate > 0);
   // Now linear reduction/increase based on fps (max 60fps i.e. doubling)
   if (framerate >= 10) {
     vie_codec.minBitrate = vie_codec.minBitrate * (framerate/30);
     vie_codec.maxBitrate = vie_codec.maxBitrate * (framerate/30);
   } else {
     // At low framerates, don't reduce bandwidth as much - cut slope to 1/2.
     // Mostly this would be ultra-low-light situations/mobile or screensharing.
     vie_codec.minBitrate = vie_codec.minBitrate * ((10-(framerate/2))/30);
     vie_codec.maxBitrate = vie_codec.maxBitrate * ((10-(framerate/2))/30);
   }
 }
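
Worked example of the scaling above: in the QCIF bracket (min 40 / max 250 kbps), a measured 15 fps gives a factor of 15/30 = 0.5, so the limits become 20/125 kbps; at 5 fps the halved slope gives (10 - 5/2)/30 = 0.25, roughly 10/62 kbps.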
 
 // XXX we need to figure out how to feed back changes in preferred capture
-// resolution to the getUserMedia source
-// Invoked under lock of mCodecMutex!
+// resolution to the getUserMedia source.
+// Returns true if we've submitted an async change (and took ownership
+// of *frame's data)
 bool
 WebrtcVideoConduit::SelectSendResolution(unsigned short width,
-                                         unsigned short height)
+                                         unsigned short height,
+                                         webrtc::I420VideoFrame *frame) // may be null
 {
   mCodecMutex.AssertCurrentThreadOwns();
   // XXX This will do bandwidth-resolution adaptation as well - bug 877954
 
+  mLastWidth = width;
+  mLastHeight = height;
   // Limit resolution to max-fs while keeping same aspect ratio as the
   // incoming image.
   if (mCurSendCodecConfig && mCurSendCodecConfig->mMaxFrameSize)
   {
     unsigned int cur_fs, max_width, max_height, mb_width, mb_height, mb_max;
 
     mb_width = (width + 15) >> 4;
     mb_height = (height + 15) >> 4;
@@ -1089,46 +1119,103 @@ WebrtcVideoConduit::SelectSendResolution
   // uses mSendingWidth/Height
   unsigned int framerate = SelectSendFrameRate(mSendingFramerate);
   if (mSendingFramerate != framerate) {
     mSendingFramerate = framerate;
     changed = true;
   }
 
   if (changed) {
-    // Get current vie codec.
-    webrtc::VideoCodec vie_codec;
-    int32_t err;
+    // On a resolution change, bounce this to the correct thread to
+    // re-configure (same as used for Init()).  Do *not* block the calling
+    // thread since that may be the MSG thread.
+
+    // MUST run on the same thread as Init()/etc
+    if (!NS_IsMainThread()) {
+      // Note: on *initial* config (first frame), the best approach would be
+      // to drop frames until the config is done, then encode the most recent
+      // frame provided and continue from there.  We don't do this, but we do
+      // drop all frames while in the process of a reconfig and then encode
+      // the frame that started the reconfig, which is close.  There may be a
+      // barely perceptible glitch in the video due to the dropped frame(s).
+      mInReconfig = true;
+
+      // We can't pass a UniquePtr<> or unique_ptr<> to a lambda directly
+      webrtc::I420VideoFrame *new_frame = nullptr;
+      if (frame) {
+        new_frame = new webrtc::I420VideoFrame();
+        // the internal buffer pointer is refcounted, so we don't have 2 copies here
+        new_frame->ShallowCopy(*frame);
+      }
+      RefPtr<WebrtcVideoConduit> self(this);
+      RefPtr<nsRunnable> webrtc_runnable =
+        media::NewRunnableFrom([self, width, height, new_frame]() -> nsresult {
+            UniquePtr<webrtc::I420VideoFrame> local_frame(new_frame); // Simplify cleanup
 
-    if ((err = mPtrViECodec->GetSendCodec(mChannel, vie_codec)) != 0)
-    {
-      CSFLogError(logTag, "%s: GetSendCodec failed, err %d", __FUNCTION__, err);
-      return false;
+            MutexAutoLock lock(self->mCodecMutex);
+            return self->ReconfigureSendCodec(width, height, new_frame);
+          });
+      // new_frame now owned by lambda
+      CSFLogDebug(logTag, "%s: proxying lambda to WebRTC thread for reconfig (width %u/%u, height %u/%u)",
+                  __FUNCTION__, width, mLastWidth, height, mLastHeight);
+      NS_DispatchToMainThread(webrtc_runnable.forget());
+      if (new_frame) {
+        return true; // queued it
+      }
+    } else {
+      // already on the right thread
+      ReconfigureSendCodec(width, height, frame);
     }
-    // Likely spurious unless there was some error, but rarely checked
-    if (vie_codec.width != width || vie_codec.height != height ||
-        vie_codec.maxFramerate != mSendingFramerate)
+  }
+  return false;
+}
+
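
The dispatch pattern above in isolation: media::NewRunnableFrom (from MediaUtils.h in this changeset) wraps a lambda returning nsresult into a runnable, and capturing a RefPtr keeps the conduit alive across threads. Minimal sketch:

    RefPtr<WebrtcVideoConduit> self(this);
    RefPtr<nsRunnable> r = media::NewRunnableFrom([self]() -> nsresult {
      // Runs on the dispatch target (main thread here).
      return NS_OK;
    });
    NS_DispatchToMainThread(r.forget());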
+nsresult
+WebrtcVideoConduit::ReconfigureSendCodec(unsigned short width,
+                                         unsigned short height,
+                                         webrtc::I420VideoFrame *frame)
+{
+  mCodecMutex.AssertCurrentThreadOwns();
+
+  // Get current vie codec.
+  webrtc::VideoCodec vie_codec;
+  int32_t err;
+
+  mInReconfig = false;
+  if ((err = mPtrViECodec->GetSendCodec(mChannel, vie_codec)) != 0)
+  {
+    CSFLogError(logTag, "%s: GetSendCodec failed, err %d", __FUNCTION__, err);
+    return NS_ERROR_FAILURE;
+  }
+  // Likely spurious unless there was some error, but rarely checked
+  if (vie_codec.width != width || vie_codec.height != height ||
+      vie_codec.maxFramerate != mSendingFramerate)
+  {
+    vie_codec.width = width;
+    vie_codec.height = height;
+    vie_codec.maxFramerate = mSendingFramerate;
+    SelectBandwidth(vie_codec, width, height, mLastFramerateTenths);
+
+    if ((err = mPtrViECodec->SetSendCodec(mChannel, vie_codec)) != 0)
     {
-      vie_codec.width = width;
-      vie_codec.height = height;
-      vie_codec.maxFramerate = mSendingFramerate;
-      SelectBandwidth(vie_codec, width, height);
-
-      if ((err = mPtrViECodec->SetSendCodec(mChannel, vie_codec)) != 0)
-      {
-        CSFLogError(logTag, "%s: SetSendCodec(%ux%u) failed, err %d",
-                    __FUNCTION__, width, height, err);
-        return false;
-      }
-      CSFLogDebug(logTag, "%s: Encoder resolution changed to %ux%u @ %ufps, bitrate %u:%u",
-                  __FUNCTION__, width, height, mSendingFramerate,
-                  vie_codec.minBitrate, vie_codec.maxBitrate);
-    } // else no change; mSendingWidth likely was 0
+      CSFLogError(logTag, "%s: SetSendCodec(%ux%u) failed, err %d",
+                  __FUNCTION__, width, height, err);
+      return NS_ERROR_FAILURE;
+    }
+    CSFLogDebug(logTag, "%s: Encoder resolution changed to %ux%u @ %ufps, bitrate %u:%u",
+                __FUNCTION__, width, height, mSendingFramerate,
+                vie_codec.minBitrate, vie_codec.maxBitrate);
+  } // else no change; mSendingWidth likely was 0
+  if (frame) {
+    // XXX I really don't like doing this from MainThread...
+    mPtrExtCapture->IncomingFrame(*frame);
+    mVideoCodecStat->SentFrame();
+    CSFLogDebug(logTag, "%s Inserted a frame from reconfig lambda", __FUNCTION__);
   }
-  return true;
+  return NS_OK;
 }
 
 // Invoked under lock of mCodecMutex!
 unsigned int
 WebrtcVideoConduit::SelectSendFrameRate(unsigned int framerate) const
 {
   mCodecMutex.AssertCurrentThreadOwns();
   unsigned int new_framerate = framerate;
@@ -1153,30 +1240,32 @@ WebrtcVideoConduit::SelectSendFrameRate(
     }
   }
   return new_framerate;
 }
 
 MediaConduitErrorCode
 WebrtcVideoConduit::SetExternalSendCodec(VideoCodecConfig* config,
                                          VideoEncoder* encoder) {
+  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   if (!mPtrExtCodec->RegisterExternalSendCodec(mChannel,
                                               config->mType,
                                               static_cast<WebrtcVideoEncoder*>(encoder),
                                               false)) {
     mExternalSendCodecHandle = encoder;
     mExternalSendCodec = new VideoCodecConfig(*config);
     return kMediaConduitNoError;
   }
   return kMediaConduitInvalidSendCodec;
 }
 
 MediaConduitErrorCode
 WebrtcVideoConduit::SetExternalRecvCodec(VideoCodecConfig* config,
                                          VideoDecoder* decoder) {
+  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   if (!mPtrExtCodec->RegisterExternalReceiveCodec(mChannel,
                                                   config->mType,
                                                   static_cast<WebrtcVideoDecoder*>(decoder))) {
     mExternalRecvCodecHandle = decoder;
     mExternalRecvCodec = new VideoCodecConfig(*config);
     return kMediaConduitNoError;
   }
   return kMediaConduitInvalidReceiveCodec;
@@ -1185,62 +1274,65 @@ WebrtcVideoConduit::SetExternalRecvCodec
 MediaConduitErrorCode
 WebrtcVideoConduit::SendVideoFrame(unsigned char* video_frame,
                                    unsigned int video_frame_length,
                                    unsigned short width,
                                    unsigned short height,
                                    VideoType video_type,
                                    uint64_t capture_time)
 {
-  CSFLogDebug(logTag,  "%s ", __FUNCTION__);
 
   //check for  the parameters sanity
   if(!video_frame || video_frame_length == 0 ||
      width == 0 || height == 0)
   {
     CSFLogError(logTag,  "%s Invalid Parameters ",__FUNCTION__);
-    MOZ_ASSERT(PR_FALSE);
+    MOZ_ASSERT(false);
     return kMediaConduitMalformedArgument;
   }
-
-  // NOTE: update when common_types.h changes
-  if (video_type > kVideoBGRA) {
-    CSFLogError(logTag,  "%s VideoType %d Invalid", __FUNCTION__, video_type);
-    MOZ_ASSERT(PR_FALSE);
-    return kMediaConduitMalformedArgument;
-  }
-  // RawVideoType == VideoType
-  webrtc::RawVideoType type = static_cast<webrtc::RawVideoType>((int)video_type);
+  MOZ_ASSERT(video_type == VideoType::kVideoI420);
+  MOZ_ASSERT(mPtrExtCapture);
 
   // Transmission should be enabled before we insert any frames.
   if(!mEngineTransmitting)
   {
     CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
+  // insert the frame into the video engine in I420 format only
+  webrtc::I420VideoFrame i420_frame;
+  i420_frame.CreateFrame(video_frame, width, height, webrtc::kVideoRotation_0);
+  i420_frame.set_timestamp(capture_time);
+  i420_frame.set_render_time_ms(capture_time);
+
+  return SendVideoFrame(i420_frame);
+}
+
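
For reference, the I420 layout assumed on this path: a WxH frame is W*H luma bytes plus two ceil(W/2) x ceil(H/2) chroma planes. At 640x480 that is 640*480 + 2*320*240 = 460800 bytes, i.e. 3/2 bytes per pixel, which is what I420SIZE() and CalcBufferSize(webrtc::kI420, ...) compute.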
+MediaConduitErrorCode
+WebrtcVideoConduit::SendVideoFrame(webrtc::I420VideoFrame& frame)
+{
+  CSFLogDebug(logTag,  "%s ", __FUNCTION__);
+  // See if we need to recalculate what we're sending.
+  // Don't compare mSendingWidth/Height, since those may not be the same as the input.
   {
     MutexAutoLock lock(mCodecMutex);
-    if (!SelectSendResolution(width, height))
-    {
-      return kMediaConduitCaptureError;
+    if (mInReconfig) {
+      // Waiting for it to finish
+      return kMediaConduitNoError;
+    }
+    if (frame.width() != mLastWidth || frame.height() != mLastHeight) {
+      if (SelectSendResolution(frame.width(), frame.height(), &frame)) {
+        // SelectSendResolution took ownership of the data in |frame|;
+        // the frame will be submitted once the reconfig is done
+        return kMediaConduitNoError;
+      }
     }
   }
-  // insert the frame to video engine in I420 format only
-  MOZ_ASSERT(mPtrExtCapture);
-  if(mPtrExtCapture->IncomingFrame(video_frame,
-                                   video_frame_length,
-                                   width, height,
-                                   type,
-                                   (unsigned long long)capture_time) == -1)
-  {
-    CSFLogError(logTag,  "%s IncomingFrame Failed %d ", __FUNCTION__,
-                                            mPtrViEBase->LastError());
-    return kMediaConduitCaptureError;
-  }
+  mPtrExtCapture->IncomingFrame(frame);
 
   mVideoCodecStat->SentFrame();
   CSFLogDebug(logTag, "%s Inserted a frame", __FUNCTION__);
   return kMediaConduitNoError;
 }
 
 // Transport Layer Callbacks
 MediaConduitErrorCode
@@ -1325,16 +1417,17 @@ WebrtcVideoConduit::StartTransmitting()
   }
 
   return kMediaConduitNoError;
 }
 
 MediaConduitErrorCode
 WebrtcVideoConduit::StopReceiving()
 {
+  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   // Are we receiving already? If so, stop receiving and playout
   // since we can't apply new recv codec when the engine is playing.
   if(mEngineReceiving)
   {
     CSFLogDebug(logTag, "%s Engine already receiving. Attempting to stop ", __FUNCTION__);
     if(mPtrViEBase->StopReceive(mChannel) == -1)
     {
       int error = mPtrViEBase->LastError();
@@ -1370,36 +1463,36 @@ WebrtcVideoConduit::StartReceiving()
     mEngineReceiving = true;
   }
 
   return kMediaConduitNoError;
 }
 
 //WebRTC::RTP Callback Implementation
 // Called on MSG thread
-int WebrtcVideoConduit::SendPacket(int channel, const void* data, int len)
+int WebrtcVideoConduit::SendPacket(int channel, const void* data, size_t len)
 {
-  CSFLogDebug(logTag,  "%s : channel %d len %d", __FUNCTION__, channel, len);
+  CSFLogDebug(logTag,  "%s : channel %d len %lu", __FUNCTION__, channel, (unsigned long) len);
 
   ReentrantMonitorAutoEnter enter(mTransportMonitor);
   if(mTransmitterTransport &&
      (mTransmitterTransport->SendRtpPacket(data, len) == NS_OK))
   {
     CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__);
     return len;
   } else {
     CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
     return -1;
   }
 }
 
 // Called from multiple threads including webrtc Process thread
-int WebrtcVideoConduit::SendRTCPPacket(int channel, const void* data, int len)
+int WebrtcVideoConduit::SendRTCPPacket(int channel, const void* data, size_t len)
 {
-  CSFLogDebug(logTag,  "%s : channel %d , len %d ", __FUNCTION__, channel,len);
+  CSFLogDebug(logTag,  "%s : channel %d , len %lu ", __FUNCTION__, channel, (unsigned long) len);
 
   // We come here if we have only one pipeline/conduit setup,
   // such as for unidirectional streams.
   // We also end up here if we are receiving
   ReentrantMonitorAutoEnter enter(mTransportMonitor);
   if(mReceiverTransport &&
      mReceiverTransport->SendRtcpPacket(data, len) == NS_OK)
   {
@@ -1437,23 +1530,37 @@ WebrtcVideoConduit::FrameSizeChange(unsi
   }
 
   CSFLogError(logTag,  "%s Renderer is NULL ", __FUNCTION__);
   return -1;
 }
 
 int
 WebrtcVideoConduit::DeliverFrame(unsigned char* buffer,
-                                 int buffer_size,
+                                 size_t buffer_size,
                                  uint32_t time_stamp,
                                  int64_t ntp_time_ms,
                                  int64_t render_time,
                                  void *handle)
 {
-  CSFLogDebug(logTag,  "%s Buffer Size %d", __FUNCTION__, buffer_size);
+  return DeliverFrame(buffer, buffer_size, mReceivingWidth, (mReceivingWidth+1)>>1,
+                      time_stamp, ntp_time_ms, render_time, handle);
+}
+
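
The default strides forwarded above assume tightly packed planes: y_stride = mReceivingWidth and cbcr_stride = (mReceivingWidth + 1) >> 1, e.g. 640 -> 640/320, or for an odd width such as 641 -> 641/321.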
+int
+WebrtcVideoConduit::DeliverFrame(unsigned char* buffer,
+                                 size_t buffer_size,
+                                 uint32_t y_stride,
+                                 uint32_t cbcr_stride,
+                                 uint32_t time_stamp,
+                                 int64_t ntp_time_ms,
+                                 int64_t render_time,
+                                 void *handle)
+{
+  CSFLogDebug(logTag,  "%s Buffer Size %lu", __FUNCTION__, (unsigned long) buffer_size);
 
   ReentrantMonitorAutoEnter enter(mTransportMonitor);
   if(mRenderer)
   {
     layers::Image* img = nullptr;
     // |handle| should be a webrtc::NativeHandle if available.
     if (handle) {
       webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(handle);
@@ -1469,18 +1576,71 @@ WebrtcVideoConduit::DeliverFrame(unsigne
                                    reinterpret_cast<unsigned char*>(&timestamp),
                                    sizeof(timestamp), 0, 0);
       if (ok) {
         VideoLatencyUpdate(now - timestamp);
       }
     }
 
     const ImageHandle img_h(img);
-    mRenderer->RenderVideoFrame(buffer, buffer_size, time_stamp, render_time,
-                                img_h);
+    mRenderer->RenderVideoFrame(buffer, buffer_size, y_stride, cbcr_stride,
+                                time_stamp, render_time, img_h);
+    return 0;
+  }
+
+  CSFLogError(logTag,  "%s Renderer is NULL  ", __FUNCTION__);
+  return -1;
+}
+
+int
+WebrtcVideoConduit::DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame)
+{
+  if (!webrtc_frame.native_handle()) {
+    uint32_t y_stride = webrtc_frame.stride(static_cast<webrtc::PlaneType>(0));
+    return DeliverFrame(const_cast<uint8_t*>(webrtc_frame.buffer(webrtc::kYPlane)),
+                        CalcBufferSize(webrtc::kI420, y_stride, webrtc_frame.height()),
+                        y_stride,
+                        webrtc_frame.stride(static_cast<webrtc::PlaneType>(1)),
+                        webrtc_frame.timestamp(),
+                        webrtc_frame.ntp_time_ms(),
+                        webrtc_frame.render_time_ms(), nullptr);
+  }
+  size_t buffer_size = CalcBufferSize(webrtc::kI420, webrtc_frame.width(), webrtc_frame.height());
+  CSFLogDebug(logTag,  "%s Buffer Size %lu", __FUNCTION__, (unsigned long) buffer_size);
+
+  ReentrantMonitorAutoEnter enter(mTransportMonitor);
+  if(mRenderer)
+  {
+    layers::Image* img = nullptr;
+    // |handle| should be a webrtc::NativeHandle if available.
+    webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(webrtc_frame.native_handle());
+    if (native_h) {
+      // In the handle, there should be a layers::Image.
+      img = static_cast<layers::Image*>(native_h->GetHandle());
+    }
+
+#if 0
+    //#ifndef MOZ_WEBRTC_OMX
+    // XXX - this may not be possible on GONK with textures!
+    if (mVideoLatencyTestEnable && mReceivingWidth && mReceivingHeight) {
+      uint64_t now = PR_Now();
+      uint64_t timestamp = 0;
+      bool ok = YuvStamper::Decode(mReceivingWidth, mReceivingHeight, mReceivingWidth,
+                                   buffer,
+                                   reinterpret_cast<unsigned char*>(&timestamp),
+                                   sizeof(timestamp), 0, 0);
+      if (ok) {
+        VideoLatencyUpdate(now - timestamp);
+      }
+    }
+#endif
+
+    const ImageHandle img_h(img);
+    mRenderer->RenderVideoFrame(nullptr, buffer_size, webrtc_frame.timestamp(),
+                                webrtc_frame.render_time_ms(), img_h);
     return 0;
   }
 
   CSFLogError(logTag,  "%s Renderer is NULL  ", __FUNCTION__);
   return -1;
 }
 
 /**
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -3,22 +3,24 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef VIDEO_SESSION_H_
 #define VIDEO_SESSION_H_
 
 #include "nsAutoPtr.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/Atomics.h"
+#include "mozilla/SharedThreadPool.h"
 
 #include "MediaConduitInterface.h"
 #include "MediaEngineWrapper.h"
 #include "CodecStatistics.h"
 #include "LoadManagerFactory.h"
 #include "LoadManager.h"
+#include "runnable_utils.h"
 
 // conflicts with #include of scoped_ptr.h
 #undef FF
 // Video Engine Includes
 #include "webrtc/common_types.h"
 #ifdef FF
 #undef FF // Avoid name collision between scoped_ptr.h and nsCRTGlue.h.
 #endif
@@ -41,34 +43,35 @@
  using  webrtc::ViECapture;
  using  webrtc::ViERender;
  using  webrtc::ViEExternalCapture;
  using  webrtc::ViEExternalCodec;
 
 namespace mozilla {
 
 class WebrtcAudioConduit;
+class nsThread;
 
 // Interface of external video encoder for WebRTC.
 class WebrtcVideoEncoder:public VideoEncoder
                          ,public webrtc::VideoEncoder
 {};
 
 // Interface of external video decoder for WebRTC.
 class WebrtcVideoDecoder:public VideoDecoder
                          ,public webrtc::VideoDecoder
 {};
 
 /**
  * Concrete class for Video session. Hooks up
  *  - media-source and target to external transport
  */
-class WebrtcVideoConduit:public VideoSessionConduit
-                         ,public webrtc::Transport
-                         ,public webrtc::ExternalRenderer
+class WebrtcVideoConduit : public VideoSessionConduit
+                         , public webrtc::Transport
+                         , public webrtc::ExternalRenderer
 {
 public:
   //VoiceEngine defined constant for Payload Name Size.
   static const unsigned int CODEC_PLNAME_SIZE;
 
   /**
    * Set up A/V sync between this (incoming) VideoConduit and an audio conduit.
    */
@@ -130,27 +133,35 @@ public:
   /**
    * Register Transport for this Conduit. RTP and RTCP frames from the VideoEngine
    * shall be passed to the registered transport for transporting externally.
    */
   virtual MediaConduitErrorCode SetTransmitterTransport(RefPtr<TransportInterface> aTransport) override;
 
   virtual MediaConduitErrorCode SetReceiverTransport(RefPtr<TransportInterface> aTransport) override;
 
-  void SelectBandwidth(webrtc::VideoCodec& vie_codec,
-                       unsigned short width,
-                       unsigned short height);
   /**
    * Function to select and change the encoding resolution based on incoming frame size
    * and current available bandwidth.
    * @param width, height: dimensions of the frame
-   * @param force: force setting the codec config if framerate may require a bandwidth change
+   * @param frame: optional frame to submit for encoding after reconfig
    */
   bool SelectSendResolution(unsigned short width,
-                            unsigned short height);
+                            unsigned short height,
+                            webrtc::I420VideoFrame *frame);
+
+  /**
+   * Function to reconfigure the current send codec for a different
+   * width/height/framerate/etc.
+   * @param width, height: dimensions of the frame
+   * @param frame: optional frame to submit for encoding after reconfig
+   */
+  nsresult ReconfigureSendCodec(unsigned short width,
+                                unsigned short height,
+                                webrtc::I420VideoFrame *frame);
 
   /**
    * Function to select and change the encoding frame rate based on incoming frame rate
    * and max-mbps setting.
    * @param current framerate
    * @result new framerate
    */
   unsigned int SelectSendFrameRate(unsigned int framerate) const;
@@ -167,16 +178,17 @@ public:
    *       This ensures the inserted video-frames can be transmitted by the conduit
    */
   virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
                                                 unsigned int video_frame_length,
                                                 unsigned short width,
                                                 unsigned short height,
                                                 VideoType video_type,
                                                 uint64_t capture_time) override;
+  virtual MediaConduitErrorCode SendVideoFrame(webrtc::I420VideoFrame& frame) override;
 
   /**
    * Set an external encoder object |encoder| to the payload type |pltype|
    * for sender side codec.
    */
   virtual MediaConduitErrorCode SetExternalSendCodec(VideoCodecConfig* config,
                                                      VideoEncoder* encoder) override;
 
@@ -187,34 +199,39 @@ public:
   virtual MediaConduitErrorCode SetExternalRecvCodec(VideoCodecConfig* config,
                                                      VideoDecoder* decoder) override;
 
 
   /**
    * Webrtc transport implementation to send and receive RTP packet.
    * VideoConduit registers itself as ExternalTransport to the VideoEngine
    */
-  virtual int SendPacket(int channel, const void *data, int len) override;
+  virtual int SendPacket(int channel, const void *data, size_t len) override;
 
   /**
    * Webrtc transport implementation to send and receive RTCP packet.
    * VideoConduit registers itself as ExternalTransport to the VideoEngine
    */
-  virtual int SendRTCPPacket(int channel, const void *data, int len) override;
+  virtual int SendRTCPPacket(int channel, const void *data, size_t len) override;
 
 
   /**
    * Webrtc External Renderer Implementation APIs.
    * Raw I420 Frames are delivered to the VideoConduit by the VideoEngine
    */
   virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int) override;
 
-  virtual int DeliverFrame(unsigned char*, int, uint32_t , int64_t,
+  virtual int DeliverFrame(unsigned char*, size_t, uint32_t, int64_t,
                            int64_t, void *handle) override;
 
+  virtual int DeliverFrame(unsigned char*, size_t, uint32_t, uint32_t, uint32_t, int64_t,
+                           int64_t, void *handle);
+
+  virtual int DeliverI420Frame(const webrtc::I420VideoFrame& webrtc_frame) override;
+
   /**
    * Does DeliverFrame() support a null buffer and non-null handle
    * (video texture)?
   * B2G supports it (when using HW video decoder with graphic buffer output).
    * XXX Investigate!  Especially for Android
    */
   virtual bool IsTextureSupported() override {
 #ifdef WEBRTC_GONK
@@ -246,17 +263,19 @@ public:
       return mCurSendCodecConfig->mMaxFrameRate;
     }
     return 0;
   }
 
   WebrtcVideoConduit();
   virtual ~WebrtcVideoConduit();
 
-  MediaConduitErrorCode Init();
+  MediaConduitErrorCode InitMain();
+  virtual MediaConduitErrorCode Init();
+  virtual void Destroy();
 
   int GetChannel() { return mChannel; }
   webrtc::VideoEngine* GetVideoEngine() { return mVideoEngine; }
   bool GetLocalSSRC(unsigned int* ssrc) override;
   bool SetLocalSSRC(unsigned int ssrc) override;
   bool GetRemoteSSRC(unsigned int* ssrc) override;
   bool SetLocalCNAME(const char* cname) override;
   bool GetVideoEncoderStats(double* framerateMean,
@@ -280,19 +299,34 @@ public:
                              uint32_t* cumulativeLost,
                              int32_t* rttMs) override;
   bool GetRTCPSenderReport(DOMHighResTimeStamp* timestamp,
                            unsigned int* packetsSent,
                            uint64_t* bytesSent) override;
   uint64_t MozVideoLatencyAvg();
 
 private:
+  DISALLOW_COPY_AND_ASSIGN(WebrtcVideoConduit);
 
-  WebrtcVideoConduit(const WebrtcVideoConduit& other) = delete;
-  void operator=(const WebrtcVideoConduit& other) = delete;
+  static inline bool OnThread(nsIEventTarget *thread)
+  {
+    bool on;
+    nsresult rv;
+    rv = thread->IsOnCurrentThread(&on);
+
+    // If the target thread has already shut down, we don't want to assert.
+    if (rv != NS_ERROR_NOT_INITIALIZED) {
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+    }
+
+    if (NS_WARN_IF(NS_FAILED(rv))) {
+      return false;
+    }
+    return on;
+  }
 
   //Local database of currently applied receive codecs
   typedef std::vector<VideoCodecConfig* > RecvCodecList;
 
   //Function to convert between WebRTC and Conduit codec structures
   void CodecConfigToWebRTCCodec(const VideoCodecConfig* codecInfo,
                                 webrtc::VideoCodec& cinst);
 
@@ -335,17 +369,20 @@ private:
   mozilla::Atomic<bool> mEngineReceiving;    // if true ==> Receive sub-system up and running
 
   int mChannel; // Video Channel for this conduit
   int mCapId;   // Capturer for this conduit
   RecvCodecList    mRecvCodecList;
 
   Mutex mCodecMutex; // protects mCurSendCodecConfig
   nsAutoPtr<VideoCodecConfig> mCurSendCodecConfig;
+  bool mInReconfig;
 
+  unsigned short mLastWidth;
+  unsigned short mLastHeight;
   unsigned short mSendingWidth;
   unsigned short mSendingHeight;
   unsigned short mReceivingWidth;
   unsigned short mReceivingHeight;
   unsigned int   mSendingFramerate;
   // scaled by *10 because Atomic<double/float> isn't supported
   mozilla::Atomic<int32_t, mozilla::Relaxed> mLastFramerateTenths;
   unsigned short mNumReceivingStreams;
@@ -367,12 +404,11 @@ private:
   nsAutoPtr<VideoDecoder> mExternalRecvCodecHandle;
 
   // statistics object for video codec;
   nsAutoPtr<VideoCodecStatistics> mVideoCodecStat;
 
   nsAutoPtr<LoadManager> mLoadManager;
   webrtc::VideoCodecMode mCodecMode;
 };
-
 } // end namespace
 
 #endif
--- a/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp
@@ -481,17 +481,19 @@ WebrtcGmpVideoEncoder::SetChannelParamet
 {
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 int32_t
 WebrtcGmpVideoEncoder::SetRates(uint32_t aNewBitRate, uint32_t aFrameRate)
 {
   MOZ_ASSERT(mGMPThread);
-  MOZ_ASSERT(!NS_IsMainThread());
+  if (aFrameRate == 0) {
+    aFrameRate = 30; // Assume 30fps if we don't know the rate
+  }
   mGMPThread->Dispatch(WrapRunnableNM(&WebrtcGmpVideoEncoder::SetRates_g,
                                       RefPtr<WebrtcGmpVideoEncoder>(this),
                                       aNewBitRate,
                                       aFrameRate),
                        NS_DISPATCH_NORMAL);
 
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -921,21 +923,18 @@ WebrtcGmpVideoDecoder::Terminated()
 }
 
 void
 WebrtcGmpVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame)
 {
   MutexAutoLock lock(mCallbackMutex);
   if (mCallback) {
     webrtc::I420VideoFrame image;
-    int ret = image.CreateFrame(aDecodedFrame->AllocatedSize(kGMPYPlane),
-                                aDecodedFrame->Buffer(kGMPYPlane),
-                                aDecodedFrame->AllocatedSize(kGMPUPlane),
+    int ret = image.CreateFrame(aDecodedFrame->Buffer(kGMPYPlane),
                                 aDecodedFrame->Buffer(kGMPUPlane),
-                                aDecodedFrame->AllocatedSize(kGMPVPlane),
                                 aDecodedFrame->Buffer(kGMPVPlane),
                                 aDecodedFrame->Width(),
                                 aDecodedFrame->Height(),
                                 aDecodedFrame->Stride(kGMPYPlane),
                                 aDecodedFrame->Stride(kGMPUPlane),
                                 aDecodedFrame->Stride(kGMPVPlane));
     if (ret != 0) {
       return;
--- a/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.h
@@ -302,17 +302,17 @@ class WebrtcVideoEncoderProxy : public W
 
     const uint64_t PluginID() override
     {
       return mEncoderImpl->PluginID();
     }
 
     int32_t InitEncode(const webrtc::VideoCodec* aCodecSettings,
                        int32_t aNumberOfCores,
-                       uint32_t aMaxPayloadSize) override
+                       size_t aMaxPayloadSize) override
     {
       return mEncoderImpl->InitEncode(aCodecSettings,
                                       aNumberOfCores,
                                       aMaxPayloadSize);
     }
 
     int32_t Encode(
         const webrtc::I420VideoFrame& aInputImage,
@@ -331,17 +331,17 @@ class WebrtcVideoEncoderProxy : public W
     }
 
     int32_t Release() override
     {
       return mEncoderImpl->ReleaseGmp();
     }
 
     int32_t SetChannelParameters(uint32_t aPacketLoss,
-                                 int aRTT) override
+                                 int64_t aRTT) override
     {
       return mEncoderImpl->SetChannelParameters(aPacketLoss, aRTT);
     }
 
     int32_t SetRates(uint32_t aNewBitRate,
                      uint32_t aFrameRate) override
     {
       return mEncoderImpl->SetRates(aNewBitRate, aFrameRate);
--- a/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.cpp
@@ -834,17 +834,17 @@ int32_t WebrtcMediaCodecVP8VideoEncoder:
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 WebrtcMediaCodecVP8VideoEncoder::~WebrtcMediaCodecVP8VideoEncoder() {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
   Release();
 }
 
-int32_t WebrtcMediaCodecVP8VideoEncoder::SetChannelParameters(uint32_t packetLoss, int rtt) {
+int32_t WebrtcMediaCodecVP8VideoEncoder::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 int32_t WebrtcMediaCodecVP8VideoEncoder::SetRates(uint32_t newBitRate, uint32_t frameRate) {
   CSFLogDebug(logTag,  "%s ", __FUNCTION__);
   if (!mMediaCodecEncoder) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
--- a/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaCodecVP8VideoCodec.h
@@ -40,17 +40,17 @@ public:
   virtual int32_t Encode(const webrtc::I420VideoFrame& inputImage,
                           const webrtc::CodecSpecificInfo* codecSpecificInfo,
                           const std::vector<webrtc::VideoFrameType>* frame_types) override;
 
   virtual int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* callback) override;
 
   virtual int32_t Release() override;
 
-  virtual int32_t SetChannelParameters(uint32_t packetLoss, int rtt) override;
+  virtual int32_t SetChannelParameters(uint32_t packetLoss, int64_t rtt) override;
 
   virtual int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override;
 
 private:
   int32_t VerifyAndAllocate(const uint32_t minimumSize);
   bool ResetInputBuffers();
   bool ResetOutputBuffers();
 
--- a/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.cpp
@@ -16,17 +16,17 @@
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <OMX_Component.h>
 using namespace android;
 
 // WebRTC
-#include "webrtc/common_video/interface/texture_video_frame.h"
+//#include "webrtc/common_video/interface/texture_video_frame.h"
 #include "webrtc/video_engine/include/vie_external_codec.h"
 #include "runnable_utils.h"
 
 // Gecko
 #if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 21
 #include "GonkBufferQueueProducer.h"
 #endif
 #include "GonkNativeWindow.h"
@@ -167,19 +167,22 @@ public:
 
     mInputFrames.push(aFrame);
     // Notify Run() about queued input and it can start working.
     lock.NotifyAll();
   }
 
   NS_IMETHODIMP Run() override
   {
+    MonitorAutoLock lock(mMonitor);
+    if (mEnding) {
+      return NS_OK;
+    }
     MOZ_ASSERT(mThread);
 
-    MonitorAutoLock lock(mMonitor);
     while (true) {
       if (mInputFrames.empty()) {
         // Wait for new input.
         lock.Wait();
       }
 
       if (mEnding) {
         CODEC_LOGD("OMXOutputDrain Run() ending");
@@ -541,22 +544,22 @@ public:
       renderTimeMs = decoded.mRenderTimeMs;
       mDecodedFrames.pop();
     }
     MOZ_ASSERT(timestamp >= 0 && renderTimeMs >= 0);
 
     CODEC_LOGD("Decoder NewFrame: %dx%d, timestamp %lld, renderTimeMs %lld",
                picSize.width, picSize.height, timestamp, renderTimeMs);
 
-    nsAutoPtr<webrtc::I420VideoFrame> videoFrame(
-      new webrtc::TextureVideoFrame(new ImageNativeHandle(grallocImage.forget()),
-                                    picSize.width,
-                                    picSize.height,
-                                    timestamp,
-                                    renderTimeMs));
+    nsAutoPtr<webrtc::I420VideoFrame> videoFrame(new webrtc::I420VideoFrame(
+      new ImageNativeHandle(grallocImage.forget()),
+      grallocData.mPicSize.width,
+      grallocData.mPicSize.height,
+      timestamp,
+      renderTimeMs));
     if (videoFrame != nullptr) {
       mCallback->Decoded(*videoFrame);
     }
   }
 
 private:
   class OutputDrain : public OMXOutputDrain
   {
@@ -1069,19 +1072,19 @@ WebrtcOMXH264VideoEncoder::~WebrtcOMXH26
 }
 
 // Inform the encoder of the new packet loss rate and the round-trip time of
 // the network. aPacketLossRate is the fraction lost and can be 0~255
 // (255 means 100% lost).
 // Note: stagefright doesn't handle these parameters.
 int32_t
 WebrtcOMXH264VideoEncoder::SetChannelParameters(uint32_t aPacketLossRate,
-                                                int aRoundTripTimeMs)
+                                                int64_t aRoundTripTimeMs)
 {
-  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set channel packet loss:%u, rtt:%d",
+  CODEC_LOGD("WebrtcOMXH264VideoEncoder:%p set channel packet loss:%u, rtt:%" PRIi64,
              this, aPacketLossRate, aRoundTripTimeMs);
 
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 // TODO: Bug 997567. Find the way to support frame rate change.
 int32_t
 WebrtcOMXH264VideoEncoder::SetRates(uint32_t aBitRateKbps, uint32_t aFrameRate)
--- a/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcOMXH264VideoCodec.h
@@ -46,17 +46,17 @@ public:
                          const webrtc::CodecSpecificInfo* aCodecSpecificInfo,
                          const std::vector<webrtc::VideoFrameType>* aFrameTypes) override;
 
   virtual int32_t RegisterEncodeCompleteCallback(webrtc::EncodedImageCallback* aCallback) override;
 
   virtual int32_t Release() override;
 
   virtual int32_t SetChannelParameters(uint32_t aPacketLossRate,
-                                       int aRoundTripTimeMs) override;
+                                       int64_t aRoundTripTimeMs) override;
 
   virtual int32_t SetRates(uint32_t aBitRate, uint32_t aFrameRate) override;
 
 private:
   nsAutoPtr<android::OMXVideoEncoder> mOMX;
   android::sp<android::OMXCodecReservation> mReservation;
 
   webrtc::EncodedImageCallback* mCallback;
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -42,16 +42,21 @@
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
 #include "mozilla/PeerIdentity.h"
 #endif
 #include "mozilla/gfx/Point.h"
 #include "mozilla/gfx/Types.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/UniquePtrExtensions.h"
 
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/interface/native_handle.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/video_engine/include/vie_errors.h"
+
 #include "logging.h"
 
 // Should come from MediaEngineWebRTC.h, but that's a pain to include here
 #define DEFAULT_SAMPLE_RATE 32000
 
 using namespace mozilla;
 using namespace mozilla::gfx;
 using namespace mozilla::layers;
@@ -1096,21 +1101,49 @@ void MediaPipelineTransmit::PipelineList
         MOZ_MTLOG(ML_ERROR, "Un-handled GRALLOC buffer type:" << pixelFormat);
         MOZ_CRASH();
     }
     void *basePtr;
     graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &basePtr);
     uint32_t width = graphicBuffer->getWidth();
     uint32_t height = graphicBuffer->getHeight();
 // XXX gralloc buffer's width and stride can differ depending on the implementation.
-    conduit->SendVideoFrame(static_cast<unsigned char*>(basePtr),
-                            I420SIZE(width, height),
-                            width,
-                            height,
-                            destFormat, 0);
+
+    if (destFormat != mozilla::kVideoI420) {
+      unsigned char *video_frame = static_cast<unsigned char*>(basePtr);
+      webrtc::I420VideoFrame i420_frame;
+      int stride_y = width;
+      int stride_uv = (width + 1) / 2;
+      int target_width = width;
+      int target_height = height;
+      if (i420_frame.CreateEmptyFrame(target_width,
+                                      abs(target_height),
+                                      stride_y,
+                                      stride_uv, stride_uv) < 0) {
+        MOZ_ASSERT(false, "Can't allocate empty i420frame");
+        return;
+      }
+      webrtc::VideoType commonVideoType =
+        webrtc::RawVideoTypeToCommonVideoVideoType(
+          static_cast<webrtc::RawVideoType>((int)destFormat));
+      if (ConvertToI420(commonVideoType, video_frame, 0, 0, width, height,
+                        I420SIZE(width, height), webrtc::kVideoRotation_0,
+                        &i420_frame)) {
+        MOZ_ASSERT(false, "Can't convert video type for sending to I420");
+        return;
+      }
+      i420_frame.set_ntp_time_ms(0);
+      conduit->SendVideoFrame(i420_frame);
+    } else {
+      conduit->SendVideoFrame(static_cast<unsigned char*>(basePtr),
+                              I420SIZE(width, height),
+                              width,
+                              height,
+                              destFormat, 0);
+    }
     graphicBuffer->unlock();
     return;
   } else
 #endif
   if (format == ImageFormat::PLANAR_YCBCR) {
     // Cast away constness b/c some of the accessors are non-const
     PlanarYCbCrImage* yuv = const_cast<PlanarYCbCrImage *>(
         static_cast<const PlanarYCbCrImage *>(img));
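 
(As a reading aid: I420SIZE(width, height) above is assumed to follow the
usual I420 layout of one full-resolution Y plane plus two half-resolution
chroma planes; a sketch of that arithmetic, not this codebase's actual macro:)
 
    #include <cstddef>
    #include <cstdint>
 
    // Sketch of the usual I420 size computation (an assumption, not the
    // actual I420SIZE definition).
    static size_t I420BufferSize(uint32_t width, uint32_t height) {
      const size_t y_plane = static_cast<size_t>(width) * height;
      const size_t chroma_plane =
          static_cast<size_t>((width + 1) / 2) * ((height + 1) / 2);
      return y_plane + 2 * chroma_plane;  // Y + Cb + Cr
    }
 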
@@ -1459,17 +1492,29 @@ MediaPipelineReceiveVideo::PipelineListe
     monitor_("Video PipelineListener") {
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   image_container_ = LayerManager::CreateImageContainer();
 #endif
 }
 
 void MediaPipelineReceiveVideo::PipelineListener::RenderVideoFrame(
     const unsigned char* buffer,
-    unsigned int buffer_size,
+    size_t buffer_size,
+    uint32_t time_stamp,
+    int64_t render_time,
+    const RefPtr<Image>& video_image) {
+  RenderVideoFrame(buffer, buffer_size, width_, (width_ + 1) >> 1,
+                   time_stamp, render_time, video_image);
+}
+
+void MediaPipelineReceiveVideo::PipelineListener::RenderVideoFrame(
+    const unsigned char* buffer,
+    size_t buffer_size,
+    uint32_t y_stride,
+    uint32_t cbcr_stride,
     uint32_t time_stamp,
     int64_t render_time,
     const RefPtr<Image>& video_image) {
 
 #ifdef MOZILLA_INTERNAL_API
   ReentrantMonitorAutoEnter enter(monitor_);
 #endif // MOZILLA_INTERNAL_API
 
@@ -1484,22 +1529,22 @@ void MediaPipelineReceiveVideo::Pipeline
     RefPtr<PlanarYCbCrImage> yuvImage = new GrallocImage();
 #else
     RefPtr<PlanarYCbCrImage> yuvImage = image_container_->CreatePlanarYCbCrImage();
 #endif
     uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*> (buffer));
 
     PlanarYCbCrData yuvData;
     yuvData.mYChannel = frame;
-    yuvData.mYSize = IntSize(width_, height_);
-    yuvData.mYStride = width_;
-    yuvData.mCbCrStride = (width_ + 1) >> 1;
+    yuvData.mYSize = IntSize(y_stride, height_);
+    yuvData.mYStride = y_stride;
+    yuvData.mCbCrStride = cbcr_stride;
     yuvData.mCbChannel = frame + height_ * yuvData.mYStride;
     yuvData.mCrChannel = yuvData.mCbChannel + ((height_ + 1) >> 1) * yuvData.mCbCrStride;
-    yuvData.mCbCrSize = IntSize((width_ + 1) >> 1, (height_ + 1) >> 1);
+    yuvData.mCbCrSize = IntSize(yuvData.mCbCrStride, (height_ + 1) >> 1);
     yuvData.mPicX = 0;
     yuvData.mPicY = 0;
     yuvData.mPicSize = IntSize(width_, height_);
     yuvData.mStereoMode = StereoMode::MONO;
 
     if (!yuvImage->SetData(yuvData)) {
       MOZ_ASSERT(false);
       return;
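 
(The Cb/Cr pointers above are derived purely from the strides; a minimal
sketch of the same arithmetic, with hypothetical names:)
 
    #include <cstddef>
    #include <cstdint>
 
    // Hypothetical helper mirroring the mCbChannel/mCrChannel math above.
    struct I420Planes {
      const uint8_t* y;
      const uint8_t* cb;
      const uint8_t* cr;
    };
 
    static I420Planes SplitI420(const uint8_t* buffer, uint32_t height,
                                uint32_t y_stride, uint32_t cbcr_stride) {
      I420Planes planes;
      planes.y = buffer;
      planes.cb = planes.y + static_cast<size_t>(height) * y_stride;
      planes.cr =
          planes.cb + static_cast<size_t>((height + 1) / 2) * cbcr_stride;
      return planes;
    }
 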
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -696,27 +696,40 @@ class MediaPipelineReceiveVideo : public
     explicit PipelineRenderer(MediaPipelineReceiveVideo *pipeline) :
       pipeline_(pipeline) {}
 
     void Detach() { pipeline_ = nullptr; }
 
     // Implement VideoRenderer
     virtual void FrameSizeChange(unsigned int width,
                                  unsigned int height,
-                                 unsigned int number_of_streams) {
+                                 unsigned int number_of_streams) override {
       pipeline_->listener_->FrameSizeChange(width, height, number_of_streams);
     }
 
     virtual void RenderVideoFrame(const unsigned char* buffer,
-                                  unsigned int buffer_size,
+                                  size_t buffer_size,
                                   uint32_t time_stamp,
                                   int64_t render_time,
-                                  const ImageHandle& handle) {
-      pipeline_->listener_->RenderVideoFrame(buffer, buffer_size, time_stamp,
-                                             render_time,
+                                  const ImageHandle& handle) override {
+      pipeline_->listener_->RenderVideoFrame(buffer, buffer_size,
+                                             time_stamp, render_time,
+                                             handle.GetImage());
+    }
+
+    virtual void RenderVideoFrame(const unsigned char* buffer,
+                                  size_t buffer_size,
+                                  uint32_t y_stride,
+                                  uint32_t cbcr_stride,
+                                  uint32_t time_stamp,
+                                  int64_t render_time,
+                                  const ImageHandle& handle) override {
+      pipeline_->listener_->RenderVideoFrame(buffer, buffer_size,
+                                             y_stride, cbcr_stride,
+                                             time_stamp, render_time,
                                              handle.GetImage());
     }
 
    private:
     MediaPipelineReceiveVideo *pipeline_;  // Raw pointer to avoid cycles
   };
 
   // Separate class to allow ref counting
@@ -740,17 +753,24 @@ class MediaPipelineReceiveVideo : public
                          unsigned int number_of_streams) {
       ReentrantMonitorAutoEnter enter(monitor_);
 
       width_ = width;
       height_ = height;
     }
 
     void RenderVideoFrame(const unsigned char* buffer,
-                          unsigned int buffer_size,
+                          size_t buffer_size,
+                          uint32_t time_stamp,
+                          int64_t render_time,
+                          const RefPtr<layers::Image>& video_image);
+    void RenderVideoFrame(const unsigned char* buffer,
+                          size_t buffer_size,
+                          uint32_t y_stride,
+                          uint32_t cbcr_stride,
                           uint32_t time_stamp,
                           int64_t render_time,
                           const RefPtr<layers::Image>& video_image);
 
    private:
     int width_;
     int height_;
 #if defined(MOZILLA_XPCOMRT_API)
--- a/media/webrtc/signaling/test/mediaconduit_unittests.cpp
+++ b/media/webrtc/signaling/test/mediaconduit_unittests.cpp
@@ -373,32 +373,43 @@ public:
   }
 
   virtual ~DummyVideoTarget()
   {
   }
 
 
   void RenderVideoFrame(const unsigned char* buffer,
-                        unsigned int buffer_size,
+                        size_t buffer_size,
+                        uint32_t y_stride,
+                        uint32_t cbcr_stride,
                         uint32_t time_stamp,
                         int64_t render_time,
-                        const mozilla::ImageHandle& handle)
+                        const mozilla::ImageHandle& handle) override
+  {
+    RenderVideoFrame(buffer, buffer_size, time_stamp, render_time, handle);
+  }
+
+  void RenderVideoFrame(const unsigned char* buffer,
+                        size_t buffer_size,
+                        uint32_t time_stamp,
+                        int64_t render_time,
+                        const mozilla::ImageHandle& handle) override
  {
   //write the frame to the file
   if(VerifyFrame(buffer, buffer_size) == 0)
   {
       vidStatsGlobal.numFramesRenderedSuccessfully++;
   } else
   {
       vidStatsGlobal.numFramesRenderedWrongly++;
   }
  }
 
- void FrameSizeChange(unsigned int, unsigned int, unsigned int)
+ void FrameSizeChange(unsigned int, unsigned int, unsigned int) override
  {
     //do nothing
  }
 
 //This is hardcoded to check if the contents of the frame match the COLOR
 // we set while sending.
  int VerifyFrame(const unsigned char* buffer, unsigned int buffer_size)
  {
--- a/media/webrtc/signaling/test/mediapipeline_unittest.cpp
+++ b/media/webrtc/signaling/test/mediapipeline_unittest.cpp
@@ -391,20 +391,25 @@ class MediaPipelineTest : public ::testi
     // make any sense.
     ASSERT_FALSE(!aIsRtcpMux && bundle);
 
     p2_.SetBundleFilter(initialFilter);
 
     // Setup transport flows
     InitTransports(aIsRtcpMux);
 
+#if 0 //DEBUG(pkerr)
     mozilla::SyncRunnable::DispatchToThread(
       test_utils->sts_target(),
       WrapRunnable(&p1_, &TestAgent::CreatePipelines_s, aIsRtcpMux));
-
+#else
+    NS_DispatchToMainThread(
+      WrapRunnable(&p1_, &TestAgent::CreatePipelines_s, aIsRtcpMux),
+      NS_DISPATCH_SYNC);
+#endif
     mozilla::SyncRunnable::DispatchToThread(
       test_utils->sts_target(),
       WrapRunnable(&p2_, &TestAgent::CreatePipelines_s, aIsRtcpMux));
 
     p2_.Start();
     p1_.Start();
 
     if (bundle) {
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/build/android/cpufeatures.gypi
@@ -0,0 +1,6 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Dummy gypi: contents should be discarded due to an enclosing 'conditions:' element.
+{}
--- a/media/webrtc/trunk/peerconnection.gyp
+++ b/media/webrtc/trunk/peerconnection.gyp
@@ -31,17 +31,17 @@
 	     'dummy_file.txt',
           ],
           'message': 'Generating scream',
         }, ],
         'dependencies': [
           'webrtc/modules/modules.gyp:audio_device',
           'webrtc/modules/modules.gyp:video_capture_module',
 	  'webrtc/modules/modules.gyp:video_capture_module_internal_impl',
-          'webrtc/modules/modules.gyp:video_render_module_impl',
+          'webrtc/modules/modules.gyp:video_render',
 #          'webrtc/system_wrappers/source/system_wrappers.gyp:system_wrappers',
 #	  'webrtc/system_wrappers/source/system_wrappers.gyp:metrics_default',
           'webrtc/video_engine/video_engine.gyp:video_engine_core',
           'webrtc/voice_engine/voice_engine.gyp:voice_engine',
         ],
         'conditions': [
           ['OS!="android" and OS!="ios"', {
             'dependencies': [
--- a/media/webrtc/trunk/webrtc/base/base.gyp
+++ b/media/webrtc/trunk/webrtc/base/base.gyp
@@ -53,16 +53,30 @@
         'thread_annotations.h',
         'thread_checker.h',
         'thread_checker_impl.cc',
         'thread_checker_impl.h',
         'timeutils.cc',
         'timeutils.h',
         'trace_event.h',
       ],
+      'conditions': [
+        ['OS=="mac"', {
+          'sources': [
+            'macutils.cc',
+            'macutils.h',
+          ],
+        }],
+        ['OS=="win"', {
+          'sources': [
+            'win32.cc',
+            'win32.h',
+          ],
+        }],
+      ],
     },
     {
       'target_name': 'rtc_base',
       'type': 'static_library',
       'dependencies': [
         '<(webrtc_root)/common.gyp:webrtc_common',
         'rtc_base_approved',
       ],
@@ -94,24 +108,24 @@
         'autodetectproxy.h',
         'bandwidthsmoother.cc',
         'bandwidthsmoother.h',
         'base64.cc',
         'base64.h',
         'basicdefs.h',
         'basictypes.h',
         'bind.h',
-        'bind.h.pump',
+#        'bind.h.pump',
         'buffer.cc',
         'buffer.h',
         'bytebuffer.cc',
         'bytebuffer.h',
         'byteorder.h',
         'callback.h',
-        'callback.h.pump',
+#        'callback.h.pump',
         'constructormagic.h',
         'common.cc',
         'common.h',
         'cpumonitor.cc',
         'cpumonitor.h',
         'crc32.cc',
         'crc32.h',
         'criticalsection.cc',
@@ -151,19 +165,19 @@
         'ifaddrs-android.cc',
         'ifaddrs-android.h',
         'iosfilesystem.mm',
         'ipaddress.cc',
         'ipaddress.h',
         'json.cc',
         'json.h',
         'latebindingsymboltable.cc',
-        'latebindingsymboltable.cc.def',
+#        'latebindingsymboltable.cc.def',
         'latebindingsymboltable.h',
-        'latebindingsymboltable.h.def',
+#        'latebindingsymboltable.h.def',
         'libdbusglibsymboltable.cc',
         'libdbusglibsymboltable.h',
         'linux.cc',
         'linux.h',
         'linuxfdwalk.c',
         'linuxfdwalk.h',
         'linked_ptr.h',
         'logging.cc',
@@ -173,18 +187,19 @@
         'maccocoasocketserver.h',
         'maccocoasocketserver.mm',
         'maccocoathreadhelper.h',
         'maccocoathreadhelper.mm',
         'macconversion.cc',
         'macconversion.h',
         'macsocketserver.cc',
         'macsocketserver.h',
-        'macutils.cc',
-        'macutils.h',
+# moved by mozilla
+#        'macutils.cc',
+#        'macutils.h',
         'macwindowpicker.cc',
         'macwindowpicker.h',
         'mathutils.h',
         'messagedigest.cc',
         'messagedigest.h',
         'messagehandler.cc',
         'messagehandler.h',
         'messagequeue.cc',
@@ -292,18 +307,18 @@
         'unixfilesystem.cc',
         'unixfilesystem.h',
         'urlencode.cc',
         'urlencode.h',
         'versionparsing.cc',
         'versionparsing.h',
         'virtualsocketserver.cc',
         'virtualsocketserver.h',
-        'win32.cc',
-        'win32.h',
+#        'win32.cc',
+#        'win32.h',
         'win32filesystem.cc',
         'win32filesystem.h',
         'win32regkey.cc',
         'win32regkey.h',
         'win32securityerrors.cc',
         'win32socketinit.cc',
         'win32socketinit.h',
         'win32socketserver.cc',
@@ -360,37 +375,37 @@
             'asyncinvoker.cc',
             'asyncinvoker.h',
             'asyncinvoker-inl.h',
             'atomicops.h',
             'bandwidthsmoother.cc',
             'bandwidthsmoother.h',
             'basictypes.h',
             'bind.h',
-            'bind.h.pump',
+#            'bind.h.pump',
             'callback.h',
-            'callback.h.pump',
+#            'callback.h.pump',
             'constructormagic.h',
             'dbus.cc',
             'dbus.h',
             'diskcache_win32.cc',
             'diskcache_win32.h',
             'filelock.cc',
             'filelock.h',
             'fileutils_mock.h',
             'genericslot.h',
-            'genericslot.h.pump',
+#            'genericslot.h.pump',
             'httpserver.cc',
             'httpserver.h',
             'json.cc',
             'json.h',
             'latebindingsymboltable.cc',
-            'latebindingsymboltable.cc.def',
+#            'latebindingsymboltable.cc.def',
             'latebindingsymboltable.h',
-            'latebindingsymboltable.h.def',
+#            'latebindingsymboltable.h.def',
             'libdbusglibsymboltable.cc',
             'libdbusglibsymboltable.h',
             'linuxfdwalk.c',
             'linuxfdwalk.h',
             'x11windowpicker.cc',
             'x11windowpicker.h',
             'logging.cc',
             'logging.h',
--- a/media/webrtc/trunk/webrtc/base/base64.cc
+++ b/media/webrtc/trunk/webrtc/base/base64.cc
@@ -74,17 +74,18 @@ bool Base64::IsBase64Char(char ch) {
          (('0' <= ch) && (ch <= '9')) ||
          (ch == '+') || (ch == '/');
 }
 
 bool Base64::GetNextBase64Char(char ch, char* next_ch) {
   if (next_ch == NULL) {
     return false;
   }
-  const char* p = strchr(Base64Table, ch);
+  // Evil due to base/stringutils.h wanting non-standard &char for the second arg
+  const char* p = strchr(Base64Table, &ch);
   if (!p)
     return false;
   ++p;
   *next_ch = (*p) ? *p : Base64Table[0];
   return true;
 }
 
 bool Base64::IsBase64Encoded(const std::string& str) {
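 
(For comparison, the same "next character" logic written against the standard
strchr, which the code above cannot use because rtc's stringutils.h shadows it
with a two-pointer overload; kTable is the standard Base64 alphabet and the
helper assumes ch is a valid, non-NUL Base64 character:)
 
    #include <cstring>
 
    static bool NextBase64Char(char ch, char* next_ch) {
      static const char kTable[] =
          "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
      const char* p = std::strchr(kTable, ch);  // assumes ch != '\0'
      if (!p) {
        return false;
      }
      ++p;
      *next_ch = *p ? *p : kTable[0];  // '/' wraps around to 'A'
      return true;
    }
 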
--- a/media/webrtc/trunk/webrtc/base/checks.cc
+++ b/media/webrtc/trunk/webrtc/base/checks.cc
@@ -11,17 +11,17 @@
 // Most of this was borrowed (with minor modifications) from V8's and Chromium's
 // src/base/logging.cc.
 
 // Use the C++ version to provide __GLIBCXX__.
 #include <cstdarg>
 #include <cstdio>
 #include <cstdlib>
 
-#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
 #include <cxxabi.h>
 #include <execinfo.h>
 #endif
 
 #if defined(WEBRTC_ANDROID)
 #define LOG_TAG "rtc"
 #include <android/log.h>  // NOLINT
 #endif
@@ -50,17 +50,17 @@ void PrintError(const char* format, ...)
   VPrintError(format, args);
   va_end(args);
 }
 
 // TODO(ajm): This works on Mac (although the parsing fails) but I don't seem
 // to get usable symbols on Linux. This is copied from V8. Chromium has a more
 // advanced stack trace system; also more difficult to copy.
 void DumpBacktrace() {
-#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
   void* trace[100];
   int size = backtrace(trace, sizeof(trace) / sizeof(*trace));
   char** symbols = backtrace_symbols(trace, size);
   PrintError("\n==== C stack trace ===============================\n\n");
   if (size == 0) {
     PrintError("(empty)\n");
   } else if (symbols == NULL) {
     PrintError("(no symbols)\n");
--- a/media/webrtc/trunk/webrtc/base/macutils.cc
+++ b/media/webrtc/trunk/webrtc/base/macutils.cc
@@ -5,17 +5,19 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include <sstream>
 
+#ifndef WEBRTC_MOZILLA_BUILD
 #include "webrtc/base/common.h"
+#endif
 #include "webrtc/base/logging.h"
 #include "webrtc/base/macutils.h"
 #include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/stringutils.h"
 
 namespace rtc {
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -65,31 +67,37 @@ void DecodeFourChar(UInt32 fc, std::stri
   } else {
     ss.str("");
     ss << "0x" << std::hex << fc;
   }
   out->append(ss.str());
 }
 
 static bool GetGestalt(OSType ostype, int* value) {
+#ifndef WEBRTC_MOZILLA_BUILD
   ASSERT(NULL != value);
+#endif
   SInt32 native_value;
   OSStatus result = Gestalt(ostype, &native_value);
   if (noErr == result) {
     *value = native_value;
     return true;
   }
   std::string str;
   DecodeFourChar(ostype, &str);
+#ifndef WEBRTC_MOZILLA_BUILD
   LOG_E(LS_ERROR, OS, result) << "Gestalt(" << str << ")";
+#endif
   return false;
 }
 
 bool GetOSVersion(int* major, int* minor, int* bugfix) {
+#ifndef WEBRTC_MOZILLA_BUILD
   ASSERT(major && minor && bugfix);
+#endif
   if (!GetGestalt(gestaltSystemVersion, major)) {
     return false;
   }
   if (*major < 0x1040) {
     *bugfix = *major & 0xF;
     *minor = (*major >> 4) & 0xF;
     *major = (*major >> 8);
     return true;
@@ -136,16 +144,17 @@ bool GetQuickTimeVersion(std::string* ou
   }
 
   std::stringstream ss;
   ss << std::hex << ver;
   *out = ss.str();
   return true;
 }
 
+#ifndef WEBRTC_MOZILLA_BUILD
 bool RunAppleScript(const std::string& script) {
   // TODO(thaloun): Add a .mm file that contains something like this:
   // NSString source from script
   // NSAppleScript* appleScript = [[NSAppleScript alloc] initWithSource:&source]
   // if (appleScript != nil) {
   //   [appleScript executeAndReturnError:nil]
   //   [appleScript release]
 #ifndef CARBON_DEPRECATED
@@ -209,13 +218,15 @@ bool RunAppleScript(const std::string& s
   }
   CloseComponent(component);
   return true;
 #else
   // TODO(thaloun): Support applescripts with the NSAppleScript API.
   return false;
 #endif  // CARBON_DEPRECATED
 }
+#endif // !WEBRTC_MOZILLA_BUILD
+
 #endif  // WEBRTC_MAC && !defined(WEBRTC_IOS)
 
 ///////////////////////////////////////////////////////////////////////////////
 
 }  // namespace rtc
--- a/media/webrtc/trunk/webrtc/base/macutils.h
+++ b/media/webrtc/trunk/webrtc/base/macutils.h
@@ -42,18 +42,20 @@ enum MacOSVersionName {
   kMacOSMavericks,     // 10.9
   kMacOSNewer,         // 10.10+
 };
 
 bool GetOSVersion(int* major, int* minor, int* bugfix);
 MacOSVersionName GetOSVersionName();
 bool GetQuickTimeVersion(std::string* version);
 
+#ifndef WEBRTC_MOZILLA_BUILD
 // Runs the given AppleScript. Only supports scripts that do not
 // require user interaction.
 bool RunAppleScript(const std::string& script);
 #endif
+#endif
 
 ///////////////////////////////////////////////////////////////////////////////
 
 }  // namespace rtc
 
 #endif  // WEBRTC_BASE_MACUTILS_H__
--- a/media/webrtc/trunk/webrtc/base/scoped_ptr.h
+++ b/media/webrtc/trunk/webrtc/base/scoped_ptr.h
@@ -104,16 +104,29 @@
 
 #include <algorithm>  // For std::swap().
 
 #include "webrtc/base/constructormagic.h"
 #include "webrtc/base/move.h"
 #include "webrtc/base/template_util.h"
 #include "webrtc/typedefs.h"
 
+// XXX This file creates unused typedefs as a way of doing static assertions,
+// both via COMPILE_ASSERT and via direct typedefs like
+// 'type_must_be_complete'. These trigger a GCC warning (enabled by -Wall in
+// GCC 4.8 and above) which we disable here, just for this file, for GCC >= 4.8.
+// This can be removed if & when this file (and COMPILE_ASSERT) stops using
+// these typedefs.
+#if defined(__GNUC__)
+#if !defined(__clang__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
+#endif // not clang, and version >= 4.8
+#endif // GCC or clang
+
 namespace rtc {
 
 // Function object which deletes its parameter, which must be a pointer.
 // If C is an array type, invokes 'delete[]' on the parameter; otherwise,
 // invokes 'delete'. The default deleter for scoped_ptr<T>.
 template <class T>
 struct DefaultDeleter {
   DefaultDeleter() {}
@@ -618,9 +631,16 @@ bool operator!=(T* p1, const rtc::scoped
 // A function to convert T* into scoped_ptr<T>
 // Doing e.g. make_scoped_ptr(new FooBarBaz<type>(arg)) is a shorter notation
 // for scoped_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
 template <typename T>
 rtc::scoped_ptr<T> rtc_make_scoped_ptr(T* ptr) {
   return rtc::scoped_ptr<T>(ptr);
 }
 
+// Pop off 'ignored "-Wunused-local-typedefs"':
+#if defined(__GNUC__)
+#if !defined(__clang__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
+#pragma GCC diagnostic pop
+#endif // not clang, and version >= 4.8
+#endif // GCC or clang
+
 #endif  // #ifndef WEBRTC_BASE_SCOPED_PTR_H__
--- a/media/webrtc/trunk/webrtc/base/sigslot.h
+++ b/media/webrtc/trunk/webrtc/base/sigslot.h
@@ -81,16 +81,18 @@
 // has_slots<multi_threaded_local> and signal0<single_threaded>.
 // If has_slots is single threaded, the user must ensure that it is not
 // trying to connect to or disconnect from signalx concurrently, or a data
 // race may occur. If signalx is single threaded, the user must ensure
 // that disconnect, connect or signal is not happening concurrently, or a
 // data race may occur.
 
 #ifndef WEBRTC_BASE_SIGSLOT_H__
 #define WEBRTC_BASE_SIGSLOT_H__
+#ifndef TALK_BASE_SIGSLOT_H__
+#define TALK_BASE_SIGSLOT_H__
 
 #include <list>
 #include <set>
 #include <stdlib.h>
 
 // On our copy of sigslot.h, we set single threading as default.
 #define SIGSLOT_DEFAULT_MT_POLICY single_threaded
 
@@ -2796,9 +2798,10 @@ namespace sigslot {
 
 				it = itNext;
 			}
 		}
 	};
 
 }; // namespace sigslot
 
+#endif // TALK_BASE_SIGSLOT_H__
 #endif // WEBRTC_BASE_SIGSLOT_H__
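 
(The added TALK_BASE_SIGSLOT_H__ guard presumably keeps this copy and a
talk/base copy of sigslot.h from both defining the sigslot classes in one
translation unit: whichever copy is included first claims both guards.)
 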
--- a/media/webrtc/trunk/webrtc/base/stringutils.h
+++ b/media/webrtc/trunk/webrtc/base/stringutils.h
@@ -18,17 +18,17 @@
 
 #if defined(WEBRTC_WIN)
 #include <malloc.h>
 #include <wchar.h>
 #define alloca _alloca
 #endif  // WEBRTC_WIN 
 
 #if defined(WEBRTC_POSIX)
-#ifdef BSD
+#ifdef WEBRTC_BSD
 #include <stdlib.h>
 #else  // WEBRTC_BSD
 #include <alloca.h>
 #endif  // !WEBRTC_BSD
 #endif  // WEBRTC_POSIX
 
 #include <string>
 
--- a/media/webrtc/trunk/webrtc/base/thread_checker_impl.cc
+++ b/media/webrtc/trunk/webrtc/base/thread_checker_impl.cc
@@ -22,20 +22,34 @@ namespace rtc {
 
 PlatformThreadId CurrentThreadId() {
   PlatformThreadId ret;
 #if defined(WEBRTC_WIN)
   ret = GetCurrentThreadId();
 #elif defined(WEBRTC_POSIX)
 #if defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
   ret = pthread_mach_thread_np(pthread_self());
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_GONK)
   ret =  syscall(__NR_gettid);
 #elif defined(WEBRTC_ANDROID)
   ret = gettid();
+#elif defined(__NetBSD__)
+  return _lwp_self();
+#elif defined(__DragonFly__)
+  return lwp_gettid();
+#elif defined(__OpenBSD__)
+  return reinterpret_cast<uintptr_t> (pthread_self());
+#elif defined(__FreeBSD__)
+#if __FreeBSD_version > 900030
+    return pthread_getthreadid_np();
+#else
+    long lwpid;
+    thr_self(&lwpid);
+    return lwpid;
+#endif
 #else
   // Default implementation for nacl and solaris.
   ret = reinterpret_cast<pid_t>(pthread_self());
 #endif
 #endif  // defined(WEBRTC_POSIX)
   DCHECK(ret);
   return ret;
 }
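 
(CurrentThreadId() feeds the thread checker; a minimal sketch of that usage
pattern, with illustrative names rather than the actual ThreadCheckerImpl:)
 
    // Illustrative only; the real ThreadCheckerImpl also supports re-binding.
    class ThreadCheckerSketch {
     public:
      ThreadCheckerSketch() : valid_thread_(rtc::CurrentThreadId()) {}
      bool CalledOnValidThread() const {
        return valid_thread_ == rtc::CurrentThreadId();
      }
     private:
      const rtc::PlatformThreadId valid_thread_;
    };
 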
--- a/media/webrtc/trunk/webrtc/build/arm_neon.gypi
+++ b/media/webrtc/trunk/webrtc/build/arm_neon.gypi
@@ -18,17 +18,35 @@
 #   ],
 #   'includes': ['path/to/this/gypi/file'],
 # }
 
 {
   'cflags!': [
     '-mfpu=vfpv3-d16',
   ],
+  'cflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
   'conditions': [
     # "-mfpu=neon" is not requried for arm64 in GCC.
     ['target_arch!="arm64"', {
       'cflags': [
         '-mfpu=neon',
-       ],
+      ],
+      'cflags_mozilla': [
+        '-mfpu=neon',
+      ],
+      'asflags': [
+        '-mfpu=neon',
+      ],
+      'asflags_mozilla': [
+        '-mfpu=neon',
+      ],
     }],
   ],
 }
--- a/media/webrtc/trunk/webrtc/build/common.gypi
+++ b/media/webrtc/trunk/webrtc/build/common.gypi
@@ -35,27 +35,41 @@
       },
       'build_with_chromium%': '<(build_with_chromium)',
       'build_with_libjingle%': '<(build_with_libjingle)',
       'webrtc_root%': '<(webrtc_root)',
       'apk_tests_path%': '<(apk_tests_path)',
       'modules_java_gyp_path%': '<(modules_java_gyp_path)',
       'webrtc_vp8_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp8',
       'webrtc_vp9_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp9',
+      'webrtc_h264_dir%': '<(webrtc_root)/modules/video_coding/codecs/h264',
+      'include_g711%': 1,
+      'include_g722%': 1,
+      'include_ilbc%': 1,
       'include_opus%': 1,
+      'include_isac%': 1,
+      'include_pcm16b%': 1,
       'opus_dir%': '<(DEPTH)/third_party/opus',
     },
     'build_with_chromium%': '<(build_with_chromium)',
     'build_with_libjingle%': '<(build_with_libjingle)',
     'webrtc_root%': '<(webrtc_root)',
     'apk_tests_path%': '<(apk_tests_path)',
     'modules_java_gyp_path%': '<(modules_java_gyp_path)',
     'webrtc_vp8_dir%': '<(webrtc_vp8_dir)',
     'webrtc_vp9_dir%': '<(webrtc_vp9_dir)',
+    'webrtc_h264_dir%': '<(webrtc_h264_dir)',
+
+    'include_g711%': '<(include_g711)',
+    'include_g722%': '<(include_g722)',
+    'include_ilbc%': '<(include_ilbc)',
     'include_opus%': '<(include_opus)',
+    'include_isac%': '<(include_isac)',
+    'include_pcm16b%': '<(include_pcm16b)',
+
     'rtc_relative_path%': 1,
     'external_libraries%': '0',
     'json_root%': '<(DEPTH)/third_party/jsoncpp/source/include/',
     # openssl needs to be defined or gyp will complain. It is only used
     # when providing external libraries, so just use the current directory
     # as a placeholder.
     'ssl_root%': '.',
 
@@ -123,48 +137,75 @@
       ['build_with_chromium==1', {
         # Exclude pulse audio on Chromium since its prerequisites don't require
         # pulse audio.
         'include_pulse_audio%': 0,
 
         # Exclude internal ADM since Chromium uses its own IO handling.
         'include_internal_audio_device%': 0,
 
+        'include_ndk_cpu_features%': 0,
       }, {  # Settings for the standalone (not-in-Chromium) build.
         # TODO(andrew): For now, disable the Chrome plugins, which causes a
         # flood of chromium-style warnings. Investigate enabling them:
         # http://code.google.com/p/webrtc/issues/detail?id=163
         'clang_use_chrome_plugins%': 0,
 
         'include_pulse_audio%': 1,
         'include_internal_audio_device%': 1,
+        'include_ndk_cpu_features%': 0,
       }],
       ['build_with_libjingle==1', {
         'include_tests%': 0,
         'restrict_webrtc_logging%': 1,
       }, {
         'include_tests%': 1,
         'restrict_webrtc_logging%': 0,
       }],
+      ['OS=="linux"', {
+        'include_alsa_audio%': 1,
+      }, {
+        'include_alsa_audio%': 0,
+      }],
+      ['OS=="openbsd"', {
+        'include_sndio_audio%': 1,
+      }, {
+        'include_sndio_audio%': 0,
+      }],
+      ['OS=="solaris" or (OS!="openbsd" and os_bsd==1)', {
+        'include_pulse_audio%': 1,
+      }, {
+        'include_pulse_audio%': 0,
+      }],
+      ['OS=="linux" or OS=="solaris" or os_bsd==1', {
+        'include_v4l2_video_capture%': 1,
+      }, {
+        'include_v4l2_video_capture%': 0,
+      }],
       ['OS=="ios"', {
         'build_libjpeg%': 0,
         'enable_protobuf%': 0,
       }],
       ['target_arch=="arm" or target_arch=="arm64"', {
         'prefer_fixed_point%': 1,
       }],
-      ['OS!="ios" and (target_arch!="arm" or arm_version>=7) and target_arch!="mips64el"', {
+      ['OS!="ios" and (target_arch!="arm" or arm_version>=7) and target_arch!="mips64el" and build_with_mozilla==0', {
         'rtc_use_openmax_dl%': 1,
       }, {
         'rtc_use_openmax_dl%': 0,
       }],
     ], # conditions
   },
   'target_defaults': {
     'conditions': [
+      ['moz_widget_toolkit_gonk==1', {
+        'defines' : [
+          'WEBRTC_GONK',
+        ],
+      }],
       ['restrict_webrtc_logging==1', {
         'defines': ['WEBRTC_RESTRICT_LOGGING',],
       }],
       ['build_with_mozilla==1', {
         'defines': [
           # Changes settings for Mozilla build.
           'WEBRTC_MOZILLA_BUILD',
          ],
@@ -268,28 +309,47 @@
         ],
       }],
       ['target_arch=="arm"', {
         'defines': [
           'WEBRTC_ARCH_ARM',
         ],
         'conditions': [
           ['arm_version>=7', {
-            'defines': ['WEBRTC_ARCH_ARM_V7',],
+            'defines': ['WEBRTC_ARCH_ARM_V7',
+                        'WEBRTC_BUILD_NEON_LIBS'],
             'conditions': [
               ['arm_neon==1', {
                 'defines': ['WEBRTC_ARCH_ARM_NEON',],
               }],
-              ['arm_neon==0 and OS=="android"', {
+              ['arm_neon==0 and (OS=="android" or moz_widget_toolkit_gonk==1)', {
                 'defines': ['WEBRTC_DETECT_ARM_NEON',],
               }],
             ],
           }],
         ],
       }],
+      ['os_bsd==1', {
+        'defines': [
+          'WEBRTC_BSD',
+          'WEBRTC_THREAD_RR',
+        ],
+      }],
+      ['OS=="dragonfly" or OS=="netbsd"', {
+        'defines': [
+          # doesn't support pthread_condattr_setclock
+          'WEBRTC_CLOCK_TYPE_REALTIME',
+        ],
+      }],
+      ['OS=="openbsd"', {
+        'defines' : [
+          'WEBRTC_AUDIO_SNDIO',
+        ],
+      }],
+      # Mozilla: if we support Mozilla on MIPS, we'll need to mod the cflags entries here
       ['target_arch=="mipsel" and mips_arch_variant!="r6" and android_webview_build==0', {
         'defines': [
           'MIPS32_LE',
         ],
         'conditions': [
           ['mips_float_abi=="hard"', {
             'defines': [
               'MIPS_FPU_LE',
@@ -327,16 +387,23 @@
       }],
       ['OS=="ios"', {
         'defines': [
           'WEBRTC_MAC',
           'WEBRTC_IOS',
         ],
       }],
       ['OS=="linux"', {
+#        'conditions': [
+#          ['have_clock_monotonic==1', {
+#            'defines': [
+#              'WEBRTC_CLOCK_TYPE_REALTIME',
+#            ],
+#          }],
+#        ],
         'defines': [
           'WEBRTC_LINUX',
         ],
       }],
       ['OS=="mac"', {
         'defines': [
           'WEBRTC_MAC',
         ],
@@ -350,27 +417,33 @@
         # http://code.google.com/p/webrtc/issues/detail?id=261 is solved.
         'msvs_disabled_warnings': [
           4373,  # legacy warning for ignoring const / volatile in signatures.
           4389,  # Signed/unsigned mismatch.
         ],
         # Re-enable some warnings that Chromium disables.
         'msvs_disabled_warnings!': [4189,],
       }],
+      # used on GONK as well
+      ['enable_android_opensl==1 and (OS=="android" or moz_widget_toolkit_gonk==1)', {
+        'defines': [
+          'WEBRTC_ANDROID_OPENSLES',
+        ],
+      }],
+      ['moz_webrtc_omx==1', {
+        'defines' : [
+          'MOZ_WEBRTC_OMX'
+        ],
+      }],
       ['OS=="android"', {
         'defines': [
           'WEBRTC_LINUX',
           'WEBRTC_ANDROID',
          ],
          'conditions': [
-           ['enable_android_opensl==1', {
-             'defines': [
-               'WEBRTC_ANDROID_OPENSLES',
-             ],
-           }],
            ['clang!=1', {
              # The Android NDK doesn't provide optimized versions of these
              # functions. Ensure they are disabled for all compilers.
              'cflags': [
                '-fno-builtin-cos',
                '-fno-builtin-sin',
                '-fno-builtin-cosf',
                '-fno-builtin-sinf',
--- a/media/webrtc/trunk/webrtc/build/merge_libs.gyp
+++ b/media/webrtc/trunk/webrtc/build/merge_libs.gyp
@@ -43,10 +43,12 @@
           'outputs': ['<(output_lib)'],
           'action': ['python',
                      'merge_libs.py',
                      '<(PRODUCT_DIR)',
                      '<(output_lib)',],
         },
       ],
     },
+#      }],
+#    ],
   ],
 }
--- a/media/webrtc/trunk/webrtc/common_audio/audio_ring_buffer.h
+++ b/media/webrtc/trunk/webrtc/common_audio/audio_ring_buffer.h
@@ -3,16 +3,18 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#ifndef WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
+#define WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
 #include <stddef.h>
 #include <vector>
 
 struct RingBuffer;
 
 namespace webrtc {
 
 // A ring buffer tailored for float deinterleaved audio. Any operation that
@@ -42,8 +44,9 @@ class AudioRingBuffer final {
 
  private:
   // We don't use a ScopedVector because it doesn't support a specialized
   // deleter (like scoped_ptr, for instance).
   std::vector<RingBuffer*> buffers_;
 };
 
 }  // namespace webrtc
+#endif  // WEBRTC_COMMON_AUDIO_AUDIO_RING_BUFFER_H_
--- a/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
+++ b/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
@@ -195,16 +195,21 @@
         {
           'target_name': 'common_audio_sse2',
           'type': 'static_library',
           'sources': [
             'fir_filter_sse.cc',
             'resampler/sinc_resampler_sse.cc',
           ],
           'cflags': ['-msse2',],
+          'conditions': [
+            [ 'os_posix == 1', {
+              'cflags_mozilla': ['-msse2',],
+            }],
+          ],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],  # targets
     }],
     ['target_arch=="arm" and arm_version>=7 or target_arch=="arm64"', {
       'targets': [
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
@@ -12,19 +12,22 @@
 /*
  * A wrapper for resampling between numerous sampling-rate combinations.
  */
 
 #ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
 #define WEBRTC_RESAMPLER_RESAMPLER_H_
 
 #include "webrtc/typedefs.h"
+#include <speex/speex_resampler.h>
 
 namespace webrtc {
 
+#define FIXED_RATE_RESAMPLER 0x10
+
 // All methods return 0 on success and -1 on failure.
 class Resampler
 {
 
 public:
     Resampler();
     Resampler(int inFreq, int outFreq, int num_channels);
     ~Resampler();
@@ -35,59 +38,18 @@ public:
     // Reset all states if any parameter has changed
     int ResetIfNeeded(int inFreq, int outFreq, int num_channels);
 
     // Resample samplesIn to samplesOut.
     int Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
              int maxLen, int &outLen);
 
 private:
-    enum ResamplerMode
-    {
-        kResamplerMode1To1,
-        kResamplerMode1To2,
-        kResamplerMode1To3,
-        kResamplerMode1To4,
-        kResamplerMode1To6,
-        kResamplerMode1To12,
-        kResamplerMode2To3,
-        kResamplerMode2To11,
-        kResamplerMode4To11,
-        kResamplerMode8To11,
-        kResamplerMode11To16,
-        kResamplerMode11To32,
-        kResamplerMode2To1,
-        kResamplerMode3To1,
-        kResamplerMode4To1,
-        kResamplerMode6To1,
-        kResamplerMode12To1,
-        kResamplerMode3To2,
-        kResamplerMode11To2,
-        kResamplerMode11To4,
-        kResamplerMode11To8
-    };
+    SpeexResamplerState* state_;
 
-    // Generic pointers since we don't know what states we'll need
-    void* state1_;
-    void* state2_;
-    void* state3_;
-
-    // Storage if needed
-    int16_t* in_buffer_;
-    int16_t* out_buffer_;
-    int in_buffer_size_;
-    int out_buffer_size_;
-    int in_buffer_size_max_;
-    int out_buffer_size_max_;
-
-    int my_in_frequency_khz_;
-    int my_out_frequency_khz_;
-    ResamplerMode my_mode_;
-    int num_channels_;
-
-    // Extra instance for stereo
-    Resampler* slave_left_;
-    Resampler* slave_right_;
+    int in_freq_;
+    int out_freq_;
+    int channels_;
 };
 
 }  // namespace webrtc
 
 #endif // WEBRTC_RESAMPLER_RESAMPLER_H_
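 
(A hedged usage sketch of the slimmed-down, Speex-backed Resampler declared
above; assumes resampler.h is included, buffer sizes are illustrative, and the
new Push() implementation requires maxLen >= lengthIn:)
 
    #include <cstdint>
 
    void DownsampleTo16k(const int16_t* in) {  // 480 samples = 10 ms @ 48 kHz
      webrtc::Resampler resampler(48000, 16000, 1);  // mono, 48 kHz -> 16 kHz
      int16_t out[480];  // at least lengthIn, per the Push() contract
      int out_len = 0;
      if (resampler.Push(in, 480, out, 480, out_len) == 0) {
        // out_len is ~160: 10 ms of audio at 16 kHz.
      }
    }
 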
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
@@ -8,17 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/common_audio/resampler/include/push_resampler.h"
 
 #include <string.h>
 
 #include "webrtc/common_audio/include/audio_util.h"
-#include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/resampler/push_sinc_resampler.h"
 
 namespace webrtc {
 
 template <typename T>
 PushResampler<T>::PushResampler()
     : src_sample_rate_hz_(0),
       dst_sample_rate_hz_(0),
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
@@ -10,950 +10,122 @@
 
 
 /*
  * A wrapper for resampling between numerous sampling-rate combinations.
  */
 
 #include <stdlib.h>
 #include <string.h>
+#include <assert.h>
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
+// TODO(jesup): tune the resampler quality per platform capability.
+// Note: if these are changed (higher), you may need to change the
+// KernelDelay values in the unit tests here and in output_mixer.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK)
+#define RESAMPLER_QUALITY 2
+#else
+#define RESAMPLER_QUALITY 3
+#endif
+
 namespace webrtc {
 
-Resampler::Resampler()
-    : state1_(nullptr),
-      state2_(nullptr),
-      state3_(nullptr),
-      in_buffer_(nullptr),
-      out_buffer_(nullptr),
-      in_buffer_size_(0),
-      out_buffer_size_(0),
-      in_buffer_size_max_(0),
-      out_buffer_size_max_(0),
-      my_in_frequency_khz_(0),
-      my_out_frequency_khz_(0),
-      my_mode_(kResamplerMode1To1),
-      num_channels_(0),
-      slave_left_(nullptr),
-      slave_right_(nullptr) {
+Resampler::Resampler() : state_(NULL), channels_(0)
+{
+  // Note: Push will fail until Reset() is called
 }
 
 Resampler::Resampler(int inFreq, int outFreq, int num_channels)
     : Resampler() {
   Reset(inFreq, outFreq, num_channels);
 }
 
 Resampler::~Resampler()
 {
-    if (state1_)
-    {
-        free(state1_);
-    }
-    if (state2_)
-    {
-        free(state2_);
-    }
-    if (state3_)
-    {
-        free(state3_);
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-    }
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+  }
 }
 
 int Resampler::ResetIfNeeded(int inFreq, int outFreq, int num_channels)
 {
-    int tmpInFreq_kHz = inFreq / 1000;
-    int tmpOutFreq_kHz = outFreq / 1000;
-
-    if ((tmpInFreq_kHz != my_in_frequency_khz_) || (tmpOutFreq_kHz != my_out_frequency_khz_)
-            || (num_channels != num_channels_))
-    {
-        return Reset(inFreq, outFreq, num_channels);
-    } else
-    {
-        return 0;
-    }
+  if (!state_ || channels_ != num_channels ||
+      inFreq != in_freq_ || outFreq != out_freq_)
+  {
+    // Note that fixed-rate resamplers where input == output rate will
+    // have state_ == NULL, and will call Reset() here - but reset won't
+    // do anything beyond overwrite the member vars unless it needs a
+    // real resampler.
+    return Reset(inFreq, outFreq, num_channels);
+  } else {
+    return 0;
+  }
 }
 
 int Resampler::Reset(int inFreq, int outFreq, int num_channels)
 {
-    if (num_channels != 1 && num_channels != 2) {
+  if (num_channels != 1 && num_channels != 2) {
+    return -1;
+  }
+
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+    state_ = NULL;
+  }
+  channels_ = num_channels;
+  in_freq_ = inFreq;
+  out_freq_ = outFreq;
+
+  // For fixed-rate, same-rate resamples we just memcpy and so don't spin up a resampler
+  if (inFreq != outFreq)
+  {
+    state_ = speex_resampler_init(num_channels, inFreq, outFreq, RESAMPLER_QUALITY, NULL);
+    if (!state_)
+    {
       return -1;
     }
-    num_channels_ = num_channels;
-
-    if (state1_)
-    {
-        free(state1_);
-        state1_ = NULL;
-    }
-    if (state2_)
-    {
-        free(state2_);
-        state2_ = NULL;
-    }
-    if (state3_)
-    {
-        free(state3_);
-        state3_ = NULL;
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-        in_buffer_ = NULL;
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-        out_buffer_ = NULL;
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-        slave_left_ = NULL;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-        slave_right_ = NULL;
-    }
-
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-
-    // Start with a math exercise, Euclid's algorithm to find the gcd:
-    int a = inFreq;
-    int b = outFreq;
-    int c = a % b;
-    while (c != 0)
-    {
-        a = b;
-        b = c;
-        c = a % b;
-    }
-    // b is now the gcd;
-
-    // We need to track what domain we're in.
-    my_in_frequency_khz_ = inFreq / 1000;
-    my_out_frequency_khz_ = outFreq / 1000;
-
-    // Scale with GCD
-    inFreq = inFreq / b;
-    outFreq = outFreq / b;
-
-    if (num_channels_ == 2)
-    {
-        // Create two mono resamplers.
-        slave_left_ = new Resampler(inFreq, outFreq, 1);
-        slave_right_ = new Resampler(inFreq, outFreq, 1);
-    }
-
-    if (inFreq == outFreq)
-    {
-        my_mode_ = kResamplerMode1To1;
-    } else if (inFreq == 1)
-    {
-        switch (outFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode1To2;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode1To3;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode1To4;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode1To6;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode1To12;
-                break;
-            default:
-                return -1;
-        }
-    } else if (outFreq == 1)
-    {
-        switch (inFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode2To1;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode3To1;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode4To1;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode6To1;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode12To1;
-                break;
-            default:
-                return -1;
-        }
-    } else if ((inFreq == 2) && (outFreq == 3))
-    {
-        my_mode_ = kResamplerMode2To3;
-    } else if ((inFreq == 2) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode2To11;
-    } else if ((inFreq == 4) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode4To11;
-    } else if ((inFreq == 8) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode8To11;
-    } else if ((inFreq == 3) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode3To2;
-    } else if ((inFreq == 11) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode11To2;
-    } else if ((inFreq == 11) && (outFreq == 4))
-    {
-        my_mode_ = kResamplerMode11To4;
-    } else if ((inFreq == 11) && (outFreq == 16))
-    {
-        my_mode_ = kResamplerMode11To16;
-    } else if ((inFreq == 11) && (outFreq == 32))
-    {
-        my_mode_ = kResamplerMode11To32;
-    } else if ((inFreq == 11) && (outFreq == 8))
-    {
-        my_mode_ = kResamplerMode11To8;
-    } else
-    {
-        return -1;
-    }
-
-    // Now create the states we need
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            // No state needed;
-            break;
-        case kResamplerMode1To2:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To3:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            break;
-        case kResamplerMode1To4:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To6:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:6
-            state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state2_);
-            break;
-        case kResamplerMode1To12:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 4:12
-            state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz(
-                (WebRtcSpl_State16khzTo48khz*) state3_);
-            break;
-        case kResamplerMode2To3:
-            // 2:6
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            // 6:3
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode2To11:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state2_);
-            break;
-        case kResamplerMode4To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state1_);
-            break;
-        case kResamplerMode8To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo22khz));
-            WebRtcSpl_ResetResample16khzTo22khz((WebRtcSpl_State16khzTo22khz *)state1_);
-            break;
-        case kResamplerMode11To16:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To32:
-            // 11 -> 22
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            // 22 -> 16
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-
-            // 16 -> 32
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode2To1:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To1:
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            break;
-        case kResamplerMode4To1:
-            // 4:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode6To1:
-            // 6:2
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode12To1:
-            // 12:4
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz(
-                (WebRtcSpl_State48khzTo16khz*) state1_);
-            // 4:2
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To2:
-            // 3:6
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 6:2
-            state2_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To2:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode11To4:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-            break;
-        case kResamplerMode11To8:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state1_);
-            break;
-
-    }
-
-    return 0;
+  }
+  return 0;
 }
 
 // Synchronous resampling; all output samples are written to samplesOut.
-int Resampler::Push(const int16_t * samplesIn, int lengthIn,
-                    int16_t* samplesOut, int maxLen, int &outLen)
+// TODO(jesup) Change to take samples-per-channel in and out
+int Resampler::Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
+                    int maxLen, int &outLen)
 {
-    if (num_channels_ == 2)
-    {
-        // Split up the signal and call the slave object for each channel
-        int16_t* left = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* right = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* out_left = (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int16_t* out_right =
-                (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int res = 0;
-        for (int i = 0; i < lengthIn; i += 2)
-        {
-            left[i >> 1] = samplesIn[i];
-            right[i >> 1] = samplesIn[i + 1];
-        }
-
-        // It's OK to overwrite the local parameter, since it's just a copy
-        lengthIn = lengthIn / 2;
-
-        int actualOutLen_left = 0;
-        int actualOutLen_right = 0;
-        // Do resampling for right channel
-        res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, actualOutLen_left);
-        res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, actualOutLen_right);
-        if (res || (actualOutLen_left != actualOutLen_right))
-        {
-            free(left);
-            free(right);
-            free(out_left);
-            free(out_right);
-            return -1;
-        }
-
-        // Reassemble the signal
-        for (int i = 0; i < actualOutLen_left; i++)
-        {
-            samplesOut[i * 2] = out_left[i];
-            samplesOut[i * 2 + 1] = out_right[i];
-        }
-        outLen = 2 * actualOutLen_left;
-
-        free(left);
-        free(right);
-        free(out_left);
-        free(out_right);
-
-        return 0;
-    }
-
-    // Containers for temp samples
-    int16_t* tmp;
-    int16_t* tmp_2;
-    // tmp data for resampling routines
-    int32_t* tmp_mem;
-
-    switch (my_mode_)
+  if (maxLen < lengthIn)
+  {
+    return -1;
+  }
+  if (!state_)
+  {
+    if (in_freq_ != out_freq_ || channels_ == 0)
     {
-        case kResamplerMode1To1:
-            memcpy(samplesOut, samplesIn, lengthIn * sizeof(int16_t));
-            outLen = lengthIn;
-            break;
-        case kResamplerMode1To2:
-            if (maxLen < (lengthIn * 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-            return 0;
-        case kResamplerMode1To3:
-
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn * 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode1To4:
-            if (maxLen < (lengthIn * 4))
-            {
-                return -1;
-            }
-
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:4
-            WebRtcSpl_UpsampleBy2(tmp, lengthIn * 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn * 4;
-            free(tmp);
-            return 0;
-        case kResamplerMode1To6:
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 6))
-            {
-                return -1;
-            }
-
-            //1:2
-
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-
-            for (int i = 0; i < outLen; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode1To12:
-            // We can only handle blocks of 40 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 40) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn * 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*) malloc(sizeof(int16_t) * 4 * lengthIn);
-            //1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
-                                  (int32_t*) state1_);
-            outLen = lengthIn * 2;
-            //2:4
-            WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (int32_t*) state2_);
-            outLen = outLen * 2;
-            // 4:12
-            for (int i = 0; i < outLen; i += 160) {
-              // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
-              // as input and outputs a resampled block of 480 samples. The
-              // data is now actually in 32 kHz sampling rate, despite the
-              // function name, and with a resampling factor of three becomes
-              // 96 kHz.
-              WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                             (WebRtcSpl_State16khzTo48khz*) state3_,
-                                             tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode2To3:
-            if (maxLen < (lengthIn * 3 / 2))
-            {
-                return -1;
-            }
-            // 2:6
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, tmp + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            lengthIn = lengthIn * 3;
-            // 6:3
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode2To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 2))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(tmp + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state2_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode4To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 4))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode8To11:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 8))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(88 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 8,
-                                               (WebRtcSpl_State16khzTo22khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 8;
-            free(tmp_mem);
-            return 0;
-
-        case kResamplerMode11To16:
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 16) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(tmp + i, samplesOut + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            outLen = (lengthIn * 16) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode11To32:
-
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 32) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            // 11 -> 22 kHz in samplesOut
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-
-            // 22 -> 16 in tmp
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesOut + i, tmp + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            // 16 -> 32 in samplesOut
-            WebRtcSpl_UpsampleBy2(tmp, (lengthIn * 16) / 11, samplesOut,
-                                  (int32_t*)state3_);
-
-            outLen = (lengthIn * 32) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode2To1:
-            if (maxLen < (lengthIn / 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn / 2;
-            return 0;
-        case kResamplerMode3To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode4To1:
-            if (maxLen < (lengthIn / 4))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * lengthIn / 2);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn / 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 4;
-            free(tmp);
-            return 0;
-
-        case kResamplerMode6To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 6))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn) / 3);
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            WebRtcSpl_DownsampleBy2(tmp, outLen, samplesOut, (int32_t*)state2_);
-            free(tmp);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode12To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn / 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 3);
-            tmp_2 = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 6);
-            // 12:4
-            for (int i = 0; i < lengthIn; i += 480) {
-              // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
-              // as input and outputs a resampled block of 160 samples. The
-              // data is now actually in 96 kHz sampling rate, despite the
-              // function name, and with a resampling factor of 1/3 becomes
-              // 32 kHz.
-              WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                             (WebRtcSpl_State48khzTo16khz*) state1_,
-                                             tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2,
-                                    (int32_t*) state2_);
-            outLen = outLen / 2;
-            free(tmp);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
-                                    (int32_t*) state3_);
-            free(tmp_2);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode3To2:
-            if (maxLen < (lengthIn * 2 / 3))
-            {
-                return -1;
-            }
-            // 3:6
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 2));
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-            // 6:2
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                free(tmp);
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(tmp + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To2:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 2) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((lengthIn * 4) / 11 * sizeof(int16_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, tmp + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            lengthIn = (lengthIn * 4) / 11;
-
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode11To4:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 4) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, samplesOut + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 4) / 11;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To8:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 8) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesIn + i, samplesOut + (i * 8) / 11,
-                                               (WebRtcSpl_State22khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 8) / 11;
-            free(tmp_mem);
-            return 0;
-            break;
-
+      // Push() will fail until Reset() is called
+      return -1;
     }
+    // Same-frequency "resample": use memcpy, which avoids filtering and
+    // delay. For non-fixed rates, where we might tweak 48000->48000 into
+    // 48000->48001 to compensate for drift, we must resample (and filter)
+    // continuously to avoid glitches on rate changes.
+    memcpy(samplesOut, samplesIn, lengthIn * sizeof(*samplesIn));
+    outLen = lengthIn;
     return 0;
+  }
+  assert(channels_ == 1 || channels_ == 2);
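+  // lengthIn and maxLen count interleaved samples across all channels;
+  // speex expects per-channel counts, hence the shift by (channels_ - 1).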
+  spx_uint32_t len = lengthIn = (lengthIn >> (channels_ - 1));
+  spx_uint32_t out = (spx_uint32_t) (maxLen >> (channels_ - 1));
+  if ((speex_resampler_process_interleaved_int(state_, samplesIn, &len,
+                                               samplesOut, &out) !=
+       RESAMPLER_ERR_SUCCESS) ||
+      len != (spx_uint32_t) lengthIn)
+  {
+    return -1;
+  }
+  outLen = (int) (channels_ * out);
+  return 0;
 }
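+
+// Illustrative use (a sketch, not normative): pushing one 10 ms mono block
+// from 48 kHz down to 16 kHz. Note that Push() requires maxLen >= lengthIn
+// even when downsampling; the ResamplerType argument to Reset() follows the
+// usage in resampler_unittest.cc.
+//
+//   Resampler rs;
+//   rs.Reset(48000, 16000, kResamplerSynchronous);
+//   int16_t in[480] = {0};  // 10 ms at 48 kHz
+//   int16_t out[480];       // Must hold at least lengthIn samples.
+//   int out_len = 0;
+//   if (rs.Push(in, 480, out, 480, out_len) == 0) {
+//     // out_len == 160: 10 ms at 16 kHz.
+//   }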
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
@@ -3,16 +3,18 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <math.h>
+#include <stdio.h>
+
+#include <algorithm>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 
 // TODO(andrew): this is a work-in-progress. Many more tests are needed.
 
 namespace webrtc {
 namespace {
@@ -21,44 +23,37 @@ const int kNumChannels[] = {1, 2};
 const size_t kNumChannelsSize = sizeof(kNumChannels) / sizeof(*kNumChannels);
 
 // Rates we must support.
 const int kMaxRate = 96000;
 const int kRates[] = {
   8000,
   16000,
   32000,
-  44000,
+  44100,
   48000,
   kMaxRate
 };
 const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
 const int kMaxChannels = 2;
 const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
 
-// TODO(andrew): should we be supporting these combinations?
-bool ValidRates(int in_rate, int out_rate) {
-  // Not the most compact notation, for clarity.
-  if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
-      (out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
-    return false;
-  }
-
-  return true;
-}
-
 class ResamplerTest : public testing::Test {
  protected:
   ResamplerTest();
   virtual void SetUp();
   virtual void TearDown();
+  void RunResampleTest(int channels,
+                       int src_sample_rate_hz,
+                       int dst_sample_rate_hz);
 
   Resampler rs_;
   int16_t data_in_[kDataSize];
   int16_t data_out_[kDataSize];
+  int16_t data_reference_[kDataSize];
 };
 
 ResamplerTest::ResamplerTest() {}
 
 void ResamplerTest::SetUp() {
   // Initialize input data with anything. The tests are content independent.
   memset(data_in_, 1, sizeof(data_in_));
 }
@@ -73,67 +68,140 @@ TEST_F(ResamplerTest, Reset) {
   // Check that all required combinations are supported.
   for (size_t i = 0; i < kRatesSize; ++i) {
     for (size_t j = 0; j < kRatesSize; ++j) {
       for (size_t k = 0; k < kNumChannelsSize; ++k) {
         std::ostringstream ss;
         ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
             << ", channels: " << kNumChannels[k];
         SCOPED_TRACE(ss.str());
-        if (ValidRates(kRates[i], kRates[j]))
-          EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kNumChannels[k]));
-        else
-          EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kNumChannels[k]));
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
+                               (kNumChannels[k] == 1 ?
+                                kResamplerSynchronous :
+                                kResamplerSynchronousStereo)));
       }
     }
   }
 }
 
-// TODO(tlegrand): Replace code inside the two tests below with a function
-// with number of channels and ResamplerType as input.
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(int16_t* buffer, float data, int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i] = data * i;
+  }
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(int16_t* buffer, float left, float right,
+                    int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i * 2] = left * i;
+    buffer[i * 2 + 1] = right * i;
+  }
+}
+
+// Computes the best SNR based on the error between |ref_frame| and
+// |test_frame|. It allows for a sample delay between the signals to
+// compensate for the resampling delay.
+float ComputeSNR(const int16_t* reference, const int16_t* test,
+                 int sample_rate_hz, int channels, int max_delay) {
+  float best_snr = 0;
+  int best_delay = 0;
+  int samples_per_channel = sample_rate_hz/100;
+  for (int delay = 0; delay < max_delay; delay++) {
+    float mse = 0;
+    float variance = 0;
+    for (int i = 0; i < samples_per_channel * channels - delay; i++) {
+      int error = reference[i] - test[i + delay];
+      mse += error * error;
+      variance += reference[i] * reference[i];
+    }
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  return best_snr;
+}
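+
+// For example, variance = 1e8 with mse = 1e4 gives 10 * log10(1e4) = 40 dB,
+// which is exactly the pass threshold RunResampleTest() checks against.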
+
+void ResamplerTest::RunResampleTest(int channels,
+                                    int src_sample_rate_hz,
+                                    int dst_sample_rate_hz) {
+  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 30;
+  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float kDstLeft = kResamplingFactor * kSrcLeft;
+  const float kDstRight = kResamplingFactor * kSrcRight;
+  if (channels == 1)
+    SetMonoFrame(data_in_, kSrcLeft, src_sample_rate_hz);
+  else
+    SetStereoFrame(data_in_, kSrcLeft, kSrcRight, src_sample_rate_hz);
+
+  if (channels == 1) {
+    SetMonoFrame(data_out_, 0, dst_sample_rate_hz);
+    SetMonoFrame(data_reference_, kDstLeft, dst_sample_rate_hz);
+  } else {
+    SetStereoFrame(data_out_, 0, 0, dst_sample_rate_hz);
+    SetStereoFrame(data_reference_, kDstLeft, kDstRight, dst_sample_rate_hz);
+  }
+
+  // The speex resampler has a known delay dependent on quality and rates,
+  // which we approximate here. Multiplying by two gives us a crude maximum
+  // for any resampling, as the old resampler typically (but not always)
+  // has lower delay.  The actual delay is calculated internally based on the
+  // filter length in the QualityMap.
+  static const int kInputKernelDelaySamples = 16*3;
+  const int max_delay = std::min(1.0f, 1/kResamplingFactor) *
+                        kInputKernelDelaySamples * channels * 2;
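+  // E.g. for any upsampling (kResamplingFactor < 1) the min() term is 1.0,
+  // giving max_delay = 48 * channels * 2 (96 for mono, 192 for stereo).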
+  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
+      channels, src_sample_rate_hz, channels, dst_sample_rate_hz);
+
+  int in_length = channels * src_sample_rate_hz / 100;
+  int out_length = 0;
+  EXPECT_EQ(0, rs_.Reset(src_sample_rate_hz, dst_sample_rate_hz,
+                         (channels == 1 ?
+                          kResamplerSynchronous :
+                          kResamplerSynchronousStereo)));
+  EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                        out_length));
+  EXPECT_EQ(channels * dst_sample_rate_hz / 100, out_length);
+
+  EXPECT_GT(ComputeSNR(data_reference_, data_out_, dst_sample_rate_hz,
+                       channels, max_delay), 40.0f);
+}
+
 TEST_F(ResamplerTest, Mono) {
   const int kChannels = 1;
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
-
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kChannels));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kChannels));
-      }
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 
 TEST_F(ResamplerTest, Stereo) {
   const int kChannels = 2;
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
-
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kChannels * kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
-                               kChannels));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
-                                kChannels));
-      }
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 
 }  // namespace
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
@@ -21,21 +21,21 @@ float SincResampler::Convolve_NEON(const
                                    const float* k2,
                                    double kernel_interpolation_factor) {
   float32x4_t m_input;
   float32x4_t m_sums1 = vmovq_n_f32(0);
   float32x4_t m_sums2 = vmovq_n_f32(0);
 
   const float* upper = input_ptr + kKernelSize;
   for (; input_ptr < upper; ) {
-    m_input = vld1q_f32(input_ptr);
+    m_input = vld1q_f32((const float32_t *) input_ptr);
     input_ptr += 4;
-    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
+    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32((const float32_t *) k1));
     k1 += 4;
-    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
+    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32((const float32_t *) k2));
     k2 += 4;
   }
 
   // Linearly interpolate the two "convolutions".
   m_sums1 = vmlaq_f32(
       vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
       m_sums2, vmovq_n_f32(kernel_interpolation_factor));
 
--- a/media/webrtc/trunk/webrtc/common_audio/wav_file.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/wav_file.cc
@@ -51,28 +51,31 @@ WavReader::WavReader(const std::string& 
   CHECK_EQ(kBytesPerSample, bytes_per_sample);
 }
 
 WavReader::~WavReader() {
   Close();
 }
 
 size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) {
-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
-#error "Need to convert samples to big-endian when reading from WAV file"
-#endif
   // There could be metadata after the audio; ensure we don't read it.
   num_samples = std::min(rtc::checked_cast<uint32_t>(num_samples),
                          num_samples_remaining_);
   const size_t read =
       fread(samples, sizeof(*samples), num_samples, file_handle_);
   // If we didn't read what was requested, ensure we've reached the EOF.
   CHECK(read == num_samples || feof(file_handle_));
   CHECK_LE(read, num_samples_remaining_);
   num_samples_remaining_ -= rtc::checked_cast<uint32_t>(read);
+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+  // Samples in the file are little-endian; byte-swap the ones we actually
+  // read into host (big-endian) order. Swap via uint16_t to avoid sign
+  // extension on negative samples.
+  for (size_t idx = 0; idx < read; idx++) {
+    const uint16_t s = static_cast<uint16_t>(samples[idx]);
+    samples[idx] = static_cast<int16_t>((s << 8) | (s >> 8));
+  }
+#endif
   return read;
 }
 
 size_t WavReader::ReadSamples(size_t num_samples, float* samples) {
   static const size_t kChunksize = 4096 / sizeof(uint16_t);
   size_t read = 0;
   for (size_t i = 0; i < num_samples; i += kChunksize) {
     int16_t isamples[kChunksize];
@@ -110,20 +113,27 @@ WavWriter::WavWriter(const std::string& 
 }
 
 WavWriter::~WavWriter() {
   Close();
 }
 
 void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
 #ifndef WEBRTC_ARCH_LITTLE_ENDIAN
-#error "Need to convert samples to little-endian when writing to WAV file"
-#endif
+  // The WAV format stores samples little-endian; on this big-endian path,
+  // byte-swap into a temporary buffer before writing. Swap via uint16_t to
+  // avoid sign extension on negative samples.
+  int16_t* le_samples = new int16_t[num_samples];
+  for (size_t idx = 0; idx < num_samples; idx++) {
+    const uint16_t s = static_cast<uint16_t>(samples[idx]);
+    le_samples[idx] = static_cast<int16_t>((s << 8) | (s >> 8));
+  }
+  const size_t written =
+      fwrite(le_samples, sizeof(*le_samples), num_samples, file_handle_);
+  delete[] le_samples;
+#else
   const size_t written =
       fwrite(samples, sizeof(*samples), num_samples, file_handle_);
+#endif
   CHECK_EQ(num_samples, written);
   num_samples_ += static_cast<uint32_t>(written);
   CHECK(written <= std::numeric_limits<uint32_t>::max() ||
         num_samples_ >= written);  // detect uint32_t overflow
   CHECK(CheckWavParameters(num_channels_,
                            sample_rate_,
                            kWavFormat,
                            kBytesPerSample,
--- a/media/webrtc/trunk/webrtc/common_audio/wav_header.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/wav_header.cc
@@ -124,17 +124,49 @@ static inline void WriteFourCC(uint32_t*
 }
 
 static inline uint16_t ReadLE16(uint16_t x) { return x; }
 static inline uint32_t ReadLE32(uint32_t x) { return x; }
 static inline std::string ReadFourCC(uint32_t x) {
   return std::string(reinterpret_cast<char*>(&x), 4);
 }
 #else
-#error "Write be-to-le conversion functions"
+static inline void WriteLE16(uint16_t* f, uint16_t x) {
+  *f = ((x << 8) & 0xff00) | ((x >> 8) & 0x00ff);
+}
+
+static inline void WriteLE32(uint32_t* f, uint32_t x) {
+  *f = ((x & 0x000000ff) << 24)
+     | ((x & 0x0000ff00) << 8)
+     | ((x & 0x00ff0000) >> 8)
+     | ((x & 0xff000000) >> 24);
+}
+
+static inline void WriteFourCC(uint32_t* f, char a, char b, char c, char d) {
+  // Mask each char to one byte so negative char values cannot sign-extend.
+  *f = ((static_cast<uint32_t>(a) & 0xff) << 24)
+     | ((static_cast<uint32_t>(b) & 0xff) << 16)
+     | ((static_cast<uint32_t>(c) & 0xff) << 8)
+     |  (static_cast<uint32_t>(d) & 0xff);
+}
+
+static inline uint16_t ReadLE16(uint16_t x) {
+  return ((x & 0x00ff) << 8) | ((x & 0xff00) >> 8);
+}
+
+static inline uint32_t ReadLE32(uint32_t x) {
+  return ((x & 0x000000ff) << 24)
+       | ((x & 0x0000ff00) << 8)
+       | ((x & 0x00ff0000) >> 8)
+       | ((x & 0xff000000) >> 24);
+}
+
+static inline std::string ReadFourCC(uint32_t x) {
+  // The four bytes are already in file order in memory, so no swap is
+  // needed (matching the little-endian implementation above).
+  return std::string(reinterpret_cast<char*>(&x), 4);
+}
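+
+// Sanity check (illustrative): on this big-endian path, ReadLE16(0x3412)
+// yields 0x1234, and WriteLE16(&v, 0x1234) stores v as 0x3412 -- the
+// little-endian byte order the WAV format requires.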
 #endif
 
 static inline uint32_t RiffChunkSize(uint32_t bytes_in_payload) {
   return bytes_in_payload + kWavHeaderSize - sizeof(ChunkHeader);
 }
 
 static inline uint32_t ByteRate(int num_channels, int sample_rate,
                                 int bytes_per_sample) {
--- a/media/webrtc/trunk/webrtc/common_types.h
+++ b/media/webrtc/trunk/webrtc/common_types.h
@@ -430,17 +430,17 @@ typedef struct        // All levels are 
 enum NsModes    // type of Noise Suppression
 {
     kNsUnchanged = 0,   // previously set mode
     kNsDefault,         // platform default
     kNsConference,      // conferencing default
     kNsLowSuppression,  // lowest suppression
     kNsModerateSuppression,
     kNsHighSuppression,
-    kNsVeryHighSuppression,     // highest suppression
+    kNsVeryHighSuppression     // highest suppression
 };
 
 enum AgcModes                  // type of Automatic Gain Control
 {
     kAgcUnchanged = 0,        // previously set mode
     kAgcDefault,              // platform default
     // adaptive mode for use when analog volume control exists (e.g. for
     // PC softphone)
@@ -455,17 +455,17 @@ enum AgcModes                  // type o
 
 // EC modes
 enum EcModes                   // type of Echo Control
 {
     kEcUnchanged = 0,          // previously set mode
     kEcDefault,                // platform default
     kEcConference,             // conferencing default (aggressive AEC)
     kEcAec,                    // Acoustic Echo Cancellation
-    kEcAecm,                   // AEC mobile
+    kEcAecm                    // AEC mobile
 };
 
 // AECM modes
 enum AecmModes                 // mode of AECM
 {
     kAecmQuietEarpieceOrHeadset = 0,
                                // Quiet earpiece or headset use
     kAecmEarpiece,             // most earpiece use
@@ -491,50 +491,51 @@ enum StereoChannel
 
 // Audio device layers
 enum AudioLayers
 {
     kAudioPlatformDefault = 0,
     kAudioWindowsWave = 1,
     kAudioWindowsCore = 2,
     kAudioLinuxAlsa = 3,
-    kAudioLinuxPulse = 4
+    kAudioLinuxPulse = 4,
+    kAudioSndio = 5
 };
 
 // TODO(henrika): to be removed.
 enum NetEqModes             // NetEQ playout configurations
 {
     // Optimized trade-off between low delay and jitter robustness for two-way
     // communication.
     kNetEqDefault = 0,
     // Improved jitter robustness at the cost of increased delay. Can be
     // used in one-way communication.
     kNetEqStreaming = 1,
     // Optimized for decodability of fax signals rather than for perceived audio
     // quality.
     kNetEqFax = 2,
     // Minimal buffer management. Inserts zeros for lost packets and during
     // buffer increases.
-    kNetEqOff = 3,
+    kNetEqOff = 3
 };
 
 // TODO(henrika): to be removed.
 enum OnHoldModes            // On Hold direction
 {
     kHoldSendAndPlay = 0,    // Put both sending and playing in on-hold state.
     kHoldSendOnly,           // Put only sending in on-hold state.
     kHoldPlayOnly            // Put only playing in on-hold state.
 };
 
 // TODO(henrika): to be removed.
 enum AmrMode
 {
     kRfc3267BwEfficient = 0,
     kRfc3267OctetAligned = 1,
-    kRfc3267FileStorage = 2,
+    kRfc3267FileStorage = 2
 };
 
 // ==================================================================
 // Video specific types
 // ==================================================================
 
 // Raw video types
 enum RawVideoType
@@ -551,16 +552,26 @@ enum RawVideoType
     kVideoARGB1555 = 9,
     kVideoMJPEG    = 10,
     kVideoNV12     = 11,
     kVideoNV21     = 12,
     kVideoBGRA     = 13,
     kVideoUnknown  = 99
 };
 
+enum VideoReceiveState
+{
+  kReceiveStateInitial,            // No video decoded yet
+  kReceiveStateNormal,
+  kReceiveStatePreemptiveNACK,     // NACK sent for missing packet, no decode stall/fail yet
+  kReceiveStateWaitingKey,         // Decoding stalled, waiting for keyframe or NACK
+  kReceiveStateDecodingWithErrors, // Decoding with errors, waiting for keyframe or NACK
+  kReceiveStateNoIncoming          // No errors, but no incoming video since last decode
+};
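+
+// Assumed typical progression (inferred from the comments above, not
+// normative): Initial -> Normal once frames decode; Normal ->
+// PreemptiveNACK when a loss is NACKed before any stall; WaitingKey or
+// DecodingWithErrors while stalled or decoding with errors until a
+// keyframe (or retransmitted packet) arrives; NoIncoming when the stream
+// goes quiet without errors.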
+
 // Video codec
 enum { kConfigParameterSize = 128};
 enum { kPayloadNameSize = 32};
 enum { kMaxSimulcastStreams = 4};
 enum { kMaxTemporalStreams = 4};
 
 enum VideoCodecComplexity
 {
@@ -627,16 +638,20 @@ struct VideoCodecVP9 {
   bool                 frameDroppingOn;
   int                  keyFrameInterval;
   bool                 adaptiveQpMode;
 };
 
 // H264 specific.
 struct VideoCodecH264 {
   VideoCodecProfile profile;
+  uint8_t        profile_byte;
+  uint8_t        constraints;
+  uint8_t        level;
+  uint8_t        packetizationMode; // 0 or 1
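+  // profile_byte, constraints and level presumably mirror the three
+  // profile-level-id octets from SDP (RFC 6184), e.g. "42e01f" ->
+  // 0x42, 0xe0, 0x1f; packetizationMode maps the separate
+  // packetization-mode fmtp parameter.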
   bool           frameDroppingOn;
   int            keyFrameInterval;
   // These are NULL/0 if not externally negotiated.
   const uint8_t* spsData;
   size_t         spsLen;
   const uint8_t* ppsData;
   size_t         ppsLen;
 };
@@ -694,16 +709,18 @@ enum VideoCodecMode {
 // Common video codec properties
 struct VideoCodec {
   VideoCodecType      codecType;
   char                plName[kPayloadNameSize];
   unsigned char       plType;
 
   unsigned short      width;
   unsigned short      height;
+  // width & height modulo resolution_divisor must be 0
+  unsigned char       resolution_divisor;
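+  // E.g. resolution_divisor = 16 would restrict frames to whole
+  // macroblocks: 640x480 passes, 100x100 does not (100 % 16 != 0).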
 
   unsigned int        startBitrate;  // kilobits/sec.
   unsigned int        maxBitrate;  // kilobits/sec.
   unsigned int        minBitrate;  // kilobits/sec.
   unsigned int        targetBitrate;  // kilobits/sec.
 
   unsigned char       maxFramerate;
 
@@ -770,16 +787,36 @@ struct OverUseDetectorOptions {
   double initial_offset;
   double initial_e[2][2];
   double initial_process_noise[2];
   double initial_avg_noise;
   double initial_var_noise;
   double initial_threshold;
 };
 
+enum CPULoadState {
+  kLoadRelaxed = 0,
+  kLoadNormal,
+  kLoadStressed,
+  kLoadLast
+};
+
+class CPULoadStateObserver {
+ public:
+  virtual void onLoadStateChanged(CPULoadState aNewState) = 0;
+  virtual ~CPULoadStateObserver() {}
+};
+
+class CPULoadStateCallbackInvoker {
+ public:
+  virtual void AddObserver(CPULoadStateObserver* aObserver) = 0;
+  virtual void RemoveObserver(CPULoadStateObserver* aObserver) = 0;
+  virtual ~CPULoadStateCallbackInvoker() {}
+};
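+
+// A minimal observer sketch (hypothetical, for illustration only):
+//
+//   class LoggingLoadObserver : public CPULoadStateObserver {
+//    public:
+//     void onLoadStateChanged(CPULoadState aNewState) override {
+//       // e.g. request a lower capture resolution on kLoadStressed.
+//     }
+//   };
+//
+// An invoker is expected to call onLoadStateChanged() on every registered
+// observer when its measured load crosses a state boundary.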
+
 // This structure will have the information about when packet is actually
 // received by socket.
 struct PacketTime {
   PacketTime() : timestamp(-1), not_before(-1) {}
   PacketTime(int64_t timestamp, int64_t not_before)
       : timestamp(timestamp), not_before(not_before) {
   }
 
--- a/media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
+++ b/media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
@@ -241,16 +241,38 @@ int ConvertToI420(VideoType src_video_ty
   int dst_width = dst_frame->width();
   int dst_height = dst_frame->height();
   // LibYuv expects pre-rotation values for dst.
   // Stride values should correspond to the destination values.
   if (rotation == kVideoRotation_90 || rotation == kVideoRotation_270) {
     dst_width = dst_frame->height();
     dst_height =dst_frame->width();
   }
+#ifdef WEBRTC_GONK
+  if (src_video_type == kYV12) {
+    // In gralloc buffers, the YV12 color format aligns the Cb and Cr plane
+    // strides to a 16-byte boundary; see /system/core/include/system/graphics.h.
+    int stride_y = (src_width + 15) & ~0x0F;
+    int stride_uv = (((stride_y + 1) / 2) + 15) & ~0x0F;
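+    // E.g. src_width = 100: stride_y = (100 + 15) & ~15 = 112 and
+    // stride_uv = ((112 + 1) / 2 + 15) & ~15 = 64.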
+    return libyuv::I420Rotate(src_frame,
+                              stride_y,
+                              src_frame + (stride_y * src_height) + (stride_uv * ((src_height + 1) / 2)),
+                              stride_uv,
+                              src_frame + (stride_y * src_height),
+                              stride_uv,
+                              dst_frame->buffer(kYPlane),
+                              dst_frame->stride(kYPlane),
+                              dst_frame->buffer(kUPlane),
+                              dst_frame->stride(kUPlane),
+                              dst_frame->buffer(kVPlane),
+                              dst_frame->stride(kVPlane),
+                              src_width, src_height,
+                              ConvertRotationMode(rotation));
+  }
+#endif
   return libyuv::ConvertToI420(src_frame, sample_size,
                                dst_frame->buffer(kYPlane),
                                dst_frame->stride(kYPlane),
                                dst_frame->buffer(kUPlane),
                                dst_frame->stride(kUPlane),
                                dst_frame->buffer(kVPlane),
                                dst_frame->stride(kVPlane),
                                crop_x, crop_y,
--- a/media/webrtc/trunk/webrtc/common_video/plane.cc
+++ b/media/webrtc/trunk/webrtc/common_video/plane.cc
@@ -38,16 +38,21 @@ int Plane::CreateEmptyPlane(int allocate
 
 int Plane::MaybeResize(int new_size) {
   if (new_size <= 0)
     return -1;
   if (new_size <= allocated_size_)
     return 0;
   rtc::scoped_ptr<uint8_t, AlignedFreeDeleter> new_buffer(
       static_cast<uint8_t*>(AlignedMalloc(new_size, kBufferAlignment)));
+
+  if (!new_buffer.get()) {
+    return -1;
+  }
+
   if (buffer_.get()) {
     memcpy(new_buffer.get(), buffer_.get(), plane_size_);
   }
   buffer_.reset(new_buffer.release());
   allocated_size_ = new_size;
   return 0;
 }
 
--- a/media/webrtc/trunk/webrtc/engine_configurations.h
+++ b/media/webrtc/trunk/webrtc/engine_configurations.h
@@ -31,17 +31,19 @@
 #define WEBRTC_CODEC_G722
 #endif  // !WEBRTC_MOZILLA_BUILD
 
 // AVT is included in all builds, along with G.711, NetEQ and CNG
 // (which are mandatory and don't have any defines).
 #define WEBRTC_CODEC_AVT
 
 // PCM16 is useful for testing and incurs only a small binary size cost.
+#ifndef WEBRTC_CODEC_PCM16
 #define WEBRTC_CODEC_PCM16
+#endif
 
 // iLBC and Redundancy coding are excluded from Chromium and Mozilla
 // builds to reduce binary size.
 #if !defined(WEBRTC_CHROMIUM_BUILD) && !defined(WEBRTC_MOZILLA_BUILD)
 #define WEBRTC_CODEC_ILBC
 #define WEBRTC_CODEC_RED
 #endif  // !WEBRTC_CHROMIUM_BUILD && !WEBRTC_MOZILLA_BUILD
 
@@ -60,16 +62,17 @@
 
 // ----------------------------------------------------------------------------
 //  Settings for VoiceEngine
 // ----------------------------------------------------------------------------
 
 #define WEBRTC_VOICE_ENGINE_AGC                 // Near-end AGC
 #define WEBRTC_VOICE_ENGINE_ECHO                // Near-end AEC
 #define WEBRTC_VOICE_ENGINE_NR                  // Near-end NS
+#define WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
 
 #if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
 #define WEBRTC_VOICE_ENGINE_TYPING_DETECTION    // Typing detection
 #endif
 
 // ----------------------------------------------------------------------------
 //  VoiceEngine sub-APIs
 // ----------------------------------------------------------------------------
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/audio_coding.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/audio_coding.gypi
@@ -7,23 +7,29 @@
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'includes': [
     '../../build/common.gypi',
     'codecs/interfaces.gypi',
     'codecs/cng/cng.gypi',
     'codecs/g711/g711.gypi',
-    'codecs/g722/g722.gypi',
-    'codecs/ilbc/ilbc.gypi',
-    'codecs/isac/isac.gypi',
-    'codecs/isac/isacfix.gypi',
     'codecs/pcm16b/pcm16b.gypi',
     'codecs/red/red.gypi',
     'main/acm2/audio_coding_module.gypi',
     'neteq/neteq.gypi',
   ],
   'conditions': [
+    ['include_g722==1', {
+      'includes': ['codecs/g722/g722.gypi',],
+    }],
+    ['include_ilbc==1', {
+      'includes': ['codecs/ilbc/ilbc.gypi',],
+    }],
+    ['include_isac==1', {
+      'includes': ['codecs/isac/isac.gypi',
+                   'codecs/isac/isacfix.gypi',],
+    }],
     ['include_opus==1', {
       'includes': ['codecs/opus/opus.gypi',],
     }],
   ],
 }
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/audio_decoder.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/audio_decoder.h
@@ -27,17 +27,17 @@ class AudioDecoder {
     kSpeech = 1,
     kComfortNoise = 2
   };
 
   // Used by PacketDuration below. Save the value -1 for errors.
   enum { kNotImplemented = -2 };
 
   AudioDecoder() = default;
-  virtual ~AudioDecoder() = default;
+  virtual ~AudioDecoder() {}  // was "= default" upstream
 
   // Decodes |encode_len| bytes from |encoded| and writes the result in
   // |decoded|. The maximum bytes allowed to be written into |decoded| is
   // |max_decoded_bytes|. The number of samples from all channels produced is
   // in the return value. If the decoder produced comfort noise, |speech_type|
   // is set to kComfortNoise, otherwise it is kSpeech. The desired output
   // sample rate is provided in |sample_rate_hz|, which must be valid for the
   // codec at hand.
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
@@ -2,16 +2,19 @@
 #
 # Use of this source code is governed by a BSD-style license
 # that can be found in the LICENSE file in the root of the source
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
+  'variables': {
+    'opus_complexity%': 0,
+  },
   'targets': [
     {
       'target_name': 'webrtc_opus',
       'type': 'static_library',
       'conditions': [
         ['build_opus==1', {
           'dependencies': [
             '<(opus_dir)/opus.gyp:opus'
@@ -25,28 +28,40 @@
               '<(opus_dir)/src/celt',
             ],
           },
         }, {
           'conditions': [
             ['build_with_mozilla==1', {
               # Mozilla provides its own build of the opus library.
               'include_dirs': [
-                '$(DIST)/include/opus',
-              ]
+                '/media/libopus/include',
+                '/media/libopus/src',
+                '/media/libopus/celt',
+              ],
+              'direct_dependent_settings': {
+                'include_dirs': [
+                  '/media/libopus/include',
+                  '/media/libopus/src',
+                  '/media/libopus/celt',
+                ],
+              },
             }],
           ],
         }],
       ],
       'dependencies': [
         'audio_encoder_interface',
       ],
       'include_dirs': [
         '<(webrtc_root)',
       ],
+      'defines': [
+        'OPUS_COMPLEXITY=<(opus_complexity)'
+      ],
       'sources': [
         'audio_encoder_opus.cc',
         'interface/audio_encoder_opus.h',
         'interface/opus_interface.h',
         'opus_inst.h',
         'opus_interface.c',
       ],
     },
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
@@ -110,16 +110,19 @@ int16_t WebRtcOpus_Encode(OpusEncInst* i
     return res;
   }
 
   return -1;
 }
 
 int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate) {
   if (inst) {
+#if defined(OPUS_COMPLEXITY) && (OPUS_COMPLEXITY != 0)
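+    // OPUS_COMPLEXITY is the opus_complexity gyp variable from opus.gypi in
+    // this patch; a non-zero value overrides libopus's default complexity,
+    // presumably trading quality for CPU on constrained devices.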
+    opus_encoder_ctl(inst->encoder, OPUS_SET_COMPLEXITY(OPUS_COMPLEXITY));
+#endif
     return opus_encoder_ctl(inst->encoder, OPUS_SET_BITRATE(rate));
   } else {
     return -1;
   }
 }
 
 int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate) {
   if (inst) {
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
@@ -5,33 +5,47 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'audio_coding_dependencies': [
       'CNG',
-      'G711',
-      'G722',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
-      'PCM16B',
       'red',
       '<(webrtc_root)/common.gyp:webrtc_common',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
     ],
     'audio_coding_defines': [],
     'conditions': [
       ['include_opus==1', {
         'audio_coding_dependencies': ['webrtc_opus',],
         'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
       }],
+      ['include_g711==1', {
+        'audio_coding_dependencies': ['G711',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G711',],
+      }],
+      ['include_g722==1', {
+        'audio_coding_dependencies': ['G722',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G722',],
+      }],
+      ['include_ilbc==1', {
+        'audio_coding_dependencies': ['iLBC',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ILBC',],
+      }],
+      ['include_isac==1', {
+        'audio_coding_dependencies': ['iSAC', 'iSACFix',],
+#        'audio_coding_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFX',],
+      }],
+      ['include_pcm16b==1', {
+        'audio_coding_dependencies': ['PCM16B',],
+        'audio_coding_defines': ['WEBRTC_CODEC_PCM16',],
+      }],
     ],
   },
   'targets': [
     {
       'target_name': 'audio_coding_module',
       'type': 'static_library',
       'defines': [
         '<@(audio_coding_defines)',
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -486,23 +486,21 @@ int AudioCodingModuleImpl::PreprocessToA
 //
 
 bool AudioCodingModuleImpl::REDStatus() const {
   CriticalSectionScoped lock(acm_crit_sect_);
   return codec_manager_.red_enabled();
 }
 
 // Configure RED status i.e on/off.
-int AudioCodingModuleImpl::SetREDStatus(
+int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
+  CriticalSectionScoped lock(acm_crit_sect_);
 #ifdef WEBRTC_CODEC_RED
-    bool enable_red) {
-  CriticalSectionScoped lock(acm_crit_sect_);
   return codec_manager_.SetCopyRed(enable_red) ? 0 : -1;
 #else
-    bool /* enable_red */) {
   WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                "  WEBRTC_CODEC_RED is undefined");
   return -1;
 #endif
 }
 
 /////////////////////////////////////////
 //   (FEC) Forward Error Correction (codec internal)
@@ -955,17 +953,17 @@ bool AudioCodingImpl::RegisterSendCodec(
   if (frame_size_samples > 0) {
     codec.pacsize = frame_size_samples;
   }
   return acm_old_->RegisterSendCodec(codec) == 0;
 }
 
 const AudioEncoder* AudioCodingImpl::GetSenderInfo() const {
   FATAL() << "Not implemented yet.";
-  return reinterpret_cast<const AudioEncoder*>(NULL);
+  return NULL;
 }
 
 const CodecInst* AudioCodingImpl::GetSenderCodecInst() {
   if (acm_old_->SendCodec(&current_send_codec_) != 0) {
     return NULL;
   }
   return &current_send_codec_;
 }
@@ -974,17 +972,17 @@ int AudioCodingImpl::Add10MsAudio(const 
   acm2::AudioCodingModuleImpl::InputData input_data;
   if (acm_old_->Add10MsDataInternal(audio_frame, &input_data) != 0)
     return -1;
   return acm_old_->Encode(input_data);
 }
 
 const ReceiverInfo* AudioCodingImpl::GetReceiverInfo() const {
   FATAL() << "Not implemented yet.";
-  return reinterpret_cast<const ReceiverInfo*>(NULL);
+  return NULL;
 }
 
 bool AudioCodingImpl::RegisterReceiveCodec(AudioDecoder* receive_codec) {
   FATAL() << "Not implemented yet.";
   return false;
 }
 
 bool AudioCodingImpl::RegisterReceiveCodec(int decoder_type,
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq.gypi
@@ -5,48 +5,106 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'codecs': [
       'G711',
-      'G722',
       'PCM16B',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
       'CNG',
     ],
     'neteq_defines': [],
     'conditions': [
+      ['include_g722==1', {
+        'neteq_dependencies': ['G722'],
+        'neteq_defines': ['WEBRTC_CODEC_G722',],
+      }],
+      ['include_ilbc==1', {
+        'neteq_dependencies': ['iLBC'],
+        'neteq_defines': ['WEBRTC_CODEC_ILBC',],
+      }],
+      ['include_isac==1', {
+        'neteq_dependencies': ['iSAC', 'iSACFix',],
+        'neteq_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFIX',],
+      }],
       ['include_opus==1', {
-        'codecs': ['webrtc_opus',],
+        'codecs': ['webrtc_opus'],
+        'neteq_dependencies': ['webrtc_opus'],
         'neteq_defines': ['WEBRTC_CODEC_OPUS',],
+        'conditions': [
+          ['build_with_mozilla==0', {
+            'neteq_dependencies': [
+              '<(DEPTH)/third_party/opus/opus.gyp:opus',
+            ],
+          }],
+        ],
       }],
     ],
     'neteq_dependencies': [
       '<@(codecs)',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
       'audio_decoder_interface',
     ],
   },
   'targets': [
     {
       'target_name': 'neteq',
       'type': 'static_library',
+      'include_dirs': [
+        '../../../../../../media/opus/celt',
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '../../../../../../media/opus/celt',
+        ],
+      },
       'dependencies': [
         '<@(neteq_dependencies)',
         '<(webrtc_root)/common.gyp:webrtc_common',
       ],
       'defines': [
         '<@(neteq_defines)',
       ],
+      'conditions': [
+        ['build_with_mozilla==0', {
+          'include_dirs': [
+            # Need Opus header files for the audio classifier.
+            '<(DEPTH)/third_party/opus/src/celt',
+            '<(DEPTH)/third_party/opus/src/src',
+          ],
+          'direct_dependent_settings': {
+            'include_dirs': [
+              # Need Opus header files for the audio classifier.
+              '<(DEPTH)/third_party/opus/src/celt',
+              '<(DEPTH)/third_party/opus/src/src',
+            ],
+          },
+          'export_dependent_settings': [
+            '<(DEPTH)/third_party/opus/opus.gyp:opus',
+          ],
+        }],
+        ['build_with_mozilla==1', {
+          'include_dirs': [
+            # Need Opus header files for the audio classifier.
+            '<(DEPTH)/../../../media/opus/celt',
+#            '<(DEPTH)/third_party/opus/src/src',
+          ],
+          'direct_dependent_settings': {
+            'include_dirs': [
+              '../../../../../../media/opus/celt',
+              # Need Opus header files for the audio classifier.
+              '<(DEPTH)/../../../media/opus/celt',
+#              '<(DEPTH)/third_party/opus/src/src',
+            ],
+          },
+        }],
+      ],
       'sources': [
         'interface/neteq.h',
         'accelerate.cc',
         'accelerate.h',
         'audio_classifier.cc',
         'audio_classifier.h',
         'audio_decoder_impl.cc',
         'audio_decoder_impl.h',
@@ -121,16 +179,17 @@
           'dependencies': [
             '<@(codecs)',
             'audio_decoder_interface',
             'neteq_unittest_tools',
             '<(DEPTH)/testing/gtest.gyp:gtest',
             '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
             '<(webrtc_root)/test/test.gyp:test_support_main',
           ],
# FIXME: these should be gated on include_isac/include_ilbc/etc. like the defines above.
           'defines': [
             'AUDIO_DECODER_UNITTEST',
             'WEBRTC_CODEC_G722',
             'WEBRTC_CODEC_ILBC',
             'WEBRTC_CODEC_ISACFX',
             'WEBRTC_CODEC_ISAC',
             'WEBRTC_CODEC_PCM16',
             '<@(neteq_defines)',
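
The WEBRTC_CODEC_* defines made conditional above act as compile-time gates in
the NetEq sources. A hedged sketch of the consuming pattern; the registration
call and enum names are illustrative, not quoted from NetEq:

    // Illustrative decoder registration honoring the neteq_defines above.
    #ifdef WEBRTC_CODEC_G722
      RegisterDecoder(kDecoderG722);
    #endif
    #ifdef WEBRTC_CODEC_ILBC
      RegisterDecoder(kDecoderILBC);
    #endif
    #ifdef WEBRTC_CODEC_OPUS
      RegisterDecoder(kDecoderOpus);
    #endif
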
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_template.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_template.h
@@ -79,18 +79,17 @@ class AudioDeviceTemplate : public Audio
     FATAL() << "Should never be called";
     return -1;
   }
 
   int32_t RecordingDeviceName(
       uint16_t index,
       char name[kAdmMaxDeviceNameSize],
       char guid[kAdmMaxGuidSize]) override {
-    FATAL() << "Should never be called";
-    return -1;
+    return input_.RecordingDeviceName(index, name, guid);
   }
 
   int32_t SetPlayoutDevice(uint16_t index) override {
     // OK to use but it has no effect currently since device selection is
     // done using Android APIs instead.
     return 0;
   }
 
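
The hunk above swaps a FATAL() stub for delegation to the input implementation.
A condensed sketch of the AudioDeviceTemplate pattern this relies on; the
member and method set is reduced for illustration:

    // Sketch only; the real template forwards many more ADM methods.
    template <class InputType, class OutputType>
    class AudioDeviceTemplate : public AudioDeviceGeneric {
     public:
      int32_t RecordingDeviceName(uint16_t index,
                                  char name[kAdmMaxDeviceNameSize],
                                  char guid[kAdmMaxGuidSize]) override {
        return input_.RecordingDeviceName(index, name, guid);
      }
     private:
      InputType input_;
      OutputType output_;
    };
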
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_utility_android.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_utility_android.h
@@ -10,17 +10,19 @@
 
 /*
  *  Android audio device utility interface
  */
 
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_ANDROID_H
 
+#if !defined(MOZ_WIDGET_GONK)
 #include <jni.h>
+#endif
 
 #include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 
 namespace webrtc {
 
 // TODO(henrika): this utility class is not used but I would like to keep this
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager.cc
@@ -4,128 +4,152 @@
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/audio_manager.h"
+#if !defined(MOZ_WIDGET_GONK)
+#include "AndroidJNIWrapper.h"
+#endif
 
 #include <android/log.h>
 
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
+#if !defined(MOZ_WIDGET_GONK)
 #include "webrtc/modules/utility/interface/helpers_android.h"
+#endif
 
 #define TAG "AudioManager"
 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
 #define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
 #define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
 #define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
 #define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
 
 namespace webrtc {
 
+#if !defined(MOZ_WIDGET_GONK)
 static JavaVM* g_jvm = NULL;
 static jobject g_context = NULL;
 static jclass g_audio_manager_class = NULL;
+#endif
 
 void AudioManager::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
+#if !defined(MOZ_WIDGET_GONK)
   ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
 
   CHECK(jvm);
   CHECK(context);
 
   g_jvm = reinterpret_cast<JavaVM*>(jvm);
   JNIEnv* jni = GetEnv(g_jvm);
   CHECK(jni) << "AttachCurrentThread must be called on this thread";
 
-  g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
-  jclass local_class = FindClass(
-      jni, "org/webrtc/voiceengine/WebRtcAudioManager");
-  g_audio_manager_class = reinterpret_cast<jclass>(
-      NewGlobalRef(jni, local_class));
-  CHECK_EXCEPTION(jni);
+  if (!g_context) {
+    g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
+  }
 
+  if (!g_audio_manager_class) {
+    g_audio_manager_class = jsjni_GetGlobalClassRef(
+                                "org/webrtc/voiceengine/WebRtcAudioManager");
+    DCHECK(g_audio_manager_class);
+  }
   // Register native methods with the WebRtcAudioManager class. These methods
   // are declared private native in WebRtcAudioManager.java.
   JNINativeMethod native_methods[] = {
       {"nativeCacheAudioParameters", "(IIJ)V",
        reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
   jni->RegisterNatives(g_audio_manager_class,
                        native_methods, arraysize(native_methods));
   CHECK_EXCEPTION(jni) << "Error during RegisterNatives";
+#endif
 }
 
 void AudioManager::ClearAndroidAudioDeviceObjects() {
+#if !defined(MOZ_WIDGET_GONK)
   ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
   JNIEnv* jni = GetEnv(g_jvm);
   CHECK(jni) << "AttachCurrentThread must be called on this thread";
   jni->UnregisterNatives(g_audio_manager_class);
   CHECK_EXCEPTION(jni) << "Error during UnregisterNatives";
   DeleteGlobalRef(jni, g_audio_manager_class);
   g_audio_manager_class = NULL;
   DeleteGlobalRef(jni, g_context);
   g_context = NULL;
   g_jvm = NULL;
+#endif
 }
 
 AudioManager::AudioManager()
-    : j_audio_manager_(NULL),
-      initialized_(false) {
+    : initialized_(false) {
+#if !defined(MOZ_WIDGET_GONK)
+  j_audio_manager_ = NULL;
   ALOGD("ctor%s", GetThreadInfo().c_str());
+#endif
   CHECK(HasDeviceObjects());
   CreateJavaInstance();
 }
 
 AudioManager::~AudioManager() {
+#if !defined(MOZ_WIDGET_GONK)
   ALOGD("~dtor%s", GetThreadInfo().c_str());
+#endif
   DCHECK(thread_checker_.CalledOnValidThread());
   Close();
+#if !defined(MOZ_WIDGET_GONK)
   AttachThreadScoped ats(g_jvm);
   JNIEnv* jni = ats.env();
   jni->DeleteGlobalRef(j_audio_manager_);
   j_audio_manager_ = NULL;
+#endif
   DCHECK(!initialized_);
 }
 
 bool AudioManager::Init() {
+#if !defined(MOZ_WIDGET_GONK)
   ALOGD("Init%s", GetThreadInfo().c_str());
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK(!initialized_);
   AttachThreadScoped ats(g_jvm);
   JNIEnv* jni = ats.env();
   jmethodID initID = GetMethodID(jni, g_audio_manager_class, "init", "()Z");
   jboolean res = jni->CallBooleanMethod(j_audio_manager_, initID);
   CHECK_EXCEPTION(jni);
   if (!res) {
     ALOGE("init failed!");
     return false;
   }
+#endif
   initialized_ = true;
   return true;
 }
 
 bool AudioManager::Close() {
+#if !defined(MOZ_WIDGET_GONK)
   ALOGD("Close%s", GetThreadInfo().c_str());
   DCHECK(thread_checker_.CalledOnValidThread());
   if (!initialized_)
     return true;
   AttachThreadScoped ats(g_jvm);
   JNIEnv* jni = ats.env();
   jmethodID disposeID = GetMethodID(
       jni, g_audio_manager_class, "dispose", "()V");
   jni->CallVoidMethod(j_audio_manager_, disposeID);
   CHECK_EXCEPTION(jni);
+#endif
   initialized_ = false;
   return true;
 }
 
+#if !defined(MOZ_WIDGET_GONK)
 void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env, jobject obj,
     jint sample_rate, jint channels, jlong nativeAudioManager) {
   webrtc::AudioManager* this_object =
       reinterpret_cast<webrtc::AudioManager*> (nativeAudioManager);
   this_object->OnCacheAudioParameters(env, sample_rate, channels);
 }
 
 void AudioManager::OnCacheAudioParameters(
@@ -133,41 +157,48 @@ void AudioManager::OnCacheAudioParameter
   ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
   ALOGD("sample_rate: %d", sample_rate);
   ALOGD("channels: %d", channels);
   DCHECK(thread_checker_.CalledOnValidThread());
   // TODO(henrika): add support for stereo output.
   playout_parameters_.reset(sample_rate, channels);
   record_parameters_.reset(sample_rate, channels);
 }
+#endif
 
 AudioParameters AudioManager::GetPlayoutAudioParameters() const {
   CHECK(playout_parameters_.is_valid());
   return playout_parameters_;
 }
 
 AudioParameters AudioManager::GetRecordAudioParameters() const {
   CHECK(record_parameters_.is_valid());
   return record_parameters_;
 }
 
 bool AudioManager::HasDeviceObjects() {
+#if !defined(MOZ_WIDGET_GONK)
   return (g_jvm && g_context && g_audio_manager_class);
+#else
+  return true;
+#endif
 }
 
 void AudioManager::CreateJavaInstance() {
+#if !defined(MOZ_WIDGET_GONK)
   ALOGD("CreateJavaInstance");
   AttachThreadScoped ats(g_jvm);
   JNIEnv* jni = ats.env();
   jmethodID constructorID = GetMethodID(
       jni, g_audio_manager_class, "<init>", "(Landroid/content/Context;J)V");
   j_audio_manager_ = jni->NewObject(g_audio_manager_class,
                                     constructorID,
                                     g_context,
                                     reinterpret_cast<intptr_t>(this));
   CHECK_EXCEPTION(jni) << "Error during NewObject";
   CHECK(j_audio_manager_);
   j_audio_manager_ = jni->NewGlobalRef(j_audio_manager_);
   CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
   CHECK(j_audio_manager_);
+#endif
 }
 
 }  // namespace webrtc
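
The JNI changes in this file share one pattern: global references are created
once and reused if SetAndroidAudioDeviceObjects() runs again, instead of
leaking a fresh reference per call. A condensed sketch; jsjni_GetGlobalClassRef
is Gecko's helper from AndroidJNIWrapper.h, and the body below shows the
equivalent raw-JNI steps:

    static jclass g_class = NULL;

    // Idempotent: safe across repeated SetAndroidAudioDeviceObjects() calls.
    static void EnsureClassRef(JNIEnv* jni, const char* name) {
      if (!g_class) {
        jclass local = jni->FindClass(name);
        g_class = static_cast<jclass>(jni->NewGlobalRef(local));
        jni->DeleteLocalRef(local);
      }
    }
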
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager.h
@@ -6,23 +6,27 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_H_
 
+#if !defined(MOZ_WIDGET_GONK)
 #include <jni.h>
+#endif
 
 #include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
+#if !defined(MOZ_WIDGET_GONK)
 #include "webrtc/modules/utility/interface/helpers_android.h"
+#endif
 
 namespace webrtc {
 
 class AudioParameters {
  public:
   enum { kBitsPerSample = 16 };
   AudioParameters()
       : sample_rate_(0),
@@ -91,37 +95,41 @@ class AudioManager {
 
   // Native audio parameters stored during construction.
   AudioParameters GetPlayoutAudioParameters() const;
   AudioParameters GetRecordAudioParameters() const;
 
   bool initialized() const { return initialized_; }
 
  private:
+#if !defined(MOZ_WIDGET_GONK)
   // Called from Java side so we can cache the native audio parameters.
   // This method will be called by the WebRtcAudioManager constructor, i.e.
   // on the same thread that this object is created on.
   static void JNICALL CacheAudioParameters(JNIEnv* env, jobject obj,
       jint sample_rate, jint channels, jlong nativeAudioManager);
   void OnCacheAudioParameters(JNIEnv* env, jint sample_rate, jint channels);
-
+#endif
+
   // Returns true if SetAndroidAudioDeviceObjects() has been called
   // successfully.
   bool HasDeviceObjects();
 
   // Called from the constructor. Defines the |j_audio_manager_| member.
   void CreateJavaInstance();
 
   // Stores thread ID in the constructor.
   // We can then use ThreadChecker::CalledOnValidThread() to ensure that
   // other methods are called from the same thread.
   rtc::ThreadChecker thread_checker_;
 
+#if !defined(MOZ_WIDGET_GONK)
   // The Java WebRtcAudioManager instance.
   jobject j_audio_manager_;
+#endif
 
   // Set to true by Init() and false by Close().
   bool initialized_;
 
   // Contains native parameters (e.g. sample rate, channel configuration).
   // Set at construction in OnCacheAudioParameters() which is called from
   // Java on the same thread as this object is created on.
   AudioParameters playout_parameters_;
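
A hedged sketch of how the cached parameters are consumed once Java has
reported them; the sample_rate() and channels() accessors are assumed from the
AudioParameters class above:

    webrtc::AudioParameters params = audio_manager.GetPlayoutAudioParameters();
    // 16-bit PCM: bytes needed for one 10 ms playout buffer.
    const size_t bytes_per_10ms =
        params.sample_rate() / 100 * params.channels() * sizeof(int16_t);
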
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
@@ -8,16 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
 
 #include <android/log.h>
 #include <assert.h>
 
+#include "AndroidJNIWrapper.h"
 #include "webrtc/modules/utility/interface/helpers_android.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 #define TAG "AudioManagerJni"
 #define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
 
 namespace webrtc {
 
@@ -48,33 +49,28 @@ void AudioManagerJni::SetAndroidAudioDev
   ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
 
   assert(jvm);
   assert(context);
 
   // Store global Java VM variables to be accessed by API calls.
   g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
   g_jni_env_ = GetEnv(g_jvm_);
-  g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));
+
+  if (!g_context_) {
+    g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));
+  }
 
-  // FindClass must be made in this function since this function's contract
-  // requires it to be called by a Java thread.
-  // See
-  // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
-  // as to why this is necessary.
-  // Get the AudioManagerAndroid class object.
-  jclass javaAmClassLocal = g_jni_env_->FindClass(
-      "org/webrtc/voiceengine/AudioManagerAndroid");
-  assert(javaAmClassLocal);
-
-  // Create a global reference such that the class object is not recycled by
-  // the garbage collector.
-  g_audio_manager_class_ = reinterpret_cast<jclass>(
-      g_jni_env_->NewGlobalRef(javaAmClassLocal));
-  assert(g_audio_manager_class_);
+  if (!g_audio_manager_class_) {
+    // Create a global reference such that the class object is not recycled by
+    // the garbage collector.
+    g_audio_manager_class_ = jsjni_GetGlobalClassRef(
+                                 "org/webrtc/voiceengine/AudioManagerAndroid");
+    DCHECK(g_audio_manager_class_);
+  }
 }
 
 void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
   ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
   g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
   g_audio_manager_class_ = NULL;
   g_jni_env_->DeleteGlobalRef(g_context_);
   g_context_ = NULL;
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -3,16 +3,17 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "AndroidJNIWrapper.h"
 #include "webrtc/modules/audio_device/android/audio_record_jni.h"
 
 #include <android/log.h>
 
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 
@@ -39,27 +40,26 @@ void AudioRecordJni::SetAndroidAudioDevi
 
   CHECK(jvm);
   CHECK(context);
 
   g_jvm = reinterpret_cast<JavaVM*>(jvm);
   JNIEnv* jni = GetEnv(g_jvm);
   CHECK(jni) << "AttachCurrentThread must be called on this thread";
 
-  // Protect context from being deleted during garbage collection.
-  g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
+  if (!g_context) {
+    // Protect context from being deleted during garbage collection.
+    g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
+  }
 
-  // Load the locally-defined WebRtcAudioRecord class and create a new global
-  // reference to it.
-  jclass local_class = FindClass(
-      jni, "org/webrtc/voiceengine/WebRtcAudioRecord");
-  g_audio_record_class = reinterpret_cast<jclass>(
-      NewGlobalRef(jni, local_class));
-  jni->DeleteLocalRef(local_class);
-  CHECK_EXCEPTION(jni);
+  if (!g_audio_record_class) {
+    g_audio_record_class = jsjni_GetGlobalClassRef(
+                               "org/webrtc/voiceengine/WebRtcAudioRecord");
+    DCHECK(g_audio_record_class);
+  }
 
   // Register native methods with the WebRtcAudioRecord class. These methods
   // are declared private native in WebRtcAudioRecord.java.
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
           reinterpret_cast<void*>(
        &webrtc::AudioRecordJni::CacheDirectBufferAddress)},
       {"nativeDataIsRecorded", "(IJ)V",
@@ -313,9 +313,23 @@ void AudioRecordJni::CreateJavaInstance(
                                    reinterpret_cast<intptr_t>(this));
   CHECK_EXCEPTION(jni) << "Error during NewObject";
   CHECK(j_audio_record_);
   j_audio_record_ = jni->NewGlobalRef(j_audio_record_);
   CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
   CHECK(j_audio_record_);
 }
 
+int32_t AudioRecordJni::RecordingDeviceName(uint16_t index,
+                                            char name[kAdmMaxDeviceNameSize],
+                                            char guid[kAdmMaxGuidSize]) {
+  // Return empty strings.
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  return 0;
+}
+
 }  // namespace webrtc
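
The new method always succeeds with empty output, matching the fact that
Android exposes no per-device recording names. A usage sketch; the buffer
bounds come from audio_device_defines.h:

    char name[webrtc::kAdmMaxDeviceNameSize];
    char guid[webrtc::kAdmMaxGuidSize];
    if (audio_record->RecordingDeviceName(0, name, guid) == 0) {
      // name and guid are empty strings by design.
    }
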
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -73,16 +73,19 @@ class AudioRecordJni {
   bool Recording() const { return recording_; }
 
   int32_t RecordingDelay(uint16_t& delayMS) const;
 
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
   bool BuiltInAECIsAvailable() const;
   int32_t EnableBuiltInAEC(bool enable);
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]);
 
  private:
   // Called from Java side so we can cache the address of the Java-managed
   // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
   // is also stored in |direct_buffer_capacity_in_bytes_|.
   // This method will be called by the WebRtcAudioRecord constructor, i.e.,
   // on the same thread that this object is created on.
   static void JNICALL CacheDirectBufferAddress(
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -5,16 +5,17 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/audio_manager.h"
 #include "webrtc/modules/audio_device/android/audio_track_jni.h"
+#include "AndroidJNIWrapper.h"
 
 #include <android/log.h>
 
 #include "webrtc/base/arraysize.h"
 #include "webrtc/base/checks.h"
 
 #define TAG "AudioTrackJni"
 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@@ -34,23 +35,25 @@ void AudioTrackJni::SetAndroidAudioDevic
 
   CHECK(jvm);
   CHECK(context);
 
   g_jvm = reinterpret_cast<JavaVM*>(jvm);
   JNIEnv* jni = GetEnv(g_jvm);
   CHECK(jni) << "AttachCurrentThread must be called on this thread";
 
-  g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
-  jclass local_class = FindClass(
-      jni, "org/webrtc/voiceengine/WebRtcAudioTrack");
-  g_audio_track_class = reinterpret_cast<jclass>(
-      NewGlobalRef(jni, local_class));
-  jni->DeleteLocalRef(local_class);
-  CHECK_EXCEPTION(jni);
+  if (!g_context) {
+    g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
+  }
+
+  if (!g_audio_track_class) {
+    g_audio_track_class = jsjni_GetGlobalClassRef(
+                              "org/webrtc/voiceengine/WebRtcAudioTrack");
+    DCHECK(g_audio_track_class);
+  }
 
   // Register native methods with the WebRtcAudioTrack class. These methods
   // are declared private native in WebRtcAudioTrack.java.
   JNINativeMethod native_methods[] = {
       {"nativeCacheDirectBufferAddress", "(Ljava/nio/ByteBuffer;J)V",
           reinterpret_cast<void*>(
        &webrtc::AudioTrackJni::CacheDirectBufferAddress)},
       {"nativeGetPlayoutData", "(IJ)V",
@@ -333,9 +336,23 @@ void AudioTrackJni::CreateJavaInstance()
                                   reinterpret_cast<intptr_t>(this));
   CHECK_EXCEPTION(jni) << "Error during NewObject";
   CHECK(j_audio_track_);
   j_audio_track_ = jni->NewGlobalRef(j_audio_track_);
   CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
   CHECK(j_audio_track_);
 }
 
+int32_t AudioTrackJni::PlayoutDeviceName(uint16_t index,
+                                         char name[kAdmMaxDeviceNameSize],
+                                         char guid[kAdmMaxGuidSize]) {
+  // Return empty strings.
+  memset(name, 0, kAdmMaxDeviceNameSize);
+
+  if (guid) {
+    memset(guid, 0, kAdmMaxGuidSize);
+  }
+
+  return 0;
+}
+
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -69,16 +69,20 @@ class AudioTrackJni : public PlayoutDela
   int SetSpeakerVolume(uint32_t volume);
   int SpeakerVolume(uint32_t& volume) const;
   int MaxSpeakerVolume(uint32_t& max_volume) const;
   int MinSpeakerVolume(uint32_t& min_volume) const;
 
   int32_t PlayoutDelay(uint16_t& delayMS) const;
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]);
+
  protected:
   // PlayoutDelayProvider implementation.
   virtual int PlayoutDelayMs();
 
  private:
   // Called from Java side so we can cache the address of the Java-managed
   // |byte_buffer| in |direct_buffer_address_|. The size of the buffer
   // is also stored in |direct_buffer_capacity_in_bytes_|.
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
@@ -11,17 +11,24 @@
 // The functions in this file are called from native code. They can still be
 // accessed even though they are declared private.
 
 package org.webrtc.voiceengine;
 
 import android.content.Context;
 import android.content.pm.PackageManager;
 import android.media.AudioManager;
+import android.util.Log;
 
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+import org.mozilla.gecko.annotation.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class AudioManagerAndroid {
   // Most of Google lead devices use 44.1K as the default sampling rate, 44.1K
   // is also widely used on other android devices.
   private static final int DEFAULT_SAMPLING_RATE = 44100;
   // Randomly picked frame size that is close to the value returned on an N4.
   // Return this default value when
   // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
   private static final int DEFAULT_FRAMES_PER_BUFFER = 256;
@@ -33,40 +40,47 @@ class AudioManagerAndroid {
 
   @SuppressWarnings("unused")
   private AudioManagerAndroid(Context context) {
     AudioManager audioManager = (AudioManager)
         context.getSystemService(Context.AUDIO_SERVICE);
 
     mNativeOutputSampleRate = DEFAULT_SAMPLING_RATE;
     mAudioLowLatencyOutputFrameSize = DEFAULT_FRAMES_PER_BUFFER;
+    mAudioLowLatencySupported = context.getPackageManager().hasSystemFeature(
+      PackageManager.FEATURE_AUDIO_LOW_LATENCY);
     if (android.os.Build.VERSION.SDK_INT >=
-        android.os.Build.VERSION_CODES.JELLY_BEAN_MR1) {
-      String sampleRateString = audioManager.getProperty(
-          AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
-      if (sampleRateString != null) {
-        mNativeOutputSampleRate = Integer.parseInt(sampleRateString);
-      }
-      String framesPerBuffer = audioManager.getProperty(
-          AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
-      if (framesPerBuffer != null) {
+        17 /*android.os.Build.VERSION_CODES.JELLY_BEAN_MR1*/) {
+      try {
+        Method getProperty = AudioManager.class.getMethod("getProperty", String.class);
+        Field sampleRateField = AudioManager.class.getField("PROPERTY_OUTPUT_SAMPLE_RATE");
+        Field framePerBufferField = AudioManager.class.getField("PROPERTY_OUTPUT_FRAMES_PER_BUFFER");
+        String sampleRateKey = (String)sampleRateField.get(null);
+        String framePerBufferKey = (String)framePerBufferField.get(null);
+        String sampleRateString = (String)getProperty.invoke(audioManager, sampleRateKey);
+        if (sampleRateString != null) {
+          mNativeOutputSampleRate = Integer.parseInt(sampleRateString);
+        }
+        String framesPerBuffer = (String)getProperty.invoke(audioManager, framePerBufferKey);
+        if (framesPerBuffer != null) {
           mAudioLowLatencyOutputFrameSize = Integer.parseInt(framesPerBuffer);
+        }
+      } catch (Exception ex) {
+        Log.w("WebRTC", "error getting low latency params", ex);
       }
     }
-    mAudioLowLatencySupported = context.getPackageManager().hasSystemFeature(
-        PackageManager.FEATURE_AUDIO_LOW_LATENCY);
   }
 
     @SuppressWarnings("unused")
     private int getNativeOutputSampleRate() {
       return mNativeOutputSampleRate;
     }
 
     @SuppressWarnings("unused")
     private boolean isAudioLowLatencySupported() {
         return mAudioLowLatencySupported;
     }
 
     @SuppressWarnings("unused")
     private int getAudioLowLatencyOutputFrameSize() {
         return mAudioLowLatencyOutputFrameSize;
     }
-}
\ No newline at end of file
+}
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -21,16 +21,20 @@ import android.util.Log;
 // The result is then provided to the caller by nativeCacheAudioParameters().
 // It is also possible to call init() to set up the audio environment for best
 // possible "VoIP performance". All settings done in init() are reverted by
 // dispose(). This class can also be used without calling init() if the user
 // prefers to set up the audio environment separately. However, it is
 // recommended to always use AudioManager.MODE_IN_COMMUNICATION.
 // This class also adds support for output volume control of the
 // STREAM_VOICE_CALL-type stream.
+
+import org.mozilla.gecko.annotation.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class WebRtcAudioManager {
   private static final boolean DEBUG = false;
 
   private static final String TAG = "WebRtcAudioManager";
 
    // Use 44.1kHz as the default sampling rate.
   private static final int SAMPLE_RATE_HZ = 44100;
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -21,16 +21,19 @@ import android.media.audiofx.AudioEffect
 import android.media.audiofx.AudioEffect.Descriptor;
 import android.media.AudioRecord;
 import android.media.MediaRecorder.AudioSource;
 import android.os.Build;
 import android.os.Process;
 import android.os.SystemClock;
 import android.util.Log;
 
+import org.mozilla.gecko.annotation.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class  WebRtcAudioRecord {
   private static final boolean DEBUG = false;
 
   private static final String TAG = "WebRtcAudioRecord";
 
   // Default audio data format is PCM 16 bit per sample.
   // Guaranteed to be supported by all devices.
   private static final int BITS_PER_SAMPLE = 16;
@@ -41,17 +44,17 @@ class  WebRtcAudioRecord {
   // Average number of callbacks per second.
   private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
 
   private final long nativeAudioRecord;
   private final Context context;
 
   private ByteBuffer byteBuffer;
 
-  private AudioRecord audioRecord = null;
+  private AudioRecord audioRecord;
   private AudioRecordThread audioThread = null;
 
   private AcousticEchoCanceler aec = null;
   private boolean useBuiltInAEC = false;
 
   /**
    * Audio thread which keeps calling ByteBuffer.read() waiting for audio
    * to be recorded. Feeds recorded data to the native counterpart as a
@@ -158,17 +161,17 @@ class  WebRtcAudioRecord {
     return true;
   }
 
   private int InitRecording(int sampleRate, int channels) {
     Logd("InitRecording(sampleRate=" + sampleRate + ", channels=" +
         channels + ")");
     final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
     final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
-    byteBuffer = byteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
+    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
     Logd("byteBuffer.capacity: " + byteBuffer.capacity());
     // Rather than passing the ByteBuffer with every callback (requiring
     // the potentially expensive GetDirectBufferAddress) we simply have the
     // native class cache the address to the memory once.
     nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);
 
     // Get the minimum buffer size required for the successful creation of
     // an AudioRecord object, in byte units.
@@ -183,18 +186,24 @@ class  WebRtcAudioRecord {
     if (aec != null) {
       aec.release();
       aec = null;
     }
     assertTrue(audioRecord == null);
 
     int bufferSizeInBytes = Math.max(byteBuffer.capacity(), minBufferSize);
     Logd("bufferSizeInBytes: " + bufferSizeInBytes);
+
+    int audioSource = AudioSource.VOICE_COMMUNICATION;
+    if (android.os.Build.VERSION.SDK_INT < 11) {
+      audioSource = AudioSource.DEFAULT;
+    }
+
     try {
-      audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
+      audioRecord = new AudioRecord(audioSource,
                                     sampleRate,
                                     AudioFormat.CHANNEL_IN_MONO,
                                     AudioFormat.ENCODING_PCM_16BIT,
                                     bufferSizeInBytes);
 
     } catch (IllegalArgumentException e) {
       Logd(e.getMessage());
       return -1;
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -15,16 +15,19 @@ import java.nio.ByteBuffer;
 
 import android.content.Context;
 import android.media.AudioFormat;
 import android.media.AudioManager;
 import android.media.AudioTrack;
 import android.os.Process;
 import android.util.Log;
 
+import org.mozilla.gecko.annotation.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class WebRtcAudioTrack {
   private static final boolean DEBUG = false;
 
   private static final String TAG = "WebRtcAudioTrack";
 
   // Default audio data format is PCM 16 bit per sample.
   // Guaranteed to be supported by all devices.
   private static final int BITS_PER_SAMPLE = 16;
@@ -36,17 +39,17 @@ class WebRtcAudioTrack {
   private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;
 
   private final Context context;
   private final long nativeAudioTrack;
   private final AudioManager audioManager;
 
   private ByteBuffer byteBuffer;
 
-  private AudioTrack audioTrack = null;
+  private AudioTrack audioTrack;
   private AudioTrackThread audioThread = null;
 
   /**
    * Audio thread which keeps calling AudioTrack.write() to stream audio.
    * Data is periodically acquired from the native WebRTC layer using the
    * nativeGetPlayoutData callback function.
    * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
    */
@@ -144,17 +147,17 @@ class WebRtcAudioTrack {
       WebRtcAudioUtils.logDeviceInfo(TAG);
     }
   }
 
   private int InitPlayout(int sampleRate, int channels) {
     Logd("InitPlayout(sampleRate=" + sampleRate + ", channels=" +
          channels + ")");
     final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
-    byteBuffer = byteBuffer.allocateDirect(
+    byteBuffer = ByteBuffer.allocateDirect(
         bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
     Logd("byteBuffer.capacity: " + byteBuffer.capacity());
     // Rather than passing the ByteBuffer with every callback (requiring
     // the potentially expensive GetDirectBufferAddress) we simply have the
     // native class cache the address to the memory once.
     nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);
 
     // Get the minimum buffer size required for the successful creation of an
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
@@ -6,25 +6,34 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/opensles_input.h"
 
 #include <assert.h>
+#include <dlfcn.h>
 
+#include "OpenSLESProvider.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+#include <media/AudioSystem.h>
+#include <audio_effects/effect_aec.h>
+#include <audio_effects/effect_ns.h>
+#include <utils/Errors.h>
+#endif
+
 #define VOID_RETURN
 #define OPENSL_RETURN_ON_FAILURE(op, ret_val)                    \
   do {                                                           \
     SLresult err = (op);                                         \
     if (err != SL_RESULT_SUCCESS) {                              \
       assert(false);                                             \
       return ret_val;                                            \
     }                                                            \
@@ -55,60 +64,110 @@ OpenSlesInput::OpenSlesInput(
       sles_engine_itf_(NULL),
       sles_recorder_(NULL),
       sles_recorder_itf_(NULL),
       sles_recorder_sbq_itf_(NULL),
       audio_buffer_(NULL),
       active_queue_(0),
       rec_sampling_rate_(0),
       agc_enabled_(false),
-      recording_delay_(0) {
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+      aec_(NULL),
+      ns_(NULL),
+#endif
+      recording_delay_(0),
+      opensles_lib_(NULL) {
 }
 
 OpenSlesInput::~OpenSlesInput() {
 }
 
 int32_t OpenSlesInput::SetAndroidAudioDeviceObjects(void* javaVM,
                                                     void* context) {
+#if !defined(WEBRTC_GONK)
+  AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, context);
+#endif
   return 0;
 }
 
 void OpenSlesInput::ClearAndroidAudioDeviceObjects() {
+#if !defined(WEBRTC_GONK)
+  AudioManagerJni::ClearAndroidAudioDeviceObjects();
+#endif
 }
 
 int32_t OpenSlesInput::Init() {
   assert(!initialized_);
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0,
+                 "  failed to dlopen OpenSLES library");
+    return -1;
+  }
+
+  f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_RECORD_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_RECORD");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_RECORD_) {
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0,
+                 "  failed to find OpenSLES function");
+    return -1;
+  }
+
   // Set up OpenSL engine.
-  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
-                                          NULL, NULL),
+#ifndef MOZILLA_INTERNAL_API
+  OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
+                                            NULL, NULL),
                            -1);
+#else
+  OPENSL_RETURN_ON_FAILURE(mozilla_get_sles_engine(&sles_engine_, 1, kOption), -1);
+#endif
+#ifndef MOZILLA_INTERNAL_API
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                     SL_BOOLEAN_FALSE),
                            -1);
+#else
+  OPENSL_RETURN_ON_FAILURE(mozilla_realize_sles_engine(sles_engine_), -1);
+#endif
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
-                                                         SL_IID_ENGINE,
+                                                         SL_IID_ENGINE_,
                                                          &sles_engine_itf_),
                            -1);
 
   if (InitSampleRate() != 0) {
     return -1;
   }
   AllocateBuffers();
   initialized_ = true;
   return 0;
 }
 
 int32_t OpenSlesInput::Terminate() {
   // It is assumed that the caller has stopped recording before terminating.
   assert(!recording_);
+#ifndef MOZILLA_INTERNAL_API
   (*sles_engine_)->Destroy(sles_engine_);
+#else
+  mozilla_destroy_sles_engine(&sles_engine_);
+#endif
   initialized_ = false;
   mic_initialized_ = false;
   rec_initialized_ = false;
+  dlclose(opensles_lib_);
   return 0;
 }
 
 int32_t OpenSlesInput::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
   assert(index == 0);
   // Empty strings.
@@ -220,16 +279,24 @@ int32_t OpenSlesInput::MicrophoneBoost(b
   return -1;  // Not supported
 }
 
 int32_t OpenSlesInput::StereoRecordingIsAvailable(bool& available) {  // NOLINT
   available = false;  // Stereo recording not supported on Android.
   return 0;
 }
 
+int32_t OpenSlesInput::SetStereoRecording(bool enable) {  // NOLINT
+  if (enable) {
+    return -1;
+  } else {
+    return 0;
+  }
+}
+
 int32_t OpenSlesInput::StereoRecording(bool& enabled) const {  // NOLINT
   enabled = false;
   return 0;
 }
 
 int32_t OpenSlesInput::RecordingDelay(uint16_t& delayMS) const {  // NOLINT
   delayMS = recording_delay_;
   return 0;
@@ -263,18 +330,22 @@ void OpenSlesInput::UpdateRecordingDelay
   // TODO(hellner): Add accurate delay estimate.
   // On average half the current buffer will have been filled with audio.
   int outstanding_samples =
       (TotalBuffersUsed() - 0.5) * buffer_size_samples();
   recording_delay_ = outstanding_samples / (rec_sampling_rate_ / 1000);
 }
 
 void OpenSlesInput::UpdateSampleRate() {
+#if !defined(WEBRTC_GONK)
   rec_sampling_rate_ = audio_manager_.low_latency_supported() ?
       audio_manager_.native_output_sample_rate() : kDefaultSampleRate;
+#else
+  rec_sampling_rate_ = kDefaultSampleRate;
+#endif
 }
 
 void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
   // Buffer size is 10ms of data.
   num_fifo_buffers_needed_ = kNum10MsToBuffer;
 }
 
 void OpenSlesInput::AllocateBuffers() {
@@ -316,16 +387,114 @@ bool OpenSlesInput::EnqueueAllBuffers() 
   // |fifo_|.
   while (fifo_->size() != 0) {
     // Clear the fifo.
     fifo_->Pop();
   }
   return true;
 }
 
+void OpenSlesInput::SetupVoiceMode() {
+  SLAndroidConfigurationItf configItf;
+  SLresult res = (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_ANDROIDCONFIGURATION_,
+                                                 (void*)&configItf);
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL GetInterface: %d", res);
+
+  if (res == SL_RESULT_SUCCESS) {
+    SLuint32 voiceMode = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
+    SLuint32 voiceSize = sizeof(voiceMode);
+
+    res = (*configItf)->SetConfiguration(configItf,
+                                         SL_ANDROID_KEY_RECORDING_PRESET,
+                                         &voiceMode, voiceSize);
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL Set Voice mode res: %d", res);
+  }
+}
+
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+bool OpenSlesInput::CheckPlatformAEC() {
+  effect_descriptor_t fxDesc;
+  uint32_t numFx;
+
+  if (android::AudioEffect::queryNumberEffects(&numFx) != android::NO_ERROR) {
+    return false;
+  }
+
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "Platform has %d effects", numFx);
+
+  for (uint32_t i = 0; i < numFx; i++) {
+    if (android::AudioEffect::queryEffect(i, &fxDesc) != android::NO_ERROR) {
+      continue;
+    }
+    if (memcmp(&fxDesc.type, FX_IID_AEC, sizeof(fxDesc.type)) == 0) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void OpenSlesInput::SetupAECAndNS() {
+  bool hasAec = CheckPlatformAEC();
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "Platform has AEC: %d", hasAec);
+  // This code should not have been enabled if this fails, because it means the
+  // software AEC will have been disabled as well. If you hit this, you need
+  // to fix your B2G config or fix the hardware AEC on your device.
+  assert(hasAec);
+
+  SLAndroidConfigurationItf configItf;
+  SLresult res = (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_ANDROIDCONFIGURATION_,
+                                                 (void*)&configItf);
+  WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL GetInterface: %d", res);
+
+  if (res == SL_RESULT_SUCCESS) {
+    SLuint32 sessionId = 0;
+    SLuint32 idSize = sizeof(sessionId);
+    res = (*configItf)->GetConfiguration(configItf,
+                                         SL_ANDROID_KEY_RECORDING_SESSION_ID,
+                                         &idSize, &sessionId);
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL Get sessionId res: %d", res);
+
+    if (res == SL_RESULT_SUCCESS && idSize == sizeof(sessionId)) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL sessionId: %d", sessionId);
+
+      aec_ = new android::AudioEffect(FX_IID_AEC, NULL, 0, 0, 0, sessionId, 0);
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL aec: %p", aec_);
+
+      if (aec_) {
+        android::status_t status = aec_->initCheck();
+        if (status == android::NO_ERROR || status == android::ALREADY_EXISTS) {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL aec enabled");
+          aec_->setEnabled(true);
+        } else {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL aec disabled: %d", status);
+          delete aec_;
+          aec_ = NULL;
+        }
+      }
+
+      ns_ = new android::AudioEffect(FX_IID_NS, NULL, 0, 0, 0, sessionId, 0);
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL ns: %p", ns_);
+
+      if (ns_) {
+        android::status_t status = ns_->initCheck();
+        if (status == android::NO_ERROR || status == android::ALREADY_EXISTS) {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL ns enabled");
+          ns_->setEnabled(true);
+        } else {
+          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, 0, "OpenSL ns disabled: %d", status);
+          delete ns_;
+          ns_ = NULL;
+        }
+      }
+    }
+  }
+}
+#endif
+
 bool OpenSlesInput::CreateAudioRecorder() {
   if (!event_.Start()) {
     assert(false);
     return false;
   }
   SLDataLocator_IODevice micLocator = {
     SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
     SL_DEFAULTDEVICEID_AUDIOINPUT, NULL };
@@ -338,66 +507,70 @@ bool OpenSlesInput::CreateAudioRecorder(
   SLDataFormat_PCM configuration =
       webrtc_opensl::CreatePcmConfiguration(rec_sampling_rate_);
   SLDataSink audio_sink = { &simple_buf_queue, &configuration };
 
   // Interfaces for recording android audio data and Android are needed.
   // Note the interfaces still need to be initialized. This only tells OpenSl
   // that the interfaces will be needed at some point.
   const SLInterfaceID id[kNumInterfaces] = {
-    SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_ANDROIDSIMPLEBUFFERQUEUE_, SL_IID_ANDROIDCONFIGURATION_ };
   const SLboolean req[kNumInterfaces] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
                                                &sles_recorder_,
                                                &audio_source,
                                                &audio_sink,
                                                kNumInterfaces,
                                                id,
                                                req),
       false);
 
   SLAndroidConfigurationItf recorder_config;
   OPENSL_RETURN_ON_FAILURE(
       (*sles_recorder_)->GetInterface(sles_recorder_,
-                                      SL_IID_ANDROIDCONFIGURATION,
+                                      SL_IID_ANDROIDCONFIGURATION_,
                                       &recorder_config),
       false);
 
-  // Set audio recorder configuration to
-  // SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION which ensures that we
-  // use the main microphone tuned for audio communications.
-  SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
-  OPENSL_RETURN_ON_FAILURE(
-      (*recorder_config)->SetConfiguration(recorder_config,
-                                           SL_ANDROID_KEY_RECORDING_PRESET,
-                                           &stream_type,
-                                           sizeof(SLint32)),
-      false);
+  SetupVoiceMode();
 
   // Realize the recorder in synchronous mode.
   OPENSL_RETURN_ON_FAILURE((*sles_recorder_)->Realize(sles_recorder_,
                                                       SL_BOOLEAN_FALSE),
                            false);
+
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+  SetupAECAndNS();
+#endif
+
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
+      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD_,
                                       static_cast<void*>(&sles_recorder_itf_)),
       false);
   OPENSL_RETURN_ON_FAILURE(
       (*sles_recorder_)->GetInterface(
           sles_recorder_,
-          SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+          SL_IID_ANDROIDSIMPLEBUFFERQUEUE_,
           static_cast<void*>(&sles_recorder_sbq_itf_)),
       false);
   return true;
 }
 
 void OpenSlesInput::DestroyAudioRecorder() {
   event_.Stop();
+
+#if defined(WEBRTC_GONK) && defined(WEBRTC_HARDWARE_AEC_NS)
+  delete aec_;
+  delete ns_;
+  aec_ = NULL;
+  ns_ = NULL;
+#endif
+
   if (sles_recorder_sbq_itf_) {
     // Release all buffers currently queued up.
     OPENSL_RETURN_ON_FAILURE(
         (*sles_recorder_sbq_itf_)->Clear(sles_recorder_sbq_itf_),
         VOID_RETURN);
     sles_recorder_sbq_itf_ = NULL;
   }
   sles_recorder_itf_ = NULL;
@@ -521,16 +694,17 @@ bool OpenSlesInput::CbThreadImpl() {
   CriticalSectionScoped lock(crit_sect_.get());
   if (HandleOverrun(event_id, event_msg)) {
     return recording_;
   }
   // If the fifo_ has audio data process it.
   while (fifo_->size() > 0 && recording_) {
     int8_t* audio = fifo_->Pop();
     audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
-    audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
+    audio_buffer_->SetVQEData(delay_provider_ ?
+                              delay_provider_->PlayoutDelayMs() : 0,
                               recording_delay_, 0);
     audio_buffer_->DeliverRecordedData();
   }
   return recording_;
 }
 
 }  // namespace webrtc
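
Init() above now binds OpenSL ES at runtime rather than link time, so
libOpenSLES.so becomes a soft dependency. A condensed sketch of the
dlopen/dlsym pattern it uses; error handling is trimmed, and the function
pointer signature matches <SLES/OpenSLES.h>:

    #include <dlfcn.h>
    #include <SLES/OpenSLES.h>

    typedef SLresult (*slCreateEngine_t)(
        SLObjectItf*, SLuint32, const SLEngineOption*,
        SLuint32, const SLInterfaceID*, const SLboolean*);

    void* lib = dlopen("libOpenSLES.so", RTLD_LAZY);
    slCreateEngine_t create_engine =
        reinterpret_cast<slCreateEngine_t>(dlsym(lib, "slCreateEngine"));
    SLInterfaceID iid_engine =
        *reinterpret_cast<SLInterfaceID*>(dlsym(lib, "SL_IID_ENGINE"));
    // |create_engine| and |iid_engine| are then used exactly as Init() does.
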
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
@@ -11,17 +11,24 @@
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
 #include "webrtc/base/scoped_ptr.h"
+// Not defined in the Android headers we build against.
+#define SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION ((SLuint32) 0x00000004)
+
+#if !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#else
+#include "media/AudioEffect.h"
+#endif
 #include "webrtc/modules/audio_device/android/low_latency_event.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 
 namespace webrtc {
 
 class AudioDeviceBuffer;
 class AudioManager;
@@ -100,17 +107,17 @@ class OpenSlesInput {
 
   // Microphone boost control
   int32_t MicrophoneBoostIsAvailable(bool& available);  // NOLINT
   int32_t SetMicrophoneBoost(bool enable);
   int32_t MicrophoneBoost(bool& enabled) const;  // NOLINT
 
   // Stereo support
   int32_t StereoRecordingIsAvailable(bool& available);  // NOLINT
-  int32_t SetStereoRecording(bool enable) { return -1; }
+  int32_t SetStereoRecording(bool enable);
   int32_t StereoRecording(bool& enabled) const;  // NOLINT
 
   // Delay information and control
   int32_t RecordingDelay(uint16_t& delayMS) const;  // NOLINT
 
   bool RecordingWarning() const { return false; }
   bool RecordingError() const  { return false; }
   void ClearRecordingWarning() {}
@@ -124,32 +131,37 @@ class OpenSlesInput {
   int32_t EnableBuiltInAEC(bool enable) { return -1; }
 
  private:
   enum {
     kNumInterfaces = 2,
     //