Bug 987979: Patch 2 - Rollup of changes previously applied to media/webrtc/trunk/webrtc rs=jesup
author: Randell Jesup <rjesup@jesup.org>
date: Thu, 29 May 2014 17:05:14 -0400
changeset 204966 ad8a86bfd86021b1a92cce6b6847b17d630a602f
parent 204965 d33accc6a20e6fa1b2e48804fe73964fb4007c90
child 204967 7a835877bc62a4e05c568093dfd5fe66ca7b3984
push id: 3741
push user: asasaki@mozilla.com
push date: Mon, 21 Jul 2014 20:25:18 +0000
treeherder: mozilla-beta@4d6f46f5af68
reviewers: jesup
bugs: 987979
milestone: 32.0a1
content/media/webrtc/MediaEngineWebRTC.h
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.h
media/webrtc/trunk/webrtc/build/arm_neon.gypi
media/webrtc/trunk/webrtc/build/common.gypi
media/webrtc/trunk/webrtc/build/merge_libs.gyp
media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
media/webrtc/trunk/webrtc/common_types.h
media/webrtc/trunk/webrtc/engine_configurations.h
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
media/webrtc/trunk/webrtc/modules/audio_coding/neteq4/neteq.gypi
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.cc
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.h
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.cc
media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.h
media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_sse2.c
media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.h
media/webrtc/trunk/webrtc/modules/interface/module_common_types.h
media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
media/webrtc/trunk/webrtc/modules/modules.gyp
media/webrtc/trunk/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_h264.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp.gypi
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.h
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/h264/include/h264.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/interface/video_coding.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/decoding_state.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/media_optimization.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/media_optimization.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/packet.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/qm_select.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/qm_select.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_coding_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_coding_impl.h
media/webrtc/trunk/webrtc/modules/video_coding/main/source/video_sender.cc
media/webrtc/trunk/webrtc/modules/video_processing/main/source/video_processing.gypi
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViEAndroidGLES20.java
media/webrtc/trunk/webrtc/modules/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java
media/webrtc/trunk/webrtc/system_wrappers/interface/asm_defines.h
media/webrtc/trunk/webrtc/system_wrappers/interface/tick_util.h
media/webrtc/trunk/webrtc/system_wrappers/source/atomic32_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/clock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_info.cc
media/webrtc/trunk/webrtc/system_wrappers/source/rw_lock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers.gyp
media/webrtc/trunk/webrtc/system_wrappers/source/thread_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/tick_util.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.h
media/webrtc/trunk/webrtc/system_wrappers/source/trace_posix.cc
media/webrtc/trunk/webrtc/test/channel_transport/udp_transport_impl.cc
media/webrtc/trunk/webrtc/typedefs.h
media/webrtc/trunk/webrtc/video_engine/include/vie_base.h
media/webrtc/trunk/webrtc/video_engine/include/vie_rtp_rtcp.h
media/webrtc/trunk/webrtc/video_engine/test/auto_test/source/vie_autotest.cc
media/webrtc/trunk/webrtc/video_engine/vie_base_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_base_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_channel.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel.h
media/webrtc/trunk/webrtc/video_engine/vie_channel_manager.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel_manager.h
media/webrtc/trunk/webrtc/video_engine/vie_codec_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_encoder.cc
media/webrtc/trunk/webrtc/video_engine/vie_encoder.h
media/webrtc/trunk/webrtc/video_engine/vie_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.h
media/webrtc/trunk/webrtc/video_engine/vie_rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/video_engine/vie_shared_data.cc
media/webrtc/trunk/webrtc/video_engine/vie_shared_data.h
media/webrtc/trunk/webrtc/video_engine/vie_sync_module.cc
media/webrtc/trunk/webrtc/voice_engine/channel.cc
media/webrtc/trunk/webrtc/voice_engine/channel.h
media/webrtc/trunk/webrtc/voice_engine/include/mock/fake_voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_rtp_rtcp.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_video_sync.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
media/webrtc/trunk/webrtc/voice_engine/output_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/output_mixer.h
media/webrtc/trunk/webrtc/voice_engine/output_mixer_unittest.cc
media/webrtc/trunk/webrtc/voice_engine/test/auto_test/standard/video_sync_test.cc
media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.h
media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_rtp_rtcp_impl.h
media/webrtc/trunk/webrtc/voice_engine/voe_video_sync_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voe_video_sync_impl.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine.gyp
media/webrtc/trunk/webrtc/voice_engine/voice_engine_defines.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine_impl.cc
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -38,16 +38,18 @@
 #include "webrtc/voice_engine/include/voe_network.h"
 #include "webrtc/voice_engine/include/voe_audio_processing.h"
 #include "webrtc/voice_engine/include/voe_volume_control.h"
 #include "webrtc/voice_engine/include/voe_external_media.h"
 #include "webrtc/voice_engine/include/voe_audio_processing.h"
 #include "webrtc/voice_engine/include/voe_call_report.h"
 
 // Video Engine
+// conflicts with #include of scoped_ptr.h
+#undef FF
 #include "webrtc/video_engine/include/vie_base.h"
 #include "webrtc/video_engine/include/vie_codec.h"
 #include "webrtc/video_engine/include/vie_render.h"
 #include "webrtc/video_engine/include/vie_capture.h"
 #ifdef MOZ_B2G_CAMERA
 #include "CameraControlListener.h"
 #include "ICameraControl.h"
 #include "ImageContainer.h"
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -746,17 +746,18 @@ WebrtcAudioConduit::ReceivedRTPPacket(co
     if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
       // timestamp is at 32 bits in ([1])
       struct Processing insert = { TimeStamp::Now(),
                                    ntohl(static_cast<const uint32_t *>(data)[1]) };
       mProcessing.AppendElement(insert);
     }
 #endif
 
-    if(mPtrVoENetwork->ReceivedRTPPacket(mChannel,data,len) == -1)
+    // XXX we need to be passed the time the packet was received
+    if(mPtrVoENetwork->ReceivedRTPPacket(mChannel, data, len) == -1)
     {
       int error = mPtrVoEBase->LastError();
       CSFLogError(logTag, "%s RTP Processing Error %d", __FUNCTION__, error);
       if(error == VE_RTP_RTCP_MODULE_ERROR)
       {
         return kMediaConduitRTPRTCPModuleError;
       }
       return kMediaConduitUnknownError;
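
The latency-logging block above pulls the RTP timestamp straight out of the packet: in the fixed 12-byte RTP header the timestamp is the second 32-bit word, so word [1] of the buffer, converted with ntohl(), recovers it. A standalone sketch of the same extraction (hypothetical packet bytes; assumes a well-formed header):

    #include <arpa/inet.h>  // ntohl
    #include <stdint.h>
    #include <stdio.h>

    // Same extraction as the latency-logging code: header word [1],
    // converted from network byte order to host order.
    static uint32_t RtpTimestamp(const void* data) {
      return ntohl(static_cast<const uint32_t*>(data)[1]);
    }

    int main() {
      // Hypothetical RTP header: V=2, PT=111, seq=1, ts=12345678, SSRC=2.
      alignas(4) const uint8_t pkt[12] = {0x80, 0x6f, 0x00, 0x01,
                                          0x00, 0xbc, 0x61, 0x4e,   // timestamp
                                          0x00, 0x00, 0x00, 0x02};  // SSRC
      printf("timestamp = %u\n", RtpTimestamp(pkt));  // prints 12345678
      return 0;
    }
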
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
@@ -13,16 +13,17 @@
 #include "AudioConduit.h"
 #include "nsThreadUtils.h"
 #include "LoadManager.h"
 #include "YuvStamper.h"
 #include "nsServiceManagerUtils.h"
 #include "nsIPrefService.h"
 #include "nsIPrefBranch.h"
 
+#include "webrtc/common_types.h"
 #include "webrtc/common_video/interface/native_handle.h"
 #include "webrtc/video_engine/include/vie_errors.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidJNIWrapper.h"
 #endif
 
 #include <algorithm>
@@ -1020,17 +1021,18 @@ MediaConduitErrorCode
 WebrtcVideoConduit::ReceivedRTPPacket(const void *data, int len)
 {
   CSFLogDebug(logTag, "%s: Channel %d, Len %d ", __FUNCTION__, mChannel, len);
 
   // Media Engine should be receiving already.
   if(mEngineReceiving)
   {
     // let the engine know of a RTP packet to decode
-    if(mPtrViENetwork->ReceivedRTPPacket(mChannel,data,len) == -1)
+    // XXX we need to be passed the time the packet was received
+    if(mPtrViENetwork->ReceivedRTPPacket(mChannel, data, len, webrtc::PacketTime()) == -1)
     {
       int error = mPtrViEBase->LastError();
       CSFLogError(logTag, "%s RTP Processing Failed %d ", __FUNCTION__, error);
       if(error >= kViERtpRtcpInvalidChannelId && error <= kViERtpRtcpRtcpDisabled)
       {
         return kMediaConduitRTPProcessingFailed;
       }
       return kMediaConduitRTPRTCPModuleError;
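
The video path now hands ReceivedRTPPacket() a webrtc::PacketTime; the conduit passes a default-constructed value (arrival time unknown), which is what the XXX comment refers to. A sketch of what a caller that does know the arrival time could pass, assuming the two-field PacketTime shape (timestamp plus not_before, microseconds, -1 for unknown) from common_types.h of this vintage:

    #include <stdint.h>

    // Assumed shape of webrtc::PacketTime in this tree (see common_types.h).
    struct PacketTime {
      PacketTime() : timestamp(-1), not_before(-1) {}
      PacketTime(int64_t ts, int64_t nb) : timestamp(ts), not_before(nb) {}
      int64_t timestamp;   // receive time in microseconds; -1 = unknown
      int64_t not_before;  // earliest possible arrival time; -1 = unknown
    };

    // Hypothetical caller with the socket-level receive time available:
    // a real arrival time lets the receive side see wire arrival rather
    // than processing time.
    PacketTime ArrivalFromSocket(int64_t recv_time_us) {
      return PacketTime(recv_time_us, -1);
    }
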
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -6,16 +6,18 @@
 #define VIDEO_SESSION_H_
 
 #include "nsAutoPtr.h"
 #include "mozilla/Attributes.h"
 
 #include "MediaConduitInterface.h"
 #include "MediaEngineWrapper.h"
 
+// conflicts with #include of scoped_ptr.h
+#undef FF
 // Video Engine Includes
 #include "webrtc/common_types.h"
 #ifdef FF
 #undef FF // Avoid name collision between scoped_ptr.h and nsCRTGlue.h.
 #endif
 #include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
 #include "webrtc/video_engine/include/vie_base.h"
 #include "webrtc/video_engine/include/vie_capture.h"
--- a/media/webrtc/trunk/webrtc/build/arm_neon.gypi
+++ b/media/webrtc/trunk/webrtc/build/arm_neon.gypi
@@ -18,13 +18,35 @@
 #   ],
 #   'includes': ['path/to/this/gypi/file'],
 # }
 
 {
   'cflags!': [
     '-mfpu=vfpv3-d16',
   ],
+  'cflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
   'cflags': [
     '-mfpu=neon',
     '-flax-vector-conversions',
   ],
+  'cflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+
 }
--- a/media/webrtc/trunk/webrtc/build/common.gypi
+++ b/media/webrtc/trunk/webrtc/build/common.gypi
@@ -37,27 +37,41 @@
       },
       'build_with_chromium%': '<(build_with_chromium)',
       'build_with_libjingle%': '<(build_with_libjingle)',
       'webrtc_root%': '<(webrtc_root)',
       'apk_tests_path%': '<(apk_tests_path)',
       'modules_java_gyp_path%': '<(modules_java_gyp_path)',
       'gen_core_neon_offsets_gyp%': '<(gen_core_neon_offsets_gyp)',
       'webrtc_vp8_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp8',
+      'webrtc_h264_dir%': '<(webrtc_root)/modules/video_coding/codecs/h264',
       'rbe_components_path%': '<(webrtc_root)/modules/remote_bitrate_estimator',
+      'include_g711%': 1,
+      'include_g722%': 1,
+      'include_ilbc%': 1,
       'include_opus%': 1,
+      'include_isac%': 1,
+      'include_pcm16b%': 1,
     },
     'build_with_chromium%': '<(build_with_chromium)',
     'build_with_libjingle%': '<(build_with_libjingle)',
     'webrtc_root%': '<(webrtc_root)',
     'apk_tests_path%': '<(apk_tests_path)',
     'modules_java_gyp_path%': '<(modules_java_gyp_path)',
     'gen_core_neon_offsets_gyp%': '<(gen_core_neon_offsets_gyp)',
     'webrtc_vp8_dir%': '<(webrtc_vp8_dir)',
+    'webrtc_h264_dir%': '<(webrtc_h264_dir)',
+
+    'include_g711%': '<(include_g711)',
+    'include_g722%': '<(include_g722)',
+    'include_ilbc%': '<(include_ilbc)',
     'include_opus%': '<(include_opus)',
+    'include_isac%': '<(include_isac)',
+    'include_pcm16b%': '<(include_pcm16b)',
+
     'rbe_components_path%': '<(rbe_components_path)',
 
     # The Chromium common.gypi we use treats all gyp files without
     # chromium_code==1 as third party code. This disables many of the
     # preferred warning settings.
     #
     # We can set this here to have WebRTC code treated as Chromium code. Our
     # third party code will still have the reduced warning settings.
@@ -107,16 +121,19 @@
         # Exclude internal ADM since Chromium uses its own IO handling.
         'include_internal_audio_device%': 0,
 
         # Exclude internal VCM in Chromium build.
         'include_internal_video_capture%': 0,
 
         # Exclude internal video render module in Chromium build.
         'include_internal_video_render%': 0,
+
+        # lazily allocate the ~4MB of trace message buffers if set
+        'enable_lazy_trace_alloc%': 0,
       }, {  # Settings for the standalone (not-in-Chromium) build.
         # TODO(andrew): For now, disable the Chrome plugins, which causes a
         # flood of chromium-style warnings. Investigate enabling them:
         # http://code.google.com/p/webrtc/issues/detail?id=163
         'clang_use_chrome_plugins%': 0,
 
         'include_pulse_audio%': 1,
         'include_internal_audio_device%': 1,
@@ -125,16 +142,31 @@
       }],
       ['build_with_libjingle==1', {
         'include_tests%': 0,
         'restrict_webrtc_logging%': 1,
       }, {
         'include_tests%': 1,
         'restrict_webrtc_logging%': 0,
       }],
+      ['OS=="linux"', {
+        'include_alsa_audio%': 1,
+      }, {
+        'include_alsa_audio%': 0,
+      }],
+      ['OS=="solaris" or os_bsd==1', {
+        'include_pulse_audio%': 1,
+      }, {
+        'include_pulse_audio%': 0,
+      }],
+      ['OS=="linux" or OS=="solaris" or os_bsd==1', {
+        'include_v4l2_video_capture%': 1,
+      }, {
+        'include_v4l2_video_capture%': 0,
+      }],
       ['OS=="ios"', {
         'build_libjpeg%': 0,
         'enable_protobuf%': 0,
         'include_tests%': 0,
       }],
       ['target_arch=="arm" or target_arch=="armv7"', {
         'prefer_fixed_point%': 1,
       }],
@@ -145,16 +177,21 @@
       # Allow includes to be prefixed with webrtc/ in case it is not an
       # immediate subdirectory of <(DEPTH).
       '../..',
       # To include the top-level directory when building in Chrome, so we can
       # use full paths (e.g. headers inside testing/ or third_party/).
       '<(DEPTH)',
     ],
     'conditions': [
+      ['moz_widget_toolkit_gonk==1', {
+        'defines' : [
+          'WEBRTC_GONK',
+        ],
+      }],
       ['restrict_webrtc_logging==1', {
         'defines': ['WEBRTC_RESTRICT_LOGGING',],
       }],
       ['build_with_mozilla==1', {
         'defines': [
           # Changes settings for Mozilla build.
           'WEBRTC_MOZILLA_BUILD',
          ],
@@ -192,27 +229,41 @@
         ],
       }],
       ['target_arch=="arm" or target_arch=="armv7"', {
         'defines': [
           'WEBRTC_ARCH_ARM',
         ],
         'conditions': [
           ['arm_version==7', {
-            'defines': ['WEBRTC_ARCH_ARM_V7',],
+            'defines': ['WEBRTC_ARCH_ARM_V7',
+                        'WEBRTC_BUILD_NEON_LIBS'],
             'conditions': [
               ['arm_neon==1', {
                 'defines': ['WEBRTC_ARCH_ARM_NEON',],
               }, {
                 'defines': ['WEBRTC_DETECT_ARM_NEON',],
               }],
             ],
           }],
         ],
       }],
+      ['os_bsd==1', {
+        'defines': [
+          'WEBRTC_BSD',
+          'WEBRTC_THREAD_RR',
+        ],
+      }],
+      ['OS=="dragonfly" or OS=="netbsd"', {
+        'defines': [
+          # doesn't support pthread_condattr_setclock
+          'WEBRTC_CLOCK_TYPE_REALTIME',
+        ],
+      }],
+      # Mozilla: if we support Mozilla on MIPS, we'll need to mod the cflags entries here
       ['target_arch=="mipsel"', {
         'defines': [
           'MIPS32_LE',
         ],
         'conditions': [
           ['mips_fpu==1', {
             'defines': [
               'MIPS_FPU_LE',
@@ -263,16 +314,23 @@
       }],
       ['OS=="ios"', {
         'defines': [
           'WEBRTC_MAC',
           'WEBRTC_IOS',
         ],
       }],
       ['OS=="linux"', {
+#        'conditions': [
+#          ['have_clock_monotonic==1', {
+#            'defines': [
+#              'WEBRTC_CLOCK_TYPE_REALTIME',
+#            ],
+#          }],
+#        ],
         'defines': [
           'WEBRTC_LINUX',
         ],
       }],
       ['OS=="mac"', {
         'defines': [
           'WEBRTC_MAC',
         ],
@@ -286,27 +344,28 @@
         # http://code.google.com/p/webrtc/issues/detail?id=261 is solved.
         'msvs_disabled_warnings': [
           4373,  # legacy warning for ignoring const / volatile in signatures.
           4389,  # Signed/unsigned mismatch.
         ],
         # Re-enable some warnings that Chromium disables.
         'msvs_disabled_warnings!': [4189,],
       }],
+      # used on GONK as well
+      ['enable_android_opensl==1 and (OS=="android" or moz_widget_toolkit_gonk==1)', {
+        'defines': [
+          'WEBRTC_ANDROID_OPENSLES',
+        ],
+      }],
       ['OS=="android"', {
         'defines': [
           'WEBRTC_LINUX',
           'WEBRTC_ANDROID',
          ],
          'conditions': [
-           ['enable_android_opensl==1', {
-             'defines': [
-               'WEBRTC_ANDROID_OPENSLES',
-             ],
-           }],
            ['clang!=1', {
              # The Android NDK doesn't provide optimized versions of these
              # functions. Ensure they are disabled for all compilers.
              'cflags': [
                '-fno-builtin-cos',
                '-fno-builtin-sin',
                '-fno-builtin-cosf',
                '-fno-builtin-sinf',
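
For arm_version==7 the build now always defines WEBRTC_ARCH_ARM_V7 and WEBRTC_BUILD_NEON_LIBS, then selects WEBRTC_ARCH_ARM_NEON (NEON assumed at build time) or WEBRTC_DETECT_ARM_NEON (probe at run time). A sketch of the dispatch pattern those defines support (the probe is a stub here; the real detection helpers live in system_wrappers):

    // Dispatch pattern enabled by the NEON defines above. Stubbed probe
    // and trivial kernels; illustration only.
    static bool CpuHasNeon() {
      // Real code would check AT_HWCAP via getauxval() or /proc/cpuinfo.
      return false;
    }

    static void ScaleC(float* buf, int len, float gain) {
      for (int i = 0; i < len; ++i) buf[i] *= gain;
    }

    static void ScaleNeon(float* buf, int len, float gain) {
      ScaleC(buf, len, gain);  // stand-in for a NEON intrinsics version
    }

    void Scale(float* buf, int len, float gain) {
    #if defined(WEBRTC_ARCH_ARM_NEON)
      ScaleNeon(buf, len, gain);          // NEON guaranteed by the build
    #elif defined(WEBRTC_DETECT_ARM_NEON)
      if (CpuHasNeon())                   // decided once at run time
        ScaleNeon(buf, len, gain);
      else
        ScaleC(buf, len, gain);
    #else
      ScaleC(buf, len, gain);
    #endif
    }
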
--- a/media/webrtc/trunk/webrtc/build/merge_libs.gyp
+++ b/media/webrtc/trunk/webrtc/build/merge_libs.gyp
@@ -39,10 +39,12 @@
           'outputs': ['<(output_lib)'],
           'action': ['python',
                      'merge_libs.py',
                      '<(PRODUCT_DIR)',
                      '<(output_lib)',],
         },
       ],
     },
+#      }],
+#    ],
   ],
 }
--- a/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
+++ b/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
@@ -150,16 +150,17 @@
       'targets': [
         {
           'target_name': 'common_audio_sse2',
           'type': 'static_library',
           'sources': [
             'resampler/sinc_resampler_sse.cc',
           ],
           'cflags': ['-msse2',],
+          'cflags_mozilla': ['-msse2',],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],  # targets
     }],
     ['(target_arch=="arm" and arm_version==7) or target_arch=="armv7"', {
       'targets': [
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
@@ -12,105 +12,54 @@
 /*
  * A wrapper for resampling among numerous combinations of sampling rates.
  */
 
 #ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
 #define WEBRTC_RESAMPLER_RESAMPLER_H_
 
 #include "webrtc/typedefs.h"
+#include "speex/speex_resampler.h"
 
 namespace webrtc
 {
 
-// TODO(andrew): the implementation depends on the exact values of this enum.
-// It should be rewritten in a less fragile way.
+#define FIXED_RATE_RESAMPLER 0x10
 enum ResamplerType
 {
-    // 4 MSB = Number of channels
-    // 4 LSB = Synchronous or asynchronous
-
-    kResamplerSynchronous = 0x10,
-    kResamplerAsynchronous = 0x11,
-    kResamplerSynchronousStereo = 0x20,
-    kResamplerAsynchronousStereo = 0x21,
-    kResamplerInvalid = 0xff
-};
-
-// TODO(andrew): doesn't need to be part of the interface.
-enum ResamplerMode
-{
-    kResamplerMode1To1,
-    kResamplerMode1To2,
-    kResamplerMode1To3,
-    kResamplerMode1To4,
-    kResamplerMode1To6,
-    kResamplerMode1To12,
-    kResamplerMode2To3,
-    kResamplerMode2To11,
-    kResamplerMode4To11,
-    kResamplerMode8To11,
-    kResamplerMode11To16,
-    kResamplerMode11To32,
-    kResamplerMode2To1,
-    kResamplerMode3To1,
-    kResamplerMode4To1,
-    kResamplerMode6To1,
-    kResamplerMode12To1,
-    kResamplerMode3To2,
-    kResamplerMode11To2,
-    kResamplerMode11To4,
-    kResamplerMode11To8
+    kResamplerSynchronous            = 0x00,
+    kResamplerSynchronousStereo      = 0x01,
+    kResamplerFixedSynchronous       = 0x00 | FIXED_RATE_RESAMPLER,
+    kResamplerFixedSynchronousStereo = 0x01 | FIXED_RATE_RESAMPLER,
 };
 
 class Resampler
 {
-
 public:
     Resampler();
     // TODO(andrew): use an init function instead.
-    Resampler(int inFreq, int outFreq, ResamplerType type);
+    Resampler(int in_freq, int out_freq, ResamplerType type);
     ~Resampler();
 
     // Reset all states
-    int Reset(int inFreq, int outFreq, ResamplerType type);
+    int Reset(int in_freq, int out_freq, ResamplerType type);
 
     // Reset all states if any parameter has changed
-    int ResetIfNeeded(int inFreq, int outFreq, ResamplerType type);
+    int ResetIfNeeded(int in_freq, int out_freq, ResamplerType type);
 
     // Synchronous resampling, all output samples are written to samplesOut
-    int Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
-             int maxLen, int &outLen);
-
-    // Asynchronous resampling, input
-    int Insert(int16_t* samplesIn, int lengthIn);
-
-    // Asynchronous resampling output, remaining samples are buffered
-    int Pull(int16_t* samplesOut, int desiredLen, int &outLen);
+    int Push(const int16_t* samples_in, int length_in,
+             int16_t* samples_out, int max_len, int &out_len);
 
 private:
-    // Generic pointers since we don't know what states we'll need
-    void* state1_;
-    void* state2_;
-    void* state3_;
+    bool IsFixedRate() { return !!(type_ & FIXED_RATE_RESAMPLER); }
+
+    SpeexResamplerState* state_;
 
-    // Storage if needed
-    int16_t* in_buffer_;
-    int16_t* out_buffer_;
-    int in_buffer_size_;
-    int out_buffer_size_;
-    int in_buffer_size_max_;
-    int out_buffer_size_max_;
-
-    // State
-    int my_in_frequency_khz_;
-    int my_out_frequency_khz_;
-    ResamplerMode my_mode_;
-    ResamplerType my_type_;
-
-    // Extra instance for stereo
-    Resampler* slave_left_;
-    Resampler* slave_right_;
+    int in_freq_;
+    int out_freq_;
+    int channels_;
+    ResamplerType type_;
 };
 
 }  // namespace webrtc
 
 #endif // WEBRTC_RESAMPLER_RESAMPLER_H_
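
The rewritten header drops the fixed mode table (and the asynchronous Insert/Pull surface) in favor of a single SpeexResamplerState that handles arbitrary rate pairs; the FIXED_RATE_RESAMPLER bit marks instances that may skip the resampler entirely when the input and output rates match. A minimal usage sketch of the surviving API (rates and buffer sizes are arbitrary):

    #include "webrtc/common_audio/resampler/include/resampler.h"

    // Mono 44.1 kHz -> 48 kHz using the new Speex-backed wrapper.
    // Push() fails until Reset()/ResetIfNeeded() has configured a rate pair.
    int ResampleBlock(const int16_t* in, int in_len,
                      int16_t* out, int out_max) {
      static webrtc::Resampler resampler;
      int out_len = 0;
      if (resampler.ResetIfNeeded(44100, 48000,
                                  webrtc::kResamplerSynchronous) != 0)
        return -1;
      if (resampler.Push(in, in_len, out, out_max, out_len) != 0)
        return -1;
      return out_len;  // samples actually produced
    }
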
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
@@ -8,17 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/common_audio/resampler/include/push_resampler.h"
 
 #include <string.h>
 
 #include "webrtc/common_audio/include/audio_util.h"
-#include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/resampler/push_sinc_resampler.h"
 
 namespace webrtc {
 
 PushResampler::PushResampler()
     : src_sample_rate_hz_(0),
       dst_sample_rate_hz_(0),
       num_channels_(0),
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
@@ -10,1075 +10,126 @@
 
 
 /*
  * A wrapper for resampling among numerous combinations of sampling rates.
  */
 
 #include <stdlib.h>
 #include <string.h>
+#include <assert.h>
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
+// TODO(jesup) adjust per-platform based on CPU ability
+// Note: if these are changed (higher), you may need to change the
+// KernelDelay values in the unit tests here and in output_mixer.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK)
+#define RESAMPLER_QUALITY 2
+#else
+#define RESAMPLER_QUALITY 3
+#endif
 
 namespace webrtc
 {
 
-Resampler::Resampler()
+Resampler::Resampler() : state_(NULL), type_(kResamplerSynchronous)
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
+  // Note: Push will fail until Reset() is called
 }
 
-Resampler::Resampler(int inFreq, int outFreq, ResamplerType type)
+Resampler::Resampler(int in_freq, int out_freq, ResamplerType type) :
+  state_(NULL) // all others get initialized in reset
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
-
-    Reset(inFreq, outFreq, type);
+  Reset(in_freq, out_freq, type);
 }
 
 Resampler::~Resampler()
 {
-    if (state1_)
-    {
-        free(state1_);
-    }
-    if (state2_)
-    {
-        free(state2_);
-    }
-    if (state3_)
-    {
-        free(state3_);
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-    }
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+  }
 }
 
-int Resampler::ResetIfNeeded(int inFreq, int outFreq, ResamplerType type)
+int Resampler::ResetIfNeeded(int in_freq, int out_freq, ResamplerType type)
 {
-    int tmpInFreq_kHz = inFreq / 1000;
-    int tmpOutFreq_kHz = outFreq / 1000;
-
-    if ((tmpInFreq_kHz != my_in_frequency_khz_) || (tmpOutFreq_kHz != my_out_frequency_khz_)
-            || (type != my_type_))
-    {
-        return Reset(inFreq, outFreq, type);
-    } else
-    {
-        return 0;
-    }
+  if (!state_ || type != type_ ||
+      in_freq != in_freq_ || out_freq != out_freq_)
+  {
+    // Note that fixed-rate resamplers where input == output rate will
+    // have state_ == NULL, and will call Reset() here - but Reset() won't
+    // do anything beyond overwriting the member vars unless it needs a
+    // real resampler.
+    return Reset(in_freq, out_freq, type);
+  } else {
+    return 0;
+  }
 }
 
-int Resampler::Reset(int inFreq, int outFreq, ResamplerType type)
+int Resampler::Reset(int in_freq, int out_freq, ResamplerType type)
 {
-
-    if (state1_)
-    {
-        free(state1_);
-        state1_ = NULL;
-    }
-    if (state2_)
-    {
-        free(state2_);
-        state2_ = NULL;
-    }
-    if (state3_)
-    {
-        free(state3_);
-        state3_ = NULL;
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-        in_buffer_ = NULL;
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-        out_buffer_ = NULL;
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-        slave_left_ = NULL;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-        slave_right_ = NULL;
-    }
-
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-
-    // This might be overridden if parameters are not accepted.
-    my_type_ = type;
-
-    // Start with a math exercise, Euclid's algorithm to find the gcd:
-
-    int a = inFreq;
-    int b = outFreq;
-    int c = a % b;
-    while (c != 0)
-    {
-        a = b;
-        b = c;
-        c = a % b;
-    }
-    // b is now the gcd;
-
-    // We need to track what domain we're in.
-    my_in_frequency_khz_ = inFreq / 1000;
-    my_out_frequency_khz_ = outFreq / 1000;
-
-    // Scale with GCD
-    inFreq = inFreq / b;
-    outFreq = outFreq / b;
-
-    // Do we need stereo?
-    if ((my_type_ & 0xf0) == 0x20)
-    {
-        // Change type to mono
-        type = static_cast<ResamplerType>(
-            ((static_cast<int>(type) & 0x0f) + 0x10));
-        slave_left_ = new Resampler(inFreq, outFreq, type);
-        slave_right_ = new Resampler(inFreq, outFreq, type);
-    }
+  uint32_t channels = (type == kResamplerSynchronousStereo ||
+                       type == kResamplerFixedSynchronousStereo) ? 2 : 1;
 
-    if (inFreq == outFreq)
-    {
-        my_mode_ = kResamplerMode1To1;
-    } else if (inFreq == 1)
-    {
-        switch (outFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode1To2;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode1To3;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode1To4;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode1To6;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode1To12;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if (outFreq == 1)
-    {
-        switch (inFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode2To1;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode3To1;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode4To1;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode6To1;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode12To1;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if ((inFreq == 2) && (outFreq == 3))
-    {
-        my_mode_ = kResamplerMode2To3;
-    } else if ((inFreq == 2) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode2To11;
-    } else if ((inFreq == 4) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode4To11;
-    } else if ((inFreq == 8) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode8To11;
-    } else if ((inFreq == 3) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode3To2;
-    } else if ((inFreq == 11) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode11To2;
-    } else if ((inFreq == 11) && (outFreq == 4))
-    {
-        my_mode_ = kResamplerMode11To4;
-    } else if ((inFreq == 11) && (outFreq == 16))
-    {
-        my_mode_ = kResamplerMode11To16;
-    } else if ((inFreq == 11) && (outFreq == 32))
-    {
-        my_mode_ = kResamplerMode11To32;
-    } else if ((inFreq == 11) && (outFreq == 8))
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+    state_ = NULL;
+  }
+  type_ = type;
+  channels_ = channels;
+  in_freq_ = in_freq;
+  out_freq_ = out_freq;
+
+  // For fixed-rate, same-rate resampling we just memcpy, so we don't spin up a resampler
+  if (in_freq != out_freq || !IsFixedRate())
+  {
+    state_ = speex_resampler_init(channels, in_freq, out_freq, RESAMPLER_QUALITY, NULL);
+    if (!state_)
     {
-        my_mode_ = kResamplerMode11To8;
-    } else
-    {
-        my_type_ = kResamplerInvalid;
-        return -1;
+      return -1;
     }
-
-    // Now create the states we need
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            // No state needed;
-            break;
-        case kResamplerMode1To2:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To3:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            break;
-        case kResamplerMode1To4:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To6:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:6
-            state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state2_);
-            break;
-        case kResamplerMode1To12:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 4:12
-            state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz(
-                (WebRtcSpl_State16khzTo48khz*) state3_);
-            break;
-        case kResamplerMode2To3:
-            // 2:6
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            // 6:3
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode2To11:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state2_);
-            break;
-        case kResamplerMode4To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state1_);
-            break;
-        case kResamplerMode8To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo22khz));
-            WebRtcSpl_ResetResample16khzTo22khz((WebRtcSpl_State16khzTo22khz *)state1_);
-            break;
-        case kResamplerMode11To16:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To32:
-            // 11 -> 22
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            // 22 -> 16
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-
-            // 16 -> 32
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode2To1:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To1:
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            break;
-        case kResamplerMode4To1:
-            // 4:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode6To1:
-            // 6:2
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode12To1:
-            // 12:4
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz(
-                (WebRtcSpl_State48khzTo16khz*) state1_);
-            // 4:2
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To2:
-            // 3:6
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 6:2
-            state2_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To2:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode11To4:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-            break;
-        case kResamplerMode11To8:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state1_);
-            break;
-
-    }
-
-    return 0;
+  }
+  return 0;
 }
 
-// Synchronous resampling, all output samples are written to samplesOut
-int Resampler::Push(const int16_t * samplesIn, int lengthIn, int16_t* samplesOut,
-                    int maxLen, int &outLen)
+// Synchronous resampling, all output samples are written to samples_out
+// TODO(jesup) Change to take samples-per-channel in and out
+int Resampler::Push(const int16_t* samples_in, int length_in,
+                    int16_t* samples_out, int max_len, int &out_len)
 {
-    // Check that the resampler is not in asynchronous mode
-    if (my_type_ & 0x0f)
-    {
-        return -1;
-    }
-
-    // Do we have a stereo signal?
-    if ((my_type_ & 0xf0) == 0x20)
+  if (max_len < length_in)
+  {
+    return -1;
+  }
+  if (!state_)
+  {
+    if (!IsFixedRate() || in_freq_ != out_freq_)
     {
-
-        // Split up the signal and call the slave object for each channel
-
-        int16_t* left = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* right = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* out_left = (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int16_t* out_right =
-                (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int res = 0;
-        for (int i = 0; i < lengthIn; i += 2)
-        {
-            left[i >> 1] = samplesIn[i];
-            right[i >> 1] = samplesIn[i + 1];
-        }
-
-        // It's OK to overwrite the local parameter, since it's just a copy
-        lengthIn = lengthIn / 2;
-
-        int actualOutLen_left = 0;
-        int actualOutLen_right = 0;
-        // Do resampling for right channel
-        res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, actualOutLen_left);
-        res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, actualOutLen_right);
-        if (res || (actualOutLen_left != actualOutLen_right))
-        {
-            free(left);
-            free(right);
-            free(out_left);
-            free(out_right);
-            return -1;
-        }
-
-        // Reassemble the signal
-        for (int i = 0; i < actualOutLen_left; i++)
-        {
-            samplesOut[i * 2] = out_left[i];
-            samplesOut[i * 2 + 1] = out_right[i];
-        }
-        outLen = 2 * actualOutLen_left;
-
-        free(left);
-        free(right);
-        free(out_left);
-        free(out_right);
-
-        return 0;
+      // Since we initialize to a non-Fixed type, Push() will fail
+      // until Reset() is called
+      return -1;
     }
 
-    // Containers for temp samples
-    int16_t* tmp;
-    int16_t* tmp_2;
-    // tmp data for resampling routines
-    int32_t* tmp_mem;
-
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            memcpy(samplesOut, samplesIn, lengthIn * sizeof(int16_t));
-            outLen = lengthIn;
-            break;
-        case kResamplerMode1To2:
-            if (maxLen < (lengthIn * 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-            return 0;
-        case kResamplerMode1To3:
-
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn * 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode1To4:
-            if (maxLen < (lengthIn * 4))
-            {
-                return -1;
-            }
-
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:4
-            WebRtcSpl_UpsampleBy2(tmp, lengthIn * 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn * 4;
-            free(tmp);
-            return 0;
-        case kResamplerMode1To6:
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 6))
-            {
-                return -1;
-            }
-
-            //1:2
-
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-
-            for (int i = 0; i < outLen; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode1To12:
-            // We can only handle blocks of 40 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 40) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn * 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*) malloc(sizeof(int16_t) * 4 * lengthIn);
-            //1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
-                                  (int32_t*) state1_);
-            outLen = lengthIn * 2;
-            //2:4
-            WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (int32_t*) state2_);
-            outLen = outLen * 2;
-            // 4:12
-            for (int i = 0; i < outLen; i += 160) {
-              // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
-              // as input and outputs a resampled block of 480 samples. The
-              // data is now actually in 32 kHz sampling rate, despite the
-              // function name, and with a resampling factor of three becomes
-              // 96 kHz.
-              WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                             (WebRtcSpl_State16khzTo48khz*) state3_,
-                                             tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode2To3:
-            if (maxLen < (lengthIn * 3 / 2))
-            {
-                return -1;
-            }
-            // 2:6
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, tmp + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            lengthIn = lengthIn * 3;
-            // 6:3
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode2To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 2))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(tmp + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state2_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode4To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 4))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode8To11:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 8))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(88 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 8,
-                                               (WebRtcSpl_State16khzTo22khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 8;
-            free(tmp_mem);
-            return 0;
-
-        case kResamplerMode11To16:
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 16) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(tmp + i, samplesOut + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            outLen = (lengthIn * 16) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode11To32:
-
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 32) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            // 11 -> 22 kHz in samplesOut
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-
-            // 22 -> 16 in tmp
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesOut + i, tmp + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            // 16 -> 32 in samplesOut
-            WebRtcSpl_UpsampleBy2(tmp, (lengthIn * 16) / 11, samplesOut,
-                                  (int32_t*)state3_);
-
-            outLen = (lengthIn * 32) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode2To1:
-            if (maxLen < (lengthIn / 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn / 2;
-            return 0;
-        case kResamplerMode3To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode4To1:
-            if (maxLen < (lengthIn / 4))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * lengthIn / 2);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn / 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 4;
-            free(tmp);
-            return 0;
-
-        case kResamplerMode6To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 6))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn) / 3);
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            WebRtcSpl_DownsampleBy2(tmp, outLen, samplesOut, (int32_t*)state2_);
-            free(tmp);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode12To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn / 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 3);
-            tmp_2 = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 6);
-            // 12:4
-            for (int i = 0; i < lengthIn; i += 480) {
-              // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
-              // as input and outputs a resampled block of 160 samples. The
-              // data is now actually in 96 kHz sampling rate, despite the
-              // function name, and with a resampling factor of 1/3 becomes
-              // 32 kHz.
-              WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                             (WebRtcSpl_State48khzTo16khz*) state1_,
-                                             tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2,
-                                    (int32_t*) state2_);
-            outLen = outLen / 2;
-            free(tmp);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
-                                    (int32_t*) state3_);
-            free(tmp_2);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode3To2:
-            if (maxLen < (lengthIn * 2 / 3))
-            {
-                return -1;
-            }
-            // 3:6
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 2));
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-            // 6:2
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                free(tmp);
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(tmp + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To2:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 2) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((lengthIn * 4) / 11 * sizeof(int16_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, tmp + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            lengthIn = (lengthIn * 4) / 11;
-
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode11To4:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 4) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, samplesOut + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 4) / 11;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To8:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 8) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesIn + i, samplesOut + (i * 8) / 11,
-                                               (WebRtcSpl_State22khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 8) / 11;
-            free(tmp_mem);
-            return 0;
-            break;
-
-    }
+    // Fixed-rate, same-freq "resample" - use memcpy, which avoids
+    // filtering and delay.  For non-fixed rates, where we might tweak
+    // from 48000->48000 to 48000->48001 for drift, we need to resample
+    // (and filter) all the time to avoid glitches on rate changes.
+    memcpy(samples_out, samples_in, length_in*sizeof(*samples_in));
+    out_len = length_in;
     return 0;
-}
-
-// Asynchronous resampling, input
-int Resampler::Insert(int16_t * samplesIn, int lengthIn)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-    int sizeNeeded, tenMsblock;
-
-    // Determine need for size of outBuffer
-    sizeNeeded = out_buffer_size_ + ((lengthIn + in_buffer_size_) * my_out_frequency_khz_)
-            / my_in_frequency_khz_;
-    if (sizeNeeded > out_buffer_size_max_)
-    {
-        // Round the value upwards to complete 10 ms blocks
-        tenMsblock = my_out_frequency_khz_ * 10;
-        sizeNeeded = (sizeNeeded / tenMsblock + 1) * tenMsblock;
-        out_buffer_ = (int16_t*)realloc(out_buffer_, sizeNeeded * sizeof(int16_t));
-        out_buffer_size_max_ = sizeNeeded;
-    }
-
-    // If we need to use inBuffer, make sure all input data fits there.
-
-    tenMsblock = my_in_frequency_khz_ * 10;
-    if (in_buffer_size_ || (lengthIn % tenMsblock))
-    {
-        // Check if input buffer size is enough
-        if ((in_buffer_size_ + lengthIn) > in_buffer_size_max_)
-        {
-            // Round the value upwards to complete 10 ms blocks
-            sizeNeeded = ((in_buffer_size_ + lengthIn) / tenMsblock + 1) * tenMsblock;
-            in_buffer_ = (int16_t*)realloc(in_buffer_,
-                                           sizeNeeded * sizeof(int16_t));
-            in_buffer_size_max_ = sizeNeeded;
-        }
-        // Copy in data to input buffer
-        memcpy(in_buffer_ + in_buffer_size_, samplesIn, lengthIn * sizeof(int16_t));
-
-        // Resample all available 10 ms blocks
-        int lenOut;
-        int dataLenToResample = (in_buffer_size_ / tenMsblock) * tenMsblock;
-        Push(in_buffer_, dataLenToResample, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-
-        // Save the rest
-        memmove(in_buffer_, in_buffer_ + dataLenToResample,
-                (in_buffer_size_ - dataLenToResample) * sizeof(int16_t));
-        in_buffer_size_ -= dataLenToResample;
-    } else
-    {
-        // Just resample
-        int lenOut;
-        Push(in_buffer_, lengthIn, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-    }
-
-    return 0;
-}
-
-// Asynchronous resampling output, remaining samples are buffered
-int Resampler::Pull(int16_t* samplesOut, int desiredLen, int &outLen)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-
-    // Check that we have enough data
-    if (desiredLen <= out_buffer_size_)
-    {
-        // Give out the date
-        memcpy(samplesOut, out_buffer_, desiredLen * sizeof(int32_t));
-
-        // Shuffle down remaining
-        memmove(out_buffer_, out_buffer_ + desiredLen,
-                (out_buffer_size_ - desiredLen) * sizeof(int16_t));
-
-        // Update remaining size
-        out_buffer_size_ -= desiredLen;
-
-        return 0;
-    } else
-    {
-        return -1;
-    }
+  }
+  assert(channels_ == 1 || channels_ == 2);
+  spx_uint32_t len = length_in = (length_in >> (channels_ - 1));
+  spx_uint32_t out = (spx_uint32_t) (max_len >> (channels_ - 1));
+  if ((speex_resampler_process_interleaved_int(state_, samples_in, &len,
+                             samples_out, &out) != RESAMPLER_ERR_SUCCESS) ||
+      len != (spx_uint32_t) length_in)
+  {
+    return -1;
+  }
+  out_len = (int) (channels_ * out);
+  return 0;
 }
 
 }  // namespace webrtc
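
The rewritten Push() defers every real rate conversion to the speexdsp resampler. Below is a minimal standalone sketch of the underlying speexdsp calls, assuming the speexdsp headers and library are available; the rates and buffer sizes are illustrative:

    #include <assert.h>
    #include <stdio.h>
    #include <speex/speex_resampler.h>

    int main() {
      int err = 0;
      // Mono, 16 kHz -> 48 kHz; quality selects the filter length, which in
      // turn determines the resampling delay the unit test must tolerate.
      SpeexResamplerState* st = speex_resampler_init(
          1, 16000, 48000, SPEEX_RESAMPLER_QUALITY_DEFAULT, &err);
      assert(st && err == RESAMPLER_ERR_SUCCESS);

      spx_int16_t in[160] = {0};   // 10 ms at 16 kHz
      spx_int16_t out[480] = {0};  // room for 10 ms at 48 kHz
      spx_uint32_t in_len = 160;   // samples per channel; updated on return
      spx_uint32_t out_len = 480;
      if (speex_resampler_process_interleaved_int(st, in, &in_len,
                                                  out, &out_len) !=
          RESAMPLER_ERR_SUCCESS) {
        speex_resampler_destroy(st);
        return -1;
      }
      printf("consumed %u, produced %u samples per channel\n",
             (unsigned)in_len, (unsigned)out_len);
      speex_resampler_destroy(st);
      return 0;
    }

speex_resampler_process_interleaved_int() counts in_len and out_len in samples per channel, which is why Push() shifts length_in and max_len right by (channels_ - 1) before the call and multiplies out by channels_ afterwards.
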
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
@@ -3,67 +3,59 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <algorithm>  // for std::min, used in RunResampleTest().
+#include <math.h>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 
 // TODO(andrew): this is a work-in-progress. Many more tests are needed.
 
 namespace webrtc {
 namespace {
 const ResamplerType kTypes[] = {
   kResamplerSynchronous,
-  kResamplerAsynchronous,
   kResamplerSynchronousStereo,
-  kResamplerAsynchronousStereo
-  // kResamplerInvalid excluded
 };
 const size_t kTypesSize = sizeof(kTypes) / sizeof(*kTypes);
 
 // Rates we must support.
 const int kMaxRate = 96000;
 const int kRates[] = {
   8000,
   16000,
   32000,
-  44000,
+  44100,
   48000,
   kMaxRate
 };
 const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
 const int kMaxChannels = 2;
 const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
 
-// TODO(andrew): should we be supporting these combinations?
-bool ValidRates(int in_rate, int out_rate) {
-  // Not the most compact notation, for clarity.
-  if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
-      (out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
-    return false;
-  }
-
-  return true;
-}
-
 class ResamplerTest : public testing::Test {
  protected:
   ResamplerTest();
   virtual void SetUp();
   virtual void TearDown();
+  void RunResampleTest(int channels,
+                       int src_sample_rate_hz,
+                       int dst_sample_rate_hz);
 
   Resampler rs_;
   int16_t data_in_[kDataSize];
   int16_t data_out_[kDataSize];
+  int16_t data_reference_[kDataSize];
 };
 
 ResamplerTest::ResamplerTest() {}
 
 void ResamplerTest::SetUp() {
   // Initialize input data with anything. The tests are content independent.
   memset(data_in_, 1, sizeof(data_in_));
 }
@@ -78,66 +70,141 @@ TEST_F(ResamplerTest, Reset) {
   // Check that all required combinations are supported.
   for (size_t i = 0; i < kRatesSize; ++i) {
     for (size_t j = 0; j < kRatesSize; ++j) {
       for (size_t k = 0; k < kTypesSize; ++k) {
         std::ostringstream ss;
         ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
             << ", type: " << kTypes[k];
         SCOPED_TRACE(ss.str());
-        if (ValidRates(kRates[i], kRates[j]))
-          EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
-        else
-          EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
       }
     }
   }
 }
 
-// TODO(tlegrand): Replace code inside the two tests below with a function
-// with number of channels and ResamplerType as input.
-TEST_F(ResamplerTest, Synchronous) {
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(int16_t* buffer, float data, int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i] = data * i;
+  }
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(int16_t* buffer, float left, float right,
+                    int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i * 2] = left * i;
+    buffer[i * 2 + 1] = right * i;
+  }
+}
+
+// Computes the best SNR based on the error between |ref_frame| and
+// |test_frame|. It allows for a sample delay between the signals to
+// compensate for the resampling delay.
+float ComputeSNR(const int16_t* reference, const int16_t* test,
+                 int sample_rate_hz, int channels, int max_delay) {
+  float best_snr = 0;
+  int best_delay = 0;
+  int samples_per_channel = sample_rate_hz/100;
+  for (int delay = 0; delay < max_delay; delay++) {
+    float mse = 0;
+    float variance = 0;
+    for (int i = 0; i < samples_per_channel * channels - delay; i++) {
+      int error = reference[i] - test[i + delay];
+      mse += error * error;
+      variance += reference[i] * reference[i];
+    }
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  return best_snr;
+}
 
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-      }
+void ResamplerTest::RunResampleTest(int channels,
+                                    int src_sample_rate_hz,
+                                    int dst_sample_rate_hz) {
+  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 30;
+  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float kDstLeft = kResamplingFactor * kSrcLeft;
+  const float kDstRight = kResamplingFactor * kSrcRight;
+  if (channels == 1)
+    SetMonoFrame(data_in_, kSrcLeft, src_sample_rate_hz);
+  else
+    SetStereoFrame(data_in_, kSrcLeft, kSrcRight, src_sample_rate_hz);
+
+  if (channels == 1) {
+    SetMonoFrame(data_out_, 0, dst_sample_rate_hz);
+    SetMonoFrame(data_reference_, kDstLeft, dst_sample_rate_hz);
+  } else {
+    SetStereoFrame(data_out_, 0, 0, dst_sample_rate_hz);
+    SetStereoFrame(data_reference_, kDstLeft, kDstRight, dst_sample_rate_hz);
+  }
+
+  // The speex resampler has a known delay dependent on quality and rates,
+  // which we approximate here. Multiplying by two gives us a crude maximum
+  // for any resampling, as the old resampler typically (but not always)
+  // has lower delay.  The actual delay is calculated internally based on the
+  // filter length in the QualityMap.
+  static const int kInputKernelDelaySamples = 16*3;
+  const int max_delay = std::min(1.0f, 1/kResamplingFactor) *
+                        kInputKernelDelaySamples * channels * 2;
+  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
+      channels, src_sample_rate_hz, channels, dst_sample_rate_hz);
+
+  int in_length = channels * src_sample_rate_hz / 100;
+  int out_length = 0;
+  EXPECT_EQ(0, rs_.Reset(src_sample_rate_hz, dst_sample_rate_hz,
+                         (channels == 1 ?
+                          kResamplerSynchronous :
+                          kResamplerSynchronousStereo)));
+  EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                        out_length));
+  EXPECT_EQ(channels * dst_sample_rate_hz / 100, out_length);
+
+  EXPECT_GT(ComputeSNR(data_reference_, data_out_, dst_sample_rate_hz,
+                       channels, max_delay), 40.0f);
+}
+
+TEST_F(ResamplerTest, Synchronous) {
+  // Number of channels is 1, mono mode.
+  const int kChannels = 1;
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 
 TEST_F(ResamplerTest, SynchronousStereo) {
   // Number of channels is 2, stereo mode.
   const int kChannels = 2;
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
-
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kChannels * kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
-                               kResamplerSynchronousStereo));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
-                                kResamplerSynchronousStereo));
-      }
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 }  // namespace
 }  // namespace webrtc
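
The reference signal in RunResampleTest() is a ramp: a signal rising by kSrcLeft per sample at src_sample_rate_hz rises kSrcLeft * src_sample_rate_hz per second, so after resampling the per-sample step is kSrcLeft * src / dst, i.e. kResamplingFactor * kSrcLeft. The toy program below, a self-contained illustration rather than part of the test, applies the same SNR-with-delay-search formula as ComputeSNR() to a ramp and a one-sample-delayed copy of it:

    #include <math.h>
    #include <stdio.h>

    int main() {
      const int kLen = 480;
      float ref[kLen];
      float test[kLen + 1];
      for (int i = 0; i < kLen; ++i) ref[i] = 60.0f * i;
      test[0] = 0.0f;
      for (int i = 0; i < kLen; ++i) test[i + 1] = ref[i];  // 1-sample delay
      for (int delay = 0; delay < 2; ++delay) {
        float mse = 0.0f;
        float variance = 0.0f;
        for (int i = 0; i < kLen - delay; ++i) {
          float e = ref[i] - test[i + delay];
          mse += e * e;
          variance += ref[i] * ref[i];
        }
        float snr = mse > 0 ? 10.0f * log10(variance / mse) : 100.0f;
        printf("delay=%d: SNR=%.1f dB\n", delay, snr);
      }
      return 0;
    }

Delay 0 scores roughly 49 dB against this ramp, while delay 1 lines the signals up exactly and gets the 100 dB zero-error ceiling; that gap is what the delay search in ComputeSNR() relies on.
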
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
@@ -21,21 +21,21 @@ float SincResampler::Convolve_NEON(const
                                    const float* k2,
                                    double kernel_interpolation_factor) {
   float32x4_t m_input;
   float32x4_t m_sums1 = vmovq_n_f32(0);
   float32x4_t m_sums2 = vmovq_n_f32(0);
 
   const float* upper = input_ptr + kKernelSize;
   for (; input_ptr < upper; ) {
-    m_input = vld1q_f32(input_ptr);
+    m_input = vld1q_f32((const float32_t *) input_ptr);
     input_ptr += 4;
-    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
+    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32((const float32_t *) k1));
     k1 += 4;
-    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
+    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32((const float32_t *) k2));
     k2 += 4;
   }
 
   // Linearly interpolate the two "convolutions".
   m_sums1 = vmlaq_f32(
       vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
       m_sums2, vmovq_n_f32(kernel_interpolation_factor));
 
--- a/media/webrtc/trunk/webrtc/common_types.h
+++ b/media/webrtc/trunk/webrtc/common_types.h
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_COMMON_TYPES_H_
 #define WEBRTC_COMMON_TYPES_H_
 
+#include <stddef.h>  // size_t
 #include "webrtc/typedefs.h"
 
 #if defined(_MSC_VER)
 // Disable "new behavior: elements of array will be default initialized"
 // warning. Affects OverUseDetectorOptions.
 #pragma warning(disable:4351)
 #endif
 
@@ -429,17 +430,17 @@ typedef struct        // All levels are 
 enum NsModes    // type of Noise Suppression
 {
     kNsUnchanged = 0,   // previously set mode
     kNsDefault,         // platform default
     kNsConference,      // conferencing default
     kNsLowSuppression,  // lowest suppression
     kNsModerateSuppression,
     kNsHighSuppression,
-    kNsVeryHighSuppression,     // highest suppression
+    kNsVeryHighSuppression     // highest suppression
 };
 
 enum AgcModes                  // type of Automatic Gain Control
 {
     kAgcUnchanged = 0,        // previously set mode
     kAgcDefault,              // platform default
     // adaptive mode for use when analog volume control exists (e.g. for
     // PC softphone)
@@ -454,17 +455,17 @@ enum AgcModes                  // type o
 
 // EC modes
 enum EcModes                   // type of Echo Control
 {
     kEcUnchanged = 0,          // previously set mode
     kEcDefault,                // platform default
     kEcConference,             // conferencing default (aggressive AEC)
     kEcAec,                    // Acoustic Echo Cancellation
-    kEcAecm,                   // AEC mobile
+    kEcAecm                    // AEC mobile
 };
 
 // AECM modes
 enum AecmModes                 // mode of AECM
 {
     kAecmQuietEarpieceOrHeadset = 0,
                                // Quiet earpiece or headset use
     kAecmEarpiece,             // most earpiece use
@@ -506,31 +507,31 @@ enum NetEqModes             // NetEQ pla
     // Improved jitter robustness at the cost of increased delay. Can be
     // used in one-way communication.
     kNetEqStreaming = 1,
     // Optimzed for decodability of fax signals rather than for perceived audio
     // quality.
     kNetEqFax = 2,
     // Minimal buffer management. Inserts zeros for lost packets and during
     // buffer increases.
-    kNetEqOff = 3,
+    kNetEqOff = 3
 };
 
 enum OnHoldModes            // On Hold direction
 {
     kHoldSendAndPlay = 0,    // Put both sending and playing in on-hold state.
     kHoldSendOnly,           // Put only sending in on-hold state.
     kHoldPlayOnly            // Put only playing in on-hold state.
 };
 
 enum AmrMode
 {
     kRfc3267BwEfficient = 0,
     kRfc3267OctetAligned = 1,
-    kRfc3267FileStorage = 2,
+    kRfc3267FileStorage = 2
 };
 
 // ==================================================================
 // Video specific types
 // ==================================================================
 
 // Raw video types
 enum RawVideoType
@@ -593,30 +594,48 @@ struct VideoCodecVP8
     unsigned char        numberOfTemporalLayers;
     bool                 denoisingOn;
     bool                 errorConcealmentOn;
     bool                 automaticResizeOn;
     bool                 frameDroppingOn;
     int                  keyFrameInterval;
 };
 
+// H264 specific
+struct VideoCodecH264
+{
+    uint8_t        profile;
+    uint8_t        constraints;
+    uint8_t        level;
+    uint8_t        packetizationMode; // 0 or 1
+    bool           frameDroppingOn;
+    int            keyFrameInterval;
+    // These are null/0 if not externally negotiated
+    const uint8_t* spsData;
+    size_t         spsLen;
+    const uint8_t* ppsData;
+    size_t         ppsLen;
+};
+
 // Video codec types
 enum VideoCodecType
 {
     kVideoCodecVP8,
+    kVideoCodecH264,
     kVideoCodecI420,
     kVideoCodecRED,
     kVideoCodecULPFEC,
     kVideoCodecGeneric,
     kVideoCodecUnknown
 };
 
 union VideoCodecUnion
 {
     VideoCodecVP8       VP8;
+    VideoCodecH264      H264;
 };
 
 
 // Simulcast is when the same stream is encoded multiple times with different
 // settings such as resolution.
 struct SimulcastStream
 {
     unsigned short      width;
@@ -683,16 +702,35 @@ struct OverUseDetectorOptions {
   double initial_offset;
   double initial_e[2][2];
   double initial_process_noise[2];
   double initial_avg_noise;
   double initial_var_noise;
   double initial_threshold;
 };
 
+enum CPULoadState {
+  kLoadRelaxed,
+  kLoadNormal,
+  kLoadStressed
+};
+
+class CPULoadStateObserver {
+ public:
+  virtual void onLoadStateChanged(CPULoadState aNewState) = 0;
+  virtual ~CPULoadStateObserver() {}
+};
+
+class CPULoadStateCallbackInvoker {
+ public:
+  virtual void AddObserver(CPULoadStateObserver* aObserver) = 0;
+  virtual void RemoveObserver(CPULoadStateObserver* aObserver) = 0;
+  virtual ~CPULoadStateCallbackInvoker() {}
+};
+
 // This structure will have the information about when packet is actually
 // received by socket.
 struct PacketTime {
   PacketTime() : timestamp(-1), max_error_us(-1) {}
   PacketTime(int64_t timestamp, int64_t max_error_us)
       : timestamp(timestamp), max_error_us(max_error_us) {
   }
 
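
The new CPU load types form a plain observer pattern: a CPULoadStateCallbackInvoker keeps a set of CPULoadStateObserver pointers and pushes CPULoadState transitions to them. A minimal sketch of one concrete pair, assuming only the declarations above; the vector-backed invoker and its Notify() helper are illustrative, not part of this patch:

    #include <algorithm>
    #include <cstdio>
    #include <vector>
    #include "webrtc/common_types.h"

    class LoggingObserver : public webrtc::CPULoadStateObserver {
     public:
      virtual void onLoadStateChanged(webrtc::CPULoadState aNewState) {
        std::printf("CPU load state changed to %d\n",
                    static_cast<int>(aNewState));
      }
    };

    class VectorInvoker : public webrtc::CPULoadStateCallbackInvoker {
     public:
      virtual void AddObserver(webrtc::CPULoadStateObserver* aObserver) {
        observers_.push_back(aObserver);
      }
      virtual void RemoveObserver(webrtc::CPULoadStateObserver* aObserver) {
        observers_.erase(
            std::remove(observers_.begin(), observers_.end(), aObserver),
            observers_.end());
      }
      // Illustrative helper: fan a state change out to all observers.
      void Notify(webrtc::CPULoadState aNewState) {
        for (size_t i = 0; i < observers_.size(); ++i)
          observers_[i]->onLoadStateChanged(aNewState);
      }
     private:
      std::vector<webrtc::CPULoadStateObserver*> observers_;
    };
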
--- a/media/webrtc/trunk/webrtc/engine_configurations.h
+++ b/media/webrtc/trunk/webrtc/engine_configurations.h
@@ -30,17 +30,19 @@
 #endif  // WEBRTC_ARCH_ARM
 #endif  // !WEBRTC_MOZILLA_BUILD
 
 // AVT is included in all builds, along with G.711, NetEQ and CNG
 // (which are mandatory and don't have any defines).
 #define WEBRTC_CODEC_AVT
 
 // PCM16 is useful for testing and incurs only a small binary size cost.
+#ifndef WEBRTC_CODEC_PCM16
 #define WEBRTC_CODEC_PCM16
+#endif
 
 // iLBC, G.722, and Redundancy coding are excluded from Chromium and Mozilla
 // builds to reduce binary size.
 #if !defined(WEBRTC_CHROMIUM_BUILD) && !defined(WEBRTC_MOZILLA_BUILD)
 #define WEBRTC_CODEC_ILBC
 #define WEBRTC_CODEC_G722
 #define WEBRTC_CODEC_RED
 #endif  // !WEBRTC_CHROMIUM_BUILD && !WEBRTC_MOZILLA_BUILD
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/opus/opus.gypi
@@ -10,17 +10,17 @@
   'targets': [
     {
       'target_name': 'webrtc_opus',
       'type': 'static_library',
       'conditions': [
         ['build_with_mozilla==1', {
           # Mozilla provides its own build of the opus library.
           'include_dirs': [
-            '$(DIST)/include/opus',
+            '/media/libopus/include',
            ]
         }, {
           'dependencies': [
             '<(DEPTH)/third_party/opus/opus.gyp:opus'
           ],
         }],
       ],
       'include_dirs': [
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -10,32 +10,28 @@
 
 
 #include "pcm16b.h"
 
 #include <stdlib.h>
 
 #include "typedefs.h"
 
-#ifdef WEBRTC_ARCH_BIG_ENDIAN
-#include "signal_processing_library.h"
-#endif
-
 #define HIGHEND 0xFF00
 #define LOWEND    0xFF
 
 
 
 /* Encoder with int16_t Output */
 int16_t WebRtcPcm16b_EncodeW16(int16_t *speechIn16b,
                                int16_t len,
                                int16_t *speechOut16b)
 {
 #ifdef WEBRTC_ARCH_BIG_ENDIAN
-    WEBRTC_SPL_MEMCPY_W16(speechOut16b, speechIn16b, len);
+    memcpy(speechOut16b, speechIn16b, len * sizeof(int16_t));
 #else
     int i;
     for (i=0;i<len;i++) {
         speechOut16b[i]=(((uint16_t)speechIn16b[i])>>8)|((((uint16_t)speechIn16b[i])<<8)&0xFF00);
     }
 #endif
     return(len<<1);
 }
@@ -64,17 +60,17 @@ int16_t WebRtcPcm16b_Encode(int16_t *spe
 /* Decoder with int16_t Input instead of char when the int16_t Encoder is used */
 int16_t WebRtcPcm16b_DecodeW16(void *inst,
                                int16_t *speechIn16b,
                                int16_t len,
                                int16_t *speechOut16b,
                                int16_t* speechType)
 {
 #ifdef WEBRTC_ARCH_BIG_ENDIAN
-    WEBRTC_SPL_MEMCPY_W8(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
+    memcpy(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
 #else
     int i;
     int samples=len>>1;
 
     for (i=0;i<samples;i++) {
         speechOut16b[i]=(((uint16_t)speechIn16b[i])>>8)|(((uint16_t)(speechIn16b[i]&0xFF))<<8);
     }
 #endif
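
The little-endian branch byte-swaps each 16-bit sample so the encoded stream is big-endian regardless of host order, which is why the big-endian path can become a plain memcpy. A quick standalone check of the swap expression, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      int16_t sample = 0x1234;
      // Same expression as the encoder's little-endian loop.
      uint16_t swapped = (((uint16_t)sample) >> 8) |
                         ((((uint16_t)sample) << 8) & 0xFF00);
      printf("0x%04x -> 0x%04x\n", (unsigned)(uint16_t)sample,
             (unsigned)swapped);  // prints 0x1234 -> 0x3412
      return 0;
    }
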
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
@@ -41,35 +41,24 @@
         'acm_celt.h',
         'acm_cng.cc',
         'acm_cng.h',
         'acm_codec_database.cc',
         'acm_codec_database.h',
         'acm_common_defs.h',
         'acm_dtmf_playout.cc',
         'acm_dtmf_playout.h',
-        'acm_g722.cc',
-        'acm_g722.h',
-        'acm_g7221.cc',
-        'acm_g7221.h',
-        'acm_g7221c.cc',
-        'acm_g7221c.h',
         'acm_g729.cc',
         'acm_g729.h',
         'acm_g7291.cc',
         'acm_g7291.h',
         'acm_generic_codec.cc',
         'acm_generic_codec.h',
         'acm_gsmfr.cc',
         'acm_gsmfr.h',
-        'acm_ilbc.cc',
-        'acm_ilbc.h',
-        'acm_isac.cc',
-        'acm_isac.h',
-        'acm_isac_macros.h',
         'acm_opus.cc',
         'acm_opus.h',
         'acm_speex.cc',
         'acm_speex.h',
         'acm_pcm16b.cc',
         'acm_pcm16b.h',
         'acm_pcma.cc',
         'acm_pcma.h',
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
@@ -5,32 +5,46 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'audio_coding_dependencies': [
       'CNG',
-      'G711',
-      'G722',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
-      'PCM16B',
       'NetEq',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
     ],
     'audio_coding_defines': [],
     'conditions': [
       ['include_opus==1', {
         'audio_coding_dependencies': ['webrtc_opus',],
         'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
       }],
+      ['include_g711==1', {
+        'audio_coding_dependencies': ['G711',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G711',],
+      }],
+      ['include_g722==1', {
+        'audio_coding_dependencies': ['G722',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G722',],
+      }],
+      ['include_ilbc==1', {
+        'audio_coding_dependencies': ['iLBC',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ILBC',],
+      }],
+      ['include_isac==1', {
+        'audio_coding_dependencies': ['iSAC', 'iSACFix',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFX',],
+      }],
+      ['include_pcm16b==1', {
+        'audio_coding_dependencies': ['PCM16B',],
+        'audio_coding_defines': ['WEBRTC_CODEC_PCM16',],
+      }],
     ],
   },
   'targets': [
     {
       'target_name': 'audio_coding_module',
       'type': 'static_library',
       'defines': [
         '<@(audio_coding_defines)',
@@ -49,68 +63,80 @@
           '../interface',
           '../../../interface',
           '<(webrtc_root)',
         ],
       },
       'sources': [
         '../interface/audio_coding_module.h',
         '../interface/audio_coding_module_typedefs.h',
-        'acm_amr.cc',
-        'acm_amr.h',
-        'acm_amrwb.cc',
-        'acm_amrwb.h',
-        'acm_celt.cc',
-        'acm_celt.h',
         'acm_cng.cc',
         'acm_cng.h',
         'acm_codec_database.cc',
         'acm_codec_database.h',
         'acm_dtmf_detection.cc',
         'acm_dtmf_detection.h',
         'acm_dtmf_playout.cc',
         'acm_dtmf_playout.h',
-        'acm_g722.cc',
-        'acm_g722.h',
-        'acm_g7221.cc',
-        'acm_g7221.h',
-        'acm_g7221c.cc',
-        'acm_g7221c.h',
-        'acm_g729.cc',
-        'acm_g729.h',
-        'acm_g7291.cc',
-        'acm_g7291.h',
         'acm_generic_codec.cc',
         'acm_generic_codec.h',
-        'acm_gsmfr.cc',
-        'acm_gsmfr.h',
-        'acm_ilbc.cc',
-        'acm_ilbc.h',
-        'acm_isac.cc',
-        'acm_isac.h',
-        'acm_isac_macros.h',
         'acm_neteq.cc',
         'acm_neteq.h',
-        'acm_opus.cc',
-        'acm_opus.h',
-        'acm_speex.cc',
-        'acm_speex.h',
-        'acm_pcm16b.cc',
-        'acm_pcm16b.h',
-        'acm_pcma.cc',
-        'acm_pcma.h',
-        'acm_pcmu.cc',
-        'acm_pcmu.h',
         'acm_red.cc',
         'acm_red.h',
         'acm_resampler.cc',
         'acm_resampler.h',
         'audio_coding_module_impl.cc',
         'audio_coding_module_impl.h',
       ],
+      'conditions': [
+        ['include_opus==1', {
+          'sources': [
+            'acm_opus.cc',
+            'acm_opus.h',
+          ],
+        }],
+        ['include_g711==1', {
+          'sources': [
+            'acm_pcma.cc',
+            'acm_pcma.h',
+            'acm_pcmu.cc',
+            'acm_pcmu.h',
+          ],
+        }],
+        ['include_g722==1', {
+          'sources': [
+            'acm_g722.cc',
+            'acm_g722.h',
+            'acm_g7221.cc',
+            'acm_g7221.h',
+            'acm_g7221c.cc',
+            'acm_g7221c.h',
+          ],
+        }],
+        ['include_ilbc==1', {
+          'sources': [
+            'acm_ilbc.cc',
+            'acm_ilbc.h',
+          ],
+        }],
+        ['include_isac==1', {
+          'sources': [
+            'acm_isac.cc',
+            'acm_isac.h',
+            'acm_isac_macros.h',
+          ],
+        }],
+        ['include_pcm16b==1', {
+          'sources': [
+            'acm_pcm16b.cc',
+            'acm_pcm16b.h',
+          ],
+        }],
+      ],
     },
   ],
   'conditions': [
     ['include_tests==1', {
       'targets': [
         {
           'target_name': 'delay_test',
           'type': 'executable',
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
@@ -64,16 +64,18 @@
  * NETEQ_ISAC_CODEC               Enable iSAC
  *
  * NETEQ_ISAC_SWB_CODEC           Enable iSAC-SWB
  *
  * Note that the decoder of iSAC full-band operates at 32 kHz, that is the
  * decoded signal is at 32 kHz.
  * NETEQ_ISAC_FB_CODEC            Enable iSAC-FB
  *
+ * NETEQ_OPUS_CODEC               Enable Opus
+ *
  * NETEQ_G722_CODEC               Enable G.722
  *
  * NETEQ_G729_CODEC               Enable G.729
  *
  * NETEQ_G729_1_CODEC             Enable G.729.1
  *
  * NETEQ_G726_CODEC               Enable G.726
  *
@@ -299,16 +301,19 @@
     #define NETEQ_SPEEX_CODEC
 
     /* Super wideband 32kHz codecs */
     #define NETEQ_ISAC_SWB_CODEC
     #define NETEQ_32KHZ_WIDEBAND
     #define NETEQ_G722_1C_CODEC
     #define NETEQ_CELT_CODEC
 
+    /* Hack in 48 kHz support, needed for the fullband codecs below. */
+    #define NETEQ_48KHZ_WIDEBAND
+
     /* Fullband 48 kHz codecs */
     #define NETEQ_OPUS_CODEC
     #define NETEQ_ISAC_FB_CODEC
 #endif 
 
 #if (defined(NETEQ_ALL_CODECS))
     /* Special codecs */
     #define NETEQ_CNG_CODEC
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
@@ -673,16 +673,21 @@ int WebRtcNetEQ_GetDefaultCodecSettings(
             codecBytes = 1560; /* 240ms @ 52kbps (30ms frames) */
             codecBuffers = 8;
         }
         else if (codecID[i] == kDecoderOpus)
         {
             codecBytes = 15300; /* 240ms @ 510kbps (60ms frames) */
             codecBuffers = 30;  /* Replicating the value for PCMu/a */
         }
         else if ((codecID[i] == kDecoderPCM16B) ||
             (codecID[i] == kDecoderPCM16B_2ch))
         {
             codecBytes = 3360; /* 210ms */
             codecBuffers = 15;
         }
         else if ((codecID[i] == kDecoderPCM16Bwb) ||
             (codecID[i] == kDecoderPCM16Bwb_2ch))
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq4/neteq.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq4/neteq.gypi
@@ -5,27 +5,35 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'neteq_dependencies': [
       'G711',
-      'G722',
       'PCM16B',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
       'CNG',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
     ],
     'neteq_defines': [],
     'conditions': [
+      ['include_g722==1', {
+        'neteq_dependencies': ['G722'],
+        'neteq_defines': ['WEBRTC_CODEC_G722',],
+      }],
+      ['include_ilbc==1', {
+        'neteq_dependencies': ['iLBC'],
+        'neteq_defines': ['WEBRTC_CODEC_ILBC',],
+      }],
+      ['include_isac==1', {
+        'neteq_dependencies': ['iSAC', 'iSACFix',],
+        'neteq_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFIX',],
+      }],
       ['include_opus==1', {
         'neteq_dependencies': ['webrtc_opus',],
         'neteq_defines': ['WEBRTC_CODEC_OPUS',],
       }],
     ],
   },
   'targets': [
     {
@@ -124,16 +132,17 @@
           'target_name': 'audio_decoder_unittests',
           'type': '<(gtest_target_type)',
           'dependencies': [
             '<@(neteq_dependencies)',
             '<(DEPTH)/testing/gtest.gyp:gtest',
             '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
             '<(webrtc_root)/test/test.gyp:test_support_main',
           ],
+          # TODO: gate these defines on include_isac/include_g722/etc., as done above.
           'defines': [
             'AUDIO_DECODER_UNITTEST',
             'WEBRTC_CODEC_G722',
             'WEBRTC_CODEC_ILBC',
             'WEBRTC_CODEC_ISACFX',
             'WEBRTC_CODEC_ISAC',
             'WEBRTC_CODEC_PCM16',
             '<@(neteq_defines)',
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.cc
@@ -7,16 +7,17 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
 
 #include <assert.h>
 
+#include "AndroidJNIWrapper.h"
 #include "webrtc/modules/utility/interface/helpers_android.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
 
 static JavaVM* g_jvm_ = NULL;
 static JNIEnv* g_jni_env_ = NULL;
 static jobject g_context_ = NULL;
@@ -46,30 +47,20 @@ void AudioManagerJni::SetAndroidAudioDev
   assert(env);
   assert(context);
 
   // Store global Java VM variables to be accessed by API calls.
   g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
   g_jni_env_ = reinterpret_cast<JNIEnv*>(env);
   g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));
 
-  // FindClass must be made in this function since this function's contract
-  // requires it to be called by a Java thread.
-  // See
-  // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
-  // as to why this is necessary.
-  // Get the AudioManagerAndroid class object.
-  jclass javaAmClassLocal = g_jni_env_->FindClass(
-      "org/webrtc/voiceengine/AudioManagerAndroid");
-  assert(javaAmClassLocal);
-
   // Create a global reference such that the class object is not recycled by
   // the garbage collector.
-  g_audio_manager_class_ = reinterpret_cast<jclass>(
-      g_jni_env_->NewGlobalRef(javaAmClassLocal));
+  g_audio_manager_class_ = jsjni_GetGlobalClassRef(
+    "org/webrtc/voiceengine/AudioManagerAndroid");
   assert(g_audio_manager_class_);
 }
 
 void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
   g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
   g_audio_manager_class_ = NULL;
   g_jni_env_->DeleteGlobalRef(g_context_);
   g_context_ = NULL;
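
jsjni_GetGlobalClassRef() (declared in Mozilla's AndroidJNIWrapper.h) replaces the FindClass-plus-NewGlobalRef sequence because FindClass only resolves application classes when called from a Java-created thread. A rough sketch of what such a helper has to do, as an illustration rather than Mozilla's actual implementation:

    #include <jni.h>

    static jclass GetGlobalClassRef(JNIEnv* env, const char* class_name) {
      jclass local = env->FindClass(class_name);  // must run on a Java thread
      if (!local)
        return NULL;
      // Promote to a global ref so the GC can't collect the class object,
      // then drop the now-redundant local ref.
      jclass global = reinterpret_cast<jclass>(env->NewGlobalRef(local));
      env->DeleteLocalRef(local);
      return global;
    }
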
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_manager_jni.h
@@ -29,16 +29,17 @@ class AudioManagerJni {
   // called once.
   // This function must be called by a Java thread as calling it from a thread
   // created by the native application will prevent FindClass from working. See
   // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
   // for more details.
   // It has to be called for this class' APIs to be successful. Calling
   // ClearAndroidAudioDeviceObjects will prevent this class' APIs to be called
   // successfully if SetAndroidAudioDeviceObjects is not called after it.
+  static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
   static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
                                            void* context);
   // This function must be called when the AudioManagerJni class is no
   // longer needed. It frees up the global references acquired in
   // SetAndroidAudioDeviceObjects.
   static void ClearAndroidAudioDeviceObjects();
 
   bool low_latency_supported() const { return low_latency_supported_; }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -117,17 +117,17 @@ AudioRecordJni::AudioRecordJni(
       _recording(false),
       _recIsInitialized(false),
       _micIsInitialized(false),
       _startRec(false),
       _recWarning(0),
       _recError(0),
       _delayRecording(0),
       _AGC(false),
-      _samplingFreqIn((N_REC_SAMPLES_PER_SEC/1000)),
+      _samplingFreqIn(N_REC_SAMPLES_PER_SEC),
       _recAudioSource(1) { // 1 is AudioSource.MIC which is our default
   memset(_recBuffer, 0, sizeof(_recBuffer));
 }
 
 AudioRecordJni::~AudioRecordJni() {
   WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                "%s destroyed", __FUNCTION__);
 
@@ -414,36 +414,30 @@ int32_t AudioRecordJni::InitRecording() 
     }
     isAttached = true;
   }
 
   // get the method ID
   jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                "(II)I");
 
-  int samplingFreq = 44100;
-  if (_samplingFreqIn != 44)
-  {
-    samplingFreq = _samplingFreqIn * 1000;
-  }
-
   int retVal = -1;
 
   // call java sc object method
   jint res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
-                                samplingFreq);
+                                _samplingFreqIn);
   if (res < 0)
   {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                  "InitRecording failed (%d)", res);
   }
   else
   {
     // Set the audio device buffer sampling rate
-    _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn * 1000);
+    _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn);
 
     // the init rec function returns a fixed delay
     _delayRecording = res / _samplingFreqIn;
 
     _recIsInitialized = true;
     retVal = 0;
   }
 
@@ -785,24 +779,17 @@ int32_t AudioRecordJni::SetRecordingSamp
   if (samplesPerSec > 48000 || samplesPerSec < 8000)
   {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                  "  Invalid sample rate");
     return -1;
   }
 
   // set the recording sample rate to use
-  if (samplesPerSec == 44100)
-  {
-    _samplingFreqIn = 44;
-  }
-  else
-  {
-    _samplingFreqIn = samplesPerSec / 1000;
-  }
+  _samplingFreqIn = samplesPerSec;
 
   // Update the AudioDeviceBuffer
   _ptrAudioBuffer->SetRecordingSampleRate(samplesPerSec);
 
   return 0;
 }
 
 int32_t AudioRecordJni::InitJavaResources() {
@@ -992,21 +979,17 @@ int32_t AudioRecordJni::InitSampleRate()
       return -1;
     }
     isAttached = true;
   }
 
   if (_samplingFreqIn > 0)
   {
     // read the configured sampling rate
-    samplingFreq = 44100;
-    if (_samplingFreqIn != 44)
-    {
-      samplingFreq = _samplingFreqIn * 1000;
-    }
+    samplingFreq = _samplingFreqIn;
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "  Trying configured recording sampling rate %d",
                  samplingFreq);
   }
 
   // get the method ID
   jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                "(II)I");
@@ -1037,24 +1020,17 @@ int32_t AudioRecordJni::InitSampleRate()
     }
     else
     {
       keepTrying = false;
     }
   }
 
   // set the recording sample rate to use
-  if (samplingFreq == 44100)
-  {
-    _samplingFreqIn = 44;
-  }
-  else
-  {
-    _samplingFreqIn = samplingFreq / 1000;
-  }
+  _samplingFreqIn = samplingFreq;
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                "Recording sample rate set to (%d)", _samplingFreqIn);
 
   // get the method ID
   jmethodID stopRecordingID = env->GetMethodID(_javaScClass, "StopRecording",
                                                "()I");
 
@@ -1136,34 +1112,34 @@ bool AudioRecordJni::RecThreadProcess()
     _recording = true;
     _recWarning = 0;
     _recError = 0;
     _recStartStopEvent.Set();
   }
 
   if (_recording)
   {
-    uint32_t samplesToRec = _samplingFreqIn * 10;
+    uint32_t samplesToRec = _samplingFreqIn / 100;
 
     // Call java sc object method to record data to direct buffer
     // Will block until data has been recorded (see java sc class),
     // therefore we must release the lock
     UnLock();
     jint recDelayInSamples = _jniEnvRec->CallIntMethod(_javaScObj,
                                                         _javaMidRecAudio,
                                                         2 * samplesToRec);
     if (recDelayInSamples < 0)
     {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "RecordAudio failed");
       _recWarning = 1;
     }
     else
     {
-      _delayRecording = recDelayInSamples / _samplingFreqIn;
+      _delayRecording = (recDelayInSamples * 1000) / _samplingFreqIn;
     }
     Lock();
 
     // Check again since recording may have stopped during Java call
     if (_recording)
     {
       //            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
       //                         "total delay is %d", msPlayDelay + _delayRecording);
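
These hunks switch _samplingFreqIn from integer kilohertz to hertz, which removes the 44100-as-44 special case and changes the derived quantities: 10 ms of audio is now freq / 100 samples, and a delay in samples converts to milliseconds as samples * 1000 / freq. A small sanity check of that arithmetic with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const uint32_t freq_hz = 44100;                  // previously stored as 44
      uint32_t samples_per_10ms = freq_hz / 100;       // 441 samples
      int rec_delay_in_samples = 882;
      int delay_ms = (rec_delay_in_samples * 1000) / freq_hz;  // 20 ms
      printf("10 ms = %u samples, delay = %d ms\n",
             (unsigned)samples_per_10ms, delay_ms);
      return 0;
    }
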
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -112,17 +112,17 @@ AudioTrackJni::AudioTrackJni(const int32
       _playoutDeviceIsSpecified(false),
       _playing(false),
       _playIsInitialized(false),
       _speakerIsInitialized(false),
       _startPlay(false),
       _playWarning(0),
       _playError(0),
       _delayPlayout(0),
-      _samplingFreqOut((N_PLAY_SAMPLES_PER_SEC/1000)),
+      _samplingFreqOut(N_PLAY_SAMPLES_PER_SEC),
       _maxSpeakerVolume(0) {
 }
 
 AudioTrackJni::~AudioTrackJni() {
   WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                "%s destroyed", __FUNCTION__);
 
   Terminate();
@@ -416,35 +416,29 @@ int32_t AudioTrackJni::InitPlayout() {
       }
       isAttached = true;
     }
 
     // get the method ID
     jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                                 "(I)I");
 
-    int samplingFreq = 44100;
-    if (_samplingFreqOut != 44)
-    {
-      samplingFreq = _samplingFreqOut * 1000;
-    }
-
     int retVal = -1;
 
     // Call java sc object method
-    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq);
+    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, _samplingFreqOut);
     if (res < 0)
     {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "InitPlayback failed (%d)", res);
     }
     else
     {
       // Set the audio device buffer sampling rate
-      _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut * 1000);
+      _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut);
       _playIsInitialized = true;
       retVal = 0;
     }
 
     // Detach this thread if it was attached
     if (isAttached)
     {
       WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
@@ -864,24 +858,17 @@ int32_t AudioTrackJni::SetPlayoutSampleR
   if (samplesPerSec > 48000 || samplesPerSec < 8000)
   {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                  "  Invalid sample rate");
     return -1;
     }
 
   // set the playout sample rate to use
-  if (samplesPerSec == 44100)
-  {
-    _samplingFreqOut = 44;
-  }
-  else
-  {
-    _samplingFreqOut = samplesPerSec / 1000;
-  }
+  _samplingFreqOut = samplesPerSec;
 
   // Update the AudioDeviceBuffer
   _ptrAudioBuffer->SetPlayoutSampleRate(samplesPerSec);
 
   return 0;
 }
 
 bool AudioTrackJni::PlayoutWarning() const {
@@ -1157,21 +1144,17 @@ int32_t AudioTrackJni::InitSampleRate() 
 
   // get the method ID
   jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                               "(I)I");
 
   if (_samplingFreqOut > 0)
   {
     // read the configured sampling rate
-    samplingFreq = 44100;
-    if (_samplingFreqOut != 44)
-    {
-      samplingFreq = _samplingFreqOut * 1000;
-    }
+    samplingFreq = _samplingFreqOut;
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "  Trying configured playback sampling rate %d",
                  samplingFreq);
   }
   else
   {
     // set the preferred sampling frequency
     if (samplingFreq == 8000)
@@ -1215,24 +1198,17 @@ int32_t AudioTrackJni::InitSampleRate() 
   if (_maxSpeakerVolume < 1)
   {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                  "  Did not get valid max speaker volume value (%d)",
                  _maxSpeakerVolume);
   }
 
   // set the playback sample rate to use
-  if (samplingFreq == 44100)
-  {
-    _samplingFreqOut = 44;
-  }
-  else
-  {
-    _samplingFreqOut = samplingFreq / 1000;
-  }
+  _samplingFreqOut = samplingFreq;
 
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                "Playback sample rate set to (%d)", _samplingFreqOut);
 
   // get the method ID
   jmethodID stopPlaybackID = env->GetMethodID(_javaScClass, "StopPlayback",
                                               "()I");
 
@@ -1361,17 +1337,17 @@ bool AudioTrackJni::PlayThreadProcess()
         {
           WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                        "PlayAudio failed (%d)", res);
             _playWarning = 1;
         }
         else if (res > 0)
         {
          // We are not recording and have received a delay value from playback
-          _delayPlayout = res / _samplingFreqOut;
+          _delayPlayout = (res * 1000) / _samplingFreqOut;
         }
         Lock();
 
   }  // _playing
 
   if (_shutdownPlayThread)
   {
     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
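With _samplingFreqOut now held in Hz rather than kHz, the delay that PlayAudio reports in samples has to be scaled explicitly into milliseconds, which is what the (res * 1000) / _samplingFreqOut change does. A minimal sketch of the unit-correct conversion (DelayMs is an illustrative name, not part of this patch):

    // delay_ms = delay_samples * 1000 / rate_hz
    // e.g. 441 samples at 44100 Hz -> 441 * 1000 / 44100 = 10 ms
    static int DelayMs(int delay_samples, int rate_hz) {
      return (delay_samples * 1000) / rate_hz;
    }

Under the old kHz representation the division alone was enough (samples divided by samples-per-ms); with the rate in Hz the factor of 1000 must be restored by hand.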
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
@@ -11,17 +11,24 @@
 // The functions in this file are called from native code. They can still be
 // accessed even though they are declared private.
 
 package org.webrtc.voiceengine;
 
 import android.content.Context;
 import android.content.pm.PackageManager;
 import android.media.AudioManager;
+import android.util.Log;
 
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+import org.mozilla.gecko.mozglue.WebRTCJNITarget;
+
+@WebRTCJNITarget
 class AudioManagerAndroid {
  // Most of Google's lead devices use 44.1 kHz as the default sampling rate;
  // 44.1 kHz is also widely used on other Android devices.
   private static final int DEFAULT_SAMPLING_RATE = 44100;
  // Arbitrarily chosen frame size, close to the value returned on a Nexus 4.
   // Return this default value when
   // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
   private static final int DEFAULT_FRAMES_PER_BUFFER = 256;
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/opensles_input.h"
 
 #include <assert.h>
+#include <dlfcn.h>
 
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
@@ -58,17 +59,18 @@ OpenSlesInput::OpenSlesInput(
       sles_engine_itf_(NULL),
       sles_recorder_(NULL),
       sles_recorder_itf_(NULL),
       sles_recorder_sbq_itf_(NULL),
       audio_buffer_(NULL),
       active_queue_(0),
       rec_sampling_rate_(0),
       agc_enabled_(false),
-      recording_delay_(0) {
+      recording_delay_(0),
+      opensles_lib_(NULL) {
 }
 
 OpenSlesInput::~OpenSlesInput() {
 }
 
 int32_t OpenSlesInput::SetAndroidAudioDeviceObjects(void* javaVM,
                                                     void* env,
                                                     void* context) {
@@ -76,25 +78,51 @@ int32_t OpenSlesInput::SetAndroidAudioDe
 }
 
 void OpenSlesInput::ClearAndroidAudioDeviceObjects() {
 }
 
 int32_t OpenSlesInput::Init() {
   assert(!initialized_);
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to dlopen OpenSLES library");
+      return -1;
+  }
+
+  f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_RECORD_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_RECORD");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_RECORD_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to find OpenSLES function");
+      return -1;
+  }
+
   // Set up OpenSL engine.
-  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
-                                          NULL, NULL),
+  OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
+                                            NULL, NULL),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                     SL_BOOLEAN_FALSE),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
-                                                         SL_IID_ENGINE,
+                                                         SL_IID_ENGINE_,
                                                          &sles_engine_itf_),
                            -1);
 
   if (InitSampleRate() != 0) {
     return -1;
   }
   AllocateBuffers();
   initialized_ = true;
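Reduced to its essentials, the late binding added to Init() is: dlopen the library, dlsym each required symbol, and bail out if any lookup returns NULL. A self-contained sketch of the same pattern (note that, unlike the patched Init(), the sketch also closes the handle when a symbol is missing):

    #include <dlfcn.h>
    #include <SLES/OpenSLES.h>

    typedef SLresult (*slCreateEngine_t)(SLObjectItf*, SLuint32,
                                         const SLEngineOption*, SLuint32,
                                         const SLInterfaceID*, const SLboolean*);

    static bool LoadOpenSles(void** lib, slCreateEngine_t* create_engine) {
      *lib = dlopen("libOpenSLES.so", RTLD_LAZY);
      if (!*lib)
        return false;  // library not present on this device
      *create_engine = (slCreateEngine_t)dlsym(*lib, "slCreateEngine");
      if (!*create_engine) {
        dlclose(*lib);  // undo the open on partial failure
        *lib = NULL;
        return false;
      }
      return true;
    }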
@@ -103,16 +131,17 @@ int32_t OpenSlesInput::Init() {
 
 int32_t OpenSlesInput::Terminate() {
   // It is assumed that the caller has stopped recording before terminating.
   assert(!recording_);
   (*sles_engine_)->Destroy(sles_engine_);
   initialized_ = false;
   mic_initialized_ = false;
   rec_initialized_ = false;
+  dlclose(opensles_lib_);
   return 0;
 }
 
 int32_t OpenSlesInput::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
   assert(index == 0);
   // Empty strings.
@@ -229,16 +258,24 @@ int32_t OpenSlesInput::MicrophoneBoost(b
   return -1;  // Not supported
 }
 
 int32_t OpenSlesInput::StereoRecordingIsAvailable(bool& available) {  // NOLINT
   available = false;  // Stereo recording not supported on Android.
   return 0;
 }
 
+int32_t OpenSlesInput::SetStereoRecording(bool enable) {  // NOLINT
+  if (enable) {
+    return -1;
+  } else {
+    return 0;
+  }
+}
+
 int32_t OpenSlesInput::StereoRecording(bool& enabled) const {  // NOLINT
   enabled = false;
   return 0;
 }
 
 int32_t OpenSlesInput::RecordingDelay(uint16_t& delayMS) const {  // NOLINT
   delayMS = recording_delay_;
   return 0;
@@ -272,18 +309,22 @@ void OpenSlesInput::UpdateRecordingDelay
   // TODO(hellner): Add accurate delay estimate.
   // On average half the current buffer will have been filled with audio.
   int outstanding_samples =
       (TotalBuffersUsed() - 0.5) * buffer_size_samples();
   recording_delay_ = outstanding_samples / (rec_sampling_rate_ / 1000);
 }
 
 void OpenSlesInput::UpdateSampleRate() {
+#if !defined(WEBRTC_GONK)
   rec_sampling_rate_ = audio_manager_.low_latency_supported() ?
       audio_manager_.native_output_sample_rate() : kDefaultSampleRate;
+#else
+  rec_sampling_rate_ = kDefaultSampleRate;
+#endif
 }
 
 void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
   // Buffer size is 10ms of data.
   num_fifo_buffers_needed_ = kNum10MsToBuffer;
 }
 
 void OpenSlesInput::AllocateBuffers() {
@@ -347,17 +388,17 @@ bool OpenSlesInput::CreateAudioRecorder(
   SLDataFormat_PCM configuration =
       webrtc_opensl::CreatePcmConfiguration(rec_sampling_rate_);
   SLDataSink audio_sink = { &simple_buf_queue, &configuration };
 
  // Interfaces for recording Android audio data and for Android
  // configuration are needed. Note that the interfaces still need to be
  // initialized; this only tells OpenSL that they will be needed at some
  // point.
   const SLInterfaceID id[kNumInterfaces] = {
-    SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_ANDROIDSIMPLEBUFFERQUEUE_, SL_IID_ANDROIDCONFIGURATION_ };
   const SLboolean req[kNumInterfaces] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
                                                &sles_recorder_,
                                                &audio_source,
                                                &audio_sink,
                                                kNumInterfaces,
@@ -365,23 +406,23 @@ bool OpenSlesInput::CreateAudioRecorder(
                                                req),
       false);
 
   // Realize the recorder in synchronous mode.
   OPENSL_RETURN_ON_FAILURE((*sles_recorder_)->Realize(sles_recorder_,
                                                       SL_BOOLEAN_FALSE),
                            false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
+      (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD_,
                                       static_cast<void*>(&sles_recorder_itf_)),
       false);
   OPENSL_RETURN_ON_FAILURE(
       (*sles_recorder_)->GetInterface(
           sles_recorder_,
-          SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+          SL_IID_ANDROIDSIMPLEBUFFERQUEUE_,
           static_cast<void*>(&sles_recorder_sbq_itf_)),
       false);
   return true;
 }
 
 void OpenSlesInput::DestroyAudioRecorder() {
   event_.Stop();
   if (sles_recorder_sbq_itf_) {
@@ -515,16 +556,17 @@ bool OpenSlesInput::CbThreadImpl() {
   CriticalSectionScoped lock(crit_sect_.get());
   if (HandleOverrun(event_id, event_msg)) {
     return recording_;
   }
   // If the fifo_ has audio data process it.
   while (fifo_->size() > 0 && recording_) {
     int8_t* audio = fifo_->Pop();
     audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
-    audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
+    audio_buffer_->SetVQEData(delay_provider_ ?
+                              delay_provider_->PlayoutDelayMs() : 0,
                               recording_delay_, 0);
     audio_buffer_->DeliverRecordedData();
   }
   return recording_;
 }
 
 }  // namespace webrtc
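The new null check on delay_provider_ lets the capture side run when no playout stream exists to supply a delay estimate; the VQE data then simply carries a playout delay of zero. The guard, as a one-line sketch with an illustrative name:

    // Report 0 ms of playout delay when no provider is attached.
    static int PlayoutDelayOrZero(PlayoutDelayProvider* provider) {
      return provider ? provider->PlayoutDelayMs() : 0;
    }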
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
@@ -10,17 +10,19 @@
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
+#if !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#endif
 #include "webrtc/modules/audio_device/android/low_latency_event.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
 
 namespace webrtc {
 
 class AudioDeviceBuffer;
@@ -100,17 +102,17 @@ class OpenSlesInput {
 
   // Microphone boost control
   int32_t MicrophoneBoostIsAvailable(bool& available);  // NOLINT
   int32_t SetMicrophoneBoost(bool enable);
   int32_t MicrophoneBoost(bool& enabled) const;  // NOLINT
 
   // Stereo support
   int32_t StereoRecordingIsAvailable(bool& available);  // NOLINT
-  int32_t SetStereoRecording(bool enable) { return -1; }
+  int32_t SetStereoRecording(bool enable);
   int32_t StereoRecording(bool& enabled) const;  // NOLINT
 
   // Delay information and control
   int32_t RecordingDelay(uint16_t& delayMS) const;  // NOLINT
 
   bool RecordingWarning() const { return false; }
   bool RecordingError() const  { return false; }
   void ClearRecordingWarning() {}
@@ -120,17 +122,17 @@ class OpenSlesInput {
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
  private:
   enum {
     kNumInterfaces = 2,
     // Keep as few OpenSL buffers as possible to avoid wasting memory. 2 is
     // minimum for playout. Keep 2 for recording as well.
     kNumOpenSlBuffers = 2,
-    kNum10MsToBuffer = 3,
+    kNum10MsToBuffer = 8,
   };
 
   int InitSampleRate();
   int buffer_size_samples() const;
   int buffer_size_bytes() const;
   void UpdateRecordingDelay();
   void UpdateSampleRate();
   void CalculateNumFifoBuffersNeeded();
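Raising kNum10MsToBuffer from 3 to 8 widens the FIFO headroom from 30 ms to 80 ms of captured audio, since each FIFO slot holds one 10 ms buffer. The capacity arithmetic, for reference:

    // capacity_ms = slots * 10 ms per slot
    enum { kNum10MsToBuffer = 8 };
    const int kFifoCapacityMs = kNum10MsToBuffer * 10;  // 80 ms (was 3 * 10 = 30)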
@@ -166,18 +168,20 @@ class OpenSlesInput {
   bool StartCbThreads();
   void StopCbThreads();
   static bool CbThread(void* context);
  // This function must be protected against data races with threads calling
  // this class's public functions. It is a requirement for this class to be
   // Thread-compatible.
   bool CbThreadImpl();
 
+#if !defined(WEBRTC_GONK)
   // Java API handle
   AudioManagerJni audio_manager_;
+#endif
 
   int id_;
   PlayoutDelayProvider* delay_provider_;
   bool initialized_;
   bool mic_initialized_;
   bool rec_initialized_;
 
   // Members that are read/write accessed concurrently by the process thread and
@@ -213,13 +217,28 @@ class OpenSlesInput {
   int active_queue_;
 
   // Audio settings
   uint32_t rec_sampling_rate_;
   bool agc_enabled_;
 
   // Audio status
   uint16_t recording_delay_;
+
+  // dlopen for OpenSLES
+  void *opensles_lib_;
+  typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
+                                       SLuint32,
+                                       const SLEngineOption *,
+                                       SLuint32,
+                                       const SLInterfaceID *,
+                                       const SLboolean *);
+  slCreateEngine_t f_slCreateEngine;
+  SLInterfaceID SL_IID_ENGINE_;
+  SLInterfaceID SL_IID_BUFFERQUEUE_;
+  SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
+  SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
+  SLInterfaceID SL_IID_RECORD_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.cc
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/audio_device/android/opensles_output.h"
 
 #include <assert.h>
+#include <dlfcn.h>
 
 #include "webrtc/modules/audio_device/android/opensles_common.h"
 #include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 #include "webrtc/modules/audio_device/audio_device_buffer.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
@@ -58,17 +59,18 @@ OpenSlesOutput::OpenSlesOutput(const int
       sles_player_itf_(NULL),
       sles_player_sbq_itf_(NULL),
       sles_output_mixer_(NULL),
       audio_buffer_(NULL),
       active_queue_(0),
       speaker_sampling_rate_(kDefaultSampleRate),
       buffer_size_samples_(0),
       buffer_size_bytes_(0),
-      playout_delay_(0) {
+      playout_delay_(0),
+      opensles_lib_(NULL) {
 }
 
 OpenSlesOutput::~OpenSlesOutput() {
 }
 
 int32_t OpenSlesOutput::SetAndroidAudioDeviceObjects(void* javaVM,
                                                      void* env,
                                                      void* context) {
@@ -78,25 +80,53 @@ int32_t OpenSlesOutput::SetAndroidAudioD
 
 void OpenSlesOutput::ClearAndroidAudioDeviceObjects() {
   AudioManagerJni::ClearAndroidAudioDeviceObjects();
 }
 
 int32_t OpenSlesOutput::Init() {
   assert(!initialized_);
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to dlopen OpenSLES library");
+      return -1;
+  }
+
+  f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_PLAY_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_PLAY");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_VOLUME_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_VOLUME");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_PLAY_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_VOLUME_) {
+      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
+                   "  failed to find OpenSLES function");
+      return -1;
+  }
+
   // Set up OpenSl engine.
-  OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
-                                          NULL, NULL),
+  OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
+                                            NULL, NULL),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
                                                     SL_BOOLEAN_FALSE),
                            -1);
   OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
-                                                         SL_IID_ENGINE,
+                                                         SL_IID_ENGINE_,
                                                          &sles_engine_itf_),
                            -1);
   // Set up OpenSl output mix.
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_,
                                            &sles_output_mixer_,
                                            0,
                                            NULL,
@@ -118,16 +148,17 @@ int32_t OpenSlesOutput::Init() {
 int32_t OpenSlesOutput::Terminate() {
  // It is assumed that the caller has stopped playout before terminating.
   assert(!playing_);
   (*sles_output_mixer_)->Destroy(sles_output_mixer_);
   (*sles_engine_)->Destroy(sles_engine_);
   initialized_ = false;
   speaker_initialized_ = false;
   play_initialized_ = false;
+  dlclose(opensles_lib_);
   return 0;
 }
 
 int32_t OpenSlesOutput::PlayoutDeviceName(uint16_t index,
                                           char name[kAdmMaxDeviceNameSize],
                                           char guid[kAdmMaxGuidSize]) {
   assert(index == 0);
   // Empty strings.
@@ -306,24 +337,28 @@ bool OpenSlesOutput::InitSampleRate() {
 void OpenSlesOutput::UpdatePlayoutDelay() {
   // TODO(hellner): Add accurate delay estimate.
   // On average half the current buffer will have been played out.
   int outstanding_samples = (TotalBuffersUsed() - 0.5) * buffer_size_samples_;
   playout_delay_ = outstanding_samples / (speaker_sampling_rate_ / 1000);
 }
 
 bool OpenSlesOutput::SetLowLatency() {
+#if !defined(WEBRTC_GONK)
   if (!audio_manager_.low_latency_supported()) {
     return false;
   }
   buffer_size_samples_ = audio_manager_.native_buffer_size();
   assert(buffer_size_samples_ > 0);
   speaker_sampling_rate_ = audio_manager_.native_output_sample_rate();
   assert(speaker_sampling_rate_ > 0);
   return true;
+#else
+  return false;
+#endif
 }
 
 void OpenSlesOutput::CalculateNumFifoBuffersNeeded() {
   int number_of_bytes_needed =
       (speaker_sampling_rate_ * kNumChannels * sizeof(int16_t)) * 10 / 1000;
 
   // Ceiling of integer division: 1 + ((x - 1) / y)
   int buffers_per_10_ms =
@@ -399,34 +434,34 @@ bool OpenSlesOutput::CreateAudioPlayer()
   locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
   locator_outputmix.outputMix = sles_output_mixer_;
   SLDataSink audio_sink = { &locator_outputmix, NULL };
 
  // Interfaces for streaming audio data, setting volume, and Android
  // configuration are needed. Note that the interfaces still need to be
  // initialized; this only tells OpenSL that they will be needed at some
  // point.
   SLInterfaceID ids[kNumInterfaces] = {
-    SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_BUFFERQUEUE_, SL_IID_VOLUME_, SL_IID_ANDROIDCONFIGURATION_ };
   SLboolean req[kNumInterfaces] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   OPENSL_RETURN_ON_FAILURE(
       (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, &sles_player_,
                                              &audio_source, &audio_sink,
                                              kNumInterfaces, ids, req),
       false);
   // Realize the player in synchronous mode.
   OPENSL_RETURN_ON_FAILURE((*sles_player_)->Realize(sles_player_,
                                                     SL_BOOLEAN_FALSE),
                            false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY,
+      (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY_,
                                     &sles_player_itf_),
       false);
   OPENSL_RETURN_ON_FAILURE(
-      (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE,
+      (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE_,
                                     &sles_player_sbq_itf_),
       false);
   return true;
 }
 
 void OpenSlesOutput::DestroyAudioPlayer() {
   SLAndroidSimpleBufferQueueItf sles_player_sbq_itf = sles_player_sbq_itf_;
   {
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_output.h
@@ -10,17 +10,19 @@
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
+#if !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#endif
 #include "webrtc/modules/audio_device/android/low_latency_event.h"
 #include "webrtc/modules/audio_device/android/audio_common.h"
 #include "webrtc/modules/audio_device/include/audio_device_defines.h"
 #include "webrtc/modules/audio_device/include/audio_device.h"
 #include "webrtc/system_wrappers/interface/scoped_ptr.h"
 
 namespace webrtc {
 
@@ -184,18 +186,20 @@ class OpenSlesOutput : public PlayoutDel
   bool StartCbThreads();
   void StopCbThreads();
   static bool CbThread(void* context);
  // This function must be protected against data races with threads calling
  // this class's public functions. It is a requirement for this class to be
   // Thread-compatible.
   bool CbThreadImpl();
 
+#if !defined(WEBRTC_GONK)
   // Java API handle
   AudioManagerJni audio_manager_;
+#endif
 
   int id_;
   bool initialized_;
   bool speaker_initialized_;
   bool play_initialized_;
 
   // Members that are read/write accessed concurrently by the process thread and
   // threads calling public functions of this class.
@@ -232,13 +236,29 @@ class OpenSlesOutput : public PlayoutDel
 
   // Audio settings
   uint32_t speaker_sampling_rate_;
   int buffer_size_samples_;
   int buffer_size_bytes_;
 
   // Audio status
   uint16_t playout_delay_;
+
+  // dlopen for OpenSLES
+  void *opensles_lib_;
+  typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
+                                       SLuint32,
+                                       const SLEngineOption *,
+                                       SLuint32,
+                                       const SLInterfaceID *,
+                                       const SLboolean *);
+  slCreateEngine_t f_slCreateEngine;
+  SLInterfaceID SL_IID_ENGINE_;
+  SLInterfaceID SL_IID_BUFFERQUEUE_;
+  SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
+  SLInterfaceID SL_IID_PLAY_;
+  SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
+  SLInterfaceID SL_IID_VOLUME_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/single_rw_fifo.cc
@@ -3,27 +3,43 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#if defined(_MSC_VER)
+#include <windows.h>
+#endif
+
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
 
 static int UpdatePos(int pos, int capacity) {
   return (pos + 1) % capacity;
 }
 
 namespace webrtc {
 
 namespace subtle {
 
-#if defined(__ARMEL__)
+// Start with compiler support, then processor-specific hacks.
+#if defined(__GNUC__) || defined(__clang__)
+// __sync_synchronize() is a GCC builtin, also supported by Clang.
+inline void MemoryBarrier() {
+  __sync_synchronize();
+}
+
+#elif defined(_MSC_VER)
+inline void MemoryBarrier() {
+  ::MemoryBarrier();
+}
+
+#elif defined(__ARMEL__)
 // From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm_gcc.h
 // Note that it is only the MemoryBarrier function that makes this class arm
 // specific. Borrowing other MemoryBarrier implementations, this class could
 // be extended to more platforms.
 inline void MemoryBarrier() {
   // Note: This is a function call, which is also an implicit compiler
   // barrier.
   typedef void (*KernelMemoryBarrierFunc)();
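Ordering matters in the dispatch above: compiler-level barriers come first (__sync_synchronize() on GCC/Clang, ::MemoryBarrier() on MSVC), and the ARM kernel-helper path is kept only as a fallback. The barrier is what makes the single-reader/single-writer FIFO safe: the producer must publish a slot's contents before advancing its index, and the consumer must read the data before releasing the slot. A condensed sketch of that pairing (not the class in this file; names are illustrative):

    struct SrswFifoSketch {
      static const int kCapacity = 4;
      int8_t* slots[kCapacity];
      volatile int head;  // written only by the producer
      volatile int tail;  // written only by the consumer

      bool Push(int8_t* p) {
        int next = (head + 1) % kCapacity;
        if (next == tail) return false;  // full
        slots[head] = p;
        MemoryBarrier();  // slot contents must be visible before the index moves
        head = next;
        return true;
      }
      int8_t* Pop() {
        if (tail == head) return NULL;  // empty
        int8_t* p = slots[tail];
        MemoryBarrier();  // read the data before releasing the slot
        tail = (tail + 1) % kCapacity;
        return p;
      }
    };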
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
@@ -42,21 +42,26 @@
         'audio_device_impl.h',
         'audio_device_config.h',
         'dummy/audio_device_dummy.cc',
         'dummy/audio_device_dummy.h',
         'dummy/audio_device_utility_dummy.cc',
         'dummy/audio_device_utility_dummy.h',
       ],
       'conditions': [
-        ['OS=="linux"', {
+        ['build_with_mozilla==1', {
+          'cflags_mozilla': [
+            '$(NSPR_CFLAGS)',
+          ],
+        }],
+        ['OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1', {
           'include_dirs': [
             'linux',
           ],
-        }], # OS==linux
+        }], # OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1
         ['OS=="ios"', {
           'include_dirs': [
             'ios',
           ],
         }], # OS==ios
         ['OS=="mac"', {
           'include_dirs': [
             'mac',
@@ -64,34 +69,43 @@
         }], # OS==mac
         ['OS=="win"', {
           'include_dirs': [
             'win',
           ],
         }],
         ['OS=="android"', {
           'include_dirs': [
+            '/widget/android',
             'android',
           ],
         }], # OS==android
+        ['moz_widget_toolkit_gonk==1', {
+          'cflags_mozilla': [
+            '-I$(ANDROID_SOURCE)/frameworks/wilhelm/include',
+            '-I$(ANDROID_SOURCE)/system/media/wilhelm/include',
+          ],
+          'include_dirs': [
+            'android',
+          ],
+        }], # moz_widget_toolkit_gonk==1
+        ['enable_android_opensl==1', {
+          'include_dirs': [
+            'opensl',
+          ],
+        }], # enable_android_opensl
         ['include_internal_audio_device==0', {
           'defines': [
             'WEBRTC_DUMMY_AUDIO_BUILD',
           ],
         }],
         ['include_internal_audio_device==1', {
           'sources': [
-            'linux/alsasymboltable_linux.cc',
-            'linux/alsasymboltable_linux.h',
-            'linux/audio_device_alsa_linux.cc',
-            'linux/audio_device_alsa_linux.h',
             'linux/audio_device_utility_linux.cc',
             'linux/audio_device_utility_linux.h',
-            'linux/audio_mixer_manager_alsa_linux.cc',
-            'linux/audio_mixer_manager_alsa_linux.h',
             'linux/latebindingsymboltable_linux.cc',
             'linux/latebindingsymboltable_linux.h',
             'ios/audio_device_ios.cc',
             'ios/audio_device_ios.h',
             'ios/audio_device_utility_ios.cc',
             'ios/audio_device_utility_ios.h',
             'mac/audio_device_mac.cc',
             'mac/audio_device_mac.h',
@@ -105,70 +119,115 @@
             'win/audio_device_core_win.cc',
             'win/audio_device_core_win.h',
             'win/audio_device_wave_win.cc',
             'win/audio_device_wave_win.h',
             'win/audio_device_utility_win.cc',
             'win/audio_device_utility_win.h',
             'win/audio_mixer_manager_win.cc',
             'win/audio_mixer_manager_win.h',
+            # used externally for getUserMedia
+            'opensl/single_rw_fifo.cc',
+            'opensl/single_rw_fifo.h',
             'android/audio_device_template.h',
-            'android/audio_device_utility_android.cc',
-            'android/audio_device_utility_android.h',
             'android/audio_manager_jni.cc',
             'android/audio_manager_jni.h',
             'android/audio_record_jni.cc',
             'android/audio_record_jni.h',
             'android/audio_track_jni.cc',
             'android/audio_track_jni.h',
-            'android/fine_audio_buffer.cc',
-            'android/fine_audio_buffer.h',
-            'android/low_latency_event_posix.cc',
-            'android/low_latency_event.h',
-            'android/opensles_common.cc',
-            'android/opensles_common.h',
-            'android/opensles_input.cc',
-            'android/opensles_input.h',
-            'android/opensles_output.cc',
-            'android/opensles_output.h',
-            'android/single_rw_fifo.cc',
-            'android/single_rw_fifo.h',
           ],
           'conditions': [
             ['OS=="android"', {
+              'sources': [
+                'opensl/audio_manager_jni.cc',
+                'opensl/audio_manager_jni.h',
+                'android/audio_device_jni_android.cc',
+                'android/audio_device_jni_android.h',
+               ],
+            }],
+            ['OS=="android" or moz_widget_toolkit_gonk==1', {
               'link_settings': {
                 'libraries': [
                   '-llog',
                   '-lOpenSLES',
                 ],
               },
+              'conditions': [
+                ['enable_android_opensl==1', {
+                  'sources': [
+                    'opensl/audio_device_opensles.cc',
+                    'opensl/audio_device_opensles.h',
+                    'opensl/fine_audio_buffer.cc',
+                    'opensl/fine_audio_buffer.h',
+                    'opensl/low_latency_event_posix.cc',
+                    'opensl/low_latency_event.h',
+                    'opensl/opensles_common.cc',
+                    'opensl/opensles_common.h',
+                    'opensl/opensles_input.cc',
+                    'opensl/opensles_input.h',
+                    'opensl/opensles_output.h',
+                    'shared/audio_device_utility_shared.cc',
+                    'shared/audio_device_utility_shared.h',
+                  ],
+                }, {
+                  'sources': [
+                    'shared/audio_device_utility_shared.cc',
+                    'shared/audio_device_utility_shared.h',
+                    'android/audio_device_jni_android.cc',
+                    'android/audio_device_jni_android.h',
+                  ],
+                }],
+                ['enable_android_opensl_output==1', {
+                  'sources': [
+                    'opensl/opensles_output.cc'
+                  ],
+                  'defines': [
+                    'WEBRTC_ANDROID_OPENSLES_OUTPUT',
+                  ],
+                }],
+              ],
             }],
             ['OS=="linux"', {
-              'defines': [
-                'LINUX_ALSA',
-              ],
               'link_settings': {
                 'libraries': [
                   '-ldl','-lX11',
                 ],
               },
-              'conditions': [
-                ['include_pulse_audio==1', {
-                  'defines': [
-                    'LINUX_PULSE',
-                  ],
-                  'sources': [
-                    'linux/audio_device_pulse_linux.cc',
-                    'linux/audio_device_pulse_linux.h',
-                    'linux/audio_mixer_manager_pulse_linux.cc',
-                    'linux/audio_mixer_manager_pulse_linux.h',
-                    'linux/pulseaudiosymboltable_linux.cc',
-                    'linux/pulseaudiosymboltable_linux.h',
-                  ],
-                }],
+            }],
+            ['include_alsa_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_ALSA_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_ALSA',
+              ],
+              'sources': [
+                'linux/alsasymboltable_linux.cc',
+                'linux/alsasymboltable_linux.h',
+                'linux/audio_device_alsa_linux.cc',
+                'linux/audio_device_alsa_linux.h',
+                'linux/audio_mixer_manager_alsa_linux.cc',
+                'linux/audio_mixer_manager_alsa_linux.h',
+              ],
+            }],
+            ['include_pulse_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_PULSEAUDIO_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_PULSE',
+              ],
+              'sources': [
+                'linux/audio_device_pulse_linux.cc',
+                'linux/audio_device_pulse_linux.h',
+                'linux/audio_mixer_manager_pulse_linux.cc',
+                'linux/audio_mixer_manager_pulse_linux.h',
+                'linux/pulseaudiosymboltable_linux.cc',
+                'linux/pulseaudiosymboltable_linux.h',
               ],
             }],
             ['OS=="mac" or OS=="ios"', {
               'link_settings': {
                 'libraries': [
                   '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
                   '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
                 ],
@@ -267,9 +326,8 @@
               ],
             },
           ],
         }],
       ],
     }], # include_tests
   ],
 }
-
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
@@ -11,31 +11,38 @@
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 #include "webrtc/modules/audio_device/audio_device_config.h"
 #include "webrtc/modules/audio_device/audio_device_impl.h"
 #include "webrtc/system_wrappers/interface/ref_count.h"
 
 #include <assert.h>
 #include <string.h>
 
-#if defined(_WIN32)
+#if defined(WEBRTC_DUMMY_AUDIO_BUILD)
+// do not include platform specific headers
+#elif defined(_WIN32)
     #include "audio_device_utility_win.h"
     #include "audio_device_wave_win.h"
  #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
     #include "audio_device_core_win.h"
  #endif
-#elif defined(WEBRTC_ANDROID)
+#elif defined(WEBRTC_ANDROID_OPENSLES)
+// ANDROID and GONK
     #include <stdlib.h>
+    #include <dlfcn.h>
     #include "audio_device_utility_android.h"
     #include "webrtc/modules/audio_device/android/audio_device_template.h"
+#if !defined(WEBRTC_GONK)
+// Gonk only supports OpenSL ES; Android can use either that or JNI
     #include "webrtc/modules/audio_device/android/audio_record_jni.h"
     #include "webrtc/modules/audio_device/android/audio_track_jni.h"
+#endif
     #include "webrtc/modules/audio_device/android/opensles_input.h"
     #include "webrtc/modules/audio_device/android/opensles_output.h"
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     #include "audio_device_utility_linux.h"
  #if defined(LINUX_ALSA)
     #include "audio_device_alsa_linux.h"
  #endif
  #if defined(LINUX_PULSE)
     #include "audio_device_pulse_linux.h"
  #endif
 #elif defined(WEBRTC_IOS)
@@ -154,17 +161,17 @@ int32_t AudioDeviceModuleImpl::CheckPlat
     PlatformType platform(kPlatformNotSupported);
 
 #if defined(_WIN32)
     platform = kPlatformWin32;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is WIN32");
 #elif defined(WEBRTC_ANDROID)
     platform = kPlatformAndroid;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is ANDROID");
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     platform = kPlatformLinux;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is LINUX");
 #elif defined(WEBRTC_IOS)
     platform = kPlatformIOS;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is IOS");
 #elif defined(WEBRTC_MAC)
     platform = kPlatformMac;
     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is MAC");
@@ -253,39 +260,60 @@ int32_t AudioDeviceModuleImpl::CreatePla
         // for Windows.
         //
         ptrAudioDeviceUtility = new AudioDeviceUtilityWindows(Id());
     }
 #endif  // #if defined(_WIN32)
 
     // Create the *Android OpenSLES* implementation of the Audio Device
     //
-#if defined(WEBRTC_ANDROID)
+#if defined(WEBRTC_ANDROID) || defined (WEBRTC_GONK)
     if (audioLayer == kPlatformDefaultAudio)
     {
-        // AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
-#if defined(WEBRTC_ANDROID_OPENSLES)
-        ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
-#else
-        ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
+      // AudioRecordJni provides hardware AEC; OpenSlesOutput provides low latency.
+#if defined (WEBRTC_ANDROID_OPENSLES)
+      // Android and Gonk
+      // Check if the OpenSLES library is available before going further.
+      void* opensles_lib = dlopen("libOpenSLES.so", RTLD_LAZY);
+      if (opensles_lib) {
+        // That worked, close for now and proceed normally.
+        dlclose(opensles_lib);
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+          // Create *Android OpenSLES Audio* implementation
+          ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
+          WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                       "Android OpenSLES Audio APIs will be utilized");
+        }
+      }
 #endif
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android OpenSLES Audio APIs will be utilized");
+#if !defined(WEBRTC_GONK)
+      // Fall back to this case on Android 2.2, or when OpenSLES is not available.
+      if (ptrAudioDevice == NULL) {
+        // Create the *Android Java* implementation of the Audio Device
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+          // Create *Android JNI Audio* implementation
+          ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
+          WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Android JNI Audio APIs will be utilized");
+        }
+      }
+#endif
     }
 
     if (ptrAudioDevice != NULL)
     {
         // Create the Android implementation of the Device Utility.
         ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
     }
-    // END #if defined(WEBRTC_ANDROID)
+    // END #if defined(WEBRTC_ANDROID_OPENSLES)
 
     // Create the *Linux* implementation of the Audio Device
     //
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     if ((audioLayer == kLinuxPulseAudio) || (audioLayer == kPlatformDefaultAudio))
     {
 #if defined(LINUX_PULSE)
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "attempting to use the Linux PulseAudio APIs...");
 
         // create *Linux PulseAudio* implementation
         AudioDeviceLinuxPulse* pulseDevice = new AudioDeviceLinuxPulse(Id());
         if (pulseDevice->Init() != -1)
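The dlopen() probe above is deliberately cheap: open the library to confirm it exists, close it again, and only then construct the OpenSLES device, whose own Init() reopens it for real use. The probe-then-fallback shape, as a sketch (OpenSlesAvailable is an illustrative name):

    #include <dlfcn.h>

    static bool OpenSlesAvailable() {
      void* lib = dlopen("libOpenSLES.so", RTLD_LAZY);
      if (!lib)
        return false;  // e.g. Android 2.2: fall back to the JNI device
      dlclose(lib);    // probe only; Init() will reopen it
      return true;
    }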
@@ -323,17 +351,17 @@ int32_t AudioDeviceModuleImpl::CreatePla
     if (ptrAudioDevice != NULL)
     {
         // Create the Linux implementation of the Device Utility.
         // This class is independent of the selected audio layer
         // for Linux.
         //
         ptrAudioDeviceUtility = new AudioDeviceUtilityLinux(Id());
     }
-#endif  // #if defined(WEBRTC_LINUX)
+#endif  // #if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 
     // Create the *iPhone* implementation of the Audio Device
     //
 #if defined(WEBRTC_IOS)
     if (audioLayer == kPlatformDefaultAudio)
     {
         // Create *iPhone Audio* implementation
         ptrAudioDevice = new AudioDeviceIPhone(Id());
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
@@ -41,17 +41,17 @@ bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2,
     const uint32_t length)
 {
 	return ((_strnicmp(str1, str2, length) == 0) ? true : false);
 }
 
 }  // namespace webrtc
 
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 // ============================================================================
 //                                 Linux & Mac
 // ============================================================================
 
 #include <stdio.h>      // getchar
 #include <string.h>     // strncasecmp
 #include <sys/time.h>   // gettimeofday
@@ -104,9 +104,9 @@ uint32_t AudioDeviceUtility::GetTimeInMS
 bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2, const uint32_t length)
 {
     return (strncasecmp(str1, str2, length) == 0)?true: false;
 }
 
 }  // namespace webrtc
 
-#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
--- a/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
@@ -1327,17 +1327,17 @@ int32_t AudioDeviceIPhone::InitPlayOrRec
                  playoutDesc.mSampleRate);
 
     playoutDesc.mSampleRate = sampleRate;
 
     // Store the sampling frequency to use towards the Audio Device Buffer
     // todo: Add 48 kHz (increase buffer sizes). Other fs?
     if ((playoutDesc.mSampleRate > 44090.0)
         && (playoutDesc.mSampleRate < 44110.0)) {
-        _adbSampFreq = 44000;
+        _adbSampFreq = 44100;
     } else if ((playoutDesc.mSampleRate > 15990.0)
                && (playoutDesc.mSampleRate < 16010.0)) {
         _adbSampFreq = 16000;
     } else if ((playoutDesc.mSampleRate > 7990.0)
                && (playoutDesc.mSampleRate < 8010.0)) {
         _adbSampFreq = 8000;
     } else {
         _adbSampFreq = 0;
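The corrected constant matters because the match is a ±10 Hz window around each supported rate: a device reporting 44100.0 must be stored as 44100, not 44000, or every later samples-per-millisecond computation drifts. The quantization, as a sketch (CanonicalRate is an illustrative name):

    // Map a measured hardware sample rate onto a supported canonical rate.
    static uint32_t CanonicalRate(double fs) {
      if (fs > 44090.0 && fs < 44110.0) return 44100;
      if (fs > 15990.0 && fs < 16010.0) return 16000;
      if (fs > 7990.0  && fs < 8010.0)  return 8000;
      return 0;  // unsupported; the caller must reject it
    }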
--- a/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -14,18 +14,18 @@
 #include <AudioUnit/AudioUnit.h>
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
 namespace webrtc {
 class ThreadWrapper;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
+const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
+const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
 
 const uint32_t N_REC_CHANNELS = 1;  // default is mono recording
 const uint32_t N_PLAY_CHANNELS = 1;  // default is mono playout
 const uint32_t N_DEVICE_CHANNELS = 8;
 
 const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
 const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -14,16 +14,23 @@
 #include "webrtc/modules/audio_device/audio_device_utility.h"
 #include "webrtc/modules/audio_device/linux/audio_device_alsa_linux.h"
 
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/sleep.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+#include "Latency.h"
+
+#define LOG_FIRST_CAPTURE(x) LogTime(AsyncLatencyLogger::AudioCaptureBase, \
+                                     reinterpret_cast<uint64_t>(x), 0)
+#define LOG_CAPTURE_FRAMES(x, frames) LogLatency(AsyncLatencyLogger::AudioCapture, \
+                                                 reinterpret_cast<uint64_t>(x), frames)
+
 webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
 
 // Accesses ALSA functions through our late-binding symbol table instead of
 // directly. This way we don't have to link to libasound, which means our binary
 // will work on systems that don't have it.
 #define LATE(sym) \
   LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
 
@@ -90,16 +97,17 @@ AudioDeviceLinuxALSA::AudioDeviceLinuxAL
     _playChannels(ALSA_PLAYOUT_CH),
     _recordingBuffer(NULL),
     _playoutBuffer(NULL),
     _recordingFramesLeft(0),
     _playoutFramesLeft(0),
     _playBufType(AudioDeviceModule::kFixedBufferSize),
     _initialized(false),
     _recording(false),
+    _firstRecord(true),
     _playing(false),
     _recIsInitialized(false),
     _playIsInitialized(false),
     _AGC(false),
     _recordingDelay(0),
     _playoutDelay(0),
     _playWarning(0),
     _playError(0),
@@ -176,23 +184,25 @@ int32_t AudioDeviceLinuxALSA::Init()
         return -1;
     }
 
     if (_initialized)
     {
         return 0;
     }
 
+#ifdef USE_X11
     //Get X display handle for typing detection
     _XDisplay = XOpenDisplay(NULL);
     if (!_XDisplay)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
           "  failed to open X display, typing detection will not work");
     }
+#endif
 
     _playWarning = 0;
     _playError = 0;
     _recWarning = 0;
     _recError = 0;
 
     _initialized = true;
 
@@ -250,21 +260,23 @@ int32_t AudioDeviceLinuxALSA::Terminate(
         {
             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                          "  failed to close down the play audio thread");
         }
 
         _critSect.Enter();
     }
 
+#ifdef USE_X11
     if (_XDisplay)
     {
       XCloseDisplay(_XDisplay);
       _XDisplay = NULL;
     }
+#endif
 
     _initialized = false;
     _outputDeviceIsSpecified = false;
     _inputDeviceIsSpecified = false;
 
     return 0;
 }
 
@@ -980,17 +992,18 @@ int32_t AudioDeviceLinuxALSA::RecordingD
 
     memset(name, 0, kAdmMaxDeviceNameSize);
 
     if (guid != NULL)
     {
         memset(guid, 0, kAdmMaxGuidSize);
     }
 
-    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize,
+                          guid, kAdmMaxGuidSize);
 }
 
 int16_t AudioDeviceLinuxALSA::RecordingDevices()
 {
 
     return (int16_t)GetDevicesInfo(0, false);
 }
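With the extra id arguments threaded through GetDevicesInfo, a caller can now retrieve both the descriptive name and the stable ALSA hint name in one call. A usage sketch (assuming an initialized AudioDeviceLinuxALSA pointer named device; the buffer sizes come from audio_device_defines.h):

    char name[webrtc::kAdmMaxDeviceNameSize];
    char guid[webrtc::kAdmMaxGuidSize];
    if (device->RecordingDeviceName(0, name, guid) == 0) {
      // name holds the description (e.g. "default"); guid holds the ALSA
      // hint name, or is zeroed out for the default device.
    }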
 
@@ -1442,16 +1455,17 @@ int32_t AudioDeviceLinuxALSA::StartRecor
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "   failed to alloc recording buffer");
         _recording = false;
         return -1;
     }
     // RECORDING
     const char* threadName = "webrtc_audio_module_capture_thread";
+    _firstRecord = true;
     _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc,
                                                 this,
                                                 kRealtimePriority,
                                                 threadName);
     if (_ptrThreadRec == NULL)
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to create the rec audio thread");
@@ -1628,40 +1642,41 @@ int32_t AudioDeviceLinuxALSA::StartPlayo
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "    failed to create the play audio thread");
         _playing = false;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
 
+    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+                     "     playout snd_pcm_prepare failed (%s)\n",
+                     LATE(snd_strerror)(errVal));
+        // Just log the error; if snd_pcm_open fails, -1 will be returned.
+    }
+
+
     unsigned int threadID(0);
     if (!_ptrThreadPlay->Start(threadID))
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to start the play audio thread");
         _playing = false;
         delete _ptrThreadPlay;
         _ptrThreadPlay = NULL;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
     _playThreadID = threadID;
 
-    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
-    if (errVal < 0)
-    {
-        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                     "     playout snd_pcm_prepare failed (%s)\n",
-                     LATE(snd_strerror)(errVal));
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
-
     return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::StopPlayout()
 {
 
     {
         CriticalSectionScoped lock(&_critSect);
@@ -1831,17 +1846,19 @@ void AudioDeviceLinuxALSA::ClearRecordin
 //                                 Private Methods
 // ============================================================================
 
 int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
     const int32_t function,
     const bool playback,
     const int32_t enumDeviceNo,
     char* enumDeviceName,
-    const int32_t ednLen) const
+    const int32_t ednLen,
+    char* enumDeviceId,
+    const int32_t ediLen) const
 {
 
     // Device enumeration based on libjingle implementation
     // by Tristan Schmelcher at Google Inc.
 
     const char *type = playback ? "Output" : "Input";
     // dmix and dsnoop are only for playback and capture, respectively, but ALSA
     // stupidly includes them in both lists.
@@ -1870,16 +1887,18 @@ int32_t AudioDeviceLinuxALSA::GetDevices
             return -1;
         }
 
         enumCount++; // default is 0
         if ((function == FUNC_GET_DEVICE_NAME ||
             function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
         {
             strcpy(enumDeviceName, "default");
+            if (enumDeviceId)
+                memset(enumDeviceId, 0, ediLen);
 
             err = LATE(snd_device_name_free_hint)(hints);
             if (err != 0)
             {
                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                              "GetDevicesInfo - device name free hint error: %s",
                              LATE(snd_strerror)(err));
             }
@@ -1932,28 +1951,38 @@ int32_t AudioDeviceLinuxALSA::GetDevices
 
                 }
                 if ((FUNC_GET_DEVICE_NAME == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, desc, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                     // Replace '\n' with '-'.
                     char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
                     if (pret)
                         *pret = '-';
                 }
                 if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, name, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                 }
 
                 if (keepSearching)
                     ++enumCount;
 
                 if (desc != name)
                     free(desc);
@@ -1968,17 +1997,17 @@ int32_t AudioDeviceLinuxALSA::GetDevices
         err = LATE(snd_device_name_free_hint)(hints);
         if (err != 0)
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "GetDevicesInfo - device name free hint error: %s",
                          LATE(snd_strerror)(err));
             // Continue and return true anyway, since we did get the whole list.
         }
-    }
+      }
 
     if (FUNC_GET_NUM_OF_DEVICE == function)
     {
         if (enumCount == 1) // only default?
             enumCount = 0;
         return enumCount; // Normal return point for function 0
     }
 
@@ -2253,16 +2282,21 @@ bool AudioDeviceLinuxALSA::RecThreadProc
         memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
                buffer, size);
         _recordingFramesLeft -= frames;
 
         if (!_recordingFramesLeft)
         { // buf is full
             _recordingFramesLeft = _recordingFramesIn10MS;
 
+            if (_firstRecord) {
+              LOG_FIRST_CAPTURE(this);
+              _firstRecord = false;
+            }
+            LOG_CAPTURE_FRAMES(this, _recordingFramesIn10MS);
             // store the recorded buffer (no action will be taken if the
             // #recorded samples is not a full buffer)
             _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
                                                _recordingFramesIn10MS);
 
             uint32_t currentMicLevel = 0;
             uint32_t newMicLevel = 0;
 
@@ -2337,17 +2371,17 @@ bool AudioDeviceLinuxALSA::RecThreadProc
     }
 
     UnLock();
     return true;
 }
 
 
 bool AudioDeviceLinuxALSA::KeyPressed() const{
-
+#ifdef USE_X11
   char szKey[32];
   unsigned int i = 0;
   char state = 0;
 
   if (!_XDisplay)
     return false;
 
   // Check key map status
@@ -2355,10 +2389,13 @@ bool AudioDeviceLinuxALSA::KeyPressed() 
 
   // A bit change in keymap means a key is pressed
   for (i = 0; i < sizeof(szKey); i++)
     state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
 
   // Save old state
   memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
   return (state != 0);
+#else
+  return false;
+#endif
 }
 }  // namespace webrtc
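The detection loop is a per-bit rising-edge filter: (new ^ old) selects the bits that changed, and masking with new keeps only the 0-to-1 transitions, i.e. keys that just went down. Worked on one byte of the keymap:

    unsigned char old_state = 0x24;  // 0b00100100: two keys already held
    unsigned char new_state = 0x2C;  // 0b00101100: one more key went down
    unsigned char pressed = (new_state ^ old_state) & new_state;  // == 0x08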
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -11,17 +11,19 @@
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
 
+#ifdef USE_X11
 #include <X11/Xlib.h>
+#endif
 #include <alsa/asoundlib.h>
 #include <sys/ioctl.h>
 #include <sys/soundcard.h>
 
 
 namespace webrtc
 {
 class EventWrapper;
@@ -162,17 +164,19 @@ public:
 public:
     virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) OVERRIDE;
 
 private:
     int32_t GetDevicesInfo(const int32_t function,
                            const bool playback,
                            const int32_t enumDeviceNo = 0,
                            char* enumDeviceName = NULL,
-                           const int32_t ednLen = 0) const;
+                           const int32_t ednLen = 0,
+                           char* enumDeviceID = NULL,
+                           const int32_t ediLen = 0) const;
     int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);
 
 private:
     bool KeyPressed() const;
 
 private:
     void Lock() EXCLUSIVE_LOCK_FUNCTION(_critSect) { _critSect.Enter(); };
     void UnLock() UNLOCK_FUNCTION(_critSect) { _critSect.Leave(); };
@@ -228,16 +232,17 @@ private:
     uint32_t _recordingFramesLeft;
     uint32_t _playoutFramesLeft;
 
     AudioDeviceModule::BufferType _playBufType;
 
 private:
     bool _initialized;
     bool _recording;
+    bool _firstRecord;
     bool _playing;
     bool _recIsInitialized;
     bool _playIsInitialized;
     bool _AGC;
 
     snd_pcm_sframes_t _recordingDelay;
     snd_pcm_sframes_t _playoutDelay;
 
@@ -245,14 +250,16 @@ private:
     uint16_t _playError;
     uint16_t _recWarning;
     uint16_t _recError;
 
     uint16_t _playBufDelay;                 // playback delay
     uint16_t _playBufDelayFixed;            // fixed playback delay
 
     char _oldKeyState[32];
+#ifdef USE_X11
     Display* _XDisplay;
+#endif
 };
 
 }
 
 #endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -197,23 +197,25 @@ int32_t AudioDeviceLinuxPulse::Init()
         return -1;
     }
 
     _playWarning = 0;
     _playError = 0;
     _recWarning = 0;
     _recError = 0;
 
+#ifdef USE_X11
     //Get X display handle for typing detection
     _XDisplay = XOpenDisplay(NULL);
     if (!_XDisplay)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
           "  failed to open X display, typing detection will not work");
     }
+#endif
 
     // RECORDING
     const char* threadName = "webrtc_audio_module_rec_thread";
     _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this,
                                                 kRealtimePriority, threadName);
     if (_ptrThreadRec == NULL)
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
@@ -318,21 +320,23 @@ int32_t AudioDeviceLinuxPulse::Terminate
     // Terminate PulseAudio
     if (TerminatePulseAudio() < 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "  failed to terminate PulseAudio");
         return -1;
     }
 
+#ifdef USE_X11
     if (_XDisplay)
     {
       XCloseDisplay(_XDisplay);
       _XDisplay = NULL;
     }
+#endif
 
     _initialized = false;
     _outputDeviceIsSpecified = false;
     _inputDeviceIsSpecified = false;
 
     return 0;
 }
 
@@ -3076,17 +3080,17 @@ bool AudioDeviceLinuxPulse::RecThreadPro
 
     }  // _recording
 
     UnLock();
     return true;
 }
 
 bool AudioDeviceLinuxPulse::KeyPressed() const{
-
+#ifdef USE_X11
   char szKey[32];
   unsigned int i = 0;
   char state = 0;
 
   if (!_XDisplay)
     return false;
 
   // Check key map status
@@ -3094,10 +3098,13 @@ bool AudioDeviceLinuxPulse::KeyPressed()
 
   // A bit change in keymap means a key is pressed
   for (i = 0; i < sizeof(szKey); i++)
     state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
 
   // Save old state
   memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
   return (state != 0);
+#else
+  return false;
+#endif
 }
 }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -10,17 +10,19 @@
 
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
 
 #include "webrtc/modules/audio_device/audio_device_generic.h"
 #include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 
+#ifdef USE_X11
 #include <X11/Xlib.h>
+#endif
 #include <pulse/pulseaudio.h>
 
 // We define this flag if it's missing from our headers, because we want to be
 // able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY
 // if run against a recent version of the library.
 #ifndef PA_STREAM_ADJUST_LATENCY
 #define PA_STREAM_ADJUST_LATENCY 0x2000U
 #endif
@@ -370,14 +372,16 @@ private:
     pa_stream* _recStream;
     pa_stream* _playStream;
     uint32_t _recStreamFlags;
     uint32_t _playStreamFlags;
     pa_buffer_attr _playBufferAttr;
     pa_buffer_attr _recBufferAttr;
 
     char _oldKeyState[32];
+#ifdef USE_X11
     Display* _XDisplay;
+#endif
 };
 
 }
 
 #endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -22,68 +22,68 @@
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h"
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 #include <dlfcn.h>
 #endif
 
 // TODO(grunell): Either put inside webrtc namespace or use webrtc:: instead.
 using namespace webrtc;
 
 namespace webrtc_adm_linux {
 
 inline static const char *GetDllError() {
-#ifdef WEBRTC_LINUX
-  char *err = dlerror();
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
+  const char *err = dlerror();
   if (err) {
     return err;
   } else {
     return "No error";
   }
 #else
 #error Not implemented
 #endif
 }
 
 DllHandle InternalLoadDll(const char dll_name[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   DllHandle handle = dlopen(dll_name, RTLD_NOW);
 #else
 #error Not implemented
 #endif
   if (handle == kInvalidDllHandle) {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                "Can't load %s : %s", dll_name, GetDllError());
   }
   return handle;
 }
 
 void InternalUnloadDll(DllHandle handle) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   if (dlclose(handle) != 0) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "%s", GetDllError());
   }
 #else
 #error Not implemented
 #endif
 }
 
 static bool LoadSymbol(DllHandle handle,
                        const char *symbol_name,
                        void **symbol) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   *symbol = dlsym(handle, symbol_name);
-  char *err = dlerror();
+  const char *err = dlerror();
   if (err) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "Error loading symbol %s : %d", symbol_name, err);
     return false;
   } else if (!*symbol) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "Symbol %s is NULL", symbol_name);
     return false;
@@ -96,17 +96,17 @@ static bool LoadSymbol(DllHandle handle,
 
 // This routine MUST assign SOME value for every symbol, even if that value is
 // NULL, or else some symbols may be left with uninitialized data that the
 // caller may later interpret as a valid address.
 bool InternalLoadSymbols(DllHandle handle,
                          int num_symbols,
                          const char *const symbol_names[],
                          void *symbols[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   // Clear any old errors.
   dlerror();
 #endif
   for (int i = 0; i < num_symbols; ++i) {
     if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
       return false;
     }
   }
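
The WEBRTC_BSD additions above only widen the platform guard; the underlying pattern is plain POSIX dlopen/dlsym with explicit dlerror() checks, since dlsym() may legitimately return NULL for a valid symbol. A minimal sketch of that pattern (the library and symbol names here are hypothetical, for illustration only):

  // Minimal dlopen/dlsym sketch of the late-binding pattern above.
  // "libexample.so" and "example_fn" are made-up names.
  #include <dlfcn.h>
  #include <stdio.h>

  int main() {
    void* handle = dlopen("libexample.so", RTLD_NOW);
    if (!handle) {
      fprintf(stderr, "dlopen: %s\n", dlerror());
      return 1;
    }
    dlerror();  // clear any stale error before the call we want to check
    void* sym = dlsym(handle, "example_fn");
    const char* err = dlerror();  // NULL result alone is not an error
    if (err) {
      fprintf(stderr, "dlsym: %s\n", err);
    } else {
      // cast `sym` to the real function type before calling it
    }
    dlclose(handle);
    return 0;
  }
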
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -37,17 +37,17 @@
 
 // This file provides macros for creating "symbol table" classes to simplify the
 // dynamic loading of symbols from DLLs. Currently the implementation only
 // supports Linux and pure C symbols.
 // See talk/sound/pulseaudiosymboltable.(h|cc) for an example.
 
 namespace webrtc_adm_linux {
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 typedef void *DllHandle;
 
 const DllHandle kInvalidDllHandle = NULL;
 #else
 #error Not implemented
 #endif
 
 // These are helpers for use only by the class below.
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
@@ -24,16 +24,20 @@
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h"
 
 namespace webrtc_adm_linux_pulse {
 
+#if defined(__OpenBSD__) || defined(WEBRTC_GONK)
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so")
+#else
 LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
+#endif
 #define X(sym) \
     LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
 PULSE_AUDIO_SYMBOLS_LIST
 #undef X
 LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
 
 }  // namespace webrtc_adm_linux_pulse
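
The DEFINE_BEGIN/DEFINE_ENTRY/DEFINE_END trio above is an X-macro expansion: PULSE_AUDIO_SYMBOLS_LIST names each libpulse symbol exactly once, and each X(sym) expansion emits one table entry. A generic sketch of the technique, with a hypothetical two-symbol list:

  // Generic X-macro sketch of the symbol-table pattern above; the list
  // here is made up, not the real PULSE_AUDIO_SYMBOLS_LIST.
  #define MY_SYMBOLS_LIST \
    X(pa_context_new)     \
    X(pa_context_connect)

  // Expand the list once into an array of symbol-name strings.
  static const char* const kSymbolNames[] = {
  #define X(sym) #sym,
    MY_SYMBOLS_LIST
  #undef X
  };
  // kSymbolNames == { "pa_context_new", "pa_context_connect" }
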
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_opensles_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_device_opensles.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_opensles_android.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_manager_jni.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/audio_manager_jni.h
@@ -0,0 +1,6 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_manager_jni.h"
+
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/fine_audio_buffer.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/fine_audio_buffer.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/fine_audio_buffer.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/low_latency_event_posix.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/low_latency_event_posix.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/low_latency_event_posix.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_common.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_common.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_common.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_input.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_input.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_input.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_output.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/opensles_output.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/opensles_output.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/single_rw_fifo.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/single_rw_fifo.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_jni_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_jni_android.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_jni_android.h"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.cc
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_utility_android.cc"
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/shared/audio_device_utility_shared.h
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "../android/audio_device_utility_android.h"
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -209,17 +209,17 @@ class AudioDeviceAPITest: public testing
                 kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kLinuxAlsaAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kLinuxPulseAudio)) == NULL);
     // Create default implementation instance
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kWindowsWaveAudio)) == NULL);
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kWindowsCoreAudio)) == NULL);
     // create default implementation instance
     EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
                 kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
     audio_device_->AddRef();
@@ -1702,17 +1702,17 @@ TEST_F(AudioDeviceAPITest, CPULoad) {
   EXPECT_EQ(0, load);
 #else
   EXPECT_EQ(-1, audio_device_->CPULoad(&load));
 #endif
 }
 
 // TODO(kjellander): Fix flakiness causing failures on Windows.
 // TODO(phoglund):  Fix flakiness causing failures on Linux.
-#if !defined(_WIN32) && !defined(WEBRTC_LINUX)
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_BSD)
 TEST_F(AudioDeviceAPITest, StartAndStopRawOutputFileRecording) {
   // NOTE: this API is better tested in a functional test
   CheckInitialPlayoutStates();
 
   // fail tests
   EXPECT_EQ(-1, audio_device_->StartRawOutputFileRecording(NULL));
 
   // bulk tests
@@ -1771,50 +1771,50 @@ TEST_F(AudioDeviceAPITest, StartAndStopR
       GetFilename("raw_input_not_recording.pcm")));
   EXPECT_EQ(0, audio_device_->StopRawInputFileRecording());
 
   // results after this test:
   //
   // - size of raw_input_not_recording.pcm shall be 0
  // - size of raw_input_recording.pcm shall be > 0
 }
-#endif  // !WIN32 && !WEBRTC_LINUX
+#endif  // !WIN32 && !WEBRTC_LINUX && !defined(WEBRTC_BSD)
 
 TEST_F(AudioDeviceAPITest, RecordingSampleRate) {
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->RecordingSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 
   // @TODO(xians) - add tests for all platforms here...
 }
 
 TEST_F(AudioDeviceAPITest, PlayoutSampleRate) {
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->PlayoutSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 }
 
 TEST_F(AudioDeviceAPITest, ResetAudioDevice) {
   CheckInitialPlayoutStates();
   CheckInitialRecordingStates();
   EXPECT_EQ(0, audio_device_->SetPlayoutDevice(MACRO_DEFAULT_DEVICE));
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -316,22 +316,16 @@ int32_t AudioTransportImpl::NeedMorePlay
                 const uint8_t nChannelsIn = packet->nChannels;
                 const uint32_t samplesPerSecIn = packet->samplesPerSec;
                 const uint16_t nBytesPerSampleIn =
                     packet->nBytesPerSample;
 
                 int32_t fsInHz(samplesPerSecIn);
                 int32_t fsOutHz(samplesPerSec);
 
-                if (fsInHz == 44100)
-                    fsInHz = 44000;
-
-                if (fsOutHz == 44100)
-                    fsOutHz = 44000;
-
                 if (nChannelsIn == 2 && nBytesPerSampleIn == 4)
                 {
                     // input is stereo => we will resample in stereo
                     ret = _resampler.ResetIfNeeded(fsInHz, fsOutHz,
                                                    kResamplerSynchronousStereo);
                     if (ret == 0)
                     {
                         if (nChannels == 2)
@@ -1231,17 +1225,17 @@ int32_t FuncTestManager::TestAudioTransp
 
         EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
 
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (samplesPerSec == 48000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (samplesPerSec == 44100 || samplesPerSec == 44000) {
+        } else if (samplesPerSec == 44100) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1464,17 +1458,17 @@ int32_t FuncTestManager::TestSpeakerVolu
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (44100 == samplesPerSec || samplesPerSec == 44000) {
+        } else if (44100 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1565,17 +1559,17 @@ int32_t FuncTestManager::TestSpeakerMute
     EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile48.c_str());
-        else if (44100 == samplesPerSec || 44000 == samplesPerSec)
+        else if (44100 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile44.c_str());
         else
         {
             TEST_LOG("\nERROR: Sample rate (%d) is not supported!\n \n",
                      samplesPerSec);
             return -1;
         }
         EXPECT_EQ(0, audioDevice->StartPlayout());
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core.c
@@ -1705,8 +1705,9 @@ static void TimeToFrequency(float time_d
   freq_data[1][PART_LEN] = 0;
   freq_data[0][0] = time_data[0];
   freq_data[0][PART_LEN] = time_data[1];
   for (i = 1; i < PART_LEN; i++) {
     freq_data[0][i] = time_data[2 * i];
     freq_data[1][i] = time_data[2 * i + 1];
   }
 }
+
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_sse2.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/aec/aec_core_sse2.c
@@ -424,8 +424,9 @@ static void OverdriveAndSuppressSSE2(Aec
 }
 
 void WebRtcAec_InitAec_SSE2(void) {
   WebRtcAec_FilterFar = FilterFarSSE2;
   WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
   WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
   WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
 }
+
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
@@ -169,16 +169,17 @@
         {
           'target_name': 'audio_processing_sse2',
           'type': 'static_library',
           'sources': [
             'aec/aec_core_sse2.c',
             'aec/aec_rdft_sse2.c',
           ],
           'cflags': ['-msse2',],
+          'cflags_mozilla': [ '-msse2', ],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],
     }],
     ['(target_arch=="arm" and arm_version==7) or target_arch=="armv7"', {
       'targets': [{
@@ -192,24 +193,27 @@
           'aecm/aecm_core_neon.c',
           'ns/nsx_core_neon.c',
         ],
         'conditions': [
           ['OS=="android" or OS=="ios"', {
             'dependencies': [
               '<(gen_core_neon_offsets_gyp):*',
             ],
-            'sources': [
+            #
+            # We disable the ASM source, because our gyp->Makefile translator
+            # does not support the build steps to get the asm offsets.
+            'sources!': [
               'aecm/aecm_core_neon.S',
               'ns/nsx_core_neon.S',
             ],
             'include_dirs': [
               '<(shared_generated_dir)',
             ],
-            'sources!': [
+            'sources': [
               'aecm/aecm_core_neon.c',
               'ns/nsx_core_neon.c',
             ],
             'includes!': ['../../build/arm_neon.gypi',],
           }],
         ],
       }],
     }],
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -67,17 +67,17 @@ EchoCancellationImpl::EchoCancellationIm
     drift_compensation_enabled_(false),
     metrics_enabled_(false),
     suppression_level_(kModerateSuppression),
     device_sample_rate_hz_(48000),
     stream_drift_samples_(0),
     was_stream_drift_set_(false),
     stream_has_echo_(false),
     delay_logging_enabled_(false),
-    delay_correction_enabled_(false) {}
+    delay_correction_enabled_(true) {} // default to long AEC tail in Mozilla
 
 EchoCancellationImpl::~EchoCancellationImpl() {}
 
 int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
   if (!is_component_enabled()) {
     return apm_->kNoError;
   }
 
@@ -334,20 +334,22 @@ int EchoCancellationImpl::Initialize() {
   int err = ProcessingComponent::Initialize();
   if (err != apm_->kNoError || !is_component_enabled()) {
     return err;
   }
 
   return apm_->kNoError;
 }
 
+#if 0
 void EchoCancellationImpl::SetExtraOptions(const Config& config) {
   delay_correction_enabled_ = config.Get<DelayCorrection>().enabled;
   Configure();
 }
+#endif
 
 void* EchoCancellationImpl::CreateHandle() const {
   Handle* handle = NULL;
   if (WebRtcAec_Create(&handle) != apm_->kNoError) {
     handle = NULL;
   } else {
     assert(handle != NULL);
   }
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/echo_cancellation_impl.h
@@ -29,17 +29,17 @@ class EchoCancellationImpl : public Echo
 
   // EchoCancellation implementation.
   virtual bool is_enabled() const OVERRIDE;
   virtual int device_sample_rate_hz() const OVERRIDE;
   virtual int stream_drift_samples() const OVERRIDE;
 
   // ProcessingComponent implementation.
   virtual int Initialize() OVERRIDE;
-  virtual void SetExtraOptions(const Config& config) OVERRIDE;
+  // virtual void SetExtraOptions(const Config& config) OVERRIDE;
 
  private:
   // EchoCancellation implementation.
   virtual int Enable(bool enable) OVERRIDE;
   virtual int enable_drift_compensation(bool enable) OVERRIDE;
   virtual bool is_drift_compensation_enabled() const OVERRIDE;
   virtual int set_device_sample_rate_hz(int rate) OVERRIDE;
   virtual void set_stream_drift_samples(int drift) OVERRIDE;
--- a/media/webrtc/trunk/webrtc/modules/interface/module_common_types.h
+++ b/media/webrtc/trunk/webrtc/modules/interface/module_common_types.h
@@ -91,24 +91,32 @@ struct RTPVideoHeaderVP8 {
   int8_t temporalIdx;         // Temporal layer index, or kNoTemporalIdx.
   bool layerSync;             // This frame is a layer sync frame.
                               // Disabled if temporalIdx == kNoTemporalIdx.
   int keyIdx;                 // 5 bits; kNoKeyIdx means not used.
   int partitionId;            // VP8 partition ID
   bool beginningOfPartition;  // True if this packet is the first
                               // in a VP8 partition. Otherwise false
 };
+
+struct RTPVideoHeaderH264 {
+  uint8_t nalu_header;
+  bool    single_nalu;
+};
+
 union RTPVideoTypeHeader {
   RTPVideoHeaderVP8 VP8;
+  RTPVideoHeaderH264 H264;
 };
 
 enum RtpVideoCodecTypes {
   kRtpVideoNone,
   kRtpVideoGeneric,
-  kRtpVideoVp8
+  kRtpVideoVp8,
+  kRtpVideoH264
 };
 struct RTPVideoHeader {
   uint16_t width;  // size
   uint16_t height;
 
   bool isFirstPacket;    // first packet in frame
   uint8_t simulcastIdx;  // Index of the simulcast encoder creating
                          // this frame, 0 if not using simulcast.
@@ -892,16 +900,21 @@ inline bool IsNewerSequenceNumber(uint16
          static_cast<uint16_t>(sequence_number - prev_sequence_number) < 0x8000;
 }
 
 inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
   return timestamp != prev_timestamp &&
          static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000;
 }
 
+inline bool IsNewerOrSameTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
+  return timestamp == prev_timestamp ||
+      static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000;
+}
+
 inline uint16_t LatestSequenceNumber(uint16_t sequence_number1,
                                      uint16_t sequence_number2) {
   return IsNewerSequenceNumber(sequence_number1, sequence_number2)
              ? sequence_number1
              : sequence_number2;
 }
 
 inline uint32_t LatestTimestamp(uint32_t timestamp1, uint32_t timestamp2) {
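
IsNewerOrSameTimestamp, added above, is the ">=" counterpart of IsNewerTimestamp; both rely on modulo-2^32 unsigned arithmetic so comparisons stay correct across RTP timestamp wraparound. A small check of the wrap case:

  // Wraparound sanity check for the comparisons above: a timestamp just
  // past the 32-bit wrap is still "newer" than one just before it.
  #include <cstdint>
  static_assert(static_cast<uint32_t>(0x00000002u - 0xFFFFFFFFu) == 3u,
                "unsigned difference stays small across the wrap");
  // 3 < 0x80000000, so IsNewerOrSameTimestamp(0x00000002, 0xFFFFFFFF) holds.
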
--- a/media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
@@ -605,23 +605,23 @@ int32_t ModuleFileUtility::ReadWavHeader
     // Calculate the number of bytes that 10 ms of audio data correspond to.
     if(_wavFormatObj.formatTag == kWaveFormatPcm)
     {
         // TODO (hellner): integer division for 22050 and 11025 would yield
         //                 the same result as the else statement. Remove those
         //                 special cases?
         if(_wavFormatObj.nSamplesPerSec == 44100)
         {
-            _readSizeBytes = 440 * _wavFormatObj.nChannels *
+            _readSizeBytes = 441 * _wavFormatObj.nChannels *
                 (_wavFormatObj.nBitsPerSample / 8);
         } else if(_wavFormatObj.nSamplesPerSec == 22050) {
-            _readSizeBytes = 220 * _wavFormatObj.nChannels *
+            _readSizeBytes = 220 * _wavFormatObj.nChannels * // XXX inexact!
                 (_wavFormatObj.nBitsPerSample / 8);
         } else if(_wavFormatObj.nSamplesPerSec == 11025) {
-            _readSizeBytes = 110 * _wavFormatObj.nChannels *
+            _readSizeBytes = 110 * _wavFormatObj.nChannels * // XXX inexact!
                 (_wavFormatObj.nBitsPerSample / 8);
         } else {
             _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
               _wavFormatObj.nChannels * (_wavFormatObj.nBitsPerSample / 8);
         }
 
     } else {
         _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
@@ -673,32 +673,32 @@ int32_t ModuleFileUtility::InitWavCodec(
             _codecId = kCodecL16_32Khz;
         }
         // Set the packet size for "odd" sampling frequencies so that it
         // properly corresponds to _readSizeBytes.
         else if(samplesPerSec == 11025)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 110;
-            codec_info_.plfreq = 11000;
+            codec_info_.pacsize = 110; // XXX inexact!
+            codec_info_.plfreq = 11000; // XXX inexact!
         }
         else if(samplesPerSec == 22050)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 220;
-            codec_info_.plfreq = 22000;
+            codec_info_.pacsize = 220; // XXX inexact!
+            codec_info_.plfreq = 22000; // XXX inexact!
         }
         else if(samplesPerSec == 44100)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 440;
-            codec_info_.plfreq = 44000;
+            codec_info_.pacsize = 441;
+            codec_info_.plfreq = 44100;
         }
         else if(samplesPerSec == 48000)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
             codec_info_.pacsize = 480;
             codec_info_.plfreq = 48000;
         }
@@ -1121,18 +1121,16 @@ int32_t ModuleFileUtility::WriteWavHeade
     const uint32_t freq,
     const uint32_t bytesPerSample,
     const uint32_t channels,
     const uint32_t format,
     const uint32_t lengthInBytes)
 {
 
     // Frame size in bytes for 10 ms of audio.
-    // TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
-    //                 be taken into consideration here!
     int32_t frameSize = (freq / 100) * bytesPerSample * channels;
 
     // Calculate the number of full frames that the wave file contain.
     const int32_t dataLengthInBytes = frameSize *
         (lengthInBytes / frameSize);
 
     int8_t tmpStr[4];
     int8_t tmpChar;
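
The 440 -> 441 and pacsize/plfreq changes above follow from 10 ms frame arithmetic: 44100 Hz yields exactly 441 samples per 10 ms, while 22050 Hz (220.5) and 11025 Hz (110.25) cannot be represented exactly, which is what the XXX comments flag. A quick illustration of the general formula (not a WebRTC API):

  // 10 ms frame sizes implied by the hunks above.
  int BytesPer10Ms(int sample_rate_hz, int channels, int bits_per_sample) {
    return (sample_rate_hz / 100) * channels * (bits_per_sample / 8);
  }
  // BytesPer10Ms(44100, 1, 16) == 441 * 2 == 882  (exact)
  // BytesPer10Ms(22050, 1, 16) == 220 * 2 == 440  (truncates 220.5: inexact)
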
--- a/media/webrtc/trunk/webrtc/modules/modules.gyp
+++ b/media/webrtc/trunk/webrtc/modules/modules.gyp
@@ -6,20 +6,16 @@
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'includes': [
     '../build/common.gypi',
     'audio_coding/codecs/cng/cng.gypi',
     'audio_coding/codecs/g711/g711.gypi',
-    'audio_coding/codecs/g722/g722.gypi',
-    'audio_coding/codecs/ilbc/ilbc.gypi',
-    'audio_coding/codecs/isac/main/source/isac.gypi',
-    'audio_coding/codecs/isac/fix/source/isacfix.gypi',
     'audio_coding/codecs/pcm16b/pcm16b.gypi',
     'audio_coding/main/source/audio_coding_module.gypi',
     'audio_coding/neteq/neteq.gypi',
     'audio_coding/neteq4/neteq.gypi',
     'audio_conference_mixer/source/audio_conference_mixer.gypi',
     'audio_device/audio_device.gypi',
     'audio_processing/audio_processing.gypi',
     'bitrate_controller/bitrate_controller.gypi',
@@ -31,30 +27,44 @@
     'utility/source/utility.gypi',
     'video_coding/codecs/i420/main/source/i420.gypi',
     'video_coding/main/source/video_coding.gypi',
     'video_capture/video_capture.gypi',
     'video_processing/main/source/video_processing.gypi',
     'video_render/video_render.gypi',
   ],
   'conditions': [
+    ['include_g722==1', {
+      'includes': ['audio_coding/codecs/g722/g722.gypi',],
+    }],
+    ['include_ilbc==1', {
+      'includes': ['audio_coding/codecs/ilbc/ilbc.gypi',],
+    }],
+    ['include_isac==1', {
+      'includes': ['audio_coding/codecs/isac/main/source/isac.gypi',
+                   'audio_coding/codecs/isac/fix/source/isacfix.gypi',],
+    }],
     ['include_opus==1', {
       'includes': ['audio_coding/codecs/opus/opus.gypi',],
     }],
     ['include_tests==1', {
       'includes': [
-        'audio_coding/codecs/isac/isac_test.gypi',
-        'audio_coding/codecs/isac/isacfix_test.gypi',
         'audio_processing/audio_processing_tests.gypi',
         'rtp_rtcp/test/testFec/test_fec.gypi',
         'video_coding/main/source/video_coding_test.gypi',
         'video_coding/codecs/test/video_codecs_test_framework.gypi',
         'video_coding/codecs/test_framework/test_framework.gypi',
         'video_coding/codecs/tools/video_codecs_tools.gypi',
       ], # includes
+      'conditions': [
+        ['include_isac==1', {
+          'includes': ['audio_coding/codecs/isac/isac_test.gypi',
+                       'audio_coding/codecs/isac/isacfix_test.gypi',],
+        }],
+      ],
       'variables': {
         'conditions': [
           # Desktop capturer is supported only on Windows, OSX and Linux.
           ['OS=="win" or OS=="mac" or OS=="linux"', {
             'desktop_capture_supported%': 1,
           }, {
             'desktop_capture_supported%': 0,
           }],
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
@@ -436,16 +436,27 @@ class RtpRtcp : public Module {
     /*
     *   Reset RoundTripTime statistics
     *
     *   return -1 on failure else 0
     */
     virtual int32_t ResetRTT(const uint32_t remoteSSRC)= 0 ;
 
     /*
+     *   Get the local NTP time at which the last RR was received, along
+     *   with the remote packet/octet receive counts (derived from the RR
+     *   report plus cached sender-side send counters).
+     *
+     *   return -1 on failure else 0
+     */
+    virtual int32_t GetReportBlockInfo(const uint32_t remote_ssrc,
+                                       uint32_t* ntp_high,
+                                       uint32_t* ntp_low,
+                                       uint32_t* packets_received,
+                                       uint64_t* octets_received) const = 0;
+    /*
     *   Force a send of a RTCP packet
     *   normal SR and RR are triggered via the process function
     *
     *   return -1 on failure else 0
     */
     virtual int32_t SendRTCP(
         uint32_t rtcpPacketType = kRtcpReport) = 0;
 
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -223,16 +223,37 @@ bool RTCPReceiver::GetAndResetXrRrRtt(ui
   if (xr_rr_rtt_ms_ == 0) {
     return false;
   }
   *rtt_ms = xr_rr_rtt_ms_;
   xr_rr_rtt_ms_ = 0;
   return true;
 }
 
+int32_t RTCPReceiver::GetReportBlockInfo(uint32_t remoteSSRC,
+                                         uint32_t* NTPHigh,
+                                         uint32_t* NTPLow,
+                                         uint32_t* PacketsReceived,
+                                         uint64_t* OctetsReceived) const
+{
+  CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
+
+  RTCPReportBlockInformation* reportBlock =
+      GetReportBlockInformation(remoteSSRC);
+
+  if (reportBlock == NULL) {
+    return -1;
+  }
+  *NTPHigh = reportBlock->lastReceivedRRNTPsecs;
+  *NTPLow = reportBlock->lastReceivedRRNTPfrac;
+  *PacketsReceived = reportBlock->remotePacketsReceived;
+  *OctetsReceived = reportBlock->remoteOctetsReceived;
+  return 0;
+}
+
 int32_t
 RTCPReceiver::NTP(uint32_t *ReceivedNTPsecs,
                   uint32_t *ReceivedNTPfrac,
                   uint32_t *RTCPArrivalTimeSecs,
                   uint32_t *RTCPArrivalTimeFrac,
                   uint32_t *rtcp_timestamp) const
 {
     CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
@@ -500,18 +521,21 @@ void RTCPReceiver::HandleReportBlock(
       registered_ssrcs_.end()) {
     // This block is not for us ignore it.
     return;
   }
 
   // To avoid problem with acquiring _criticalSectionRTCPSender while holding
   // _criticalSectionRTCPReceiver.
   _criticalSectionRTCPReceiver->Leave();
-  uint32_t sendTimeMS =
-      _rtpRtcp.SendTimeOfSendReport(rtcpPacket.ReportBlockItem.LastSR);
+  uint32_t sendTimeMS = 0;
+  uint32_t sentPackets = 0;
+  uint64_t sentOctets = 0;
+  _rtpRtcp.GetSendReportMetadata(rtcpPacket.ReportBlockItem.LastSR,
+                                 &sendTimeMS, &sentPackets, &sentOctets);
   _criticalSectionRTCPReceiver->Enter();
 
   RTCPReportBlockInformation* reportBlock =
       CreateReportBlockInformation(remoteSSRC);
   if (reportBlock == NULL) {
     WEBRTC_TRACE(kTraceError, kTraceRtpRtcp, _id,
                  "\tfailed to CreateReportBlockInformation(%u)", remoteSSRC);
     return;
@@ -519,16 +543,22 @@ void RTCPReceiver::HandleReportBlock(
 
   _lastReceivedRrMs = _clock->TimeInMilliseconds();
   const RTCPPacketReportBlockItem& rb = rtcpPacket.ReportBlockItem;
   reportBlock->remoteReceiveBlock.remoteSSRC = remoteSSRC;
   reportBlock->remoteReceiveBlock.sourceSSRC = rb.SSRC;
   reportBlock->remoteReceiveBlock.fractionLost = rb.FractionLost;
   reportBlock->remoteReceiveBlock.cumulativeLost =
       rb.CumulativeNumOfPacketsLost;
+  if (sentPackets > rb.CumulativeNumOfPacketsLost) {
+    uint32_t packetsReceived = sentPackets - rb.CumulativeNumOfPacketsLost;
+    reportBlock->remotePacketsReceived = packetsReceived;
+    reportBlock->remoteOctetsReceived = (sentOctets / sentPackets) *
+                                        packetsReceived;
+  }
   if (rb.ExtendedHighestSequenceNumber >
       reportBlock->remoteReceiveBlock.extendedHighSeqNum) {
     // We have successfully delivered new RTP packets to the remote side after
     // the last RR was sent from the remote side.
     _lastIncreasedSequenceNumberMs = _lastReceivedRrMs;
   }
   reportBlock->remoteReceiveBlock.extendedHighSeqNum =
       rb.ExtendedHighestSequenceNumber;
@@ -539,24 +569,25 @@ void RTCPReceiver::HandleReportBlock(
   if (rtcpPacket.ReportBlockItem.Jitter > reportBlock->remoteMaxJitter) {
     reportBlock->remoteMaxJitter = rtcpPacket.ReportBlockItem.Jitter;
   }
 
   uint32_t delaySinceLastSendReport =
       rtcpPacket.ReportBlockItem.DelayLastSR;
 
   // local NTP time when we received this
-  uint32_t lastReceivedRRNTPsecs = 0;
-  uint32_t lastReceivedRRNTPfrac = 0;
+  reportBlock->lastReceivedRRNTPsecs = 0;
+  reportBlock->lastReceivedRRNTPfrac = 0;
 
-  _clock->CurrentNtp(lastReceivedRRNTPsecs, lastReceivedRRNTPfrac);
+  _clock->CurrentNtp(reportBlock->lastReceivedRRNTPsecs,
+                     reportBlock->lastReceivedRRNTPfrac);
 
   // time when we received this in MS
-  uint32_t receiveTimeMS = Clock::NtpToMs(lastReceivedRRNTPsecs,
-                                          lastReceivedRRNTPfrac);
+  uint32_t receiveTimeMS = Clock::NtpToMs(reportBlock->lastReceivedRRNTPsecs,
+                                          reportBlock->lastReceivedRRNTPfrac);
 
   // Estimate RTT
   uint32_t d = (delaySinceLastSendReport & 0x0000ffff) * 1000;
   d /= 65536;
   d += ((delaySinceLastSendReport & 0xffff0000) >> 16) * 1000;
 
   int32_t RTT = 0;
 
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
@@ -75,16 +75,22 @@ public:
     int32_t RTT(uint32_t remoteSSRC,
                 uint16_t* RTT,
                 uint16_t* avgRTT,
                 uint16_t* minRTT,
                 uint16_t* maxRTT) const;
 
     int32_t ResetRTT(const uint32_t remoteSSRC);
 
+    int32_t GetReportBlockInfo(uint32_t remoteSSRC,
+                               uint32_t* NTPHigh,
+                               uint32_t* NTPLow,
+                               uint32_t* PacketsReceived,
+                               uint64_t* OctetsReceived) const;
+
     int32_t SenderInfoReceived(RTCPSenderInfo* senderInfo) const;
 
     bool GetAndResetXrRrRtt(uint16_t* rtt_ms);
 
     // get statistics
     int32_t StatisticsReceived(
         std::vector<RTCPReportBlock>* receiveBlocks) const;
 
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
@@ -98,16 +98,20 @@ RTCPPacketInformation::AddReportInfo(
 {
   this->rtt = report_block_info.RTT;
   report_blocks.push_back(report_block_info.remoteReceiveBlock);
 }
 
 RTCPReportBlockInformation::RTCPReportBlockInformation():
     remoteReceiveBlock(),
     remoteMaxJitter(0),
+    remotePacketsReceived(0),
+    remoteOctetsReceived(0),
+    lastReceivedRRNTPsecs(0),
+    lastReceivedRRNTPfrac(0),
     RTT(0),
     minRTT(0),
     maxRTT(0),
     avgRTT(0),
     numAverageCalcs(0)
 {
     memset(&remoteReceiveBlock,0,sizeof(remoteReceiveBlock));
 }
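
The remotePacketsReceived / remoteOctetsReceived fields initialized above are estimates filled in by HandleReportBlock: the sender pairs the RR's LastSR with its cached SR counters, subtracts the RR's cumulative loss, and scales by the average packet size. With made-up numbers:

  // Hypothetical values illustrating the estimate in HandleReportBlock.
  uint32_t sentPackets    = 1000;     // _lastSRPacketCount matched via LastSR
  uint64_t sentOctets     = 1200000;  // _lastSROctetCount (avg 1200 B/packet)
  uint32_t cumulativeLost = 50;       // from the incoming report block
  uint32_t packetsReceived = sentPackets - cumulativeLost;            // 950
  uint64_t octetsReceived  = (sentOctets / sentPackets) * packetsReceived;
  // 1200 * 950 = 1140000 octets -- an average-size approximation only.
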
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
@@ -27,16 +27,20 @@ class RTCPReportBlockInformation
 {
 public:
     RTCPReportBlockInformation();
     ~RTCPReportBlockInformation();
 
     // Statistics
     RTCPReportBlock remoteReceiveBlock;
     uint32_t        remoteMaxJitter;
+    uint32_t        remotePacketsReceived;
+    uint64_t        remoteOctetsReceived;
+    uint32_t        lastReceivedRRNTPsecs;
+    uint32_t        lastReceivedRRNTPfrac;
 
     // RTT
     uint16_t    RTT;
     uint16_t    minRTT;
     uint16_t    maxRTT;
     uint16_t    avgRTT;
     uint32_t    numAverageCalcs;
 };
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -125,16 +125,18 @@ RTCPSender::RTCPSender(const int32_t id,
     internal_report_blocks_(),
     external_report_blocks_(),
     _csrcCNAMEs(),
 
     _cameraDelayMS(0),
 
     _lastSendReport(),
     _lastRTCPTime(),
+    _lastSRPacketCount(),
+    _lastSROctetCount(),
 
     last_xr_rr_(),
 
     _CSRCs(0),
     _CSRC(),
     _includeCSRCs(true),
 
     _sequenceNumberFIR(0),
@@ -159,16 +161,18 @@ RTCPSender::RTCPSender(const int32_t id,
     _xrVoIPMetric(),
     _nackCount(0),
     _pliCount(0),
     _fullIntraRequestCount(0)
 {
     memset(_CNAME, 0, sizeof(_CNAME));
     memset(_lastSendReport, 0, sizeof(_lastSendReport));
     memset(_lastRTCPTime, 0, sizeof(_lastRTCPTime));
+    memset(_lastSRPacketCount, 0, sizeof(_lastSRPacketCount));
+    memset(_lastSROctetCount, 0, sizeof(_lastSROctetCount));
 
     WEBRTC_TRACE(kTraceMemory, kTraceRtpRtcp, id, "%s created", __FUNCTION__);
 }
 
 RTCPSender::~RTCPSender() {
   delete [] _rembSSRC;
   delete [] _appData;
 
@@ -232,16 +236,18 @@ RTCPSender::Init()
     xrSendReceiverReferenceTimeEnabled_ = false;
 
     _xrSendVoIPMetric = false;
 
     memset(&_xrVoIPMetric, 0, sizeof(_xrVoIPMetric));
     memset(_CNAME, 0, sizeof(_CNAME));
     memset(_lastSendReport, 0, sizeof(_lastSendReport));
     memset(_lastRTCPTime, 0, sizeof(_lastRTCPTime));
+    memset(_lastSRPacketCount, 0, sizeof(_lastSRPacketCount));
+    memset(_lastSROctetCount, 0, sizeof(_lastSROctetCount));
     last_xr_rr_.clear();
 
     _nackCount = 0;
     _pliCount = 0;
     _fullIntraRequestCount = 0;
 
     return 0;
 }
@@ -574,36 +580,42 @@ uint32_t
 RTCPSender::LastSendReport( uint32_t& lastRTCPTime)
 {
     CriticalSectionScoped lock(_criticalSectionRTCPSender);
 
     lastRTCPTime = _lastRTCPTime[0];
     return _lastSendReport[0];
 }
 
-uint32_t
-RTCPSender::SendTimeOfSendReport(const uint32_t sendReport)
+bool
+RTCPSender::GetSendReportMetadata(const uint32_t sendReport,
+                                  uint32_t *timeOfSend,
+                                  uint32_t *packetCount,
+                                  uint64_t *octetCount)
 {
     CriticalSectionScoped lock(_criticalSectionRTCPSender);
 
     // This is only saved when we are the sender
     if((_lastSendReport[0] == 0) || (sendReport == 0))
     {
-        return 0; // will be ignored
+        return false;
     } else
     {
         for(int i = 0; i < RTCP_NUMBER_OF_SR; ++i)
         {
             if( _lastSendReport[i] == sendReport)
             {
-                return _lastRTCPTime[i];
+                *timeOfSend = _lastRTCPTime[i];
+                *packetCount = _lastSRPacketCount[i];
+                *octetCount = _lastSROctetCount[i];
+                return true;
             }
         }
     }
-    return 0;
+    return false;
 }
 
 bool RTCPSender::SendTimeOfXrRrReport(uint32_t mid_ntp,
                                       int64_t* time_ms) const {
   CriticalSectionScoped lock(_criticalSectionRTCPSender);
 
   if (last_xr_rr_.empty()) {
     return false;
@@ -684,20 +696,24 @@ int32_t RTCPSender::BuildSR(const Feedba
     // Sender report
     rtcpbuffer[pos++]=(uint8_t)200;
 
     for(int i = (RTCP_NUMBER_OF_SR-2); i >= 0; i--)
     {
         // shift old
         _lastSendReport[i+1] = _lastSendReport[i];
         _lastRTCPTime[i+1] =_lastRTCPTime[i];
+        _lastSRPacketCount[i+1] = _lastSRPacketCount[i];
+        _lastSROctetCount[i+1] = _lastSROctetCount[i];
     }
 
     _lastRTCPTime[0] = Clock::NtpToMs(NTPsec, NTPfrac);
     _lastSendReport[0] = (NTPsec << 16) + (NTPfrac >> 16);
+    _lastSRPacketCount[0] = feedback_state.packet_count_sent;
+    _lastSROctetCount[0] = feedback_state.byte_count_sent;
 
     // The timestamp of this RTCP packet should be estimated as the timestamp of
     // the frame being captured at this moment. We are calculating that
     // timestamp as the last frame's timestamp + the time since the last frame
     // was captured.
     {
       // Needs protection since this method is called on the process thread.
       CriticalSectionScoped lock(_criticalSectionRTCPSender);
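
_lastSendReport[0] above stores the compact "middle 32 bits" NTP timestamp that a remote RR later echoes in its LastSR field; GetSendReportMetadata matches on that value to recover the send time plus the packet/octet counters saved in the parallel arrays. The formation, with hypothetical values:

  // Compact NTP ("LSR") formation used in BuildSR above; values are made up.
  uint32_t NTPsec  = 0x83AA7E80;  // whole seconds since 1900
  uint32_t NTPfrac = 0x40000000;  // fractional part (0.25 s)
  uint32_t lsr = (NTPsec << 16) + (NTPfrac >> 16);   // middle 32 bits
  // An RR whose LastSR == lsr identifies this SR slot, so the cached
  // time/packet/octet entries at the same index can be paired with it.
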
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
@@ -103,17 +103,20 @@ public:
     int32_t CNAME(char cName[RTCP_CNAME_SIZE]);
     int32_t SetCNAME(const char cName[RTCP_CNAME_SIZE]);
 
     int32_t AddMixedCNAME(const uint32_t SSRC,
                           const char cName[RTCP_CNAME_SIZE]);
 
     int32_t RemoveMixedCNAME(const uint32_t SSRC);
 
-    uint32_t SendTimeOfSendReport(const uint32_t sendReport);
+    bool GetSendReportMetadata(const uint32_t sendReport,
+                               uint32_t *timeOfSend,
+                               uint32_t *packetCount,
+                               uint64_t *octetCount);
 
     bool SendTimeOfXrRrReport(uint32_t mid_ntp, int64_t* time_ms) const;
 
     bool TimeToSendRTCPReport(const bool sendKeyframeBeforeRTP = false) const;
 
     uint32_t LastSendReport(uint32_t& lastRTCPTime);
 
     int32_t SendRTCP(
@@ -300,16 +303,18 @@ private:
     std::map<uint32_t, RTCPReportBlock*> external_report_blocks_;
     std::map<uint32_t, RTCPUtility::RTCPCnameInformation*> _csrcCNAMEs;
 
     int32_t         _cameraDelayMS;
 
     // Sent
     uint32_t        _lastSendReport[RTCP_NUMBER_OF_SR];  // allow packet loss and RTT above 1 sec
     uint32_t        _lastRTCPTime[RTCP_NUMBER_OF_SR];
+    uint32_t        _lastSRPacketCount[RTCP_NUMBER_OF_SR];
+    uint64_t        _lastSROctetCount[RTCP_NUMBER_OF_SR];
 
     // Sent XR receiver reference time report.
     // <mid ntp (mid 32 bits of the 64 bits NTP timestamp), send time in ms>.
     std::map<uint32_t, int64_t> last_xr_rr_;
 
     // send CSRCs
     uint8_t         _CSRCs;
     uint32_t        _CSRC[kRtpCsrcSize];
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>  // memcpy
+
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+
+namespace webrtc {
+
+RtpFormatH264::RtpFormatH264(const uint8_t* payload_data,
+                             uint32_t payload_size,
+                             int max_payload_len)
+    : payload_data_(payload_data),
+      payload_size_(static_cast<int>(payload_size)),
+      max_payload_len_(static_cast<int>(max_payload_len)),
+      fragments_(0),
+      fragment_size_(0),
+      next_fragment_(-1) {
+  if (payload_size_ <= max_payload_len_) {
+    fragments_ = 0;
+  } else {
+    fragment_size_ = max_payload_len_ - kH264FUAHeaderLengthInBytes;
+    fragments_ = ((payload_size_ - kH264NALHeaderLengthInBytes) + (fragment_size_ - 1)) /
+                 fragment_size_;
+    next_fragment_ = 0;
+  }
+}
+
+RtpFormatH264::~RtpFormatH264() {
+}
+
+int RtpFormatH264::NextPacket(uint8_t* buffer,
+                              int* bytes_to_send,
+                              bool* last_packet) {
+  if (next_fragment_ == fragments_) {
+    *bytes_to_send = 0;
+    *last_packet   = true;
+    return -1;
+  }
+
+  // TODO(jesup) This supports Mode 1 packetization only
+
+  // For mode 0, everything is single-NAL; that could be handled by simply
+  // setting a large max_payload_len when constructing this (and telling the
+  // codec to keep generated NAL sizes under one packet).  If the codec
+  // goes over, a fragmented RTP packet would be sent (and may or may not work).
+  uint8_t header = payload_data_[0];
+  uint8_t type   = header & kH264NAL_TypeMask;
+  if (payload_size_ <= max_payload_len_) {
+    // single NAL_UNIT
+    *bytes_to_send = payload_size_;
+    // TODO(jesup) - this doesn't work correctly for Mode 0.
+    // Unfortunately, we don't have a good signal to which NAL generated by
+    // the encoder is the last NAL of the frame.  We need that to be passed
+    // through to this point, instead of trying to generate it from the packets
+    if (type == kH264NALU_SPS || type == kH264NALU_PPS ||
+        type == kH264NALU_SEI) {
+      *last_packet   = false;
+    } else {
+      *last_packet   = true;
+    }
+    memcpy(buffer, payload_data_, payload_size_);
+    WEBRTC_TRACE(kTraceStream, kTraceRtpRtcp, -1,
+                 "RtpFormatH264(single NALU with type:%d, payload_size:%d",
+                 type, payload_size_);
+    return 0;
+  } else {
+    uint8_t fu_indicator = (header & (kH264NAL_FBit | kH264NAL_NRIMask)) |
+                           kH264NALU_FUA;
+    uint8_t fu_header = 0;
+    bool first_fragment = (next_fragment_ == 0);
+    bool last_fragment = (next_fragment_ == (fragments_ - 1));
+
+    // S | E | R | 5 bit type.
+    fu_header |= (first_fragment ? kH264FU_SBit : 0);
+    fu_header |= (last_fragment ? kH264FU_EBit : 0);
+    fu_header |= type;
+    buffer[0] = fu_indicator;
+    buffer[1] = fu_header;
+
+    if (last_fragment) {
+      // last fragment
+      *bytes_to_send = payload_size_ -
+                       kH264NALHeaderLengthInBytes -
+                       next_fragment_ * fragment_size_ +
+                       kH264FUAHeaderLengthInBytes;
+      *last_packet   = true;
+      memcpy(buffer + kH264FUAHeaderLengthInBytes,
+             payload_data_ + kH264NALHeaderLengthInBytes +
+                next_fragment_ * fragment_size_,
+             *bytes_to_send - kH264FUAHeaderLengthInBytes);
+      // We do not send original NALU header
+    } else {
+      *bytes_to_send = fragment_size_ + kH264FUAHeaderLengthInBytes;
+      *last_packet   = false;
+      memcpy(buffer + kH264FUAHeaderLengthInBytes,
+             payload_data_ + kH264NALHeaderLengthInBytes +
+                 next_fragment_ * fragment_size_,
+             fragment_size_);  // We do not send original NALU header
+    }
+    next_fragment_++;
+    return 1;
+  }
+}
+
+}  // namespace webrtc
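
A hedged sketch of driving the packetizer above: the frame buffer, sizes, and send step are illustrative, not part of the patch. NextPacket is called until it sets last_packet or returns a negative value.

  // Illustrative NextPacket loop for RtpFormatH264; values are made up.
  uint8_t frame[3000];              // one encoded H264 frame, assumed filled
  const int kMaxPayloadLen = 1200;  // fits a typical MTU after RTP headers
  webrtc::RtpFormatH264 packetizer(frame, sizeof(frame), kMaxPayloadLen);

  uint8_t packet[1200];
  int bytes = 0;
  bool last = false;
  do {
    if (packetizer.NextPacket(packet, &bytes, &last) < 0)
      break;                        // all fragments consumed (or error)
    // hand packet[0..bytes) to the RTP sender here
  } while (!last);
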
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_h264.h
@@ -0,0 +1,99 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the declaration of the H264 packetizer class.
+ * A packetizer object is created for each encoded video frame. The
+ * constructor is called with the payload data and size, together with
+ * the maximum payload length (including the H264 payload header).
+ *
+ * After creating the packetizer, the method NextPacket is called
+ * repeatedly to get all packets for the frame. The method sets
+ * *last_packet to false as long as there are more packets left to fetch.
+ */
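+
+/*
+ * Illustrative usage (a sketch only; Send() is a hypothetical stand-in for
+ * the caller's packet-sending routine):
+ *
+ *   RtpFormatH264 packetizer(payload, payload_size, max_payload_len);
+ *   bool last = false;
+ *   while (!last) {
+ *     int bytes = 0;
+ *     int ret = packetizer.NextPacket(buffer, &bytes, &last);
+ *     if (ret < 0)
+ *       break;  // error
+ *     Send(buffer, bytes);
+ *     if (ret == 0)
+ *       last = true;  // single NAL unit: the frame is complete
+ *   }
+ */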
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/system_wrappers/interface/constructor_magic.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// Packetizer for H264.
+class RtpFormatH264 {
+ public:
+  enum {
+    kH264NALU_SLICE             = 1,
+    kH264NALU_IDR               = 5,
+    kH264NALU_SEI               = 6,
+    kH264NALU_SPS               = 7,
+    kH264NALU_PPS               = 8,
+    kH264NALU_STAPA             = 24,
+    kH264NALU_FUA               = 28
+  };
+
+  static const int kH264NALHeaderLengthInBytes = 1;
+  static const int kH264FUAHeaderLengthInBytes = 2;
+
+  // Bits of the NAL header octet (shared by the FU-A/FU-B indicator).
+  enum H264NalDefs {
+    kH264NAL_FBit = 0x80,
+    kH264NAL_NRIMask = 0x60,
+    kH264NAL_TypeMask = 0x1F
+  };
+
+  enum H264FUDefs {
+    // Bits for FU (A and B) headers.
+    kH264FU_SBit = 0x80,
+    kH264FU_EBit = 0x40,
+    kH264FU_RBit = 0x20
+  };
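+
+  // Octet layouts (RFC 6184): the FU indicator shares the plain NAL header
+  // layout, while the FU header replaces F/NRI with the S/E/R flags:
+  //   NAL header / FU indicator:  |F|NRI(2)|Type(5)|
+  //   FU header:                  |S|E|R|Type(5)|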
+
+  // Initialize with payload from encoder.
+  // The payload_data must be exactly one encoded H264 frame.
+  RtpFormatH264(const uint8_t* payload_data,
+                uint32_t payload_size,
+                int max_payload_len);
+
+  ~RtpFormatH264();
+
+  // Get the next payload with H264 payload header.
+  // max_payload_len limits the sum length of payload and H264 payload header.
+  // buffer is a pointer to where the output will be written.
+  // bytes_to_send is an output variable that will contain number of bytes
+  // written to buffer. Parameter last_packet is true for the last packet of
+  // the frame, false otherwise (i.e., call the function again to get the
+  // next packet).
+  // Returns 0 on success for a single NAL unit.
+  // Returns 1 on success for fragmentation.
+  // Returns -1 on error.
+  int NextPacket(uint8_t* buffer,
+                 int* bytes_to_send,
+                 bool* last_packet);
+
+ private:
+  const uint8_t* payload_data_;
+  const int payload_size_;
+  const int max_payload_len_;
+  int   fragments_;
+  int   fragment_size_;
+  int   next_fragment_;
+
+  DISALLOW_COPY_AND_ASSIGN(RtpFormatH264);
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
@@ -443,16 +443,18 @@ class RTPPayloadVideoStrategy : public R
       const char payloadName[RTP_PAYLOAD_NAME_SIZE],
       const int8_t payloadType,
       const uint32_t frequency,
       const uint8_t channels,
       const uint32_t rate) const OVERRIDE {
     RtpVideoCodecTypes videoType = kRtpVideoGeneric;
     if (ModuleRTPUtility::StringCompare(payloadName, "VP8", 3)) {
       videoType = kRtpVideoVp8;
+    } else if (ModuleRTPUtility::StringCompare(payloadName, "H264", 4)) {
+      videoType = kRtpVideoH264;
     } else if (ModuleRTPUtility::StringCompare(payloadName, "I420", 4)) {
       videoType = kRtpVideoGeneric;
     } else if (ModuleRTPUtility::StringCompare(payloadName, "ULPFEC", 6)) {
       videoType = kRtpVideoNone;
     } else {
       videoType = kRtpVideoGeneric;
     }
     ModuleRTPUtility::Payload* payload = new ModuleRTPUtility::Payload;
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
@@ -10,16 +10,17 @@
 
 #include "webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h"
 
 #include <assert.h>
 #include <string.h>
 
 #include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 #include "webrtc/system_wrappers/interface/trace_event.h"
 
 namespace webrtc {
 
 RTPReceiverStrategy* RTPReceiverStrategy::CreateVideoStrategy(
@@ -119,16 +120,18 @@ int32_t RTPReceiverVideo::ParseVideoCode
                rtp_header->header.timestamp);
 
   switch (rtp_header->type.Video.codec) {
     case kRtpVideoGeneric:
       rtp_header->type.Video.isFirstPacket = is_first_packet;
       return ReceiveGenericCodec(rtp_header, payload_data, payload_data_length);
     case kRtpVideoVp8:
       return ReceiveVp8Codec(rtp_header, payload_data, payload_data_length);
+    case kRtpVideoH264:
+      return ReceiveH264Codec(rtp_header, payload_data, payload_data_length);
     case kRtpVideoNone:
       break;
   }
   return -1;
 }
 
 int32_t RTPReceiverVideo::BuildRTPheader(
     const WebRtcRTPHeader* rtp_header,
@@ -215,16 +218,98 @@ int32_t RTPReceiverVideo::ReceiveVp8Code
   if (data_callback_->OnReceivedPayloadData(parsed_packet.info.VP8.data,
                                             parsed_packet.info.VP8.dataLength,
                                             rtp_header) != 0) {
     return -1;
   }
   return 0;
 }
 
+int32_t RTPReceiverVideo::ReceiveH264Codec(WebRtcRTPHeader* rtp_header,
+                                           const uint8_t* payload_data,
+                                           uint16_t payload_data_length) {
+  // Start and length of the payload handed to the callback.
+  uint8_t* payload;
+  uint16_t payload_length;
+  uint8_t nal_type = payload_data[0] & RtpFormatH264::kH264NAL_TypeMask;
+
+  // Note: This code handles only FU-A and single NALU mode packets.
+  if (nal_type == RtpFormatH264::kH264NALU_FUA) {
+    // Fragmentation
+    uint8_t fnri = payload_data[0] &
+                   (RtpFormatH264::kH264NAL_FBit | RtpFormatH264::kH264NAL_NRIMask);
+    uint8_t original_nal_type = payload_data[1] & RtpFormatH264::kH264NAL_TypeMask;
+    bool first_fragment = !!(payload_data[1] & RtpFormatH264::kH264FU_SBit);
+    // bool last_fragment = !!(payload_data[1] & RtpFormatH264::kH264FU_EBit);
+
+    uint8_t original_nal_header = fnri | original_nal_type;
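+    // For the first fragment, rebuild the original one-byte NAL header in
+    // place: skip the FU indicator and overwrite the FU header byte with the
+    // reconstructed header, so the callback sees a normal NAL unit start.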
+    if (first_fragment) {
+      payload = const_cast<uint8_t*>(payload_data) +
+          RtpFormatH264::kH264NALHeaderLengthInBytes;
+      payload[0] = original_nal_header;
+      payload_length = payload_data_length -
+          RtpFormatH264::kH264NALHeaderLengthInBytes;
+    } else {
+      payload = const_cast<uint8_t*>(payload_data) +
+          RtpFormatH264::kH264FUAHeaderLengthInBytes;
+      payload_length = payload_data_length -
+          RtpFormatH264::kH264FUAHeaderLengthInBytes;
+    }
+
+    // Fill in the WebRtcRTPHeader.
+    if (original_nal_type == RtpFormatH264::kH264NALU_IDR) {
+      rtp_header->frameType = kVideoFrameKey;
+    } else {
+      rtp_header->frameType = kVideoFrameDelta;
+    }
+    rtp_header->type.Video.codec    = kRtpVideoH264;
+    rtp_header->type.Video.isFirstPacket = first_fragment;
+    RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
+    h264_header->nalu_header        = original_nal_header;
+    h264_header->single_nalu        = false;
+  } else {
+    // single NALU
+    payload = const_cast<uint8_t*>(payload_data);
+    payload_length = payload_data_length;
+
+    rtp_header->type.Video.codec    = kRtpVideoH264;
+    rtp_header->type.Video.isFirstPacket = true;
+    RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
+    h264_header->nalu_header        = payload_data[0];
+    h264_header->single_nalu        = true;
+
+    // Derive the frame type (and a timestamp tweak) from the NAL type.
+    switch (nal_type) {
+      // TODO(jesup): Evil hack.  The jitter buffer *really* doesn't like
+      // "frames" with identical timestamps.  NOTE: this only works for
+      // SPS/PPS/IDR order, not PPS/SPS/IDR.  Keep this until all issues
+      // are resolved in the jitter buffer.
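+      // E.g. SPS, PPS, IDR arriving with timestamp T are delivered as
+      // T-20, T-10 and T respectively (each fall-through subtracts 10 more).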
+      case RtpFormatH264::kH264NALU_SPS:
+        rtp_header->header.timestamp -= 10;
+        // fall through
+      case RtpFormatH264::kH264NALU_PPS:
+        rtp_header->header.timestamp -= 10;
+        // fall through
+      case RtpFormatH264::kH264NALU_IDR:
+        rtp_header->frameType = kVideoFrameKey;
+        break;
+      default:
+        rtp_header->frameType = kVideoFrameDelta;
+        break;
+    }
+  }
+
+  if (data_callback_->OnReceivedPayloadData(payload,
+                                            payload_length,
+                                            rtp_header) != 0) {
+    return -1;
+  }
+  return 0;
+}
+
 int32_t RTPReceiverVideo::ReceiveGenericCodec(
     WebRtcRTPHeader* rtp_header,
     const uint8_t* payload_data,
     uint16_t payload_data_length) {
   uint8_t generic_header = *payload_data++;
   --payload_data_length;
 
   rtp_header->frameType =
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
@@ -64,16 +64,20 @@ class RTPReceiverVideo : public RTPRecei
   int32_t ReceiveGenericCodec(WebRtcRTPHeader* rtp_header,
                               const uint8_t* payload_data,
                               uint16_t payload_data_length);
 
   int32_t ReceiveVp8Codec(WebRtcRTPHeader* rtp_header,
                           const uint8_t* payload_data,
                           uint16_t payload_data_length);
 
+  int32_t ReceiveH264Codec(WebRtcRTPHeader* rtp_header,
+                          const uint8_t* payload_data,
+                          uint16_t payload_data_length);
+
   int32_t BuildRTPheader(const WebRtcRTPHeader* rtp_header,
                          uint8_t* data_buffer) const;
 
  private:
   int32_t ParseVideoCodecSpecific(
       WebRtcRTPHeader* rtp_header,
       const uint8_t* payload_data,
       uint16_t payload_data_length,
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp.gypi
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp.gypi
@@ -77,16 +77,18 @@
         'rtp_payload_registry.cc',
         'rtp_receiver_strategy.cc',
         'rtp_receiver_strategy.h',
         'rtp_receiver_video.cc',
         'rtp_receiver_video.h',
         'rtp_sender_video.cc',
         'rtp_sender_video.h',
         'video_codec_information.h',
+        'rtp_format_h264.cc',
+        'rtp_format_h264.h',
         'rtp_format_vp8.cc',
         'rtp_format_vp8.h',
         'rtp_format_video_generic.h',
         'vp8_partition_aggregator.cc',
         'vp8_partition_aggregator.h',
         # Mocks
         '../mocks/mock_rtp_rtcp.h',
         'mock/mock_rtp_payload_strategy.h',
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -894,16 +894,29 @@ int32_t ModuleRtpRtcpImpl::RTT(const uin
 
 // Reset RoundTripTime statistics.
 int32_t ModuleRtpRtcpImpl::ResetRTT(const uint32_t remote_ssrc) {
   WEBRTC_TRACE(kTraceModuleCall, kTraceRtpRtcp, id_, "ResetRTT(SSRC:%u)",
                remote_ssrc);
   return rtcp_receiver_.ResetRTT(remote_ssrc);
 }
 
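+// Mozilla addition: expose the NTP timestamp and cumulative packet/octet
+// counts recorded for |remote_ssrc| by the RTCP receiver, for stats
+// reporting.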
+int32_t ModuleRtpRtcpImpl::GetReportBlockInfo(const uint32_t remote_ssrc,
+                                              uint32_t* ntp_high,
+                                              uint32_t* ntp_low,
+                                              uint32_t* packets_received,
+                                              uint64_t* octets_received) const {
+  WEBRTC_TRACE(kTraceModuleCall, kTraceRtpRtcp, id_, "GetReportBlockInfo()");
+
+  return rtcp_receiver_.GetReportBlockInfo(remote_ssrc,
+                                           ntp_high, ntp_low,
+                                           packets_received, octets_received);
+}
+
 // Reset RTP data counters for the sending side.
 int32_t ModuleRtpRtcpImpl::ResetSendDataCountersRTP() {
   WEBRTC_TRACE(kTraceModuleCall, kTraceRtpRtcp, id_,
                "ResetSendDataCountersRTP()");
   rtp_sender_.ResetDataCounters();
   return 0;  // TODO(pwestin): change to void.
 }
 
@@ -1524,19 +1537,24 @@ void ModuleRtpRtcpImpl::OnRequestSendRep
 
 int32_t ModuleRtpRtcpImpl::SendRTCPReferencePictureSelection(
     const uint64_t picture_id) {
   RTCPSender::FeedbackState feedback_state(this);
   return rtcp_sender_.SendRTCP(
       feedback_state, kRtcpRpsi, 0, 0, false, picture_id);
 }
 
-uint32_t ModuleRtpRtcpImpl::SendTimeOfSendReport(
-    const uint32_t send_report) {
-  return rtcp_sender_.SendTimeOfSendReport(send_report);
+bool ModuleRtpRtcpImpl::GetSendReportMetadata(const uint32_t send_report,
+                                              uint32_t* time_of_send,
+                                              uint32_t* packet_count,
+                                              uint64_t* octet_count) {
+  return rtcp_sender_.GetSendReportMetadata(send_report,
+                                            time_of_send,
+                                            packet_count,
+                                            octet_count);
 }
 
 bool ModuleRtpRtcpImpl::SendTimeOfXrRrReport(
     uint32_t mid_ntp, int64_t* time_ms) const {
   return rtcp_sender_.SendTimeOfXrRrReport(mid_ntp, time_ms);
 }
 
 void ModuleRtpRtcpImpl::OnReceivedNACK(
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -173,16 +173,22 @@ class ModuleRtpRtcpImpl : public RtpRtcp
                       uint16_t* rtt,
                       uint16_t* avg_rtt,
                       uint16_t* min_rtt,
                       uint16_t* max_rtt) const OVERRIDE;
 
   // Reset RoundTripTime statistics.
   virtual int32_t ResetRTT(const uint32_t remote_ssrc) OVERRIDE;
 
+  virtual int32_t GetReportBlockInfo(const uint32_t remote_ssrc,
+                                     uint32_t* ntp_high,
+                                     uint32_t* ntp_low,
+                                     uint32_t* packets_received,
+                                     uint64_t* octets_received) const OVERRIDE;
+
   // Force a send of an RTCP packet.
   // Normal SR and RR are triggered via the process function.
   virtual int32_t SendRTCP(uint32_t rtcp_packet_type = kRtcpReport) OVERRIDE;
 
   virtual int32_t ResetSendDataCountersRTP() OVERRIDE;
 
   // Statistics of the amount of data sent and received.
   virtual int32_t DataCountersRTP(uint32_t* bytes_sent,
@@ -349,17 +355,20 @@ class ModuleRtpRtcpImpl : public RtpRtcp
                            uint32_t* fec_rate,
                            uint32_t* nackRate) const OVERRIDE;
 
   virtual void RegisterVideoBitrateObserver(BitrateStatisticsObserver* observer)
       OVERRIDE;
 
   virtual BitrateStatisticsObserver* GetVideoBitrateObserver() const OVERRIDE;
 
-  virtual uint32_t SendTimeOfSendReport(const uint32_t send_report);
+  virtual bool GetSendReportMetadata(const uint32_t send_report,
+                                     uint32_t* time_of_send,
+                                     uint32_t* packet_count,
+                                     uint64_t* octet_count);
 
   virtual bool SendTimeOfXrRrReport(uint32_t mid_ntp, int64_t* time_ms) const;
 
   // Good state of RTP receiver inform sender.
   virtual int32_t SendRTCPReferencePictureSelection(
       const uint64_t picture_id) OVERRIDE;
 
   virtual void RegisterSendChannelRtpStatisticsCallback(
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -12,16 +12,17 @@
 
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
 
 #include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
 #include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 #include "webrtc/system_wrappers/interface/trace_event.h"
 
 namespace webrtc {
 enum { REDForFECHeaderLength = 1 };
@@ -87,16 +88,18 @@ int32_t RTPSenderVideo::RegisterVideoPay
     const int8_t payloadType,
     const uint32_t maxBitRate,
     ModuleRTPUtility::Payload*& payload) {
   CriticalSectionScoped cs(_sendVideoCritsect);
 
   RtpVideoCodecTypes videoType = kRtpVideoGeneric;
   if (ModuleRTPUtility::StringCompare(payloadName, "VP8",3)) {
     videoType = kRtpVideoVp8;
+  } else if (ModuleRTPUtility::StringCompare(payloadName, "H264", 4)) {
+    videoType = kRtpVideoH264;
   } else if (ModuleRTPUtility::StringCompare(payloadName, "I420", 4)) {
     videoType = kRtpVideoGeneric;
   } else {
     videoType = kRtpVideoGeneric;
   }
   payload = new ModuleRTPUtility::Payload;
   payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
   strncpy(payload->name, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
@@ -280,63 +283,70 @@ RTPSenderVideo::SendVideo(const RtpVideo
                           const FrameType frameType,
                           const int8_t payloadType,
                           const uint32_t captureTimeStamp,
                           int64_t capture_time_ms,
                           const uint8_t* payloadData,
                           const uint32_t payloadSize,
                           const RTPFragmentationHeader* fragmentation,
                           VideoCodecInformation* codecInfo,
-                          const RTPVideoTypeHeader* rtpTypeHdr)
-{
-    if( payloadSize == 0)
-    {
-        return -1;
-    }
+                          const RTPVideoTypeHeader* rtpTypeHdr) {
+  if (payloadSize == 0) {
+    return -1;
+  }
 
-    if (frameType == kVideoFrameKey) {
-      producer_fec_.SetFecParameters(&key_fec_params_,
-                                     _numberFirstPartition);
-    } else {
-      producer_fec_.SetFecParameters(&delta_fec_params_,
-                                     _numberFirstPartition);
-    }
+  if (frameType == kVideoFrameKey) {
+    producer_fec_.SetFecParameters(&key_fec_params_,
+                                   _numberFirstPartition);
+  } else {
+    producer_fec_.SetFecParameters(&delta_fec_params_,
+                                   _numberFirstPartition);
+  }
 
-    // Default setting for number of first partition packets:
-    // Will be extracted in SendVP8 for VP8 codec; other codecs use 0
-    _numberFirstPartition = 0;
+  // Default setting for number of first partition packets:
+  // Will be extracted in SendVP8 for VP8 codec; other codecs use 0
+  _numberFirstPartition = 0;
 
-    int32_t retVal = -1;
-    switch(videoType)
-    {
-    case kRtpVideoGeneric:
-        retVal = SendGeneric(frameType, payloadType, captureTimeStamp,
-                             capture_time_ms, payloadData, payloadSize);
-        break;
-    case kRtpVideoVp8:
-        retVal = SendVP8(frameType,
-                         payloadType,
-                         captureTimeStamp,
-                         capture_time_ms,
-                         payloadData,
-                         payloadSize,
-                         fragmentation,
-                         rtpTypeHdr);
-        break;
-    default:
-        assert(false);
-        break;
-    }
-    if(retVal <= 0)
-    {
-        return retVal;
-    }
-    WEBRTC_TRACE(kTraceStream, kTraceRtpRtcp, _id, "%s(timestamp:%u)",
-                 __FUNCTION__, captureTimeStamp);
-    return 0;
+  int32_t retVal = -1;
+  switch (videoType) {
+  case kRtpVideoGeneric:
+    retVal = SendGeneric(frameType, payloadType, captureTimeStamp,
+                         capture_time_ms, payloadData, payloadSize);
+    break;
+  case kRtpVideoVp8:
+    retVal = SendVP8(frameType,
+                     payloadType,
+                     captureTimeStamp,
+                     capture_time_ms,
+                     payloadData,
+                     payloadSize,
+                     fragmentation,
+                     rtpTypeHdr);
+    break;
+  case kRtpVideoH264:
+    retVal = SendH264(frameType,
+                      payloadType,
+                      captureTimeStamp,
+                      capture_time_ms,
+                      payloadData,
+                      payloadSize,
+                      fragmentation,
+                      rtpTypeHdr);
+    break;
+  default:
+    assert(false);
+    break;
+  }
+
+  if (retVal <= 0) {
+    return retVal;
+  }
+  WEBRTC_TRACE(kTraceStream, kTraceRtpRtcp, _id, "%s(timestamp:%u)",
+               __FUNCTION__, captureTimeStamp);
+  return 0;
 }
 
 int32_t RTPSenderVideo::SendGeneric(const FrameType frame_type,
                                     const int8_t payload_type,
                                     const uint32_t capture_timestamp,
                                     int64_t capture_time_ms,
                                     const uint8_t* payload,
                                     uint32_t size) {
@@ -481,16 +491,62 @@ RTPSenderVideo::SendVP8(const FrameType 
                        " %d", _rtpSender.SequenceNumber());
         }
     }
     TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms,
                            "timestamp", _rtpSender.Timestamp());
     return 0;
 }
 
+int32_t RTPSenderVideo::SendH264(const FrameType frameType,
+                                 const int8_t payloadType,
+                                 const uint32_t captureTimeStamp,
+                                 int64_t capture_time_ms,
+                                 const uint8_t* payloadData,
+                                 const uint32_t payloadSize,
+                                 const RTPFragmentationHeader* fragmentation,
+                                 const RTPVideoTypeHeader* rtpTypeHdr) {
+  const uint16_t rtpHeaderLength = _rtpSender.RTPHeaderLength();
+  int32_t payloadBytesToSend = payloadSize;
+  const uint8_t* data = payloadData;
+  uint16_t maxPayloadLengthH264 = _rtpSender.MaxDataPayloadLength();
+
+  RtpFormatH264 packetizer(data, payloadBytesToSend, maxPayloadLengthH264);
+
+  StorageType storage = kAllowRetransmission;
+  bool protect = (frameType == kVideoFrameKey);
+  bool last = false;
+
+  while (!last) {
+    // Write H264 Payload
+    uint8_t dataBuffer[IP_PACKET_SIZE] = {0};
+    int payloadBytesInPacket = 0;
+    int ret_val = packetizer.NextPacket(&dataBuffer[rtpHeaderLength],
+                                        &payloadBytesInPacket, &last);
+    if (ret_val < 0) {
+      return -1;
+    }
+
+    // Write RTP header.
+    // Set marker bit true if this is the last packet in frame.
+    _rtpSender.BuildRTPheader(dataBuffer, payloadType, last,
+                              captureTimeStamp, capture_time_ms);
+    if (-1 == SendVideoPacket(dataBuffer, payloadBytesInPacket,
+                              rtpHeaderLength, captureTimeStamp,
+                              capture_time_ms, storage, protect)) {
+      WEBRTC_TRACE(kTraceError, kTraceRtpRtcp, _id,
+                   "RTPSenderVideo::SendH264 failed to send packet number %d",
+                   _rtpSender.SequenceNumber());
+    }
+
+    if (ret_val == 0) {
+      // Single NAL unit: NextPacket() leaves *last_packet false for
+      // SPS/PPS/SEI, so end the loop explicitly.
+      last = true;
+    }
+  }
+  return 0;
+}
+
 void RTPSenderVideo::ProcessBitrate() {
   _videoBitrate.Process();
   _fecOverheadRate.Process();
 }
 
 uint32_t RTPSenderVideo::VideoBitrateSent() const {
   return _videoBitrate.BitrateLast();
 }
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -106,16 +106,25 @@ private:
                     const int8_t payloadType,
                     const uint32_t captureTimeStamp,
                     int64_t capture_time_ms,
                     const uint8_t* payloadData,
                     const uint32_t payloadSize,
                     const RTPFragmentationHeader* fragmentation,
                     const RTPVideoTypeHeader* rtpTypeHdr);
 
+    int32_t SendH264(const FrameType frameType,
+                     const int8_t payloadType,
+                     const uint32_t captureTimeStamp,
+                     int64_t capture_time_ms,
+                     const uint8_t* payloadData,
+                     const uint32_t payloadSize,
+                     const RTPFragmentationHeader* fragmentation,
+                     const RTPVideoTypeHeader* rtpTypeHdr);
+
 private:
     int32_t             _id;
     RTPSenderInterface&        _rtpSender;
 
     CriticalSectionWrapper*   _sendVideoCritsect;
     RtpVideoCodecTypes  _videoType;
     VideoCodecInformation*  _videoCodecInformation;
     uint32_t            _maxBitrate;
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
@@ -16,17 +16,17 @@
 
 #if defined(_WIN32)
 // Order for these headers are important
 #include <Windows.h>  // FILETIME
 
 #include <WinSock.h>  // timeval
 
 #include <MMSystem.h>  // timeGetTime
-#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_MAC))
+#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_BSD) || (defined WEBRTC_MAC))
 #include <sys/time.h>  // gettimeofday
 #include <time.h>
 #endif
 #if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
 #include <stdio.h>
 #endif
 
 #include "webrtc/system_wrappers/interface/tick_util.h"
@@ -91,19 +91,19 @@ uint32_t GetCurrentRTP(Clock* clock, uin
   local_clock->CurrentNtp(secs, frac);
   if (use_global_clock) {
     delete local_clock;
   }
   return ConvertNTPTimeToRTP(secs, frac, freq);
 }
 
 uint32_t ConvertNTPTimeToRTP(uint32_t NTPsec, uint32_t NTPfrac, uint32_t freq) {
   float ftemp = (float)NTPfrac / (float)NTP_FRAC;
   uint32_t tmp = (uint32_t)(ftemp * freq);
   return NTPsec * freq + tmp;
 }
 
 uint32_t ConvertNTPTimeToMS(uint32_t NTPsec, uint32_t NTPfrac) {
   int freq = 1000;
   float ftemp = (float)NTPfrac / (float)NTP_FRAC;
   uint32_t tmp = (uint32_t)(ftemp * freq);
   uint32_t MStime = NTPsec * freq + tmp;
   return MStime;
@@ -113,17 +113,17 @@ uint32_t ConvertNTPTimeToMS(uint32_t NTP
  * Misc utility routines
  */
 
 #if defined(_WIN32)
 bool StringCompare(const char* str1, const char* str2,
                    const uint32_t length) {
   return (_strnicmp(str1, str2, length) == 0) ? true : false;
 }
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 bool StringCompare(const char* str1, const char* str2,
                    const uint32_t length) {
   return (strncasecmp(str1, str2, length) == 0) ? true : false;
 }
 #endif
 
 /* for RTP/RTCP
     All integer fields are carried in network byte order, that is, most
--- a/media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
@@ -82,17 +82,17 @@ int32_t FilePlayerImpl::Frequency() cons
     if(_codec.plfreq == 11000)
     {
         return 16000;
     }
     else if(_codec.plfreq == 22000)
     {
         return 32000;
     }
-    else if(_codec.plfreq == 44000)
+    else if (_codec.plfreq == 44100 || _codec.plfreq == 44000) // XXX just 44100?
     {
         return 32000;
     }
     else if(_codec.plfreq == 48000)
     {
         return 32000;
     }
     else
--- a/media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
@@ -14,17 +14,17 @@
 #include <stdio.h>
 
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 #if defined(_WIN32)
 #include <Windows.h>
 #include <mmsystem.h>
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC) || defined(WEBRTC_BSD)
 #include <string.h>
 #include <sys/time.h>
 #include <time.h>
 #endif
 
 #if (defined(_DEBUG) && defined(_WIN32))
 #define DEBUG_PRINT(expr)   OutputDebugString(##expr)
 #define DEBUG_PRINTP(expr, p)   \
@@ -232,17 +232,17 @@ bool RtpDumpImpl::RTCP(const uint8_t* pa
     return is_rtcp;
 }
 
 // TODO (hellner): why is TickUtil not used here?
 inline uint32_t RtpDumpImpl::GetTimeInMS() const
 {
 #if defined(_WIN32)
     return timeGetTime();
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
     struct timeval tv;
     struct timezone tz;
     unsigned long val;
 
     gettimeofday(&tv, &tz);
     val = tv.tv_sec * 1000 + tv.tv_usec / 1000;
     return val;
 #endif
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
@@ -11,16 +11,19 @@
 #ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
 #define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
 
 #include <jni.h>
 
 #include "webrtc/modules/video_capture/device_info_impl.h"
 #include "webrtc/modules/video_capture/video_capture_impl.h"
 
+#define AndroidJavaCaptureDeviceInfoClass "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid"
+#define AndroidJavaCaptureCapabilityClass "org/webrtc/videoengine/CaptureCapabilityAndroid"
+
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 class DeviceInfoAndroid : public DeviceInfoImpl {
  public:
   static void Initialize(JNIEnv* env);
--- a/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
@@ -43,17 +43,17 @@ int32_t DeviceInfoImpl::NumberOfCapabili
     if (!deviceUniqueIdUTF8)
         return -1;
 
     _apiLock.AcquireLockShared();
 
     if (_lastUsedDeviceNameLength == strlen((char*) deviceUniqueIdUTF8))
     {
         // Is it the same device that is asked for again.
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         if(strncasecmp((char*)_lastUsedDeviceName,
                        (char*) deviceUniqueIdUTF8,
                        _lastUsedDeviceNameLength)==0)
 #else
         if (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) == 0)
 #endif
@@ -80,17 +80,17 @@ int32_t DeviceInfoImpl::GetCapability(co
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                    "deviceUniqueIdUTF8 parameter not set in call to GetCapability");
         return -1;
     }
     ReadLockScoped cs(_apiLock);
 
     if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         || (strncasecmp((char*)_lastUsedDeviceName,
                         (char*) deviceUniqueIdUTF8,
                         _lastUsedDeviceNameLength)!=0))
 #else
         || (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #endif
@@ -128,17 +128,17 @@ int32_t DeviceInfoImpl::GetBestMatchedCa
 {
 
 
     if (!deviceUniqueIdUTF8)
         return -1;
 
     ReadLockScoped cs(_apiLock);
     if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         || (strncasecmp((char*)_lastUsedDeviceName,
                         (char*) deviceUniqueIdUTF8,
                         _lastUsedDeviceNameLength)!=0))
 #else
         || (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #endif
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
@@ -13,17 +13,23 @@
 #include <errno.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/ioctl.h>
 #include <sys/stat.h>
 #include <unistd.h>
 //v4l includes
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
 #include <linux/videodev2.h>
+#endif
 
 #include "webrtc/system_wrappers/interface/ref_count.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 
 namespace webrtc
 {
 namespace videocapturemodule
@@ -88,19 +94,20 @@ int32_t DeviceInfoLinux::GetDeviceName(
 {
     WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCapture, _id, "%s", __FUNCTION__);
 
     // Travel through /dev/video [0-63]
     uint32_t count = 0;
     char device[20];
     int fd = -1;
     bool found = false;
-    for (int n = 0; n < 64; n++)
+    int device_index;
+    for (device_index = 0; device_index < 64; device_index++)
     {
-        sprintf(device, "/dev/video%d", n);
+        sprintf(device, "/dev/video%d", device_index);
         if ((fd = open(device, O_RDONLY)) != -1)
         {
             if (count == deviceNumber) {
                 // Found the device
                 found = true;
                 break;
             } else {
                 close(fd);
@@ -149,73 +156,84 @@ int32_t DeviceInfoLinux::GetDeviceName(
                    strlen((const char*) cap.bus_info));
         }
         else
         {
             WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                        "buffer passed is too small");
             return -1;
         }
+    } else {
+        // If there's no bus info to use for the uniqueId, invent one.  It
+        // must be repeatable: "fake_<index>" encodes the /dev/video index
+        // and is recognized by CreateCapabilityMap() and
+        // VideoCaptureModuleV4L2::Init().
+        if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%d",
+                     device_index) >= (int) deviceUniqueIdUTF8Length)
+        {
+            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
+                       "buffer passed is too small");
+            return -1;
+        }
     }
 
     return 0;
 }
 
 int32_t DeviceInfoLinux::CreateCapabilityMap(
                                         const char* deviceUniqueIdUTF8)
 {
     int fd;
     char device[32];
     bool found = false;
+    int device_index;
 
     const int32_t deviceUniqueIdUTF8Length =
                             (int32_t) strlen((char*) deviceUniqueIdUTF8);
     if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id, "Device name too long");
         return -1;
     }
     WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _id,
                "CreateCapabilityMap called for device %s", deviceUniqueIdUTF8);
 
     /* detect /dev/video [0-63] entries */
-    for (int n = 0; n < 64; ++n)
+    if (sscanf(deviceUniqueIdUTF8, "fake_%d", &device_index) == 1)
     {
-        sprintf(device, "/dev/video%d", n);
+        sprintf(device, "/dev/video%d", device_index);
         fd = open(device, O_RDONLY);
-        if (fd == -1)
-          continue;
+        if (fd != -1) {
+            found = true;
+        }
+    } else {
+        /* detect /dev/video [0-63] entries */
+        for (int n = 0; n < 64; ++n)
+        {
+            sprintf(device, "/dev/video%d", n);
+            fd = open(device, O_RDONLY);
+            if (fd == -1)
+                continue;
 
-        // query device capabilities
-        struct v4l2_capability cap;
-        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
-        {
-            if (cap.bus_info[0] != 0)
+            // query device capabilities
+            struct v4l2_capability cap;
+            if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
             {
-                if (strncmp((const char*) cap.bus_info,
-                            (const char*) deviceUniqueIdUTF8,
-                            strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
+                if (cap.bus_info[0] != 0)
                 {
-                    found = true;
-                    break; // fd matches with device unique id supplied
+                    if (strncmp((const char*) cap.bus_info,
+                                (const char*) deviceUniqueIdUTF8,
+                                strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
+                    {
+                        found = true;
+                        break; // fd matches with device unique id supplied
+                    }
                 }
+                // else can't be a match as the test for fake_* above would have matched it
             }
-            else //match for device name
-            {
-                if (IsDeviceNameMatches((const char*) cap.card,
-                                        (const char*) deviceUniqueIdUTF8))
-                {
-                    found = true;
-                    break;
-                }
-            }
+            close(fd); // close since this is not the matching device
         }
-        close(fd); // close since this is not the matching device
     }
-
     if (!found)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id, "no matching device found");
         return -1;
     }
 
     // now fd will point to the matching device
     // reset old capability list.
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
@@ -5,25 +5,32 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include <errno.h>
 #include <fcntl.h>
-#include <linux/videodev2.h>
 #include <stdio.h>
 #include <string.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <unistd.h>
 
-#include <iostream>
+//v4l includes
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
 #include <new>
 
 #include "webrtc/modules/video_capture/linux/video_capture_linux.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/ref_count.h"
 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
@@ -66,16 +73,23 @@ int32_t VideoCaptureModuleV4L2::Init(con
 {
     int len = strlen((const char*) deviceUniqueIdUTF8);
     _deviceUniqueId = new (std::nothrow) char[len + 1];
     if (_deviceUniqueId)
     {
         memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
     }
 
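+    // Mozilla addition: uniqueIds of the form "fake_<n>" (invented by
+    // DeviceInfoLinux::GetDeviceName when a device reports no bus info)
+    // map directly to /dev/video<n>.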
+    int device_index;
+    if (sscanf(deviceUniqueIdUTF8, "fake_%d", &device_index) == 1)
+    {
+      _deviceId = device_index;
+      return 0;
+    }
+
     int fd;
     char device[32];
     bool found = false;
 
     /* detect /dev/video [0-63] entries */
     int n;
     for (n = 0; n < 64; n++)
     {
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
@@ -10,16 +10,30 @@
 
 #include "webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.h"
 #import "webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h"
 #import "webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.h"
 #include "webrtc/modules/video_capture/video_capture_config.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
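+// RAII helper that keeps an NSAutoreleasePool alive for the scope of a call
+// into Cocoa, for callers on threads that may not have a pool of their own.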
+class nsAutoreleasePool {
+public:
+    nsAutoreleasePool()
+    {
+        mLocalPool = [[NSAutoreleasePool alloc] init];
+    }
+    ~nsAutoreleasePool()
+    {
+        [mLocalPool release];
+    }
+private:
+    NSAutoreleasePool *mLocalPool;
+};
+
 namespace webrtc
 {
 
 namespace videocapturemodule
 {
 
 VideoCaptureMacQTKit::VideoCaptureMacQTKit(const int32_t id) :
     VideoCaptureImpl(id),
@@ -36,16 +50,17 @@ VideoCaptureMacQTKit::VideoCaptureMacQTK
     memset(_currentDeviceNameUTF8, 0, MAX_NAME_LENGTH);
     memset(_currentDeviceUniqueIdUTF8, 0, MAX_NAME_LENGTH);
     memset(_currentDeviceProductUniqueIDUTF8, 0, MAX_NAME_LENGTH);
 }
 
 VideoCaptureMacQTKit::~VideoCaptureMacQTKit()
 {
 
+    nsAutoreleasePool localPool;
     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, _id,
                  "~VideoCaptureMacQTKit() called");
     if(_captureDevice)
     {
         [_captureDevice registerOwner:nil];
         [_captureDevice stopCapture];
         [_captureDevice release];
     }
@@ -66,16 +81,18 @@ int32_t VideoCaptureMacQTKit::Init(
         (int32_t) strlen((char*)iDeviceUniqueIdUTF8);
     if(nameLength>kVideoCaptureUniqueNameLength)
         return -1;
 
     // Store the device name
     _deviceUniqueId = new char[nameLength+1];
     memcpy(_deviceUniqueId, iDeviceUniqueIdUTF8,nameLength+1);
 
+    nsAutoreleasePool localPool;
+
     _captureDevice = [[VideoCaptureMacQTKitObjC alloc] init];
     if(NULL == _captureDevice)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, id,
                      "Failed to create an instance of "
                      "VideoCaptureMacQTKitObjC");
         return -1;
     }
@@ -159,32 +176,34 @@ int32_t VideoCaptureMacQTKit::Init(
                  "successfully Init VideoCaptureMacQTKit" );
     return 0;
 }
 
 int32_t VideoCaptureMacQTKit::StartCapture(
     const VideoCaptureCapability& capability)
 {
 
+    nsAutoreleasePool localPool;
     _captureWidth = capability.width;
     _captureHeight = capability.height;
     _captureFrameRate = capability.maxFPS;
     _captureDelay = 120;
 
     [_captureDevice setCaptureHeight:_captureHeight
                                width:_captureWidth
                            frameRate:_captureFrameRate];
 
     [_captureDevice startCapture];
     _isCapturing = true;
     return 0;
 }
 
 int32_t VideoCaptureMacQTKit::StopCapture()
 {
+    nsAutoreleasePool localPool;
     [_captureDevice stopCapture];
     _isCapturing = false;
     return 0;
 }
 
 bool VideoCaptureMacQTKit::CaptureStarted()
 {
     return _isCapturing;
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
@@ -8,54 +8,71 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #import "webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h"
 #include "webrtc/modules/video_capture/include/video_capture.h"
 #include "webrtc/modules/video_capture/video_capture_config.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
+class nsAutoreleasePool {
+public:
+    nsAutoreleasePool()
+    {
+        mLocalPool = [[NSAutoreleasePool alloc] init];
+    }
+    ~nsAutoreleasePool()
+    {
+        [mLocalPool release];
+    }
+private:
+    NSAutoreleasePool *mLocalPool;
+};
+
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 VideoCaptureMacQTKitInfo::VideoCaptureMacQTKitInfo(const int32_t id) :
     DeviceInfoImpl(id)
 {
+    nsAutoreleasePool localPool;
     _captureInfo = [[VideoCaptureMacQTKitInfoObjC alloc] init];
 }
 
 VideoCaptureMacQTKitInfo::~VideoCaptureMacQTKitInfo()
 {
+    nsAutoreleasePool localPool;
     [_captureInfo release];
-
 }
 
 int32_t VideoCaptureMacQTKitInfo::Init()
 {
 
     return 0;
 }
 
 uint32_t VideoCaptureMacQTKitInfo::NumberOfDevices()
 {
 
+    nsAutoreleasePool localPool;
     uint32_t captureDeviceCount =
         [[_captureInfo getCaptureDeviceCount]intValue];
     return captureDeviceCount;
 
 }
 
 int32_t VideoCaptureMacQTKitInfo::GetDeviceName(
     uint32_t deviceNumber, char* deviceNameUTF8,
     uint32_t deviceNameLength, char* deviceUniqueIdUTF8,
     uint32_t deviceUniqueIdUTF8Length, char* productUniqueIdUTF8,
     uint32_t productUniqueIdUTF8Length)
 {
+    nsAutoreleasePool localPool;
     int errNum = [[_captureInfo getDeviceNamesFromIndex:deviceNumber
                    DefaultName:deviceNameUTF8 WithLength:deviceNameLength
                    AndUniqueID:deviceUniqueIdUTF8
                    WithLength:deviceUniqueIdUTF8Length
                    AndProductID:productUniqueIdUTF8
                    WithLength:productUniqueIdUTF8Length]intValue];
     return errNum;
 }
@@ -99,16 +116,17 @@ int32_t VideoCaptureMacQTKitInfo::GetBes
 }
 
 int32_t VideoCaptureMacQTKitInfo::DisplayCaptureSettingsDialogBox(
     const char* deviceUniqueIdUTF8,
     const char* dialogTitleUTF8, void* parentWindow,
     uint32_t positionX, uint32_t positionY)
 {
 
+    nsAutoreleasePool localPool;
     return [[_captureInfo
              displayCaptureSettingsDialogBoxWithDevice:deviceUniqueIdUTF8
              AndTitle:dialogTitleUTF8
              AndParentWindow:parentWindow AtX:positionX AndY:positionY]
              intValue];
 }
 
 int32_t VideoCaptureMacQTKitInfo::CreateCapabilityMap(
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
@@ -20,17 +20,16 @@
 #import <QTKit/QTKit.h>
 
 #include "webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.h"
 #include "webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_utility.h"
 
 @interface VideoCaptureMacQTKitInfoObjC : NSObject{
     bool                                _OSSupportedInfo;
     NSArray*                            _captureDevicesInfo;
-    NSAutoreleasePool*                    _poolInfo;
     int                                    _captureDeviceCountInfo;
 
 }
 
 /**************************************************************************
  *
  *   The following functions are considered to be private
  *
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
@@ -88,21 +88,25 @@ using namespace webrtc;
         return [NSNumber numberWithInt:0];
     }
 
     if(index >= (uint32_t)_captureDeviceCountInfo)
     {
         return [NSNumber numberWithInt:-1];
     }
 
-    QTCaptureDevice* tempCaptureDevice =
-        (QTCaptureDevice*)[_captureDevicesInfo objectAtIndex:index];
+    if ([_captureDevicesInfo count] <= index)
+    {
+        return [NSNumber numberWithInt:-1];
+    }
+
+    QTCaptureDevice* tempCaptureDevice =
+        (QTCaptureDevice*)[_captureDevicesInfo objectAtIndex:index];
     if(!tempCaptureDevice)
     {
         return [NSNumber numberWithInt:-1];
     }
 
     memset(deviceName, 0, deviceNameLength);
     memset(deviceUniqueID, 0, deviceUniqueIDLength);
 
     bool successful = NO;
 
     NSString* tempString = [tempCaptureDevice localizedDisplayName];
@@ -132,17 +136,16 @@ using namespace webrtc;
 
 - (NSNumber*)initializeVariables
 {
     if(NO == _OSSupportedInfo)
     {
         return [NSNumber numberWithInt:0];
     }
 
-    _poolInfo = [[NSAutoreleasePool alloc]init];
     _captureDeviceCountInfo = 0;
     [self getCaptureDevices];
 
     return [NSNumber numberWithInt:0];
 }
 
 // ***** Checks to see if the QTCaptureSession framework is available in the OS
 // ***** If it is not, isOSSupprted = NO
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
@@ -146,17 +146,27 @@ using namespace videocapturemodule;
   [_captureSession startRunning];
   _capturing = YES;
 }
 
 - (void)stopCapture {
   if (!_capturing)
     return;
 
-  [_captureSession stopRunning];
+  // This method is often called on a secondary thread.  Which means
+  // that the following can sometimes run "too early", causing crashes
+  // and/or weird errors concerning initialization.  On OS X 10.7 and
+  // 10.8, the CoreMediaIO method CMIOUninitializeGraph() is called from
+  // -[QTCaptureSession stopRunning].  If this is called too early,
+  // low-level session data gets uninitialized before low-level code
+  // is finished trying to use it.  The solution is to make stopRunning
+  // always run on the main thread.  See bug 837539.
+  [_captureSession performSelectorOnMainThread:@selector(stopRunning)
+                   withObject:nil
+                   waitUntilDone:NO];
   _capturing = NO;
 }
 
 #pragma mark Private methods
 
 - (BOOL)initializeVariables {
   if (NSClassFromString(@"QTCaptureSession") == nil)
     return NO;
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
@@ -11,16 +11,19 @@
     {
       'target_name': 'video_capture_module',
       'type': 'static_library',
       'dependencies': [
         'webrtc_utility',
         '<(webrtc_root)/common_video/common_video.gyp:common_video',
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
       ],
+      'cflags_mozilla': [
+        '$(NSPR_CFLAGS)',
+      ],
       'sources': [
         'device_info_impl.cc',
         'device_info_impl.h',
         'include/video_capture.h',
         'include/video_capture_defines.h',
         'include/video_capture_factory.h',
         'video_capture_config.h',
         'video_capture_delay.h',
@@ -31,17 +34,17 @@
       'conditions': [
         ['include_internal_video_capture==0', {
           'sources': [
             'external/device_info_external.cc',
             'external/video_capture_external.cc',
           ],
         }, {  # include_internal_video_capture == 1
           'conditions': [
-            ['OS=="linux"', {
+            ['include_v4l2_video_capture==1', {
               'sources': [
                 'linux/device_info_linux.cc',
                 'linux/device_info_linux.h',
                 'linux/video_capture_linux.cc',
                 'linux/video_capture_linux.h',
               ],
             }],  # linux
             ['OS=="mac"', {
@@ -61,33 +64,41 @@
                 'xcode_settings': {
                   'OTHER_LDFLAGS': [
                     '-framework QTKit',
                   ],
                 },
               },
             }],  # mac
             ['OS=="win"', {
-              'dependencies': [
-                '<(DEPTH)/third_party/winsdk_samples/winsdk_samples.gyp:directshow_baseclasses',
+              'conditions': [
+                ['build_with_mozilla==0', {
+                  'dependencies': [
+                    '<(DEPTH)/third_party/winsdk_samples/winsdk_samples.gyp:directshow_baseclasses',
+                  ],
+                }],
               ],
               'sources': [
                 'windows/device_info_ds.cc',
                 'windows/device_info_ds.h',
                 'windows/device_info_mf.cc',
                 'windows/device_info_mf.h',
                 'windows/help_functions_ds.cc',
                 'windows/help_functions_ds.h',
                 'windows/sink_filter_ds.cc',
                 'windows/sink_filter_ds.h',
                 'windows/video_capture_ds.cc',
                 'windows/video_capture_ds.h',
                 'windows/video_capture_factory_windows.cc',
                 'windows/video_capture_mf.cc',
                 'windows/video_capture_mf.h',
+                'windows/BasePin.cpp',
+                'windows/BaseFilter.cpp',
+                'windows/BaseInputPin.cpp',
+                'windows/MediaType.cpp',
               ],
               'link_settings': {
                 'libraries': [
                   '-lStrmiids.lib',
                 ],
               },
             }],  # win
             ['OS=="android"', {
@@ -141,29 +152,33 @@
             '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
             '<(DEPTH)/testing/gtest.gyp:gtest',
           ],
           'sources': [
             'test/video_capture_unittest.cc',
             'test/video_capture_main_mac.mm',
           ],
           'conditions': [
-            ['OS=="mac" or OS=="linux"', {
+            ['OS!="win" and OS!="android"', {
               'cflags': [
                 '-Wno-write-strings',
               ],
               'ldflags': [
                 '-lpthread -lm',
               ],
             }],
+            ['include_v4l2_video_capture==1', {
+              'libraries': [
+                '-lXext',
+                '-lX11',
+              ],
+            }],
             ['OS=="linux"', {
               'libraries': [
                 '-lrt',
-                '-lXext',
-                '-lX11',
               ],
             }],
             ['OS=="mac"', {
               'dependencies': [
                 # Link with a special main for mac so we can use the webcam.
                 '<(webrtc_root)/test/test.gyp:test_support_main_threaded_mac',
               ],
               'xcode_settings': {
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
@@ -12,17 +12,16 @@
 
 #include "webrtc/modules/video_capture/video_capture_config.h"
 #include "webrtc/modules/video_capture/video_capture_delay.h"
 #include "webrtc/modules/video_capture/windows/help_functions_ds.h"
 #include "webrtc/system_wrappers/interface/ref_count.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 #include <Dvdmedia.h>
-#include <Streams.h>
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 const int32_t NoWindowsCaptureDelays = 1;
 const DelayValues WindowsCaptureDelays[NoWindowsCaptureDelays] = {
   "Microsoft LifeCam Cinema",
@@ -37,30 +36,47 @@ const DelayValues WindowsCaptureDelays[N
     {160,120,109},
     {1280,720,166},
     {960,544,126},
     {800,448,120},
     {800,600,127}
   },
 };
 
+
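+// Local replacement for the DirectShow base-class FreeMediaType() helper,
+// needed now that the winsdk_samples base classes (Streams.h) are no longer
+// a dependency.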
+void _FreeMediaType(AM_MEDIA_TYPE& mt)
+{
+    if (mt.cbFormat != 0)
+    {
+        CoTaskMemFree((PVOID)mt.pbFormat);
+        mt.cbFormat = 0;
+        mt.pbFormat = NULL;
+    }
+    if (mt.pUnk != NULL)
+    {
+        // pUnk should not be used.
+        mt.pUnk->Release();
+        mt.pUnk = NULL;
+    }
+}
+
 // static
 DeviceInfoDS* DeviceInfoDS::Create(const int32_t id)
 {
     DeviceInfoDS* dsInfo = new DeviceInfoDS(id);
     if (!dsInfo || dsInfo->Init() != 0)
     {
         delete dsInfo;
         dsInfo = NULL;
     }
     return dsInfo;
 }
 
 DeviceInfoDS::DeviceInfoDS(const int32_t id)
-    : DeviceInfoImpl(id), _dsDevEnum(NULL), _dsMonikerDevEnum(NULL),
+    : DeviceInfoImpl(id), _dsDevEnum(NULL),
       _CoUninitializeIsRequired(true)
 {
     // 1) Initialize the COM library (make Windows load the DLLs).
     //
     // CoInitializeEx must be called at least once, and is usually called only once,
     // for each thread that uses the COM library. Multiple calls to CoInitializeEx
     // by the same thread are allowed as long as they pass the same concurrency flag,
     // but subsequent valid calls return S_FALSE.
@@ -95,17 +111,16 @@ DeviceInfoDS::DeviceInfoDS(const int32_t
                          "RPC_E_CHANGED_MODE, error 0x%x",
                          hr);
         }
     }
 }
 
 DeviceInfoDS::~DeviceInfoDS()
 {
-    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
     RELEASE_AND_CLEAR(_dsDevEnum);
     if (_CoUninitializeIsRequired)
     {
         CoUninitialize();
     }
 }
 
 int32_t DeviceInfoDS::Init()
@@ -152,25 +167,26 @@ int32_t DeviceInfoDS::GetDeviceInfo(
                                        char* deviceUniqueIdUTF8,
                                        uint32_t deviceUniqueIdUTF8Length,
                                        char* productUniqueIdUTF8,
                                        uint32_t productUniqueIdUTF8Length)
 
 {
 
     // enumerate all video capture devices
-    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+    IEnumMoniker* _dsMonikerDevEnum = NULL;
     HRESULT hr =
         _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                           &_dsMonikerDevEnum, 0);
     if (hr != NOERROR)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                      "Failed to enumerate CLSID_SystemDeviceEnum, error 0x%x."
                      " No webcam exist?", hr);
+        RELEASE_AND_CLEAR(_dsMonikerDevEnum);
         return 0;
     }
 
     _dsMonikerDevEnum->Reset();
     ULONG cFetched;
     IMoniker *pM;
     int index = 0;
     while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched))
@@ -206,16 +222,17 @@ int32_t DeviceInfoDS::GetDeviceInfo(
                                                              deviceNameLength, NULL,
                                                              NULL);
                             if (convResult == 0)
                             {
                                 WEBRTC_TRACE(webrtc::kTraceError,
                                              webrtc::kTraceVideoCapture, _id,
                                              "Failed to convert device name to UTF8. %d",
                                              GetLastError());
+                                RELEASE_AND_CLEAR(_dsMonikerDevEnum);
                                 return -1;
                             }
                         }
                         if (deviceUniqueIdUTF8Length > 0)
                         {
                             hr = pBag->Read(L"DevicePath", &varName, 0);
                             if (FAILED(hr))
                             {
@@ -237,16 +254,17 @@ int32_t DeviceInfoDS::GetDeviceInfo(
                                                           deviceUniqueIdUTF8Length,
                                                           NULL, NULL);
                                 if (convResult == 0)
                                 {
                                     WEBRTC_TRACE(webrtc::kTraceError,
                                                  webrtc::kTraceVideoCapture, _id,
                                                  "Failed to convert device name to UTF8. %d",
                                                  GetLastError());
+                                    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
                                     return -1;
                                 }
                                 if (productUniqueIdUTF8
                                     && productUniqueIdUTF8Length > 0)
                                 {
                                     GetProductId(deviceUniqueIdUTF8,
                                                  productUniqueIdUTF8,
                                                  productUniqueIdUTF8Length);
@@ -264,16 +282,17 @@ int32_t DeviceInfoDS::GetDeviceInfo(
         }
 
     }
     if (deviceNameLength)
     {
         WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, _id, "%s %s",
                      __FUNCTION__, deviceNameUTF8);
     }
+    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
     return index;
 }
 
 IBaseFilter * DeviceInfoDS::GetDeviceFilter(
                                      const char* deviceUniqueIdUTF8,
                                      char* productUniqueIdUTF8,
                                      uint32_t productUniqueIdUTF8Length)
 {
@@ -282,25 +301,26 @@ IBaseFilter * DeviceInfoDS::GetDeviceFil
         (int32_t) strlen((char*) deviceUniqueIdUTF8); // UTF8 is also NULL terminated
     if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                      "Device name too long");
         return NULL;
     }
 
+    IEnumMoniker* _dsMonikerDevEnum = NULL;
     // enumerate all video capture devices
-    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
     HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                                    &_dsMonikerDevEnum, 0);
     if (hr != NOERROR)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                      "Failed to enumerate CLSID_SystemDeviceEnum, error 0x%x."
                      " No webcam exist?", hr);
+        RELEASE_AND_CLEAR(_dsMonikerDevEnum);
         return 0;
     }
     _dsMonikerDevEnum->Reset();
     ULONG cFetched;
     IMoniker *pM;
 
     IBaseFilter *captureFilter = NULL;
     bool deviceFound = false;
@@ -358,16 +378,17 @@ IBaseFilter * DeviceInfoDS::GetDeviceFil
                     }
                 }
             }
             VariantClear(&varName);
             pBag->Release();
             pM->Release();
         }
     }
+    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
     return captureFilter;
 }
 
 int32_t DeviceInfoDS::GetWindowsCapability(
     const int32_t capabilityIndex,
     VideoCaptureCapabilityWindows& windowsCapability) {
   ReadLockScoped cs(_apiLock);
 
@@ -550,17 +571,17 @@ int32_t DeviceInfoDS::CreateCapabilityMa
                 capability.interlaced = h->dwInterlaceFlags
                                         & (AMINTERLACE_IsInterlaced
                                            | AMINTERLACE_DisplayModeBobOnly);
                 avgTimePerFrame = h->AvgTimePerFrame;
             }
 
             if (hrVC == S_OK)
             {
-                LONGLONG *frameDurationList;
+                LONGLONG *frameDurationList = NULL;
                 LONGLONG maxFPS;
                 long listSize;
                 SIZE size;
                 size.cx = capability.width;
                 size.cy = capability.height;
 
                 // GetMaxAvailableFrameRate doesn't return max frame rate always
                 // eg: Logitech Notebook. This may be due to a bug in that API
@@ -569,17 +590,19 @@ int32_t DeviceInfoDS::CreateCapabilityMa
                 // the max fps.
                 hrVC = videoControlConfig->GetFrameRateList(outputCapturePin,
                                                             tmp, size,
                                                             &listSize,
                                                             &frameDurationList);
 
                 // On some odd cameras, you may get a 0 for duration.
                 // GetMaxOfFrameArray returns the lowest duration (highest FPS)
-                if (hrVC == S_OK && listSize > 0 &&
+                // Initialize and check the returned list for null since
+                // some broken drivers don't modify it.
+                if (hrVC == S_OK && listSize > 0 && frameDurationList &&
                     0 != (maxFPS = GetMaxOfFrameArray(frameDurationList,
                                                       listSize)))
                 {
                     capability.maxFPS = static_cast<int> (10000000
                                                            / maxFPS);
                     capability.supportFrameRateControl = true;
                 }
                 else // use existing method
@@ -664,17 +687,17 @@ int32_t DeviceInfoDS::CreateCapabilityMa
                                                       capability.height);
             _captureCapabilities.push_back(capability);
             _captureCapabilitiesWindows.push_back(capability);
             WEBRTC_TRACE( webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _id,
                          "Camera capability, width:%d height:%d type:%d fps:%d",
                          capability.width, capability.height,
                          capability.rawType, capability.maxFPS);
         }
-        DeleteMediaType(pmt);
+        _FreeMediaType(*pmt);
         pmt = NULL;
     }
     RELEASE_AND_CLEAR(streamConfig);
     RELEASE_AND_CLEAR(videoControlConfig);
     RELEASE_AND_CLEAR(outputCapturePin);
     RELEASE_AND_CLEAR(captureDevice); // Release the capture device
 
     // Store the new used device name
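With _dsMonikerDevEnum demoted from a member to a local, every early return
in GetDeviceInfo and GetDeviceFilter has to remember RELEASE_AND_CLEAR. A
hypothetical RAII wrapper (not part of this patch; shown only to illustrate
the pattern the explicit calls implement by hand) would cover all exit paths:

    // Releases a COM interface when the scope exits, early returns included.
    template <class T>
    class ScopedComPtr {
    public:
        ScopedComPtr() : mPtr(NULL) {}
        ~ScopedComPtr() { if (mPtr) mPtr->Release(); }
        T** Receive() { return &mPtr; }  // for out-params like CreateClassEnumerator
        T* operator->() const { return mPtr; }
        bool operator!() const { return mPtr == NULL; }
    private:
        T* mPtr;
        ScopedComPtr(const ScopedComPtr&);   // non-copyable
        void operator=(const ScopedComPtr&);
    };

    // Usage sketch:
    //   ScopedComPtr<IEnumMoniker> devEnum;
    //   hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
    //                                          devEnum.Receive(), 0);
    //   if (hr != NOERROR) return 0;  // no RELEASE_AND_CLEAR needed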
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.h
@@ -92,15 +92,14 @@ protected:
                           char* productUniqueIdUTF8,
                           uint32_t productUniqueIdUTF8Length);
 
     virtual int32_t
         CreateCapabilityMap(const char* deviceUniqueIdUTF8);
 
 private:
     ICreateDevEnum* _dsDevEnum;
-    IEnumMoniker* _dsMonikerDevEnum;
     bool _CoUninitializeIsRequired;
     std::vector<VideoCaptureCapabilityWindows> _captureCapabilitiesWindows;
 };
 }  // namespace videocapturemodule
 }  // namespace webrtc
 #endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
@@ -16,49 +16,52 @@
 #include <Dvdmedia.h> // VIDEOINFOHEADER2
 #include <initguid.h>
 
 #define DELETE_RESET(p) { delete (p) ; (p) = NULL ;}
 
 DEFINE_GUID(CLSID_SINKFILTER, 0x88cdbbdc, 0xa73b, 0x4afa, 0xac, 0xbf, 0x15, 0xd5,
             0xe2, 0xce, 0x12, 0xc3);
 
+using namespace mozilla::media;
+using namespace mozilla;
+
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 typedef struct tagTHREADNAME_INFO
 {
    DWORD dwType;        // must be 0x1000
    LPCSTR szName;       // pointer to name (in user addr space)
    DWORD dwThreadID;    // thread ID (-1=caller thread)
    DWORD dwFlags;       // reserved for future use, must be zero
 } THREADNAME_INFO;
 
 CaptureInputPin::CaptureInputPin (int32_t moduleId,
                             IN TCHAR * szName,
                             IN CaptureSinkFilter* pFilter,
-                            IN CCritSec * pLock,
+                            IN CriticalSection * pLock,
                             OUT HRESULT * pHr,
                             IN LPCWSTR pszName)
-    : CBaseInputPin (szName, pFilter, pLock, pHr, pszName),
+    : BaseInputPin (szName, pFilter, pLock, pHr, pszName),
       _requestedCapability(),
       _resultingCapability()
 {
     _moduleId=moduleId;
     _threadHandle = NULL;
 }
 
 CaptureInputPin::~CaptureInputPin()
 {
 }
 
 HRESULT
-CaptureInputPin::GetMediaType (IN int iPosition, OUT CMediaType * pmt)
+CaptureInputPin::GetMediaType (IN int iPosition, OUT MediaType * pmt)
 {
     // reset the thread handle
     _threadHandle = NULL;
 
     if(iPosition < 0)
     return E_INVALIDARG;
 
     VIDEOINFOHEADER* pvi = (VIDEOINFOHEADER*) pmt->AllocFormatBuffer(
@@ -156,17 +159,17 @@ CaptureInputPin::GetMediaType (IN int iP
     WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _moduleId,
              "GetMediaType position %d, width %d, height %d, biCompression 0x%x",
              iPosition, _requestedCapability.width,
              _requestedCapability.height,pvi->bmiHeader.biCompression);
     return NOERROR;
 }
 
 HRESULT
-CaptureInputPin::CheckMediaType ( IN const CMediaType * pMediaType)
+CaptureInputPin::CheckMediaType ( IN const MediaType * pMediaType)
 {
     // reset the thread handle
     _threadHandle = NULL;
 
     const GUID *type = pMediaType->Type();
     if (*type != MEDIATYPE_Video)
     return E_INVALIDARG;
 
@@ -314,18 +317,18 @@ CaptureInputPin::CheckMediaType ( IN con
     return E_INVALIDARG;
 }
 
 HRESULT
 CaptureInputPin::Receive ( IN IMediaSample * pIMediaSample )
 {
     HRESULT hr = S_OK;
 
-    ASSERT (m_pFilter);
-    ASSERT (pIMediaSample);
+    assert (mFilter);
+    assert (pIMediaSample);
 
     // get the thread handle of the delivering thread inc its priority
     if( _threadHandle == NULL)
     {
         HANDLE handle= GetCurrentThread();
         SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST);
         _threadHandle = handle;
         // See http://msdn.microsoft.com/en-us/library/xcb2z8hs(VS.71).aspx for details on the code
@@ -343,37 +346,37 @@ CaptureInputPin::Receive ( IN IMediaSamp
                             (DWORD_PTR*)&info );
         }
         __except (EXCEPTION_CONTINUE_EXECUTION)
         {
         }
 
     }
 
-    reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->LockReceive();
-    hr = CBaseInputPin::Receive (pIMediaSample);
+    reinterpret_cast <CaptureSinkFilter *>(mFilter)->LockReceive();
+    hr = BaseInputPin::Receive (pIMediaSample);
 
     if (SUCCEEDED (hr))
     {
         const int32_t length = pIMediaSample->GetActualDataLength();
 
         unsigned char* pBuffer = NULL;
         if(S_OK != pIMediaSample->GetPointer(&pBuffer))
         {
-            reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
+            reinterpret_cast <CaptureSinkFilter *>(mFilter)->UnlockReceive();
             return S_FALSE;
         }
 
         // NOTE: filter unlocked within Send call
-        reinterpret_cast <CaptureSinkFilter *> (m_pFilter)->ProcessCapturedFrame(
+        reinterpret_cast <CaptureSinkFilter *> (mFilter)->ProcessCapturedFrame(
                                         pBuffer,length,_resultingCapability);
     }
     else
     {
-        reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
+        reinterpret_cast <CaptureSinkFilter *>(mFilter)->UnlockReceive();
     }
 
     return hr;
 }
 
 // called under LockReceive
 HRESULT CaptureInputPin::SetMatchingMediaType(
                                     const VideoCaptureCapability& capability)
@@ -384,23 +387,25 @@ HRESULT CaptureInputPin::SetMatchingMedi
     return S_OK;
 }
 //  ----------------------------------------------------------------------------
 CaptureSinkFilter::CaptureSinkFilter (IN TCHAR * tszName,
                               IN LPUNKNOWN punk,
                               OUT HRESULT * phr,
                               VideoCaptureExternal& captureObserver,
                               int32_t moduleId)
-    : CBaseFilter(tszName,punk,& m_crtFilter,CLSID_SINKFILTER),
+    : BaseFilter(tszName, CLSID_SINKFILTER),
+      m_crtFilter("CaptureSinkFilter::m_crtFilter"),
+      m_crtRecv("CaptureSinkFilter::m_crtRecv"),
       m_pInput(NULL),
       _captureObserver(captureObserver),
       _moduleId(moduleId)
 {
     (* phr) = S_OK;
-    m_pInput = new CaptureInputPin(moduleId,NAME ("VideoCaptureInputPin"),
+    m_pInput = new CaptureInputPin(moduleId, L"VideoCaptureInputPin",
                                    this,
                                    & m_crtFilter,
                                    phr, L"VideoCapture");
     if (m_pInput == NULL || FAILED (* phr))
     {
         (* phr) = FAILED (* phr) ? (* phr) : E_OUTOFMEMORY;
         goto cleanup;
     }
@@ -413,87 +418,87 @@ CaptureSinkFilter::~CaptureSinkFilter()
     delete m_pInput;
 }
 
 int CaptureSinkFilter::GetPinCount()
 {
     return 1;
 }
 
-CBasePin *
+BasePin *
 CaptureSinkFilter::GetPin(IN int Index)
 {
-    CBasePin * pPin;
+    BasePin * pPin;
     LockFilter ();
     if (Index == 0)
     {
         pPin = m_pInput;
     }
     else
     {
         pPin = NULL;
     }
     UnlockFilter ();
     return pPin;
 }
 
 STDMETHODIMP CaptureSinkFilter::Pause()
 {
     LockFilter();
-    if (m_State == State_Stopped)
+    if (mState == State_Stopped)
     {
         //  change the state, THEN activate the input pin
-        m_State = State_Paused;
+        mState = State_Paused;
         if (m_pInput && m_pInput->IsConnected())
         {
             m_pInput->Active();
         }
         if (m_pInput && !m_pInput->IsConnected())
         {
-            m_State = State_Running;
+            mState = State_Running;
         }
     }
-    else if (m_State == State_Running)
+    else if (mState == State_Running)
     {
-        m_State = State_Paused;
+        mState = State_Paused;
     }
     UnlockFilter();
     return S_OK;
 }
 
 STDMETHODIMP CaptureSinkFilter::Stop()
 {
     LockReceive();
     LockFilter();
 
     //  set the state
-    m_State = State_Stopped;
+    mState = State_Stopped;
 
     //  inactivate the pins
     if (m_pInput)
         m_pInput->Inactive();
 
     UnlockFilter();
     UnlockReceive();
     return S_OK;
 }
 
 void CaptureSinkFilter::SetFilterGraph(IGraphBuilder* graph)
 {
     LockFilter();
-    m_pGraph = graph;
+    mGraph = graph;
     UnlockFilter();
 }
 
 void CaptureSinkFilter::ProcessCapturedFrame(unsigned char* pBuffer,
                                          int32_t length,
                                          const VideoCaptureCapability& frameInfo)
 {
     //  we have the receiver lock
-    if (m_State == State_Running)
+    if (mState == State_Running)
     {
         _captureObserver.IncomingFrame(pBuffer, length, frameInfo);
 
         // trying to hold it since it's only a memcpy
         // IMPROVEMENT: if this works, move critsect
         UnlockReceive();
         return;
     }
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
@@ -6,95 +6,117 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
 #define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
 
-#include <Streams.h> // Include base DS filter header files
-
 #include "webrtc/modules/video_capture/include/video_capture_defines.h"
+#include "BaseInputPin.h"
+#include "BaseFilter.h"
+#include "MediaType.h"
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 //forward declaration
 
 class CaptureSinkFilter;
 /**
  *	input pin for camera input
  *
  */
-class CaptureInputPin: public CBaseInputPin
+class CaptureInputPin: public mozilla::media::BaseInputPin
 {
 public:
     int32_t _moduleId;
 
     VideoCaptureCapability _requestedCapability;
     VideoCaptureCapability _resultingCapability;
     HANDLE _threadHandle;
 
     CaptureInputPin(int32_t moduleId,
                     IN TCHAR* szName,
                     IN CaptureSinkFilter* pFilter,
-                    IN CCritSec * pLock,
+                    IN mozilla::CriticalSection * pLock,
                     OUT HRESULT * pHr,
                     IN LPCWSTR pszName);
     virtual ~CaptureInputPin();
 
-    HRESULT GetMediaType (IN int iPos, OUT CMediaType * pmt);
-    HRESULT CheckMediaType (IN const CMediaType * pmt);
+    HRESULT GetMediaType (IN int iPos, OUT mozilla::media::MediaType * pmt);
+    HRESULT CheckMediaType (IN const mozilla::media::MediaType * pmt);
     STDMETHODIMP Receive (IN IMediaSample *);
     HRESULT SetMatchingMediaType(const VideoCaptureCapability& capability);
 };
 
-class CaptureSinkFilter: public CBaseFilter
+class CaptureSinkFilter: public mozilla::media::BaseFilter
 {
 
 public:
     CaptureSinkFilter(IN TCHAR * tszName,
                       IN LPUNKNOWN punk,
                       OUT HRESULT * phr,
                       VideoCaptureExternal& captureObserver,
                       int32_t moduleId);
     virtual ~CaptureSinkFilter();
 
     //  --------------------------------------------------------------------
     //  class methods
 
     void ProcessCapturedFrame(unsigned char* pBuffer, int32_t length,
                               const VideoCaptureCapability& frameInfo);
     //  explicit receiver lock acquisition and release
-    void LockReceive()  { m_crtRecv.Lock();}
-    void UnlockReceive() {m_crtRecv.Unlock();}
+    void LockReceive()  { m_crtRecv.Enter();}
+    void UnlockReceive() {m_crtRecv.Leave();}
+
     //  explicit filter lock acquisition and release
-    void LockFilter() {m_crtFilter.Lock();}
-    void UnlockFilter() { m_crtFilter.Unlock(); }
+    void LockFilter() {m_crtFilter.Enter();}
+    void UnlockFilter() { m_crtFilter.Leave(); }
     void SetFilterGraph(IGraphBuilder* graph); // Used if EVR
 
     //  --------------------------------------------------------------------
     //  COM interfaces
-DECLARE_IUNKNOWN    ;
+    STDMETHODIMP QueryInterface(REFIID aIId, void **aInterface)
+    {
+      return mozilla::media::BaseFilter::QueryInterface(aIId, aInterface);
+    }
+    STDMETHODIMP_(ULONG) AddRef()
+    {
+      return ::InterlockedIncrement(&mRefCnt);
+    }
+
+    STDMETHODIMP_(ULONG) Release()
+    {
+      unsigned long newRefCnt = ::InterlockedDecrement(&mRefCnt);
+
+      if (!newRefCnt) {
+        delete this;
+      }
+
+      return newRefCnt;
+    }
+
     STDMETHODIMP SetMatchingMediaType(const VideoCaptureCapability& capability);
 
     //  --------------------------------------------------------------------
     //  CBaseFilter methods
     int GetPinCount ();
-    CBasePin * GetPin ( IN int Index);
+    mozilla::media::BasePin * GetPin ( IN int Index);
     STDMETHODIMP Pause ();
     STDMETHODIMP Stop ();
     STDMETHODIMP GetClassID ( OUT CLSID * pCLSID);
     //  --------------------------------------------------------------------
     //  class factory calls this
-    static CUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
+    static IUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
 private:
-    CCritSec m_crtFilter; //  filter lock
-    CCritSec m_crtRecv;  //  receiver lock; always acquire before filter lock
+    mozilla::CriticalSection m_crtFilter; //  filter lock
+    mozilla::CriticalSection m_crtRecv;  //  receiver lock; always acquire before filter lock
     CaptureInputPin * m_pInput;
     VideoCaptureExternal& _captureObserver;
     int32_t _moduleId;
+    unsigned long mRefCnt;
 };
 }  // namespace videocapturemodule
 }  // namespace webrtc
 #endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
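The LockReceive/UnlockReceive and LockFilter/UnlockFilter pairs above map the
old CCritSec::Lock/Unlock onto mozilla::CriticalSection::Enter/Leave. A
hypothetical scoped guard (the class name is illustrative, not an existing
Mozilla helper) shows the exception-safe form of the same pattern:

    class AutoCriticalSection {
    public:
        explicit AutoCriticalSection(mozilla::CriticalSection& aCrit)
            : mCrit(aCrit) { mCrit.Enter(); }
        ~AutoCriticalSection() { mCrit.Leave(); }
    private:
        mozilla::CriticalSection& mCrit;
        AutoCriticalSection(const AutoCriticalSection&);   // non-copyable
        void operator=(const AutoCriticalSection&);
    };

Note the ordering comment on m_crtRecv: the receiver lock must always be
acquired before the filter lock, which a guard preserves as long as guards
are constructed in that order (as Stop() does with its explicit calls).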
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+
+namespace webrtc {
+
+class H264Encoder : public VideoEncoder {
+ public:
+  static H264Encoder* Create();
+
+  virtual ~H264Encoder() {}
+};  // H264Encoder
+
+class H264Decoder : public VideoDecoder {
+ public:
+  static H264Decoder* Create();
+
+  virtual ~H264Decoder() {}
+};  // H264Decoder
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
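A hedged usage sketch for the new factory interface; how the encoder and
decoder get registered with the rest of the engine is an assumption about
surrounding code, not something this patch shows:

    #include "webrtc/modules/video_coding/codecs/h264/include/h264.h"

    void CreateAndDestroyH264Pair() {
        // Create() hands back a concrete platform implementation.
        webrtc::H264Encoder* encoder = webrtc::H264Encoder::Create();
        webrtc::H264Decoder* decoder = webrtc::H264Decoder::Create();
        // ... register both as external codecs, use them, then tear down ...
        delete decoder;
        delete encoder;
    }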
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
@@ -42,20 +42,26 @@ struct CodecSpecificInfoVP8
     int              tl0PicIdx;         // Negative value to skip tl0PicIdx
     int8_t     keyIdx;            // negative value to skip keyIdx
 };
 
 struct CodecSpecificInfoGeneric {
   uint8_t simulcast_idx;
 };
 
-union CodecSpecificInfoUnion
-{
+struct CodecSpecificInfoH264 {
+  uint8_t nalu_header;
+  bool    single_nalu;
+  uint8_t simulcastIdx;
+};
+
+union CodecSpecificInfoUnion {
     CodecSpecificInfoGeneric   generic;
     CodecSpecificInfoVP8       VP8;
+    CodecSpecificInfoH264      H264;
 };
 
 // Note: if any pointers are added to this struct or its sub-structs, it
 // must be fitted with a copy-constructor. This is because it is copied
 // in the copy-constructor of VCMEncodedFrame.
 struct CodecSpecificInfo
 {
     VideoCodecType   codecType;
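How an encoder might populate the new H264 arm of the union, mirroring the
CopyCodecSpecific handling later in this rollup (the values are illustrative
only):

    #include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"

    webrtc::CodecSpecificInfo MakeH264Info() {
        webrtc::CodecSpecificInfo info;
        info.codecType = webrtc::kVideoCodecH264;    // selects the union arm
        info.codecSpecific.H264.nalu_header = 0x67;  // e.g. an SPS NAL header byte
        info.codecSpecific.H264.single_nalu = true;  // whole frame in one NALU
        info.codecSpecific.H264.simulcastIdx = 0;
        return info;
    }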
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
@@ -19,16 +19,22 @@
         '<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
       ],
       'conditions': [
         ['build_libvpx==1', {
           'dependencies': [
             '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
           ],
+        },{
+          'link_settings': {
+            'libraries': [
+              '$(LIBVPX_OBJ)/libvpx.a',
+            ],
+          },
         }],
       ],
       'sources': [
         'reference_picture_selection.h',
         'reference_picture_selection.cc',
         'include/vp8.h',
         'include/vp8_common_types.h',
         'vp8_impl.cc',
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -166,17 +166,19 @@ int VP8EncoderImpl::InitEncode(const Vid
                          .Create(num_temporal_layers, rand());
   // random start 16 bits is enough.
   picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
 
   // allocate memory for encoded image
   if (encoded_image_._buffer != NULL) {
     delete [] encoded_image_._buffer;
   }
-  encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
+  // Reserve 100 extra bytes for overhead at small resolutions.
+  encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height)
+                         + 100;
   encoded_image_._buffer = new uint8_t[encoded_image_._size];
   encoded_image_._completeFrame = true;
 
   // Creating a wrapper to the image - setting image data to NULL. Actual
   // pointer will be set in encode. Setting align to 1, as it is meaningless
   // (actual memory is not allocated).
   raw_ = vpx_img_wrap(NULL, IMG_FMT_I420, codec_.width, codec_.height,
                       1, NULL);
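CalcBufferSize(kI420, width, height) works out to width*height*3/2 bytes for
even dimensions (a full-resolution Y plane plus quarter-resolution U and V
planes), so the padding matters most at tiny resolutions, where compressed
output plus headers can exceed the raw size. The same arithmetic as a sketch:

    #include <stddef.h>

    // Raw I420 frame size plus the patch's fixed 100-byte safety margin.
    size_t EncodedBufferSizeWithPad(int width, int height) {
        size_t i420 = static_cast<size_t>(width) * height * 3 / 2;
        return i420 + 100;
    }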
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/interface/video_coding.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/interface/video_coding.h
@@ -582,16 +582,19 @@ public:
     virtual void SetNackSettings(size_t max_nack_list_size,
                                  int max_packet_age_to_nack,
                                  int max_incomplete_time_ms) = 0;
 
     // Setting a desired delay to the VCM receiver. Video rendering will be
     // delayed by at least desired_delay_ms.
     virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
 
+    // Set current load state of the CPU
+    virtual void SetCPULoadState(CPULoadState state) = 0;
+
     // Enables recording of debugging information.
     virtual int StartDebugRecording(const char* file_name_utf8) = 0;
 
     // Disables recording of debugging information.
     virtual int StopDebugRecording() = 0;
 
     // Lets the sender suspend video when the rate drops below
     // |threshold_bps|, and turns back on when the rate goes back up above
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/codec_database.cc
@@ -338,16 +338,18 @@ bool VCMCodecDataBase::RequiresEncoderRe
       }
       break;
     case kVideoCodecGeneric:
       break;
     // Known codecs without payload-specifics
     case kVideoCodecI420:
     case kVideoCodecRED:
     case kVideoCodecULPFEC:
+    case kVideoCodecH264:
+      // TODO(jesup): analyze codec config for H264
       break;
     // Unknown codec type, reset just to be sure.
     case kVideoCodecUnknown:
       return true;
   }
 
   if (new_send_codec.numberOfSimulcastStreams > 0) {
     for (unsigned char i = 0; i < new_send_codec.numberOfSimulcastStreams;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/decoding_state.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/decoding_state.cc
@@ -46,24 +46,24 @@ uint32_t VCMDecodingState::time_stamp() 
 uint16_t VCMDecodingState::sequence_num() const {
   return sequence_num_;
 }
 
 bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
   assert(frame != NULL);
   if (in_initial_state_)
     return false;
-  return !IsNewerTimestamp(frame->TimeStamp(), time_stamp_);
+  return !IsNewerOrSameTimestamp(frame->TimeStamp(), time_stamp_);
 }
 
 bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
   assert(packet != NULL);
   if (in_initial_state_)
     return false;
-  return !IsNewerTimestamp(packet->timestamp, time_stamp_);
+  return !IsNewerOrSameTimestamp(packet->timestamp, time_stamp_);
 }
 
 void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
   assert(frame != NULL && frame->GetHighSeqNum() >= 0);
   UpdateSyncState(frame);
   sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
   time_stamp_ = frame->TimeStamp();
   picture_id_ = frame->PictureId();
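Switching from IsNewerTimestamp to IsNewerOrSameTimestamp means a frame or
packet whose timestamp equals the last decoded one is no longer classified
as old, which matters once several frames can legitimately share a capture
timestamp (as with the H264 single-NALU handling elsewhere in this rollup).
A sketch of the wraparound-safe comparisons being assumed (the real
definitions live in module_common_types.h):

    #include <stdint.h>

    // Newer iff the unsigned difference lies in the forward half-range;
    // this stays correct across 32-bit RTP timestamp wraparound.
    inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev) {
        return timestamp != prev &&
               static_cast<uint32_t>(timestamp - prev) < 0x80000000u;
    }

    inline bool IsNewerOrSameTimestamp(uint32_t timestamp, uint32_t prev) {
        return timestamp == prev || IsNewerTimestamp(timestamp, prev);
    }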
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/encoded_frame.cc
@@ -95,59 +95,59 @@ void VCMEncodedFrame::Reset()
     _missingFrame = false;
     _length = 0;
     _codecSpecificInfo.codecType = kVideoCodecUnknown;
     _codec = kVideoCodecUnknown;
 }
 
 void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
 {
-    if (header)
-    {
-        switch (header->codec)
-        {
-            case kRtpVideoVp8:
-            {
-                if (_codecSpecificInfo.codecType != kVideoCodecVP8)
-                {
-                    // This is the first packet for this frame.
-                    _codecSpecificInfo.codecSpecific.VP8.pictureId = -1;
-                    _codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
-                    _codecSpecificInfo.codecSpecific.VP8.layerSync = false;
-                    _codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
-                    _codecSpecificInfo.codecType = kVideoCodecVP8;
-                }
-                _codecSpecificInfo.codecSpecific.VP8.nonReference =
-                    header->codecHeader.VP8.nonReference;
-                if (header->codecHeader.VP8.pictureId != kNoPictureId)
-                {
-                    _codecSpecificInfo.codecSpecific.VP8.pictureId =
-                        header->codecHeader.VP8.pictureId;
-                }
-                if (header->codecHeader.VP8.temporalIdx != kNoTemporalIdx)
-                {
-                    _codecSpecificInfo.codecSpecific.VP8.temporalIdx =
-                        header->codecHeader.VP8.temporalIdx;
-                    _codecSpecificInfo.codecSpecific.VP8.layerSync =
-                        header->codecHeader.VP8.layerSync;
-                }
-                if (header->codecHeader.VP8.keyIdx != kNoKeyIdx)
-                {
-                    _codecSpecificInfo.codecSpecific.VP8.keyIdx =
-                        header->codecHeader.VP8.keyIdx;
-                }
-                break;
-            }
-            default:
-            {
-                _codecSpecificInfo.codecType = kVideoCodecUnknown;
-                break;
-            }
-        }
+    if (header) {
+      switch (header->codec) {
+        case kRtpVideoVp8: {
+          if (_codecSpecificInfo.codecType != kVideoCodecVP8) {
+            // This is the first packet for this frame.
+            _codecSpecificInfo.codecSpecific.VP8.pictureId = -1;
+            _codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
+            _codecSpecificInfo.codecSpecific.VP8.layerSync = false;
+            _codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
+            _codecSpecificInfo.codecType = kVideoCodecVP8;
+          }
+          _codecSpecificInfo.codecSpecific.VP8.nonReference =
+              header->codecHeader.VP8.nonReference;
+          if (header->codecHeader.VP8.pictureId != kNoPictureId) {
+            _codecSpecificInfo.codecSpecific.VP8.pictureId =
+                header->codecHeader.VP8.pictureId;
+          }
+          if (header->codecHeader.VP8.temporalIdx != kNoTemporalIdx) {
+            _codecSpecificInfo.codecSpecific.VP8.temporalIdx =
+                header->codecHeader.VP8.temporalIdx;
+            _codecSpecificInfo.codecSpecific.VP8.layerSync =
+                header->codecHeader.VP8.layerSync;
+          }
+          if (header->codecHeader.VP8.keyIdx != kNoKeyIdx) {
+            _codecSpecificInfo.codecSpecific.VP8.keyIdx =
+                header->codecHeader.VP8.keyIdx;
+          }
+          break;
+        }
+        case kRtpVideoH264: {
+          _codecSpecificInfo.codecSpecific.H264.nalu_header =
+              header->codecHeader.H264.nalu_header;
+          _codecSpecificInfo.codecSpecific.H264.single_nalu =
+              header->codecHeader.H264.single_nalu;
+          _codecSpecificInfo.codecType = kVideoCodecH264;
+          break;
+        }
+        default: {
+          _codecSpecificInfo.codecType = kVideoCodecUnknown;
+          break;
+        }
     }
+  }
 }
 
 const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {
   return &_fragmentation;
 }
 
 int32_t
 VCMEncodedFrame::VerifyAndAllocate(const uint32_t minimumSize)
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -32,16 +32,20 @@ void CopyCodecSpecific(const CodecSpecif
           info->codecSpecific.VP8.nonReference;
       (*rtp)->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
       (*rtp)->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
       (*rtp)->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
       (*rtp)->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
       (*rtp)->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
       return;
     }
+    case kVideoCodecH264:
+      (*rtp)->codec = kRtpVideoH264;
+      (*rtp)->simulcastIdx = info->codecSpecific.H264.simulcastIdx;
+      return;
     case kVideoCodecGeneric:
       (*rtp)->codec = kRtpVideoGeneric;
       (*rtp)->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
       return;
     default:
       // No codec specific info. Change RTP header pointer to NULL.
       *rtp = NULL;
       return;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
@@ -42,18 +42,55 @@ bool IsKeyFrame(FrameListPair pair) {
 bool HasNonEmptyState(FrameListPair pair) {
   return pair.second->GetState() != kStateEmpty;
 }
 
 void FrameList::InsertFrame(VCMFrameBuffer* frame) {
   insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
 }
 
-VCMFrameBuffer* FrameList::FindFrame(uint32_t timestamp) const {
+// Find a Frame which (may) include seq_num.
+// Note: if we don't have an end for the frame yet AND there are multiple Frames
+// with the same timestamp being input, in theory you can get packets
+// for a later Frame mixed with an earlier one when there's reordering.
+// e.g. for <frame 1: 1 2 3> <frame 2: 4 5 6> and we receive
+//          1 2 4 3 5 6
+// or       4 1 2 3 5 6
+// we'll return <frame 1> for packet 4, and at some point it needs to move to
+// <frame 2>.  You can't key off isFirstPacket or kNaluStart because the OOO packet
+// may be 5.
+
+// This can be done by re-characterizing 4 when <frame 1> becomes complete
+// and we find it doesn't include 4.  Perhaps a better abstraction would be
+// to keep the packets in a single sorted list (per timestamp or not,
+// doesn't really matter), and then on insertion look to see if it's in a
+// complete unit (kNaluComplete or kNaluStart ... kNaluEnd sequence), and
+// remove the set *then*.
+//
+// If we instead limit multiple frames with the same timestamp to
+// kNaluComplete (single-packet) frames, it's simpler.  You do need to be
+// careful to pull off Frames only if they're contiguous in sequence number
+// to the previous frame, but that's normal since you can get 4 5 6 1 2 3
+// Note that you still have to be careful about reordering:
+// <frame 1: 1> <frame 2: 2 3 4>
+// and arrival 2 1 3 4
+// means you must not match the frame created for 2 when 1 comes in.
+
+VCMFrameBuffer* FrameList::FindFrame(uint16_t seq_num, uint32_t timestamp) const {
   FrameList::const_iterator it = find(timestamp);
+  // TODO(jesup): use seq_num to do the fancier version above, or
+  // rearchitect per above to keep a single list and pull out Frames as they
+  // become complete (or decodable).
+
+  // Simple version: skip already-complete frames.
+  // Note: higher level must deal with the 2 1 3 4 case above by not calling
+  // this for single-NALU packets.
+  while (it != end() && it->second->GetState() == kStateComplete) {
+    it++;
+  }
   if (it == end())
     return NULL;
   return it->second;
 }
 
 VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
   FrameList::iterator it = find(timestamp);
   if (it == end())
@@ -585,22 +622,31 @@ VCMFrameBufferEnum VCMJitterBuffer::GetF
     if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
       Flush();
       return kFlushIndicator;
     }
     return kOldPacket;
   }
   num_consecutive_old_packets_ = 0;
 
-  *frame = incomplete_frames_.FindFrame(packet.timestamp);
-  if (*frame)
-    return kNoError;
-  *frame = decodable_frames_.FindFrame(packet.timestamp);
-  if (*frame)
-    return kNoError;
+  // Handle the 2 1 3 4 case (where 2 3 4 are frame 2, all sharing one
+  // timestamp) from above, for complete NALUs (single-NALU frames) only.
+
+  // TODO(jesup): To handle a sequence of fragmented NALUs which are all
+  // slices of the same lower-case frame (timestamp), the more complete
+  // solution for FindFrame that uses the seqNum and can move packets
+  // between sessions would be needed.
+  if (packet.completeNALU != kNaluComplete) {
+    *frame = incomplete_frames_.FindFrame(packet.seqNum, packet.timestamp);
+    if (*frame)
+      return kNoError;
+    *frame = decodable_frames_.FindFrame(packet.seqNum, packet.timestamp);
+    if (*frame && (*frame)->GetState() != kStateComplete)
+      return kNoError;
+  }
 
   // No match, return empty frame.
   *frame = GetEmptyFrame();
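The FindFrame change above leans on FrameList holding more than one Frame
per timestamp and skipping entries that are already complete. A minimal
standalone sketch of that lookup (assuming a multimap-like container; the
real FrameList also orders timestamps in a wraparound-aware way):

    #include <map>
    #include <stdint.h>

    struct Frame { bool complete; };
    typedef std::multimap<uint32_t, Frame*> FrameMap;

    // Return the first not-yet-complete Frame for |timestamp|, or NULL.
    // Mirrors the "skip already-complete frames" loop in FindFrame.
    Frame* FindIncomplete(const FrameMap& frames, uint32_t timestamp) {
        FrameMap::const_iterator it = frames.find(timestamp);
        while (it != frames.end() && it->first == timestamp &&
               it->second->complete) {
            ++it;
        }
        if (it == frames.end() || it->first != timestamp)
            return NULL;
        return it->second;
    }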