Bug 901583: Reapply mozilla patches on top of webrtc.org 3.34, use NEON detection rs=jesup
author: Randell Jesup <rjesup@jesup.org>
Fri, 30 Aug 2013 02:08:57 -0400
changeset 145084 df48be62a887166b8eb95907e2cbbc63664b770f
parent 145083 c118114b08f1fc72e2e359402ba4bd212886e8f2
child 145085 581d27f7d81194c6411e4844d5226c984cdf2543
push id: 25192
push user: emorley@mozilla.com
push date: Fri, 30 Aug 2013 16:23:44 +0000
treeherder: autoland@cfe8b0ab6d59
reviewers: jesup
bugs: 901583
milestone: 26.0a1
content/media/webrtc/Makefile.in
content/media/webrtc/MediaEngineWebRTC.h
content/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/Makefile.in
media/webrtc/shared_libs.mk
media/webrtc/signaling/signaling.gyp
media/webrtc/signaling/src/common/NullTransport.h
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.h
media/webrtc/trunk/peerconnection.gyp
media/webrtc/trunk/webrtc/build/arm_neon.gypi
media/webrtc/trunk/webrtc/build/common.gypi
media/webrtc/trunk/webrtc/build/merge_libs.gyp
media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
media/webrtc/trunk/webrtc/common_types.h
media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.h
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.cc
media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.h
media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_opensles.cc
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_opensles.h
media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
media/webrtc/trunk/webrtc/modules/video_capture/android/java/org/webrtc/videoengine/VideoCaptureAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/java/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.h
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
media/webrtc/trunk/webrtc/modules/video_capture/mac/video_capture_mac.mm
media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.cc
media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.h
media/webrtc/trunk/webrtc/modules/video_processing/main/source/video_processing.gypi
media/webrtc/trunk/webrtc/system_wrappers/interface/asm_defines.h
media/webrtc/trunk/webrtc/system_wrappers/interface/tick_util.h
media/webrtc/trunk/webrtc/system_wrappers/source/atomic32_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable.cc
media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/cpu_info.cc
media/webrtc/trunk/webrtc/system_wrappers/source/rw_lock.cc
media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers.gyp
media/webrtc/trunk/webrtc/system_wrappers/source/thread_posix.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.cc
media/webrtc/trunk/webrtc/system_wrappers/source/trace_posix.cc
media/webrtc/trunk/webrtc/test/channel_transport/udp_transport_impl.cc
media/webrtc/trunk/webrtc/typedefs.h
media/webrtc/trunk/webrtc/video_engine/stream_synchronization.cc
media/webrtc/trunk/webrtc/video_engine/vie_channel.cc
media/webrtc/trunk/webrtc/video_engine/vie_impl.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.cc
media/webrtc/trunk/webrtc/video_engine/vie_receiver.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
media/webrtc/trunk/webrtc/voice_engine/output_mixer_unittest.cc
media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
media/webrtc/trunk/webrtc/voice_engine/voice_engine.gyp
media/webrtc/trunk/webrtc/voice_engine/voice_engine_defines.h
media/webrtc/trunk/webrtc/voice_engine/voice_engine_impl.cc
media/webrtc/webrtc_config.gypi
--- a/content/media/webrtc/Makefile.in
+++ b/content/media/webrtc/Makefile.in
@@ -14,15 +14,14 @@ OS_CXXFLAGS += -DNOMINMAX
 endif
 
 include $(topsrcdir)/config/rules.mk
 include $(topsrcdir)/ipc/chromium/chromium-config.mk
 
 ifdef MOZ_WEBRTC
 LOCAL_INCLUDES += \
   -I$(topsrcdir)/media/webrtc/trunk \
-  -I$(topsrcdir)/media/webrtc/trunk/webrtc \
   -I$(topsrcdir)/media/webrtc/signaling/src/common \
   -I$(topsrcdir)/media/webrtc/signaling/src/common/browser_logging \
   -I$(topsrcdir)/dom/base \
   -I$(topsrcdir)/dom/camera \
   $(NULL)
 endif
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -25,31 +25,31 @@
 #include "VideoSegment.h"
 #include "AudioSegment.h"
 #include "StreamBuffer.h"
 #include "MediaStreamGraph.h"
 
 // WebRTC library includes follow
 
 // Audio Engine
-#include "voice_engine/include/voe_base.h"
-#include "voice_engine/include/voe_codec.h"
-#include "voice_engine/include/voe_hardware.h"
-#include "voice_engine/include/voe_network.h"
-#include "voice_engine/include/voe_audio_processing.h"
-#include "voice_engine/include/voe_volume_control.h"
-#include "voice_engine/include/voe_external_media.h"
-#include "voice_engine/include/voe_audio_processing.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+#include "webrtc/voice_engine/include/voe_codec.h"
+#include "webrtc/voice_engine/include/voe_hardware.h"
+#include "webrtc/voice_engine/include/voe_network.h"
+#include "webrtc/voice_engine/include/voe_audio_processing.h"
+#include "webrtc/voice_engine/include/voe_volume_control.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/include/voe_audio_processing.h"
 
 // Video Engine
-#include "video_engine/include/vie_base.h"
-#include "video_engine/include/vie_codec.h"
-#include "video_engine/include/vie_render.h"
-#include "video_engine/include/vie_capture.h"
-#include "video_engine/include/vie_file.h"
+#include "webrtc/video_engine/include/vie_base.h"
+#include "webrtc/video_engine/include/vie_codec.h"
+#include "webrtc/video_engine/include/vie_render.h"
+#include "webrtc/video_engine/include/vie_capture.h"
+#include "webrtc/video_engine/include/vie_file.h"
 #ifdef MOZ_B2G_CAMERA
 #include "CameraPreviewMediaStream.h"
 #include "DOMCameraManager.h"
 #include "GonkCameraControl.h"
 #include "ImageContainer.h"
 #include "nsGlobalWindow.h"
 #include "prprf.h"
 #endif
@@ -296,19 +296,19 @@ public:
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime);
 
   virtual bool IsFake() {
     return false;
   }
 
   // VoEMediaProcess.
-  void Process(const int channel, const webrtc::ProcessingTypes type,
-               WebRtc_Word16 audio10ms[], const int length,
-               const int samplingFreq, const bool isStereo);
+  void Process(int channel, webrtc::ProcessingTypes type,
+               int16_t audio10ms[], int length,
+               int samplingFreq, bool isStereo);
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
 private:
   static const unsigned int KMaxDeviceNameLength = 128;
   static const unsigned int KMaxUniqueIdLength = 256;
 
   void Init();
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -338,22 +338,22 @@ MediaEngineWebRTCAudioSource::Shutdown()
 
   mVoERender->Release();
   mVoEBase->Release();
 
   mState = kReleased;
   mInitDone = false;
 }
 
-typedef WebRtc_Word16 sample;
+typedef int16_t sample;
 
 void
-MediaEngineWebRTCAudioSource::Process(const int channel,
-  const webrtc::ProcessingTypes type, sample* audio10ms,
-  const int length, const int samplingFreq, const bool isStereo)
+MediaEngineWebRTCAudioSource::Process(int channel,
+  webrtc::ProcessingTypes type, sample* audio10ms,
+  int length, int samplingFreq, bool isStereo)
 {
   MonitorAutoLock lock(mMonitor);
   if (mState != kStarted)
     return;
 
   uint32_t len = mSources.Length();
   for (uint32_t i = 0; i < len; i++) {
     nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
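
webrtc.org 3.34 dropped the const qualifiers and the WebRtc_Word16 typedef from VoEMediaProcess::Process, which is why the override above changes shape. A minimal sketch of an implementor against the new interface, assuming an existing VoiceEngine instance and channel id (the class name and buffer handling are illustrative, not part of this patch):

    #include "webrtc/voice_engine/include/voe_external_media.h"

    class MicTap : public webrtc::VoEMediaProcess {
    public:
      // Must match the 3.34 signature exactly (plain int/int16_t, no
      // const) or the virtual override silently fails to bind.
      virtual void Process(int channel, webrtc::ProcessingTypes type,
                           int16_t audio10ms[], int length,
                           int samplingFreq, bool isStereo) {
        // audio10ms holds one 10 ms block of PCM; inspect or copy it here.
      }
    };

    // Registration goes through VoEExternalMedia; voe and channel are
    // assumed to exist already:
    webrtc::VoEExternalMedia* xmedia =
        webrtc::VoEExternalMedia::GetInterface(voe);
    MicTap tap;
    xmedia->RegisterExternalMediaProcessing(channel,
                                            webrtc::kRecordingPerChannel,
                                            tap);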
--- a/dom/media/Makefile.in
+++ b/dom/media/Makefile.in
@@ -10,15 +10,15 @@ relativesrcdir   = @relativesrcdir@
 
 include $(DEPTH)/config/autoconf.mk
 
 include $(topsrcdir)/dom/dom-config.mk
 
 
 ifdef MOZ_WEBRTC
 LOCAL_INCLUDES += \
-  -I$(topsrcdir)/media/webrtc/trunk/webrtc \
+  -I$(topsrcdir)/media/webrtc/trunk \
   -I$(topsrcdir)/media/webrtc/signaling/src/common \
   $(NULL)
 endif
 
 include $(topsrcdir)/config/rules.mk
 include $(topsrcdir)/ipc/chromium/chromium-config.mk
--- a/media/webrtc/shared_libs.mk
+++ b/media/webrtc/shared_libs.mk
@@ -1,68 +1,67 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # shared libs for webrtc
 WEBRTC_LIBS = \
   $(call EXPAND_LIBNAME_PATH,common_video,$(DEPTH)/media/webrtc/trunk/webrtc/common_video/common_video_common_video) \
+  $(call EXPAND_LIBNAME_PATH,common_audio,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_common_audio) \
   $(call EXPAND_LIBNAME_PATH,video_capture_module,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_video_capture_module) \
   $(call EXPAND_LIBNAME_PATH,webrtc_utility,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_utility) \
   $(call EXPAND_LIBNAME_PATH,audio_coding_module,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_coding_module) \
   $(call EXPAND_LIBNAME_PATH,CNG,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_CNG) \
-  $(call EXPAND_LIBNAME_PATH,signal_processing,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_signal_processing) \
   $(call EXPAND_LIBNAME_PATH,G711,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_G711) \
   $(call EXPAND_LIBNAME_PATH,PCM16B,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_PCM16B) \
   $(call EXPAND_LIBNAME_PATH,NetEq,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_NetEq) \
-  $(call EXPAND_LIBNAME_PATH,resampler,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_resampler) \
-  $(call EXPAND_LIBNAME_PATH,vad,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_vad) \
   $(call EXPAND_LIBNAME_PATH,system_wrappers,$(DEPTH)/media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers_system_wrappers) \
   $(call EXPAND_LIBNAME_PATH,webrtc_video_coding,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_video_coding) \
   $(call EXPAND_LIBNAME_PATH,video_coding_utility,$(DEPTH)/media/webrtc/trunk/webrtc/modules/video_coding/utility/video_coding_utility_video_coding_utility) \
   $(call EXPAND_LIBNAME_PATH,webrtc_i420,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_i420) \
   $(call EXPAND_LIBNAME_PATH,webrtc_vp8,$(DEPTH)/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_webrtc_vp8) \
   $(call EXPAND_LIBNAME_PATH,webrtc_opus,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_webrtc_opus) \
   $(call EXPAND_LIBNAME_PATH,video_render_module,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_video_render_module) \
   $(call EXPAND_LIBNAME_PATH,video_engine_core,$(DEPTH)/media/webrtc/trunk/webrtc/video_engine/video_engine_video_engine_core) \
+  $(call EXPAND_LIBNAME_PATH,voice_engine,$(DEPTH)/media/webrtc/trunk/webrtc/voice_engine/voice_engine_voice_engine) \
   $(call EXPAND_LIBNAME_PATH,media_file,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_media_file) \
   $(call EXPAND_LIBNAME_PATH,rtp_rtcp,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_rtp_rtcp) \
   $(call EXPAND_LIBNAME_PATH,bitrate_controller,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_bitrate_controller) \
   $(call EXPAND_LIBNAME_PATH,remote_bitrate_estimator,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_remote_bitrate_estimator) \
   $(call EXPAND_LIBNAME_PATH,paced_sender,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_paced_sender) \
   $(call EXPAND_LIBNAME_PATH,video_processing,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_video_processing) \
-  $(call EXPAND_LIBNAME_PATH,voice_engine_core,$(DEPTH)/media/webrtc/trunk/webrtc/voice_engine/voice_engine_voice_engine_core) \
   $(call EXPAND_LIBNAME_PATH,audio_conference_mixer,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_conference_mixer) \
   $(call EXPAND_LIBNAME_PATH,audio_device,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_device) \
   $(call EXPAND_LIBNAME_PATH,audio_processing,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_processing) \
   $(call EXPAND_LIBNAME_PATH,yuv,$(DEPTH)/media/webrtc/trunk/third_party/libyuv/libyuv_libyuv) \
   $(call EXPAND_LIBNAME_PATH,nicer,$(DEPTH)/media/mtransport/third_party/nICEr/nicer_nicer) \
   $(call EXPAND_LIBNAME_PATH,nrappkit,$(DEPTH)/media/mtransport/third_party/nrappkit/nrappkit_nrappkit) \
   $(NULL)
 
 # if we're on an intel arch, we want SSE2 optimizations
 ifneq (,$(INTEL_ARCHITECTURE))
 WEBRTC_LIBS += \
   $(call EXPAND_LIBNAME_PATH,video_processing_sse2,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_video_processing_sse2) \
   $(call EXPAND_LIBNAME_PATH,audio_processing_sse2,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_processing_sse2) \
+  $(call EXPAND_LIBNAME_PATH,common_audio_sse2,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_common_audio_sse2) \
   $(NULL)
 endif
 
 ifeq ($(CPU_ARCH), arm)
 ifeq (Android,$(OS_TARGET))
 # NEON detection on WebRTC is Android only. If WebRTC supports Linux/arm etc,
 # we should remove OS check
 # extra ARM libs
 WEBRTC_LIBS += \
   $(call EXPAND_LIBNAME_PATH,cpu_features_android,$(DEPTH)/media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers_cpu_features_android) \
   $(NULL)
 # neon for ARM
 ifeq ($(BUILD_ARM_NEON),1)
 WEBRTC_LIBS += \
-  $(call EXPAND_LIBNAME_PATH,signal_processing_neon,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_signal_processing_neon) \
+  $(call EXPAND_LIBNAME_PATH,common_audio_neon,$(DEPTH)/media/webrtc/trunk/webrtc/common_audio/common_audio_common_audio_neon) \
   $(call EXPAND_LIBNAME_PATH,audio_processing_neon,$(DEPTH)/media/webrtc/trunk/webrtc/modules/modules_audio_processing_neon) \
   $(NULL)
 endif
 endif
 endif
 
 
 # If you enable one of these codecs in webrtc_config.gypi, you'll need to re-add the
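
The cpu_features_android library linked above backs the runtime NEON probe the commit title refers to: when the compiler cannot assume NEON (arm_neon==0), WebRTC builds both plain and NEON object files and selects between them at runtime. A hedged sketch of that dispatch pattern, assuming the WebRtc_GetCPUFeaturesARM() probe from system_wrappers' cpu_features_wrapper.h:

    #include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"

    static bool CanUseNeon() {
    #if defined(WEBRTC_ARCH_ARM_NEON)
      return true;   // NEON guaranteed at compile time (arm_neon==1)
    #elif defined(WEBRTC_DETECT_ARM_NEON)
      // Runtime probe; on Android this is implemented by the
      // cpu_features_android library linked in above.
      return (WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0;
    #else
      return false;  // no NEON path compiled in
    #endif
    }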
--- a/media/webrtc/signaling/signaling.gyp
+++ b/media/webrtc/signaling/signaling.gyp
@@ -52,17 +52,16 @@
         '../../../ipc/chromium/src',
         '../../../ipc/chromium/src/base/third_party/nspr',
         '../../../xpcom/base',
         '$(DEPTH)/dist/include',
         '../../../dom/base',
         '../../../content/media',
         '../../../media/mtransport',
         '../trunk',
-        '../trunk/webrtc',
         '../trunk/webrtc/video_engine/include',
         '../trunk/webrtc/voice_engine/include',
         '../trunk/webrtc/modules/interface',
         '../trunk/webrtc/peerconnection',
         '../../../netwerk/srtp/src/include',
         '../../../netwerk/srtp/src/crypto/include',
         '../../../ipc/chromium/src',
       ],
--- a/media/webrtc/signaling/src/common/NullTransport.h
+++ b/media/webrtc/signaling/src/common/NullTransport.h
@@ -3,17 +3,17 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 
 #ifndef NULL_TRANSPORT_H_
 #define NULL_TRANSPORT_H_
 
 #include "mozilla/Attributes.h"
 
-#include "common_types.h"
+#include "webrtc/common_types.h"
 
 namespace mozilla {
 
 /**
  * NullTransport is registered as ExternalTransport to throw away data
  */
 class NullTransport : public webrtc::Transport
 {
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -8,17 +8,17 @@
 #include "AudioConduit.h"
 #include "nsCOMPtr.h"
 #include "mozilla/Services.h"
 #include "nsServiceManagerUtils.h"
 #include "nsIPrefService.h"
 #include "nsIPrefBranch.h"
 #include "nsThreadUtils.h"
 
-#include "voice_engine/include/voe_errors.h"
+#include "webrtc/voice_engine/include/voe_errors.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidJNIWrapper.h"
 #endif
 
 namespace mozilla {
 
 static const char* logTag ="WebrtcAudioSessionConduit";
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -6,24 +6,24 @@
 #ifndef AUDIO_SESSION_H_
 #define AUDIO_SESSION_H_
 
 #include "mozilla/Attributes.h"
 
 #include "MediaConduitInterface.h"
 
 // Audio Engine Includes
-#include "common_types.h"
-#include "voice_engine/include/voe_base.h"
-#include "voice_engine/include/voe_volume_control.h"
-#include "voice_engine/include/voe_codec.h"
-#include "voice_engine/include/voe_file.h"
-#include "voice_engine/include/voe_network.h"
-#include "voice_engine/include/voe_external_media.h"
-#include "voice_engine/include/voe_audio_processing.h"
+#include "webrtc/common_types.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+#include "webrtc/voice_engine/include/voe_volume_control.h"
+#include "webrtc/voice_engine/include/voe_codec.h"
+#include "webrtc/voice_engine/include/voe_file.h"
+#include "webrtc/voice_engine/include/voe_network.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/include/voe_audio_processing.h"
 
 //Some WebRTC types for short notations
  using webrtc::VoEBase;
  using webrtc::VoENetwork;
  using webrtc::VoECodec;
  using webrtc::VoEExternalMedia;
  using webrtc::VoEAudioProcessing;
 
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "CSFLog.h"
 #include "nspr.h"
 
 #include "VideoConduit.h"
 #include "AudioConduit.h"
-#include "video_engine/include/vie_errors.h"
+#include "webrtc/video_engine/include/vie_errors.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidJNIWrapper.h"
 #endif
 
 namespace mozilla {
 
 static const char* logTag ="WebrtcVideoSessionConduit";
@@ -554,17 +554,17 @@ WebrtcVideoConduit::SelectSendResolution
     // This will avoid us continually retrying this operation if it fails.
     // If the resolution changes, we'll try again.  In the meantime, we'll
     // keep using the old size in the encoder.
     mSendingWidth = width;
     mSendingHeight = height;
 
     // Get current vie codec.
     webrtc::VideoCodec vie_codec;
-    WebRtc_Word32 err;
+    int32_t err;
 
     if ((err = mPtrViECodec->GetSendCodec(mChannel, vie_codec)) != 0)
     {
       CSFLogError(logTag, "%s: GetSendCodec failed, err %d", __FUNCTION__, err);
       return false;
     }
     if (vie_codec.width != width || vie_codec.height != height)
     {
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -5,24 +5,24 @@
 #ifndef VIDEO_SESSION_H_
 #define VIDEO_SESSION_H_
 
 #include "mozilla/Attributes.h"
 
 #include "MediaConduitInterface.h"
 
 // Video Engine Includes
-#include "common_types.h"
-#include "video_engine/include/vie_base.h"
-#include "video_engine/include/vie_capture.h"
-#include "video_engine/include/vie_codec.h"
-#include "video_engine/include/vie_render.h"
-#include "video_engine/include/vie_network.h"
-#include "video_engine/include/vie_file.h"
-#include "video_engine/include/vie_rtp_rtcp.h"
+#include "webrtc/common_types.h"
+#include "webrtc/video_engine/include/vie_base.h"
+#include "webrtc/video_engine/include/vie_capture.h"
+#include "webrtc/video_engine/include/vie_codec.h"
+#include "webrtc/video_engine/include/vie_render.h"
+#include "webrtc/video_engine/include/vie_network.h"
+#include "webrtc/video_engine/include/vie_file.h"
+#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
 
 /** This file hosts several structures identifying different aspects
  * of a RTP Session.
  */
 
  using  webrtc::ViEBase;
  using  webrtc::ViENetwork;
  using  webrtc::ViECodec;
--- a/media/webrtc/trunk/peerconnection.gyp
+++ b/media/webrtc/trunk/peerconnection.gyp
@@ -33,17 +33,17 @@
           'message': 'Generating scream',
         }, ],
         'dependencies': [
           'webrtc/modules/modules.gyp:audio_device',
           'webrtc/modules/modules.gyp:video_capture_module',
 #          'webrtc/modules/modules.gyp:video_render_module',
 #          'webrtc/system_wrappers/source/system_wrappers.gyp:system_wrappers',
           'webrtc/video_engine/video_engine.gyp:video_engine_core',
-          'webrtc/voice_engine/voice_engine.gyp:voice_engine_core',
+          'webrtc/voice_engine/voice_engine.gyp:voice_engine',
           '<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
         ],
       }, ],
     ],
   }, ],
   'conditions': [
     ['build_with_mozilla==0', {
     'targets': [
--- a/media/webrtc/trunk/webrtc/build/arm_neon.gypi
+++ b/media/webrtc/trunk/webrtc/build/arm_neon.gypi
@@ -18,13 +18,35 @@
 #   ],
 #   'includes': ['path/to/this/gypi/file'],
 # }
 
 {
   'cflags!': [
     '-mfpu=vfpv3-d16',
   ],
+  'cflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
   'cflags': [
     '-mfpu=neon',
     '-flax-vector-conversions',
   ],
+  'cflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags_mozilla!': [
+    '-mfpu=vfpv3-d16',
+  ],
+  'asflags': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+  'asflags_mozilla': [
+    '-mfpu=neon',
+    '-flax-vector-conversions',
+  ],
+
 }
--- a/media/webrtc/trunk/webrtc/build/common.gypi
+++ b/media/webrtc/trunk/webrtc/build/common.gypi
@@ -30,23 +30,34 @@
           }],
         ],
       },
       'build_with_chromium%': '<(build_with_chromium)',
       'build_with_libjingle%': '<(build_with_libjingle)',
       'webrtc_root%': '<(webrtc_root)',
 
       'webrtc_vp8_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp8',
+      'include_g711%': 1,
+      'include_g722%': 1,
+      'include_ilbc%': 1,
       'include_opus%': 1,
+      'include_isac%': 1,
+      'include_pcm16b%': 1,
     },
     'build_with_chromium%': '<(build_with_chromium)',
     'build_with_libjingle%': '<(build_with_libjingle)',
     'webrtc_root%': '<(webrtc_root)',
     'webrtc_vp8_dir%': '<(webrtc_vp8_dir)',
+
+    'include_g711%': '<(include_g711)',
+    'include_g722%': '<(include_g722)',
+    'include_ilbc%': '<(include_ilbc)',
     'include_opus%': '<(include_opus)',
+    'include_isac%': '<(include_isac)',
+    'include_pcm16b%': '<(include_pcm16b)',
 
     # The Chromium common.gypi we use treats all gyp files without
     # chromium_code==1 as third party code. This disables many of the
     # preferred warning settings.
     #
     # We can set this here to have WebRTC code treated as Chromium code. Our
     # third party code will still have the reduced warning settings.
     'chromium_code': 1,
@@ -114,16 +125,31 @@
         # flood of chromium-style warnings. Investigate enabling them:
         # http://code.google.com/p/webrtc/issues/detail?id=163
         'clang_use_chrome_plugins%': 0,
 
         # Switch between Android audio device OpenSL ES implementation
         # and Java Implementation
         'enable_android_opensl%': 0,
       }],
+      ['OS=="linux"', {
+        'include_alsa_audio%': 1,
+      }, {
+        'include_alsa_audio%': 0,
+      }],
+      ['OS=="solaris" or os_bsd==1', {
+        'include_pulse_audio%': 1,
+      }, {
+        'include_pulse_audio%': 0,
+      }],
+      ['OS=="linux" or OS=="solaris" or os_bsd==1', {
+        'include_v4l2_video_capture%': 1,
+      }, {
+        'include_v4l2_video_capture%': 0,
+      }],
       ['OS=="ios"', {
         'enable_video%': 0,
         'enable_protobuf%': 0,
         'build_libjpeg%': 0,
         'build_libyuv%': 0,
         'build_libvpx%': 0,
         'include_tests%': 0,
       }],
@@ -144,20 +170,25 @@
       '../..',
       # To include the top-level directory when building in Chrome, so we can
       # use full paths (e.g. headers inside testing/ or third_party/).
       '<(DEPTH)',
     ],
     'defines': [
       # TODO(leozwang): Run this as a gclient hook rather than at build-time:
       # http://code.google.com/p/webrtc/issues/detail?id=687
-      'WEBRTC_SVNREVISION="Unavailable(issue687)"',
+      'WEBRTC_SVNREVISION="\\\"Unavailable_issue687\\\""',
       #'WEBRTC_SVNREVISION="<!(python <(webrtc_root)/build/version.py)"',
     ],
     'conditions': [
+      ['moz_widget_toolkit_gonk==1', {
+        'defines' : [
+          'WEBRTC_GONK',
+        ],
+      }],
       ['enable_tracing==1', {
         'defines': ['WEBRTC_LOGGING',],
       }],
       ['build_with_mozilla==1', {
         'defines': [
           # Changes settings for Mozilla build.
           'WEBRTC_MOZILLA_BUILD',
          ],
@@ -185,27 +216,41 @@
         ],
       }],
       ['target_arch=="arm"', {
         'defines': [
           'WEBRTC_ARCH_ARM',
         ],
         'conditions': [
           ['armv7==1', {
-            'defines': ['WEBRTC_ARCH_ARM_V7',],
+            'defines': ['WEBRTC_ARCH_ARM_V7',
+                        'WEBRTC_BUILD_NEON_LIBS'],
             'conditions': [
               ['arm_neon==1', {
                 'defines': ['WEBRTC_ARCH_ARM_NEON',],
               }, {
                 'defines': ['WEBRTC_DETECT_ARM_NEON',],
               }],
             ],
           }],
         ],
       }],
+      ['os_bsd==1', {
+        'defines': [
+          'WEBRTC_BSD',
+          'WEBRTC_THREAD_RR',
+        ],
+      }],
+      ['OS=="dragonfly" or OS=="netbsd"', {
+        'defines': [
+          # doesn't support pthread_condattr_setclock
+          'WEBRTC_CLOCK_TYPE_REALTIME',
+        ],
+      }],
+      # Mozilla: if we support Mozilla on MIPS, we'll need to mod the cflags entries here
       ['target_arch=="mipsel"', {
         'defines': [
           'MIPS32_LE',
         ],
         'conditions': [
           ['mips_fpu==1', {
             'defines': [
               'MIPS_FPU_LE',
@@ -256,28 +301,36 @@
       }],
       ['OS=="ios"', {
         'defines': [
           'WEBRTC_MAC',
           'WEBRTC_IOS',
         ],
       }],
       ['OS=="linux"', {
+#        'conditions': [
+#          ['have_clock_monotonic==1', {
+#            'defines': [
+#              'WEBRTC_CLOCK_TYPE_REALTIME',
+#            ],
+#          }],
+#        ],
         'defines': [
           'WEBRTC_LINUX',
         ],
       }],
       ['OS=="mac"', {
         'defines': [
           'WEBRTC_MAC',
         ],
       }],
       ['OS=="win"', {
         'defines': [
           'WEBRTC_WIN',
+	  'WEBRTC_EXPORT',
         ],
         # TODO(andrew): enable all warnings when possible.
         # TODO(phoglund): get rid of 4373 supression when
         # http://code.google.com/p/webrtc/issues/detail?id=261 is solved.
         'msvs_disabled_warnings': [
           4373,  # legacy warning for ignoring const / volatile in signatures.
           4389,  # Signed/unsigned mismatch.
         ],
--- a/media/webrtc/trunk/webrtc/build/merge_libs.gyp
+++ b/media/webrtc/trunk/webrtc/build/merge_libs.gyp
@@ -39,10 +39,12 @@
           'outputs': ['<(output_lib)'],
           'action': ['python',
                      'merge_libs.py',
                      '<(PRODUCT_DIR)',
                      '<(output_lib)',],
         },
       ],
     },
+#      }],
+#    ],
   ],
 }
--- a/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
+++ b/media/webrtc/trunk/webrtc/common_audio/common_audio.gyp
@@ -16,22 +16,36 @@
       'type': 'static_library',
       'dependencies': [
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
       ],
       'include_dirs': [
         'resampler/include',
         'signal_processing/include',
       ],
+      'target_conditions': [
+        ['build_with_mozilla==1', {
+          'include_dirs': [
+            '$(DEPTH)/dist/include',
+          ],
+        }],
+      ],
       'direct_dependent_settings': {
         'include_dirs': [
           'resampler/include',
           'signal_processing/include',
           'vad/include',
         ],
+        'conditions': [
+          ['build_with_mozilla==1', {
+            'include_dirs': [
+              '$(DEPTH)/dist/include',
+            ],
+          }],
+        ],
       },
       'sources': [
         'audio_util.cc',
         'include/audio_util.h',
         'resampler/include/push_resampler.h',
         'resampler/include/resampler.h',
         'resampler/push_resampler.cc',
         'resampler/push_sinc_resampler.cc',
@@ -139,16 +153,17 @@
       'targets': [
         {
           'target_name': 'common_audio_sse2',
           'type': 'static_library',
           'sources': [
             'resampler/sinc_resampler_sse.cc',
           ],
           'cflags': ['-msse2',],
+          'cflags_mozilla': ['-msse2',],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],  # targets
     }],
     ['target_arch=="arm" and armv7==1', {
       'targets': [
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/include/resampler.h
@@ -12,105 +12,55 @@
 /*
  * A wrapper for resampling a numerous amount of sampling combinations.
  */
 
 #ifndef WEBRTC_RESAMPLER_RESAMPLER_H_
 #define WEBRTC_RESAMPLER_RESAMPLER_H_
 
 #include "webrtc/typedefs.h"
+#include "speex/speex_resampler.h"
 
 namespace webrtc
 {
 
-// TODO(andrew): the implementation depends on the exact values of this enum.
-// It should be rewritten in a less fragile way.
+#define FIXED_RATE_RESAMPLER 0x10
 enum ResamplerType
 {
-    // 4 MSB = Number of channels
-    // 4 LSB = Synchronous or asynchronous
-
-    kResamplerSynchronous = 0x10,
-    kResamplerAsynchronous = 0x11,
-    kResamplerSynchronousStereo = 0x20,
-    kResamplerAsynchronousStereo = 0x21,
-    kResamplerInvalid = 0xff
-};
-
-// TODO(andrew): doesn't need to be part of the interface.
-enum ResamplerMode
-{
-    kResamplerMode1To1,
-    kResamplerMode1To2,
-    kResamplerMode1To3,
-    kResamplerMode1To4,
-    kResamplerMode1To6,
-    kResamplerMode1To12,
-    kResamplerMode2To3,
-    kResamplerMode2To11,
-    kResamplerMode4To11,
-    kResamplerMode8To11,
-    kResamplerMode11To16,
-    kResamplerMode11To32,
-    kResamplerMode2To1,
-    kResamplerMode3To1,
-    kResamplerMode4To1,
-    kResamplerMode6To1,
-    kResamplerMode12To1,
-    kResamplerMode3To2,
-    kResamplerMode11To2,
-    kResamplerMode11To4,
-    kResamplerMode11To8
+    kResamplerSynchronous            = 0x00,
+    kResamplerSynchronousStereo      = 0x01,
+    kResamplerFixedSynchronous       = 0x00 | FIXED_RATE_RESAMPLER,
+    kResamplerFixedSynchronousStereo = 0x01 | FIXED_RATE_RESAMPLER,
 };
 
 class Resampler
 {
-
 public:
     Resampler();
     // TODO(andrew): use an init function instead.
-    Resampler(int inFreq, int outFreq, ResamplerType type);
+    Resampler(int in_freq, int out_freq, ResamplerType type);
     ~Resampler();
 
     // Reset all states
-    int Reset(int inFreq, int outFreq, ResamplerType type);
+    int Reset(int in_freq, int out_freq, ResamplerType type);
 
     // Reset all states if any parameter has changed
-    int ResetIfNeeded(int inFreq, int outFreq, ResamplerType type);
+    int ResetIfNeeded(int in_freq, int out_freq, ResamplerType type);
 
     // Synchronous resampling, all output samples are written to samplesOut
-    int Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
-             int maxLen, int &outLen);
-
-    // Asynchronous resampling, input
-    int Insert(int16_t* samplesIn, int lengthIn);
-
-    // Asynchronous resampling output, remaining samples are buffered
-    int Pull(int16_t* samplesOut, int desiredLen, int &outLen);
+    int Push(const int16_t* samples_in, int length_in,
+             int16_t* samples_out, int max_len, int &out_len);
 
 private:
-    // Generic pointers since we don't know what states we'll need
-    void* state1_;
-    void* state2_;
-    void* state3_;
+    bool IsFixedRate() { return !!(type_ & FIXED_RATE_RESAMPLER); }
 
-    // Storage if needed
-    int16_t* in_buffer_;
-    int16_t* out_buffer_;
-    int in_buffer_size_;
-    int out_buffer_size_;
-    int in_buffer_size_max_;
-    int out_buffer_size_max_;
+    SpeexResamplerState* state_;
 
     // State
-    int my_in_frequency_khz_;
-    int my_out_frequency_khz_;
-    ResamplerMode my_mode_;
-    ResamplerType my_type_;
-
-    // Extra instance for stereo
-    Resampler* slave_left_;
-    Resampler* slave_right_;
+    int in_freq_;
+    int out_freq_;
+    int channels_;
+    ResamplerType type_;
 };
 
 } // namespace webrtc
 
 #endif // WEBRTC_RESAMPLER_RESAMPLER_H_
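
The rewritten header swaps the hand-rolled rate-pair table for a single Speex resampler state; only synchronous push-mode survives. A hedged usage sketch against the new interface (rates and buffer sizes are illustrative):

    #include "webrtc/common_audio/resampler/include/resampler.h"

    // Mono 48 kHz -> 16 kHz, one 10 ms frame at a time.
    webrtc::Resampler rs(48000, 16000, webrtc::kResamplerSynchronous);

    int16_t in[480];    // 10 ms of mono 48 kHz audio (fill from capture)
    int16_t out[480];   // note: the new Push() requires max_len >= length_in
    int out_len = 0;
    if (rs.Push(in, 480, out, 480, out_len) == 0) {
      // out_len == 160: one 10 ms frame at 16 kHz now sits in out[]
    }

The kResamplerFixedSynchronous variants exist so that same-rate pushes reduce to a memcpy without spinning up a Speex state, per the Reset() logic in resampler.cc below.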
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/push_resampler.cc
@@ -8,17 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/common_audio/resampler/include/push_resampler.h"
 
 #include <cstring>
 
 #include "webrtc/common_audio/include/audio_util.h"
-#include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/resampler/push_sinc_resampler.h"
 
 namespace webrtc {
 
 PushResampler::PushResampler()
     : sinc_resampler_(NULL),
       sinc_resampler_right_(NULL),
       src_sample_rate_hz_(0),
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler.cc
@@ -10,1075 +10,126 @@
 
 
 /*
  * A wrapper for resampling a numerous amount of sampling combinations.
  */
 
 #include <stdlib.h>
 #include <string.h>
+#include <assert.h>
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
 
+// TODO(jesup) better adjust per platform ability
+// Note: if these are changed (higher), you may need to change the
+// KernelDelay values in the unit tests here and in output_mixer.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK)
+#define RESAMPLER_QUALITY 2
+#else
+#define RESAMPLER_QUALITY 3
+#endif
 
 namespace webrtc
 {
 
-Resampler::Resampler()
+Resampler::Resampler() : state_(NULL), type_(kResamplerSynchronous)
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
+  // Note: Push will fail until Reset() is called
 }
 
-Resampler::Resampler(int inFreq, int outFreq, ResamplerType type)
+Resampler::Resampler(int in_freq, int out_freq, ResamplerType type) :
+  state_(NULL) // all others get initialized in reset
 {
-    state1_ = NULL;
-    state2_ = NULL;
-    state3_ = NULL;
-    in_buffer_ = NULL;
-    out_buffer_ = NULL;
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-    // we need a reset before we will work
-    my_in_frequency_khz_ = 0;
-    my_out_frequency_khz_ = 0;
-    my_mode_ = kResamplerMode1To1;
-    my_type_ = kResamplerInvalid;
-    slave_left_ = NULL;
-    slave_right_ = NULL;
-
-    Reset(inFreq, outFreq, type);
+  Reset(in_freq, out_freq, type);
 }
 
 Resampler::~Resampler()
 {
-    if (state1_)
-    {
-        free(state1_);
-    }
-    if (state2_)
-    {
-        free(state2_);
-    }
-    if (state3_)
-    {
-        free(state3_);
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-    }
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+  }
 }
 
-int Resampler::ResetIfNeeded(int inFreq, int outFreq, ResamplerType type)
+int Resampler::ResetIfNeeded(int in_freq, int out_freq, ResamplerType type)
 {
-    int tmpInFreq_kHz = inFreq / 1000;
-    int tmpOutFreq_kHz = outFreq / 1000;
-
-    if ((tmpInFreq_kHz != my_in_frequency_khz_) || (tmpOutFreq_kHz != my_out_frequency_khz_)
-            || (type != my_type_))
-    {
-        return Reset(inFreq, outFreq, type);
-    } else
-    {
-        return 0;
-    }
+  if (!state_ || type != type_ ||
+      in_freq != in_freq_ || out_freq != out_freq_)
+  {
+    // Note that fixed-rate resamplers where input == output rate will
+    // have state_ == NULL, and will call Reset() here - but reset won't
+    // do anything beyond overwrite the member vars unless it needs a
+    // real resampler.
+    return Reset(in_freq, out_freq, type);
+  } else {
+    return 0;
+  }
 }
 
-int Resampler::Reset(int inFreq, int outFreq, ResamplerType type)
+int Resampler::Reset(int in_freq, int out_freq, ResamplerType type)
 {
-
-    if (state1_)
-    {
-        free(state1_);
-        state1_ = NULL;
-    }
-    if (state2_)
-    {
-        free(state2_);
-        state2_ = NULL;
-    }
-    if (state3_)
-    {
-        free(state3_);
-        state3_ = NULL;
-    }
-    if (in_buffer_)
-    {
-        free(in_buffer_);
-        in_buffer_ = NULL;
-    }
-    if (out_buffer_)
-    {
-        free(out_buffer_);
-        out_buffer_ = NULL;
-    }
-    if (slave_left_)
-    {
-        delete slave_left_;
-        slave_left_ = NULL;
-    }
-    if (slave_right_)
-    {
-        delete slave_right_;
-        slave_right_ = NULL;
-    }
-
-    in_buffer_size_ = 0;
-    out_buffer_size_ = 0;
-    in_buffer_size_max_ = 0;
-    out_buffer_size_max_ = 0;
-
-    // This might be overridden if parameters are not accepted.
-    my_type_ = type;
-
-    // Start with a math exercise, Euclid's algorithm to find the gcd:
-
-    int a = inFreq;
-    int b = outFreq;
-    int c = a % b;
-    while (c != 0)
-    {
-        a = b;
-        b = c;
-        c = a % b;
-    }
-    // b is now the gcd;
-
-    // We need to track what domain we're in.
-    my_in_frequency_khz_ = inFreq / 1000;
-    my_out_frequency_khz_ = outFreq / 1000;
-
-    // Scale with GCD
-    inFreq = inFreq / b;
-    outFreq = outFreq / b;
-
-    // Do we need stereo?
-    if ((my_type_ & 0xf0) == 0x20)
-    {
-        // Change type to mono
-        type = static_cast<ResamplerType>(
-            ((static_cast<int>(type) & 0x0f) + 0x10));
-        slave_left_ = new Resampler(inFreq, outFreq, type);
-        slave_right_ = new Resampler(inFreq, outFreq, type);
-    }
+  uint32_t channels = (type == kResamplerSynchronousStereo ||
+                       type == kResamplerFixedSynchronousStereo) ? 2 : 1;
 
-    if (inFreq == outFreq)
-    {
-        my_mode_ = kResamplerMode1To1;
-    } else if (inFreq == 1)
-    {
-        switch (outFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode1To2;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode1To3;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode1To4;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode1To6;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode1To12;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if (outFreq == 1)
-    {
-        switch (inFreq)
-        {
-            case 2:
-                my_mode_ = kResamplerMode2To1;
-                break;
-            case 3:
-                my_mode_ = kResamplerMode3To1;
-                break;
-            case 4:
-                my_mode_ = kResamplerMode4To1;
-                break;
-            case 6:
-                my_mode_ = kResamplerMode6To1;
-                break;
-            case 12:
-                my_mode_ = kResamplerMode12To1;
-                break;
-            default:
-                my_type_ = kResamplerInvalid;
-                return -1;
-        }
-    } else if ((inFreq == 2) && (outFreq == 3))
-    {
-        my_mode_ = kResamplerMode2To3;
-    } else if ((inFreq == 2) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode2To11;
-    } else if ((inFreq == 4) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode4To11;
-    } else if ((inFreq == 8) && (outFreq == 11))
-    {
-        my_mode_ = kResamplerMode8To11;
-    } else if ((inFreq == 3) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode3To2;
-    } else if ((inFreq == 11) && (outFreq == 2))
-    {
-        my_mode_ = kResamplerMode11To2;
-    } else if ((inFreq == 11) && (outFreq == 4))
-    {
-        my_mode_ = kResamplerMode11To4;
-    } else if ((inFreq == 11) && (outFreq == 16))
-    {
-        my_mode_ = kResamplerMode11To16;
-    } else if ((inFreq == 11) && (outFreq == 32))
-    {
-        my_mode_ = kResamplerMode11To32;
-    } else if ((inFreq == 11) && (outFreq == 8))
+  if (state_)
+  {
+    speex_resampler_destroy(state_);
+    state_ = NULL;
+  }
+  type_ = type;
+  channels_ = channels;
+  in_freq_ = in_freq;
+  out_freq_ = out_freq;
+
+  // For fixed-rate, same-rate resamples we just memcpy and so don't spin up a resampler
+  if (in_freq != out_freq || !IsFixedRate())
+  {
+    state_ = speex_resampler_init(channels, in_freq, out_freq, RESAMPLER_QUALITY, NULL);
+    if (!state_)
     {
-        my_mode_ = kResamplerMode11To8;
-    } else
-    {
-        my_type_ = kResamplerInvalid;
-        return -1;
+      return -1;
     }
-
-    // Now create the states we need
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            // No state needed;
-            break;
-        case kResamplerMode1To2:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To3:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            break;
-        case kResamplerMode1To4:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode1To6:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:6
-            state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state2_);
-            break;
-        case kResamplerMode1To12:
-            // 1:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:4
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 4:12
-            state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz(
-                (WebRtcSpl_State16khzTo48khz*) state3_);
-            break;
-        case kResamplerMode2To3:
-            // 2:6
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
-            WebRtcSpl_ResetResample16khzTo48khz((WebRtcSpl_State16khzTo48khz *)state1_);
-            // 6:3
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode2To11:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state2_);
-            break;
-        case kResamplerMode4To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
-            WebRtcSpl_ResetResample8khzTo22khz((WebRtcSpl_State8khzTo22khz *)state1_);
-            break;
-        case kResamplerMode8To11:
-            state1_ = malloc(sizeof(WebRtcSpl_State16khzTo22khz));
-            WebRtcSpl_ResetResample16khzTo22khz((WebRtcSpl_State16khzTo22khz *)state1_);
-            break;
-        case kResamplerMode11To16:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To32:
-            // 11 -> 22
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-
-            // 22 -> 16
-            state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state2_);
-
-            // 16 -> 32
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode2To1:
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To1:
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            break;
-        case kResamplerMode4To1:
-            // 4:2
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode6To1:
-            // 6:2
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state1_);
-            // 2:1
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode12To1:
-            // 12:4
-            state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz(
-                (WebRtcSpl_State48khzTo16khz*) state1_);
-            // 4:2
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-            // 2:1
-            state3_ = malloc(8 * sizeof(int32_t));
-            memset(state3_, 0, 8 * sizeof(int32_t));
-            break;
-        case kResamplerMode3To2:
-            // 3:6
-            state1_ = malloc(8 * sizeof(int32_t));
-            memset(state1_, 0, 8 * sizeof(int32_t));
-            // 6:2
-            state2_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
-            WebRtcSpl_ResetResample48khzTo16khz((WebRtcSpl_State48khzTo16khz *)state2_);
-            break;
-        case kResamplerMode11To2:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-
-            state2_ = malloc(8 * sizeof(int32_t));
-            memset(state2_, 0, 8 * sizeof(int32_t));
-
-            break;
-        case kResamplerMode11To4:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
-            WebRtcSpl_ResetResample22khzTo8khz((WebRtcSpl_State22khzTo8khz *)state1_);
-            break;
-        case kResamplerMode11To8:
-            state1_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
-            WebRtcSpl_ResetResample22khzTo16khz((WebRtcSpl_State22khzTo16khz *)state1_);
-            break;
-
-    }
-
-    return 0;
+  }
+  return 0;
 }
 
-// Synchronous resampling, all output samples are written to samplesOut
-int Resampler::Push(const int16_t * samplesIn, int lengthIn, int16_t* samplesOut,
-                    int maxLen, int &outLen)
+// Synchronous resampling, all output samples are written to samples_out
+// TODO(jesup) Change to take samples-per-channel in and out
+int Resampler::Push(const int16_t* samples_in, int length_in,
+                    int16_t* samples_out, int max_len, int &out_len)
 {
-    // Check that the resampler is not in asynchronous mode
-    if (my_type_ & 0x0f)
-    {
-        return -1;
-    }
-
-    // Do we have a stereo signal?
-    if ((my_type_ & 0xf0) == 0x20)
+  if (max_len < length_in)
+  {
+    return -1;
+  }
+  if (!state_)
+  {
+    if (!IsFixedRate() || in_freq_ != out_freq_)
     {
-
-        // Split up the signal and call the slave object for each channel
-
-        int16_t* left = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* right = (int16_t*)malloc(lengthIn * sizeof(int16_t) / 2);
-        int16_t* out_left = (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int16_t* out_right =
-                (int16_t*)malloc(maxLen / 2 * sizeof(int16_t));
-        int res = 0;
-        for (int i = 0; i < lengthIn; i += 2)
-        {
-            left[i >> 1] = samplesIn[i];
-            right[i >> 1] = samplesIn[i + 1];
-        }
-
-        // It's OK to overwrite the local parameter, since it's just a copy
-        lengthIn = lengthIn / 2;
-
-        int actualOutLen_left = 0;
-        int actualOutLen_right = 0;
-        // Do resampling for right channel
-        res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, actualOutLen_left);
-        res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, actualOutLen_right);
-        if (res || (actualOutLen_left != actualOutLen_right))
-        {
-            free(left);
-            free(right);
-            free(out_left);
-            free(out_right);
-            return -1;
-        }
-
-        // Reassemble the signal
-        for (int i = 0; i < actualOutLen_left; i++)
-        {
-            samplesOut[i * 2] = out_left[i];
-            samplesOut[i * 2 + 1] = out_right[i];
-        }
-        outLen = 2 * actualOutLen_left;
-
-        free(left);
-        free(right);
-        free(out_left);
-        free(out_right);
-
-        return 0;
+      // Since we initialize to a non-Fixed type, Push() will fail
+      // until Reset() is called
+      return -1;
     }
 
-    // Containers for temp samples
-    int16_t* tmp;
-    int16_t* tmp_2;
-    // tmp data for resampling routines
-    int32_t* tmp_mem;
-
-    switch (my_mode_)
-    {
-        case kResamplerMode1To1:
-            memcpy(samplesOut, samplesIn, lengthIn * sizeof(int16_t));
-            outLen = lengthIn;
-            break;
-        case kResamplerMode1To2:
-            if (maxLen < (lengthIn * 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-            return 0;
-        case kResamplerMode1To3:
-
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn * 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode1To4:
-            if (maxLen < (lengthIn * 4))
-            {
-                return -1;
-            }
-
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:4
-            WebRtcSpl_UpsampleBy2(tmp, lengthIn * 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn * 4;
-            free(tmp);
-            return 0;
-        case kResamplerMode1To6:
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn * 6))
-            {
-                return -1;
-            }
-
-            //1:2
-
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            outLen = lengthIn * 2;
-
-            for (int i = 0; i < outLen; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode1To12:
-            // We can only handle blocks of 40 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 40) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn * 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(336 * sizeof(int32_t));
-            tmp = (int16_t*) malloc(sizeof(int16_t) * 4 * lengthIn);
-            //1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
-                                  (int32_t*) state1_);
-            outLen = lengthIn * 2;
-            //2:4
-            WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp, (int32_t*) state2_);
-            outLen = outLen * 2;
-            // 4:12
-            for (int i = 0; i < outLen; i += 160) {
-              // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
-              // as input and outputs a resampled block of 480 samples. The
-              // data is now actually in 32 kHz sampling rate, despite the
-              // function name, and with a resampling factor of three becomes
-              // 96 kHz.
-              WebRtcSpl_Resample16khzTo48khz(tmp + i, samplesOut + i * 3,
-                                             (WebRtcSpl_State16khzTo48khz*) state3_,
-                                             tmp_mem);
-            }
-            outLen = outLen * 3;
-            free(tmp_mem);
-            free(tmp);
-
-            return 0;
-        case kResamplerMode2To3:
-            if (maxLen < (lengthIn * 3 / 2))
-            {
-                return -1;
-            }
-            // 2:6
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
-            tmp_mem = (int32_t*)malloc(336 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo48khz(samplesIn + i, tmp + i * 3,
-                                               (WebRtcSpl_State16khzTo48khz *)state1_,
-                                               tmp_mem);
-            }
-            lengthIn = lengthIn * 3;
-            // 6:3
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode2To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 2))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * 2 * lengthIn);
-            // 1:2
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(tmp + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state2_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode4To11:
-
-            // We can only handle blocks of 80 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 80) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 4))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(98 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 80)
-            {
-                WebRtcSpl_Resample8khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 4,
-                                              (WebRtcSpl_State8khzTo22khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 4;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode8To11:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 160) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 11) / 8))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(88 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 160)
-            {
-                WebRtcSpl_Resample16khzTo22khz(samplesIn + i, samplesOut + (i * 11) / 8,
-                                               (WebRtcSpl_State16khzTo22khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 11) / 8;
-            free(tmp_mem);
-            return 0;
-
-        case kResamplerMode11To16:
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 16) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(tmp + i, samplesOut + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            outLen = (lengthIn * 16) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode11To32:
-
-            // We can only handle blocks of 110 samples
-            if ((lengthIn % 110) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 32) / 11))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn * 2));
-
-            // 11 -> 22 kHz in samplesOut
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-
-            // 22 -> 16 in tmp
-            for (int i = 0; i < (lengthIn * 2); i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesOut + i, tmp + (i / 220) * 160,
-                                               (WebRtcSpl_State22khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-
-            // 16 -> 32 in samplesOut
-            WebRtcSpl_UpsampleBy2(tmp, (lengthIn * 16) / 11, samplesOut,
-                                  (int32_t*)state3_);
-
-            outLen = (lengthIn * 32) / 11;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-
-        case kResamplerMode2To1:
-            if (maxLen < (lengthIn / 2))
-            {
-                return -1;
-            }
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, samplesOut, (int32_t*)state1_);
-            outLen = lengthIn / 2;
-            return 0;
-        case kResamplerMode3To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 3))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode4To1:
-            if (maxLen < (lengthIn / 4))
-            {
-                return -1;
-            }
-            tmp = (int16_t*)malloc(sizeof(int16_t) * lengthIn / 2);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn / 2, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 4;
-            free(tmp);
-            return 0;
-
-        case kResamplerMode6To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < (lengthIn / 6))
-            {
-                return -1;
-            }
-
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((sizeof(int16_t) * lengthIn) / 3);
-
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            WebRtcSpl_DownsampleBy2(tmp, outLen, samplesOut, (int32_t*)state2_);
-            free(tmp);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode12To1:
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0) {
-              return -1;
-            }
-            if (maxLen < (lengthIn / 12)) {
-              return -1;
-            }
-
-            tmp_mem = (int32_t*) malloc(496 * sizeof(int32_t));
-            tmp = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 3);
-            tmp_2 = (int16_t*) malloc((sizeof(int16_t) * lengthIn) / 6);
-            // 12:4
-            for (int i = 0; i < lengthIn; i += 480) {
-              // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
-              // as input and outputs a resampled block of 160 samples. The
-              // data is now actually in 96 kHz sampling rate, despite the
-              // function name, and with a resampling factor of 1/3 becomes
-              // 32 kHz.
-              WebRtcSpl_Resample48khzTo16khz(samplesIn + i, tmp + i / 3,
-                                             (WebRtcSpl_State48khzTo16khz*) state1_,
-                                             tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp_mem);
-            // 4:2
-            WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2,
-                                    (int32_t*) state2_);
-            outLen = outLen / 2;
-            free(tmp);
-            // 2:1
-            WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
-                                    (int32_t*) state3_);
-            free(tmp_2);
-            outLen = outLen / 2;
-            return 0;
-        case kResamplerMode3To2:
-            if (maxLen < (lengthIn * 2 / 3))
-            {
-                return -1;
-            }
-            // 3:6
-            tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 2));
-            WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp, (int32_t*)state1_);
-            lengthIn *= 2;
-            // 6:2
-            // We can only handle blocks of 480 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 480) != 0)
-            {
-                free(tmp);
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(496 * sizeof(int32_t));
-            for (int i = 0; i < lengthIn; i += 480)
-            {
-                WebRtcSpl_Resample48khzTo16khz(tmp + i, samplesOut + i / 3,
-                                               (WebRtcSpl_State48khzTo16khz *)state2_,
-                                               tmp_mem);
-            }
-            outLen = lengthIn / 3;
-            free(tmp);
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To2:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 2) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-            tmp = (int16_t*)malloc((lengthIn * 4) / 11 * sizeof(int16_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, tmp + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            lengthIn = (lengthIn * 4) / 11;
-
-            WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut, (int32_t*)state2_);
-            outLen = lengthIn / 2;
-
-            free(tmp_mem);
-            free(tmp);
-            return 0;
-        case kResamplerMode11To4:
-            // We can only handle blocks of 220 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 4) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(126 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo8khz(samplesIn + i, samplesOut + (i * 4) / 11,
-                                              (WebRtcSpl_State22khzTo8khz *)state1_,
-                                              tmp_mem);
-            }
-            outLen = (lengthIn * 4) / 11;
-            free(tmp_mem);
-            return 0;
-        case kResamplerMode11To8:
-            // We can only handle blocks of 160 samples
-            // Can be fixed, but I don't think it's needed
-            if ((lengthIn % 220) != 0)
-            {
-                return -1;
-            }
-            if (maxLen < ((lengthIn * 8) / 11))
-            {
-                return -1;
-            }
-            tmp_mem = (int32_t*)malloc(104 * sizeof(int32_t));
-
-            for (int i = 0; i < lengthIn; i += 220)
-            {
-                WebRtcSpl_Resample22khzTo16khz(samplesIn + i, samplesOut + (i * 8) / 11,
-                                               (WebRtcSpl_State22khzTo16khz *)state1_,
-                                               tmp_mem);
-            }
-            outLen = (lengthIn * 8) / 11;
-            free(tmp_mem);
-            return 0;
-            break;
-
-    }
+    // Fixed-rate, same-frequency "resample": use memcpy, which avoids
+    // filtering and delay.  For non-fixed rates, where we might tweak
+    // from 48000->48000 to 48000->48001 for drift, we need to resample
+    // (and filter) all the time to avoid glitches on rate changes.
+    memcpy(samples_out, samples_in, length_in * sizeof(*samples_in));
+    out_len = length_in;
     return 0;
-}
-
-// Asynchronous resampling, input
-int Resampler::Insert(int16_t * samplesIn, int lengthIn)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-    int sizeNeeded, tenMsblock;
-
-    // Determine need for size of outBuffer
-    sizeNeeded = out_buffer_size_ + ((lengthIn + in_buffer_size_) * my_out_frequency_khz_)
-            / my_in_frequency_khz_;
-    if (sizeNeeded > out_buffer_size_max_)
-    {
-        // Round the value upwards to complete 10 ms blocks
-        tenMsblock = my_out_frequency_khz_ * 10;
-        sizeNeeded = (sizeNeeded / tenMsblock + 1) * tenMsblock;
-        out_buffer_ = (int16_t*)realloc(out_buffer_, sizeNeeded * sizeof(int16_t));
-        out_buffer_size_max_ = sizeNeeded;
-    }
-
-    // If we need to use inBuffer, make sure all input data fits there.
-
-    tenMsblock = my_in_frequency_khz_ * 10;
-    if (in_buffer_size_ || (lengthIn % tenMsblock))
-    {
-        // Check if input buffer size is enough
-        if ((in_buffer_size_ + lengthIn) > in_buffer_size_max_)
-        {
-            // Round the value upwards to complete 10 ms blocks
-            sizeNeeded = ((in_buffer_size_ + lengthIn) / tenMsblock + 1) * tenMsblock;
-            in_buffer_ = (int16_t*)realloc(in_buffer_,
-                                           sizeNeeded * sizeof(int16_t));
-            in_buffer_size_max_ = sizeNeeded;
-        }
-        // Copy in data to input buffer
-        memcpy(in_buffer_ + in_buffer_size_, samplesIn, lengthIn * sizeof(int16_t));
-
-        // Resample all available 10 ms blocks
-        int lenOut;
-        int dataLenToResample = (in_buffer_size_ / tenMsblock) * tenMsblock;
-        Push(in_buffer_, dataLenToResample, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-
-        // Save the rest
-        memmove(in_buffer_, in_buffer_ + dataLenToResample,
-                (in_buffer_size_ - dataLenToResample) * sizeof(int16_t));
-        in_buffer_size_ -= dataLenToResample;
-    } else
-    {
-        // Just resample
-        int lenOut;
-        Push(in_buffer_, lengthIn, out_buffer_ + out_buffer_size_,
-             out_buffer_size_max_ - out_buffer_size_, lenOut);
-        out_buffer_size_ += lenOut;
-    }
-
-    return 0;
-}
-
-// Asynchronous resampling output, remaining samples are buffered
-int Resampler::Pull(int16_t* samplesOut, int desiredLen, int &outLen)
-{
-    if (my_type_ != kResamplerAsynchronous)
-    {
-        return -1;
-    }
-
-    // Check that we have enough data
-    if (desiredLen <= out_buffer_size_)
-    {
-        // Give out the date
-        memcpy(samplesOut, out_buffer_, desiredLen * sizeof(int32_t));
-
-        // Shuffle down remaining
-        memmove(out_buffer_, out_buffer_ + desiredLen,
-                (out_buffer_size_ - desiredLen) * sizeof(int16_t));
-
-        // Update remaining size
-        out_buffer_size_ -= desiredLen;
-
-        return 0;
-    } else
-    {
-        return -1;
-    }
+  }
+  assert(channels_ == 1 || channels_ == 2);
+  // length_in counts interleaved samples; speex wants frames per channel.
+  length_in = length_in >> (channels_ - 1);
+  spx_uint32_t len = (spx_uint32_t) length_in;
+  spx_uint32_t out = (spx_uint32_t) (max_len >> (channels_ - 1));
+  if ((speex_resampler_process_interleaved_int(state_, samples_in, &len,
+                             samples_out, &out) != RESAMPLER_ERR_SUCCESS) ||
+      len != (spx_uint32_t) length_in)
+  {
+    return -1;
+  }
+  out_len = (int) (channels_ * out);
+  return 0;
 }
 
 } // namespace webrtc
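
For readers following the rewrite above: the new Push() keeps only two paths, the
memcpy fast path for fixed same-rate streams and a single speex call for everything
else. A minimal sketch of the speex path follows, assuming only the public
speex_resampler_process_interleaved_int() API; the name PushSketch and the
free-standing signature are hypothetical, not the patched method itself.

  #include <assert.h>
  #include <speex/speex_resampler.h>

  // Interleaved sample counts are converted to per-channel frame counts before
  // the speex call, then back to interleaved counts afterwards.
  int PushSketch(SpeexResamplerState* state, int channels,
                 const spx_int16_t* samples_in, int length_in,
                 spx_int16_t* samples_out, int max_len, int& out_len) {
    assert(channels == 1 || channels == 2);
    spx_uint32_t in_frames = (spx_uint32_t) (length_in >> (channels - 1));
    spx_uint32_t out_frames = (spx_uint32_t) (max_len >> (channels - 1));
    if (speex_resampler_process_interleaved_int(state, samples_in, &in_frames,
                                                samples_out, &out_frames) !=
        RESAMPLER_ERR_SUCCESS) {
      return -1;  // speex reported an error; claim no output
    }
    out_len = (int) (channels * out_frames);
    return 0;
  }

speex updates in_frames/out_frames to the counts actually consumed and produced,
which is why the real method also verifies that the whole input was consumed.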
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/resampler_unittest.cc
@@ -3,67 +3,60 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <math.h>
+#include <algorithm>  // for std::min() in RunResampleTest()
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 #include "webrtc/common_audio/resampler/include/resampler.h"
 
 // TODO(andrew): this is a work-in-progress. Many more tests are needed.
 
 namespace webrtc {
 namespace {
 const ResamplerType kTypes[] = {
   kResamplerSynchronous,
-  kResamplerAsynchronous,
   kResamplerSynchronousStereo,
-  kResamplerAsynchronousStereo
-  // kResamplerInvalid excluded
 };
 const size_t kTypesSize = sizeof(kTypes) / sizeof(*kTypes);
 
 // Rates we must support.
 const int kMaxRate = 96000;
 const int kRates[] = {
   8000,
   16000,
   32000,
-  44000,
+  44100,
   48000,
   kMaxRate
 };
 const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
 const int kMaxChannels = 2;
 const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
 
-// TODO(andrew): should we be supporting these combinations?
-bool ValidRates(int in_rate, int out_rate) {
-  // Not the most compact notation, for clarity.
-  if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
-      (out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
-    return false;
-  }
-
-  return true;
-}
-
 class ResamplerTest : public testing::Test {
  protected:
   ResamplerTest();
   virtual void SetUp();
   virtual void TearDown();
+  void RunResampleTest(int channels,
+                       int src_sample_rate_hz,
+                       int dst_sample_rate_hz);
 
   Resampler rs_;
   int16_t data_in_[kDataSize];
   int16_t data_out_[kDataSize];
+  int16_t data_reference_[kDataSize];
 };
 
 ResamplerTest::ResamplerTest() {}
 
 void ResamplerTest::SetUp() {
   // Initialize input data with anything. The tests are content independent.
   memset(data_in_, 1, sizeof(data_in_));
 }
@@ -78,66 +70,139 @@ TEST_F(ResamplerTest, Reset) {
   // Check that all required combinations are supported.
   for (size_t i = 0; i < kRatesSize; ++i) {
     for (size_t j = 0; j < kRatesSize; ++j) {
       for (size_t k = 0; k < kTypesSize; ++k) {
         std::ostringstream ss;
         ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
             << ", type: " << kTypes[k];
         SCOPED_TRACE(ss.str());
-        if (ValidRates(kRates[i], kRates[j]))
-          EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
-        else
-          EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
       }
     }
   }
 }
 
-// TODO(tlegrand): Replace code inside the two tests below with a function
-// with number of channels and ResamplerType as input.
-TEST_F(ResamplerTest, Synchronous) {
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(int16_t* buffer, float data, int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i] = data * i;
+  }
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(int16_t* buffer, float left, float right,
+                    int sample_rate_hz) {
+  for (int i = 0; i < sample_rate_hz / 100; i++) {
+    buffer[i * 2] = left * i;
+    buffer[i * 2 + 1] = right * i;
+  }
+}
+
+// Computes the best SNR based on the error between |reference| and
+// |test|. It allows for a sample delay between the signals to
+// compensate for the resampling delay.
+float ComputeSNR(const int16_t* reference, const int16_t* test,
+                 int sample_rate_hz, int channels, int max_delay) {
+  float best_snr = 0;
+  int best_delay = 0;
+  int samples_per_channel = sample_rate_hz/100;
+  for (int delay = 0; delay < max_delay; delay++) {
+    float mse = 0;
+    float variance = 0;
+    for (int i = 0; i < samples_per_channel * channels - delay; i++) {
+      int error = reference[i] - test[i + delay];
+      mse += error * error;
+      variance += reference[i] * reference[i];
+    }
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  return best_snr;
+}
 
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
-      }
+void ResamplerTest::RunResampleTest(int channels,
+                                    int src_sample_rate_hz,
+                                    int dst_sample_rate_hz) {
+  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 30;
+  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float kDstLeft = kResamplingFactor * kSrcLeft;
+  const float kDstRight = kResamplingFactor * kSrcRight;
+  if (channels == 1)
+    SetMonoFrame(data_in_, kSrcLeft, src_sample_rate_hz);
+  else
+    SetStereoFrame(data_in_, kSrcLeft, kSrcRight, src_sample_rate_hz);
+
+  if (channels == 1) {
+    SetMonoFrame(data_out_, 0, dst_sample_rate_hz);
+    SetMonoFrame(data_reference_, kDstLeft, dst_sample_rate_hz);
+  } else {
+    SetStereoFrame(data_out_, 0, 0, dst_sample_rate_hz);
+    SetStereoFrame(data_reference_, kDstLeft, kDstRight, dst_sample_rate_hz);
+  }
+
+  // The speex resampler has a known delay dependent on quality and rates,
+  // which we approximate here. Multiplying by two gives us a crude maximum
+  // for any resampling, as the old resampler typically (but not always)
+  // has lower delay.  The actual delay is calculated internally based on the
+  // filter length in the QualityMap.
+  static const int kInputKernelDelaySamples = 16*3;
+  const int max_delay = std::min(1.0f, 1/kResamplingFactor) *
+                        kInputKernelDelaySamples * channels * 2;
+  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
+      channels, src_sample_rate_hz, channels, dst_sample_rate_hz);
+
+  int in_length = channels * src_sample_rate_hz / 100;
+  int out_length = 0;
+  EXPECT_EQ(0, rs_.Reset(src_sample_rate_hz, dst_sample_rate_hz,
+                         (channels == 1 ?
+                          kResamplerSynchronous :
+                          kResamplerSynchronousStereo)));
+  EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                        out_length));
+  EXPECT_EQ(channels * dst_sample_rate_hz / 100, out_length);
+
+  EXPECT_GT(ComputeSNR(data_reference_, data_out_, dst_sample_rate_hz,
+                       channels, max_delay), 40.0f);
+}
+
+TEST_F(ResamplerTest, Synchronous) {
+  // Number of channels is 1, mono mode.
+  const int kChannels = 1;
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 
 TEST_F(ResamplerTest, SynchronousStereo) {
   // Number of channels is 2, stereo mode.
   const int kChannels = 2;
-  for (size_t i = 0; i < kRatesSize; ++i) {
-    for (size_t j = 0; j < kRatesSize; ++j) {
-      std::ostringstream ss;
-      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
-      SCOPED_TRACE(ss.str());
-
-      if (ValidRates(kRates[i], kRates[j])) {
-        int in_length = kChannels * kRates[i] / 100;
-        int out_length = 0;
-        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
-                               kResamplerSynchronousStereo));
-        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
-                              out_length));
-        EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
-      } else {
-        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
-                                kResamplerSynchronousStereo));
-      }
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 44100, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
     }
   }
 }
 }  // namespace
 }  // namespace webrtc
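
As a concrete reading of the max_delay bound in RunResampleTest() (arithmetic only,
not measured output): kInputKernelDelaySamples is 16*3 = 48, so a mono
48000 Hz -> 16000 Hz run has kResamplingFactor = 3 and searches
min(1.0, 1/3) * 48 * 1 * 2 = 32 samples of delay, while any mono upsampling run
caps the min() term at 1.0 and searches up to 96 samples.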
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler_neon.cc
@@ -21,21 +21,21 @@ float SincResampler::Convolve_NEON(const
                                    const float* k2,
                                    double kernel_interpolation_factor) {
   float32x4_t m_input;
   float32x4_t m_sums1 = vmovq_n_f32(0);
   float32x4_t m_sums2 = vmovq_n_f32(0);
 
   const float* upper = input_ptr + kKernelSize;
   for (; input_ptr < upper; ) {
-    m_input = vld1q_f32(input_ptr);
+    m_input = vld1q_f32((const float32_t *) input_ptr);
     input_ptr += 4;
-    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
+    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32((const float32_t *) k1));
     k1 += 4;
-    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
+    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32((const float32_t *) k2));
     k2 += 4;
   }
 
   // Linearly interpolate the two "convolutions".
   m_sums1 = vmlaq_f32(
       vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
       m_sums2, vmovq_n_f32(kernel_interpolation_factor));
 
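
The added casts are build fixes rather than behavior changes: presumably some ARM
toolchains declare vld1q_f32() as taking const float32_t* and warn or error when
handed a const float*. A hedged minimal reduction of the same load/accumulate
pattern, assuming such a toolchain:

  #include <arm_neon.h>

  // Sums n floats (n must be a multiple of 4), casting each load the way
  // Convolve_NEON now does.
  static float SumFloats(const float* p, int n) {
    float32x4_t acc = vmovq_n_f32(0);
    for (int i = 0; i < n; i += 4)
      acc = vaddq_f32(acc, vld1q_f32((const float32_t*) (p + i)));
    float32x2_t s = vadd_f32(vget_low_f32(acc), vget_high_f32(acc));
    return vget_lane_f32(vpadd_f32(s, s), 0);
  }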
--- a/media/webrtc/trunk/webrtc/common_types.h
+++ b/media/webrtc/trunk/webrtc/common_types.h
@@ -326,17 +326,17 @@ typedef struct        // All levels are 
 enum NsModes    // type of Noise Suppression
 {
     kNsUnchanged = 0,   // previously set mode
     kNsDefault,         // platform default
     kNsConference,      // conferencing default
     kNsLowSuppression,  // lowest suppression
     kNsModerateSuppression,
     kNsHighSuppression,
-    kNsVeryHighSuppression,     // highest suppression
+    kNsVeryHighSuppression     // highest suppression
 };
 
 enum AgcModes                  // type of Automatic Gain Control
 {
     kAgcUnchanged = 0,        // previously set mode
     kAgcDefault,              // platform default
     // adaptive mode for use when analog volume control exists (e.g. for
     // PC softphone)
@@ -351,17 +351,17 @@ enum AgcModes                  // type o
 
 // EC modes
 enum EcModes                   // type of Echo Control
 {
     kEcUnchanged = 0,          // previously set mode
     kEcDefault,                // platform default
     kEcConference,             // conferencing default (aggressive AEC)
     kEcAec,                    // Acoustic Echo Cancellation
-    kEcAecm,                   // AEC mobile
+    kEcAecm                    // AEC mobile
 };
 
 // AECM modes
 enum AecmModes                 // mode of AECM
 {
     kAecmQuietEarpieceOrHeadset = 0,
                                // Quiet earpiece or headset use
     kAecmEarpiece,             // most earpiece use
@@ -403,31 +403,31 @@ enum NetEqModes             // NetEQ pla
     // Improved jitter robustness at the cost of increased delay. Can be
     // used in one-way communication.
     kNetEqStreaming = 1,
     // Optimized for decodability of fax signals rather than for perceived audio
     // quality.
     kNetEqFax = 2,
     // Minimal buffer management. Inserts zeros for lost packets and during
     // buffer increases.
-    kNetEqOff = 3,
+    kNetEqOff = 3
 };
 
 enum OnHoldModes            // On Hold direction
 {
     kHoldSendAndPlay = 0,    // Put both sending and playing in on-hold state.
     kHoldSendOnly,           // Put only sending in on-hold state.
     kHoldPlayOnly            // Put only playing in on-hold state.
 };
 
 enum AmrMode
 {
     kRfc3267BwEfficient = 0,
     kRfc3267OctetAligned = 1,
-    kRfc3267FileStorage = 2,
+    kRfc3267FileStorage = 2
 };
 
 // ==================================================================
 // Video specific types
 // ==================================================================
 
 // Raw video types
 enum RawVideoType
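
The comma removals in these enums are portability fixes, not behavior changes: the
C++03 grammar does not allow a comma after the final enumerator, so pedantic builds
warn or fail on it. A two-line sketch:

  enum Portable { kFirst, kLast };   // accepted by every C++ compiler
  // enum Strict { kOne, kTwo, };    // trailing comma: ill-formed C++03 (-pedantic)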
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -10,32 +10,28 @@
 
 
 #include "pcm16b.h"
 
 #include <stdlib.h>
 
 #include "typedefs.h"
 
-#ifdef WEBRTC_BIG_ENDIAN
-#include "signal_processing_library.h"
-#endif
-
 #define HIGHEND 0xFF00
 #define LOWEND    0xFF
 
 
 
 /* Encoder with int16_t Output */
 int16_t WebRtcPcm16b_EncodeW16(int16_t *speechIn16b,
                                int16_t len,
                                int16_t *speechOut16b)
 {
 #ifdef WEBRTC_BIG_ENDIAN
-    WEBRTC_SPL_MEMCPY_W16(speechOut16b, speechIn16b, len);
+    memcpy(speechOut16b, speechIn16b, len * sizeof(int16_t));
 #else
     int i;
     for (i=0;i<len;i++) {
         speechOut16b[i]=(((uint16_t)speechIn16b[i])>>8)|((((uint16_t)speechIn16b[i])<<8)&0xFF00);
     }
 #endif
     return(len<<1);
 }
@@ -64,17 +60,17 @@ int16_t WebRtcPcm16b_Encode(int16_t *spe
 /* Decoder with int16_t Input instead of char when the int16_t Encoder is used */
 int16_t WebRtcPcm16b_DecodeW16(void *inst,
                                int16_t *speechIn16b,
                                int16_t len,
                                int16_t *speechOut16b,
                                int16_t* speechType)
 {
 #ifdef WEBRTC_BIG_ENDIAN
-    WEBRTC_SPL_MEMCPY_W8(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
+    memcpy(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
 #else
     int i;
     int samples=len>>1;
 
     for (i=0;i<samples;i++) {
         speechOut16b[i]=(((uint16_t)speechIn16b[i])>>8)|(((uint16_t)(speechIn16b[i]&0xFF))<<8);
     }
 #endif
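
The little-endian branches above are plain 16-bit byte swaps written out as shifts
and masks. A self-contained sketch of the same expression with a worked value:

  #include <stdint.h>

  // (x >> 8) | ((x << 8) & 0xFF00): 0x1234 becomes 0x3412.
  static uint16_t Swap16(uint16_t x) {
    return (uint16_t) ((x >> 8) | ((x << 8) & 0xFF00));
  }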
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/main/source/audio_coding_module.gypi
@@ -5,31 +5,76 @@
 # tree. An additional intellectual property rights grant can be found
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
 {
   'variables': {
     'audio_coding_dependencies': [
       'CNG',
-      'G711',
-      'G722',
-      'iLBC',
-      'iSAC',
-      'iSACFix',
-      'PCM16B',
       'NetEq',
       '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
       '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
     ],
     'audio_coding_defines': [],
     'conditions': [
       ['include_opus==1', {
         'audio_coding_dependencies': ['webrtc_opus',],
         'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
+        'audio_coding_sources': [
+          'acm_opus.cc',
+          'acm_opus.h',
+        ],
+      }],
+      ['include_g711==1', {
+        'audio_coding_dependencies': ['G711',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G711',],
+        'audio_coding_sources': [
+          'acm_pcma.cc',
+          'acm_pcma.h',
+          'acm_pcmu.cc',
+          'acm_pcmu.h',
+        ],
+      }],
+      ['include_g722==1', {
+        'audio_coding_dependencies': ['G722',],
+        'audio_coding_defines': ['WEBRTC_CODEC_G722',],
+        'audio_coding_sources': [
+          'acm_g722.cc',
+          'acm_g722.h',
+          'acm_g7221.cc',
+          'acm_g7221.h',
+          'acm_g7221c.cc',
+          'acm_g7221c.h',
+        ],
+      }],
+      ['include_ilbc==1', {
+        'audio_coding_dependencies': ['iLBC',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ILBC',],
+        'audio_coding_sources': [
+          'acm_ilbc.cc',
+          'acm_ilbc.h',
+        ],
+      }],
+      ['include_isac==1', {
+        'audio_coding_dependencies': ['iSAC', 'iSACFix',],
+        'audio_coding_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFX',],
+        'audio_coding_sources': [
+          'acm_isac.cc',
+          'acm_isac.h',
+          'acm_isac_macros.h',
+        ],
+      }],
+      ['include_pcm16b==1', {
+        'audio_coding_dependencies': ['PCM16B',],
+        'audio_coding_defines': ['WEBRTC_CODEC_PCM16',],
+        'audio_coding_sources': [
+          'acm_pcm16b.cc',
+          'acm_pcm16b.h',
+        ],
       }],
     ],
   },
   'targets': [
     {
       'target_name': 'audio_coding_module',
       'type': 'static_library',
       'defines': [
@@ -44,57 +89,34 @@
       ],
       'direct_dependent_settings': {
         'include_dirs': [
           '../interface',
           '../../../interface',
         ],
       },
       'sources': [
+#        '<@(audio_coding_sources)',
         '../interface/audio_coding_module.h',
         '../interface/audio_coding_module_typedefs.h',
-        'acm_amr.cc',
-        'acm_amr.h',
-        'acm_amrwb.cc',
-        'acm_amrwb.h',
-        'acm_celt.cc',
-        'acm_celt.h',
         'acm_cng.cc',
         'acm_cng.h',
         'acm_codec_database.cc',
         'acm_codec_database.h',
         'acm_dtmf_detection.cc',
         'acm_dtmf_detection.h',
         'acm_dtmf_playout.cc',
         'acm_dtmf_playout.h',
-        'acm_g722.cc',
-        'acm_g722.h',
-        'acm_g7221.cc',
-        'acm_g7221.h',
-        'acm_g7221c.cc',
-        'acm_g7221c.h',
-        'acm_g729.cc',
-        'acm_g729.h',
-        'acm_g7291.cc',
-        'acm_g7291.h',
         'acm_generic_codec.cc',
         'acm_generic_codec.h',
-        'acm_gsmfr.cc',
-        'acm_gsmfr.h',
-        'acm_ilbc.cc',
-        'acm_ilbc.h',
-        'acm_isac.cc',
-        'acm_isac.h',
-        'acm_isac_macros.h',
         'acm_neteq.cc',
         'acm_neteq.h',
+# cheat until I get audio_coding_sources to work
         'acm_opus.cc',
         'acm_opus.h',
-        'acm_speex.cc',
-        'acm_speex.h',
         'acm_pcm16b.cc',
         'acm_pcm16b.h',
         'acm_pcma.cc',
         'acm_pcma.h',
         'acm_pcmu.cc',
         'acm_pcmu.h',
         'acm_red.cc',
         'acm_red.h',
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_defines.h
@@ -64,16 +64,18 @@
  * NETEQ_ISAC_CODEC               Enable iSAC
  *
  * NETEQ_ISAC_SWB_CODEC           Enable iSAC-SWB
  *
  * Note that the decoder of iSAC full-band operates at 32 kHz, that is the
  * decoded signal is at 32 kHz.
  * NETEQ_ISAC_FB_CODEC            Enable iSAC-FB
  *
+ * NETEQ_OPUS_CODEC               Enable Opus
+ *
  * NETEQ_G722_CODEC               Enable G.722
  *
  * NETEQ_G729_CODEC               Enable G.729
  *
  * NETEQ_G729_1_CODEC             Enable G.729.1
  *
  * NETEQ_G726_CODEC               Enable G.726
  *
@@ -316,39 +318,46 @@
     #define NETEQ_RED_CODEC
     #define NETEQ_VAD
     #define NETEQ_ARBITRARY_CODEC
 
     /* Narrowband codecs */
     #define NETEQ_PCM16B_CODEC
     #define NETEQ_G711_CODEC
     #define NETEQ_ILBC_CODEC
+    #define NETEQ_OPUS_CODEC
     #define NETEQ_G729_CODEC
     #define NETEQ_G726_CODEC
     #define NETEQ_GSMFR_CODEC
     #define NETEQ_AMR_CODEC
 
     /* Wideband codecs */
     #define NETEQ_WIDEBAND
     #define NETEQ_ISAC_CODEC
+    /* NETEQ_OPUS_CODEC already defined above; define it only once */
     #define NETEQ_G722_CODEC
     #define NETEQ_G722_1_CODEC
     #define NETEQ_G729_1_CODEC
     #define NETEQ_SPEEX_CODEC
     #define NETEQ_AMRWB_CODEC
 
     /* Super wideband 32kHz codecs */
     #define NETEQ_ISAC_SWB_CODEC
+    /*#define NETEQ_OPUS_CODEC*/
     #define NETEQ_32KHZ_WIDEBAND
     #define NETEQ_G722_1C_CODEC
     #define NETEQ_CELT_CODEC
+    /*#define NETEQ_OPUS_CODEC*/
+
+    /* hack in 48 kHz support */
+    #define NETEQ_48KHZ_WIDEBAND
 
     /* Super wideband 48kHz codecs */
     #define NETEQ_48KHZ_WIDEBAND
-    #define NETEQ_OPUS_CODEC
+    /*#define NETEQ_OPUS_CODEC*/
     #define NETEQ_ISAC_FB
 #endif
 
 /* Max output size from decoding one frame */
 #if defined(NETEQ_48KHZ_WIDEBAND)
     #define NETEQ_MAX_FRAME_SIZE 5760  /* 120 ms super wideband */
     #define NETEQ_MAX_OUTPUT_SIZE 6480  /* 120+15 ms super wideband (120 ms
                                          * decoded + 15 ms for merge overlap) */
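
The sizes quoted in these comments follow from 48 samples per millisecond at 48 kHz.
A quick sanity check (a sketch assuming a C++11 translation unit, not this C header):

  static_assert(48 * 120 == 5760, "NETEQ_MAX_FRAME_SIZE: 120 ms @ 48 kHz");
  static_assert(48 * (120 + 15) == 6480, "NETEQ_MAX_OUTPUT_SIZE: +15 ms merge overlap");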
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/packet_buffer.c
@@ -673,16 +673,16 @@ int WebRtcNetEQ_GetDefaultCodecSettings(
             codecBytes = 1560; /* 240ms @ 52kbps (30ms frames) */
             codecBuffers = 8;
         }
         else if (codecID[i] == kDecoderOpus)
         {
             codecBytes = 15300; /* 240ms @ 510kbps (60ms frames) */
             codecBuffers = 30;  /* Replicating the value for PCMu/a */
         }
         else if ((codecID[i] == kDecoderPCM16B) ||
             (codecID[i] == kDecoderPCM16B_2ch))
         {
             codecBytes = 3360; /* 210ms */
             codecBuffers = 15;
         }
         else if ((codecID[i] == kDecoderPCM16Bwb) ||
             (codecID[i] == kDecoderPCM16Bwb_2ch))
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.cc
@@ -21,91 +21,92 @@
 #include "audio_device_utility.h"
 #include "audio_device_jni_android.h"
 #include "audio_device_config.h"
 
 #include "trace.h"
 #include "thread_wrapper.h"
 #include "event_wrapper.h"
 
+#include "AndroidJNIWrapper.h"
+
 namespace webrtc
 {
 // TODO(leozwang): Refactor jni and the following global variables, a
 // good example is jni_helper in Chromium.
 JavaVM* AudioDeviceAndroidJni::globalJvm = NULL;
-JNIEnv* AudioDeviceAndroidJni::globalJNIEnv = NULL;
 jobject AudioDeviceAndroidJni::globalContext = NULL;
 jclass AudioDeviceAndroidJni::globalScClass = NULL;
 
 // ----------------------------------------------------------------------------
 //  SetAndroidAudioDeviceObjects
 //
 //  Global function for setting Java pointers and creating Java
 //  objects that are global to all instances of VoiceEngine used
 //  by the same Java application.
 // ----------------------------------------------------------------------------
 
 int32_t AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
     void* javaVM,
-    void* env,
     void* context) {
-  __android_log_print(ANDROID_LOG_DEBUG, "WEBRTC", "JNI:%s", __FUNCTION__);
+  return SetAndroidAudioDeviceObjects(javaVM, NULL, context);
+}
+
+int32_t AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
+    void* javaVM,
+    void* null_env,
+    void* context) {
+  WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, -1,
+               "%s called", __FUNCTION__);
 
   // TODO(leozwang): Make this function thread-safe.
   globalJvm = reinterpret_cast<JavaVM*>(javaVM);
 
-  if (env) {
-    globalJNIEnv = reinterpret_cast<JNIEnv*>(env);
+  JNIEnv* env = NULL;
+
+  // Check if we already got a reference
+  if (globalJvm && !globalScClass) {
+    if (globalJvm->GetEnv((void**)&env, JNI_VERSION_1_4) != JNI_OK) {
+      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioDevice, -1,
+                   "%s: could not get Java environment", __FUNCTION__);
+      return -1;
+    }
+    globalJvm->AttachCurrentThread(&env, NULL);
+
     // Get java class type (note path to class packet).
-    jclass javaScClassLocal = globalJNIEnv->FindClass(
-        "org/webrtc/voiceengine/WebRTCAudioDevice");
-    if (!javaScClassLocal) {
+    globalScClass = jsjni_GetGlobalClassRef(AudioCaptureClass);
+    if (!globalScClass) {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                    "%s: could not find java class", __FUNCTION__);
       return -1; // exception thrown
     }
 
-    // Create a global reference to the class (to tell JNI that we are
-    // referencing it after this function has returned).
-    globalScClass = reinterpret_cast<jclass> (
-        globalJNIEnv->NewGlobalRef(javaScClassLocal));
-    if (!globalScClass) {
-      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
-                   "%s: could not create reference", __FUNCTION__);
-      return -1;
-    }
-
-    globalContext = globalJNIEnv->NewGlobalRef(
+    globalContext = env->NewGlobalRef(
         reinterpret_cast<jobject>(context));
     if (!globalContext) {
       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                    "%s: could not create context reference", __FUNCTION__);
       return -1;
     }
-
-    // Delete local class ref, we only use the global ref
-    globalJNIEnv->DeleteLocalRef(javaScClassLocal);
   }
   else { // User is resetting the env variable
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
                  "%s: env is NULL, assuming deinit", __FUNCTION__);
 
-    if (!globalJNIEnv) {
+    if (!env) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                    "%s: saved env already NULL", __FUNCTION__);
       return 0;
     }
 
-    globalJNIEnv->DeleteGlobalRef(globalScClass);
+    env->DeleteGlobalRef(globalScClass);
     globalScClass = reinterpret_cast<jclass>(NULL);
 
-    globalJNIEnv->DeleteGlobalRef(globalContext);
+    env->DeleteGlobalRef(globalContext);
     globalContext = reinterpret_cast<jobject>(NULL);
-
-    globalJNIEnv = reinterpret_cast<JNIEnv*>(NULL);
   }
 
   return 0;
 }
 
 // ============================================================================
 //                            Construction & Destruction
 // ============================================================================
@@ -135,18 +136,18 @@ AudioDeviceAndroidJni::AudioDeviceAndroi
             _playoutDeviceIsSpecified(false), _initialized(false),
             _recording(false), _playing(false), _recIsInitialized(false),
             _playIsInitialized(false), _micIsInitialized(false),
             _speakerIsInitialized(false), _startRec(false),
             _startPlay(false), _playWarning(0),
             _playError(0), _recWarning(0), _recError(0), _delayPlayout(0),
             _delayRecording(0),
             _AGC(false),
-            _samplingFreqIn((N_REC_SAMPLES_PER_SEC/1000)),
-            _samplingFreqOut((N_PLAY_SAMPLES_PER_SEC/1000)),
+            _samplingFreqIn((N_REC_SAMPLES_PER_SEC)),
+            _samplingFreqOut((N_PLAY_SAMPLES_PER_SEC)),
             _maxSpeakerVolume(0),
             _loudSpeakerOn(false),
             _recAudioSource(1), // 1 is AudioSource.MIC which is our default
             _javaVM(NULL), _jniEnvPlay(NULL),
             _jniEnvRec(NULL), _javaScClass(0), _javaScObj(0),
             _javaPlayBuffer(0), _javaRecBuffer(0), _javaDirectPlayBuffer(NULL),
             _javaDirectRecBuffer(NULL), _javaMidPlayAudio(0),
             _javaMidRecAudio(0)
@@ -1380,36 +1381,29 @@ int32_t AudioDeviceAndroidJni::InitPlayo
             return -1;
         }
         isAttached = true;
     }
 
     // get the method ID
     jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                                 "(I)I");
-
-    int samplingFreq = 44100;
-    if (_samplingFreqOut != 44)
-    {
-        samplingFreq = _samplingFreqOut * 1000;
-    }
-
     int retVal = -1;
 
     // Call java sc object method
-    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq);
+    jint res = env->CallIntMethod(_javaScObj, initPlaybackID, _samplingFreqOut);
     if (res < 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "InitPlayback failed (%d)", res);
     }
     else
     {
         // Set the audio device buffer sampling rate
-        _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut * 1000);
+        _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut);
         _playIsInitialized = true;
         retVal = 0;
     }
 
     // Detach this thread if it was attached
     if (isAttached)
     {
         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
@@ -1485,40 +1479,33 @@ int32_t AudioDeviceAndroidJni::InitRecor
             return -1;
         }
         isAttached = true;
     }
 
     // get the method ID
     jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                  "(II)I");
-
-    int samplingFreq = 44100;
-    if (_samplingFreqIn != 44)
-    {
-        samplingFreq = _samplingFreqIn * 1000;
-    }
-
     int retVal = -1;
 
     // call java sc object method
     jint res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
-                                  samplingFreq);
+                                  _samplingFreqIn);
     if (res < 0)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "InitRecording failed (%d)", res);
     }
     else
     {
         // Set the audio device buffer sampling rate
-        _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn * 1000);
+        _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn);
 
         // the init rec function returns a fixed delay
-        _delayRecording = res / _samplingFreqIn;
+        _delayRecording = (res * 1000) / _samplingFreqIn;
 
         _recIsInitialized = true;
         retVal = 0;
     }
 
     // Detach this thread if it was attached
     if (isAttached)
     {
@@ -2026,24 +2013,17 @@ int32_t AudioDeviceAndroidJni::SetRecord
     if (samplesPerSec > 48000 || samplesPerSec < 8000)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "  Invalid sample rate");
         return -1;
     }
 
     // set the recording sample rate to use
-    if (samplesPerSec == 44100)
-    {
-        _samplingFreqIn = 44;
-    }
-    else
-    {
-        _samplingFreqIn = samplesPerSec / 1000;
-    }
+    _samplingFreqIn = samplesPerSec;
 
     // Update the AudioDeviceBuffer
     _ptrAudioBuffer->SetRecordingSampleRate(samplesPerSec);
 
     return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -2057,24 +2037,17 @@ int32_t AudioDeviceAndroidJni::SetPlayou
     if (samplesPerSec > 48000 || samplesPerSec < 8000)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "  Invalid sample rate");
         return -1;
     }
 
     // set the playout sample rate to use
-    if (samplesPerSec == 44100)
-    {
-        _samplingFreqOut = 44;
-    }
-    else
-    {
-        _samplingFreqOut = samplesPerSec / 1000;
-    }
+    _samplingFreqOut = samplesPerSec;
 
     // Update the AudioDeviceBuffer
     _ptrAudioBuffer->SetPlayoutSampleRate(samplesPerSec);
 
     return 0;
 }
 
 // ----------------------------------------------------------------------------
@@ -2206,17 +2179,17 @@ int32_t AudioDeviceAndroidJni::InitJavaR
     if (cid == NULL)
     {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                      "%s: could not get constructor ID", __FUNCTION__);
         return -1; /* exception thrown */
     }
 
     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
-                 "construct object", __FUNCTION__);
+                 "%s: construct object", __FUNCTION__);
 
     // construct the object
     jobject javaScObjLocal = env->NewObject(_javaScClass, cid);
     if (!javaScObjLocal)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                      "%s: could not create Java sc object", __FUNCTION__);
         return -1;
@@ -2418,21 +2391,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
             return -1;
         }
         isAttached = true;
     }
 
     if (_samplingFreqIn > 0)
     {
         // read the configured sampling rate
-        samplingFreq = 44100;
-        if (_samplingFreqIn != 44)
-        {
-            samplingFreq = _samplingFreqIn * 1000;
-        }
+        samplingFreq = _samplingFreqIn;
         WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                      "  Trying configured recording sampling rate %d",
                      samplingFreq);
     }
 
     // get the method ID
     jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
                                                  "(II)I");
@@ -2463,24 +2432,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
         }
         else
         {
             keepTrying = false;
         }
     }
 
     // set the recording sample rate to use
-    if (samplingFreq == 44100)
-    {
-        _samplingFreqIn = 44;
-    }
-    else
-    {
-        _samplingFreqIn = samplingFreq / 1000;
-    }
+    _samplingFreqIn = samplingFreq;
 
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "Recording sample rate set to (%d)", _samplingFreqIn);
 
     // get the method ID
     jmethodID stopRecordingID = env->GetMethodID(_javaScClass, "StopRecording",
                                                  "()I");
 
@@ -2494,21 +2456,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
 
     // get the method ID
     jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
                                                 "(I)I");
 
     if (_samplingFreqOut > 0)
     {
         // read the configured sampling rate
-        samplingFreq = 44100;
-        if (_samplingFreqOut != 44)
-        {
-            samplingFreq = _samplingFreqOut * 1000;
-        }
+        samplingFreq = _samplingFreqOut;
         WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                      "  Trying configured playback sampling rate %d",
                      samplingFreq);
     }
     else
     {
         // set the preferred sampling frequency
         if (samplingFreq == 8000)
@@ -2552,25 +2510,17 @@ int32_t AudioDeviceAndroidJni::InitSampl
     if (_maxSpeakerVolume < 1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                      "  Did not get valid max speaker volume value (%d)",
                      _maxSpeakerVolume);
     }
 
     // set the playback sample rate to use
-    if (samplingFreq == 44100)
-    {
-        _samplingFreqOut = 44;
-    }
-    else
-    {
-        _samplingFreqOut = samplingFreq / 1000;
-    }
-
+    _samplingFreqOut = samplingFreq;
     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
                  "Playback sample rate set to (%d)", _samplingFreqOut);
 
     // get the method ID
     jmethodID stopPlaybackID = env->GetMethodID(_javaScClass, "StopPlayback",
                                                 "()I");
 
     // Call java sc object method
@@ -2673,17 +2623,17 @@ bool AudioDeviceAndroidJni::PlayThreadPr
         _playStartStopEvent.Set();
         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                      "Sent signal");
     }
 
     if (_playing)
     {
         int8_t playBuffer[2 * 480]; // Max 10 ms @ 48 kHz / 16 bit
-        uint32_t samplesToPlay = _samplingFreqOut * 10;
+        uint32_t samplesToPlay = _samplingFreqOut / 100;
 
         // ask for new PCM data to be played out using the AudioDeviceBuffer
         // ensure that this callback is executed without taking the
         // audio-thread lock
         UnLock();
         uint32_t nSamples =
                 _ptrAudioBuffer->RequestPlayoutData(samplesToPlay);
         Lock();
@@ -2718,17 +2668,17 @@ bool AudioDeviceAndroidJni::PlayThreadPr
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "PlayAudio failed (%d)", res);
             _playWarning = 1;
         }
         else if (res > 0)
         {
             // we are not recording and have got a delay value from playback
-            _delayPlayout = res / _samplingFreqOut;
+            _delayPlayout = (res * 1000) / _samplingFreqOut;
         }
         // If 0 is returned we are recording and then play delay is updated
         // in RecordProcess
 
         Lock();
 
     } // _playing
 
@@ -2816,34 +2766,34 @@ bool AudioDeviceAndroidJni::RecThreadPro
         _recording = true;
         _recWarning = 0;
         _recError = 0;
         _recStartStopEvent.Set();
     }
 
     if (_recording)
     {
-        uint32_t samplesToRec = _samplingFreqIn * 10;
+        uint32_t samplesToRec = _samplingFreqIn / 100;
 
         // Call java sc object method to record data to direct buffer
         // Will block until data has been recorded (see java sc class),
         // therefore we must release the lock
         UnLock();
         jint playDelayInSamples = _jniEnvRec->CallIntMethod(_javaScObj,
                                                             _javaMidRecAudio,
                                                             2 * samplesToRec);
         if (playDelayInSamples < 0)
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "RecordAudio failed");
             _recWarning = 1;
         }
         else
         {
-            _delayPlayout = playDelayInSamples / _samplingFreqOut;
+            _delayPlayout = (playDelayInSamples * 1000) / _samplingFreqOut;
         }
         Lock();
 
         // Check again since recording may have stopped during Java call
         if (_recording)
         {
 //            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
 //                         "total delay is %d", msPlayDelay + _delayRecording);
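
The hunks above switch _samplingFreqIn/_samplingFreqOut from kHz (with a special
case mapping 44 to 44100) to plain Hz, which makes 10 ms buffer sizing and the
sample-to-millisecond delay conversion exact. A minimal standalone sketch of the
unit math, assuming a 44100 Hz rate (names are illustrative, not from the patch):

    // Sketch: frame sizing and delay conversion once rates are stored in Hz.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t samplingFreqHz = 44100;                 // was stored as 44 (kHz)
      const uint32_t samplesPer10Ms = samplingFreqHz / 100;  // 441 samples, exact
      const uint32_t delaySamples = 882;                     // e.g. a delay reported in samples
      const uint32_t delayMs = (delaySamples * 1000) / samplingFreqHz;  // 20 ms
      assert(samplesPer10Ms == 441);
      assert(delayMs == 20);
      return 0;
    }
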
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_jni_android.h
@@ -15,37 +15,33 @@
 #ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_JNI_ANDROID_H
 #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_JNI_ANDROID_H
 
 #include "audio_device_generic.h"
 #include "critical_section_wrapper.h"
 
 #include <jni.h> // For accessing AudioDeviceAndroid java class
 
+#define AudioCaptureClass "org/webrtc/voiceengine/WebRTCAudioDevice"
+
 namespace webrtc
 {
 class EventWrapper;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
-
-const uint32_t N_REC_CHANNELS = 1; // default is mono recording
-const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
-
-const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
-
-
 class ThreadWrapper;
 
 class AudioDeviceAndroidJni : public AudioDeviceGeneric {
  public:
   AudioDeviceAndroidJni(const int32_t id);
   ~AudioDeviceAndroidJni();
 
   static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
+                                              void* context);
+
+  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
                                               void* env,
                                               void* context);
 
   virtual int32_t ActiveAudioLayer(
       AudioDeviceModule::AudioLayer& audioLayer) const;
 
   virtual int32_t Init();
   virtual int32_t Terminate();
@@ -153,16 +149,24 @@ class AudioDeviceAndroidJni : public Aud
   virtual int32_t SetRecordingSampleRate(
       const uint32_t samplesPerSec);
   virtual int32_t SetPlayoutSampleRate(
       const uint32_t samplesPerSec);
 
   virtual int32_t SetLoudspeakerStatus(bool enable);
   virtual int32_t GetLoudspeakerStatus(bool& enable) const;
 
+  static const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
+  static const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default is 16 kHz
+
+  static const uint32_t N_REC_CHANNELS = 1; // default is mono recording
+  static const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
+
+  static const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480; // Handle max 10 ms @ 48 kHz
+
  private:
   // Lock
   void Lock() {
     _critSect.Enter();
   };
   void UnLock() {
     _critSect.Leave();
   };
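
Moving these constants (and the OpenSLES ones further below) from namespace scope
into the classes lets audio_device_impl.cc include both device headers in the same
translation unit without redefinition errors. A minimal illustration of the
conflict being avoided; the struct names here are hypothetical:

    // Sketch: two headers defining the same namespace-scope constant cannot be
    // included together in one TU, but class-scoped statics are distinct names.
    #include <cstdint>

    namespace webrtc {
    struct JniDeviceSketch  { static const uint32_t N_REC_CHANNELS = 1; };
    struct SlesDeviceSketch { static const uint32_t N_REC_CHANNELS = 1; };  // OK
    }
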
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
@@ -39,21 +39,21 @@
         'audio_device_utility.h',
         'audio_device_impl.cc',
         'audio_device_impl.h',
         'audio_device_config.h',
         'dummy/audio_device_dummy.h',
         'dummy/audio_device_utility_dummy.h',
       ],
       'conditions': [
-        ['OS=="linux"', {
+        ['OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1', {
           'include_dirs': [
             'linux',
           ],
-        }], # OS==linux
+        }], # OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1
         ['OS=="ios"', {
           'include_dirs': [
             'ios',
           ],
         }], # OS==ios
         ['OS=="mac"', {
           'include_dirs': [
             'mac',
@@ -61,34 +61,35 @@
         }], # OS==mac
         ['OS=="win"', {
           'include_dirs': [
             'win',
           ],
         }],
         ['OS=="android"', {
           'include_dirs': [
+            '$(topsrcdir)/widget/android',
             'android',
           ],
         }], # OS==android
+        ['moz_widget_toolkit_gonk==1', {
+          'include_dirs': [
+            '$(ANDROID_SOURCE)/frameworks/wilhelm/include',
+            '$(ANDROID_SOURCE)/system/media/wilhelm/include',
+          ],
+        }], # moz_widget_toolkit_gonk==1
         ['include_internal_audio_device==0', {
           'defines': [
             'WEBRTC_DUMMY_AUDIO_BUILD',
           ],
         }],
         ['include_internal_audio_device==1', {
           'sources': [
-            'linux/alsasymboltable_linux.cc',
-            'linux/alsasymboltable_linux.h',
-            'linux/audio_device_alsa_linux.cc',
-            'linux/audio_device_alsa_linux.h',
             'linux/audio_device_utility_linux.cc',
             'linux/audio_device_utility_linux.h',
-            'linux/audio_mixer_manager_alsa_linux.cc',
-            'linux/audio_mixer_manager_alsa_linux.h',
             'linux/latebindingsymboltable_linux.cc',
             'linux/latebindingsymboltable_linux.h',
             'ios/audio_device_ios.cc',
             'ios/audio_device_ios.h',
             'ios/audio_device_utility_ios.cc',
             'ios/audio_device_utility_ios.h',
             'mac/audio_device_mac.cc',
             'mac/audio_device_mac.h',
@@ -104,62 +105,76 @@
             'win/audio_device_wave_win.cc',
             'win/audio_device_wave_win.h',
             'win/audio_device_utility_win.cc',
             'win/audio_device_utility_win.h',
             'win/audio_mixer_manager_win.cc',
             'win/audio_mixer_manager_win.h',
             'android/audio_device_utility_android.cc',
             'android/audio_device_utility_android.h',
+            # The OpenSLES sources are shared with gonk, so not listed here
+            'android/audio_device_jni_android.cc',
+            'android/audio_device_jni_android.h',
           ],
           'conditions': [
             ['OS=="android"', {
+              'sources': [
+                'audio_device_opensles.cc',
+                'audio_device_opensles.h',
+              ],
               'link_settings': {
                 'libraries': [
                   '-llog',
                   '-lOpenSLES',
                 ],
               },
-              'conditions': [
-                ['enable_android_opensl==1', {
-                  'sources': [
-                    'android/audio_device_opensles_android.cc',
-                    'android/audio_device_opensles_android.h',
-                  ],
-                }, {
-                  'sources': [
-                    'android/audio_device_jni_android.cc',
-                    'android/audio_device_jni_android.h',
-                  ],
-                }],
+            }],
+            ['moz_widget_toolkit_gonk==1', {
+              'sources': [
+                'audio_device_opensles.cc',
+                'audio_device_opensles.h',
               ],
             }],
             ['OS=="linux"', {
-              'defines': [
-                'LINUX_ALSA',
-              ],
               'link_settings': {
                 'libraries': [
                   '-ldl','-lX11',
                 ],
               },
-              'conditions': [
-                ['include_pulse_audio==1', {
-                  'defines': [
-                    'LINUX_PULSE',
-                  ],
-                  'sources': [
-                    'linux/audio_device_pulse_linux.cc',
-                    'linux/audio_device_pulse_linux.h',
-                    'linux/audio_mixer_manager_pulse_linux.cc',
-                    'linux/audio_mixer_manager_pulse_linux.h',
-                    'linux/pulseaudiosymboltable_linux.cc',
-                    'linux/pulseaudiosymboltable_linux.h',
-                  ],
-                }],
+            }],
+            ['include_alsa_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_ALSA_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_ALSA',
+              ],
+              'sources': [
+                'linux/alsasymboltable_linux.cc',
+                'linux/alsasymboltable_linux.h',
+                'linux/audio_device_alsa_linux.cc',
+                'linux/audio_device_alsa_linux.h',
+                'linux/audio_mixer_manager_alsa_linux.cc',
+                'linux/audio_mixer_manager_alsa_linux.h',
+              ],
+            }],
+            ['include_pulse_audio==1', {
+              'cflags_mozilla': [
+                '$(MOZ_PULSEAUDIO_CFLAGS)',
+              ],
+              'defines': [
+                'LINUX_PULSE',
+              ],
+              'sources': [
+                'linux/audio_device_pulse_linux.cc',
+                'linux/audio_device_pulse_linux.h',
+                'linux/audio_mixer_manager_pulse_linux.cc',
+                'linux/audio_mixer_manager_pulse_linux.h',
+                'linux/pulseaudiosymboltable_linux.cc',
+                'linux/pulseaudiosymboltable_linux.h',
               ],
             }],
             ['OS=="mac" or OS=="ios"', {
               'link_settings': {
                 'libraries': [
                   '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
                   '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
                 ],
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_impl.cc
@@ -19,18 +19,25 @@
 #if defined(_WIN32)
     #include "audio_device_utility_win.h"
     #include "audio_device_wave_win.h"
  #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
     #include "audio_device_core_win.h"
  #endif
 #elif defined(WEBRTC_ANDROID_OPENSLES)
     #include <stdlib.h>
+    #include <dlfcn.h>
     #include "audio_device_utility_android.h"
-    #include "audio_device_opensles_android.h"
+    #include "audio_device_opensles.h"
+    #include "audio_device_jni_android.h"
+#elif defined(WEBRTC_GONK)
+    #include <stdlib.h>
+    #include <dlfcn.h>
+    #include "audio_device_utility_linux.h"
+    #include "audio_device_opensles.h"
 #elif defined(WEBRTC_ANDROID)
     #include <stdlib.h>
     #include "audio_device_utility_android.h"
     #include "audio_device_jni_android.h"
 #elif defined(WEBRTC_LINUX)
     #include "audio_device_utility_linux.h"
  #if defined(LINUX_ALSA)
     #include "audio_device_alsa_linux.h"
@@ -253,49 +260,58 @@ int32_t AudioDeviceModuleImpl::CreatePla
         // for Windows.
         //
         ptrAudioDeviceUtility = new AudioDeviceUtilityWindows(Id());
     }
 #endif  // #if defined(_WIN32)
 
     // Create the *Android OpenSLES* implementation of the Audio Device
     //
-#if defined(WEBRTC_ANDROID_OPENSLES)
-    if (audioLayer == kPlatformDefaultAudio)
-    {
-        // Create *Android OpenELSE Audio* implementation
-        ptrAudioDevice = new AudioDeviceAndroidOpenSLES(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
-                     "Android OpenSLES Audio APIs will be utilized");
+#if defined(WEBRTC_ANDROID_OPENSLES) || defined(WEBRTC_GONK)
+    // Check if the OpenSLES library is available before going further.
+    void* opensles_lib = dlopen("libOpenSLES.so", RTLD_LAZY);
+    if (opensles_lib) {
+        // That worked, close for now and proceed normally.
+        dlclose(opensles_lib);
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+            // Create *Android OpenSLES Audio* implementation
+            ptrAudioDevice = new AudioDeviceAndroidOpenSLES(Id());
+            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                         "Android OpenSLES Audio APIs will be utilized");
+        }
     }
 
+#if !defined(WEBRTC_GONK)
     if (ptrAudioDevice != NULL)
     {
         // Create the Android implementation of the Device Utility.
         ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
     }
-    // END #if defined(WEBRTC_ANDROID_OPENSLES)
+#endif
 
-    // Create the *Android Java* implementation of the Audio Device
-    //
-#elif defined(WEBRTC_ANDROID)
-    if (audioLayer == kPlatformDefaultAudio)
-    {
-        // Create *Android JNI Audio* implementation
-        ptrAudioDevice = new AudioDeviceAndroidJni(Id());
-        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Android JNI Audio APIs will be utilized");
+#endif
+#if defined(WEBRTC_ANDROID_OPENSLES) || defined(WEBRTC_ANDROID)
+    // Fall back to this case on Android 2.2, where OpenSLES is not available.
+    if (ptrAudioDevice == NULL) {
+        // Create the *Android Java* implementation of the Audio Device
+        if (audioLayer == kPlatformDefaultAudio)
+        {
+            // Create *Android JNI Audio* implementation
+            ptrAudioDevice = new AudioDeviceAndroidJni(Id());
+            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Android JNI Audio APIs will be utilized");
+        }
+
+        if (ptrAudioDevice != NULL)
+        {
+            // Create the Android implementation of the Device Utility.
+            ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
+        }
     }
 
-    if (ptrAudioDevice != NULL)
-    {
-        // Create the Android implementation of the Device Utility.
-        ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
-    }
-    // END #if defined(WEBRTC_ANDROID)
-
     // Create the *Linux* implementation of the Audio Device
     //
 #elif defined(WEBRTC_LINUX)
     if ((audioLayer == kLinuxPulseAudio) || (audioLayer == kPlatformDefaultAudio))
     {
 #if defined(LINUX_PULSE)
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "attempting to use the Linux PulseAudio APIs...");
 
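
The CreatePlatformSpecificObjects() change above probes for libOpenSLES.so at
runtime and falls back to the JNI audio device when it is missing (e.g. Android
2.2). A reduced sketch of the same probe using only the POSIX dlfcn API; the
helper name is hypothetical:

    // Sketch: test whether the OpenSLES backend is usable on this device.
    #include <dlfcn.h>
    #include <cstdio>

    static bool OpenSLESAvailable() {
      void* lib = dlopen("libOpenSLES.so", RTLD_LAZY);
      if (!lib)
        return false;  // no OpenSLES: caller falls back to the JNI device
      dlclose(lib);    // probe only; the device dlopen()s it again in Init()
      return true;
    }

    int main() {
      std::printf("OpenSLES backend %savailable\n",
                  OpenSLESAvailable() ? "" : "not ");
      return 0;
    }
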
rename from media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.cc
rename to media/webrtc/trunk/webrtc/modules/audio_device/audio_device_opensles.cc
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_opensles.cc
@@ -3,25 +3,26 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "modules/audio_device/android/audio_device_opensles_android.h"
+#include "modules/audio_device/audio_device_opensles.h"
 
 #ifdef WEBRTC_ANDROID_DEBUG
 #include <android/log.h>
 #endif
 #include <sys/resource.h>
 #include <sys/syscall.h>
 #include <sys/time.h>
 #include <time.h>
+#include <dlfcn.h>
 
 #include "modules/audio_device/audio_device_utility.h"
 #include "system_wrappers/interface/event_wrapper.h"
 #include "system_wrappers/interface/thread_wrapper.h"
 #include "system_wrappers/interface/trace.h"
 
 #ifdef WEBRTC_ANDROID_DEBUG
 #define WEBRTC_OPENSL_TRACE(a, b, c, ...)                               \
@@ -59,22 +60,24 @@ AudioDeviceAndroidOpenSLES::AudioDeviceA
       is_playing_(false),
       is_rec_initialized_(false),
       is_play_initialized_(false),
       is_mic_initialized_(false),
       is_speaker_initialized_(false),
       playout_delay_(0),
       recording_delay_(0),
       agc_enabled_(false),
+      rec_thread_(NULL),
       rec_timer_(*EventWrapper::Create()),
       mic_sampling_rate_(N_REC_SAMPLES_PER_SEC * 1000),
       speaker_sampling_rate_(N_PLAY_SAMPLES_PER_SEC * 1000),
       max_speaker_vol_(0),
       min_speaker_vol_(0),
-      loundspeaker_on_(false) {
+      loundspeaker_on_(false),
+      opensles_lib_(0) {
   WEBRTC_OPENSL_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created",
                       __FUNCTION__);
   memset(rec_buf_, 0, sizeof(rec_buf_));
   memset(play_buf_, 0, sizeof(play_buf_));
 }
 
 AudioDeviceAndroidOpenSLES::~AudioDeviceAndroidOpenSLES() {
   WEBRTC_OPENSL_TRACE(kTraceMemory, kTraceAudioDevice, id_, "%s destroyed",
@@ -109,20 +112,55 @@ int32_t AudioDeviceAndroidOpenSLES::Acti
 }
 
 int32_t AudioDeviceAndroidOpenSLES::Init() {
   CriticalSectionScoped lock(&crit_sect_);
 
   if (is_initialized_)
     return 0;
 
+  /* Try to dynamically open the OpenSLES library */
+  opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
+  if (!opensles_lib_) {
+      WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
+                          "  failed to dlopen OpenSLES library");
+      return -1;
+  }
+
+  typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
+                                       SLuint32,
+                                       const SLEngineOption *,
+                                       SLuint32,
+                                       const SLInterfaceID *,
+                                       const SLboolean *);
+  slCreateEngine_t f_slCreateEngine =
+    (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
+  SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
+  SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
+  SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
+  SL_IID_PLAY_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_PLAY");
+  SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+  SL_IID_RECORD_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_RECORD");
+
+  if (!f_slCreateEngine ||
+      !SL_IID_ENGINE_ ||
+      !SL_IID_BUFFERQUEUE_ ||
+      !SL_IID_ANDROIDCONFIGURATION_ ||
+      !SL_IID_PLAY_ ||
+      !SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
+      !SL_IID_RECORD_) {
+      WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
+                          "  failed to find OpenSLES function");
+      return -1;
+  }
+
   SLEngineOption EngineOption[] = {
     { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
   };
-  int32_t res = slCreateEngine(&sles_engine_, 1, EngineOption, 0, NULL, NULL);
+  int32_t res = f_slCreateEngine(&sles_engine_, 1, EngineOption, 0, NULL, NULL);
 
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to create SL Engine Object");
     return -1;
   }
 
   // Realizing the SL Engine in synchronous mode.
@@ -130,17 +168,17 @@ int32_t AudioDeviceAndroidOpenSLES::Init
       != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to Realize SL Engine");
     return -1;
   }
 
   if ((*sles_engine_)->GetInterface(
           sles_engine_,
-          SL_IID_ENGINE,
+          SL_IID_ENGINE_,
           &sles_engine_itf_) != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to get SL Engine interface");
     return -1;
   }
 
   // Check the sample rate to be used for playback and recording
   if (InitSampleRate() != 0) {
@@ -183,16 +221,18 @@ int32_t AudioDeviceAndroidOpenSLES::Term
   StopPlayout();
 
   if (sles_engine_ != NULL) {
     (*sles_engine_)->Destroy(sles_engine_);
     sles_engine_ = NULL;
     sles_engine_itf_ = NULL;
   }
 
+  dlclose(opensles_lib_);
+
   is_initialized_ = false;
   return 0;
 }
 
 bool AudioDeviceAndroidOpenSLES::Initialized() const {
   return (is_initialized_);
 }
 
@@ -278,17 +318,17 @@ int32_t AudioDeviceAndroidOpenSLES::SetS
    WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                        "SetSpeakerVolume, SL Engine object doesn't exist");
     return -1;
   }
 
   if (sles_engine_itf_ == NULL) {
     if ((*sles_engine_)->GetInterface(
             sles_engine_,
-            SL_IID_ENGINE,
+            SL_IID_ENGINE_,
             &sles_engine_itf_) != SL_RESULT_SUCCESS) {
       WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                           "  failed to GetInterface SL Engine Interface");
       return -1;
     }
   }
   return 0;
 }
@@ -697,21 +737,17 @@ int32_t AudioDeviceAndroidOpenSLES::Init
                         "  failed to realize SL Output Mix object");
     return -1;
   }
 
   // The code below can be moved to startplayout instead
   // Setup the data source structure for the buffer queue.
   player_pcm_.formatType = SL_DATAFORMAT_PCM;
   player_pcm_.numChannels = N_PLAY_CHANNELS;
-  if (speaker_sampling_rate_ == 44000) {
-      player_pcm_.samplesPerSec = 44100 * 1000;
-  } else {
-    player_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
-  }
+  player_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
   player_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
   player_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
   if (1 == player_pcm_.numChannels) {
     player_pcm_.channelMask = SL_SPEAKER_FRONT_CENTER;
   } else if (2 == player_pcm_.numChannels) {
     player_pcm_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
   } else {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
@@ -719,17 +755,17 @@ int32_t AudioDeviceAndroidOpenSLES::Init
   }
 
   player_pcm_.endianness = SL_BYTEORDER_LITTLEENDIAN;
   // Setup the data sink structure.
   locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
   locator_outputmix.outputMix = sles_output_mixer_;
 
   SLInterfaceID ids[N_MAX_INTERFACES] = {
-    SL_IID_BUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_BUFFERQUEUE_, SL_IID_ANDROIDCONFIGURATION_ };
   SLboolean req[N_MAX_INTERFACES] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   res = (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_,
                                                &sles_player_, &audio_source,
                                                &audio_sink, 2, ids, req);
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to create AudioPlayer");
@@ -739,25 +775,25 @@ int32_t AudioDeviceAndroidOpenSLES::Init
   // Realizing the player in synchronous mode.
   res = (*sles_player_)->Realize(sles_player_, SL_BOOLEAN_FALSE);
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to realize the player");
     return -1;
   }
   res = (*sles_player_)->GetInterface(
-      sles_player_, SL_IID_PLAY,
+      sles_player_, SL_IID_PLAY_,
       static_cast<void*>(&sles_player_itf_));
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to get Player interface");
     return -1;
   }
   res = (*sles_player_)->GetInterface(
-      sles_player_, SL_IID_BUFFERQUEUE,
+      sles_player_, SL_IID_BUFFERQUEUE_,
       static_cast<void*>(&sles_player_sbq_itf_));
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to get Player SimpleBufferQueue interface");
     return -1;
   }
 
   // Setup to receive buffer queue event callbacks
@@ -821,35 +857,31 @@ int32_t AudioDeviceAndroidOpenSLES::Init
     SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
     static_cast<SLuint32>(N_REC_QUEUE_BUFFERS)
   };
   SLDataSink audio_sink = { &simple_buf_queue, &record_pcm_ };
 
   // Setup the format of the content in the buffer queue
   record_pcm_.formatType = SL_DATAFORMAT_PCM;
   record_pcm_.numChannels = N_REC_CHANNELS;
-  if (speaker_sampling_rate_ == 44000) {
-    record_pcm_.samplesPerSec = 44100 * 1000;
-  } else {
-    record_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
-  }
+  record_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
   record_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
   record_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
   if (1 == record_pcm_.numChannels) {
     record_pcm_.channelMask = SL_SPEAKER_FRONT_CENTER;
   } else if (2 == record_pcm_.numChannels) {
     record_pcm_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
   } else {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  %d rec channels not supported", N_REC_CHANNELS);
   }
   record_pcm_.endianness = SL_BYTEORDER_LITTLEENDIAN;
 
   const SLInterfaceID id[2] = {
-    SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
+    SL_IID_ANDROIDSIMPLEBUFFERQUEUE_, SL_IID_ANDROIDCONFIGURATION_ };
   const SLboolean req[2] = {
     SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
   int32_t res = -1;
   res = (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
                                                  &sles_recorder_,
                                                  &audio_source,
                                                  &audio_sink,
                                                  2,
@@ -866,28 +898,28 @@ int32_t AudioDeviceAndroidOpenSLES::Init
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to realize Recorder");
     return -1;
   }
 
   // Get the RECORD interface - it is an implicit interface
   res = (*sles_recorder_)->GetInterface(
-      sles_recorder_, SL_IID_RECORD,
+      sles_recorder_, SL_IID_RECORD_,
       static_cast<void*>(&sles_recorder_itf_));
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to get Recorder interface");
     return -1;
   }
 
   // Get the simpleBufferQueue interface
   res = (*sles_recorder_)->GetInterface(
       sles_recorder_,
-      SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+      SL_IID_ANDROIDSIMPLEBUFFERQUEUE_,
       static_cast<void*>(&sles_recorder_sbq_itf_));
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
                         "  failed to get Recorder Simple Buffer Queue");
     return -1;
   }
 
   // Setup to receive buffer queue event callbacks
@@ -1392,17 +1424,17 @@ void AudioDeviceAndroidOpenSLES::Recorde
     // TODO(leozwang): OpenSL ES doesn't support AudioRecorder
     // volume control now, add it when it's ready.
   }
 }
 
 void AudioDeviceAndroidOpenSLES::CheckErr(SLresult res) {
   if (res != SL_RESULT_SUCCESS) {
     WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
-                        "  AudioDeviceAndroidOpenSLES::CheckErr(%d)", res);
+                        "  AudioDeviceAndroidOpenSLES::CheckErr(%lu)", res);
     exit(-1);
   }
 }
 
 void AudioDeviceAndroidOpenSLES::UpdatePlayoutDelay(
     uint32_t nSamplePlayed) {
  // TODO(leozwang): Add accurate delay estimate.
   playout_delay_ = (N_PLAY_QUEUE_BUFFERS - 0.5) * 10 +
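
Init() above late-binds slCreateEngine and the exported SL_IID_* interface IDs
with dlsym, so the module no longer links against libOpenSLES at build time. A
reduced sketch of that pattern (symbol names as in the patch; unlike the patch,
this checks the dlsym result before dereferencing):

    // Sketch: resolve an OpenSLES entry point and one exported interface ID.
    #include <dlfcn.h>
    #include <SLES/OpenSLES.h>

    typedef SLresult (*slCreateEngine_t)(SLObjectItf*, SLuint32,
                                         const SLEngineOption*, SLuint32,
                                         const SLInterfaceID*, const SLboolean*);

    static bool BindOpenSLES(void* lib,
                             slCreateEngine_t* createEngine,
                             SLInterfaceID* iidEngine) {
      *createEngine = (slCreateEngine_t)dlsym(lib, "slCreateEngine");
      // SL_IID_ENGINE is an exported SLInterfaceID object; dlsym returns its
      // address, and dereferencing yields the interface ID value.
      SLInterfaceID* iid = (SLInterfaceID*)dlsym(lib, "SL_IID_ENGINE");
      if (!*createEngine || !iid)
        return false;
      *iidEngine = *iid;
      return true;
    }
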
rename from media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.h
rename to media/webrtc/trunk/webrtc/modules/audio_device/audio_device_opensles.h
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_opensles.h
@@ -6,63 +6,32 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
 #define SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
 
-#include <jni.h>
 #include <stdio.h>
 #include <stdlib.h>
 
 #include <SLES/OpenSLES.h>
 #include <SLES/OpenSLES_Android.h>
 #include <SLES/OpenSLES_AndroidConfiguration.h>
 
 #include <queue>
 
 #include "modules/audio_device/audio_device_generic.h"
 #include "system_wrappers/interface/critical_section_wrapper.h"
 
 namespace webrtc {
 
 class EventWrapper;
 
-const uint32_t N_MAX_INTERFACES = 3;
-const uint32_t N_MAX_OUTPUT_DEVICES = 6;
-const uint32_t N_MAX_INPUT_DEVICES = 3;
-
-const uint32_t N_REC_SAMPLES_PER_SEC = 16000;  // Default fs
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000;  // Default fs
-
-const uint32_t N_REC_CHANNELS = 1;
-const uint32_t N_PLAY_CHANNELS = 1;
-
-const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480;
-const uint32_t PLAY_BUF_SIZE_IN_SAMPLES = 480;
-
-const uint32_t REC_MAX_TEMP_BUF_SIZE_PER_10ms =
-    N_REC_CHANNELS * REC_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);
-
-const uint32_t PLAY_MAX_TEMP_BUF_SIZE_PER_10ms =
-    N_PLAY_CHANNELS * PLAY_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);
-
-// Number of the buffers in playout queue
-const uint16_t N_PLAY_QUEUE_BUFFERS = 8;
-// Number of buffers in recording queue
-// TODO(xian): Reduce the numbers of buffers to improve the latency.
-const uint16_t N_REC_QUEUE_BUFFERS = 8;
-// Some values returned from getMinBufferSize
-// (Nexus S playout  72ms, recording 64ms)
-// (Galaxy,         167ms,           44ms)
-// (Nexus 7,         72ms,           48ms)
-// (Xoom             92ms,           40ms)
-
 class ThreadWrapper;
 
 class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric {
  public:
   explicit AudioDeviceAndroidOpenSLES(const int32_t id);
   ~AudioDeviceAndroidOpenSLES();
 
   // Retrieve the currently utilized audio layer
@@ -205,16 +174,46 @@ class AudioDeviceAndroidOpenSLES: public
 
   // Attach audio buffer
   virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
   // Speaker audio routing
   virtual int32_t SetLoudspeakerStatus(bool enable);
   virtual int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT
 
+  static const uint32_t N_MAX_INTERFACES = 3;
+  static const uint32_t N_MAX_OUTPUT_DEVICES = 6;
+  static const uint32_t N_MAX_INPUT_DEVICES = 3;
+
+  static const uint32_t N_REC_SAMPLES_PER_SEC = 16000;  // Default fs
+  static const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000;  // Default fs
+
+  static const uint32_t N_REC_CHANNELS = 1;
+  static const uint32_t N_PLAY_CHANNELS = 1;
+
+  static const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480;
+  static const uint32_t PLAY_BUF_SIZE_IN_SAMPLES = 480;
+
+  static const uint32_t REC_MAX_TEMP_BUF_SIZE_PER_10ms =
+      N_REC_CHANNELS * REC_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);
+
+  static const uint32_t PLAY_MAX_TEMP_BUF_SIZE_PER_10ms =
+      N_PLAY_CHANNELS * PLAY_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);
+
+  // Number of the buffers in playout queue
+  static const uint16_t N_PLAY_QUEUE_BUFFERS = 8;
+  // Number of buffers in recording queue
+  // TODO(xian): Reduce the numbers of buffers to improve the latency.
+  static const uint16_t N_REC_QUEUE_BUFFERS = 8;
+  // Some values returned from getMinBufferSize
+  // (Nexus S playout  72ms, recording 64ms)
+  // (Galaxy,         167ms,           44ms)
+  // (Nexus 7,         72ms,           48ms)
+  // (Xoom             92ms,           40ms)
+
  private:
   // Lock
   void Lock() {
     crit_sect_.Enter();
   };
   void UnLock() {
     crit_sect_.Leave();
   };
@@ -304,13 +303,22 @@ class AudioDeviceAndroidOpenSLES: public
   int8_t rec_buf_[N_REC_QUEUE_BUFFERS][
       N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES];
   int8_t rec_voe_buf_[N_REC_QUEUE_BUFFERS][
       N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES];
 
   std::queue<int8_t*> play_queue_;
   int8_t play_buf_[N_PLAY_QUEUE_BUFFERS][
       N_PLAY_CHANNELS * sizeof(int16_t) * PLAY_BUF_SIZE_IN_SAMPLES];
+
+  // dlopen for OpenSLES
+  void *opensles_lib_;
+  SLInterfaceID SL_IID_ENGINE_;
+  SLInterfaceID SL_IID_BUFFERQUEUE_;
+  SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
+  SLInterfaceID SL_IID_PLAY_;
+  SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
+  SLInterfaceID SL_IID_RECORD_;
 };
 
 }  // namespace webrtc
 
 #endif  // SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device_utility.cc
@@ -41,17 +41,17 @@ bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2,
     const uint32_t length)
 {
 	return ((_strnicmp(str1, str2, length) == 0) ? true : false);
 }
 
 }  // namespace webrtc
 
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 // ============================================================================
 //                                 Linux & Mac
 // ============================================================================
 
 #include <sys/time.h>   // gettimeofday
 #include <time.h>       // gettimeofday
 #include <string.h>     // strncasecmp
@@ -104,11 +104,11 @@ uint32_t AudioDeviceUtility::GetTimeInMS
 bool AudioDeviceUtility::StringCompare(
     const char* str1 , const char* str2, const uint32_t length)
 {
     return (strncasecmp(str1, str2, length) == 0)?true: false;
 }
 
 }  // namespace webrtc
 
-#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#endif  // defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.cc
@@ -1327,17 +1327,17 @@ int32_t AudioDeviceIPhone::InitPlayOrRec
                  playoutDesc.mSampleRate);
 
     playoutDesc.mSampleRate = sampleRate;
 
     // Store the sampling frequency to use towards the Audio Device Buffer
     // todo: Add 48 kHz (increase buffer sizes). Other fs?
     if ((playoutDesc.mSampleRate > 44090.0)
         && (playoutDesc.mSampleRate < 44110.0)) {
-        _adbSampFreq = 44000;
+        _adbSampFreq = 44100;
     } else if ((playoutDesc.mSampleRate > 15990.0)
                && (playoutDesc.mSampleRate < 16010.0)) {
         _adbSampFreq = 16000;
     } else if ((playoutDesc.mSampleRate > 7990.0)
                && (playoutDesc.mSampleRate < 8010.0)) {
         _adbSampFreq = 8000;
     } else {
         _adbSampFreq = 0;
--- a/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -14,18 +14,18 @@
 #include <AudioUnit/AudioUnit.h>
 
 #include "audio_device_generic.h"
 #include "critical_section_wrapper.h"
 
 namespace webrtc {
 class ThreadWrapper;
 
-const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
+const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
+const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
 
 const uint32_t N_REC_CHANNELS = 1;  // default is mono recording
 const uint32_t N_PLAY_CHANNELS = 1;  // default is mono playout
 const uint32_t N_DEVICE_CHANNELS = 8;
 
 const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
 const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -981,17 +981,18 @@ int32_t AudioDeviceLinuxALSA::RecordingD
 
     memset(name, 0, kAdmMaxDeviceNameSize);
 
     if (guid != NULL)
     {
         memset(guid, 0, kAdmMaxGuidSize);
     }
     
-    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize,
+                          guid, kAdmMaxGuidSize);
 }
 
 int16_t AudioDeviceLinuxALSA::RecordingDevices()
 {
 
     return (int16_t)GetDevicesInfo(0, false);
 }
 
@@ -1629,40 +1630,41 @@ int32_t AudioDeviceLinuxALSA::StartPlayo
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "    failed to create the play audio thread");
         _playing = false;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
 
+    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+                     "     playout snd_pcm_prepare failed (%s)\n",
+                     LATE(snd_strerror)(errVal));
+        // just log error
+        // if snd_pcm_open fails will return -1
+    }
+
+
     unsigned int threadID(0);
     if (!_ptrThreadPlay->Start(threadID))
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to start the play audio thread");
         _playing = false;
         delete _ptrThreadPlay;
         _ptrThreadPlay = NULL;
         delete [] _playoutBuffer;
         _playoutBuffer = NULL;
         return -1;
     }
     _playThreadID = threadID;
 
-    int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
-    if (errVal < 0)
-    {
-        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
-                     "     playout snd_pcm_prepare failed (%s)\n",
-                     LATE(snd_strerror)(errVal));
-        // just log error
-        // if snd_pcm_open fails will return -1
-    }
-
     return 0;
 }
 
 int32_t AudioDeviceLinuxALSA::StopPlayout()
 {
 
     {
         CriticalSectionScoped lock(&_critSect);
@@ -1824,17 +1826,19 @@ void AudioDeviceLinuxALSA::ClearRecordin
 //                                 Private Methods
 // ============================================================================
 
 int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
     const int32_t function,
     const bool playback,
     const int32_t enumDeviceNo,
     char* enumDeviceName,
-    const int32_t ednLen) const
+    const int32_t ednLen,
+    char* enumDeviceId,
+    const int32_t ediLen) const
 {
     
     // Device enumeration based on libjingle implementation
     // by Tristan Schmelcher at Google Inc.
 
     const char *type = playback ? "Output" : "Input";
     // dmix and dsnoop are only for playback and capture, respectively, but ALSA
     // stupidly includes them in both lists.
@@ -1863,16 +1867,18 @@ int32_t AudioDeviceLinuxALSA::GetDevices
             return -1;
         }
 
         enumCount++; // default is 0
         if ((function == FUNC_GET_DEVICE_NAME ||
             function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
         {
             strcpy(enumDeviceName, "default");
+            if (enumDeviceId)
+                memset(enumDeviceId, 0, ediLen);
 
             err = LATE(snd_device_name_free_hint)(hints);
             if (err != 0)
             {
                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                              "GetDevicesInfo - device name free hint error: %s",
                              LATE(snd_strerror)(err));
             }
@@ -1925,28 +1931,38 @@ int32_t AudioDeviceLinuxALSA::GetDevices
 
                 }
                 if ((FUNC_GET_DEVICE_NAME == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, desc, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                     // Replace '\n' with '-'.
                     char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
                     if (pret)
                         *pret = '-';
                 }
                 if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
                     (enumDeviceNo == enumCount))
                 {
                     // We have found the enum device, copy the name to buffer.
                     strncpy(enumDeviceName, name, ednLen);
                     enumDeviceName[ednLen-1] = '\0';
+                    if (enumDeviceId)
+                    {
+                        strncpy(enumDeviceId, name, ediLen);
+                        enumDeviceId[ediLen-1] = '\0';
+                    }
                     keepSearching = false;
                 }
 
                 if (keepSearching)
                     ++enumCount;
 
                 if (desc != name)
                     free(desc);
@@ -1961,17 +1977,17 @@ int32_t AudioDeviceLinuxALSA::GetDevices
         err = LATE(snd_device_name_free_hint)(hints);
         if (err != 0)
         {
             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                          "GetDevicesInfo - device name free hint error: %s",
                          LATE(snd_strerror)(err));
             // Continue and return true anyway, since we did get the whole list.
         }
-    }
+      }
 
     if (FUNC_GET_NUM_OF_DEVICE == function)
     {
         if (enumCount == 1) // only default?
             enumCount = 0;
         return enumCount; // Normal return point for function 0
     }
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -161,17 +161,19 @@ public:
 public:
     virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
 private:
     int32_t GetDevicesInfo(const int32_t function,
                            const bool playback,
                            const int32_t enumDeviceNo = 0,
                            char* enumDeviceName = NULL,
-                           const int32_t ednLen = 0) const;
+                           const int32_t ednLen = 0,
+                           char* enumDeviceID = NULL,
+                           const int32_t ediLen = 0) const;
     int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);
 
 private:
     bool KeyPressed() const;
 
 private:
     void Lock() { _critSect.Enter(); };
     void UnLock() { _critSect.Leave(); };
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.cc
@@ -22,68 +22,68 @@
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "latebindingsymboltable_linux.h"
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 #include <dlfcn.h>
 #endif
 
 // TODO(grunell): Either put inside webrtc namespace or use webrtc:: instead.
 using namespace webrtc;
 
 namespace webrtc_adm_linux {
 
 inline static const char *GetDllError() {
-#ifdef WEBRTC_LINUX
-  char *err = dlerror();
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
+  const char *err = dlerror();
   if (err) {
     return err;
   } else {
     return "No error";
   }
 #else
 #error Not implemented
 #endif
 }
 
 DllHandle InternalLoadDll(const char dll_name[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   DllHandle handle = dlopen(dll_name, RTLD_NOW);
 #else
 #error Not implemented
 #endif
   if (handle == kInvalidDllHandle) {
     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                "Can't load %s : %s", dll_name, GetDllError());
   }
   return handle;
 }
 
 void InternalUnloadDll(DllHandle handle) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   if (dlclose(handle) != 0) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "%s", GetDllError());
   }
 #else
 #error Not implemented
 #endif
 }
 
 static bool LoadSymbol(DllHandle handle,
                        const char *symbol_name,
                        void **symbol) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   *symbol = dlsym(handle, symbol_name);
-  char *err = dlerror();
+  const char *err = dlerror();
   if (err) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
               "Error loading symbol %s : %s", symbol_name, err);
     return false;
   } else if (!*symbol) {
     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
                "Symbol %s is NULL", symbol_name);
     return false;
@@ -96,17 +96,17 @@ static bool LoadSymbol(DllHandle handle,
 
 // This routine MUST assign SOME value for every symbol, even if that value is
 // NULL, or else some symbols may be left with uninitialized data that the
 // caller may later interpret as a valid address.
 bool InternalLoadSymbols(DllHandle handle,
                          int num_symbols,
                          const char *const symbol_names[],
                          void *symbols[]) {
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   // Clear any old errors.
   dlerror();
 #endif
   for (int i = 0; i < num_symbols; ++i) {
     if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) {
       return false;
     }
   }
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -37,17 +37,17 @@
 
 // This file provides macros for creating "symbol table" classes to simplify the
 // dynamic loading of symbols from DLLs. Currently the implementation only
 // supports Linux and pure C symbols.
 // See talk/sound/pulseaudiosymboltable.(h|cc) for an example.
 
 namespace webrtc_adm_linux {
 
-#ifdef WEBRTC_LINUX
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
 typedef void *DllHandle;
 
 const DllHandle kInvalidDllHandle = NULL;
 #else
 #error Not implemented
 #endif
 
 // These are helpers for use only by the class below.
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.cc
@@ -24,16 +24,20 @@
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "pulseaudiosymboltable_linux.h"
 
 namespace webrtc_adm_linux_pulse {
 
+#if defined(__OpenBSD__) || defined(WEBRTC_GONK)
+LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so")
+#else
 LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
+#endif
 #define X(sym) \
     LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
 PULSE_AUDIO_SYMBOLS_LIST
 #undef X
 LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable)
 
 }  // namespace webrtc_adm_linux_pulse
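
The conditional above selects the unversioned libpulse.so on OpenBSD and Gonk,
where the .so.0 soname link may not exist. A sketch of an equivalent tolerant
loader; the candidate list and function name are illustrative:

    // Sketch: try the versioned soname first, then the bare library name.
    #include <dlfcn.h>

    static void* LoadPulseAudio() {
      const char* names[] = { "libpulse.so.0", "libpulse.so" };
      for (const char* name : names) {
        if (void* lib = dlopen(name, RTLD_NOW))
          return lib;  // caller keeps the handle and dlclose()s it on shutdown
      }
      return nullptr;
    }
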
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -1765,39 +1765,39 @@ TEST_F(AudioDeviceAPITest, RecordingSamp
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->RecordingSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 
   // @TODO(xians) - add tests for all platforms here...
 }
 
 TEST_F(AudioDeviceAPITest, PlayoutSampleRate) {
   uint32_t sampleRate(0);
 
   // bulk tests
   EXPECT_EQ(0, audio_device_->PlayoutSampleRate(&sampleRate));
 #if defined(_WIN32) && !defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
   EXPECT_EQ(48000, sampleRate);
 #elif defined(ANDROID)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
 #elif defined(WEBRTC_IOS)
   TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
-  EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
+  EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
               (sampleRate == 8000));
 #endif
 }
 
 TEST_F(AudioDeviceAPITest, ResetAudioDevice) {
   CheckInitialPlayoutStates();
   CheckInitialRecordingStates();
   EXPECT_EQ(0, audio_device_->SetPlayoutDevice(MACRO_DEFAULT_DEVICE));
--- a/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -343,22 +343,16 @@ int32_t AudioTransportImpl::NeedMorePlay
                 const uint8_t nChannelsIn = packet->nChannels;
                 const uint32_t samplesPerSecIn = packet->samplesPerSec;
                 const uint16_t nBytesPerSampleIn =
                     packet->nBytesPerSample;
 
                 int32_t fsInHz(samplesPerSecIn);
                 int32_t fsOutHz(samplesPerSec);
 
-                if (fsInHz == 44100)
-                    fsInHz = 44000;
-
-                if (fsOutHz == 44100)
-                    fsOutHz = 44000;
-
                 if (nChannelsIn == 2 && nBytesPerSampleIn == 4)
                 {
                     // input is stereo => we will resample in stereo
                     ret = _resampler.ResetIfNeeded(fsInHz, fsOutHz,
                                                    kResamplerSynchronousStereo);
                     if (ret == 0)
                     {
                         if (nChannels == 2)
@@ -1240,17 +1234,17 @@ int32_t FuncTestManager::TestAudioTransp
 
         EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
 
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (samplesPerSec == 48000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (samplesPerSec == 44100 || samplesPerSec == 44000) {
+        } else if (samplesPerSec == 44100) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1473,17 +1467,17 @@ int32_t FuncTestManager::TestSpeakerVolu
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile48.c_str()));
-        } else if (44100 == samplesPerSec || samplesPerSec == 44000) {
+        } else if (44100 == samplesPerSec) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile44.c_str()));
         } else if (samplesPerSec == 16000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile16.c_str()));
         } else if (samplesPerSec == 8000) {
             _audioTransport->SetFilePlayout(
                 true, GetResource(_playoutFile8.c_str()));
@@ -1574,17 +1568,17 @@ int32_t FuncTestManager::TestSpeakerMute
     EXPECT_EQ(0, audioDevice->RegisterAudioCallback(_audioTransport));
     EXPECT_EQ(0, audioDevice->PlayoutIsAvailable(&available));
     if (available)
     {
         EXPECT_EQ(0, audioDevice->InitPlayout());
         EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
         if (48000 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile48.c_str());
-        else if (44100 == samplesPerSec || 44000 == samplesPerSec)
+        else if (44100 == samplesPerSec)
             _audioTransport->SetFilePlayout(true, _playoutFile44.c_str());
         else
         {
             TEST_LOG("\nERROR: Sample rate (%d) is not supported!\n \n",
                      samplesPerSec);
             return -1;
         }
         EXPECT_EQ(0, audioDevice->StartPlayout());
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/audio_processing.gypi
@@ -149,16 +149,17 @@
         {
           'target_name': 'audio_processing_sse2',
           'type': 'static_library',
           'sources': [
             'aec/aec_core_sse2.c',
             'aec/aec_rdft_sse2.c',
           ],
           'cflags': ['-msse2',],
+          'cflags_mozilla': [ '-msse2', ],
           'xcode_settings': {
             'OTHER_CFLAGS': ['-msse2',],
           },
         },
       ],
     }],
     ['target_arch=="arm" and armv7==1', {
       'targets': [{
@@ -172,21 +173,24 @@
           'aecm/aecm_core_neon.c',
           'ns/nsx_core_neon.c',
         ],
         'conditions': [
           ['OS=="android" or OS=="ios"', {
             'dependencies': [
               'audio_processing_offsets',
             ],
-            'sources': [
+            #
+            # We disable the ASM sources, because our gyp->Makefile translator
+            # does not support the build steps to get the asm offsets.
+            'sources!': [
               'aecm/aecm_core_neon.S',
               'ns/nsx_core_neon.S',
             ],
-            'sources!': [
+            'sources': [
               'aecm/aecm_core_neon.c',
               'ns/nsx_core_neon.c',
             ],
             'includes!': ['../../build/arm_neon.gypi',],
           }],
         ],
       }],
       'conditions': [
--- a/media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/media_file/source/media_file_utility.cc
@@ -612,23 +612,23 @@ int32_t ModuleFileUtility::ReadWavHeader
     // Calculate the number of bytes that 10 ms of audio data correspond to.
     if(_wavFormatObj.formatTag == kWaveFormatPcm)
     {
         // TODO (hellner): integer division for 22050 and 11025 would yield
         //                 the same result as the else statement. Remove those
         //                 special cases?
         if(_wavFormatObj.nSamplesPerSec == 44100)
         {
-            _readSizeBytes = 440 * _wavFormatObj.nChannels *
+            _readSizeBytes = 441 * _wavFormatObj.nChannels *
                 (_wavFormatObj.nBitsPerSample / 8);
         } else if(_wavFormatObj.nSamplesPerSec == 22050) {
-            _readSizeBytes = 220 * _wavFormatObj.nChannels *
+            _readSizeBytes = 220 * _wavFormatObj.nChannels * // XXX inexact!
                 (_wavFormatObj.nBitsPerSample / 8);
         } else if(_wavFormatObj.nSamplesPerSec == 11025) {
-            _readSizeBytes = 110 * _wavFormatObj.nChannels *
+            _readSizeBytes = 110 * _wavFormatObj.nChannels * // XXX inexact!
                 (_wavFormatObj.nBitsPerSample / 8);
         } else {
             _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
               _wavFormatObj.nChannels * (_wavFormatObj.nBitsPerSample / 8);
         }
 
     } else {
         _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
@@ -680,32 +680,32 @@ int32_t ModuleFileUtility::InitWavCodec(
             _codecId = kCodecL16_32Khz;
         }
         // Set the packet size for "odd" sampling frequencies so that it
         // properly corresponds to _readSizeBytes.
         else if(samplesPerSec == 11025)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 110;
-            codec_info_.plfreq = 11000;
+            codec_info_.pacsize = 110; // XXX inexact!
+            codec_info_.plfreq = 11000; // XXX inexact!
         }
         else if(samplesPerSec == 22050)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 220;
-            codec_info_.plfreq = 22000;
+            codec_info_.pacsize = 220; // XXX inexact!
+            codec_info_.plfreq = 22000; // XXX inexact!
         }
         else if(samplesPerSec == 44100)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
-            codec_info_.pacsize = 440;
-            codec_info_.plfreq = 44000;
+            codec_info_.pacsize = 441;
+            codec_info_.plfreq = 44100;
         }
         else if(samplesPerSec == 48000)
         {
             strcpy(codec_info_.plname, "L16");
             _codecId = kCodecL16_16kHz;
             codec_info_.pacsize = 480;
             codec_info_.plfreq = 48000;
         }
@@ -1128,18 +1128,16 @@ int32_t ModuleFileUtility::WriteWavHeade
     const uint32_t freq,
     const uint32_t bytesPerSample,
     const uint32_t channels,
     const uint32_t format,
     const uint32_t lengthInBytes)
 {
 
     // Frame size in bytes for 10 ms of audio.
-    // TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
-    //                 be taken into consideration here!
     int32_t frameSize = (freq / 100) * bytesPerSample * channels;
 
     // Calculate the number of full frames that the wave file contain.
     const int32_t dataLengthInBytes = frameSize *
         (lengthInBytes / frameSize);
 
     int8_t tmpStr[4];
     int8_t tmpChar;
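
A note on the 44.1 kHz arithmetic above: 10 ms at 44100 Hz is exactly 441
samples, so the special case must use 441, while 22050 and 11025 Hz still
truncate to 220 and 110 samples, hence the "XXX inexact!" markers. A minimal
standalone sketch of the corrected computation (plain C++, not part of the
patch; the parameters mirror _wavFormatObj but the function is hypothetical):

    #include <cstdint>
    #include <cstdio>

    // Bytes needed for 10 ms of PCM audio, following the corrected logic
    // in ModuleFileUtility::ReadWavHeader.
    static int32_t ReadSizeBytesFor10Ms(uint32_t samplesPerSec,
                                        uint16_t channels,
                                        uint16_t bitsPerSample) {
        uint32_t samples = (samplesPerSec == 44100)
            ? 441                     // exact: 44100 / 100
            : samplesPerSec / 100;    // truncates for 22050 and 11025
        return samples * channels * (bitsPerSample / 8);
    }

    int main() {
        // 44.1 kHz stereo, 16-bit: 441 * 2 * 2 = 1764 bytes per 10 ms.
        std::printf("%d\n", ReadSizeBytesFor10Ms(44100, 2, 16));
        return 0;
    }
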
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
@@ -7,16 +7,17 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
 
 #include <algorithm>
 #include <cassert>
+#include <cstdlib> // for abs()
 #include <cstring>
 #include <iterator>
 
 #include "webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h"
 #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
@@ -16,17 +16,17 @@
 
 #if defined(_WIN32)
 // Order for these headers are important
 #include <Windows.h>  // FILETIME
 
 #include <WinSock.h>  // timeval
 
 #include <MMSystem.h>  // timeGetTime
-#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_MAC))
+#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_BSD) || (defined WEBRTC_MAC))
 #include <sys/time.h>  // gettimeofday
 #include <time.h>
 #endif
 #if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
 #include <stdio.h>
 #endif
 
 #include "webrtc/system_wrappers/interface/tick_util.h"
@@ -67,19 +67,19 @@ uint32_t GetCurrentRTP(Clock* clock, uin
   local_clock->CurrentNtp(secs, frac);
   if (use_global_clock) {
     delete local_clock;
   }
   return ConvertNTPTimeToRTP(secs, frac, freq);
 }
 
 uint32_t ConvertNTPTimeToRTP(uint32_t NTPsec, uint32_t NTPfrac, uint32_t freq) {
   float ftemp = (float)NTPfrac / (float)NTP_FRAC;
   uint32_t tmp = (uint32_t)(ftemp * freq);
   return NTPsec * freq + tmp;
 }
 
 uint32_t ConvertNTPTimeToMS(uint32_t NTPsec, uint32_t NTPfrac) {
   int freq = 1000;
   float ftemp = (float)NTPfrac / (float)NTP_FRAC;
   uint32_t tmp = (uint32_t)(ftemp * freq);
   uint32_t MStime = NTPsec * freq + tmp;
   return MStime;
@@ -101,17 +101,17 @@ uint16_t GetPayloadDataLength(const RTPH
   return static_cast<uint16_t>(length);
 }
 
 #if defined(_WIN32)
 bool StringCompare(const char* str1, const char* str2,
                    const uint32_t length) {
   return (_strnicmp(str1, str2, length) == 0) ? true : false;
 }
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 bool StringCompare(const char* str1, const char* str2,
                    const uint32_t length) {
   return (strncasecmp(str1, str2, length) == 0) ? true : false;
 }
 #endif
 
 #if !defined(WEBRTC_LITTLE_ENDIAN) && !defined(WEBRTC_BIG_ENDIAN)
 #error Either WEBRTC_LITTLE_ENDIAN or WEBRTC_BIG_ENDIAN must be defined
@@ -141,17 +141,17 @@ void AssignUWord24ToBuffer(uint8_t* data
 #else
   dataBuffer[0] = static_cast<uint8_t>(value);
   dataBuffer[1] = static_cast<uint8_t>(value >> 8);
   dataBuffer[2] = static_cast<uint8_t>(value >> 16);
 #endif
 }
 
 void AssignUWord16ToBuffer(uint8_t* dataBuffer, uint16_t value) {
 #if defined(WEBRTC_LITTLE_ENDIAN)
   dataBuffer[0] = static_cast<uint8_t>(value >> 8);
   dataBuffer[1] = static_cast<uint8_t>(value);
 #else
   uint16_t* ptr = reinterpret_cast<uint16_t*>(dataBuffer);
   ptr[0] = value;
 #endif
 }
 
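
For reference, ConvertNTPTimeToRTP above scales the 32-bit NTP fraction
(units of 1/2^32 s, i.e. NTP_FRAC) by the RTP clock rate and adds the
whole-second part modulo 2^32. A standalone sketch of the same arithmetic
(double is used here for clarity; the original uses float):

    #include <cstdint>
    #include <cstdio>

    static const double kNtpFrac = 4294967296.0;  // 2^32

    // Same math as ConvertNTPTimeToRTP: seconds scale by the RTP clock
    // rate; the fractional field is converted through floating point.
    static uint32_t NtpToRtp(uint32_t ntpSec, uint32_t ntpFrac, uint32_t freq) {
        uint32_t frac = static_cast<uint32_t>((ntpFrac / kNtpFrac) * freq);
        return ntpSec * freq + frac;  // wraps modulo 2^32, as RTP timestamps do
    }

    int main() {
        // Half a second (ntpFrac = 2^31) at a 90 kHz video clock: 45000 ticks.
        std::printf("%u\n", NtpToRtp(0, 2147483648u, 90000));
        return 0;
    }
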
--- a/media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/file_player_impl.cc
@@ -89,17 +89,17 @@ int32_t FilePlayerImpl::Frequency() cons
     if(_codec.plfreq == 11000)
     {
         return 16000;
     }
     else if(_codec.plfreq == 22000)
     {
         return 32000;
     }
-    else if(_codec.plfreq == 44000)
+    else if(_codec.plfreq == 44100 || _codec.plfreq == 44000) // XXX just 44100?
     {
         return 32000;
     }
     else if(_codec.plfreq == 48000)
     {
         return 32000;
     }
     else
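
FilePlayerImpl::Frequency() maps the nominal codec rate to the rate the
player actually runs at; the hunk above accepts both the corrected 44100 and
the legacy 44000 value for the 44.1 kHz case. A compact sketch of the
mapping, with the final branch assumed (the hunk cuts off before the
closing else):

    #include <cstdint>

    // Playout rate for a given codec plfreq, per FilePlayerImpl::Frequency().
    static int32_t PlayoutFrequency(int32_t plfreq) {
        if (plfreq == 11000) return 16000;
        if (plfreq == 22000) return 32000;
        if (plfreq == 44100 || plfreq == 44000) return 32000;  // legacy 44000
        if (plfreq == 48000) return 32000;
        return plfreq;  // assumption: 8/16/32 kHz pass through unchanged
    }
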
--- a/media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/rtp_dump_impl.cc
@@ -14,17 +14,17 @@
 #include <stdio.h>
 
 #include "critical_section_wrapper.h"
 #include "trace.h"
 
 #if defined(_WIN32)
 #include <Windows.h>
 #include <mmsystem.h>
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 #include <string.h>
 #include <sys/time.h>
 #include <time.h>
 #endif
 
 #if (defined(_DEBUG) && defined(_WIN32))
 #define DEBUG_PRINT(expr)   OutputDebugString(##expr)
 #define DEBUG_PRINTP(expr, p)   \
@@ -232,17 +232,17 @@ bool RtpDumpImpl::RTCP(const uint8_t* pa
     return is_rtcp;
 }
 
 // TODO (hellner): why is TickUtil not used here?
 inline uint32_t RtpDumpImpl::GetTimeInMS() const
 {
 #if defined(_WIN32)
     return timeGetTime();
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
     struct timeval tv;
     struct timezone tz;
     unsigned long val;
 
     gettimeofday(&tv, &tz);
     val = tv.tv_sec * 1000 + tv.tv_usec / 1000;
     return val;
 #else
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.cc
@@ -11,16 +11,18 @@
 #include "device_info_android.h"
 
 #include <stdio.h>
 
 #include "ref_count.h"
 #include "trace.h"
 #include "video_capture_android.h"
 
+#include "AndroidJNIWrapper.h"
+
 namespace webrtc
 {
 
 namespace videocapturemodule
 {
 
 static jclass g_capabilityClass = NULL;
 
@@ -174,17 +176,17 @@ int32_t DeviceInfoAndroid::CreateCapabil
   if (VideoCaptureAndroid::AttachAndUseAndroidDeviceInfoObjects(
           env,
           javaCmDevInfoClass,
           javaCmDevInfoObject,
           attached) != 0)
     return -1;
 
   // Find the capability class
-  jclass javaCapClass = g_capabilityClass;
+  jclass javaCapClass = jsjni_GetGlobalClassRef(AndroidJavaCaptureCapabilityClass);
   if (javaCapClass == NULL) {
     VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: SetAndroidCaptureClasses must be called first!",
                  __FUNCTION__);
     return -1;
   }
 
@@ -252,16 +254,18 @@ int32_t DeviceInfoAndroid::CreateCapabil
 
   _lastUsedDeviceNameLength = strlen((char*) deviceUniqueIdUTF8);
   _lastUsedDeviceName = (char*) realloc(_lastUsedDeviceName,
                                         _lastUsedDeviceNameLength + 1);
   memcpy(_lastUsedDeviceName,
          deviceUniqueIdUTF8,
          _lastUsedDeviceNameLength + 1);
 
+  env->DeleteGlobalRef(javaCapClass);
+
   VideoCaptureAndroid::ReleaseAndroidDeviceInfoObjects(attached);
   WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _id,
                "CreateCapabilityMap %d", _captureCapabilities.Size());
 
   return _captureCapabilities.Size();
 }
 
 int32_t DeviceInfoAndroid::GetOrientation(
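
The device-info hunks above replace the cached g_capabilityClass with a
per-call jsjni_GetGlobalClassRef (a Gecko JNI helper; its contract is
assumed here to be FindClass plus NewGlobalRef) and balance the new
reference with DeleteGlobalRef before returning. The underlying JNI
pattern, sketched standalone:

    #include <jni.h>

    // Sketch of what a helper like jsjni_GetGlobalClassRef is assumed to do:
    // resolve the class on the current thread, then promote the local ref to
    // a global one so it outlives the JNI call that created it.
    static jclass GetGlobalClassRef(JNIEnv* env, const char* name) {
        jclass local = env->FindClass(name);
        if (!local)
            return NULL;  // ClassNotFoundException is pending in env
        jclass global = static_cast<jclass>(env->NewGlobalRef(local));
        env->DeleteLocalRef(local);  // keep only the global ref
        return global;
    }

    // Callers must balance every global ref, as CreateCapabilityMap now does:
    //   jclass cls = GetGlobalClassRef(env, AndroidJavaCaptureCapabilityClass);
    //   ... use cls ...
    //   env->DeleteGlobalRef(cls);
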
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/device_info_android.h
@@ -10,16 +10,19 @@
 
 #ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
 #define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
 
 #include <jni.h>
 #include "../video_capture_impl.h"
 #include "../device_info_impl.h"
 
+#define AndroidJavaCaptureDeviceInfoClass "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid"
+#define AndroidJavaCaptureCapabilityClass "org/webrtc/videoengine/CaptureCapabilityAndroid"
+
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 // Android logging, uncomment to print trace to
 // logcat instead of trace file/callback
 // #include <android/log.h>
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/java/org/webrtc/videoengine/VideoCaptureAndroid.java
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/java/org/webrtc/videoengine/VideoCaptureAndroid.java
@@ -20,248 +20,378 @@ import org.webrtc.videoengine.VideoCaptu
 import android.graphics.ImageFormat;
 import android.graphics.PixelFormat;
 import android.graphics.Rect;
 import android.graphics.SurfaceTexture;
 import android.graphics.YuvImage;
 import android.hardware.Camera;
 import android.hardware.Camera.PreviewCallback;
 import android.util.Log;
+import android.view.Surface;
 import android.view.SurfaceHolder;
 import android.view.SurfaceHolder.Callback;
+import android.view.SurfaceView;
+import android.view.TextureView;
+import android.view.TextureView.SurfaceTextureListener;
+import android.view.View;
+
+import org.mozilla.gecko.GeckoApp;
+import org.mozilla.gecko.GeckoAppShell;
+import org.mozilla.gecko.GeckoAppShell.AppStateListener;
+import org.mozilla.gecko.util.ThreadUtils;
 
 public class VideoCaptureAndroid implements PreviewCallback, Callback {
 
     private final static String TAG = "WEBRTC-JC";
 
     private Camera camera;
+    private int cameraId;
     private AndroidVideoCaptureDevice currentDevice = null;
     public ReentrantLock previewBufferLock = new ReentrantLock();
     // This lock takes sync with StartCapture and SurfaceChanged
     private ReentrantLock captureLock = new ReentrantLock();
     private int PIXEL_FORMAT = ImageFormat.NV21;
     PixelFormat pixelFormat = new PixelFormat();
     // True when the C++ layer has ordered the camera to be started.
     private boolean isCaptureStarted = false;
     private boolean isCaptureRunning = false;
     private boolean isSurfaceReady = false;
+    private SurfaceHolder surfaceHolder = null;
+    private SurfaceTexture surfaceTexture = null;
+    private SurfaceTexture dummySurfaceTexture = null;
 
     private final int numCaptureBuffers = 3;
     private int expectedFrameSize = 0;
     private int orientation = 0;
     private int id = 0;
     // C++ callback context variable.
     private long context = 0;
     private SurfaceHolder localPreview = null;
-    private SurfaceTexture dummySurfaceTexture = null;
     // True if this class owns the preview video buffers.
     private boolean ownsBuffers = false;
 
     private int mCaptureWidth = -1;
     private int mCaptureHeight = -1;
     private int mCaptureFPS = -1;
 
+    private int mCaptureRotation = 0;
+
+    private AppStateListener mAppStateListener = null;
+
+    public class MySurfaceTextureListener implements TextureView.SurfaceTextureListener {
+        public void onSurfaceTextureAvailable(SurfaceTexture surface, int width, int height) {
+            Log.d(TAG, "VideoCaptureAndroid::onSurfaceTextureAvailable");
+
+            captureLock.lock();
+            isSurfaceReady = true;
+            surfaceTexture = surface;
+
+            tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
+            captureLock.unlock();
+        }
+
+        public void onSurfaceTextureSizeChanged(SurfaceTexture surface,
+                                                int width, int height) {
+            // Ignored, Camera does all the work for us
+            // Note that for a TextureView we start on onSurfaceTextureAvailable,
+            // for a SurfaceView we start on surfaceChanged. TextureView
+            // will not give out an onSurfaceTextureSizeChanged during creation.
+        }
+
+        public boolean onSurfaceTextureDestroyed(SurfaceTexture surface) {
+            Log.d(TAG, "VideoCaptureAndroid::onSurfaceTextureDestroyed");
+            isSurfaceReady = false;
+            DetachCamera();
+            return true;
+        }
+
+        public void onSurfaceTextureUpdated(SurfaceTexture surface) {
+            // Invoked every time there's a new Camera preview frame
+        }
+    }
     public static
     void DeleteVideoCaptureAndroid(VideoCaptureAndroid captureAndroid) {
         Log.d(TAG, "DeleteVideoCaptureAndroid");
-        if (captureAndroid.camera == null) {
-            return;
-        }
+
+        GeckoAppShell.getGeckoInterface().removeAppStateListener(captureAndroid.mAppStateListener);
 
         captureAndroid.StopCapture();
-        captureAndroid.camera.release();
-        captureAndroid.camera = null;
+        if (captureAndroid.camera != null) {
+            captureAndroid.camera.release();
+            captureAndroid.camera = null;
+        }
         captureAndroid.context = 0;
+
+        View cameraView = GeckoAppShell.getGeckoInterface().getCameraView();
+        if (cameraView instanceof SurfaceView) {
+            ((SurfaceView)cameraView).getHolder().removeCallback(captureAndroid);
+        } else if (cameraView instanceof TextureView) {
+            // No need to explicitly remove the Listener:
+            // i.e. ((TextureView)cameraView).setSurfaceTextureListener(null);
+        }
+        ThreadUtils.getUiHandler().post(new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    GeckoAppShell.getGeckoInterface().disableCameraView();
+                } catch (Exception e) {
+                    Log.e(TAG,
+                          "VideoCaptureAndroid disableCameraView exception: " +
+                          e.getLocalizedMessage());
+                }
+            }
+        });
     }
 
     public VideoCaptureAndroid(int in_id, long in_context, Camera in_camera,
-            AndroidVideoCaptureDevice in_device) {
+                               AndroidVideoCaptureDevice in_device,
+                               int in_cameraId) {
         id = in_id;
         context = in_context;
         camera = in_camera;
+        cameraId = in_cameraId;
         currentDevice = in_device;
+        mCaptureRotation = GetRotateAmount();
+
+        try {
+            View cameraView = GeckoAppShell.getGeckoInterface().getCameraView();
+            if (cameraView instanceof SurfaceView) {
+                ((SurfaceView)cameraView).getHolder().addCallback(this);
+            } else if (cameraView instanceof TextureView) {
+                MySurfaceTextureListener listener = new MySurfaceTextureListener();
+                ((TextureView)cameraView).setSurfaceTextureListener(listener);
+            }
+            ThreadUtils.getUiHandler().post(new Runnable() {
+                @Override
+                public void run() {
+                    try {
+                        GeckoAppShell.getGeckoInterface().enableCameraView();
+                    } catch (Exception e) {
+                        Log.e(TAG,
+                              "VideoCaptureAndroid enableCameraView exception: "
+                               + e.getLocalizedMessage());
+                    }
+                }
+            });
+        } catch (Exception ex) {
+            Log.e(TAG, "VideoCaptureAndroid constructor exception: " +
+                  ex.getLocalizedMessage());
+        }
+
+        mAppStateListener = new AppStateListener() {
+            @Override
+            public void onPause() {
+                StopCapture();
+                if (camera != null) {
+                    camera.release();
+                    camera = null;
+                }
+            }
+            @Override
+            public void onResume() {
+                try {
+                    if (android.os.Build.VERSION.SDK_INT > 8) {
+                        camera = Camera.open(cameraId);
+                    } else {
+                        camera = Camera.open();
+                    }
+                } catch (Exception ex) {
+                    Log.e(TAG, "Error reopening the camera: " + ex.getMessage());
+                }
+                captureLock.lock();
+                isCaptureStarted = true;
+                tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
+                captureLock.unlock();
+            }
+            @Override
+            public void onOrientationChanged() {
+                mCaptureRotation = GetRotateAmount();
+            }
+        };
+
+        GeckoAppShell.getGeckoInterface().addAppStateListener(mAppStateListener);
+    }
+
+    public int GetRotateAmount() {
+        int rotation = GeckoAppShell.getGeckoInterface().getActivity().getWindowManager().getDefaultDisplay().getRotation();
+        int degrees = 0;
+        switch (rotation) {
+            case Surface.ROTATION_0: degrees = 0; break;
+            case Surface.ROTATION_90: degrees = 90; break;
+            case Surface.ROTATION_180: degrees = 180; break;
+            case Surface.ROTATION_270: degrees = 270; break;
+        }
+        if (android.os.Build.VERSION.SDK_INT > 8) {
+            android.hardware.Camera.CameraInfo info =
+                new android.hardware.Camera.CameraInfo();
+            android.hardware.Camera.getCameraInfo(cameraId, info);
+            int result;
+            if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
+                result = (info.orientation + degrees) % 360;
+            } else {  // back-facing
+                result = (info.orientation - degrees + 360) % 360;
+            }
+            return result;
+        } else {
+            // Assume 90deg orientation for Froyo devices.
+            // Only back-facing cameras are supported in Froyo.
+            int orientation = 90;
+            int result = (orientation - degrees + 360) % 360;
+            return result;
+        }
     }
 
     private int tryStartCapture(int width, int height, int frameRate) {
         if (camera == null) {
             Log.e(TAG, "Camera not initialized %d" + id);
             return -1;
         }
 
-        Log.d(TAG, "tryStartCapture: " + width +
-            "x" + height +", frameRate: " + frameRate +
-            ", isCaptureRunning: " + isCaptureRunning +
-            ", isSurfaceReady: " + isSurfaceReady +
-            ", isCaptureStarted: " + isCaptureStarted);
+        Log.d(TAG, "tryStartCapture " + width +
+                " height " + height + " frame rate " + frameRate +
+                " isCaptureRunning " + isCaptureRunning +
+                " isSurfaceReady " + isSurfaceReady +
+                " isCaptureStarted " + isCaptureStarted);
 
-        if (isCaptureRunning || !isCaptureStarted) {
+        if (isCaptureRunning || !isSurfaceReady || !isCaptureStarted) {
             return 0;
         }
 
-        CaptureCapabilityAndroid currentCapability =
-                new CaptureCapabilityAndroid();
-        currentCapability.width = width;
-        currentCapability.height = height;
-        currentCapability.maxFPS = frameRate;
-        PixelFormat.getPixelFormatInfo(PIXEL_FORMAT, pixelFormat);
+        try {
+            if (surfaceHolder != null)
+                camera.setPreviewDisplay(surfaceHolder);
+            if (surfaceTexture != null)
+                camera.setPreviewTexture(surfaceTexture);
+            if (surfaceHolder == null && surfaceTexture == null) {
+                // No local renderer.  Camera won't capture without
+                // setPreview{Texture,Display}, so we create a dummy SurfaceTexture
+                // and hand it over to Camera, but never listen for frame-ready
+                // callbacks, and never call updateTexImage on it.
+                try {
+                    dummySurfaceTexture = new SurfaceTexture(42);
+                    camera.setPreviewTexture(dummySurfaceTexture);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+
+            CaptureCapabilityAndroid currentCapability =
+                    new CaptureCapabilityAndroid();
+            currentCapability.width = width;
+            currentCapability.height = height;
+            currentCapability.maxFPS = frameRate;
+            PixelFormat.getPixelFormatInfo(PIXEL_FORMAT, pixelFormat);
+
-        Camera.Parameters parameters = camera.getParameters();
-        parameters.setPreviewSize(currentCapability.width,
-                currentCapability.height);
-        parameters.setPreviewFormat(PIXEL_FORMAT);
-        parameters.setPreviewFrameRate(currentCapability.maxFPS);
-        try {
+            Camera.Parameters parameters = camera.getParameters();
+            parameters.setPreviewSize(currentCapability.width,
+                    currentCapability.height);
+            parameters.setPreviewFormat(PIXEL_FORMAT);
+            parameters.setPreviewFrameRate(currentCapability.maxFPS);
             camera.setParameters(parameters);
-        } catch (RuntimeException e) {
-            Log.e(TAG, "setParameters failed", e);
+
+            int bufSize = width * height * pixelFormat.bitsPerPixel / 8;
+            byte[] buffer = null;
+            for (int i = 0; i < numCaptureBuffers; i++) {
+                buffer = new byte[bufSize];
+                camera.addCallbackBuffer(buffer);
+            }
+            camera.setPreviewCallbackWithBuffer(this);
+            ownsBuffers = true;
+
+            camera.startPreview();
+            previewBufferLock.lock();
+            expectedFrameSize = bufSize;
+            isCaptureRunning = true;
+            previewBufferLock.unlock();
+
+        }
+        catch (Exception ex) {
+            Log.e(TAG, "Failed to start camera: " + ex.getMessage());
             return -1;
         }
 
-        int bufSize = width * height * pixelFormat.bitsPerPixel / 8;
-        byte[] buffer = null;
-        for (int i = 0; i < numCaptureBuffers; i++) {
-            buffer = new byte[bufSize];
-            camera.addCallbackBuffer(buffer);
-        }
-        camera.setPreviewCallbackWithBuffer(this);
-        ownsBuffers = true;
-
-        camera.startPreview();
-        previewBufferLock.lock();
-        expectedFrameSize = bufSize;
-        isCaptureRunning = true;
-        previewBufferLock.unlock();
-
         return 0;
     }
 
     public int StartCapture(int width, int height, int frameRate) {
         Log.d(TAG, "StartCapture width " + width +
                 " height " + height +" frame rate " + frameRate);
-        // Get the local preview SurfaceHolder from the static render class
-        localPreview = ViERenderer.GetLocalRenderer();
-        if (localPreview != null) {
-            if (localPreview.getSurface() != null &&
-                localPreview.getSurface().isValid()) {
-                surfaceCreated(localPreview);
-            }
-            localPreview.addCallback(this);
-        } else {
-          // No local renderer.  Camera won't capture without
-          // setPreview{Texture,Display}, so we create a dummy SurfaceTexture
-          // and hand it over to Camera, but never listen for frame-ready
-          // callbacks, and never call updateTexImage on it.
-          captureLock.lock();
-          try {
-            dummySurfaceTexture = new SurfaceTexture(42);
-            camera.setPreviewTexture(dummySurfaceTexture);
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
-          captureLock.unlock();
-        }
-
         captureLock.lock();
         isCaptureStarted = true;
         mCaptureWidth = width;
         mCaptureHeight = height;
         mCaptureFPS = frameRate;
 
         int res = tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
 
         captureLock.unlock();
         return res;
     }
 
-    public int StopCapture() {
-        Log.d(TAG, "StopCapture");
+    public int DetachCamera() {
         try {
             previewBufferLock.lock();
             isCaptureRunning = false;
             previewBufferLock.unlock();
-            camera.stopPreview();
-            camera.setPreviewCallbackWithBuffer(null);
-        } catch (RuntimeException e) {
-            Log.e(TAG, "Failed to stop camera", e);
+            if (camera != null) {
+                camera.setPreviewCallbackWithBuffer(null);
+                camera.stopPreview();
+            }
+        } catch (Exception ex) {
+            Log.e(TAG, "Failed to stop camera: " + ex.getMessage());
             return -1;
         }
-
-        isCaptureStarted = false;
         return 0;
     }
 
-    native void ProvideCameraFrame(byte[] data, int length, long captureObject);
+    public int StopCapture() {
+        Log.d(TAG, "StopCapture");
+        isCaptureStarted = false;
+        return DetachCamera();
+    }
+
+    native void ProvideCameraFrame(byte[] data, int length, int rotation,
+                                   long captureObject);
 
     public void onPreviewFrame(byte[] data, Camera camera) {
         previewBufferLock.lock();
 
         // The following line is for debug only
         // Log.v(TAG, "preview frame length " + data.length +
         //            " context" + context);
         if (isCaptureRunning) {
             // If StartCapture has been called but not StopCapture
             // Call the C++ layer with the captured frame
             if (data.length == expectedFrameSize) {
-                ProvideCameraFrame(data, expectedFrameSize, context);
+                ProvideCameraFrame(data, expectedFrameSize, mCaptureRotation,
+                                   context);
                 if (ownsBuffers) {
                     // Give the video buffer to the camera service again.
                     camera.addCallbackBuffer(data);
                 }
             }
         }
         previewBufferLock.unlock();
     }
 
-    // Sets the rotation of the preview render window.
-    // Does not affect the captured video image.
-    public void SetPreviewRotation(int rotation) {
-        Log.v(TAG, "SetPreviewRotation:" + rotation);
-
-        if (camera == null) {
-            return;
-        }
-
-        int resultRotation = 0;
-        if (currentDevice.frontCameraType ==
-            VideoCaptureDeviceInfoAndroid.FrontFacingCameraType.Android23) {
-            // this is a 2.3 or later front facing camera.
-            // SetDisplayOrientation will flip the image horizontally
-            // before doing the rotation.
-            resultRotation = ( 360 - rotation ) % 360; // compensate the mirror
-        }
-        else {
-            // Back facing or 2.2 or previous front camera
-            resultRotation = rotation;
-        }
-        camera.setDisplayOrientation(resultRotation);
-    }
-
     public void surfaceChanged(SurfaceHolder holder,
                                int format, int width, int height) {
         Log.d(TAG, "VideoCaptureAndroid::surfaceChanged");
+
+        captureLock.lock();
+        isSurfaceReady = true;
+        surfaceHolder = holder;
+
+        tryStartCapture(mCaptureWidth, mCaptureHeight, mCaptureFPS);
+        captureLock.unlock();
+        return;
     }
 
     public void surfaceCreated(SurfaceHolder holder) {
         Log.d(TAG, "VideoCaptureAndroid::surfaceCreated");
-        captureLock.lock();
-        try {
-          if (camera != null) {
-              camera.setPreviewDisplay(holder);
-          }
-        } catch (IOException e) {
-            Log.e(TAG, "Failed to set preview surface!", e);
-        }
-        captureLock.unlock();
     }
 
     public void surfaceDestroyed(SurfaceHolder holder) {
         Log.d(TAG, "VideoCaptureAndroid::surfaceDestroyed");
-        captureLock.lock();
-        try {
-            if (camera != null) {
-                camera.setPreviewDisplay(null);
-            }
-        } catch (IOException e) {
-            Log.e(TAG, "Failed to clear preview surface!", e);
-        }
-        captureLock.unlock();
+        isSurfaceReady = false;
+        DetachCamera();
     }
 }
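
GetRotateAmount() above combines the camera sensor's mounted orientation
with the current display rotation; front-facing cameras add the two (the
preview is mirrored), back-facing cameras subtract. The same arithmetic as
a standalone sketch (C++ here, to match the rest of these notes; the Java
original reads both values from Camera.CameraInfo and the window manager):

    #include <cstdio>

    // Degrees a captured frame must be rotated to appear upright.
    // sensorOrientation is the camera's mounted angle (0/90/180/270);
    // displayDegrees is the current UI rotation in degrees.
    static int RotateAmount(int sensorOrientation, int displayDegrees,
                            bool frontFacing) {
        if (frontFacing)
            return (sensorOrientation + displayDegrees) % 360;
        return (sensorOrientation - displayDegrees + 360) % 360;  // back-facing
    }

    int main() {
        // Back camera mounted at 90 degrees, device held in landscape (90):
        std::printf("%d\n", RotateAmount(90, 90, false));  // prints 0
        return 0;
    }
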
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/java/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/java/org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid.java
@@ -247,44 +247,47 @@ public class VideoCaptureDeviceInfoAndro
 
     // Returns an instance of VideoCaptureAndroid.
     public VideoCaptureAndroid AllocateCamera(int id, long context,
             String deviceUniqueId) {
         try {
             Log.d(TAG, "AllocateCamera " + deviceUniqueId);
 
             Camera camera = null;
+            int cameraId = 0;
             AndroidVideoCaptureDevice deviceToUse = null;
             for (AndroidVideoCaptureDevice device: deviceList) {
                 if(device.deviceUniqueName.equals(deviceUniqueId)) {
                     // Found the wanted camera
                     deviceToUse = device;
                     switch(device.frontCameraType) {
                         case GalaxyS:
                             camera = AllocateGalaxySFrontCamera();
                             break;
                         case HTCEvo:
                             camera = AllocateEVOFrontFacingCamera();
                             break;
                         default:
                             // From Android 2.3 and onwards)
-                            if(android.os.Build.VERSION.SDK_INT>8)
-                                camera=Camera.open(device.index);
-                            else
-                                camera=Camera.open(); // Default camera
+                            if (android.os.Build.VERSION.SDK_INT > 8) {
+                                cameraId = device.index;
+                                camera = Camera.open(device.index);
+                            } else {
+                                camera = Camera.open(); // Default camera
+                            }
                     }
                 }
             }
 
             if(camera == null) {
                 return null;
             }
             Log.v(TAG, "AllocateCamera - creating VideoCaptureAndroid");
 
-            return new VideoCaptureAndroid(id, context, camera, deviceToUse);
+            return new VideoCaptureAndroid(id, context, camera, deviceToUse, cameraId);
         } catch (NoSuchMethodException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
         } catch (ClassNotFoundException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
         } catch (InvocationTargetException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
         } catch (IllegalAccessException e) {
             Log.e(TAG, "AllocateCamera Failed to open camera", e);
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.cc
@@ -11,16 +11,18 @@
 #include "video_capture_android.h"
 
 #include <stdio.h>
 
 #include "critical_section_wrapper.h"
 #include "ref_count.h"
 #include "trace.h"
 
+#include "AndroidJNIWrapper.h"
+
 namespace webrtc
 {
 #if defined(WEBRTC_ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
 // TODO(leozwang) These SetAndroidVM apis will be refactored, thus we only
 // keep and reference java vm.
 int32_t SetCaptureAndroidVM(void* javaVM, void* javaContext) {
   return videocapturemodule::VideoCaptureAndroid::SetAndroidObjects(
       javaVM,
@@ -54,103 +56,68 @@ VideoCaptureModule* VideoCaptureImpl::Cr
 
 JavaVM* VideoCaptureAndroid::g_jvm = NULL;
 //VideoCaptureAndroid.java
 jclass VideoCaptureAndroid::g_javaCmClass = NULL;
 //VideoCaptureDeviceInfoAndroid.java
 jclass VideoCaptureAndroid::g_javaCmDevInfoClass = NULL;
 //static instance of VideoCaptureDeviceInfoAndroid.java
 jobject VideoCaptureAndroid::g_javaCmDevInfoObject = NULL;
-jobject VideoCaptureAndroid::g_javaContext = NULL;
 
 /*
  * Register references to Java Capture class.
  */
 int32_t VideoCaptureAndroid::SetAndroidObjects(void* javaVM,
                                                void* javaContext) {
 
   g_jvm = static_cast<JavaVM*> (javaVM);
-  g_javaContext = static_cast<jobject> (javaContext);
 
   if (javaVM) {
+    // Already done? Exit early.
+    if (g_javaCmClass != NULL
+        && g_javaCmDevInfoClass != NULL
+        && g_javaCmDevInfoObject != NULL) {
+      return 0;
+    }
+
     JNIEnv* env = NULL;
     if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: could not get Java environment", __FUNCTION__);
       return -1;
     }
     // get java capture class type (note path to class packet)
-    jclass javaCmClassLocal = env->FindClass(AndroidJavaCaptureClass);
-    if (!javaCmClassLocal) {
+    g_javaCmClass = jsjni_GetGlobalClassRef(AndroidJavaCaptureClass);
+    if (!g_javaCmClass) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: could not find java class", __FUNCTION__);
       return -1;
     }
-    // create a global reference to the class
-    // (to tell JNI that we are referencing it
-    // after this function has returned)
-    g_javaCmClass = static_cast<jclass>
-        (env->NewGlobalRef(javaCmClassLocal));
-    if (!g_javaCmClass) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: InitVideoEngineJava(): could not create"
-                   " Java Camera class reference",
-                   __FUNCTION__);
-      return -1;
-    }
-    // Delete local class ref, we only use the global ref
-    env->DeleteLocalRef(javaCmClassLocal);
     JNINativeMethod nativeFunctions =
-        { "ProvideCameraFrame", "([BIJ)V",
+        { "ProvideCameraFrame", "([BIIJ)V",
           (void*) &VideoCaptureAndroid::ProvideCameraFrame };
     if (env->RegisterNatives(g_javaCmClass, &nativeFunctions, 1) == 0) {
       WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
                    "%s: Registered native functions", __FUNCTION__);
     }
     else {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: Failed to register native functions",
                    __FUNCTION__);
       return -1;
     }
 
-    jclass capabilityClassLocal = env->FindClass(
-        "org/webrtc/videoengine/CaptureCapabilityAndroid");
-    if (!capabilityClassLocal) {
+    // get java capture device info class (note path to class packet)
+    g_javaCmDevInfoClass = jsjni_GetGlobalClassRef(
+        AndroidJavaCaptureDeviceInfoClass);
+    if (!g_javaCmDevInfoClass) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
                    "%s: could not find java class", __FUNCTION__);
       return -1;
     }
-    jclass capabilityClassGlobal = reinterpret_cast<jclass>(env->NewGlobalRef(
-        capabilityClassLocal));
-    DeviceInfoAndroid::SetAndroidCaptureClasses(capabilityClassGlobal);
-
-    // get java capture class type (note path to class packet)
-    jclass javaCmDevInfoClassLocal = env->FindClass(
-        "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid");
-    if (!javaCmDevInfoClassLocal) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: could not find java class", __FUNCTION__);
-      return -1;
-    }
-
-    // create a global reference to the class
-    // (to tell JNI that we are referencing it
-    // after this function has returned)
-    g_javaCmDevInfoClass = static_cast<jclass>
-        (env->NewGlobalRef(javaCmDevInfoClassLocal));
-    if (!g_javaCmDevInfoClass) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: InitVideoEngineJava(): could not create Java "
-                   "Camera Device info class reference",
-                   __FUNCTION__);
-      return -1;
-    }
-    // Delete local class ref, we only use the global ref
-    env->DeleteLocalRef(javaCmDevInfoClassLocal);
 
     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
                  "VideoCaptureDeviceInfoAndroid get method id");
 
     // get the method ID for the Android Java CaptureClass static
     //CreateVideoCaptureAndroid factory method.
     jmethodID cid = env->GetStaticMethodID(
         g_javaCmDevInfoClass,
@@ -167,17 +134,17 @@ int32_t VideoCaptureAndroid::SetAndroidO
 
     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
                  "%s: construct static java device object", __FUNCTION__);
 
     // construct the object by calling the static constructor object
     jobject javaCameraDeviceInfoObjLocal =
         env->CallStaticObjectMethod(g_javaCmDevInfoClass,
                                     cid, (int) -1,
-                                    g_javaContext);
+                                    javaContext);
     if (!javaCameraDeviceInfoObjLocal) {
       WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, -1,
                    "%s: could not create Java Capture Device info object",
                    __FUNCTION__);
       return -1;
     }
     // create a reference to the object (to tell JNI that
     // we are referencing it after this function has returned)
@@ -279,21 +246,39 @@ int32_t VideoCaptureAndroid::ReleaseAndr
  * Class:     org_webrtc_capturemodule_VideoCaptureAndroid
  * Method:    ProvideCameraFrame
- * Signature: ([BIJ)V
+ * Signature: ([BIIJ)V
  */
 void JNICALL VideoCaptureAndroid::ProvideCameraFrame(JNIEnv * env,
                                                      jobject,
                                                      jbyteArray javaCameraFrame,
                                                      jint length,
+                                                     jint rotation,
                                                      jlong context) {
   VideoCaptureAndroid* captureModule =
       reinterpret_cast<VideoCaptureAndroid*>(context);
   WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture,
                -1, "%s: IncomingFrame %d", __FUNCTION__,length);
+
+  switch (rotation) {
+    case 90:
+      captureModule->SetCaptureRotation(kCameraRotate90);
+      break;
+    case 180:
+      captureModule->SetCaptureRotation(kCameraRotate180);
+      break;
+    case 270:
+      captureModule->SetCaptureRotation(kCameraRotate270);
+      break;
+    case 0:
+    default:
+      captureModule->SetCaptureRotation(kCameraRotate0);
+      break;
+  }
+
   jbyte* cameraFrame= env->GetByteArrayElements(javaCameraFrame,NULL);
   captureModule->IncomingFrame((uint8_t*) cameraFrame,
                                length,captureModule->_frameInfo,0);
   env->ReleaseByteArrayElements(javaCameraFrame,cameraFrame,JNI_ABORT);
 }
 
 
 
@@ -306,17 +291,17 @@ VideoCaptureAndroid::VideoCaptureAndroid
 
 // ----------------------------------------------------------------------------
 //  Init
 //
 //  Initializes needed Java resources like the JNI interface to
 //  VideoCaptureAndroid.java
 // ----------------------------------------------------------------------------
 int32_t VideoCaptureAndroid::Init(const int32_t id,
                                   const char* deviceUniqueIdUTF8) {
   const int nameLength = strlen(deviceUniqueIdUTF8);
   if (nameLength >= kVideoCaptureUniqueNameLength) {
     return -1;
   }
 
   // Store the device name
   _deviceUniqueId = new char[nameLength + 1];
   memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
@@ -336,16 +321,17 @@ int32_t VideoCaptureAndroid::Init(const 
   if (!g_jvm) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                  "%s: Not a valid Java VM pointer", __FUNCTION__);
     return -1;
   }
   // get the JNI env for this thread
   JNIEnv *env;
   bool isAttached = false;
+  int32_t rotation = 0;
 
   // get the JNI env for this thread
   if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
     // try to attach the thread and get the env
     // Attach this thread to JVM
     jint res = g_jvm->AttachCurrentThread(&env, NULL);
     if ((res < 0) || !env) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
@@ -429,37 +415,45 @@ VideoCaptureAndroid::~VideoCaptureAndroi
                      "%s: Could not attach thread to JVM (%d, %p)",
                      __FUNCTION__, res, env);
       }
       else {
         isAttached = true;
       }
     }
 
-    // get the method ID for the Android Java CaptureClass static
-    // DeleteVideoCaptureAndroid  method. Call this to release the camera so
-    // another application can use it.
-    jmethodID cid = env->GetStaticMethodID(
-        g_javaCmClass,
-        "DeleteVideoCaptureAndroid",
-        "(Lorg/webrtc/videoengine/VideoCaptureAndroid;)V");
-    if (cid != NULL) {
-      WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
-                   "%s: Call DeleteVideoCaptureAndroid", __FUNCTION__);
-      // Close the camera by calling the static destruct function.
-      env->CallStaticVoidMethod(g_javaCmClass, cid, _javaCaptureObj);
+    if (env) {
+      // get the method ID for the Android Java CaptureClass static
+      // DeleteVideoCaptureAndroid  method. Call this to release the camera so
+      // another application can use it.
+      jmethodID cid = env->GetStaticMethodID(
+          g_javaCmClass,
+          "DeleteVideoCaptureAndroid",
+          "(Lorg/webrtc/videoengine/VideoCaptureAndroid;)V");
+      if (cid != NULL) {
+        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, -1,
+                     "%s: Call DeleteVideoCaptureAndroid", __FUNCTION__);
+        // Close the camera by calling the static destruct function.
+        env->CallStaticVoidMethod(g_javaCmClass, cid, _javaCaptureObj);
 
-      // Delete global object ref to the camera.
-      env->DeleteGlobalRef(_javaCaptureObj);
-      _javaCaptureObj = NULL;
-    }
-    else {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: Failed to find DeleteVideoCaptureAndroid id",
-                   __FUNCTION__);
+        // Delete global object ref to the camera.
+        env->DeleteGlobalRef(_javaCaptureObj);
+        // Clean up the global class references
+        env->DeleteGlobalRef(g_javaCmClass);
+        env->DeleteGlobalRef(g_javaCmDevInfoClass);
+
+        _javaCaptureObj = NULL;
+        VideoCaptureAndroid::g_javaCmClass = NULL;
+        VideoCaptureAndroid::g_javaCmDevInfoClass = NULL;
+      }
+      else {
+        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
+                     "%s: Failed to find DeleteVideoCaptureAndroid id",
+                     __FUNCTION__);
+      }
     }
 
     // Detach this thread if it was attached
     if (isAttached) {
       if (g_jvm->DetachCurrentThread() < 0) {
         WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice,
                      _id, "%s: Could not detach thread from JVM",
                      __FUNCTION__);
@@ -471,16 +465,17 @@ VideoCaptureAndroid::~VideoCaptureAndroi
 int32_t VideoCaptureAndroid::StartCapture(
     const VideoCaptureCapability& capability) {
   CriticalSectionScoped cs(&_apiCs);
   WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                "%s: ", __FUNCTION__);
 
   bool isAttached = false;
   int32_t result = 0;
+  int32_t rotation = 0;
   // get the JNI env for this thread
   JNIEnv *env;
   if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
     // try to attach the thread and get the env
     // Attach this thread to JVM
     jint res = g_jvm->AttachCurrentThread(&env, NULL);
     if ((res < 0) || !env) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
@@ -524,16 +519,17 @@ int32_t VideoCaptureAndroid::StartCaptur
 
   // Detach this thread if it was attached
   if (isAttached) {
     if (g_jvm->DetachCurrentThread() < 0) {
       WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice, _id,
                    "%s: Could not detach thread from JVM", __FUNCTION__);
     }
   }
+
   if (result == 0) {
     _requestedCapability = capability;
     _captureStarted = true;
   }
   WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceVideoCapture, -1,
                "%s: result %d", __FUNCTION__, result);
   return result;
 }
@@ -605,71 +601,13 @@ int32_t VideoCaptureAndroid::CaptureSett
                "%s: ", __FUNCTION__);
   settings = _requestedCapability;
   return 0;
 }
 
 int32_t VideoCaptureAndroid::SetCaptureRotation(
     VideoCaptureRotation rotation) {
   CriticalSectionScoped cs(&_apiCs);
-  if (VideoCaptureImpl::SetCaptureRotation(rotation) == 0) {
-    if (!g_jvm)
-      return -1;
-
-    // get the JNI env for this thread
-    JNIEnv *env;
-    bool isAttached = false;
-
-    // get the JNI env for this thread
-    if (g_jvm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
-      // try to attach the thread and get the env
-      // Attach this thread to JVM
-      jint res = g_jvm->AttachCurrentThread(&env, NULL);
-      if ((res < 0) || !env) {
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture,
-                     _id,
-                     "%s: Could not attach thread to JVM (%d, %p)",
-                     __FUNCTION__, res, env);
-        return -1;
-      }
-      isAttached = true;
-    }
-
-    jmethodID cid = env->GetMethodID(g_javaCmClass, "SetPreviewRotation",
-                                     "(I)V");
-    if (cid == NULL) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
-                   "%s: could not get java SetPreviewRotation ID",
-                   __FUNCTION__);
-      return -1;
-    }
-    jint rotateFrame = 0;
-    switch (rotation) {
-      case kCameraRotate0:
-        rotateFrame = 0;
-        break;
-      case kCameraRotate90:
-        rotateFrame = 90;
-        break;
-      case kCameraRotate180:
-        rotateFrame = 180;
-        break;
-      case kCameraRotate270:
-        rotateFrame = 270;
-        break;
-    }
-    env->CallVoidMethod(_javaCaptureObj, cid, rotateFrame);
-
-    // Detach this thread if it was attached
-    if (isAttached) {
-      if (g_jvm->DetachCurrentThread() < 0) {
-        WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioDevice,
-                     _id, "%s: Could not detach thread from JVM",
-                     __FUNCTION__);
-      }
-    }
-
-  }
-  return 0;
+  return VideoCaptureImpl::SetCaptureRotation(rotation);
 }
 
 }  // namespace videocapturemodule
 }  // namespace webrtc
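
The RegisterNatives signature above widens from ([BIJ)V to ([BIIJ)V because
ProvideCameraFrame gains the rotation argument: [B is byte[], I is int, J is
long, and V is the void return. A minimal sketch of such a registration
(error handling elided; the callback body is a placeholder):

    #include <jni.h>

    // Matches the Java declaration:
    //   native void ProvideCameraFrame(byte[] data, int length, int rotation,
    //                                  long captureObject);
    static void JNICALL ProvideCameraFrame(JNIEnv* env, jobject,
                                           jbyteArray frame, jint length,
                                           jint rotation, jlong context) {
        // hand the frame and its rotation to the capture module
    }

    static jint RegisterCameraNatives(JNIEnv* env, jclass captureClass) {
        JNINativeMethod m = { "ProvideCameraFrame", "([BIIJ)V",
                              reinterpret_cast<void*>(&ProvideCameraFrame) };
        return env->RegisterNatives(captureClass, &m, 1);
    }
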
--- a/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/android/video_capture_android.h
@@ -41,25 +41,26 @@ class VideoCaptureAndroid : public Video
   virtual int32_t CaptureSettings(VideoCaptureCapability& settings);
   virtual int32_t SetCaptureRotation(VideoCaptureRotation rotation);
 
  protected:
   virtual ~VideoCaptureAndroid();
   static void JNICALL ProvideCameraFrame (JNIEnv * env,
                                           jobject,
                                           jbyteArray javaCameraFrame,
-                                          jint length, jlong context);
+                                          jint length,
+                                          jint rotation,
+                                          jlong context);
   DeviceInfoAndroid _capInfo;
   jobject _javaCaptureObj; // Java Camera object.
   VideoCaptureCapability _frameInfo;
   bool _captureStarted;
 
   static JavaVM* g_jvm;
   static jclass g_javaCmClass;
   static jclass g_javaCmDevInfoClass;
   //Static java object implementing the needed device info functions;
   static jobject g_javaCmDevInfoObject;
-  static jobject g_javaContext; // Java Application context
 };
 
 }  // namespace videocapturemodule
 }  // namespace webrtc
 #endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
--- a/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
@@ -49,17 +49,17 @@ int32_t DeviceInfoImpl::NumberOfCapabili
     if (!deviceUniqueIdUTF8)
         return -1;
 
     _apiLock.AcquireLockShared();
 
     if (_lastUsedDeviceNameLength == strlen((char*) deviceUniqueIdUTF8))
     {
         // Is it the same device that is asked for again.
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         if(strncasecmp((char*)_lastUsedDeviceName,
                        (char*) deviceUniqueIdUTF8,
                        _lastUsedDeviceNameLength)==0)
 #else
         if (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) == 0)
 #endif
@@ -86,17 +86,17 @@ int32_t DeviceInfoImpl::GetCapability(co
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                    "deviceUniqueIdUTF8 parameter not set in call to GetCapability");
         return -1;
     }
     ReadLockScoped cs(_apiLock);
 
     if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         || (strncasecmp((char*)_lastUsedDeviceName,
                         (char*) deviceUniqueIdUTF8,
                         _lastUsedDeviceNameLength)!=0))
 #else
         || (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #endif
@@ -150,17 +150,17 @@ int32_t DeviceInfoImpl::GetBestMatchedCa
 {
 
 
     if (!deviceUniqueIdUTF8)
         return -1;
 
     ReadLockScoped cs(_apiLock);
     if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
         || (strncasecmp((char*)_lastUsedDeviceName,
                         (char*) deviceUniqueIdUTF8,
                         _lastUsedDeviceNameLength)!=0))
 #else
         || (_strnicmp((char*) _lastUsedDeviceName,
                       (char*) deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #endif
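
The repeated #if blocks above exist only because the bounded
case-insensitive compare is spelled strncasecmp on POSIX systems (now
including the BSDs) and _strnicmp on MSVC. A sketch of the usual wrapper;
the patch itself keeps the inline #ifs rather than introducing one:

    #include <cstddef>
    #if defined(_WIN32)
    #include <string.h>   // _strnicmp
    #else
    #include <strings.h>  // strncasecmp (POSIX)
    #endif

    // Case-insensitive comparison of at most n bytes, portable across the
    // platforms this file targets (Windows, Linux, BSD, Mac).
    static bool EqualsIgnoreCaseN(const char* a, const char* b, size_t n) {
    #if defined(_WIN32)
        return _strnicmp(a, b, n) == 0;
    #else
        return strncasecmp(a, b, n) == 0;
    #endif
    }
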
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
@@ -14,17 +14,23 @@
 #include <unistd.h>
 #include <sys/ioctl.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 
 //v4l includes
+#if defined(__DragonFly__) || defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
 #include <linux/videodev2.h>
+#endif
 
 #include "ref_count.h"
 #include "trace.h"
 
 
 namespace webrtc
 {
 namespace videocapturemodule
@@ -89,19 +95,20 @@ int32_t DeviceInfoLinux::GetDeviceName(
 {
     WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCapture, _id, "%s", __FUNCTION__);
 
     // Travel through /dev/video [0-63]
     uint32_t count = 0;
     char device[20];
     int fd = -1;
     bool found = false;
-    for (int n = 0; n < 64; n++)
+    int device_index;
+    for (device_index = 0; device_index < 64; device_index++)
     {
-        sprintf(device, "/dev/video%d", n);
+        sprintf(device, "/dev/video%d", device_index);
         if ((fd = open(device, O_RDONLY)) != -1)
         {
             if (count == deviceNumber) {
                 // Found the device
                 found = true;
                 break;
             } else {
                 close(fd);
@@ -150,73 +157,84 @@ int32_t DeviceInfoLinux::GetDeviceName(
                    strlen((const char*) cap.bus_info));
         }
         else
         {
             WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                        "buffer passed is too small");
             return -1;
         }
+    } else {
+        // If there's no bus info to use for the unique ID, invent one; it must be repeatable.
+        if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%d", device_index) >=
+            deviceUniqueIdUTF8Length)
+        {
+            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
+                       "buffer passed is too small");
+            return -1;
+        }
     }
 
     return 0;
 }
 
 int32_t DeviceInfoLinux::CreateCapabilityMap(
                                         const char* deviceUniqueIdUTF8)
 {
     int fd;
     char device[32];
     bool found = false;
+    int device_index;
 
     const int32_t deviceUniqueIdUTF8Length =
                             (int32_t) strlen((char*) deviceUniqueIdUTF8);
     if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id, "Device name too long");
         return -1;
     }
     WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _id,
                "CreateCapabilityMap called for device %s", deviceUniqueIdUTF8);
 
     /* detect /dev/video [0-63] entries */
-    for (int n = 0; n < 64; ++n)
+    if (sscanf(deviceUniqueIdUTF8, "fake_%d", &device_index) == 1)
     {
-        sprintf(device, "/dev/video%d", n);
+        sprintf(device, "/dev/video%d", device_index);
         fd = open(device, O_RDONLY);
-        if (fd == -1)
-          continue;
+        if (fd != -1) {
+            found = true;
+        }
+    } else {
+        /* detect /dev/video [0-63] entries */
+        for (int n = 0; n < 64; ++n)
+        {
+            sprintf(device, "/dev/video%d", n);
+            fd = open(device, O_RDONLY);
+            if (fd == -1)
+                continue;
 
-        // query device capabilities
-        struct v4l2_capability cap;
-        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
-        {
-            if (cap.bus_info[0] != 0)
+            // query device capabilities
+            struct v4l2_capability cap;
+            if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
             {
-                if (strncmp((const char*) cap.bus_info,
-                            (const char*) deviceUniqueIdUTF8,
-                            strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
+                if (cap.bus_info[0] != 0)
                 {
-                    found = true;
-                    break; // fd matches with device unique id supplied
+                    if (strncmp((const char*) cap.bus_info,
+                                (const char*) deviceUniqueIdUTF8,
+                                strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
+                    {
+                        found = true;
+                        break; // fd matches with device unique id supplied
+                    }
                 }
+                // else it can't be a match: the fake_* test above would already have matched it
             }
-            else //match for device name
-            {
-                if (IsDeviceNameMatches((const char*) cap.card,
-                                        (const char*) deviceUniqueIdUTF8))
-                {
-                    found = true;
-                    break;
-                }
-            }
+            close(fd); // close since this is not the matching device
         }
-        close(fd); // close since this is not the matching device
     }
-
     if (!found)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id, "no matching device found");
         return -1;
     }
 
     // now fd will point to the matching device
     // reset old capability map
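
The two Linux device-info hunks above cooperate: when V4L2 reports an empty bus_info, GetDeviceName() synthesizes a repeatable "fake_<index>" unique ID, and CreateCapabilityMap() parses that ID back to reopen the same /dev/video node directly. A minimal standalone sketch of the round trip - the helper names are illustrative, not part of the patch:

    #include <cstdio>

    // Mirrors the snprintf length check in GetDeviceName(): false if truncated.
    bool MakeFakeUniqueId(char* buf, int bufLen, int deviceIndex) {
        return std::snprintf(buf, bufLen, "fake_%d", deviceIndex) < bufLen;
    }

    // Mirrors the sscanf test in CreateCapabilityMap().
    bool ParseFakeUniqueId(const char* id, int* deviceIndex) {
        return std::sscanf(id, "fake_%d", deviceIndex) == 1;
    }

    int main() {
        char id[16];
        int index = -1;
        if (MakeFakeUniqueId(id, sizeof(id), 3) && ParseFakeUniqueId(id, &index))
            std::printf("%s -> /dev/video%d\n", id, index);  // fake_3 -> /dev/video3
        return 0;
    }
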
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
@@ -7,23 +7,30 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include <sys/ioctl.h>
 #include <unistd.h>
 #include <sys/stat.h>
 #include <fcntl.h>
-#include <linux/videodev2.h>
 #include <errno.h>
 #include <stdio.h>
 #include <sys/mman.h>
 #include <string.h>
 
-#include <iostream>
+//v4l includes
+#if defined(__DragonFly__) || defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/videoio.h>
+#elif defined(__sun)
+#include <sys/videodev2.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
 #include <new>
 
 #include "ref_count.h"
 #include "trace.h"
 #include "thread_wrapper.h"
 #include "critical_section_wrapper.h"
 #include "video_capture_linux.h"
 
@@ -66,16 +73,23 @@ int32_t VideoCaptureModuleV4L2::Init(con
 {
     int len = strlen((const char*) deviceUniqueIdUTF8);
     _deviceUniqueId = new (std::nothrow) char[len + 1];
     if (_deviceUniqueId)
     {
         memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
     }
 
+    int device_index;
+    if (sscanf(deviceUniqueIdUTF8, "fake_%d", &device_index) == 1)
+    {
+        _deviceId = device_index;
+        return 0;
+    }
+
     int fd;
     char device[32];
     bool found = false;
 
     /* detect /dev/video [0-63] entries */
     int n;
     for (n = 0; n < 64; n++)
     {
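
The Init() fast path above trusts a parsed fake_<N> index outright; every other path still walks the device nodes. A trimmed POSIX sketch of that shared probe loop, with the capability matching left out:

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    int main() {
        // Probe /dev/video0 .. /dev/video63, as both Linux capture files do.
        for (int n = 0; n < 64; ++n) {
            char device[20];
            std::snprintf(device, sizeof(device), "/dev/video%d", n);
            int fd = open(device, O_RDONLY);
            if (fd == -1)
                continue;               // node absent or unreadable; next index
            std::printf("found %s\n", device);
            close(fd);                  // the real code keeps the matching fd open
        }
        return 0;
    }
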
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit.mm
@@ -10,16 +10,30 @@
 
 #include "video_capture_qtkit.h"
 #import "video_capture_qtkit_objc.h"
 #import "video_capture_qtkit_info_objc.h"
 #include "trace.h"
 #include "critical_section_wrapper.h"
 #include "../../video_capture_config.h"
 
+class nsAutoreleasePool {
+public:
+    nsAutoreleasePool()
+    {
+        mLocalPool = [[NSAutoreleasePool alloc] init];
+    }
+    ~nsAutoreleasePool()
+    {
+        [mLocalPool release];
+    }
+private:
+    NSAutoreleasePool *mLocalPool;
+};
+
 namespace webrtc
 {
 
 namespace videocapturemodule
 {
 
 VideoCaptureMacQTKit::VideoCaptureMacQTKit(const int32_t id) :
     VideoCaptureImpl(id),
@@ -36,16 +50,17 @@ VideoCaptureMacQTKit::VideoCaptureMacQTK
     memset(_currentDeviceNameUTF8, 0, MAX_NAME_LENGTH);
     memset(_currentDeviceUniqueIdUTF8, 0, MAX_NAME_LENGTH);
     memset(_currentDeviceProductUniqueIDUTF8, 0, MAX_NAME_LENGTH);
 }
 
 VideoCaptureMacQTKit::~VideoCaptureMacQTKit()
 {
 
+    nsAutoreleasePool localPool;
     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, _id,
                  "~VideoCaptureMacQTKit() called");
     if(_captureDevice)
     {
         [_captureDevice registerOwner:nil];
         [_captureDevice stopCapture];
         [_captureDevice release];
     }
@@ -66,16 +81,18 @@ int32_t VideoCaptureMacQTKit::Init(
         (int32_t) strlen((char*)iDeviceUniqueIdUTF8);
     if(nameLength>kVideoCaptureUniqueNameLength)
         return -1;
 
     // Store the device name
     _deviceUniqueId = new char[nameLength+1];
     memcpy(_deviceUniqueId, iDeviceUniqueIdUTF8,nameLength+1);
 
+    nsAutoreleasePool localPool;
+
     _captureDevice = [[VideoCaptureMacQTKitObjC alloc] init];
     if(NULL == _captureDevice)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, id,
                      "Failed to create an instance of "
                      "VideoCaptureMacQTKitObjC");
         return -1;
     }
@@ -159,32 +176,34 @@ int32_t VideoCaptureMacQTKit::Init(
                  "successfully Init VideoCaptureMacQTKit" );
     return 0;
 }
 
 int32_t VideoCaptureMacQTKit::StartCapture(
     const VideoCaptureCapability& capability)
 {
 
+    nsAutoreleasePool localPool;
     _captureWidth = capability.width;
     _captureHeight = capability.height;
     _captureFrameRate = capability.maxFPS;
     _captureDelay = 120;
 
     [_captureDevice setCaptureHeight:_captureHeight
                                width:_captureWidth
                            frameRate:_captureFrameRate];
 
     [_captureDevice startCapture];
     _isCapturing = true;
     return 0;
 }
 
 int32_t VideoCaptureMacQTKit::StopCapture()
 {
+    nsAutoreleasePool localPool;
     [_captureDevice stopCapture];
     _isCapturing = false;
     return 0;
 }
 
 bool VideoCaptureMacQTKit::CaptureStarted()
 {
     return _isCapturing;
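
Each QTKit entry point above now declares an nsAutoreleasePool on the stack because callers can arrive on threads that never set up a Cocoa pool. The pattern is plain RAII; a C++ analogue with a stand-in resource (names illustrative):

    #include <cstdio>

    class ScopedPool {
    public:
        ScopedPool()  { std::puts("pool pushed"); }  // [[NSAutoreleasePool alloc] init]
        ~ScopedPool() { std::puts("pool popped"); }  // [mLocalPool release]
        ScopedPool(const ScopedPool&) = delete;      // exactly one owner per scope
        ScopedPool& operator=(const ScopedPool&) = delete;
    };

    int CaptureEntryPoint(bool fail) {
        ScopedPool localPool;  // mirrors "nsAutoreleasePool localPool;"
        if (fail)
            return -1;         // the early return still pops the pool
        return 0;
    }

    int main() { return CaptureEntryPoint(false); }
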
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
@@ -9,54 +9,71 @@
  */
 
 #include "trace.h"
 #include "../../video_capture_config.h"
 #import "video_capture_qtkit_info_objc.h"
 
 #include "video_capture.h"
 
+class nsAutoreleasePool {
+public:
+    nsAutoreleasePool()
+    {
+        mLocalPool = [[NSAutoreleasePool alloc] init];
+    }
+    ~nsAutoreleasePool()
+    {
+        [mLocalPool release];
+    }
+private:
+    NSAutoreleasePool *mLocalPool;
+};
+
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 VideoCaptureMacQTKitInfo::VideoCaptureMacQTKitInfo(const int32_t id) :
     DeviceInfoImpl(id)
 {
+    nsAutoreleasePool localPool;
     _captureInfo = [[VideoCaptureMacQTKitInfoObjC alloc] init];
 }
 
 VideoCaptureMacQTKitInfo::~VideoCaptureMacQTKitInfo()
 {
+    nsAutoreleasePool localPool;
     [_captureInfo release];
-
 }
 
 int32_t VideoCaptureMacQTKitInfo::Init()
 {
 
     return 0;
 }
 
 uint32_t VideoCaptureMacQTKitInfo::NumberOfDevices()
 {
 
+    nsAutoreleasePool localPool;
     uint32_t captureDeviceCount =
         [[_captureInfo getCaptureDeviceCount]intValue];
     return captureDeviceCount;
 
 }
 
 int32_t VideoCaptureMacQTKitInfo::GetDeviceName(
     uint32_t deviceNumber, char* deviceNameUTF8,
     uint32_t deviceNameLength, char* deviceUniqueIdUTF8,
     uint32_t deviceUniqueIdUTF8Length, char* productUniqueIdUTF8,
     uint32_t productUniqueIdUTF8Length)
 {
+    nsAutoreleasePool localPool;
     int errNum = [[_captureInfo getDeviceNamesFromIndex:deviceNumber
                    DefaultName:deviceNameUTF8 WithLength:deviceNameLength
                    AndUniqueID:deviceUniqueIdUTF8
                    WithLength:deviceUniqueIdUTF8Length
                    AndProductID:productUniqueIdUTF8
                    WithLength:productUniqueIdUTF8Length]intValue];
     return errNum;
 }
@@ -100,16 +117,17 @@ int32_t VideoCaptureMacQTKitInfo::GetBes
 }
 
 int32_t VideoCaptureMacQTKitInfo::DisplayCaptureSettingsDialogBox(
     const char* deviceUniqueIdUTF8,
     const char* dialogTitleUTF8, void* parentWindow,
     uint32_t positionX, uint32_t positionY)
 {
 
+    nsAutoreleasePool localPool;
     return [[_captureInfo
              displayCaptureSettingsDialogBoxWithDevice:deviceUniqueIdUTF8
              AndTitle:dialogTitleUTF8
              AndParentWindow:parentWindow AtX:positionX AndY:positionY]
              intValue];
 }
 
 int32_t VideoCaptureMacQTKitInfo::CreateCapabilityMap(
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h
@@ -19,17 +19,16 @@
 #import <QTKit/QTKit.h>
 #import <Foundation/Foundation.h>
 #include "video_capture_qtkit_utility.h"
 #include "video_capture_qtkit_info.h"
 
 @interface VideoCaptureMacQTKitInfoObjC : NSObject{
     bool                                _OSSupportedInfo;
     NSArray*                            _captureDevicesInfo;
-    NSAutoreleasePool*                    _poolInfo;
     int                                    _captureDeviceCountInfo;
 
 }
 
 /**************************************************************************
  *
  *   The following functions are considered to be private
  *
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.mm
@@ -89,21 +89,25 @@ using namespace webrtc;
         return [NSNumber numberWithInt:0];
     }
 
     if(index >= (uint32_t)_captureDeviceCountInfo)
     {
         return [NSNumber numberWithInt:-1];
     }
 
-    QTCaptureDevice* tempCaptureDevice =
-        (QTCaptureDevice*)[_captureDevicesInfo objectAtIndex:index];
+    if ([_captureDevicesInfo count] <= index)
+    {
+      return [NSNumber numberWithInt:-1];
+    }
+
+    QTCaptureDevice* tempCaptureDevice = (QTCaptureDevice*)[_captureDevicesInfo objectAtIndex:index];
     if(!tempCaptureDevice)
     {
-        return [NSNumber numberWithInt:-1];
+      return [NSNumber numberWithInt:-1];
     }
 
     memset(deviceName, 0, deviceNameLength);
     memset(deviceUniqueID, 0, deviceUniqueIDLength);
 
     bool successful = NO;
 
     NSString* tempString = [tempCaptureDevice localizedDisplayName];
@@ -133,17 +137,16 @@ using namespace webrtc;
 
 - (NSNumber*)initializeVariables
 {
     if(NO == _OSSupportedInfo)
     {
         return [NSNumber numberWithInt:0];
     }
 
-    _poolInfo = [[NSAutoreleasePool alloc]init];
     _captureDeviceCountInfo = 0;
     [self getCaptureDevices];
 
     return [NSNumber numberWithInt:0];
 }
 
 // ***** Checks to see if the QTCaptureSession framework is available in the OS
 // ***** If it is not, isOSSupprted = NO
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_objc.mm
@@ -146,17 +146,27 @@ using namespace videocapturemodule;
   [_captureSession startRunning];
   _capturing = YES;
 }
 
 - (void)stopCapture {
   if (!_capturing)
     return;
 
-  [_captureSession stopRunning];
+  // This method is often called on a secondary thread, which means the
+  // following can sometimes run "too early", causing crashes and/or odd
+  // initialization errors.  On OS X 10.7 and 10.8, the CoreMediaIO method
+  // CMIOUninitializeGraph() is called from -[QTCaptureSession stopRunning].
+  // If that happens too early, low-level session data gets uninitialized
+  // before low-level code is finished using it.  The fix is to make
+  // stopRunning always run on the main thread.  See bug 837539.
+  [_captureSession performSelectorOnMainThread:@selector(stopRunning)
+                   withObject:nil
+                   waitUntilDone:NO];
   _capturing = NO;
 }
 
 #pragma mark Private methods
 
 - (BOOL)initializeVariables {
   if (NSClassFromString(@"QTCaptureSession") == nil)
     return NO;
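
The stopCapture fix above posts -stopRunning to the main thread instead of invoking it from the delivering thread, and does not wait for it (waitUntilDone:NO). A small C++ analogue of that post-don't-call pattern, assuming a main-thread task queue that the application drains; all names here are illustrative:

    #include <functional>
    #include <mutex>
    #include <queue>

    class MainThreadQueue {
    public:
        void Post(std::function<void()> task) {  // callable from any thread
            std::lock_guard<std::mutex> lock(mutex_);
            tasks_.push(std::move(task));
        }
        void Drain() {  // called only on the main thread
            for (;;) {
                std::function<void()> task;
                {
                    std::lock_guard<std::mutex> lock(mutex_);
                    if (tasks_.empty())
                        return;
                    task = std::move(tasks_.front());
                    tasks_.pop();
                }
                task();  // teardown runs with main-thread state intact
            }
        }
    private:
        std::mutex mutex_;
        std::queue<std::function<void()>> tasks_;
    };

Post() corresponds to performSelectorOnMainThread: with waitUntilDone:NO - the capture thread queues the stop and returns immediately.
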
--- a/media/webrtc/trunk/webrtc/modules/video_capture/mac/video_capture_mac.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/mac/video_capture_mac.mm
@@ -25,18 +25,18 @@
 #include <QuickTime/QuickTime.h>
 
 // 10.4 support must be decided at runtime. We simply pick the "work" classes
 // at compile time: one set for QTKit, one for QuickTime.
 #if __MAC_OS_X_VERSION_MIN_REQUIRED == __MAC_10_4 // QuickTime version
 #include "QuickTime/video_capture_quick_time.h"
 #include "QuickTime/video_capture_quick_time_info.h"
 #else
-#include "QTKit/video_capture_qtkit.h"
-#include "QTKit/video_capture_qtkit_info.h"
+#include "qtkit/video_capture_qtkit.h"
+#include "qtkit/video_capture_qtkit_info.h"
 #endif
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 // static
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.gypi
@@ -11,20 +11,27 @@
     {
       'target_name': 'video_capture_module',
       'type': 'static_library',
       'dependencies': [
         'webrtc_utility',
         '<(webrtc_root)/common_video/common_video.gyp:common_video',
         '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
       ],
+
+      'cflags_mozilla': [
+        '$(NSPR_CFLAGS)',
+      ],
+
       'include_dirs': [
         'include',
         '../interface',
         '<(webrtc_root)/common_video/libyuv/include',
+        # added for Mozilla for use_system_libjpeg
+        '$(DIST)/include',
       ],
       'sources': [
         'device_info_impl.cc',
         'device_info_impl.h',
         'include/video_capture.h',
         'include/video_capture_defines.h',
         'include/video_capture_factory.h',
         'video_capture_config.h',
@@ -36,17 +43,17 @@
       'conditions': [
         ['include_internal_video_capture==0', {
           'sources': [
             'external/device_info_external.cc',
             'external/video_capture_external.cc',
           ],
         }, {  # include_internal_video_capture == 1
           'conditions': [
-            ['OS=="linux"', {
+            ['include_v4l2_video_capture==1', {
               'include_dirs': [
                 'linux',
               ],
               'sources': [
                 'linux/device_info_linux.cc',
                 'linux/device_info_linux.h',
                 'linux/video_capture_linux.cc',
                 'linux/video_capture_linux.h',
@@ -72,18 +79,22 @@
                 'xcode_settings': {
                   'OTHER_LDFLAGS': [
                     '-framework QTKit',
                   ],
                 },
               },
             }],  # mac
             ['OS=="win"', {
-              'dependencies': [
-                '<(DEPTH)/third_party/winsdk_samples/winsdk_samples.gyp:directshow_baseclasses',
+              'conditions': [
+                ['build_with_mozilla==0', {
+                  'dependencies': [
+                    '<(DEPTH)/third_party/winsdk_samples/winsdk_samples.gyp:directshow_baseclasses',
+                  ],
+                }],
               ],
               'include_dirs': [
                 'windows',
               ],
               'sources': [
                 'windows/device_info_ds.cc',
                 'windows/device_info_ds.h',
                 'windows/device_info_mf.cc',
@@ -92,16 +103,20 @@
                 'windows/help_functions_ds.h',
                 'windows/sink_filter_ds.cc',
                 'windows/sink_filter_ds.h',
                 'windows/video_capture_ds.cc',
                 'windows/video_capture_ds.h',
                 'windows/video_capture_factory_windows.cc',
                 'windows/video_capture_mf.cc',
                 'windows/video_capture_mf.h',
+                'windows/BasePin.cpp',
+                'windows/BaseFilter.cpp',
+                'windows/BaseInputPin.cpp',
+                'windows/MediaType.cpp',
               ],
               'link_settings': {
                 'libraries': [
                   '-lStrmiids.lib',
                 ],
               },
             }],  # win
             ['OS=="android"', {
@@ -135,29 +150,33 @@
           'include_dirs': [
             'include',
           ],
           'sources': [
             'test/video_capture_unittest.cc',
             'test/video_capture_main_mac.mm',
           ],
           'conditions': [
-            ['OS=="mac" or OS=="linux"', {
+            ['OS!="win" and OS!="android"', {
               'cflags': [
                 '-Wno-write-strings',
               ],
               'ldflags': [
                 '-lpthread -lm',
               ],
             }],
+            ['include_v4l2_video_capture==1', {
+              'libraries': [
+                '-lXext',
+                '-lX11',
+              ],
+            }],
             ['OS=="linux"', {
               'libraries': [
                 '-lrt',
-                '-lXext',
-                '-lX11',
               ],
             }],
             ['OS=="mac"', {
               'dependencies': [
                 # Link with a special main for mac so we can use the webcam.
                 '<(webrtc_root)/test/test.gyp:test_support_main_threaded_mac',
               ],
               'xcode_settings': {
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
@@ -11,17 +11,16 @@
 #include "device_info_ds.h"
 
 #include "../video_capture_config.h"
 #include "../video_capture_delay.h"
 #include "help_functions_ds.h"
 #include "ref_count.h"
 #include "trace.h"
 
-#include <Streams.h>
 #include <Dvdmedia.h>
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 const int32_t NoWindowsCaptureDelays = 1;
 const DelayValues WindowsCaptureDelays[NoWindowsCaptureDelays] = {
@@ -37,16 +36,33 @@ const DelayValues WindowsCaptureDelays[N
     {160,120,109},
     {1280,720,166},
     {960,544,126},
     {800,448,120},
     {800,600,127}
   },
 };
 
+
+void _FreeMediaType(AM_MEDIA_TYPE& mt)
+{
+    if (mt.cbFormat != 0)
+    {
+        CoTaskMemFree((PVOID)mt.pbFormat);
+        mt.cbFormat = 0;
+        mt.pbFormat = NULL;
+    }
+    if (mt.pUnk != NULL)
+    {
+        // pUnk should not be used.
+        mt.pUnk->Release();
+        mt.pUnk = NULL;
+    }
+}
+
 // static
 DeviceInfoDS* DeviceInfoDS::Create(const int32_t id)
 {
     DeviceInfoDS* dsInfo = new DeviceInfoDS(id);
     if (!dsInfo || dsInfo->Init() != 0)
     {
         delete dsInfo;
         dsInfo = NULL;
@@ -565,17 +581,17 @@ int32_t DeviceInfoDS::CreateCapabilityMa
                 capability->interlaced = h->dwInterlaceFlags
                                         & (AMINTERLACE_IsInterlaced
                                            | AMINTERLACE_DisplayModeBobOnly);
                 avgTimePerFrame = h->AvgTimePerFrame;
             }
 
             if (hrVC == S_OK)
             {
-                LONGLONG *frameDurationList;
+                LONGLONG *frameDurationList = NULL;
                 LONGLONG maxFPS; 
                 long listSize;
                 SIZE size;
                 size.cx = capability->width;
                 size.cy = capability->height;
 
                 // GetMaxAvailableFrameRate doesn't return max frame rate always
                 // eg: Logitech Notebook. This may be due to a bug in that API
@@ -584,17 +600,19 @@ int32_t DeviceInfoDS::CreateCapabilityMa
                 // the max fps.
                 hrVC = videoControlConfig->GetFrameRateList(outputCapturePin,
                                                             tmp, size,
                                                             &listSize,
                                                             &frameDurationList);
 
                 // On some odd cameras, you may get a 0 for duration.
                 // GetMaxOfFrameArray returns the lowest duration (highest FPS)
-                if (hrVC == S_OK && listSize > 0 &&
+                // Initialize and check the returned list for null since
+                // some broken drivers don't modify it.
+                if (hrVC == S_OK && listSize > 0 && frameDurationList &&
                     0 != (maxFPS = GetMaxOfFrameArray(frameDurationList, 
                                                       listSize)))
                 {
                     capability->maxFPS = static_cast<int> (10000000
                                                            / maxFPS);
                     capability->supportFrameRateControl = true;
                 }
                 else // use existing method
@@ -679,17 +697,17 @@ int32_t DeviceInfoDS::CreateCapabilityMa
                                                       capability->width,
                                                       capability->height);
             _captureCapabilities.Insert(index++, capability);
             WEBRTC_TRACE( webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _id,
                          "Camera capability, width:%d height:%d type:%d fps:%d",
                          capability->width, capability->height,
                          capability->rawType, capability->maxFPS);
         }
-        DeleteMediaType(pmt);
+        _FreeMediaType(*pmt);
+        CoTaskMemFree((PVOID)pmt); // DeleteMediaType also freed the struct itself
         pmt = NULL;
     }
     RELEASE_AND_CLEAR(streamConfig);
     RELEASE_AND_CLEAR(videoControlConfig);
     RELEASE_AND_CLEAR(outputCapturePin);
     RELEASE_AND_CLEAR(captureDevice); // Release the capture device
 
     // Store the new used device name
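
The DirectShow changes above are defensive in the same way twice: frameDurationList is initialized to NULL and re-checked after GetFrameRateList(), because some broken drivers return S_OK without writing the out-parameter. A portable sketch of that pattern, with FakeDriverCall standing in for the COM call:

    #include <cstdio>

    // Simulated broken driver: reports success but never writes the list.
    int FakeDriverCall(long* listSize, long long** list) {
        *listSize = 4;  // claims four entries...
        (void)list;     // ...but leaves *list untouched
        return 0;       // "S_OK"
    }

    int main() {
        long long* frameDurationList = nullptr;  // initialize, as the patch does
        long listSize = 0;
        int hr = FakeDriverCall(&listSize, &frameDurationList);
        // Check the result code, the count, and the pointer before using the list.
        if (hr == 0 && listSize > 0 && frameDurationList)
            std::printf("use the driver-supplied frame rates\n");
        else
            std::printf("fall back to the default frame-rate path\n");
        return 0;
    }
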
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
@@ -16,49 +16,52 @@
 #include <Dvdmedia.h> // VIDEOINFOHEADER2
 #include <initguid.h>
 
 #define DELETE_RESET(p) { delete (p) ; (p) = NULL ;}
 
 DEFINE_GUID(CLSID_SINKFILTER, 0x88cdbbdc, 0xa73b, 0x4afa, 0xac, 0xbf, 0x15, 0xd5,
             0xe2, 0xce, 0x12, 0xc3);
 
+using namespace mozilla::media;
+using namespace mozilla;
+
 namespace webrtc
 {
 namespace videocapturemodule
 {
 
 typedef struct tagTHREADNAME_INFO
 {
    DWORD dwType;        // must be 0x1000
    LPCSTR szName;       // pointer to name (in user addr space)
    DWORD dwThreadID;    // thread ID (-1=caller thread)
    DWORD dwFlags;       // reserved for future use, must be zero
 } THREADNAME_INFO;
 
 CaptureInputPin::CaptureInputPin (int32_t moduleId,
                             IN TCHAR * szName,
                             IN CaptureSinkFilter* pFilter,
-                            IN CCritSec * pLock,
+                            IN CriticalSection * pLock,
                             OUT HRESULT * pHr,
                             IN LPCWSTR pszName)
-    : CBaseInputPin (szName, pFilter, pLock, pHr, pszName),
+    : BaseInputPin (szName, pFilter, pLock, pHr, pszName),
       _requestedCapability(),
       _resultingCapability()
 {
     _moduleId=moduleId;
     _threadHandle = NULL;
 }
 
 CaptureInputPin::~CaptureInputPin()
 {
 }
 
 HRESULT
-CaptureInputPin::GetMediaType (IN int iPosition, OUT CMediaType * pmt)
+CaptureInputPin::GetMediaType (IN int iPosition, OUT MediaType * pmt)
 {
     // reset the thread handle
     _threadHandle = NULL;
 
     if(iPosition < 0)
     return E_INVALIDARG;
 
     VIDEOINFOHEADER* pvi = (VIDEOINFOHEADER*) pmt->AllocFormatBuffer(
@@ -156,17 +159,17 @@ CaptureInputPin::GetMediaType (IN int iP
     WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCapture, _moduleId,
              "GetMediaType position %d, width %d, height %d, biCompression 0x%x",
              iPosition, _requestedCapability.width,
              _requestedCapability.height,pvi->bmiHeader.biCompression);
     return NOERROR;
 }
 
 HRESULT
-CaptureInputPin::CheckMediaType ( IN const CMediaType * pMediaType)
+CaptureInputPin::CheckMediaType ( IN const MediaType * pMediaType)
 {
     // reset the thread handle
     _threadHandle = NULL;
 
     const GUID *type = pMediaType->Type();
     if (*type != MEDIATYPE_Video)
     return E_INVALIDARG;
 
@@ -314,18 +317,18 @@ CaptureInputPin::CheckMediaType ( IN con
     return E_INVALIDARG;
 }
 
 HRESULT
 CaptureInputPin::Receive ( IN IMediaSample * pIMediaSample )
 {
     HRESULT hr = S_OK;
 
-    ASSERT (m_pFilter);
-    ASSERT (pIMediaSample);
+    assert (mFilter);
+    assert (pIMediaSample);
 
     // get the thread handle of the delivering thread inc its priority
     if( _threadHandle == NULL)
     {
         HANDLE handle= GetCurrentThread();
         SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST);
         _threadHandle = handle;
         // See http://msdn.microsoft.com/en-us/library/xcb2z8hs(VS.71).aspx for details on the code
@@ -343,37 +346,37 @@ CaptureInputPin::Receive ( IN IMediaSamp
                             (DWORD_PTR*)&info );
         }
         __except (EXCEPTION_CONTINUE_EXECUTION)
         {
         }
 
     }
 
-    reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->LockReceive();
-    hr = CBaseInputPin::Receive (pIMediaSample);
+    reinterpret_cast <CaptureSinkFilter *>(mFilter)->LockReceive();
+    hr = BaseInputPin::Receive (pIMediaSample);
 
     if (SUCCEEDED (hr))
     {
         const int32_t length = pIMediaSample->GetActualDataLength();
 
         unsigned char* pBuffer = NULL;
         if(S_OK != pIMediaSample->GetPointer(&pBuffer))
         {
-            reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
+            reinterpret_cast <CaptureSinkFilter *>(mFilter)->UnlockReceive();
             return S_FALSE;
         }
 
         // NOTE: filter unlocked within Send call
-        reinterpret_cast <CaptureSinkFilter *> (m_pFilter)->ProcessCapturedFrame(
+        reinterpret_cast <CaptureSinkFilter *> (mFilter)->ProcessCapturedFrame(
                                         pBuffer,length,_resultingCapability);
     }
     else
     {
-        reinterpret_cast <CaptureSinkFilter *>(m_pFilter)->UnlockReceive();
+        reinterpret_cast <CaptureSinkFilter *>(mFilter)->UnlockReceive();
     }
 
     return hr;
 }
 
 // called under LockReceive
 HRESULT CaptureInputPin::SetMatchingMediaType(
                                     const VideoCaptureCapability& capability)
@@ -384,23 +387,25 @@ HRESULT CaptureInputPin::SetMatchingMedi
     return S_OK;
 }
 //  ----------------------------------------------------------------------------
 CaptureSinkFilter::CaptureSinkFilter (IN TCHAR * tszName,
                               IN LPUNKNOWN punk,
                               OUT HRESULT * phr,
                               VideoCaptureExternal& captureObserver,
                               int32_t moduleId)
-    : CBaseFilter(tszName,punk,& m_crtFilter,CLSID_SINKFILTER),
+    : BaseFilter(tszName, CLSID_SINKFILTER),
+      m_crtFilter("CaptureSinkFilter::m_crtFilter"),
+      m_crtRecv("CaptureSinkFilter::m_crtRecv"),
       m_pInput(NULL),
       _captureObserver(captureObserver),
       _moduleId(moduleId)
 {
     (* phr) = S_OK;
+    mRefCnt = 0; // the COM refcount must start at zero before any AddRef
-    m_pInput = new CaptureInputPin(moduleId,NAME ("VideoCaptureInputPin"),
+    m_pInput = new CaptureInputPin(moduleId, L"VideoCaptureInputPin",
                                    this,
                                    & m_crtFilter,
                                    phr, L"VideoCapture");
     if (m_pInput == NULL || FAILED (* phr))
     {
         (* phr) = FAILED (* phr) ? (* phr) : E_OUTOFMEMORY;
         goto cleanup;
     }
@@ -413,87 +418,87 @@ CaptureSinkFilter::~CaptureSinkFilter()
     delete m_pInput;
 }
 
 int CaptureSinkFilter::GetPinCount()
 {
     return 1;
 }
 
-CBasePin *
+BasePin *
 CaptureSinkFilter::GetPin(IN int Index)
 {
-    CBasePin * pPin;
+    BasePin * pPin;
     LockFilter ();
     if (Index == 0)
     {
         pPin = m_pInput;
     }
     else
     {
         pPin = NULL;
     }
     UnlockFilter ();
     return pPin;
 }
 
 STDMETHODIMP CaptureSinkFilter::Pause()
 {
     LockFilter();
-    if (m_State == State_Stopped)
+    if (mState == State_Stopped)
     {
         //  change the state, THEN activate the input pin
-        m_State = State_Paused;
+        mState = State_Paused;
         if (m_pInput && m_pInput->IsConnected())
         {
             m_pInput->Active();
         }
         if (m_pInput && !m_pInput->IsConnected())
         {
-            m_State = State_Running;
+            mState = State_Running;
         }
     }
-    else if (m_State == State_Running)
+    else if (mState == State_Running)
     {
-        m_State = State_Paused;
+        mState = State_Paused;
     }
     UnlockFilter();
     return S_OK;
 }
 
 STDMETHODIMP CaptureSinkFilter::Stop()
 {
     LockReceive();
     LockFilter();
 
     //  set the state
-    m_State = State_Stopped;
+    mState = State_Stopped;
 
     //  inactivate the pins
     if (m_pInput)
         m_pInput->Inactive();
 
     UnlockFilter();
     UnlockReceive();
     return S_OK;
 }
 
 void CaptureSinkFilter::SetFilterGraph(IGraphBuilder* graph)
 {
     LockFilter();
-    m_pGraph = graph;
+    mGraph = graph;
     UnlockFilter();
 }
 
 void CaptureSinkFilter::ProcessCapturedFrame(unsigned char* pBuffer,
                                          int32_t length,
                                          const VideoCaptureCapability& frameInfo)
 {
     //  we have the receiver lock
-    if (m_State == State_Running)
+    if (mState == State_Running)
     {
         _captureObserver.IncomingFrame(pBuffer, length, frameInfo);
 
         // trying to hold it since it's only a memcpy
         // IMPROVEMENT if this work move critsect
         UnlockReceive();
         return;
     }
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
@@ -6,95 +6,117 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
 #define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
 
-#include <Streams.h> // Include base DS filter header files
-
 #include "video_capture_defines.h"
+#include "BaseInputPin.h"
+#include "BaseFilter.h"
+#include "MediaType.h"
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 //forward declaration
 
 class CaptureSinkFilter;
 /**
  *	input pin for camera input
  *
  */
-class CaptureInputPin: public CBaseInputPin
+class CaptureInputPin: public mozilla::media::BaseInputPin
 {
 public:
     int32_t _moduleId;
 
     VideoCaptureCapability _requestedCapability;
     VideoCaptureCapability _resultingCapability;
     HANDLE _threadHandle;
 
     CaptureInputPin(int32_t moduleId,
                     IN TCHAR* szName,
                     IN CaptureSinkFilter* pFilter,
-                    IN CCritSec * pLock,
+                    IN mozilla::CriticalSection * pLock,
                     OUT HRESULT * pHr,
                     IN LPCWSTR pszName);
     virtual ~CaptureInputPin();
 
-    HRESULT GetMediaType (IN int iPos, OUT CMediaType * pmt);
-    HRESULT CheckMediaType (IN const CMediaType * pmt);
+    HRESULT GetMediaType (IN int iPos, OUT mozilla::media::MediaType * pmt);
+    HRESULT CheckMediaType (IN const mozilla::media::MediaType * pmt);
     STDMETHODIMP Receive (IN IMediaSample *);
     HRESULT SetMatchingMediaType(const VideoCaptureCapability& capability);
 };
 
-class CaptureSinkFilter: public CBaseFilter
+class CaptureSinkFilter: public mozilla::media::BaseFilter
 {
 
 public:
     CaptureSinkFilter(IN TCHAR * tszName,
                       IN LPUNKNOWN punk,
                       OUT HRESULT * phr,
                       VideoCaptureExternal& captureObserver,
                       int32_t moduleId);
     virtual ~CaptureSinkFilter();
 
     //  --------------------------------------------------------------------
     //  class methods
 
     void ProcessCapturedFrame(unsigned char* pBuffer, int32_t length,
                               const VideoCaptureCapability& frameInfo);
     //  explicit receiver lock acquisition and release
-    void LockReceive()  { m_crtRecv.Lock();}
-    void UnlockReceive() {m_crtRecv.Unlock();}
+    void LockReceive()  { m_crtRecv.Enter(); }
+    void UnlockReceive() { m_crtRecv.Leave(); }
+
     //  explicit filter lock acquisition and release
-    void LockFilter() {m_crtFilter.Lock();}
-    void UnlockFilter() { m_crtFilter.Unlock(); }
+    void LockFilter() { m_crtFilter.Enter(); }
+    void UnlockFilter() { m_crtFilter.Leave(); }
     void SetFilterGraph(IGraphBuilder* graph); // Used if EVR
 
     //  --------------------------------------------------------------------
     //  COM interfaces
-DECLARE_IUNKNOWN    ;
+    STDMETHODIMP QueryInterface(REFIID aIId, void **aInterface)
+    {
+      return mozilla::media::BaseFilter::QueryInterface(aIId, aInterface);
+    }
+    STDMETHODIMP_(ULONG) AddRef()
+    {
+      return ::InterlockedIncrement(&mRefCnt);
+    }
+
+    STDMETHODIMP_(ULONG) Release()
+    {
+      unsigned long newRefCnt = ::InterlockedDecrement(&mRefCnt);
+
+      if (!newRefCnt) {
+        delete this;
+      }
+
+      return newRefCnt;
+    }
+
     STDMETHODIMP SetMatchingMediaType(const VideoCaptureCapability& capability);
 
     //  --------------------------------------------------------------------
     //  CBaseFilter methods
     int GetPinCount ();
-    CBasePin * GetPin ( IN int Index);
+    mozilla::media::BasePin * GetPin ( IN int Index);
     STDMETHODIMP Pause ();
     STDMETHODIMP Stop ();
     STDMETHODIMP GetClassID ( OUT CLSID * pCLSID);
     //  --------------------------------------------------------------------
     //  class factory calls this
-    static CUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
+    static IUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
 private:
-    CCritSec m_crtFilter; //  filter lock
-    CCritSec m_crtRecv;  //  receiver lock; always acquire before filter lock
+    mozilla::CriticalSection m_crtFilter; //  filter lock
+    mozilla::CriticalSection m_crtRecv;  //  receiver lock; always acquire before filter lock
     CaptureInputPin * m_pInput;
     VideoCaptureExternal& _captureObserver;
     int32_t _moduleId;
+    unsigned long mRefCnt;
 };
 } // namespace videocapturemodule
 } // namespace webrtc
 #endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
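
The header comment above pins the lock hierarchy: the receiver lock is always taken before the filter lock (see Stop() in the .cc, which does LockReceive() then LockFilter()). A sketch that encodes the ordering in a scoped guard so call sites cannot invert it, using std::mutex as a stand-in for mozilla::CriticalSection:

    #include <mutex>

    struct FilterLocks {
        std::mutex recv;    // receiver lock: acquired first
        std::mutex filter;  // filter lock: acquired second
    };

    class ScopedRecvThenFilter {
    public:
        explicit ScopedRecvThenFilter(FilterLocks& locks)
            : recv_(locks.recv), filter_(locks.filter) {}
    private:
        // Members lock in declaration order and unlock in reverse on scope exit.
        std::lock_guard<std::mutex> recv_;
        std::lock_guard<std::mutex> filter_;
    };

    void StopLikeOperation(FilterLocks& locks) {
        ScopedRecvThenFilter guard(locks);  // mirrors LockReceive(); LockFilter();
        // ... set mState, deactivate the pin ...
    }                                       // UnlockFilter(); UnlockReceive();
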
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
@@ -25,16 +25,25 @@
         '<(webrtc_root)/modules/video_coding/codecs/interface',
         '<(webrtc_root)/modules/interface',
       ],
       'conditions': [
         ['build_libvpx==1', {
           'dependencies': [
             '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
           ],
+        },{
+          'include_dirs': [
+            '$(DIST)/include',
+          ],
+          'link_settings': {
+            'libraries': [
+              '$(LIBVPX_OBJ)/libvpx.a',
+            ],
+          },
         }],
       ],
       'direct_dependent_settings': {
         'include_dirs': [
           'include',
           '<(webrtc_root)/common_video/interface',
           '<(webrtc_root)/modules/video_coding/codecs/interface',
         ],
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.cc
@@ -51,19 +51,19 @@ void
 VCMTimestampExtrapolator::Reset()
 {
     WriteLockScoped wl(*_rwLock);
     _startMs = _clock->TimeInMilliseconds();
     _prevMs = _startMs;
     _firstTimestamp = 0;
     _w[0] = 90.0;
     _w[1] = 0;
-    _P[0][0] = 1;
-    _P[1][1] = _P11;
-    _P[0][1] = _P[1][0] = 0;
+    _pp[0][0] = 1;
+    _pp[1][1] = _P11;
+    _pp[0][1] = _pp[1][0] = 0;
     _firstAfterReset = true;
     _prevUnwrappedTimestamp = -1;
     _prevWrapTimestamp = -1;
     _wrapArounds = 0;
     _packetCount = 0;
     _detectorAccumulatorPos = 0;
     _detectorAccumulatorNeg = 0;
 }
@@ -116,37 +116,37 @@ VCMTimestampExtrapolator::Update(int64_t
         (static_cast<double>(unwrapped_ts90khz) - _firstTimestamp) -
         static_cast<double>(tMs) * _w[0] - _w[1];
     if (DelayChangeDetection(residual, trace) &&
         _packetCount >= _startUpFilterDelayInPackets)
     {
         // A sudden change of average network delay has been detected.
         // Force the filter to adjust its offset parameter by changing
         // the offset uncertainty. Don't do this during startup.
-        _P[1][1] = _P11;
+        _pp[1][1] = _P11;
     }
     //T = [t(k) 1]';
     //that = T'*w;
     //K = P*T/(lambda + T'*P*T);
     double K[2];
-    K[0] = _P[0][0] * tMs + _P[0][1];
-    K[1] = _P[1][0] * tMs + _P[1][1];
+    K[0] = _pp[0][0] * tMs + _pp[0][1];
+    K[1] = _pp[1][0] * tMs + _pp[1][1];
     double TPT = _lambda + tMs * K[0] + K[1];
     K[0] /= TPT;
     K[1] /= TPT;
     //w = w + K*(ts(k) - that);
     _w[0] = _w[0] + K[0] * residual;
     _w[1] = _w[1] + K[1] * residual;
     //P = 1/lambda*(P - K*T'*P);
-    double p00 = 1 / _lambda * (_P[0][0] - (K[0] * tMs * _P[0][0] + K[0] * _P[1][0]));
-    double p01 = 1 / _lambda * (_P[0][1] - (K[0] * tMs * _P[0][1] + K[0] * _P[1][1]));
-    _P[1][0] = 1 / _lambda * (_P[1][0] - (K[1] * tMs * _P[0][0] + K[1] * _P[1][0]));
-    _P[1][1] = 1 / _lambda * (_P[1][1] - (K[1] * tMs * _P[0][1] + K[1] * _P[1][1]));
-    _P[0][0] = p00;
-    _P[0][1] = p01;
+    double p00 = 1 / _lambda * (_pp[0][0] - (K[0] * tMs * _pp[0][0] + K[0] * _pp[1][0]));
+    double p01 = 1 / _lambda * (_pp[0][1] - (K[0] * tMs * _pp[0][1] + K[0] * _pp[1][1]));
+    _pp[1][0] = 1 / _lambda * (_pp[1][0] - (K[1] * tMs * _pp[0][0] + K[1] * _pp[1][0]));
+    _pp[1][1] = 1 / _lambda * (_pp[1][1] - (K[1] * tMs * _pp[0][1] + K[1] * _pp[1][1]));
+    _pp[0][0] = p00;
+    _pp[0][1] = p01;
     _prevUnwrappedTimestamp = unwrapped_ts90khz;
     if (_packetCount < _startUpFilterDelayInPackets)
     {
         _packetCount++;
     }
     if (trace)
     {
         WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId, _id),  "w[0]=%f w[1]=%f ts=%u tMs=%u", _w[0], _w[1], ts90khz, tMs);
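
The _P to _pp rename above is mechanical; the math is an exponentially forgetting recursive least-squares fit of RTP timestamp against wall-clock time, with _w holding the slope (~90 ticks/ms) and offset and _pp the 2x2 covariance, exactly as the inline comments state: K = P*T/(lambda + T'*P*T), w += K*residual, P = (P - K*T'*P)/lambda. A standalone restatement of the update; the initial values here are illustrative:

    struct RlsFit {
        double w[2] = {90.0, 0.0};                    // slope (ticks/ms), offset
        double pp[2][2] = {{1.0, 0.0}, {0.0, 1e10}};  // covariance; 1e10 plays _P11
        double lambda = 1.0;                          // forgetting factor

        void Update(double tMs, double residual) {
            // Gain: K = P*T / (lambda + T'*P*T), with T = [tMs 1]'.
            double K[2] = {pp[0][0] * tMs + pp[0][1],
                           pp[1][0] * tMs + pp[1][1]};
            double TPT = lambda + tMs * K[0] + K[1];
            K[0] /= TPT;
            K[1] /= TPT;
            // Parameters: w = w + K * residual.
            w[0] += K[0] * residual;
            w[1] += K[1] * residual;
            // Covariance: P = (P - K*T'*P) / lambda.
            double p00 = (pp[0][0] - (K[0] * tMs * pp[0][0] + K[0] * pp[1][0])) / lambda;
            double p01 = (pp[0][1] - (K[0] * tMs * pp[0][1] + K[0] * pp[1][1])) / lambda;
            pp[1][0]   = (pp[1][0] - (K[1] * tMs * pp[0][0] + K[1] * pp[1][0])) / lambda;
            pp[1][1]   = (pp[1][1] - (K[1] * tMs * pp[0][1] + K[1] * pp[1][1])) / lambda;
            pp[0][0] = p00;
            pp[0][1] = p01;
        }
    };
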
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/timestamp_extrapolator.h
@@ -33,17 +33,17 @@ public:
 private:
     void CheckForWrapArounds(uint32_t ts90khz);
     bool DelayChangeDetection(double error, bool trace = true);
     RWLockWrapper*        _rwLock;
     int32_t         _vcmId;
     int32_t         _id;
     Clock*                _clock;
     double                _w[2];
-    double                _P[2][2];
+    double                _pp[2][2];
     int64_t         _startMs;
     int64_t         _prevMs;
     uint32_t        _firstTimestamp;
     int32_t         _wrapArounds;
     int64_t         _prevUnwrappedTimestamp;
     int64_t         _prevWrapTimestamp;
     const double          _lambda;
     bool                  _firstAfterReset;
--- a/media/webrtc/trunk/webrtc/modules/video_processing/main/source/video_processing.gypi
+++ b/media/webrtc/trunk/webrtc/modules/video_processing/main/source/video_processing.gypi
@@ -68,16 +68,17 @@
           ],
           'include_dirs': [
             '../interface',
             '../../../interface',
           ],
           'conditions': [
             ['os_posix==1 and OS!="mac"', {
               'cflags': [ '-msse2', ],
+              'cflags_mozilla': [ '-msse2', ],
             }],
             ['OS=="mac"', {
               'xcode_settings': {
                 'OTHER_CFLAGS': [ '-msse2', ],
               },
             }],
           ],
         },
--- a/media/webrtc/trunk/webrtc/system_wrappers/interface/asm_defines.h
+++ b/media/webrtc/trunk/webrtc/system_wrappers/interface/asm_defines.h
@@ -6,17 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_ASM_DEFINES_H_
 #define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_ASM_DEFINES_H_
 
-#if defined(__linux__) && defined(__ELF__)
+#if (defined(__linux__) || defined(__FreeBSD__)) && defined(__ELF__)
 .section .note.GNU-stack,"",%progbits
 #endif
 
 // Define the macros used in ARM assembly code, so that for Mac or iOS builds
 // we add leading underscores for the function names.
 #ifdef __APPLE__
 .macro GLOBAL_FUNCTION name
 .global _\name
--- a/media/webrtc/trunk/webrtc/system_wrappers/interface/tick_util.h
+++ b/media/webrtc/trunk/webrtc/system_wrappers/interface/tick_util.h
@@ -189,17 +189,17 @@ inline int64_t TickTime::QueryOsForTicks
     // 0x0fffffff ~3.1 days, the code will not take that long to execute
     // so it must have been a wrap around.
     if (old > 0xf0000000 && now < 0x0fffffff) {
       num_wrap_time_get_time++;
     }
   }
   result.ticks_ = now + (num_wrap_time_get_time << 32);
 #endif
-#elif defined(WEBRTC_LINUX)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
   struct timespec ts;
   // TODO(wu): Remove CLOCK_REALTIME implementation.
 #ifdef WEBRTC_CLOCK_TYPE_REALTIME
   clock_gettime(CLOCK_REALTIME, &ts);
 #else
   clock_gettime(CLOCK_MONOTONIC, &ts);
 #endif
   result.ticks_ = 1000000000LL * static_cast<int64_t>(ts.tv_sec) +
@@ -236,34 +236,34 @@ inline int64_t TickTime::MillisecondTime
 #if _WIN32
 #ifdef USE_QUERY_PERFORMANCE_COUNTER
   LARGE_INTEGER qpfreq;
   QueryPerformanceFrequency(&qpfreq);
   return (ticks * 1000) / qpfreq.QuadPart;
 #else
   return ticks;
 #endif
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
   return ticks / 1000000LL;
 #else
   return ticks / 1000LL;
 #endif
 }
 
 inline int64_t TickTime::MicrosecondTimestamp() {
   int64_t ticks = TickTime::Now().Ticks();
 #if _WIN32
 #ifdef USE_QUERY_PERFORMANCE_COUNTER
   LARGE_INTEGER qpfreq;
   QueryPerformanceFrequency(&qpfreq);
   return (ticks * 1000) / (qpfreq.QuadPart / 1000);
 #else
   return ticks * 1000LL;
 #endif
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
   return ticks / 1000LL;
 #else
   return ticks;
 #endif
 }
 
 inline int64_t TickTime::Ticks() const {
   return ticks_;
@@ -273,33 +273,33 @@ inline int64_t TickTime::MillisecondsToT
 #if _WIN32
 #ifdef USE_QUERY_PERFORMANCE_COUNTER
   LARGE_INTEGER qpfreq;
   QueryPerformanceFrequency(&qpfreq);
   return (qpfreq.QuadPart * ms) / 1000;
 #else
   return ms;
 #endif
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
   return ms * 1000000LL;
 #else
   return ms * 1000LL;
 #endif
 }
 
 inline int64_t TickTime::TicksToMilliseconds(const int64_t ticks) {
 #if _WIN32
 #ifdef USE_QUERY_PERFORMANCE_COUNTER
   LARGE_INTEGER qpfreq;
   QueryPerformanceFrequency(&qpfreq);
   return (ticks * 1000) / qpfreq.QuadPart;
 #else
   return ticks;
 #endif
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
   return ticks / 1000000LL;
 #else
   return ticks / 1000LL;
 #endif
 }
 
 inline TickTime& TickTime::operator+=(const int64_t& ticks) {
   ticks_ += ticks;
@@ -318,17 +318,17 @@ inline int64_t TickInterval::Millisecond
 #ifdef USE_QUERY_PERFORMANCE_COUNTER
   LARGE_INTEGER qpfreq;
   QueryPerformanceFrequency(&qpfreq);
   return (interval_ * 1000) / qpfreq.QuadPart;
 #else
   // interval_ is in ms
   return interval_;
 #endif
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
   // interval_ is in ns
   return interval_ / 1000000;
 #else
   // interval_ is usecs
   return interval_ / 1000;
 #endif
 }
 
@@ -337,17 +337,17 @@ inline int64_t TickInterval::Microsecond
 #ifdef USE_QUERY_PERFORMANCE_COUNTER
   LARGE_INTEGER qpfreq;
   QueryPerformanceFrequency(&qpfreq);
   return (interval_ * 1000000) / qpfreq.QuadPart;
 #else
   // interval_ is in ms
   return interval_ * 1000LL;
 #endif
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
   // interval_ is in ns
   return interval_ / 1000;
 #else
   // interval_ is usecs
   return interval_;
 #endif
 }
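
After the WEBRTC_BSD additions above, every POSIX platform shares one representation: ticks are nanoseconds from a monotonic clock, so the millisecond conversions divide by 1000000 and the microsecond ones by 1000. A POSIX-only sketch of that pipeline:

    #include <cstdio>
    #include <stdint.h>
    #include <time.h>

    int64_t NowTicks() {  // nanoseconds on a monotonic clock
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return 1000000000LL * static_cast<int64_t>(ts.tv_sec) + ts.tv_nsec;
    }

    int main() {
        int64_t ticks = NowTicks();
        std::printf("ms=%lld us=%lld\n",
                    static_cast<long long>(ticks / 1000000LL),  // MillisecondTimestamp
                    static_cast<long long>(ticks / 1000LL));    // MicrosecondTimestamp
        return 0;
    }
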
 
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/atomic32_posix.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/atomic32_posix.cc
@@ -7,17 +7,16 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/system_wrappers/interface/atomic32.h"
 
 #include <assert.h>
 #include <inttypes.h>
-#include <malloc.h>
 
 #include "webrtc/common_types.h"
 
 namespace webrtc {
 
 Atomic32::Atomic32(int32_t initial_value)
     : value_(initial_value) {
   assert(Is32bitAligned());
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable.cc
@@ -9,33 +9,33 @@
  */
 
 #include "webrtc/system_wrappers/interface/condition_variable_wrapper.h"
 
 #if defined(_WIN32)
 #include <windows.h>
 #include "webrtc/system_wrappers/source/condition_variable_event_win.h"
 #include "webrtc/system_wrappers/source/condition_variable_native_win.h"
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 #include <pthread.h>
 #include "webrtc/system_wrappers/source/condition_variable_posix.h"
 #endif
 
 namespace webrtc {
 
 ConditionVariableWrapper* ConditionVariableWrapper::CreateConditionVariable() {
 #if defined(_WIN32)
   // Try to create native condition variable implementation.
   ConditionVariableWrapper* ret_val = ConditionVariableNativeWin::Create();
   if (!ret_val) {
     // Native condition variable implementation does not exist. Create generic
     // condition variable based on events.
     ret_val = new ConditionVariableEventWin();
   }
   return ret_val;
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
   return ConditionVariablePosix::Create();
 #else
   return NULL;
 #endif
 }
 
 } // namespace webrtc
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable_posix.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/condition_variable_posix.cc
@@ -74,17 +74,17 @@ void ConditionVariablePosix::SleepCS(Cri
       &crit_sect);
   pthread_cond_wait(&cond_, &cs->mutex_);
 }
 
 bool ConditionVariablePosix::SleepCS(CriticalSectionWrapper& crit_sect,
                                      unsigned long max_time_inMS) {
   const unsigned long INFINITE =  0xFFFFFFFF;
   const int MILLISECONDS_PER_SECOND = 1000;
-#ifndef WEBRTC_LINUX
+#if !defined(WEBRTC_LINUX) && !defined(WEBRTC_BSD)
   const int MICROSECONDS_PER_MILLISECOND = 1000;
 #endif
   const int NANOSECONDS_PER_SECOND = 1000000000;
   const int NANOSECONDS_PER_MILLISECOND  = 1000000;
 
   CriticalSectionPosix* cs = reinterpret_cast<CriticalSectionPosix*>(
       &crit_sect);
 
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/cpu_info.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/cpu_info.cc
@@ -7,20 +7,22 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/system_wrappers/interface/cpu_info.h"
 
 #if defined(_WIN32)
 #include <Windows.h>
-#elif defined(WEBRTC_MAC)
+#elif defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
+#include <sys/types.h>
 #include <sys/sysctl.h>
-#include <sys/types.h>
-#else // defined(WEBRTC_LINUX) or defined(WEBRTC_ANDROID)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID)
+#include <unistd.h>
+#else // defined(_SC_NPROCESSORS_ONLN)
 #include <unistd.h>
 #endif
 
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
 
 uint32_t CpuInfo::number_of_cores_ = 0;
@@ -29,34 +31,43 @@ uint32_t CpuInfo::DetectNumberOfCores() 
   if (!number_of_cores_) {
 #if defined(_WIN32)
     SYSTEM_INFO si;
     GetSystemInfo(&si);
     number_of_cores_ = static_cast<uint32_t>(si.dwNumberOfProcessors);
     WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
                  "Available number of cores:%d", number_of_cores_);
 
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID)
+#elif defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID) && !defined(WEBRTC_GONK)
     number_of_cores_ = static_cast<uint32_t>(sysconf(_SC_NPROCESSORS_ONLN));
     WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
                  "Available number of cores:%d", number_of_cores_);
 
-#elif defined(WEBRTC_MAC)
-    int name[] = {CTL_HW, HW_AVAILCPU};
+#elif defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
+    int name[] = {
+      CTL_HW,
+#ifdef HW_AVAILCPU
+      HW_AVAILCPU,
+#else
+      HW_NCPU,
+#endif
+    };
     int ncpu;
     size_t size = sizeof(ncpu);
     if (0 == sysctl(name, 2, &ncpu, &size, NULL, 0)) {
       number_of_cores_ = static_cast<uint32_t>(ncpu);
       WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
                    "Available number of cores:%d", number_of_cores_);
     } else {
       WEBRTC_TRACE(kTraceError, kTraceUtility, -1,
                    "Failed to get number of cores");
       number_of_cores_ = 1;
     }
+#elif defined(_SC_NPROCESSORS_ONLN)
+    number_of_cores_ = static_cast<uint32_t>(sysconf(_SC_NPROCESSORS_ONLN));
 #else
     WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1,
                  "No function to get number of cores");
     number_of_cores_ = 1;
 #endif
   }
   return number_of_cores_;
 }
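
The core-count ladder above now prefers HW_AVAILCPU but accepts HW_NCPU on BSD/Mac, and gains a plain sysconf() fallback for any other platform that defines _SC_NPROCESSORS_ONLN. A sketch trimmed to the portable branch:

    #include <cstdio>
    #include <unistd.h>

    unsigned DetectNumberOfCores() {
    #if defined(_SC_NPROCESSORS_ONLN)
        long n = sysconf(_SC_NPROCESSORS_ONLN);
        return n > 0 ? static_cast<unsigned>(n) : 1;  // 1 is the patch's fallback
    #else
        return 1;  // no query available; same conservative default as the patch
    #endif
    }

    int main() {
        std::printf("cores: %u\n", DetectNumberOfCores());
        return 0;
    }
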
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/rw_lock.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/rw_lock.cc
@@ -10,28 +10,33 @@
 
 #include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
 
 #include <assert.h>
 
 #if defined(_WIN32)
 #include "webrtc/system_wrappers/source/rw_lock_generic.h"
 #include "webrtc/system_wrappers/source/rw_lock_win.h"
+#elif defined(ANDROID)
+#include "webrtc/system_wrappers/source/rw_lock_generic.h"
 #else
 #include "webrtc/system_wrappers/source/rw_lock_posix.h"
 #endif
 
 namespace webrtc {
 
 RWLockWrapper* RWLockWrapper::CreateRWLock() {
 #ifdef _WIN32
   // Native implementation is faster, so use that if available.
   RWLockWrapper* lock = RWLockWin::Create();
   if (lock) {
     return lock;
   }
   return new RWLockGeneric();
+#elif defined(ANDROID)
+  // Android 2.2 and before do not have POSIX pthread rwlocks.
+  return new RWLockGeneric();
 #else
   return RWLockPosix::Create();
 #endif
 }
 
 }  // namespace webrtc
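
RWLockGeneric, selected above for Android because pre-2.3 Bionic lacks pthread rwlocks, can be built from a mutex and a condition variable. An illustrative sketch of such a lock (the real class lives in rw_lock_generic.h and predates C++11, so this is an analogue, not a copy):

    #include <condition_variable>
    #include <mutex>

    class GenericRwLock {
    public:
        void AcquireLockShared() {
            std::unique_lock<std::mutex> lock(mutex_);
            cond_.wait(lock, [this] { return !writer_; });  // wait out any writer
            ++readers_;
        }
        void ReleaseLockShared() {
            std::lock_guard<std::mutex> lock(mutex_);
            if (--readers_ == 0)
                cond_.notify_all();  // the last reader may unblock a writer
        }
        void AcquireLockExclusive() {
            std::unique_lock<std::mutex> lock(mutex_);
            cond_.wait(lock, [this] { return !writer_ && readers_ == 0; });
            writer_ = true;
        }
        void ReleaseLockExclusive() {
            std::lock_guard<std::mutex> lock(mutex_);
            writer_ = false;
            cond_.notify_all();
        }
    private:
        std::mutex mutex_;
        std::condition_variable cond_;
        int readers_ = 0;
        bool writer_ = false;
    };
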
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers.gyp
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/system_wrappers.gyp
@@ -127,26 +127,31 @@
             'trace_impl.cc',
             'trace_impl.h',
             'trace_posix.cc',
             'trace_posix.h',
             'trace_win.cc',
             'trace_win.h',
           ],
         }],
-        ['OS=="android"', {
+        ['OS=="android" or moz_widget_toolkit_gonk==1', {
           'defines': [
             'WEBRTC_THREAD_RR',
             # TODO(leozwang): Investigate CLOCK_REALTIME and CLOCK_MONOTONIC
             # support on Android. Keep WEBRTC_CLOCK_TYPE_REALTIME for now,
             # remove it after I verify that CLOCK_MONOTONIC is fully functional
             # with condition and event functions in system_wrappers.
             'WEBRTC_CLOCK_TYPE_REALTIME',
            ],
           'dependencies': [ 'cpu_features_android', ],
+          'sources!': [
+            # Android doesn't have these in <=2.2
+            'rw_lock_posix.cc',
+            'rw_lock_posix.h',
+          ],
         }],
         ['OS=="linux"', {
           'defines': [
             'WEBRTC_THREAD_RR',
             # TODO(andrew): can we select this automatically?
             # Define this if the Linux system does not support CLOCK_MONOTONIC.
             #'WEBRTC_CLOCK_TYPE_REALTIME',
           ],
@@ -191,17 +196,17 @@
       # Disable warnings to enable Win64 build, issue 1323.
       'msvs_disabled_warnings': [
         4267,  # size_t to int truncation.
         4334,  # Ignore warning on shift operator promotion.
       ],
     },
   ], # targets
   'conditions': [
-    ['OS=="android"', {
+    ['OS=="android" or moz_widget_toolkit_gonk==1', {
       'targets': [
         {
           'variables': {
             # Treat this as third-party code.
             'chromium_code': 0,
           },
           'target_name': 'cpu_features_android',
           'type': 'static_library',
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/thread_posix.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/thread_posix.cc
@@ -53,16 +53,27 @@
 #ifdef WEBRTC_LINUX
 #include <linux/unistd.h>
 #include <sched.h>
 #include <sys/prctl.h>
 #include <sys/syscall.h>
 #include <sys/types.h>
 #endif
 
+#if defined(__NetBSD__)
+#include <lwp.h>
+#elif defined(__FreeBSD__)
+#include <sys/param.h>
+#include <sys/thr.h>
+#endif
+
+#if defined(WEBRTC_BSD) && !defined(__NetBSD__)
+#include <pthread_np.h>
+#endif
+
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/event_wrapper.h"
 #include "webrtc/system_wrappers/interface/sleep.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
 
 int ConvertToSystemPriority(ThreadPriority priority, int min_prio,
@@ -118,41 +129,55 @@ ThreadPosix::ThreadPosix(ThreadRunFuncti
       obj_(obj),
       crit_state_(CriticalSectionWrapper::CreateCriticalSection()),
       alive_(false),
       dead_(true),
       prio_(prio),
       event_(EventWrapper::Create()),
       name_(),
       set_thread_name_(false),
-#if (defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID))
+#if (defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK))
       pid_(-1),
 #endif
       attr_(),
       thread_(0) {
   if (thread_name != NULL) {
     set_thread_name_ = true;
     strncpy(name_, thread_name, kThreadMaxNameLength);
     name_[kThreadMaxNameLength - 1] = '\0';
   }
 }
 
 uint32_t ThreadWrapper::GetThreadId() {
-#if defined(WEBRTC_ANDROID) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_LINUX) || defined(WEBRTC_GONK)
   return static_cast<uint32_t>(syscall(__NR_gettid));
 #elif defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
   return pthread_mach_thread_np(pthread_self());
+#elif defined(__NetBSD__)
+  return _lwp_self();
+#elif defined(__DragonFly__)
+  return lwp_gettid();
+#elif defined(__OpenBSD__)
+  return reinterpret_cast<uintptr_t> (pthread_self());
+#elif defined(__FreeBSD__)
+#  if __FreeBSD_version > 900030
+    return pthread_getthreadid_np();
+#  else
+    long lwpid;
+    thr_self(&lwpid);
+    return lwpid;
+#  endif
 #else
   return reinterpret_cast<uint32_t>(pthread_self());
 #endif
 }
 
 int ThreadPosix::Construct() {
   int result = 0;
-#if !defined(WEBRTC_ANDROID)
+#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_GONK)
   // Enable immediate cancellation if requested, see Shutdown().
   result = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   if (result != 0) {
     return -1;
   }
   result = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
   if (result != 0) {
     return -1;
@@ -166,28 +191,38 @@ int ThreadPosix::Construct() {
 }
 
 ThreadPosix::~ThreadPosix() {
   pthread_attr_destroy(&attr_);
   delete event_;
   delete crit_state_;
 }
 
-#define HAS_THREAD_ID !defined(WEBRTC_IOS) && !defined(WEBRTC_MAC)
+#define HAS_THREAD_ID !defined(WEBRTC_IOS) && !defined(WEBRTC_MAC) && !defined(WEBRTC_BSD)
 
 bool ThreadPosix::Start(unsigned int& thread_id)
 {
   int result = pthread_attr_setdetachstate(&attr_, PTHREAD_CREATE_DETACHED);
   // Set the stack stack size to 1M.
   result |= pthread_attr_setstacksize(&attr_, 1024 * 1024);
+#if 0
+// Temporarily remove the attempt to set this to real-time scheduling.
+//
+// See: https://code.google.com/p/webrtc/issues/detail?id=1956
+//
+// To be removed when upstream is fixed.
 #ifdef WEBRTC_THREAD_RR
   const int policy = SCHED_RR;
 #else
   const int policy = SCHED_FIFO;
 #endif
+#else
+  const int policy = SCHED_OTHER;
+#endif
+
   event_->Reset();
   // If pthread_create was successful, a thread was created and is running.
   // Don't return false if it was successful since if there are any other
   // failures the state will be: thread was started but not configured as
   // asked for. However, the caller of this API will assume that a false
   // return value means that the thread never started.
   result |= pthread_create(&thread_, &attr_, &StartThread, this);
   if (result != 0) {
@@ -230,31 +265,39 @@ bool ThreadPosix::Start(unsigned int& th
     WEBRTC_TRACE(kTraceError, kTraceUtility, -1,
                  "unable to set thread priority");
   }
   return true;
 }
 
 // CPU_ZERO and CPU_SET are not available in NDK r7, so disable
 // SetAffinity on Android for now.
-#if (defined(WEBRTC_LINUX) && (!defined(WEBRTC_ANDROID)))
+#if defined(__FreeBSD__) || (defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID) && !defined(WEBRTC_GONK))
 bool ThreadPosix::SetAffinity(const int* processor_numbers,
                               const unsigned int amount_of_processors) {
   if (!processor_numbers || (amount_of_processors == 0)) {
     return false;
   }
+#if defined(__FreeBSD__)
+  cpuset_t mask;
+#else
   cpu_set_t mask;
+#endif
   CPU_ZERO(&mask);
 
   for (unsigned int processor = 0;
        processor < amount_of_processors;
        ++processor) {
     CPU_SET(processor_numbers[processor], &mask);
   }
-#if defined(WEBRTC_ANDROID)
+#if defined(__FreeBSD__)
+  const int result = pthread_setaffinity_np(thread_,
+                             sizeof(mask),
+                             &mask);
+#elif defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK)
   // Android.
   const int result = syscall(__NR_sched_setaffinity,
                              pid_,
                              sizeof(mask),
                              &mask);
 #else
   // "Normal" Linux.
   const int result = sched_setaffinity(pid_,
@@ -305,25 +348,29 @@ bool ThreadPosix::Stop() {
   }
 }
 
 void ThreadPosix::Run() {
   {
     CriticalSectionScoped cs(crit_state_);
     alive_ = true;
   }
-#if (defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID))
+#if (defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID) || defined(WEBRTC_GONK))
   pid_ = GetThreadId();
 #endif
   // The event the Start() is waiting for.
   event_->Set();
 
   if (set_thread_name_) {
 #ifdef WEBRTC_LINUX
     prctl(PR_SET_NAME, (unsigned long)name_, 0, 0, 0);
+#elif defined(__NetBSD__)
+    pthread_setname_np(pthread_self(), "%s", (void *)name_);
+#elif defined(WEBRTC_BSD)
+    pthread_set_name_np(pthread_self(), name_);
 #endif
     WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
                  "Thread with name:%s started ", name_);
   } else {
     WEBRTC_TRACE(kTraceStateInfo, kTraceUtility, -1,
                  "Thread without name started");
   }
   bool alive = true;
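
The SCHED_OTHER fallback above sidesteps thread-creation and priority failures seen when requesting real-time policies without sufficient privileges (webrtc issue 1956). A minimal sketch of how a policy is applied through pthread attributes -- illustrative only, not the tree's exact wiring:

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static void* Worker(void*) { return NULL; }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      // Without PTHREAD_EXPLICIT_SCHED the policy set here is ignored.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      // SCHED_RR / SCHED_FIFO usually need elevated privileges;
      // SCHED_OTHER, the default time-sharing policy, always succeeds.
      pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
      pthread_t t;
      if (pthread_create(&t, &attr, Worker, NULL) != 0)
        perror("pthread_create");
      else
        pthread_join(t, NULL);
      pthread_attr_destroy(&attr);
      return 0;
    }
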
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/trace_impl.cc
@@ -451,23 +451,23 @@ void TraceImpl::AddMessageToList(
 
   uint16_t idx = next_free_idx_[active_queue_];
   next_free_idx_[active_queue_]++;
 
   level_[active_queue_][idx] = level;
   length_[active_queue_][idx] = length;
   memcpy(message_queue_[active_queue_][idx], trace_message, length);
 
-  if (next_free_idx_[active_queue_] == WEBRTC_TRACE_MAX_QUEUE - 1) {
+  if (next_free_idx_[active_queue_] >= WEBRTC_TRACE_MAX_QUEUE - 1) {
     // Logging more messages than can be worked off. Log a warning.
     const char warning_msg[] = "WARNING MISSING TRACE MESSAGES\n";
-    level_[active_queue_][next_free_idx_[active_queue_]] = kTraceWarning;
-    length_[active_queue_][next_free_idx_[active_queue_]] = strlen(warning_msg);
-    memcpy(message_queue_[active_queue_][next_free_idx_[active_queue_]],
-           warning_msg, strlen(warning_msg));
+    level_[active_queue_][WEBRTC_TRACE_MAX_QUEUE-1] = kTraceWarning;
+    length_[active_queue_][WEBRTC_TRACE_MAX_QUEUE-1] = strlen(warning_msg);
+    memcpy(message_queue_[active_queue_][WEBRTC_TRACE_MAX_QUEUE-1],
+           warning_msg, length_[active_queue_][WEBRTC_TRACE_MAX_QUEUE-1]);
     next_free_idx_[active_queue_]++;
   }
 }
 
 bool TraceImpl::Run(void* obj) {
   return static_cast<TraceImpl*>(obj)->Process();
 }
 
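The `==` to `>=` change matters because the old guard fired only once: after the warning slot was written, next_free_idx_ advanced past WEBRTC_TRACE_MAX_QUEUE - 1 and later messages indexed beyond the arrays. Pinning all overflow writes to the last slot keeps the queue bounded. A simplified, self-contained sketch of the clamped-queue pattern (hypothetical sizes and names):

    #include <cstring>

    static const int kMaxQueue = 4;     // stand-in for WEBRTC_TRACE_MAX_QUEUE
    static char queue[kMaxQueue][32];
    static int next_free = 0;

    void AddMessage(const char* msg) {
      if (next_free >= kMaxQueue - 1) {     // '>=' keeps firing after overflow
        std::strncpy(queue[kMaxQueue - 1],  // always the last valid slot
                     "WARNING MISSING TRACE MESSAGES", sizeof(queue[0]) - 1);
        queue[kMaxQueue - 1][sizeof(queue[0]) - 1] = '\0';
        return;
      }
      std::strncpy(queue[next_free], msg, sizeof(queue[0]) - 1);
      queue[next_free][sizeof(queue[0]) - 1] = '\0';
      ++next_free;
    }
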
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/trace_posix.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/trace_posix.cc
@@ -13,18 +13,16 @@
 #include <cassert>
 #include <stdarg.h>
 #include <stdio.h>
 #include <string.h>
 #include <sys/time.h>
 #include <time.h>
 #ifdef WEBRTC_ANDROID
 #include <pthread.h>
-#else
-#include <iostream>
 #endif
 
 #if defined(_DEBUG)
 #define BUILDMODE "d"
 #elif defined(DEBUG)
 #define BUILDMODE "d"
 #elif defined(NDEBUG)
 #define BUILDMODE "r"
@@ -52,17 +50,17 @@ TracePosix::~TracePosix() {
 
 int32_t TracePosix::AddTime(char* trace_message, const TraceLevel level) const {
   struct timeval system_time_high_res;
   if (gettimeofday(&system_time_high_res, 0) == -1) {
     return -1;
   }
   struct tm buffer;
   const struct tm* system_time =
-    localtime_r(&system_time_high_res.tv_sec, &buffer);
+    localtime_r((const time_t *)(&system_time_high_res.tv_sec), &buffer);
 
   const uint32_t ms_time = system_time_high_res.tv_usec / 1000;
   uint32_t prev_tickCount = 0;
   {
     CriticalSectionScoped lock(&crit_sect_);
     if (level == kTraceApiCall) {
       prev_tickCount = prev_tick_count_;
       prev_tick_count_ = ms_time;
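
The cast is needed where `tv_sec` is not declared as `time_t` (on some BSDs it is a plain `long`), so `&system_time_high_res.tv_sec` does not convert to `const time_t*`. A cast-free alternative -- a sketch, not what the patch does -- copies the value into a real `time_t` first:

    #include <ctime>
    #include <sys/time.h>

    void FormatNow() {
      struct timeval tv;
      gettimeofday(&tv, 0);
      time_t secs = tv.tv_sec;       // implicit integral conversion
      struct tm buffer;
      localtime_r(&secs, &buffer);   // no pointer cast required
    }
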
--- a/media/webrtc/trunk/webrtc/test/channel_transport/udp_transport_impl.cc
+++ b/media/webrtc/trunk/webrtc/test/channel_transport/udp_transport_impl.cc
@@ -13,60 +13,62 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <time.h>
 
 #if defined(_WIN32)
 #include <winsock2.h>
 #include <ws2tcpip.h>
-#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 #include <arpa/inet.h>
 #include <ctype.h>
 #include <fcntl.h>
 #include <net/if.h>
 #include <netdb.h>
+#include <sys/socket.h>
 #include <netinet/in.h>
 #include <stdlib.h>
 #include <sys/ioctl.h>
-#include <sys/socket.h>
 #include <sys/time.h>
 #include <unistd.h>
 #ifndef WEBRTC_IOS
 #include <net/if_arp.h>
 #endif
-#endif // defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#endif // defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 #if defined(WEBRTC_MAC)
+#include <machine/types.h>
+#endif
+#if defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 #include <ifaddrs.h>
-#include <machine/types.h>
 #endif
 #if defined(WEBRTC_LINUX)
 #include <linux/netlink.h>
 #include <linux/rtnetlink.h>
 #endif
 
 #include "webrtc/common_types.h"
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace.h"
 #include "webrtc/test/channel_transport/udp_socket_manager_wrapper.h"
 #include "webrtc/typedefs.h"
 
-#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 #define GetLastError() errno
 
 #define IFRSIZE ((int)(size * sizeof (struct ifreq)))
 
 #define NLMSG_OK_NO_WARNING(nlh,len)                                    \
   ((len) >= (int)sizeof(struct nlmsghdr) &&                             \
    (int)(nlh)->nlmsg_len >= (int)sizeof(struct nlmsghdr) &&             \
    (int)(nlh)->nlmsg_len <= (len))
 
-#endif // defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#endif // defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 namespace webrtc {
 namespace test {
 
 class SocketFactory : public UdpTransportImpl::SocketFactoryInterface {
  public:
   UdpSocketWrapper* CreateSocket(const int32_t id,
                                  UdpSocketManager* mgr,
@@ -2325,17 +2327,17 @@ uint32_t UdpTransport::InetAddrIPV4(cons
 {
     return ::inet_addr(ip);
 }
 
 int32_t UdpTransport::InetPresentationToNumeric(int32_t af,
                                                 const char* src,
                                                 void* dst)
 {
-#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
     const int32_t result = inet_pton(af, src, dst);
     return result > 0 ? 0 : -1;
 
 #elif defined(_WIN32)
     SocketAddress temp;
     int length=sizeof(SocketAddress);
 
     if(af == AF_INET)
@@ -2447,17 +2449,17 @@ int32_t UdpTransport::LocalHostAddressIP
                 break;
         };
     }
     freeaddrinfo(result);
     WEBRTC_TRACE(kTraceWarning, kTraceTransport, -1,
                  "getaddrinfo failed to find address");
     return -1;
 
-#elif defined(WEBRTC_MAC)
+#elif defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
     struct ifaddrs* ptrIfAddrs = NULL;
     struct ifaddrs* ptrIfAddrsStart = NULL;
 
     getifaddrs(&ptrIfAddrsStart);
     ptrIfAddrs = ptrIfAddrsStart;
     while(ptrIfAddrs)
     {
         if(ptrIfAddrs->ifa_addr->sa_family == AF_INET6)
@@ -2639,17 +2641,17 @@ int32_t UdpTransport::LocalHostAddress(u
     }
     else
     {
         int32_t error = WSAGetLastError();
         WEBRTC_TRACE(kTraceWarning, kTraceTransport, -1,
                      "gethostbyname failed, error:%d", error);
         return -1;
     }
-#elif (defined(WEBRTC_MAC))
+#elif (defined(WEBRTC_BSD) || defined(WEBRTC_MAC))
     char localname[255];
     if (gethostname(localname, 255) != -1)
     {
         hostent* localHost;
         localHost = gethostbyname(localname);
         if(localHost)
         {
             if(localHost->h_addrtype != AF_INET)
@@ -2778,17 +2780,17 @@ int32_t UdpTransport::IPAddress(const So
         }
 
         source_port = address._sockaddr_in6.sin6_port;
     }
     // Convert port number to network byte order.
     sourcePort = htons(source_port);
     return 0;
 
- #elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+ #elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
     int32_t ipFamily = address._sockaddr_storage.sin_family;
     const void* ptrNumericIP = NULL;
 
     if(ipFamily == AF_INET)
     {
         ptrNumericIP = &(address._sockaddr_in.sin_addr);
     }
     else if(ipFamily == AF_INET6)
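
On the new WEBRTC_BSD branch (as on Mac), local addresses come from getifaddrs() rather than Linux netlink. A minimal sketch of that enumeration pattern, independent of the transport class:

    #include <ifaddrs.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <stdio.h>

    int main() {
      struct ifaddrs* list = NULL;
      if (getifaddrs(&list) != 0) return 1;
      for (struct ifaddrs* it = list; it != NULL; it = it->ifa_next) {
        if (it->ifa_addr && it->ifa_addr->sa_family == AF_INET) {
          char buf[INET_ADDRSTRLEN];
          struct sockaddr_in* sa = (struct sockaddr_in*)it->ifa_addr;
          inet_ntop(AF_INET, &sa->sin_addr, buf, sizeof(buf));
          printf("%s: %s\n", it->ifa_name, buf);
        }
      }
      freeifaddrs(list);
      return 0;
    }
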
--- a/media/webrtc/trunk/webrtc/typedefs.h
+++ b/media/webrtc/trunk/webrtc/typedefs.h
@@ -12,17 +12,17 @@
 // Much of it is derived from Chromium's build/build_config.h.
 
 #ifndef WEBRTC_TYPEDEFS_H_
 #define WEBRTC_TYPEDEFS_H_
 
 // For access to standard POSIXish features, use WEBRTC_POSIX instead of a
 // more specific macro.
 #if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || \
-    defined(WEBRTC_ANDROID)
+    defined(WEBRTC_ANDROID) || defined(WEBRTC_BSD)
 #define WEBRTC_POSIX
 #endif
 
 // Processor architecture detection.  For more info on what's defined, see:
 //   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
 //   http://www.agner.org/optimize/calling_conventions.pdf
 //   or with gcc, run: "echo | gcc -E -dM -"
 // TODO(andrew): replace WEBRTC_LITTLE_ENDIAN with WEBRTC_ARCH_LITTLE_ENDIAN.
@@ -44,20 +44,80 @@
 // definition warnings.
 //#define WEBRTC_ARCH_ARM
 // TODO(andrew): Chromium uses the following two defines. Should we switch?
 //#define WEBRTC_ARCH_ARM_FAMILY
 //#define WEBRTC_ARCH_ARMEL
 #define WEBRTC_ARCH_32_BITS
 #define WEBRTC_ARCH_LITTLE_ENDIAN
 #define WEBRTC_LITTLE_ENDIAN
-#elif defined(__MIPSEL__)
-#define WEBRTC_ARCH_32_BITS
+#elif defined(__powerpc64__)
+#define WEBRTC_ARCH_PPC64 1
+#define WEBRTC_ARCH_64_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#elif defined(__ppc__) || defined(__powerpc__)
+#define WEBRTC_ARCH_PPC 1
+#define WEBRTC_ARCH_32_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#elif defined(__sparc64__)
+#define WEBRTC_ARCH_SPARC 1
+#define WEBRTC_ARCH_64_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#elif defined(__sparc__)
+#define WEBRTC_ARCH_SPARC 1
+#define WEBRTC_ARCH_32_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#elif defined(__mips__)
+#define WEBRTC_ARCH_MIPS 1
+#if defined(_ABI64) && _MIPS_SIM == _ABI64
+#define WEBRTC_ARCH_64_BITS 1
+#else
+#define WEBRTC_ARCH_32_BITS 1
+#endif
+#if defined(__MIPSEB__)
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#else
 #define WEBRTC_ARCH_LITTLE_ENDIAN
 #define WEBRTC_LITTLE_ENDIAN
+#endif
+#elif defined(__hppa__)
+#define WEBRTC_ARCH_HPPA 1
+#define WEBRTC_ARCH_32_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#elif defined(__ia64__)
+#define WEBRTC_ARCH_IA64 1
+#define WEBRTC_ARCH_64_BITS 1
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#define WEBRTC_LITTLE_ENDIAN
+#elif defined(__s390x__)
+#define WEBRTC_ARCH_S390X 1
+#define WEBRTC_ARCH_64_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#elif defined(__s390__)
+#define WEBRTC_ARCH_S390 1
+#define WEBRTC_ARCH_32_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
+#elif defined(__alpha__)
+#define WEBRTC_ARCH_ALPHA 1
+#define WEBRTC_ARCH_64_BITS 1
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#define WEBRTC_LITTLE_ENDIAN
+#elif defined(__avr32__)
+#define WEBRTC_ARCH_AVR32 1
+#define WEBRTC_ARCH_32_BITS 1
+#define WEBRTC_ARCH_BIG_ENDIAN
+#define WEBRTC_BIG_ENDIAN
 #else
 #error Please add support for your architecture in typedefs.h
 #endif
 
 #if defined(__SSE2__) || defined(_MSC_VER)
 #define WEBRTC_USE_SSE2
 #endif
 
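Each new architecture branch defines a word-size macro plus a matched endianness pair (WEBRTC_ARCH_*_ENDIAN and the legacy WEBRTC_*_ENDIAN). A hypothetical consumer, sketching how the endianness macros are meant to be used:

    #include <string.h>
    #include "webrtc/typedefs.h"

    uint32_t ReadBigEndian32(const uint8_t* p) {
    #if defined(WEBRTC_ARCH_BIG_ENDIAN)
      uint32_t v;
      memcpy(&v, p, sizeof(v));  // native order already matches network order
      return v;
    #else
      return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
             (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
    #endif
    }
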
--- a/media/webrtc/trunk/webrtc/video_engine/stream_synchronization.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/stream_synchronization.cc
@@ -8,16 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "webrtc/video_engine/stream_synchronization.h"
 
 #include <algorithm>
 #include <assert.h>
 #include <cmath>
+#include <cstdlib>
 
 #include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
 
 static const int kMaxChangeMs = 80;
 static const int kMaxDeltaDelayMs = 10000;
 static const int kFilterLength = 4;
--- a/media/webrtc/trunk/webrtc/video_engine/vie_channel.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/vie_channel.cc
@@ -1394,16 +1394,17 @@ int32_t ViEChannel::StartSend() {
   CriticalSectionScoped cs_rtp(rtp_rtcp_cs_.get());
   for (std::list<RtpRtcp*>::const_iterator it = simulcast_rtp_rtcp_.begin();
        it != simulcast_rtp_rtcp_.end();
        it++) {
     RtpRtcp* rtp_rtcp = *it;
     rtp_rtcp->SetSendingMediaStatus(true);
     rtp_rtcp->SetSendingStatus(true);
   }
+  vie_receiver_.StartRTCPReceive();
   return 0;
 }
 
 int32_t ViEChannel::StopSend() {
   WEBRTC_TRACE(kTraceInfo, kTraceVideo, ViEId(engine_id_, channel_id_), "%s",
                __FUNCTION__);
 
   CriticalSectionScoped cs(rtp_rtcp_cs_.get());
@@ -1429,16 +1430,17 @@ int32_t ViEChannel::StopSend() {
   }
   for (std::list<RtpRtcp*>::iterator it = simulcast_rtp_rtcp_.begin();
        it != simulcast_rtp_rtcp_.end();
        it++) {
     RtpRtcp* rtp_rtcp = *it;
     rtp_rtcp->ResetSendDataCountersRTP();
     rtp_rtcp->SetSendingStatus(false);
   }
+  vie_receiver_.StopRTCPReceive();
   return 0;
 }
 
 bool ViEChannel::Sending() {
   return rtp_rtcp_->Sending();
 }
 
 int32_t ViEChannel::StartReceive() {
--- a/media/webrtc/trunk/webrtc/video_engine/vie_impl.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/vie_impl.cc
@@ -174,21 +174,23 @@ int VideoEngine::SetAndroidObjects(void*
                "SetAndroidObjects()");
 
 #if defined(WEBRTC_ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
   if (SetCaptureAndroidVM(javaVM, javaContext) != 0) {
     WEBRTC_TRACE(kTraceError, kTraceVideo, kModuleId,
                  "Could not set capture Android VM");
     return -1;
   }
+#ifdef WEBRTC_INCLUDE_INTERNAL_VIDEO_RENDER
   if (SetRenderAndroidVM(javaVM) != 0) {
     WEBRTC_TRACE(kTraceError, kTraceVideo, kModuleId,
                  "Could not set render Android VM");
     return -1;
   }
+#endif
   return 0;
 #else
   WEBRTC_TRACE(kTraceError, kTraceVideo, kModuleId,
                "WEBRTC_ANDROID not defined for VideoEngine::SetAndroidObjects");
   return -1;
 #endif
 }
 
--- a/media/webrtc/trunk/webrtc/video_engine/vie_receiver.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/vie_receiver.cc
@@ -30,17 +30,18 @@ ViEReceiver::ViEReceiver(const int32_t c
       channel_id_(channel_id),
       rtp_header_parser_(RtpHeaderParser::Create()),
       rtp_rtcp_(NULL),
       vcm_(module_vcm),
       remote_bitrate_estimator_(remote_bitrate_estimator),
       external_decryption_(NULL),
       decryption_buffer_(NULL),
       rtp_dump_(NULL),
-      receiving_(false) {
+      receiving_(false),
+      receiving_rtcp_(false) {
   assert(remote_bitrate_estimator);
 }
 
 ViEReceiver::~ViEReceiver() {
   if (decryption_buffer_) {
     delete[] decryption_buffer_;
     decryption_buffer_ = NULL;
   }
@@ -115,17 +116,17 @@ int ViEReceiver::ReceivedRTPPacket(const
     return -1;
   }
   return InsertRTPPacket(static_cast<const int8_t*>(rtp_packet),
                          rtp_packet_length);
 }
 
 int ViEReceiver::ReceivedRTCPPacket(const void* rtcp_packet,
                                     int rtcp_packet_length) {
-  if (!receiving_) {
+  if (!receiving_rtcp_) {
     return -1;
   }
   return InsertRTCPPacket(static_cast<const int8_t*>(rtcp_packet),
                           rtcp_packet_length);
 }
 
 int32_t ViEReceiver::OnReceivedPayloadData(
     const uint8_t* payload_data, const uint16_t payload_size,
@@ -250,16 +251,24 @@ int ViEReceiver::InsertRTCPPacket(const 
 void ViEReceiver::StartReceive() {
   receiving_ = true;
 }
 
 void ViEReceiver::StopReceive() {
   receiving_ = false;
 }
 
+void ViEReceiver::StartRTCPReceive() {
+  receiving_rtcp_ = true;
+}
+
+void ViEReceiver::StopRTCPReceive() {
+  receiving_rtcp_ = false;
+}
+
 int ViEReceiver::StartRTPDump(const char file_nameUTF8[1024]) {
   CriticalSectionScoped cs(receive_cs_.get());
   if (rtp_dump_) {
     // Restart it if it already exists and is started
     rtp_dump_->Stop();
   } else {
     rtp_dump_ = RtpDump::CreateRtpDump();
     if (rtp_dump_ == NULL) {
--- a/media/webrtc/trunk/webrtc/video_engine/vie_receiver.h
+++ b/media/webrtc/trunk/webrtc/video_engine/vie_receiver.h
@@ -43,16 +43,19 @@ class ViEReceiver : public RtpData {
   void RegisterSimulcastRtpRtcpModules(const std::list<RtpRtcp*>& rtp_modules);
 
   bool SetReceiveTimestampOffsetStatus(bool enable, int id);
   bool SetReceiveAbsoluteSendTimeStatus(bool enable, int id);
 
   void StartReceive();
   void StopReceive();
 
+  void StartRTCPReceive();
+  void StopRTCPReceive();
+
   int StartRTPDump(const char file_nameUTF8[1024]);
   int StopRTPDump();
 
   // Receives packets from external transport.
   int ReceivedRTPPacket(const void* rtp_packet, int rtp_packet_length);
   int ReceivedRTCPPacket(const void* rtcp_packet, int rtcp_packet_length);
 
   // Implements RtpData.
@@ -80,13 +83,14 @@ class ViEReceiver : public RtpData {
   std::list<RtpRtcp*> rtp_rtcp_simulcast_;
   VideoCodingModule* vcm_;
   RemoteBitrateEstimator* remote_bitrate_estimator_;
 
   Encryption* external_decryption_;
   uint8_t* decryption_buffer_;
   RtpDump* rtp_dump_;
   bool receiving_;
+  bool receiving_rtcp_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_VIDEO_ENGINE_VIE_RECEIVER_H_
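
With the receiving_rtcp_ flag, RTP and RTCP acceptance are gated independently: ViEChannel::StartSend()/StopSend() toggle the RTCP gate (so a send-only channel still processes receiver reports), while StartReceive()/StopReceive() keep controlling media. A condensed sketch of the resulting behavior (hypothetical driver code):

    #include "webrtc/video_engine/vie_receiver.h"

    // After only ViEChannel::StartSend() has run:
    //   receiver.ReceivedRTPPacket(pkt, len);   // -1, receiving_ is false
    //   receiver.ReceivedRTCPPacket(pkt, len);  // processed, receiving_rtcp_ true
    void OnStartSend(webrtc::ViEReceiver& receiver) {
      receiver.StartRTCPReceive();  // called from ViEChannel::StartSend()
    }
    void OnStartReceive(webrtc::ViEReceiver& receiver) {
      receiver.StartReceive();      // RTP media gate, toggled separately
    }
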
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
@@ -77,16 +77,17 @@ public:
     // Sets the name of the trace file and enables non-encrypted trace messages.
     static int SetTraceFile(const char* fileNameUTF8,
                             bool addFileCounter = false);
 
     // Installs the TraceCallback implementation to ensure that the user
     // receives callbacks for generated trace messages.
     static int SetTraceCallback(TraceCallback* callback);
 
+    static int SetAndroidObjects(void* javaVM, void* context);
     static int SetAndroidObjects(void* javaVM, void* env, void* context);
 
 protected:
     VoiceEngine() {}
     ~VoiceEngine() {}
 };
 
 // VoEBase
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_external_media.h
@@ -91,29 +91,30 @@ public:
 
     // This function accepts externally recorded audio. During transmission,
     // this method should be called at as regular an interval as possible
     // with frames of corresponding size.
     virtual int ExternalRecordingInsertData(
         const int16_t speechData10ms[], int lengthSamples,
         int samplingFreqHz, int current_delay_ms) = 0;
 
+
     // This function gets audio for an external playout sink.
     // During transmission, this function should be called every ~10 ms
     // to obtain a new 10 ms frame of audio. The length of the block will
-    // be 160, 320, 440 or 480 samples (for 16, 32, 44 or 48 kHz sampling
-    // rates respectively).
+    // be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or 48000
+    // Hz sampling rates respectively).
     virtual int ExternalPlayoutGetData(
         int16_t speechData10ms[], int samplingFreqHz,
         int current_delay_ms, int& lengthSamples) = 0;
 
     // Pulls an audio frame from the specified |channel| for external mixing.
     // If the |desired_sample_rate_hz| is 0, the signal will be returned with
     // its native frequency, otherwise it will be resampled. Valid frequencies
-    // are 16, 22, 32, 44 or 48 kHz.
+    // are 16000, 22050, 32000, 44100 or 48000 Hz.
     virtual int GetAudioFrame(int channel, int desired_sample_rate_hz,
                               AudioFrame* frame) = 0;
 
     // Sets the state of external mixing. Cannot be changed during playback.
     virtual int SetExternalMixing(int channel, bool enable) = 0;
 
 protected:
     VoEExternalMedia() {}
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_volume_control.h
@@ -113,15 +113,15 @@ public:
     // Scales volume of the |left| and |right| channels independently.
     // Valid scale range is [0.0, 1.0].
     virtual int SetOutputVolumePan(int channel, float left, float right) = 0;
 
     // Gets the current left and right scaling factors.
     virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0;
 
 protected:
-    VoEVolumeControl() {};
-    virtual ~VoEVolumeControl() {};
+    VoEVolumeControl() {}
+    virtual ~VoEVolumeControl() {}
 };
 
 }  // namespace webrtc
 
 #endif  // #ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
--- a/media/webrtc/trunk/webrtc/voice_engine/output_mixer_unittest.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/output_mixer_unittest.cc
@@ -144,34 +144,39 @@ void OutputMixerTest::RunResampleTest(in
   } else {
     SetStereoFrame(&dst_frame_, 0, 0, dst_sample_rate_hz);
     if (src_channels == 1)
       SetStereoFrame(&golden_frame_, dst_left, dst_left, dst_sample_rate_hz);
     else
       SetStereoFrame(&golden_frame_, dst_left, dst_right, dst_sample_rate_hz);
   }
 
-  // The sinc resampler has a known delay, which we compute here. Multiplying by
-  // two gives us a crude maximum for any resampling, as the old resampler
-  // typically (but not always) has lower delay.
-  static const int kInputKernelDelaySamples = 16;
-  const int max_delay = static_cast<double>(dst_sample_rate_hz)
-      / src_sample_rate_hz * kInputKernelDelaySamples * dst_channels * 2;
+  // The speex resampler has a known delay dependent on quality and rates,
+  // which we approximate here. Multiplying by two gives us a crude maximum
+  // for any resampling, as the old resampler typically (but not always)
+  // has lower delay.  The actual delay is calculated internally based on the
+  // filter length in the QualityMap.
+  static const int kInputKernelDelaySamples = 16 * 3;
+  const int max_delay = std::min(1.0f, 1/kResamplingFactor) *
+                        kInputKernelDelaySamples * dst_channels * 2;
   printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
       src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
   EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler, &dst_frame_));
   if (src_sample_rate_hz == 96000 && dst_sample_rate_hz == 8000) {
     // The sinc resampler gives poor SNR at this extreme conversion, but we
     // expect to see this rarely in practice.
     EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 14.0f);
   } else {
     EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 46.0f);
   }
 }
 
+// These two tests assume a pure memcpy() (no delay and no filtering) when
+// the input and output frequencies and channel counts match.
+// RemixAndResample uses 'Fixed' resamplers to enable this behavior.
 TEST_F(OutputMixerTest, RemixAndResampleCopyFrameSucceeds) {
   // Stereo -> stereo.
   SetStereoFrame(&src_frame_, 10, 10);
   SetStereoFrame(&dst_frame_, 0, 0);
   EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
   VerifyFramesAreEqual(src_frame_, dst_frame_);
 
   // Mono -> mono.
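
For reference, the new bound works out as follows (kResamplingFactor is test-internal; for any positive value the min() term is at most 1.0):

    // max_delay <= 1.0 * kInputKernelDelaySamples * dst_channels * 2
    //           =  (16 * 3) * dst_channels * 2
    // e.g. stereo destination: 48 * 2 * 2 = 192 samples of alignment slack
    // allowed when ComputeSNR() lines up golden_frame_ and dst_frame_.
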
--- a/media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/transmit_mixer.cc
@@ -1148,16 +1148,18 @@ bool TransmitMixer::IsRecordingCall()
 
 bool TransmitMixer::IsRecordingMic()
 {
 
     return _fileRecording;
 }
 
 // TODO(andrew): use RemixAndResample for this.
+// Note that if drift compensation is done here, a buffering stage will be
+// needed and this will need to switch to non-fixed resamplers.
 int TransmitMixer::GenerateAudioFrame(const int16_t audio[],
                                       int samples_per_channel,
                                       int num_channels,
                                       int sample_rate_hz) {
   int destination_rate;
   int num_codec_channels;
   GetSendCodecInfo(&destination_rate, &num_codec_channels);
 
--- a/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/voe_external_media_impl.cc
@@ -186,17 +186,17 @@ int VoEExternalMediaImpl::ExternalRecord
     }
     if (shared_->NumOfSendingChannels() == 0)
     {
         shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
             "SetExternalRecordingStatus() no channel is sending");
         return -1;
     }
     if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
-        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
+        (48000 != samplingFreqHz) && (44100 != samplingFreqHz))
     {
          shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
              "SetExternalRecordingStatus() invalid sample rate");
         return -1;
     }
     if ((0 == lengthSamples) ||
         ((lengthSamples % (samplingFreqHz / 100)) != 0))
     {
@@ -296,17 +296,17 @@ int VoEExternalMediaImpl::ExternalPlayou
     }
     if (!shared_->ext_playout())
     {
        shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
            "ExternalPlayoutGetData() external playout is not enabled");
         return -1;
     }
     if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
-        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
+        (48000 != samplingFreqHz) && (44100 != samplingFreqHz))
     {
         shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
             "ExternalPlayoutGetData() invalid sample rate");
         return -1;
     }
     if (current_delay_ms < 0)
     {
         shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
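
The 44000 -> 44100 fix is not cosmetic: the whitelist previously accepted 44000, a rate no device uses, and rejected real 44.1 kHz input. The per-frame length check divides by samplingFreqHz / 100, i.e. 441 samples per 10 ms at 44100 Hz. A self-contained sketch of the validation arithmetic:

    #include <cassert>

    // A 10 ms frame holds samplingFreqHz / 100 samples per channel.
    bool ValidLength(int samplingFreqHz, int lengthSamples) {
      return lengthSamples != 0 &&
             (lengthSamples % (samplingFreqHz / 100)) == 0;
    }

    int main() {
      assert(ValidLength(44100, 441));   // one 10 ms frame at 44.1 kHz
      assert(ValidLength(48000, 480));
      assert(!ValidLength(44100, 440));  // 440 was only valid under 44000
      return 0;
    }
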
--- a/media/webrtc/trunk/webrtc/voice_engine/voice_engine.gyp
+++ b/media/webrtc/trunk/webrtc/voice_engine/voice_engine.gyp
@@ -29,16 +29,19 @@
         'include',
         '<(webrtc_root)/modules/audio_device',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
           'include',
         ],
       },
+      'defines': [
+        'WEBRTC_EXTERNAL_TRANSPORT',
+      ],
       'sources': [
         '../common_types.h',
         '../engine_configurations.h',
         '../typedefs.h',
         'include/voe_audio_processing.h',
         'include/voe_base.h',
         'include/voe_call_report.h',
         'include/voe_codec.h',
--- a/media/webrtc/trunk/webrtc/voice_engine/voice_engine_defines.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/voice_engine_defines.h
@@ -309,21 +309,28 @@ inline int VoEChannelId(int moduleId)
 #ifdef ANDROID
 
 // ----------------------------------------------------------------------------
 //  Defines
 // ----------------------------------------------------------------------------
 
   // Always excluded for Android builds
   #undef WEBRTC_CODEC_ISAC
-  #undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+  // We need WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT to make things work on Android.
+  // Motivation for the commented-out undef below is unclear.
+  //
+  // #undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
   #undef WEBRTC_CONFERENCING
   #undef WEBRTC_TYPING_DETECTION
 
-  #define ANDROID_NOT_SUPPORTED(stat) NOT_SUPPORTED(stat)
+  // This macro used to make the calling function set an error code and return.
+  // However, the unit tests pass and behave reasonably without it, so it is
+  // disabled for now; see bug 819856.
+  #define ANDROID_NOT_SUPPORTED(stat)
+  //#define ANDROID_NOT_SUPPORTED(stat) NOT_SUPPORTED(stat)
 
 #else // LINUX PC
 
 // ----------------------------------------------------------------------------
 //  Defines
 // ----------------------------------------------------------------------------
 
   #define ANDROID_NOT_SUPPORTED(stat)
@@ -332,42 +339,45 @@ inline int VoEChannelId(int moduleId)
 
 #else
 #define ANDROID_NOT_SUPPORTED(stat)
 #endif  // #ifdef WEBRTC_LINUX
 
 // *** WEBRTC_MAC ***
 // including iPhone
 
-#ifdef WEBRTC_MAC
+#if defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
+#if !defined(WEBRTC_BSD)
 #include <AudioUnit/AudioUnit.h>
+#endif
 #include <arpa/inet.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <netinet/in.h>
 #include <pthread.h>
 #include <sched.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/socket.h>
 #include <sys/stat.h>
 #include <sys/time.h>
 #include <sys/types.h>
 #include <time.h>
 #include <unistd.h>
-#if !defined(WEBRTC_IOS)
+#if !defined(WEBRTC_BSD) && !defined(WEBRTC_IOS)
   #include <CoreServices/CoreServices.h>
   #include <CoreAudio/CoreAudio.h>
   #include <AudioToolbox/DefaultAudioOutput.h>
   #include <AudioToolbox/AudioConverter.h>
   #include <CoreAudio/HostTime.h>
 #endif
 
+
 #define DWORD unsigned long int
 #define WINAPI
 #define LPVOID void *
 #define FALSE 0
 #define TRUE 1
 #define SOCKADDR_IN struct sockaddr_in
 #define UINT unsigned int
 #define UCHAR unsigned char
@@ -412,11 +422,11 @@ inline int VoEChannelId(int moduleId)
 //  Defines
 // ----------------------------------------------------------------------------
 
   #define IPHONE_NOT_SUPPORTED(stat)
 #endif
 
 #else
 #define IPHONE_NOT_SUPPORTED(stat)
-#endif  // #ifdef WEBRTC_MAC
+#endif  // #if defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
 
 #endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
--- a/media/webrtc/trunk/webrtc/voice_engine/voice_engine_impl.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/voice_engine_impl.cc
@@ -3,17 +3,17 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#if defined(WEBRTC_ANDROID) && !defined(WEBRTC_ANDROID_OPENSLES)
+#if defined(WEBRTC_ANDROID) && !defined(WEBRTC_GONK)
 #include "webrtc/modules/audio_device/android/audio_device_jni_android.h"
 #endif
 
 #include "webrtc/system_wrappers/interface/trace.h"
 #include "webrtc/voice_engine/voice_engine_impl.h"
 
 namespace webrtc
 {
@@ -136,23 +136,19 @@ bool VoiceEngine::Delete(VoiceEngine*& v
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, -1,
             "VoiceEngine::Delete did not release the very last reference.  "
             "%d references remain.", ref);
     }
 
     return true;
 }
 
-int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
+int VoiceEngine::SetAndroidObjects(void* javaVM, void* context)
 {
-#ifdef WEBRTC_ANDROID
-#ifdef WEBRTC_ANDROID_OPENSLES
-  return 0;
-#else
-  return AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
-      javaVM, env, context);
-#endif
+#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
+    return AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
+        javaVM, context);
 #else
   return -1;
 #endif
 }
 
 } //namespace webrtc
--- a/media/webrtc/webrtc_config.gypi
+++ b/media/webrtc/webrtc_config.gypi
@@ -17,16 +17,19 @@
     'include_tests': 0,
     'enable_android_opensl': 1,
 # use_system_lib* still seems to be in use in trunk/build
     'use_system_libjpeg': 0,
     'use_system_libvpx': 0,
     'build_libjpeg': 0,
     'build_libvpx': 0,
 
+    # turn off mandatory use of NEON and instead use NEON detection
+    'arm_neon': 0,
+
     #if "-D build_with_gonk=1", then set moz_widget_toolkit_gonk to 1
     'moz_widget_toolkit_gonk': 0,
     'variables': {
       'build_with_gonk%': 0,
     },
     'conditions': [
       ['build_with_gonk==1', {
          'moz_widget_toolkit_gonk': 1,
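
Setting arm_neon to 0 drops the build-time NEON requirement; per the commit message, NEON code paths are instead chosen at run time. A sketch of that dispatch using webrtc's cpu_features_wrapper.h (the surrounding macro wiring here is illustrative, not the exact tree code):

    #include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"

    typedef void (*FilterFn)(const short* in, short* out, int len);

    static void Filter_C(const short* in, short* out, int len) {
      for (int i = 0; i < len; ++i) out[i] = in[i];  // portable fallback
    }
    void Filter_NEON(const short* in, short* out, int len);  // NEON-built unit

    static FilterFn PickFilter() {
    #if defined(WEBRTC_DETECT_ARM_NEON)
      // Runtime check: only take the NEON path when the CPU reports it.
      if (WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON)
        return Filter_NEON;
    #endif
      return Filter_C;
    }
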