Bug 1376873 - Rollup conflict fixes for audio/video code; r=pehrsons
authorDan Minor <dminor@mozilla.com>
Mon, 22 Jan 2018 15:04:26 -0500
changeset 445274 93eec571640ee0810da8475ee37e417b88045574
parent 445273 28b57e3ba51de982a4663801a3935580114b5477
child 445275 50f89f4e45b0af87fd6aa45aed60f02f3e69b951
push id 35014
push user dvarga@mozilla.com
push date Fri, 09 Nov 2018 10:01:40 +0000
treeherder mozilla-central@5e7636ec12c5 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers pehrsons
bugs 1376873
milestone 65.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1376873 - Rollup conflict fixes for audio/video code; r=pehrsons MozReview-Commit-ID: 1T8mgqdkzq3 Differential Revision: https://phabricator.services.mozilla.com/D7427
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
media/webrtc/trunk/webrtc/api/rtpparameters.cc
media/webrtc/trunk/webrtc/api/rtpparameters.h
media/webrtc/trunk/webrtc/audio/audio_receive_stream.cc
media/webrtc/trunk/webrtc/call/call.cc
media/webrtc/trunk/webrtc/call/video_config.cc
media/webrtc/trunk/webrtc/call/video_config.h
media/webrtc/trunk/webrtc/call/video_receive_stream.h
media/webrtc/trunk/webrtc/call/video_send_stream.h
media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler.cc
media/webrtc/trunk/webrtc/common_types.cc
media/webrtc/trunk/webrtc/common_types.h
media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
media/webrtc/trunk/webrtc/media/base/videoadapter.cc
media/webrtc/trunk/webrtc/media/base/videoadapter.h
media/webrtc/trunk/webrtc/modules/audio_coding/acm2/acm_receiver.cc
media/webrtc/trunk/webrtc/modules/audio_coding/acm2/acm_receiver.h
media/webrtc/trunk/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/merge.cc
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_impl.cc
media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_impl.h
media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
media/webrtc/trunk/webrtc/modules/audio_processing/logging/apm_data_dumper.h
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.h
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture_types.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.h
media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_frame_rotation.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/fake_desktop_capturer.h
media/webrtc/trunk/webrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.h
media/webrtc/trunk/webrtc/modules/desktop_capture/mac/desktop_configuration.mm
media/webrtc/trunk/webrtc/modules/desktop_capture/mac/window_list_utils.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor.h
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/screen_capturer_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/screen_capturer_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capture_utils.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_directx.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
media/webrtc/trunk/webrtc/modules/desktop_capture/win/win_shared.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/win/window_capture_utils.h
media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_mac.mm
media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_win.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.h
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.h
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/window_list_utils.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.cc
media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.h
media/webrtc/trunk/webrtc/modules/include/module_common_types.h
media/webrtc/trunk/webrtc/modules/media_file/media_file_utility.cc
media/webrtc/trunk/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
media/webrtc/trunk/webrtc/modules/utility/source/jvm_android.cc
media/webrtc/trunk/webrtc/modules/utility/source/process_thread_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.h
media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.h
media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
media/webrtc/trunk/webrtc/modules/video_capture/objc/device_info.mm
media/webrtc/trunk/webrtc/modules/video_capture/test/video_capture_unittest.cc
media/webrtc/trunk/webrtc/modules/video_capture/video_capture.h
media/webrtc/trunk/webrtc/modules/video_capture/video_capture_defines.h
media/webrtc/trunk/webrtc/modules/video_capture/video_capture_factory.cc
media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.h
media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_mf.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
media/webrtc/trunk/webrtc/modules/video_capture/windows/video_capture_ds.cc
media/webrtc/trunk/webrtc/modules/video_coding/codec_database.cc
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
media/webrtc/trunk/webrtc/modules/video_coding/frame_buffer2_unittest.cc
media/webrtc/trunk/webrtc/modules/video_coding/generic_decoder.h
media/webrtc/trunk/webrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc
media/webrtc/trunk/webrtc/modules/video_coding/include/video_coding.h
media/webrtc/trunk/webrtc/modules/video_coding/include/video_coding_defines.h
media/webrtc/trunk/webrtc/modules/video_coding/jitter_buffer.cc
media/webrtc/trunk/webrtc/modules/video_coding/media_optimization.cc
media/webrtc/trunk/webrtc/modules/video_coding/receiver.cc
media/webrtc/trunk/webrtc/modules/video_coding/rtp_frame_reference_finder.cc
media/webrtc/trunk/webrtc/modules/video_coding/session_info.cc
media/webrtc/trunk/webrtc/modules/video_coding/video_coding_impl.cc
media/webrtc/trunk/webrtc/modules/video_coding/video_coding_impl.h
media/webrtc/trunk/webrtc/modules/video_coding/video_receiver.cc
media/webrtc/trunk/webrtc/modules/video_coding/video_sender.cc
media/webrtc/trunk/webrtc/rtc_base/base64.cc
media/webrtc/trunk/webrtc/rtc_base/basictypes.h
media/webrtc/trunk/webrtc/rtc_base/byteorder.h
media/webrtc/trunk/webrtc/rtc_base/platform_thread.cc
media/webrtc/trunk/webrtc/rtc_base/platform_thread.h
media/webrtc/trunk/webrtc/rtc_base/task_queue_libevent.cc
media/webrtc/trunk/webrtc/system_wrappers/source/clock.cc
media/webrtc/trunk/webrtc/test/fuzzers/rtp_packet_fuzzer.cc
media/webrtc/trunk/webrtc/video/rtp_video_stream_receiver.cc
media/webrtc/trunk/webrtc/video/rtp_video_stream_receiver.h
media/webrtc/trunk/webrtc/video/video_receive_stream.cc
media/webrtc/trunk/webrtc/video/video_send_stream.cc
media/webrtc/trunk/webrtc/video/video_send_stream.h
media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.h
media/webrtc/trunk/webrtc/voice_engine/channel.cc
media/webrtc/trunk/webrtc/voice_engine/channel.h
media/webrtc/trunk/webrtc/voice_engine/channel_proxy.cc
media/webrtc/trunk/webrtc/voice_engine/channel_proxy.h
media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -587,17 +587,17 @@ MediaEngineRemoteVideoSource::DeliverFra
       break;
     }
     default: {
       break;
     }
   }
 
   rtc::Callback0<void> callback_unused;
-  rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+  rtc::scoped_refptr<webrtc::I420BufferInterface> buffer =
     new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
       aProps.width(),
       aProps.height(),
       aBuffer,
       aProps.yStride(),
       aBuffer + aProps.yAllocatedSize(),
       aProps.uStride(),
       aBuffer + aProps.yAllocatedSize() + aProps.uAllocatedSize(),
@@ -826,21 +826,19 @@ LogCapability(const char* aHeader,
     "H264",
     "I420",
     "RED",
     "ULPFEC",
     "Generic codec",
     "Unknown codec"
   };
 
-  LOG(("%s: %4u x %4u x %2u maxFps, %s, %s. Distance = %" PRIu32,
+  LOG(("%s: %4u x %4u x %2u maxFps, %s. Distance = %" PRIu32,
        aHeader, aCapability.width, aCapability.height, aCapability.maxFPS,
-       types[std::min(std::max(uint32_t(0), uint32_t(aCapability.rawType)),
-                      uint32_t(sizeof(types) / sizeof(*types) - 1))],
-       codec[std::min(std::max(uint32_t(0), uint32_t(aCapability.codecType)),
+       codec[std::min(std::max(uint32_t(0), uint32_t(aCapability.videoType)),
                       uint32_t(sizeof(codec) / sizeof(*codec) - 1))],
        aDistance));
 }
 
 bool
 MediaEngineRemoteVideoSource::ChooseCapability(
     const NormalizedConstraints& aConstraints,
     const MediaEnginePrefs& aPrefs,
@@ -998,34 +996,17 @@ MediaEngineRemoteVideoSource::ChooseCapa
 
     for (auto& candidate : candidateSet) {
       candidate.mDistance =
         GetDistance(candidate.mCapability, normPrefs, aDeviceId, aCalculate);
     }
     TrimLessFitCandidates(candidateSet);
   }
 
-  // Any remaining multiples all have the same distance, but may vary on
-  // format. Some formats are more desirable for certain use like WebRTC.
-  // E.g. I420 over RGB24 can remove a needless format conversion.
-
-  bool found = false;
-  for (auto& candidate : candidateSet) {
-    const webrtc::CaptureCapability& cap = candidate.mCapability;
-    if (cap.rawType == webrtc::RawVideoType::kVideoI420 ||
-        cap.rawType == webrtc::RawVideoType::kVideoYUY2 ||
-        cap.rawType == webrtc::RawVideoType::kVideoYV12) {
-      aCapability = cap;
-      found = true;
-      break;
-    }
-  }
-  if (!found) {
-    aCapability = candidateSet[0].mCapability;
-  }
+  aCapability = candidateSet[0].mCapability;
 
   LogCapability("Chosen capability", aCapability, sameDistance);
   return true;
 }
 
 void
 MediaEngineRemoteVideoSource::GetSettings(MediaTrackSettings& aOutSettings) const
 {
--- a/media/webrtc/trunk/webrtc/api/rtpparameters.cc
+++ b/media/webrtc/trunk/webrtc/api/rtpparameters.cc
@@ -110,32 +110,55 @@ const int RtpExtension::kVideoContentTyp
 
 const char RtpExtension::kVideoTimingUri[] =
     "http://www.webrtc.org/experiments/rtp-hdrext/video-timing";
 const int RtpExtension::kVideoTimingDefaultId = 8;
 
 const char RtpExtension::kEncryptHeaderExtensionsUri[] =
     "urn:ietf:params:rtp-hdrext:encrypt";
 
+const char* RtpExtension::kRtpStreamIdUri =
+    "urn:ietf:params:rtp-hdrext:sdes:rtp-stream-id";
+const int RtpExtension::kRtpStreamIdDefaultId = 9;
+
+const char* RtpExtension::kRepairedRtpStreamIdUri =
+    "urn:ietf:params:rtp-hdrext:sdes:repaired-rtp-stream-id";
+const int RtpExtension::kRepairedRtpStreamIdDefaultId = 10;
+
+const char* RtpExtension::kMIdUri =
+    "urn:ietf:params:rtp-hdrext:sdes:mid";
+const int RtpExtension::kMIdDefaultId = 11;
+
+const char* RtpExtension::kCsrcAudioLevelUri =
+    "urn:ietf:params:rtp-hdrext:csrc-audio-level";
+const int RtpExtension::kCsrcAudioLevelDefaultId = 12;
+
 const int RtpExtension::kMinId = 1;
 const int RtpExtension::kMaxId = 14;
 
 bool RtpExtension::IsSupportedForAudio(const std::string& uri) {
   return uri == webrtc::RtpExtension::kAudioLevelUri ||
-         uri == webrtc::RtpExtension::kTransportSequenceNumberUri;
+         uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
+         uri == webrtc::RtpExtension::kRtpStreamIdUri ||
+         uri == webrtc::RtpExtension::kRepairedRtpStreamIdUri ||
+         uri == webrtc::RtpExtension::kMIdUri ||
+         uri == webrtc::RtpExtension::kCsrcAudioLevelUri;
 }
 
 bool RtpExtension::IsSupportedForVideo(const std::string& uri) {
   return uri == webrtc::RtpExtension::kTimestampOffsetUri ||
          uri == webrtc::RtpExtension::kAbsSendTimeUri ||
          uri == webrtc::RtpExtension::kVideoRotationUri ||
          uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
          uri == webrtc::RtpExtension::kPlayoutDelayUri ||
          uri == webrtc::RtpExtension::kVideoContentTypeUri ||
-         uri == webrtc::RtpExtension::kVideoTimingUri;
+         uri == webrtc::RtpExtension::kVideoTimingUri ||
+         uri == webrtc::RtpExtension::kRtpStreamIdUri ||
+         uri == webrtc::RtpExtension::kRepairedRtpStreamIdUri ||
+         uri == webrtc::RtpExtension::kMIdUri;
 }
 
 bool RtpExtension::IsEncryptionSupported(const std::string& uri) {
   return uri == webrtc::RtpExtension::kAudioLevelUri ||
          uri == webrtc::RtpExtension::kTimestampOffsetUri ||
 #if !defined(ENABLE_EXTERNAL_AUTH)
          // TODO(jbauch): Figure out a way to always allow "kAbsSendTimeUri"
          // here and filter out later if external auth is really used in
--- a/media/webrtc/trunk/webrtc/api/rtpparameters.h
+++ b/media/webrtc/trunk/webrtc/api/rtpparameters.h
@@ -275,16 +275,28 @@ struct RtpExtension {
 
   static const char kPlayoutDelayUri[];
   static const int kPlayoutDelayDefaultId;
 
   // Encryption of Header Extensions, see RFC 6904 for details:
   // https://tools.ietf.org/html/rfc6904
   static const char kEncryptHeaderExtensionsUri[];
 
+  static const char* kRtpStreamIdUri;
+  static const int kRtpStreamIdDefaultId;
+
+  static const char* kRepairedRtpStreamIdUri;
+  static const int kRepairedRtpStreamIdDefaultId;
+
+  static const char* kMIdUri;
+  static const int kMIdDefaultId;
+
+  static const char* kCsrcAudioLevelUri;
+  static const int kCsrcAudioLevelDefaultId;
+
   // Inclusive min and max IDs for one-byte header extensions, per RFC5285.
   static const int kMinId;
   static const int kMaxId;
 
   std::string uri;
   int id = 0;
   bool encrypt = false;
 };
--- a/media/webrtc/trunk/webrtc/audio/audio_receive_stream.cc
+++ b/media/webrtc/trunk/webrtc/audio/audio_receive_stream.cc
@@ -203,17 +203,17 @@ webrtc::AudioReceiveStream::Stats AudioR
       static_cast<double>(rtc::kNumMillisecsPerSec);
   stats.expand_rate = Q14ToFloat(ns.currentExpandRate);
   stats.speech_expand_rate = Q14ToFloat(ns.currentSpeechExpandRate);
   stats.secondary_decoded_rate = Q14ToFloat(ns.currentSecondaryDecodedRate);
   stats.secondary_discarded_rate = Q14ToFloat(ns.currentSecondaryDiscardedRate);
   stats.accelerate_rate = Q14ToFloat(ns.currentAccelerateRate);
   stats.preemptive_expand_rate = Q14ToFloat(ns.currentPreemptiveRate);
 
-  auto ds = channel_proxy_->GetDecodingCallStatistics();
+  auto ds(channel_proxy_->GetDecodingCallStatistics());
   stats.decoding_calls_to_silence_generator = ds.calls_to_silence_generator;
   stats.decoding_calls_to_neteq = ds.calls_to_neteq;
   stats.decoding_normal = ds.decoded_normal;
   stats.decoding_plc = ds.decoded_plc;
   stats.decoding_cng = ds.decoded_cng;
   stats.decoding_plc_cng = ds.decoded_plc_cng;
   stats.decoding_muted_output = ds.decoded_muted_output;
 
--- a/media/webrtc/trunk/webrtc/call/call.cc
+++ b/media/webrtc/trunk/webrtc/call/call.cc
@@ -233,16 +233,25 @@ class Call : public webrtc::Call,
                         uint8_t fraction_loss,
                         int64_t rtt_ms,
                         int64_t probing_interval_ms) override;
 
   // Implements BitrateAllocator::LimitObserver.
   void OnAllocationLimitsChanged(uint32_t min_send_bitrate_bps,
                                  uint32_t max_padding_bitrate_bps) override;
 
+  VoiceEngine* voice_engine() override {
+    internal::AudioState* audio_state =
+        static_cast<internal::AudioState*>(config_.audio_state.get());
+    if (audio_state)
+      return audio_state->voice_engine();
+    else
+      return nullptr;
+  }
+
  private:
   DeliveryStatus DeliverRtcp(MediaType media_type, const uint8_t* packet,
                              size_t length);
   DeliveryStatus DeliverRtp(MediaType media_type,
                             const uint8_t* packet,
                             size_t length,
                             const PacketTime& packet_time);
   void ConfigureSync(const std::string& sync_group)
--- a/media/webrtc/trunk/webrtc/call/video_config.cc
+++ b/media/webrtc/trunk/webrtc/call/video_config.cc
@@ -18,29 +18,32 @@
 namespace webrtc {
 VideoStream::VideoStream()
     : width(0),
       height(0),
       max_framerate(-1),
       min_bitrate_bps(-1),
       target_bitrate_bps(-1),
       max_bitrate_bps(-1),
-      max_qp(-1) {}
+      max_qp(-1) {
+  rid[0] = '\0';
+}
 
 VideoStream::~VideoStream() = default;
 
 std::string VideoStream::ToString() const {
   std::stringstream ss;
   ss << "{width: " << width;
   ss << ", height: " << height;
   ss << ", max_framerate: " << max_framerate;
   ss << ", min_bitrate_bps:" << min_bitrate_bps;
   ss << ", target_bitrate_bps:" << target_bitrate_bps;
   ss << ", max_bitrate_bps:" << max_bitrate_bps;
   ss << ", max_qp: " << max_qp;
+  ss << ", rid: " << rid;
 
   ss << ", temporal_layer_thresholds_bps: [";
   for (size_t i = 0; i < temporal_layer_thresholds_bps.size(); ++i) {
     ss << temporal_layer_thresholds_bps[i];
     if (i != temporal_layer_thresholds_bps.size() - 1)
       ss << ", ";
   }
   ss << ']';
--- a/media/webrtc/trunk/webrtc/call/video_config.h
+++ b/media/webrtc/trunk/webrtc/call/video_config.h
@@ -33,16 +33,30 @@ struct VideoStream {
   int max_framerate;
 
   int min_bitrate_bps;
   int target_bitrate_bps;
   int max_bitrate_bps;
 
   int max_qp;
 
+  char rid[kRIDSize+1];
+
+  const std::string Rid() const {
+    return std::string(rid);
+  }
+
+  void SetRid(const std::string & aRid) {
+    static_assert(sizeof(rid) > kRIDSize,
+      "mRid must be large enought to hold a RID + null termination");
+    auto len = std::min((size_t)kRIDSize-1, aRid.length());
+    strncpy(&rid[0], aRid.c_str(), len);
+    rid[len] = 0;
+  }
+
   // Bitrate thresholds for enabling additional temporal layers. Since these are
   // thresholds in between layers, we have one additional layer. One threshold
   // gives two temporal layers, one below the threshold and one above, two give
   // three, and so on.
   // The VideoEncoder may redistribute bitrates over the temporal layers so a
   // bitrate threshold of 100k and an estimate of 105k does not imply that we
   // get 100k in one temporal layer and 5k in the other, just that the bitrate
   // in the first temporal layer should not exceed 100k.
--- a/media/webrtc/trunk/webrtc/call/video_receive_stream.h
+++ b/media/webrtc/trunk/webrtc/call/video_receive_stream.h
@@ -18,16 +18,17 @@
 
 #include "api/call/transport.h"
 #include "api/rtpparameters.h"
 #include "call/rtp_config.h"
 #include "common_types.h"  // NOLINT(build/include)
 #include "common_video/include/frame_callback.h"
 #include "media/base/videosinkinterface.h"
 #include "rtc_base/platform_file.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 
 namespace webrtc {
 
 class RtpPacketSinkInterface;
 class VideoDecoder;
 
 class VideoReceiveStream {
  public:
@@ -155,19 +156,24 @@ class VideoReceiveStream {
       // estimators belonging to the ReceiveSideCongestionController
       // are configured. Decide if this setting should be deleted, and
       // if it needs to be replaced by a setting in PacketRouter to
       // disable REMB feedback.
 
       // See draft-alvestrand-rmcat-remb for information.
       bool remb = false;
 
+      bool tmmbr = false;
+
       // See draft-holmer-rmcat-transport-wide-cc-extensions for details.
       bool transport_cc = false;
 
+      // TODO(jesup) - there should be a kKeyFrameReqNone
+      KeyFrameRequestMethod keyframe_method = kKeyFrameReqPliRtcp;
+
       // See NackConfig for description.
       NackConfig nack;
 
       // Payload types for ULPFEC and RED, respectively.
       int ulpfec_payload_type = -1;
       int red_payload_type = -1;
 
       // SSRC for retransmissions.
@@ -224,16 +230,19 @@ class VideoReceiveStream {
   virtual void Start() = 0;
   // Stops stream activity.
   // When a stream is stopped, it can't receive, process or deliver packets.
   virtual void Stop() = 0;
 
   // TODO(pbos): Add info on currently-received codec to Stats.
   virtual Stats GetStats() const = 0;
 
+  //TODO: find replacement for this using call interface
+  //virtual void SetSyncChannel(VoiceEngine* voice_engine, int audio_channel_id) = 0;
+
   // Takes ownership of the file, is responsible for closing it later.
   // Calling this method will close and finalize any current log.
   // Giving rtc::kInvalidPlatformFileValue disables logging.
   // If a frame to be written would make the log too large the write fails and
   // the log is closed and finalized. A |byte_limit| of 0 means no limit.
   virtual void EnableEncodedFrameRecording(rtc::PlatformFile file,
                                            size_t byte_limit) = 0;
   inline void DisableEncodedFrameRecording() {
--- a/media/webrtc/trunk/webrtc/call/video_send_stream.h
+++ b/media/webrtc/trunk/webrtc/call/video_send_stream.h
@@ -188,16 +188,19 @@ class VideoSendStream {
         std::vector<uint32_t> ssrcs;
 
         // Payload type to use for the RTX stream.
         int payload_type = -1;
       } rtx;
 
       // RTCP CNAME, see RFC 3550.
       std::string c_name;
+
+      std::vector<std::string> rids;
+      std::string mid;
     } rtp;
 
     // Transport for outgoing packets.
     Transport* send_transport = nullptr;
 
     // Called for each I420 frame before encoding the frame. Can be used for
     // effects, snapshots etc. 'nullptr' disables the callback.
     rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback = nullptr;
@@ -257,16 +260,20 @@ class VideoSendStream {
     // Try to strike a "pleasing" balance between frame rate or resolution.
     kBalanced,
   };
 
   virtual void SetSource(
       rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
       const DegradationPreference& degradation_preference) = 0;
 
+  // Gets interface used to signal the current CPU work level to the encoder.
+  // Valid as long as the VideoSendStream is valid.
+  virtual CPULoadStateObserver* LoadStateObserver() = 0;
+
   // Set which streams to send. Must have at least as many SSRCs as configured
   // in the config. Encoder settings are passed on to the encoder instance along
   // with the VideoStream settings.
   virtual void ReconfigureVideoEncoder(VideoEncoderConfig config) = 0;
 
   virtual Stats GetStats() = 0;
 
   // Takes ownership of each file, is responsible for closing them later.
--- a/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler.cc
+++ b/media/webrtc/trunk/webrtc/common_audio/resampler/sinc_resampler.cc
@@ -113,18 +113,16 @@ double SincScaleFactor(double io_ratio) 
   // depending on kKernelSize.
   sinc_scale_factor *= 0.9;
 
   return sinc_scale_factor;
 }
 
 }  // namespace
 
-namespace webrtc {
-
 const size_t SincResampler::kKernelSize;
 
 // If we know the minimum architecture at compile time, avoid CPU detection.
 #if defined(WEBRTC_ARCH_X86_FAMILY)
 #if defined(__SSE2__)
 #define CONVOLVE_FUNC Convolve_SSE
 void SincResampler::InitializeCPUSpecificFeatures() {}
 #else
--- a/media/webrtc/trunk/webrtc/common_types.cc
+++ b/media/webrtc/trunk/webrtc/common_types.cc
@@ -50,17 +50,18 @@ RTPHeaderExtension::RTPHeaderExtension()
       transportSequenceNumber(0),
       hasAudioLevel(false),
       voiceActivity(false),
       audioLevel(0),
       hasVideoRotation(false),
       videoRotation(kVideoRotation_0),
       hasVideoContentType(false),
       videoContentType(VideoContentType::UNSPECIFIED),
-      has_video_timing(false) {}
+      has_video_timing(false),
+      csrcAudioLevels() {}
 
 RTPHeaderExtension::RTPHeaderExtension(const RTPHeaderExtension& other) =
     default;
 
 RTPHeaderExtension& RTPHeaderExtension::operator=(
     const RTPHeaderExtension& other) = default;
 
 RTPHeader::RTPHeader()
--- a/media/webrtc/trunk/webrtc/common_types.h
+++ b/media/webrtc/trunk/webrtc/common_types.h
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef COMMON_TYPES_H_
 #define COMMON_TYPES_H_
 
+#include <atomic>
 #include <stddef.h>
 #include <string.h>
 #include <ostream>
 #include <string>
 #include <vector>
 
 #include "api/array_view.h"
 #include "api/optional.h"
@@ -361,24 +362,46 @@ struct AudioDecodingCallStats {
       : calls_to_silence_generator(0),
         calls_to_neteq(0),
         decoded_normal(0),
         decoded_plc(0),
         decoded_cng(0),
         decoded_plc_cng(0),
         decoded_muted_output(0) {}
 
-  int calls_to_silence_generator;  // Number of calls where silence generated,
-                                   // and NetEq was disengaged from decoding.
-  int calls_to_neteq;              // Number of calls to NetEq.
-  int decoded_normal;  // Number of calls where audio RTP packet decoded.
-  int decoded_plc;     // Number of calls resulted in PLC.
-  int decoded_cng;  // Number of calls where comfort noise generated due to DTX.
-  int decoded_plc_cng;  // Number of calls resulted where PLC faded to CNG.
-  int decoded_muted_output;  // Number of calls returning a muted state output.
+  AudioDecodingCallStats(const AudioDecodingCallStats& other)
+  {
+    calls_to_silence_generator = other.calls_to_silence_generator.load();
+    calls_to_neteq = other.calls_to_neteq.load();
+    decoded_normal = other.decoded_normal.load();
+    decoded_plc = other.decoded_plc.load();
+    decoded_cng = other.decoded_cng.load();
+    decoded_plc_cng = other.decoded_plc_cng.load();
+    decoded_muted_output = other.decoded_muted_output.load();
+  }
+
+  AudioDecodingCallStats& operator=(const AudioDecodingCallStats& other)
+  {
+    calls_to_silence_generator = other.calls_to_silence_generator.load();
+    calls_to_neteq = other.calls_to_neteq.load();
+    decoded_normal = other.decoded_normal.load();
+    decoded_plc = other.decoded_plc.load();
+    decoded_cng = other.decoded_cng.load();
+    decoded_plc_cng = other.decoded_plc_cng.load();
+    decoded_muted_output = other.decoded_muted_output.load();
+    return *this;
+  }
+
+  std::atomic<int> calls_to_silence_generator;  // Number of calls where silence generated,
+  std::atomic<int> calls_to_neteq;              // Number of calls to NetEq.
+  std::atomic<int> decoded_normal;  // Number of calls where audio RTP packet decoded.
+  std::atomic<int> decoded_plc;     // Number of calls resulted in PLC.
+  std::atomic<int> decoded_cng;  // Number of calls where comfort noise generated due to DTX.
+  std::atomic<int> decoded_plc_cng;  // Number of calls resulted where PLC faded to CNG.
+  std::atomic<int> decoded_muted_output;  // Number of calls returning a muted state output.
 };
 
 // ==================================================================
 // Video specific types
 // ==================================================================
 
 // TODO(nisse): Delete, and switch to fourcc values everywhere?
 // Supported video types.
@@ -401,16 +424,17 @@ enum class VideoType {
   kBGRA,
 };
 
 // Video codec
 enum { kPayloadNameSize = 32 };
 enum { kMaxSimulcastStreams = 4 };
 enum { kMaxSpatialLayers = 5 };
 enum { kMaxTemporalStreams = 4 };
+enum { kRIDSize = 32};
 
 enum VideoCodecComplexity {
   kComplexityNormal = 0,
   kComplexityHigh = 1,
   kComplexityHigher = 2,
   kComplexityMax = 3
 };
 
@@ -753,16 +777,27 @@ class StringRtpHeaderExtension {
 };
 
 // StreamId represents RtpStreamId which is a string.
 typedef StringRtpHeaderExtension StreamId;
 
 // Mid represents RtpMid which is a string.
 typedef StringRtpHeaderExtension Mid;
 
+// Audio level of CSRCs See:
+// https://tools.ietf.org/html/rfc6465
+struct CsrcAudioLevelList {
+  CsrcAudioLevelList() : numAudioLevels(0) { }
+  CsrcAudioLevelList(const CsrcAudioLevelList&) = default;
+  CsrcAudioLevelList& operator=(const CsrcAudioLevelList&) = default;
+  uint8_t numAudioLevels;
+  // arrOfAudioLevels has the same ordering as RTPHeader.arrOfCSRCs
+  uint8_t arrOfAudioLevels[kRtpCsrcSize];
+};
+
 struct RTPHeaderExtension {
   RTPHeaderExtension();
   RTPHeaderExtension(const RTPHeaderExtension& other);
   RTPHeaderExtension& operator=(const RTPHeaderExtension& other);
 
   bool hasTransmissionTimeOffset;
   int32_t transmissionTimeOffset;
   bool hasAbsoluteSendTime;
@@ -796,16 +831,17 @@ struct RTPHeaderExtension {
   // https://tools.ietf.org/html/draft-ietf-avtext-rid-09
   // TODO(danilchap): Update url from draft to release version.
   StreamId stream_id;
   StreamId repaired_stream_id;
 
   // For identifying the media section used to interpret this RTP packet. See
   // https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38
   Mid mid;
+  CsrcAudioLevelList csrcAudioLevels;
 };
 
 struct RTPHeader {
   RTPHeader();
   RTPHeader(const RTPHeader& other);
   RTPHeader& operator=(const RTPHeader& other);
 
   bool markerBit;
--- a/media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
+++ b/media/webrtc/trunk/webrtc/common_video/libyuv/webrtc_libyuv.cc
@@ -4,30 +4,33 @@
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "libyuv/planar_functions.h"
 
 #include <string.h>
+#include <limits>
 
 #include "rtc_base/checks.h"
 #include "api/video/i420_buffer.h"
 
 // NOTE(ajm): Path provided by gn.
 #include "libyuv.h"  // NOLINT
 
 namespace webrtc {
 
 size_t CalcBufferSize(VideoType type, int width, int height) {
   RTC_DCHECK_GE(width, 0);
   RTC_DCHECK_GE(height, 0);
+
   size_t buffer_size = 0;
   switch (type) {
     case VideoType::kI420:
     case VideoType::kNV12:
     case VideoType::kNV21:
     case VideoType::kIYUV:
     case VideoType::kYV12: {
       int half_width = (width + 1) >> 1;
--- a/media/webrtc/trunk/webrtc/media/base/videoadapter.cc
+++ b/media/webrtc/trunk/webrtc/media/base/videoadapter.cc
@@ -163,16 +163,26 @@ bool VideoAdapter::AdaptFrameResolution(
                                         int* out_width,
                                         int* out_height) {
   rtc::CritScope cs(&critical_section_);
   ++frames_in_;
 
   // The max output pixel count is the minimum of the requests from
   // OnOutputFormatRequest and OnResolutionRequest.
   int max_pixel_count = resolution_request_max_pixel_count_;
+  if (scale_) {
+    // We calculate the scaled pixel count from the in_width and in_height,
+    // which is the input resolution. We then take the minimum of the scaled
+    // resolution and the current max_pixel_count. This will allow the
+    // quality scaler to reduce the resolution in response to load, but we
+    // will never go above the requested scaled resolution.
+    int scaled_pixel_count = static_cast<int>((in_width * in_height / scale_resolution_by_) / scale_resolution_by_);
+    max_pixel_count = std::min(max_pixel_count, scaled_pixel_count);
+  }
+
   if (requested_format_) {
     max_pixel_count = std::min(
         max_pixel_count, requested_format_->width * requested_format_->height);
   }
   int target_pixel_count =
       std::min(resolution_request_target_pixel_count_, max_pixel_count);
 
   // Drop the input frame if necessary.
@@ -231,18 +241,18 @@ bool VideoAdapter::AdaptFrameResolution(
   *out_height = *cropped_height / scale.denominator * scale.numerator;
   RTC_DCHECK_EQ(0, *out_width % required_resolution_alignment_);
   RTC_DCHECK_EQ(0, *out_height % required_resolution_alignment_);
 
   ++frames_out_;
   if (scale.numerator != scale.denominator)
     ++frames_scaled_;
 
-  if (previous_width_ && (previous_width_ != *out_width ||
-                          previous_height_ != *out_height)) {
+  if ((previous_width_ || scale_) && (previous_width_ != *out_width ||
+                                      previous_height_ != *out_height)) {
     ++adaption_changes_;
     RTC_LOG(LS_INFO) << "Frame size changed: scaled " << frames_scaled_
                      << " / out " << frames_out_ << " / in " << frames_in_
                      << " Changes: " << adaption_changes_
                      << " Input: " << in_width << "x" << in_height
                      << " Scale: " << scale.numerator << "/"
                      << scale.denominator << " Output: " << *out_width << "x"
                      << *out_height << " i"
@@ -267,9 +277,17 @@ void VideoAdapter::OnResolutionFramerate
     int max_framerate_fps) {
   rtc::CritScope cs(&critical_section_);
   resolution_request_max_pixel_count_ = max_pixel_count;
   resolution_request_target_pixel_count_ =
       target_pixel_count.value_or(resolution_request_max_pixel_count_);
   max_framerate_request_ = max_framerate_fps;
 }
 
+void VideoAdapter::OnScaleResolutionBy(
+    rtc::Optional<float> scale_resolution_by) {
+  rtc::CritScope cs(&critical_section_);
+  scale_resolution_by_ = scale_resolution_by.value_or(1.0);
+  RTC_DCHECK_GE(scale_resolution_by_, 1.0);
+  scale_ = static_cast<bool>(scale_resolution_by);
+}
+
 }  // namespace cricket
--- a/media/webrtc/trunk/webrtc/media/base/videoadapter.h
+++ b/media/webrtc/trunk/webrtc/media/base/videoadapter.h
@@ -57,16 +57,20 @@ class VideoAdapter {
   // framerate rather than resolution.
   // Set |max_pixel_count| and/or |max_framerate_fps| to
   // std::numeric_limit<int>::max() if no upper limit is desired.
   void OnResolutionFramerateRequest(
       const rtc::Optional<int>& target_pixel_count,
       int max_pixel_count,
       int max_framerate_fps);
 
+  // Requests the output frame size from |AdaptFrameResolution| be scaled
+  // down from the input by a factor of scale_resolution_by (min 1.0)
+  virtual void OnScaleResolutionBy(rtc::Optional<float> scale_resolution_by);
+
  private:
   // Determine if frame should be dropped based on input fps and requested fps.
   bool KeepFrame(int64_t in_timestamp_ns);
 
   int frames_in_;         // Number of input frames.
   int frames_out_;        // Number of output frames.
   int frames_scaled_;     // Number of frames scaled.
   int adaption_changes_;  // Number of changes in scale factor.
@@ -81,16 +85,18 @@ class VideoAdapter {
   // Max number of pixels requested via calls to OnOutputFormatRequest,
   // OnResolutionRequest respectively.
   // The adapted output format is the minimum of these.
   rtc::Optional<VideoFormat> requested_format_
       RTC_GUARDED_BY(critical_section_);
   int resolution_request_target_pixel_count_ RTC_GUARDED_BY(critical_section_);
   int resolution_request_max_pixel_count_ RTC_GUARDED_BY(critical_section_);
   int max_framerate_request_ RTC_GUARDED_BY(critical_section_);
+  float scale_resolution_by_ RTC_GUARDED_BY(critical_section_);
+  bool scale_ RTC_GUARDED_BY(critical_section_);
 
   // The critical section to protect the above variables.
   rtc::CriticalSection critical_section_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(VideoAdapter);
 };
 
 }  // namespace cricket
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/acm2/acm_receiver.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -97,16 +97,17 @@ int AcmReceiver::InsertPacket(const WebR
       if (last_audio_decoder_ && last_audio_decoder_->channels > 1) {
         // This is a CNG and the audio codec is not mono, so skip pushing in
         // packets into NetEq.
         return 0;
       }
     } else {
       last_audio_decoder_ = ci;
       last_audio_format_ = neteq_->GetDecoderFormat(ci->pltype);
+      last_audio_format_clockrate_hz_ = last_audio_format_ ? last_audio_format_->clockrate_hz : 0;
       RTC_DCHECK(last_audio_format_);
       last_packet_sample_rate_hz_ = ci->plfreq;
     }
   }  // |crit_sect_| is released.
 
   if (neteq_->InsertPacket(rtp_header.header, incoming_payload,
                            receive_timestamp) < 0) {
     RTC_LOG(LERROR) << "AcmReceiver::InsertPacket "
@@ -116,18 +117,16 @@ int AcmReceiver::InsertPacket(const WebR
   }
   return 0;
 }
 
 int AcmReceiver::GetAudio(int desired_freq_hz,
                           AudioFrame* audio_frame,
                           bool* muted) {
   RTC_DCHECK(muted);
-  // Accessing members, take the lock.
-  rtc::CritScope lock(&crit_sect_);
 
   if (neteq_->GetAudio(audio_frame, muted) != NetEq::kOK) {
     RTC_LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
     return -1;
   }
 
   const int current_sample_rate_hz = neteq_->last_output_sample_rate_hz();
 
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/acm2/acm_receiver.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/acm2/acm_receiver.h
@@ -10,16 +10,17 @@
 
 #ifndef MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
 #define MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
 
 #include <map>
 #include <memory>
 #include <string>
 #include <vector>
+#include <atomic>
 
 #include "api/array_view.h"
 #include "api/optional.h"
 #include "common_audio/vad/include/webrtc_vad.h"
 #include "modules/audio_coding/acm2/acm_resampler.h"
 #include "modules/audio_coding/acm2/call_statistics.h"
 #include "modules/audio_coding/include/audio_coding_module.h"
 #include "modules/audio_coding/neteq/include/neteq.h"
@@ -279,21 +280,25 @@ class AcmReceiver {
       const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
 
   uint32_t NowInTimestamp(int decoder_sampling_rate) const;
 
   rtc::CriticalSection crit_sect_;
   rtc::Optional<CodecInst> last_audio_decoder_ RTC_GUARDED_BY(crit_sect_);
   rtc::Optional<SdpAudioFormat> last_audio_format_ RTC_GUARDED_BY(crit_sect_);
   ACMResampler resampler_ RTC_GUARDED_BY(crit_sect_);
-  std::unique_ptr<int16_t[]> last_audio_buffer_ RTC_GUARDED_BY(crit_sect_);
+
+  // After construction, this is only ever touched on the thread that calls
+  // AcmReceiver::GetAudio, and only modified in this method.
+  std::unique_ptr<int16_t[]> last_audio_buffer_;
   CallStatistics call_stats_ RTC_GUARDED_BY(crit_sect_);
   const std::unique_ptr<NetEq> neteq_;  // NetEq is thread-safe; no lock needed.
   const Clock* const clock_;
-  bool resampled_last_output_frame_ RTC_GUARDED_BY(crit_sect_);
+  std::atomic<bool> resampled_last_output_frame_;
   rtc::Optional<int> last_packet_sample_rate_hz_ RTC_GUARDED_BY(crit_sect_);
+  std::atomic<int> last_audio_format_clockrate_hz_;
 };
 
 }  // namespace acm2
 
 }  // namespace webrtc
 
 #endif  // MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -1063,16 +1063,20 @@ int AudioCodingModuleImpl::ReceiveCodec(
   return receiver_.LastAudioCodec(current_codec);
 }
 
 rtc::Optional<SdpAudioFormat> AudioCodingModuleImpl::ReceiveFormat() const {
   rtc::CritScope lock(&acm_crit_sect_);
   return receiver_.LastAudioFormat();
 }
 
+int AudioCodingModuleImpl::ReceiveSampleRate() const {
+  return receiver_.LastAudioSampleRate();
+}
+
 // Incoming packet from network parsed and ready for decode.
 int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
                                           const size_t payload_length,
                                           const WebRtcRTPHeader& rtp_header) {
   RTC_DCHECK_EQ(payload_length == 0, incoming_payload == nullptr);
   return receiver_.InsertPacket(
       rtp_header,
       rtc::ArrayView<const uint8_t>(incoming_payload, payload_length));
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -31,64 +31,72 @@
 #include "modules/audio_coding/neteq/dtmf_tone_generator.h"
 
 #include "rtc_base/arraysize.h"
 #include "rtc_base/checks.h"
 
 namespace webrtc {
 
 // The filter coefficient a = 2*cos(2*pi*f/fs) for the low frequency tone, for
-// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
+// sample rates fs = {8000, 16000, 32000, 44100, 48000} Hz, and events 0 through 15.
 // Values are in Q14.
-const int DtmfToneGenerator::kCoeff1[4][16] = {
+const int DtmfToneGenerator::kCoeff1[NumDtmfSampleRates][16] = {
     { 24219, 27980, 27980, 27980, 26956, 26956, 26956, 25701, 25701, 25701,
       24219, 24219, 27980, 26956, 25701, 24219 },
     { 30556, 31548, 31548, 31548, 31281, 31281, 31281, 30951, 30951, 30951,
       30556, 30556, 31548, 31281, 30951, 30556 },
     { 32210, 32462, 32462, 32462, 32394, 32394, 32394, 32311, 32311, 32311,
       32210, 32210, 32462, 32394, 32311, 32210 },
+    { 32474, 32607, 32607, 32607, 32571, 32571, 32571, 32527, 32527, 32527,
+      32474, 32474, 32607, 32571, 32527, 32474 },
     { 32520, 32632, 32632, 32632, 32602, 32602, 32602, 32564, 32564, 32564,
       32520, 32520, 32632, 32602, 32564, 32520 } };
 
 // The filter coefficient a = 2*cos(2*pi*f/fs) for the high frequency tone, for
-// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
+// sample rates fs = {8000, 16000, 32000, 44100, 48000} Hz, and events 0 through 15.
 // Values are in Q14.
-const int DtmfToneGenerator::kCoeff2[4][16] = {
+const int DtmfToneGenerator::kCoeff2[NumDtmfSampleRates][16] = {
     { 16325, 19073, 16325, 13085, 19073, 16325, 13085, 19073, 16325, 13085,
       19073, 13085, 9315, 9315, 9315, 9315},
     { 28361, 29144, 28361, 27409, 29144, 28361, 27409, 29144, 28361, 27409,
       29144, 27409, 26258, 26258, 26258, 26258},
     { 31647, 31849, 31647, 31400, 31849, 31647, 31400, 31849, 31647, 31400,
       31849, 31400, 31098, 31098, 31098, 31098},
+    { 32176, 32283, 32176, 32045, 32283, 32176, 32045, 32283, 32176, 32045,
+      32283, 32045, 31885, 31885, 31885, 31885},
     { 32268, 32359, 32268, 32157, 32359, 32268, 32157, 32359, 32268, 32157,
       32359, 32157, 32022, 32022, 32022, 32022} };
 
 // The initialization value x[-2] = sin(2*pi*f/fs) for the low frequency tone,
-// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
+// for sample rates fs = {8000, 16000, 32000, 44100, 48000} Hz, and events 0-15.
 // Values are in Q14.
-const int DtmfToneGenerator::kInitValue1[4][16] = {
+const int DtmfToneGenerator::kInitValue1[NumDtmfSampleRates][16] = {
     { 11036, 8528, 8528, 8528, 9315, 9315, 9315, 10163, 10163, 10163, 11036,
       11036, 8528, 9315, 10163, 11036},
     { 5918, 4429, 4429, 4429, 4879, 4879, 4879, 5380, 5380, 5380, 5918, 5918,
       4429, 4879, 5380, 5918},
     { 3010, 2235, 2235, 2235, 2468, 2468, 2468, 2728, 2728, 2728, 3010, 3010,
       2235, 2468, 2728, 3010},
+    { 2190, 1624, 1624, 1624, 1794, 1794, 1794, 1984, 1984, 1984, 2190, 2190,
+      1624, 1794, 1984, 2190},
     { 2013, 1493, 1493, 1493, 1649, 1649, 1649, 1823, 1823, 1823, 2013, 2013,
       1493, 1649, 1823, 2013 } };
 
 // The initialization value x[-2] = sin(2*pi*f/fs) for the high frequency tone,
-// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
+// for sample rates fs = {8000, 16000, 32000, 44100, 48000} Hz, and events 0-15.
 // Values are in Q14.
-const int DtmfToneGenerator::kInitValue2[4][16] = {
+const int DtmfToneGenerator::kInitValue2[NumDtmfSampleRates][16] = {
     { 14206, 13323, 14206, 15021, 13323, 14206, 15021, 13323, 14206, 15021,
       13323, 15021, 15708, 15708, 15708, 15708},
     { 8207, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8979,
       9801, 9801, 9801, 9801},
     { 4249, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4685,
       5164, 5164, 5164, 5164},
+    { 3100, 2808, 3100, 3422, 2808, 3100, 3422, 2808, 3100, 3422, 2808, 3422,
+      3778, 3778, 3778, 3778},
     { 2851, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 3148,
       3476, 3476, 3476, 3476} };
 
 // Amplitude multipliers for volume values 0 through 63, corresponding to
 // 0 dBm0 through -63 dBm0. Values are in Q14.
 // for a in range(0, 64):
 //   print round(16141.0 * 10**(-float(a)/20))
 const int DtmfToneGenerator::kAmplitude[64] = {
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
@@ -12,36 +12,46 @@
 #define MODULES_AUDIO_CODING_NETEQ_DTMF_TONE_GENERATOR_H_
 
 #include "modules/audio_coding/neteq/audio_multi_vector.h"
 #include "rtc_base/constructormagic.h"
 #include "typedefs.h"  // NOLINT(build/include)
 
 namespace webrtc {
 
+const int NumDtmfSampleRates = 5;
+const int DtmfSampleRates[NumDtmfSampleRates] = {
+  8000,
+  16000,
+  32000,
+  44100,
+  48000
+};
+
 // This class provides a generator for DTMF tones.
 class DtmfToneGenerator {
  public:
   enum ReturnCodes {
     kNotInitialized = -1,
     kParameterError = -2,
   };
 
   DtmfToneGenerator();
   virtual ~DtmfToneGenerator() {}
   virtual int Init(int fs, int event, int attenuation);
   virtual void Reset();
   virtual int Generate(size_t num_samples, AudioMultiVector* output);
   virtual bool initialized() const;
 
  private:
-  static const int kCoeff1[4][16];  // 1st oscillator model coefficient table.
-  static const int kCoeff2[4][16];  // 2nd oscillator model coefficient table.
-  static const int kInitValue1[4][16];  // Initialization for 1st oscillator.
-  static const int kInitValue2[4][16];  // Initialization for 2nd oscillator.
+  static const int kCoeff1[NumDtmfSampleRates][16];  // 1st oscillator model coefficient table.
+  static const int kCoeff2[NumDtmfSampleRates][16];  // 2nd oscillator model coefficient table.
+  static const int kInitValue1[NumDtmfSampleRates][16];  // Initialization for 1st oscillator.
+  static const int kInitValue2[NumDtmfSampleRates][16];  // Initialization for 2nd oscillator.
+
   static const int kAmplitude[64];  // Amplitude for 0 through -63 dBm0.
   static const int16_t kAmpMultiplier = 23171;  // 3 dB attenuation (in Q15).
 
   bool initialized_;            // True if generator is initialized properly.
   int coeff1_;                  // 1st oscillator coefficient for this event.
   int coeff2_;                  // 2nd oscillator coefficient for this event.
   int amplitude_;               // Amplitude for this event.
   int16_t sample_history1_[2];  // Last 2 samples for the 1st oscillator.
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/merge.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/merge.cc
@@ -209,16 +209,22 @@ size_t Merge::GetExpandedSignal(size_t* 
   return required_length;
 }
 
 int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
                              const int16_t* expanded_signal) const {
   // Adjust muting factor if new vector is more or less of the BGN energy.
   const auto mod_input_length = rtc::SafeMin<size_t>(
       64 * rtc::dchecked_cast<size_t>(fs_mult_), input_length);
+
+  // No input samples to compare against; return 1.0 in Q14 (no muting).
+  if (mod_input_length == 0) {
+    return 16384;
+  }
+
   const int16_t expanded_max =
       WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
   int32_t factor = (expanded_max * expanded_max) /
       (std::numeric_limits<int32_t>::max() /
           static_cast<int32_t>(mod_input_length));
   const int expanded_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
   int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
                                                           expanded_signal,
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -424,17 +424,16 @@ rtc::Optional<uint32_t> NetEqImpl::GetPl
     // RTP packet. Also, the RTP timestamp is not accurate while playing CNG,
     // which is indicated by returning an empty value.
     return rtc::nullopt;
   }
   return timestamp_scaler_->ToExternal(playout_timestamp_);
 }
 
 int NetEqImpl::last_output_sample_rate_hz() const {
-  rtc::CritScope lock(&crit_sect_);
   return last_output_sample_rate_hz_;
 }
 
 rtc::Optional<CodecInst> NetEqImpl::GetDecoder(int payload_type) const {
   rtc::CritScope lock(&crit_sect_);
   const DecoderDatabase::DecoderInfo* di =
       decoder_database_->GetDecoderInfo(payload_type);
   if (!di) {
--- a/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -407,17 +407,17 @@ class NetEqImpl : public webrtc::NetEq {
   std::unique_ptr<PreemptiveExpand> preemptive_expand_
       RTC_GUARDED_BY(crit_sect_);
   RandomVector random_vector_ RTC_GUARDED_BY(crit_sect_);
   std::unique_ptr<ComfortNoise> comfort_noise_ RTC_GUARDED_BY(crit_sect_);
   Rtcp rtcp_ RTC_GUARDED_BY(crit_sect_);
   StatisticsCalculator stats_ RTC_GUARDED_BY(crit_sect_);
   int fs_hz_ RTC_GUARDED_BY(crit_sect_);
   int fs_mult_ RTC_GUARDED_BY(crit_sect_);
-  int last_output_sample_rate_hz_ RTC_GUARDED_BY(crit_sect_);
+  std::atomic<int> last_output_sample_rate_hz_;
   size_t output_size_samples_ RTC_GUARDED_BY(crit_sect_);
   size_t decoder_frame_length_ RTC_GUARDED_BY(crit_sect_);
   Modes last_mode_ RTC_GUARDED_BY(crit_sect_);
   Operations last_operation_ RTC_GUARDED_BY(crit_sect_);
   std::unique_ptr<int16_t[]> mute_factor_array_ RTC_GUARDED_BY(crit_sect_);
   size_t decoded_buffer_length_ RTC_GUARDED_BY(crit_sect_);
   std::unique_ptr<int16_t[]> decoded_buffer_ RTC_GUARDED_BY(crit_sect_);
   uint32_t playout_timestamp_ RTC_GUARDED_BY(crit_sect_);
--- a/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.h
@@ -6,18 +6,18 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
 #define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
 
-#include "webrtc/system_wrappers/include/atomic32.h"
-#include "webrtc/typedefs.h"
+#include "system_wrappers/include/atomic32.h"
+#include "typedefs.h"
 
 namespace webrtc {
 
 // Implements a lock-free FIFO losely based on
 // http://src.chromium.org/viewvc/chrome/trunk/src/media/base/audio_fifo.cc
 // Note that this class assumes there is one producer (writer) and one
 // consumer (reader) thread.
 class SingleRwFifo {
--- a/media/webrtc/trunk/webrtc/modules/audio_processing/logging/apm_data_dumper.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_processing/logging/apm_data_dumper.h
@@ -28,17 +28,17 @@
 #endif
 
 namespace webrtc {
 
 #if WEBRTC_APM_DEBUG_DUMP == 1
 // Functor used to use as a custom deleter in the map of file pointers to raw
 // files.
 struct RawFileCloseFunctor {
-  void operator()(FILE* f) const { fclose(f); }
+  void operator()(FILE* f) const { if (f) fclose(f); }
 };
 #endif
 
 // Class that handles dumping of variables into files.
 class ApmDataDumper {
  public:
   // Constructor that takes an instance index that may
   // be used to distinguish data dumped from different
@@ -47,52 +47,69 @@ class ApmDataDumper {
 
   ~ApmDataDumper();
 
   // Reinitializes the data dumping such that new versions
   // of all files being dumped to are created.
   void InitiateNewSetOfRecordings() {
 #if WEBRTC_APM_DEBUG_DUMP == 1
     ++recording_set_index_;
+    debug_written_ = 0;
 #endif
   }
 
   // Methods for performing dumping of data of various types into
   // various formats.
   void DumpRaw(const char* name, double v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(&v, sizeof(v), 1, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(&v, sizeof(v), 1, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, size_t v_length, const double* v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(v, sizeof(v[0]), v_length, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(v, sizeof(v[0]), v_length, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, rtc::ArrayView<const double> v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
     DumpRaw(name, v.size(), v.data());
 #endif
   }
 
   void DumpRaw(const char* name, float v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(&v, sizeof(v), 1, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(&v, sizeof(v), 1, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, size_t v_length, const float* v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(v, sizeof(v[0]), v_length, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(v, sizeof(v[0]), v_length, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, rtc::ArrayView<const float> v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
     DumpRaw(name, v.size(), v.data());
 #endif
   }
@@ -100,92 +117,128 @@ class ApmDataDumper {
   void DumpRaw(const char* name, bool v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
     DumpRaw(name, static_cast<int16_t>(v));
 #endif
   }
 
   void DumpRaw(const char* name, size_t v_length, const bool* v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    for (size_t k = 0; k < v_length; ++k) {
-      int16_t value = static_cast<int16_t>(v[k]);
-      fwrite(&value, sizeof(value), 1, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        for (size_t k = 0; k < v_length; ++k) {
+          int16_t value = static_cast<int16_t>(v[k]);
+          fwrite(&value, sizeof(value), 1, file);
+        }
+      }
     }
 #endif
   }
 
   void DumpRaw(const char* name, rtc::ArrayView<const bool> v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
     DumpRaw(name, v.size(), v.data());
 #endif
   }
 
   void DumpRaw(const char* name, int16_t v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(&v, sizeof(v), 1, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(&v, sizeof(v), 1, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, size_t v_length, const int16_t* v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(v, sizeof(v[0]), v_length, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(v, sizeof(v[0]), v_length, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, rtc::ArrayView<const int16_t> v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
     DumpRaw(name, v.size(), v.data());
 #endif
   }
 
   void DumpRaw(const char* name, int32_t v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(&v, sizeof(v), 1, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(&v, sizeof(v), 1, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, size_t v_length, const int32_t* v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(v, sizeof(v[0]), v_length, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(v, sizeof(v[0]), v_length, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, size_t v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(&v, sizeof(v), 1, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(&v, sizeof(v), 1, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, size_t v_length, const size_t* v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    FILE* file = GetRawFile(name);
-    fwrite(v, sizeof(v[0]), v_length, file);
+    if (webrtc::Trace::aec_debug()) {
+      FILE* file = GetRawFile(name);
+      if (file) {
+        fwrite(v, sizeof(v[0]), v_length, file);
+      }
+    }
 #endif
   }
 
   void DumpRaw(const char* name, rtc::ArrayView<const int32_t> v) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
     DumpRaw(name, v.size(), v.data());
 #endif
   }
 
   void DumpWav(const char* name,
                size_t v_length,
                const float* v,
                int sample_rate_hz,
                int num_channels) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
-    WavWriter* file = GetWavFile(name, sample_rate_hz, num_channels);
-    file->WriteSamples(v, v_length);
+    if (webrtc::Trace::aec_debug()) {
+      WavWriter* file = GetWavFile(name, sample_rate_hz, num_channels);
+      file->WriteSamples(v, v_length);
+      // Cheat and use aec_near as a stand-in for "size of the largest file"
+      // in the dump.  We're looking to limit the total time, and that's a
+      // reasonable stand-in.
+      if (strcmp(name, "aec_near") == 0) {
+        updateDebugWritten(v_length * sizeof(float));
+      }
+    }
 #endif
   }
 
   void DumpWav(const char* name,
                rtc::ArrayView<const float> v,
                int sample_rate_hz,
                int num_channels) {
 #if WEBRTC_APM_DEBUG_DUMP == 1
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.cc
@@ -3,18 +3,18 @@
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
 
-#include "webrtc/modules/desktop_capture/app_capturer.h"
-#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/app_capturer.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
 
 namespace webrtc {
 
 // static
 AppCapturer* AppCapturer::Create() {
   return Create(DesktopCaptureOptions::CreateDefault());
 }
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer.h
@@ -9,19 +9,19 @@
 */
 
 #ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_APP_CAPTURER_H_
 #define WEBRTC_MODULES_DESKTOP_CAPTURE_APP_CAPTURER_H_
 
 #include <vector>
 #include <string>
 
-#include "webrtc/modules/desktop_capture/desktop_capture_types.h"
-#include "webrtc/modules/desktop_capture/desktop_capturer.h"
-#include "webrtc/typedefs.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "typedefs.h"
 
 namespace webrtc {
 
 class DesktopCaptureOptions;
 
 class AppCapturer : public DesktopCapturer {
 public:
     typedef webrtc::ProcessId ProcessId;
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_win.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_win.cc
@@ -3,28 +3,27 @@
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
 
-#include "webrtc/modules/desktop_capture/app_capturer.h"
-#include "webrtc/modules/desktop_capture/shared_desktop_frame.h"
-#include "webrtc/modules/desktop_capture/win/win_shared.h"
+#include "modules/desktop_capture/app_capturer.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/win/win_shared.h"
 
 #include <windows.h>
 #include <vector>
 #include <cassert>
 
-#include "webrtc/modules/desktop_capture/desktop_capturer.h"
-#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
-#include "webrtc/modules/desktop_capture/desktop_frame_win.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "modules/desktop_capture/desktop_capturer.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_frame_win.h"
 
 namespace webrtc {
 
 namespace {
 
 // Proxy over the WebRTC window capturer, to allow post-processing
 // of the frame to merge multiple window capture frames into a single frame
 class WindowsCapturerProxy : DesktopCapturer::Callback {
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_x11.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/app_capturer_x11.cc
@@ -2,36 +2,36 @@
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
-#include "webrtc/modules/desktop_capture/app_capturer.h"
-#include "webrtc/modules/desktop_capture/shared_desktop_frame.h"
-#include "webrtc/modules/desktop_capture/x11/shared_x_util.h"
+#include "modules/desktop_capture/app_capturer.h"
+#include "modules/desktop_capture/shared_desktop_frame.h"
+#include "modules/desktop_capture/x11/shared_x_util.h"
 
 #include <assert.h>
 #include <string.h>
 #include <X11/Xatom.h>
 #include <X11/extensions/Xcomposite.h>
 #include <X11/extensions/Xrender.h>
 #include <X11/Xutil.h>
 #include <X11/Xregion.h>
 
 #include <algorithm>
 
-#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
-#include "webrtc/modules/desktop_capture/desktop_frame.h"
-#include "webrtc/modules/desktop_capture/x11/shared_x_display.h"
-#include "webrtc/modules/desktop_capture/x11/x_error_trap.h"
-#include "webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/x11/shared_x_display.h"
+#include "modules/desktop_capture/x11/x_error_trap.h"
+#include "modules/desktop_capture/x11/x_server_pixel_buffer.h"
+#include "rtc_base/logging.h"
 
 namespace webrtc {
 
 namespace {
 
 class ScreenCapturerProxy : DesktopCapturer::Callback {
 public:
   ScreenCapturerProxy()
@@ -215,17 +215,17 @@ bool AppCapturerLinux::UpdateRegions() {
 
     ::Window root_window = XRootWindow(GetDisplay(), screen);
     ::Window parent;
     ::Window root_return;
     ::Window *children;
     unsigned int num_children;
     int status = XQueryTree(GetDisplay(), root_window, &root_return, &parent, &children, &num_children);
     if (status == 0) {
-      LOG(LS_ERROR) << "Failed to query for child windows for screen " << screen;
+      RTC_LOG(LS_ERROR) << "Failed to query for child windows for screen " << screen;
       continue;
     }
     for (unsigned int i = 0; i < num_children; ++i) {
       ::Window app_window = window_util_x11.GetApplicationWindow(children[i]);
       if (!app_window) {
         continue;
       }
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.cc
@@ -31,16 +31,22 @@ BlankDetectorDesktopCapturerWrapper::~Bl
     default;
 
 void BlankDetectorDesktopCapturerWrapper::Start(
     DesktopCapturer::Callback* callback) {
   capturer_->Start(this);
   callback_ = callback;
 }
 
+void BlankDetectorDesktopCapturerWrapper::Stop()
+{
+  capturer_->Stop();
+  callback_ = nullptr;
+}
+
 void BlankDetectorDesktopCapturerWrapper::SetSharedMemoryFactory(
     std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
   capturer_->SetSharedMemoryFactory(std::move(shared_memory_factory));
 }
 
 void BlankDetectorDesktopCapturerWrapper::CaptureFrame() {
   RTC_DCHECK(callback_);
   capturer_->CaptureFrame();
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/blank_detector_desktop_capturer_wrapper.h
@@ -31,16 +31,17 @@ class BlankDetectorDesktopCapturerWrappe
   // takes ownership of |capturer|. The |blank_pixel| is the unmodified color
   // returned by the |capturer|.
   BlankDetectorDesktopCapturerWrapper(std::unique_ptr<DesktopCapturer> capturer,
                                       RgbaColor blank_pixel);
   ~BlankDetectorDesktopCapturerWrapper() override;
 
   // DesktopCapturer interface.
   void Start(DesktopCapturer::Callback* callback) override;
+  void Stop() override;
   void SetSharedMemoryFactory(
       std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
   void CaptureFrame() override;
   void SetExcludedWindow(WindowId window) override;
   bool GetSourceList(SourceList* sources) override;
   bool SelectSource(SourceId id) override;
   bool FocusOnSelectedSource() override;
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
@@ -154,16 +154,23 @@ DesktopAndCursorComposer::~DesktopAndCur
 
 void DesktopAndCursorComposer::Start(DesktopCapturer::Callback* callback) {
   callback_ = callback;
   if (mouse_monitor_)
     mouse_monitor_->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION);
   desktop_capturer_->Start(this);
 }
 
+void DesktopAndCursorComposer::Stop() {
+  desktop_capturer_->Stop();
+  if (mouse_monitor_.get())
+    mouse_monitor_->Stop();
+  callback_ = NULL;
+}
+
 void DesktopAndCursorComposer::SetSharedMemoryFactory(
     std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
   desktop_capturer_->SetSharedMemoryFactory(std::move(shared_memory_factory));
 }
 
 void DesktopAndCursorComposer::CaptureFrame() {
   if (mouse_monitor_)
     mouse_monitor_->Capture();
@@ -212,9 +219,13 @@ void DesktopAndCursorComposer::OnMouseCu
 
 void DesktopAndCursorComposer::OnMouseCursorPosition(
     const DesktopVector& position) {
   if (use_desktop_relative_cursor_position_) {
     cursor_position_ = position;
   }
 }
 
+bool DesktopAndCursorComposer::FocusOnSelectedSource() {
+  return desktop_capturer_->FocusOnSelectedSource();
+}
+
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h
@@ -42,20 +42,22 @@ class DesktopAndCursorComposer : public 
   // generated by |desktop_capturer|.
   DesktopAndCursorComposer(std::unique_ptr<DesktopCapturer> desktop_capturer,
                            const DesktopCaptureOptions& options);
 
   ~DesktopAndCursorComposer() override;
 
   // DesktopCapturer interface.
   void Start(DesktopCapturer::Callback* callback) override;
+  void Stop() override;
   void SetSharedMemoryFactory(
       std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
   void CaptureFrame() override;
   void SetExcludedWindow(WindowId window) override;
+  bool FocusOnSelectedSource() override;
 
  private:
   // Allows test cases to use a fake MouseCursorMonitor implementation.
   friend class DesktopAndCursorComposerTest<true>;
   friend class DesktopAndCursorComposerTest<false>;
 
   // Constructor to delegate both deprecated and new constructors and allows
   // test cases to use a fake MouseCursorMonitor implementation.
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc
@@ -107,17 +107,17 @@ class FakeMouseMonitor : public MouseCur
   }
 
   void SetHotspot(const DesktopVector& hotspot) {
     if (!hotspot_.equals(hotspot))
       changed_ = true;
     hotspot_ = hotspot;
   }
 
-  void Start(Callback* callback, Mode mode) { callback_ = callback; }
+  void Init(Callback* callback, Mode mode) { callback_ = callback; }
   void Stop() override {};
 
   void Capture() override {
     if (changed_) {
       std::unique_ptr<DesktopFrame> image(
           new BasicDesktopFrame(DesktopSize(kCursorWidth, kCursorHeight)));
       uint32_t* data = reinterpret_cast<uint32_t*>(image->data());
       memset(data, 0, image->stride() * kCursorHeight);
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture_types.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_capture_types.h
@@ -6,16 +6,19 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_TYPES_H_
 #define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_TYPES_H_
 
+#ifndef XP_WIN
+#include <sys/types.h> // pid_t
+#endif
 #include <stdint.h>
 
 #include "typedefs.h"  // NOLINT(build/include)
 
 namespace webrtc {
 
 // Type used to identify windows on the desktop. Values are platform-specific:
 //   - On Windows: HWND cast to intptr_t.
@@ -33,16 +36,23 @@ const WindowId kNullWindowId = 0;
 // implementation to another usually won't work correctly.
 typedef intptr_t ScreenId;
 
 // The screen id corresponds to all screen combined together.
 const ScreenId kFullDesktopScreenId = -1;
 
 const ScreenId kInvalidScreenId = -2;
 
+typedef intptr_t ProcessId;
+const ProcessId DesktopProcessId = 0;
+
+#ifdef XP_WIN
+typedef int pid_t;
+#endif
+
 // An integer to attach to each DesktopFrame to differentiate the generator of
 // the frame.
 namespace DesktopCapturerId {
   constexpr uint32_t CreateFourCC(char a, char b, char c, char d) {
     return ((static_cast<uint32_t>(a)) |
             (static_cast<uint32_t>(b) << 8) |
             (static_cast<uint32_t>(c) << 16) |
             (static_cast<uint32_t>(d) << 24));
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.cc
@@ -1,13 +1,13 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "webrtc/modules/desktop_capture/desktop_device_info.h"
+#include "modules/desktop_capture/desktop_device_info.h"
 
 #include <cstddef>
 #include <cstdlib>
 #include <cstdio>
 #include <cstring>
 
 namespace webrtc {
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_device_info.h
@@ -1,17 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_DEVICE_INFO_H_
 #define WEBRTC_MODULES_DESKTOP_CAPTURE_DEVICE_INFO_H_
 
 #include <map>
-#include "webrtc/modules/desktop_capture/desktop_capture_types.h"
+#include "modules/desktop_capture/desktop_capture_types.h"
 
 namespace webrtc {
 
 class DesktopDisplayDevice {
 public:
   DesktopDisplayDevice();
   ~DesktopDisplayDevice();
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_frame_rotation.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/desktop_frame_rotation.cc
@@ -7,17 +7,17 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "modules/desktop_capture/desktop_frame_rotation.h"
 
 #include <string.h>
 
-#include "third_party/libyuv/include/libyuv/rotate_argb.h"
+#include "libyuv/rotate_argb.h"
 #include "rtc_base/checks.h"
 
 namespace webrtc {
 
 namespace {
 
 libyuv::RotationMode ToLibyuvRotationMode(Rotation rotation) {
   switch (rotation) {
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/fake_desktop_capturer.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/fake_desktop_capturer.h
@@ -48,16 +48,17 @@ class FakeDesktopCapturer : public Deskt
   int num_frames_captured() const;
 
   // Count of CaptureFrame() calls have been made. This field would never be
   // negative.
   int num_capture_attempts() const;
 
   // DesktopCapturer interface
   void Start(DesktopCapturer::Callback* callback) override;
+  void Stop() override {};
   void CaptureFrame() override;
   void SetSharedMemoryFactory(
       std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
   bool GetSourceList(DesktopCapturer::SourceList* sources) override;
   bool SelectSource(DesktopCapturer::SourceId id) override;
 
  private:
   static constexpr DesktopCapturer::SourceId kWindowId = 1378277495;
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc
@@ -84,16 +84,21 @@ void FallbackDesktopCapturerWrapper::Sta
   main_capturer_->Start(this);
   // For the secondary capturer, we do not have a backup plan anymore, so
   // FallbackDesktopCapturerWrapper won't check its return value any more. It
   // will directly return to the input |callback|.
   secondary_capturer_->Start(callback);
   callback_ = callback;
 }
 
+void FallbackDesktopCapturerWrapper::Stop()
+{
+  callback_ = nullptr;
+}
+
 void FallbackDesktopCapturerWrapper::SetSharedMemoryFactory(
     std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
   shared_memory_factory_ = std::move(shared_memory_factory);
   if (shared_memory_factory_) {
     main_capturer_->SetSharedMemoryFactory(
         SharedMemoryFactoryProxy::Create(shared_memory_factory_.get()));
     secondary_capturer_->SetSharedMemoryFactory(
         SharedMemoryFactoryProxy::Create(shared_memory_factory_.get()));
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/fallback_desktop_capturer_wrapper.h
@@ -32,16 +32,17 @@ class FallbackDesktopCapturerWrapper fin
  public:
   FallbackDesktopCapturerWrapper(
       std::unique_ptr<DesktopCapturer> main_capturer,
       std::unique_ptr<DesktopCapturer> secondary_capturer);
   ~FallbackDesktopCapturerWrapper() override;
 
   // DesktopCapturer interface.
   void Start(DesktopCapturer::Callback* callback) override;
+  void Stop() override;
   void SetSharedMemoryFactory(
       std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
   void CaptureFrame() override;
   void SetExcludedWindow(WindowId window) override;
   bool GetSourceList(SourceList* sources) override;
   bool SelectSource(SourceId id) override;
   bool FocusOnSelectedSource() override;
   bool IsOccluded(const DesktopVector& pos) override;
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/mac/desktop_configuration.mm
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/mac/desktop_configuration.mm
@@ -9,26 +9,16 @@
  */
 
 #include "modules/desktop_capture/mac/desktop_configuration.h"
 
 #include <math.h>
 #include <algorithm>
 #include <Cocoa/Cocoa.h>
 
-#if !defined(MAC_OS_X_VERSION_10_7) || \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-
-@interface NSScreen (LionAPI)
-- (CGFloat)backingScaleFactor;
-- (NSRect)convertRectToBacking:(NSRect)aRect;
-@end
-
-#endif  // MAC_OS_X_VERSION_10_7
-
 namespace webrtc {
 
 namespace {
 
 DesktopRect NSRectToDesktopRect(const NSRect& ns_rect) {
   return DesktopRect::MakeLTRB(
       static_cast<int>(floor(ns_rect.origin.x)),
       static_cast<int>(floor(ns_rect.origin.y)),
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/mac/window_list_utils.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/mac/window_list_utils.cc
@@ -143,16 +143,29 @@ bool GetWindowList(rtc::FunctionView<boo
     }
 
     CFNumberRef window_layer = reinterpret_cast<CFNumberRef>(
         CFDictionaryGetValue(window, kCGWindowLayer));
     if (!window_layer) {
       continue;
     }
 
+    // Skip windows with zero area.
+    CFDictionaryRef bounds_ref = reinterpret_cast<CFDictionaryRef>(
+         CFDictionaryGetValue(window, kCGWindowBounds));
+    CGRect bounds_rect;
+    if(!(bounds_ref) ||
+      !(CGRectMakeWithDictionaryRepresentation(bounds_ref, &bounds_rect))){
+      continue;
+    }
+    bounds_rect = CGRectStandardize(bounds_rect);
+    if((bounds_rect.size.width <= 0) || (bounds_rect.size.height <= 0)){
+      continue;
+    }
+
     // Skip windows with layer=0 (menu, dock).
     // TODO(zijiehe): The windows with layer != 0 are skipped, is this a bug in
     // code (not likely) or a bug in comments? What's the meaning of window
     // layer number in the first place.
     int layer;
     if (!CFNumberGetValue(window_layer, kCFNumberIntType, &layer)) {
       continue;
     }
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor.h
@@ -92,16 +92,19 @@ class MouseCursorMonitor {
   // from (0, 0).
   static std::unique_ptr<MouseCursorMonitor> Create(
       const DesktopCaptureOptions& options);
 
   // Initializes the monitor with the |callback|, which must remain valid until
   // capturer is destroyed.
   virtual void Init(Callback* callback, Mode mode) = 0;
 
+  // Clears the callback registered via Init().
+  virtual void Stop() = 0;
+
   // Captures current cursor shape and position (depending on the |mode| passed
   // to Init()). Calls Callback::OnMouseCursor() if cursor shape has
   // changed since the last call (or when Capture() is called for the first
   // time) and then Callback::OnMouseCursorPosition() if mode is set to
   // SHAPE_AND_POSITION.
   virtual void Capture() = 0;
 };
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_mac.mm
@@ -57,17 +57,17 @@ CGImageRef CreateScaledCGImage(CGImageRe
 
 class MouseCursorMonitorMac : public MouseCursorMonitor {
  public:
   MouseCursorMonitorMac(const DesktopCaptureOptions& options,
                         CGWindowID window_id,
                         ScreenId screen_id);
   ~MouseCursorMonitorMac() override;
 
-  void Start(Callback* callback, Mode mode) override;
+  void Init(Callback* callback, Mode mode) override;
   void Stop() override;
   void Capture() override;
 
  private:
   static void DisplaysReconfiguredCallback(CGDirectDisplayID display,
                                            CGDisplayChangeSummaryFlags flags,
                                            void *user_parameter);
   void DisplaysReconfigured(CGDirectDisplayID display,
@@ -104,16 +104,20 @@ MouseCursorMonitorMac::~MouseCursorMonit
 void MouseCursorMonitorMac::Init(Callback* callback, Mode mode) {
   assert(!callback_);
   assert(callback);
 
   callback_ = callback;
   mode_ = mode;
 }
 
+void MouseCursorMonitorMac::Stop() {
+  callback_ = NULL;
+}
+
 void MouseCursorMonitorMac::Capture() {
   assert(callback_);
 
   CursorState state = INSIDE;
 
   CGEventRef event = CGEventCreate(NULL);
   CGPoint gc_position = CGEventGetLocation(event);
   CFRelease(event);
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
@@ -61,17 +61,17 @@ class MouseCursorMonitorTest : public te
 #endif
 
 TEST_F(MouseCursorMonitorTest, MAYBE(FromScreen)) {
   std::unique_ptr<MouseCursorMonitor> capturer(
       MouseCursorMonitor::CreateForScreen(
           DesktopCaptureOptions::CreateDefault(),
           webrtc::kFullDesktopScreenId));
   assert(capturer.get());
-  capturer->Start(this, MouseCursorMonitor::SHAPE_AND_POSITION);
+  capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION);
   capturer->Capture();
 
   EXPECT_TRUE(cursor_image_.get());
   EXPECT_GE(cursor_image_->hotspot().x(), 0);
   EXPECT_LE(cursor_image_->hotspot().x(),
             cursor_image_->image()->size().width());
   EXPECT_GE(cursor_image_->hotspot().y(), 0);
   EXPECT_LE(cursor_image_->hotspot().y(),
@@ -100,31 +100,31 @@ TEST_F(MouseCursorMonitorTest, MAYBE(Fro
     cursor_image_.reset();
     position_received_ = false;
 
     std::unique_ptr<MouseCursorMonitor> capturer(
         MouseCursorMonitor::CreateForWindow(
             DesktopCaptureOptions::CreateDefault(), sources[i].id));
     assert(capturer.get());
 
-    capturer->Start(this, MouseCursorMonitor::SHAPE_AND_POSITION);
+    capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION);
     capturer->Capture();
 
     EXPECT_TRUE(cursor_image_.get());
     EXPECT_TRUE(position_received_);
   }
 }
 
 // Make sure that OnMouseCursorPosition() is not called in the SHAPE_ONLY mode.
 TEST_F(MouseCursorMonitorTest, MAYBE(ShapeOnly)) {
   std::unique_ptr<MouseCursorMonitor> capturer(
       MouseCursorMonitor::CreateForScreen(
           DesktopCaptureOptions::CreateDefault(),
           webrtc::kFullDesktopScreenId));
   assert(capturer.get());
-  capturer->Start(this, MouseCursorMonitor::SHAPE_ONLY);
+  capturer->Init(this, MouseCursorMonitor::SHAPE_ONLY);
   capturer->Capture();
 
   EXPECT_TRUE(cursor_image_.get());
   EXPECT_FALSE(position_received_);
 }
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_win.cc
@@ -19,16 +19,18 @@
 #include "modules/desktop_capture/desktop_frame.h"
 #include "modules/desktop_capture/desktop_geometry.h"
 #include "modules/desktop_capture/mouse_cursor.h"
 #include "modules/desktop_capture/win/cursor.h"
 #include "modules/desktop_capture/win/screen_capture_utils.h"
 #include "modules/desktop_capture/win/window_capture_utils.h"
 #include "rtc_base/logging.h"
 
+#include <windows.h>
+
 namespace webrtc {
 
 namespace {
 
 bool IsSameCursorShape(const CURSORINFO& left, const CURSORINFO& right) {
   // If the cursors are not showing, we do not care the hCursor handle.
   return left.flags == right.flags &&
          (left.flags != CURSOR_SHOWING ||
@@ -38,17 +40,17 @@ bool IsSameCursorShape(const CURSORINFO&
 }  // namespace
 
 class MouseCursorMonitorWin : public MouseCursorMonitor {
  public:
   explicit MouseCursorMonitorWin(HWND window);
   explicit MouseCursorMonitorWin(ScreenId screen);
   ~MouseCursorMonitorWin() override;
 
-  void Start(Callback* callback, Mode mode) override;
+  void Init(Callback* callback, Mode mode) override;
   void Stop() override;
   void Capture() override;
 
  private:
   // Get the rect of the currently selected screen, relative to the primary
   // display's top-left. If the screen is disabled or disconnected, or any error
   // happens, an empty rect is returned.
   DesktopRect GetScreenRect();
@@ -87,36 +89,47 @@ MouseCursorMonitorWin::MouseCursorMonito
 MouseCursorMonitorWin::~MouseCursorMonitorWin() {
   if (desktop_dc_)
     ReleaseDC(NULL, desktop_dc_);
 }
 
 void MouseCursorMonitorWin::Init(Callback* callback, Mode mode) {
   assert(!callback_);
   assert(callback);
+  assert(IsGUIThread(false));
 
   callback_ = callback;
   mode_ = mode;
 
   desktop_dc_ = GetDC(NULL);
 }
 
+void MouseCursorMonitorWin::Stop() {
+  callback_ = NULL;
+
+  if (desktop_dc_)
+    ReleaseDC(NULL, desktop_dc_);
+  desktop_dc_ = NULL;
+}
+
 void MouseCursorMonitorWin::Capture() {
+  assert(IsGUIThread(false));
   assert(callback_);
 
   CURSORINFO cursor_info;
   cursor_info.cbSize = sizeof(CURSORINFO);
   if (!GetCursorInfo(&cursor_info)) {
     RTC_LOG_F(LS_ERROR) << "Unable to get cursor info. Error = "
                         << GetLastError();
     return;
   }
 
   if (!IsSameCursorShape(cursor_info, last_cursor_)) {
-    if (cursor_info.flags == CURSOR_SUPPRESSED) {
+    // Mozilla: CURSOR_SUPPRESSED exists only on Windows 8 and above, so we
+    if (cursor_info.flags != CURSOR_SHOWING) {
       // The cursor is intentionally hidden now, send an empty bitmap.
       last_cursor_ = cursor_info;
       callback_->OnMouseCursor(new MouseCursor(
           new BasicDesktopFrame(DesktopSize()), DesktopVector()));
     } else {
       // According to MSDN https://goo.gl/u6gyuC, HCURSOR instances returned by
       // functions other than CreateCursor do not need to be actively destroyed.
       // And CloseHandle function (https://goo.gl/ja5ycW) does not close a
@@ -167,16 +180,17 @@ void MouseCursorMonitorWin::Capture() {
   }
 
   // TODO(zijiehe): Remove this overload.
   callback_->OnMouseCursorPosition(inside ? INSIDE : OUTSIDE, position);
   callback_->OnMouseCursorPosition(position);
 }
 
 DesktopRect MouseCursorMonitorWin::GetScreenRect() {
+  assert(IsGUIThread(false));
   assert(screen_ != kInvalidScreenId);
   if (screen_ == kFullDesktopScreenId) {
     return DesktopRect::MakeXYWH(
         GetSystemMetrics(SM_XVIRTUALSCREEN),
         GetSystemMetrics(SM_YVIRTUALSCREEN),
         GetSystemMetrics(SM_CXVIRTUALSCREEN),
         GetSystemMetrics(SM_CYVIRTUALSCREEN));
   }
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_x11.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/mouse_cursor_monitor_x11.cc
@@ -61,17 +61,17 @@ Window GetTopLevelWindow(Display* displa
 namespace webrtc {
 
 class MouseCursorMonitorX11 : public MouseCursorMonitor,
                               public SharedXDisplay::XEventHandler {
  public:
   MouseCursorMonitorX11(const DesktopCaptureOptions& options, Window window, Window inner_window);
   ~MouseCursorMonitorX11() override;
 
-  void Start(Callback* callback, Mode mode) override;
+  void Init(Callback* callback, Mode mode) override;
   void Stop() override;
   void Capture() override;
 
  private:
   // SharedXDisplay::XEventHandler interface.
   bool HandleXEvent(const XEvent& event) override;
 
   Display* display() { return x_display_->display(); }
@@ -123,44 +123,50 @@ MouseCursorMonitorX11::MouseCursorMonito
       *ptr++ = 0xff;
     }
   }
   DesktopVector hotspot(2, 2);
   cursor_shape_.reset(new MouseCursor(default_cursor.release(), hotspot));
 }
 
 MouseCursorMonitorX11::~MouseCursorMonitorX11() {
-  if (have_xfixes_) {
-    x_display_->RemoveEventHandler(xfixes_event_base_ + XFixesCursorNotify,
-                                   this);
-  }
+  Stop();
 }
 
 void MouseCursorMonitorX11::Init(Callback* callback, Mode mode) {
-  // Init can be called only once per instance of MouseCursorMonitor.
+  // Init may only be called while the monitor is stopped.
   RTC_DCHECK(!callback_);
   RTC_DCHECK(callback);
 
   callback_ = callback;
   mode_ = mode;
 
   have_xfixes_ =
       XFixesQueryExtension(display(), &xfixes_event_base_, &xfixes_error_base_);
 
   if (have_xfixes_) {
     // Register for changes to the cursor shape.
+    XErrorTrap error_trap(display());
     XFixesSelectCursorInput(display(), window_, XFixesDisplayCursorNotifyMask);
     x_display_->AddEventHandler(xfixes_event_base_ + XFixesCursorNotify, this);
 
     CaptureCursor();
   } else {
     RTC_LOG(LS_INFO) << "X server does not support XFixes.";
   }
 }
 
+void MouseCursorMonitorX11::Stop() {
+  callback_ = NULL;
+  if (have_xfixes_) {
+    x_display_->RemoveEventHandler(xfixes_event_base_ + XFixesCursorNotify,
+                                   this);
+  }
+}
+
 void MouseCursorMonitorX11::Capture() {
   RTC_DCHECK(callback_);
 
   // Process X11 events in case XFixes has sent cursor notification.
   x_display_->ProcessPendingXEvents();
 
   // cursor_shape_| is set only if we were notified of a cursor shape change.
   if (cursor_shape_.get())
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/screen_capturer_win.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/screen_capturer_win.cc
@@ -33,25 +33,27 @@ std::unique_ptr<DesktopCapturer> CreateS
 }
 
 }  // namespace
 
 // static
 std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawScreenCapturer(
     const DesktopCaptureOptions& options) {
   std::unique_ptr<DesktopCapturer> capturer(new ScreenCapturerWinGdi(options));
+#ifdef CAPTURE_ALLOW_DIRECTX
   if (options.allow_directx_capturer()) {
     // |dxgi_duplicator_controller| should be alive in this scope to ensure it
     // won't unload DxgiDuplicatorController.
     auto dxgi_duplicator_controller = DxgiDuplicatorController::Instance();
     if (ScreenCapturerWinDirectx::IsSupported()) {
       capturer.reset(new FallbackDesktopCapturerWrapper(
           CreateScreenCapturerWinDirectx(), std::move(capturer)));
     }
   }
+#endif
 
   if (options.allow_use_magnification_api()) {
     // ScreenCapturerWinMagnifier cannot work on Windows XP or earlier, as well
     // as 64-bit only Windows, and it may randomly crash on multi-screen
     // systems. So we may need to fallback to use original capturer.
     capturer.reset(new FallbackDesktopCapturerWrapper(
         std::unique_ptr<DesktopCapturer>(new ScreenCapturerWinMagnifier()),
         std::move(capturer)));
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/screen_capturer_x11.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/screen_capturer_x11.cc
@@ -413,12 +413,12 @@ std::unique_ptr<DesktopCapturer> Desktop
   if (!options.x_display())
     return nullptr;
 
   std::unique_ptr<ScreenCapturerLinux> capturer(new ScreenCapturerLinux());
   if (!capturer.get()->Init(options)) {
     return nullptr;
   }
 
-  return std::move(capturer);
+  return capturer;
 }
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.cc
@@ -1,15 +1,15 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "webrtc/modules/desktop_capture/win/desktop_device_info_win.h"
-#include "webrtc/modules/desktop_capture/win/screen_capture_utils.h"
-#include "webrtc/modules/desktop_capture/win/win_shared.h"
+#include "modules/desktop_capture/win/desktop_device_info_win.h"
+#include "modules/desktop_capture/win/screen_capture_utils.h"
+#include "modules/desktop_capture/win/win_shared.h"
 #include <inttypes.h>
 #include <stdio.h>
 #include <VersionHelpers.h>
 
 // Duplicating declaration so that it always resolves in decltype use
 // typedef BOOL (WINAPI *QueryFullProcessImageNameProc)(HANDLE hProcess, DWORD dwFlags, LPTSTR lpExeName, PDWORD lpdwSize);
 WINBASEAPI BOOL WINAPI QueryFullProcessImageName(HANDLE hProcess, DWORD dwFlags, LPWSTR lpExeName, PDWORD lpdwSize);
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/desktop_device_info_win.h
@@ -1,17 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_WIN_DEVICE_INFO_H_
 #define WEBRTC_MODULES_DESKTOP_CAPTURE_WIN_DEVICE_INFO_H_
 
-#include "webrtc/typedefs.h"
-#include "webrtc/modules/desktop_capture/desktop_device_info.h"
+#include "typedefs.h"
+#include "modules/desktop_capture/desktop_device_info.h"
 
 namespace webrtc {
 
 class DesktopDeviceInfoWin : public DesktopDeviceInfoImpl {
 public:
   DesktopDeviceInfoWin();
   ~DesktopDeviceInfoWin();
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capture_utils.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capture_utils.cc
@@ -37,17 +37,17 @@ bool GetScreenList(DesktopCapturer::Sour
     // |enum_result| is 0 if we have enumerated all devices.
     if (!enum_result)
       break;
 
     // We only care about active displays.
     if (!(device.StateFlags & DISPLAY_DEVICE_ACTIVE))
       continue;
 
-    screens->push_back({device_index, std::string()});
+    screens->push_back({device_index, 0, std::string()});
     if (device_names) {
       device_names->push_back(rtc::ToUtf8(device.DeviceName));
     }
   }
   return true;
 }
 
 bool IsScreenValid(DesktopCapturer::SourceId screen, std::wstring* device_key) {
@@ -69,16 +69,17 @@ DesktopRect GetFullscreenRect() {
   return DesktopRect::MakeXYWH(GetSystemMetrics(SM_XVIRTUALSCREEN),
                                GetSystemMetrics(SM_YVIRTUALSCREEN),
                                GetSystemMetrics(SM_CXVIRTUALSCREEN),
                                GetSystemMetrics(SM_CYVIRTUALSCREEN));
 }
 
 DesktopRect GetScreenRect(DesktopCapturer::SourceId screen,
                           const std::wstring& device_key) {
+  RTC_DCHECK(IsGUIThread(false));
   if (screen == kFullDesktopScreenId) {
     return GetFullscreenRect();
   }
 
   DISPLAY_DEVICE device;
   device.cb = sizeof(device);
   BOOL result = EnumDisplayDevices(NULL, screen, &device, 0);
   if (!result)
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_directx.cc
@@ -105,16 +105,20 @@ ScreenCapturerWinDirectx::~ScreenCapture
 
 void ScreenCapturerWinDirectx::Start(Callback* callback) {
   RTC_DCHECK(!callback_);
   RTC_DCHECK(callback);
 
   callback_ = callback;
 }
 
+void ScreenCapturerWinDirectx::Stop() {
+  callback_ = nullptr;
+}
+
 void ScreenCapturerWinDirectx::SetSharedMemoryFactory(
     std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
   shared_memory_factory_ = std::move(shared_memory_factory);
 }
 
 void ScreenCapturerWinDirectx::CaptureFrame() {
   RTC_DCHECK(callback_);
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_directx.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_directx.h
@@ -69,16 +69,17 @@ class ScreenCapturerWinDirectx : public 
                                   const std::vector<std::string>& device_names);
 
   explicit ScreenCapturerWinDirectx();
 
   ~ScreenCapturerWinDirectx() override;
 
   // DesktopCapturer implementation.
   void Start(Callback* callback) override;
+  void Stop() override;
   void SetSharedMemoryFactory(
       std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
   void CaptureFrame() override;
   bool GetSourceList(SourceList* sources) override;
   bool SelectSource(SourceId id) override;
 
  private:
   const rtc::scoped_refptr<DxgiDuplicatorController> controller_;
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
@@ -108,18 +108,38 @@ void ScreenCapturerWinGdi::Start(Callbac
   RTC_DCHECK(!callback_);
   RTC_DCHECK(callback);
 
   callback_ = callback;
 
   // Vote to disable Aero composited desktop effects while capturing. Windows
   // will restore Aero automatically if the process exits. This has no effect
   // under Windows 8 or higher.  See crbug.com/124018.
-  if (composition_func_)
-    (*composition_func_)(DWM_EC_DISABLECOMPOSITION);
+  if (disable_composition_) {
+    if (composition_func_)
+      (*composition_func_)(DWM_EC_DISABLECOMPOSITION);
+  }
+}
+
+void ScreenCapturerWinGdi::Stop() {
+  if (desktop_dc_) {
+    ReleaseDC(NULL, desktop_dc_);
+    desktop_dc_ = NULL;
+  }
+  if (memory_dc_) {
+    DeleteDC(memory_dc_);
+    memory_dc_ = NULL;
+  }
+
+  if (disable_composition_) {
+    // Restore Aero.
+    if (composition_func_)
+      (*composition_func_)(DWM_EC_ENABLECOMPOSITION);
+  }
+  callback_ = NULL;
 }
 
 void ScreenCapturerWinGdi::PrepareCaptureResources() {
   // Switch to the desktop receiving user input if different from the current
   // one.
   std::unique_ptr<Desktop> input_desktop(Desktop::GetInputDesktop());
   if (input_desktop && !desktop_.IsSame(*input_desktop)) {
     // Release GDI resources otherwise SetThreadDesktop will fail.
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
@@ -71,15 +71,18 @@ class ScreenCapturerWinGdi : public Desk
 
   // Queue of the frames buffers.
   ScreenCaptureFrameQueue<SharedDesktopFrame> queue_;
 
   DisplayConfigurationMonitor display_configuration_monitor_;
 
   HMODULE dwmapi_library_ = NULL;
   DwmEnableCompositionFunc composition_func_ = nullptr;
+  DwmIsCompositionEnabledFunc composition_enabled_func_;
+
+  bool disable_composition_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerWinGdi);
 };
 
 }  // namespace webrtc
 
 #endif  // MODULES_DESKTOP_CAPTURE_WIN_SCREEN_CAPTURER_WIN_GDI_H_
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
@@ -39,41 +39,55 @@ DWORD GetTlsIndex() {
 // API. The other strings can be anything.
 static LPCTSTR kMagnifierHostClass = L"ScreenCapturerWinMagnifierHost";
 static LPCTSTR kHostWindowName = L"MagnifierHost";
 static LPCTSTR kMagnifierWindowClass = L"Magnifier";
 static LPCTSTR kMagnifierWindowName = L"MagnifierWindow";
 
 ScreenCapturerWinMagnifier::ScreenCapturerWinMagnifier() = default;
 ScreenCapturerWinMagnifier::~ScreenCapturerWinMagnifier() {
-  // DestroyWindow must be called before MagUninitialize. magnifier_window_ is
-  // destroyed automatically when host_window_ is destroyed.
-  if (host_window_)
-    DestroyWindow(host_window_);
-
-  if (magnifier_initialized_)
-    mag_uninitialize_func_();
-
-  if (mag_lib_handle_)
-    FreeLibrary(mag_lib_handle_);
-
-  if (desktop_dc_)
-    ReleaseDC(NULL, desktop_dc_);
+  Stop();
 }
 
 void ScreenCapturerWinMagnifier::Start(Callback* callback) {
   RTC_DCHECK(!callback_);
   RTC_DCHECK(callback);
   callback_ = callback;
 
   if (!InitializeMagnifier()) {
     RTC_LOG_F(LS_WARNING) << "Magnifier initialization failed.";
   }
 }
 
+void ScreenCapturerWinMagnifier::Stop() {
+  callback_ = NULL;
+
+  // DestroyWindow must be called before MagUninitialize. magnifier_window_ is
+  // destroyed automatically when host_window_ is destroyed.
+  if (host_window_) {
+    DestroyWindow(host_window_);
+    host_window_ = NULL;
+  }
+
+  if (magnifier_initialized_) {
+    mag_uninitialize_func_();
+    magnifier_initialized_ = false;
+  }
+
+  if (mag_lib_handle_) {
+    FreeLibrary(mag_lib_handle_);
+    mag_lib_handle_ = NULL;
+  }
+
+  if (desktop_dc_) {
+    ReleaseDC(NULL, desktop_dc_);
+    desktop_dc_ = NULL;
+  }
+}
+
 void ScreenCapturerWinMagnifier::SetSharedMemoryFactory(
     std::unique_ptr<SharedMemoryFactory> shared_memory_factory) {
   shared_memory_factory_ = std::move(shared_memory_factory);
 }
 
 void ScreenCapturerWinMagnifier::CaptureFrame() {
   RTC_DCHECK(callback_);
   if (!magnifier_initialized_) {
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
@@ -39,16 +39,17 @@ class DesktopRect;
 // be used if that functionality is necessary.
 class ScreenCapturerWinMagnifier : public DesktopCapturer {
  public:
   ScreenCapturerWinMagnifier();
   ~ScreenCapturerWinMagnifier() override;
 
   // Overridden from ScreenCapturer:
   void Start(Callback* callback) override;
+  void Stop() override;
   void SetSharedMemoryFactory(
       std::unique_ptr<SharedMemoryFactory> shared_memory_factory) override;
   void CaptureFrame() override;
   bool GetSourceList(SourceList* screens) override;
   bool SelectSource(SourceId id) override;
   void SetExcludedWindow(WindowId window) override;
 
  private:
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/win_shared.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/win_shared.cc
@@ -1,15 +1,15 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <windows.h>
 #include <assert.h>
-#include "webrtc/modules/desktop_capture/win/win_shared.h"
+#include "modules/desktop_capture/win/win_shared.h"
 
 namespace webrtc {
 
 std::string Utf16ToUtf8(const WCHAR* str) {
     int len_utf8 = WideCharToMultiByte(CP_UTF8, 0, str, -1,
                                        NULL, 0, NULL, NULL);
     if (len_utf8 <= 0) {
         return std::string();
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/win/window_capture_utils.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/win/window_capture_utils.h
@@ -2,18 +2,21 @@
  *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
+#ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURE_UTILS_H_
+#define WEBRTC_MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURE_UTILS_H_
 
 #include <windows.h>
+#include <dwmapi.h>
 
 #include "modules/desktop_capture/desktop_geometry.h"
 #include "rtc_base/constructormagic.h"
 
 namespace webrtc {
 
 // Outputs the window rect. The returned DesktopRect is in system coordinates,
 // i.e. the primary monitor on the system always starts from (0, 0). This
@@ -52,24 +55,26 @@ int GetWindowRegionTypeWithBoundary(HWND
 // Retrieves the size of the |hdc|. This function returns false if native APIs
 // fail.
 bool GetDcSize(HDC hdc, DesktopSize* size);
 
 // Retrieves whether the |window| is maximized and stores in |result|. This
 // function returns false if native APIs fail.
 bool IsWindowMaximized(HWND window, bool* result);
 
-typedef HRESULT (WINAPI *DwmIsCompositionEnabledFunc)(BOOL* enabled);
+typedef HRESULT (WINAPI *DwmIsCompositionEnabledFunc)(BOOL*);
 class AeroChecker {
  public:
   AeroChecker();
   ~AeroChecker();
 
   bool IsAeroEnabled();
 
  private:
   HMODULE dwmapi_library_;
   DwmIsCompositionEnabledFunc func_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(AeroChecker);
 };
 
 }  // namespace webrtc
+
+#endif
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_mac.mm
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_mac.mm
@@ -50,16 +50,17 @@ class WindowCapturerMac : public Desktop
   explicit WindowCapturerMac(rtc::scoped_refptr<FullScreenChromeWindowDetector>
                                  full_screen_chrome_window_detector,
                              rtc::scoped_refptr<DesktopConfigurationMonitor>
                                  configuration_monitor);
   ~WindowCapturerMac() override;
 
   // DesktopCapturer interface.
   void Start(Callback* callback) override;
+  void Stop() override;
   void CaptureFrame() override;
   bool GetSourceList(SourceList* sources) override;
   bool SelectSource(SourceId id) override;
   bool FocusOnSelectedSource() override;
   bool IsOccluded(const DesktopVector& pos) override;
 
  private:
   Callback* callback_ = nullptr;
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_win.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_win.cc
@@ -16,22 +16,24 @@
 #include "modules/desktop_capture/desktop_frame_win.h"
 #include "modules/desktop_capture/window_finder_win.h"
 #include "modules/desktop_capture/win/screen_capture_utils.h"
 #include "modules/desktop_capture/win/window_capture_utils.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/constructormagic.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/win32.h"
+#include <VersionHelpers.h>
 
 namespace webrtc {
 
 namespace {
 
 BOOL CALLBACK WindowsEnumerationHandler(HWND hwnd, LPARAM param) {
+  assert(IsGUIThread(false));
   DesktopCapturer::SourceList* list =
       reinterpret_cast<DesktopCapturer::SourceList*>(param);
 
   // Skip windows that are invisible, minimized, have no title, or are owned,
   // unless they have the app window style set.
   int len = GetWindowTextLength(hwnd);
   HWND owner = GetWindow(hwnd, GW_OWNER);
   LONG exstyle = GetWindowLong(hwnd, GWL_EXSTYLE);
@@ -61,26 +63,37 @@ BOOL CALLBACK WindowsEnumerationHandler(
       (wcscmp(class_name, L"ApplicationFrameWindow") == 0 ||
        wcscmp(class_name, L"Windows.UI.Core.CoreWindow") == 0)) {
     return TRUE;
   }
 
   DesktopCapturer::Source window;
   window.id = reinterpret_cast<WindowId>(hwnd);
 
+  DWORD pid;
+  GetWindowThreadProcessId(hwnd, &pid);
+  window.pid = (pid_t)pid;
+
   const size_t kTitleLength = 500;
   WCHAR window_title[kTitleLength];
   // Truncate the title if it's longer than kTitleLength.
   GetWindowText(hwnd, window_title, kTitleLength);
   window.title = rtc::ToUtf8(window_title);
 
   // Skip windows when we failed to convert the title or it is empty.
   if (window.title.empty())
     return TRUE;
 
+  // Skip windows whose client area is empty, except iconic (minimized) windows.
+  RECT bounds;
+  if(GetClientRect(hwnd,&bounds) && !IsIconic(hwnd)
+    && IsRectEmpty(&bounds)){
+    return TRUE;
+  }
+
   list->push_back(window);
 
   return TRUE;
 }
 
 // Retrieves the rectangle of the window rect which is drawable by either OS or
 // the owner application. The returned DesktopRect is in system coordinates.
 // This function returns false if native APIs fail.
@@ -111,16 +124,17 @@ bool GetWindowDrawableRect(HWND window,
 
 class WindowCapturerWin : public DesktopCapturer {
  public:
   WindowCapturerWin();
   ~WindowCapturerWin() override;
 
   // DesktopCapturer interface.
   void Start(Callback* callback) override;
+  void Stop() override;
   void CaptureFrame() override;
   bool GetSourceList(SourceList* sources) override;
   bool SelectSource(SourceId id) override;
   bool FocusOnSelectedSource() override;
   bool IsOccluded(const DesktopVector& pos) override;
 
  private:
   Callback* callback_ = nullptr;
@@ -141,16 +155,17 @@ class WindowCapturerWin : public Desktop
 
   RTC_DISALLOW_COPY_AND_ASSIGN(WindowCapturerWin);
 };
 
 WindowCapturerWin::WindowCapturerWin() {}
 WindowCapturerWin::~WindowCapturerWin() {}
 
 bool WindowCapturerWin::GetSourceList(SourceList* sources) {
+  assert(IsGUIThread(false));
   SourceList result;
   LPARAM param = reinterpret_cast<LPARAM>(&result);
   // EnumWindows only enumerates root windows.
   if (!EnumWindows(&WindowsEnumerationHandler, param))
     return false;
   sources->swap(result);
 
   std::map<HWND, DesktopSize> new_map;
@@ -195,17 +210,22 @@ bool WindowCapturerWin::IsOccluded(const
 
 void WindowCapturerWin::Start(Callback* callback) {
   assert(!callback_);
   assert(callback);
 
   callback_ = callback;
 }
 
+void WindowCapturerWin::Stop() {
+  callback_ = NULL;
+}
+
 void WindowCapturerWin::CaptureFrame() {
+  assert(IsGUIThread(false));
   if (!window_) {
     RTC_LOG(LS_ERROR) << "Window hasn't been selected: " << GetLastError();
     callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
     return;
   }
 
   // Stop capturing if the window has been closed.
   if (!IsWindow(window_)) {
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_x11.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/window_capturer_x11.cc
@@ -23,44 +23,49 @@
 #include "modules/desktop_capture/x11/shared_x_display.h"
 #include "modules/desktop_capture/x11/window_list_utils.h"
 #include "modules/desktop_capture/x11/x_atom_cache.h"
 #include "modules/desktop_capture/x11/x_server_pixel_buffer.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/constructormagic.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/scoped_ref_ptr.h"
+#include "modules/desktop_capture/x11/shared_x_util.h"
 
 namespace webrtc {
 
 namespace {
 
 class WindowCapturerLinux : public DesktopCapturer,
                             public SharedXDisplay::XEventHandler {
  public:
   WindowCapturerLinux(const DesktopCaptureOptions& options);
   ~WindowCapturerLinux() override;
 
   // DesktopCapturer interface.
   void Start(Callback* callback) override;
+  void Stop() override;
   void CaptureFrame() override;
   bool GetSourceList(SourceList* sources) override;
   bool SelectSource(SourceId id) override;
   bool FocusOnSelectedSource() override;
   bool IsOccluded(const DesktopVector& pos) override;
 
   // SharedXDisplay::XEventHandler interface.
   bool HandleXEvent(const XEvent& event) override;
 
  private:
   Display* display() { return x_display_->display(); }
 
   // Returns window title for the specified X |window|.
   bool GetWindowTitle(::Window window, std::string* title);
 
+  // Returns the id of the owning process.
+  int GetWindowProcessID(::Window window);
+
   Callback* callback_ = nullptr;
 
   rtc::scoped_refptr<SharedXDisplay> x_display_;
 
   bool has_composite_extension_ = false;
 
   ::Window selected_window_ = 0;
   XServerPixelBuffer x_server_pixel_buffer_;
@@ -91,16 +96,17 @@ WindowCapturerLinux::~WindowCapturerLinu
   x_display_->RemoveEventHandler(ConfigureNotify, this);
 }
 
 bool WindowCapturerLinux::GetSourceList(SourceList* sources) {
   return GetWindowList(&atom_cache_,
                        [this, sources](::Window window) {
                          Source w;
                          w.id = window;
+                         w.pid = (pid_t)GetWindowProcessID(window);
                          if (this->GetWindowTitle(window, &w.title)) {
                            sources->push_back(w);
                          }
                          return true;
                        });
 }
 
 bool WindowCapturerLinux::SelectSource(SourceId id) {
@@ -175,25 +181,29 @@ bool WindowCapturerLinux::FocusOnSelecte
 
 void WindowCapturerLinux::Start(Callback* callback) {
   RTC_DCHECK(!callback_);
   RTC_DCHECK(callback);
 
   callback_ = callback;
 }
 
+void WindowCapturerLinux::Stop() {
+  callback_ = NULL;
+}
+
 void WindowCapturerLinux::CaptureFrame() {
+  x_display_->ProcessPendingXEvents();
+
   if (!x_server_pixel_buffer_.IsWindowValid()) {
     RTC_LOG(LS_INFO) << "The window is no longer valid.";
     callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
     return;
   }
 
-  x_display_->ProcessPendingXEvents();
-
   if (!has_composite_extension_) {
     // Without the Xcomposite extension we capture when the whole window is
     // visible on screen and not covered by any other window. This is not
     // something we want so instead, just bail out.
     RTC_LOG(LS_INFO) << "No Xcomposite extension detected.";
     callback_->OnCaptureResult(Result::ERROR_PERMANENT, nullptr);
     return;
   }
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.cc
@@ -1,20 +1,19 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "webrtc/modules/desktop_capture/x11/desktop_device_info_x11.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "modules/desktop_capture/x11/desktop_device_info_x11.h"
 #include <inttypes.h>
 #include <unistd.h>
 #include <stdio.h>
-#include "webrtc/modules/desktop_capture/x11/shared_x_util.h"
-#include "webrtc/modules/desktop_capture/x11/x_error_trap.h"
-#include "webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h"
+#include "modules/desktop_capture/x11/shared_x_util.h"
+#include "modules/desktop_capture/x11/x_error_trap.h"
+#include "modules/desktop_capture/x11/x_server_pixel_buffer.h"
 
 namespace webrtc {
 
 DesktopDeviceInfo * DesktopDeviceInfoImpl::Create() {
   DesktopDeviceInfoX11 * pDesktopDeviceInfo = new DesktopDeviceInfoX11();
   if (pDesktopDeviceInfo && pDesktopDeviceInfo->Init() != 0){
     delete pDesktopDeviceInfo;
     pDesktopDeviceInfo = NULL;
@@ -56,17 +55,17 @@ void DesktopDeviceInfoX11::InitializeApp
   for (int screen = 0; screen < num_screens; ++screen) {
     ::Window root_window = XRootWindow(SharedDisplay->display(), screen);
     ::Window parent;
     ::Window *children;
     unsigned int num_children;
     int status = XQueryTree(SharedDisplay->display(), root_window, &root_window, &parent,
         &children, &num_children);
     if (status == 0) {
-      LOG(LS_ERROR) << "Failed to query for child windows for screen " << screen;
+      RTC_LOG(LS_ERROR) << "Failed to query for child windows for screen " << screen;
       continue;
     }
 
     for (unsigned int i = 0; i < num_children; ++i) {
       ::Window app_window = window_util_x11.GetApplicationWindow(children[num_children - 1 - i]);
 
       if (!app_window
           || window_util_x11.IsDesktopElement(app_window)
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/desktop_device_info_x11.h
@@ -4,18 +4,18 @@
 
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_X11_DEVICE_INFO_H_
 #define WEBRTC_MODULES_DESKTOP_CAPTURE_X11_DEVICE_INFO_H_
 
-#include "webrtc/typedefs.h"
-#include "webrtc/modules/desktop_capture/desktop_device_info.h"
+#include "typedefs.h"
+#include "modules/desktop_capture/desktop_device_info.h"
 
 namespace webrtc {
 
 class DesktopDeviceInfoX11 : public DesktopDeviceInfoImpl {
 public:
   DesktopDeviceInfoX11();
   ~DesktopDeviceInfoX11();
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.cc
@@ -3,17 +3,17 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "webrtc/modules/desktop_capture/x11/shared_x_util.h"
+#include "modules/desktop_capture/x11/shared_x_util.h"
 
 namespace webrtc {
 
 WindowUtilX11::WindowUtilX11(rtc::scoped_refptr<SharedXDisplay> x_display) {
   x_display_ = x_display;
   wm_state_atom_ = XInternAtom(display(), "WM_STATE", True);
   window_type_atom_ = XInternAtom(display(), "_NET_WM_WINDOW_TYPE", True);
   normal_window_type_atom_ = XInternAtom(display(), "_NET_WM_WINDOW_TYPE_NORMAL", True);
@@ -39,17 +39,17 @@ WindowUtilX11::~WindowUtilX11() {
     return 0;
   }
 
   // If the window is in WithdrawnState then look at all of its children.
   ::Window root, parent;
   ::Window *children;
   unsigned int num_children;
   if (!XQueryTree(display(), window, &root, &parent, &children, &num_children)) {
-    LOG(LS_ERROR) << "Failed to query for child windows although window"
+    RTC_LOG(LS_ERROR) << "Failed to query for child windows although window"
                   << "does not have a valid WM_STATE.";
     return 0;
   }
   ::Window app_window = 0;
   for (unsigned int i = 0; i < num_children; ++i) {
     app_window = GetApplicationWindow(children[i]);
     if (app_window) {
       break;
@@ -113,17 +113,17 @@ bool WindowUtilX11::GetWindowTitle(::Win
       status = XGetWMName(display(), window, &window_name);
       if (status && window_name.value && window_name.nitems) {
         int cnt;
         char **list = NULL;
         status = Xutf8TextPropertyToTextList(display(), &window_name, &list,
                                              &cnt);
         if (status >= Success && cnt && *list) {
           if (cnt > 1) {
-            LOG(LS_INFO) << "Window has " << cnt << " text properties, only using the first one.";
+            RTC_LOG(LS_INFO) << "Window has " << cnt << " text properties, only using the first one.";
           }
           *title = *list;
           result = true;
         }
         if (list) {
           XFreeStringList(list);
         }
       }
@@ -142,17 +142,17 @@ bool WindowUtilX11::BringWindowToFront(:
 
   unsigned int num_children;
   ::Window* children;
   ::Window parent;
   ::Window root;
   // Find the root window to pass event to.
   int status = XQueryTree(display(), window, &root, &parent, &children, &num_children);
   if (status == 0) {
-    LOG(LS_ERROR) << "Failed to query for the root window.";
+    RTC_LOG(LS_ERROR) << "Failed to query for the root window.";
     return false;
   }
 
   if (children) {
     XFree(children);
   }
 
   XRaiseWindow(display(), window);
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/shared_x_util.h
@@ -6,19 +6,18 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef WEBRTC_MODULES_DESKTOP_CAPTURE_X11_SHARED_X_UTIL_H_
 #define WEBRTC_MODULES_DESKTOP_CAPTURE_X11_SHARED_X_UTIL_H_
 
-#include "webrtc/system_wrappers/include/atomic32.h"
-#include "webrtc/modules/desktop_capture/x11/shared_x_display.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "system_wrappers/include/atomic32.h"
+#include "modules/desktop_capture/x11/shared_x_display.h"
 #include <unistd.h>
 #include <string.h>
 
 #include <map>
 #include <vector>
 #include <vector>
 #include <algorithm>
 
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/window_list_utils.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/window_list_utils.cc
@@ -161,17 +161,17 @@ bool IsDesktopElement(XAtomCache* cache,
   return strcmp("gnome-panel", class_hint.res_name) == 0 ||
          strcmp("desktop_window", class_hint.res_name) == 0;
 }
 
 }  // namespace
 
 int32_t GetWindowState(XAtomCache* cache, ::Window window) {
   // Get WM_STATE property of the window.
-  XWindowProperty<uint32_t> window_state(
+  webrtc::XWindowProperty<uint32_t> window_state(
       cache->display(), window, cache->WmState());
 
   // WM_STATE is considered to be set to WithdrawnState when it missing.
   return window_state.is_valid() ? *window_state.data() : WithdrawnState;
 }
 
 bool GetWindowList(XAtomCache* cache,
                    rtc::FunctionView<bool(::Window)> on_window) {
@@ -204,16 +204,25 @@ bool GetWindowList(XAtomCache* cache,
 
     DeferXFree free_children(children);
 
     for (unsigned int i = 0; i < num_children; i++) {
       // Iterates in reverse order to return windows from front to back.
       ::Window app_window =
           GetApplicationWindow(cache, children[num_children - 1 - i]);
       if (app_window && !IsDesktopElement(cache, app_window)) {
+        XWindowAttributes window_attr;
+        if(!XGetWindowAttributes(display, app_window, &window_attr)) {
+          RTC_LOG(LS_ERROR)<<"Bad request for attributes for window ID:" << app_window;
+          continue;
+        }
+        if((window_attr.width <= 0) || (window_attr.height <=0)){
+          continue;
+        }
+
         if (!on_window(app_window)) {
           return true;
         }
       }
     }
   }
 
   return failed_screens < num_screens;
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.cc
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.cc
@@ -7,63 +7,65 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "modules/desktop_capture/x11/x_error_trap.h"
 
 #include <assert.h>
 
-#if defined(TOOLKIT_GTK)
-#include <gdk/gdk.h>
-#endif  // !defined(TOOLKIT_GTK)
+#include <limits>
 
 namespace webrtc {
 
-namespace {
-
-#if !defined(TOOLKIT_GTK)
-
 // TODO(sergeyu): This code is not thread safe. Fix it. Bug 2202.
 static bool g_xserver_error_trap_enabled = false;
 static int g_last_xserver_error_code = 0;
 
-int XServerErrorHandler(Display* display, XErrorEvent* error_event) {
-  assert(g_xserver_error_trap_enabled);
-  g_last_xserver_error_code = error_event->error_code;
-  return 0;
+Bool XErrorTrap::XServerErrorHandler(Display* display, xReply* rep,
+                                     char* /* buf */, int /* len */,
+                                     XPointer data) {
+  XErrorTrap* self = reinterpret_cast<XErrorTrap*>(data);
+  if (rep->generic.type != X_Error ||
+      // Overflow-safe last_request_read <= last_ignored_request_ check, used to
+      // skip async replies from requests issued before this XErrorTrap existed.
+      self->last_ignored_request_ - display->last_request_read <
+      std::numeric_limits<unsigned long>::max() >> 1)
+    return False;
+  self->last_xserver_error_code_ = rep->error.errorCode;
+  return True;
 }
 
-#endif  // !defined(TOOLKIT_GTK)
-
-}  // namespace
-
 XErrorTrap::XErrorTrap(Display* display)
-    : original_error_handler_(NULL),
+    : display_(display),
+      last_xserver_error_code_(0),
       enabled_(true) {
-#if defined(TOOLKIT_GTK)
-  gdk_error_trap_push();
-#else  // !defined(TOOLKIT_GTK)
-  assert(!g_xserver_error_trap_enabled);
-  original_error_handler_ = XSetErrorHandler(&XServerErrorHandler);
-  g_xserver_error_trap_enabled = true;
-  g_last_xserver_error_code = 0;
-#endif  // !defined(TOOLKIT_GTK)
+  // Use async_handlers instead of XSetErrorHandler().  async_handlers can
+  // remain in place and then be safely removed at the right time even if a
+  // handler change happens concurrently on another thread.  async_handlers
+  // are processed first and so can prevent errors reaching the global
+  // XSetErrorHandler handler.  They also will not see errors from or affect
+  // handling of errors on other Displays, which may be processed on other
+  // threads.
+  LockDisplay(display);
+  async_handler_.next = display->async_handlers;
+  async_handler_.handler = XServerErrorHandler;
+  async_handler_.data = reinterpret_cast<XPointer>(this);
+  display->async_handlers = &async_handler_;
+  last_ignored_request_ = display->request;
+  UnlockDisplay(display);
 }
 
 int XErrorTrap::GetLastErrorAndDisable() {
+  assert(enabled_);
   enabled_ = false;
-#if defined(TOOLKIT_GTK)
-  return gdk_error_trap_push();
-#else  // !defined(TOOLKIT_GTK)
-  assert(g_xserver_error_trap_enabled);
-  XSetErrorHandler(original_error_handler_);
-  g_xserver_error_trap_enabled = false;
-  return g_last_xserver_error_code;
-#endif  // !defined(TOOLKIT_GTK)
+  LockDisplay(display_);
+  DeqAsyncHandler(display_, &async_handler_);
+  UnlockDisplay(display_);
+  return last_xserver_error_code_;
 }
 
 XErrorTrap::~XErrorTrap() {
   if (enabled_)
     GetLastErrorAndDisable();
 }
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.h
+++ b/media/webrtc/trunk/webrtc/modules/desktop_capture/x11/x_error_trap.h
@@ -6,34 +6,47 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef MODULES_DESKTOP_CAPTURE_X11_X_ERROR_TRAP_H_
 #define MODULES_DESKTOP_CAPTURE_X11_X_ERROR_TRAP_H_
 
-#include <X11/Xlib.h>
+#include <X11/Xlibint.h>
+#undef max // Xlibint.h defines this and it breaks std::max
+#undef min // Xlibint.h defines this and it breaks std::min
 
 #include "rtc_base/constructormagic.h"
 
 namespace webrtc {
 
 // Helper class that registers X Window error handler. Caller can use
 // GetLastErrorAndDisable() to get the last error that was caught, if any.
+// An XErrorTrap may be constructed on any thread, but errors are collected
+// from all threads and so |display| should be used only on one thread.
+// Other Displays are unaffected.
 class XErrorTrap {
  public:
   explicit XErrorTrap(Display* display);
   ~XErrorTrap();
 
   // Returns last error and removes unregisters the error handler.
+  // Must not be called more than once.
   int GetLastErrorAndDisable();
 
  private:
-  XErrorHandler original_error_handler_;
+  static Bool XServerErrorHandler(Display* display, xReply* rep,
+                                  char* /* buf */, int /* len */,
+                                  XPointer data);
+
+  _XAsyncHandler async_handler_;
+  Display* display_;
+  unsigned long last_ignored_request_;
+  int last_xserver_error_code_;
   bool enabled_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(XErrorTrap);
 };
 
 }  // namespace webrtc
 
 #endif  // MODULES_DESKTOP_CAPTURE_X11_X_ERROR_TRAP_H_
--- a/media/webrtc/trunk/webrtc/modules/include/module_common_types.h
+++ b/media/webrtc/trunk/webrtc/modules/include/module_common_types.h
@@ -587,11 +587,16 @@ struct PacedPacketInfo {
 
   static constexpr int kNotAProbe = -1;
   int send_bitrate_bps = -1;
   int probe_cluster_id = kNotAProbe;
   int probe_cluster_min_probes = -1;
   int probe_cluster_min_bytes = -1;
 };
 
+inline bool IsNewerOrSameTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
+  return timestamp == prev_timestamp ||
+      static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000;
+}
+
 }  // namespace webrtc
 
 #endif  // MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
--- a/media/webrtc/trunk/webrtc/modules/media_file/media_file_utility.cc
+++ b/media/webrtc/trunk/webrtc/modules/media_file/media_file_utility.cc
@@ -253,18 +253,18 @@ int32_t ModuleFileUtility::InitWavCodec(
       } else if (samplesPerSec == 22050) {
         strcpy(codec_info_.plname, "L16");
         _codecId = kCodecL16_16kHz;
         codec_info_.pacsize = 220;
         codec_info_.plfreq = 22000;
       } else if (samplesPerSec == 44100) {
         strcpy(codec_info_.plname, "L16");
         _codecId = kCodecL16_16kHz;
-        codec_info_.pacsize = 440;
-        codec_info_.plfreq = 44000;
+        codec_info_.pacsize = 441;
+        codec_info_.plfreq = 44100;
       } else if (samplesPerSec == 48000) {
         strcpy(codec_info_.plname, "L16");
         _codecId = kCodecL16_16kHz;
         codec_info_.pacsize = 480;
         codec_info_.plfreq = 48000;
       } else {
         RTC_LOG(LS_ERROR) << "Unsupported PCM frequency!";
         return -1;
--- a/media/webrtc/trunk/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
@@ -71,17 +71,17 @@ class RemoteEstimatorProxyTest : public 
 
   void Process() {
     clock_.AdvanceTimeMilliseconds(
         RemoteEstimatorProxy::kDefaultSendIntervalMs);
     proxy_.Process();
   }
 
   SimulatedClock clock_;
-  testing::StrictMock<MockTransportFeedbackSender> router_;
+  ::testing::StrictMock<MockTransportFeedbackSender> router_;
   RemoteEstimatorProxy proxy_;
 };
 
 TEST_F(RemoteEstimatorProxyTest, SendsSinglePacketFeedback) {
   IncomingPacket(kBaseSeq, kBaseTimeMs);
 
   EXPECT_CALL(router_, SendTransportFeedback(_))
       .WillOnce(Invoke([](rtcp::TransportFeedback* feedback_packet) {
--- a/media/webrtc/trunk/webrtc/modules/utility/source/jvm_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/jvm_android.cc
@@ -11,16 +11,22 @@
 #include <android/log.h>
 
 #include <memory>
 
 #include "modules/utility/include/jvm_android.h"
 
 #include "rtc_base/checks.h"
 
+namespace mozilla {
+namespace jni {
+jclass GetClassRef(JNIEnv* aEnv, const char* aClassName);
+}
+}
+
 #define TAG "JVM"
 #define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
 #define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
 
 namespace webrtc {
 
 JVM* g_jvm;
 
@@ -211,17 +217,19 @@ std::string JNIEnvironment::JavaToStdStr
   jni_->ReleaseStringUTFChars(j_string, jchars);
   CHECK_EXCEPTION(jni_);
   return ret;
 }
 
 // static
 void JVM::Initialize(JavaVM* jvm) {
   ALOGD("JVM::Initialize%s", GetThreadInfo().c_str());
-  RTC_CHECK(!g_jvm);
+  if (g_jvm) {
+    return;
+  }
   g_jvm = new JVM(jvm);
 }
 
 void JVM::Initialize(JavaVM* jvm, jobject context) {
   Initialize(jvm);
 
   // Pass in the context to the new ContextUtils class.
   JNIEnv* jni = g_jvm->jni();
--- a/media/webrtc/trunk/webrtc/modules/utility/source/process_thread_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/utility/source/process_thread_impl.cc
@@ -112,17 +112,17 @@ void ProcessThreadImpl::PostTask(std::un
     rtc::CritScope lock(&lock_);
     queue_.push(task.release());
   }
   wake_up_->Set();
 }
 
 void ProcessThreadImpl::RegisterModule(Module* module,
                                        const rtc::Location& from) {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // RTC_DCHECK(thread_checker_.CalledOnValidThread());  Not really needed
   RTC_DCHECK(module) << from.ToString();
 
 #if RTC_DCHECK_IS_ON
   {
     // Catch programmer error.
     rtc::CritScope lock(&lock_);
     for (const ModuleCallback& mc : modules_) {
       RTC_DCHECK(mc.module != module)
--- a/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.cc
@@ -36,17 +36,17 @@ DeviceInfoImpl::~DeviceInfoImpl(void) {
 int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) {
   if (!deviceUniqueIdUTF8)
     return -1;
 
   _apiLock.AcquireLockShared();
 
   if (_lastUsedDeviceNameLength == strlen((char*)deviceUniqueIdUTF8)) {
 // Is it the same device that is asked for again.
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
     if (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                     _lastUsedDeviceNameLength) == 0)
 #else
     if (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                   _lastUsedDeviceNameLength) == 0)
 #endif
     {
       // yes
@@ -65,17 +65,17 @@ int32_t DeviceInfoImpl::NumberOfCapabili
 int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8,
                                       const uint32_t deviceCapabilityNumber,
                                       VideoCaptureCapability& capability) {
   assert(deviceUniqueIdUTF8 != NULL);
 
   ReadLockScoped cs(_apiLock);
 
   if ((_lastUsedDeviceNameLength != strlen((char*)deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
       || (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #else
       || (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                     _lastUsedDeviceNameLength) != 0))
 #endif
 
   {
@@ -87,17 +87,17 @@ int32_t DeviceInfoImpl::GetCapability(co
       return -1;
     }
     _apiLock.ReleaseLockExclusive();
     _apiLock.AcquireLockShared();
   }
 
   // Make sure the number is valid
   if (deviceCapabilityNumber >= (unsigned int)_captureCapabilities.size()) {
-    RTC_LOG(LS_ERROR) << "Invalid deviceCapabilityNumber "
+    RTC_LOG(LS_ERROR) << deviceUniqueIdUTF8 << " Invalid deviceCapabilityNumber "
                       << deviceCapabilityNumber << ">= number of capabilities ("
                       << _captureCapabilities.size() << ").";
     return -1;
   }
 
   capability = _captureCapabilities[deviceCapabilityNumber];
   return 0;
 }
@@ -106,17 +106,17 @@ int32_t DeviceInfoImpl::GetBestMatchedCa
     const char* deviceUniqueIdUTF8,
     const VideoCaptureCapability& requested,
     VideoCaptureCapability& resulting) {
   if (!deviceUniqueIdUTF8)
     return -1;
 
   ReadLockScoped cs(_apiLock);
   if ((_lastUsedDeviceNameLength != strlen((char*)deviceUniqueIdUTF8))
-#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
       || (strncasecmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                       _lastUsedDeviceNameLength) != 0))
 #else
       || (_strnicmp((char*)_lastUsedDeviceName, (char*)deviceUniqueIdUTF8,
                     _lastUsedDeviceNameLength) != 0))
 #endif
   {
     _apiLock.ReleaseLockShared();
--- a/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/device_info_impl.h
@@ -37,16 +37,17 @@ public:
         VideoCaptureCapability& resulting);
     virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
                                    VideoRotation& orientation);
 
 protected:
     /* Initialize this object*/
 
     virtual int32_t Init()=0;
+    virtual int32_t Refresh() { return 0; }
     /*
      * Fills the member variable _captureCapabilities with capabilities for the given device name.
      */
     virtual int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8)=0;
 
 protected:
     // Data members
     typedef std::vector<VideoCaptureCapability> VideoCaptureCapabilities;
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
@@ -17,29 +17,205 @@
 #include <sys/ioctl.h>
 #include <sys/stat.h>
 #include <unistd.h>
 // v4l includes
 #include <linux/videodev2.h>
 
 #include "rtc_base/logging.h"
 
+#ifdef WEBRTC_LINUX
+#define EVENT_SIZE  ( sizeof (struct inotify_event) )
+#define BUF_LEN     ( 1024 * ( EVENT_SIZE + 16 ) )
+#endif
+
 namespace webrtc {
 namespace videocapturemodule {
 VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo() {
   return new videocapturemodule::DeviceInfoLinux();
 }
 
-DeviceInfoLinux::DeviceInfoLinux() : DeviceInfoImpl() {}
+#ifdef WEBRTC_LINUX
+void DeviceInfoLinux::HandleEvent(inotify_event* event, int fd)
+{
+    if (event->mask & IN_CREATE) {
+        if (fd == _fd_v4l || fd == _fd_snd) {
+            DeviceChange();
+        } else if ((event->mask & IN_ISDIR) && (fd == _fd_dev)) {
+            if (_wd_v4l < 0) {
+                // Sometimes inotify_add_watch fails if we call it immediately after receiving this event
+                // Adding 5ms delay to let file system settle down
+                usleep(5*1000);
+                _wd_v4l = inotify_add_watch(_fd_v4l, "/dev/v4l/by-path/", IN_CREATE | IN_DELETE | IN_DELETE_SELF);
+                if (_wd_v4l >= 0) {
+                    DeviceChange();
+                }
+            }
+            if (_wd_snd < 0) {
+                usleep(5*1000);
+                _wd_snd = inotify_add_watch(_fd_snd, "/dev/snd/by-path/", IN_CREATE | IN_DELETE | IN_DELETE_SELF);
+                if (_wd_snd >= 0) {
+                    DeviceChange();
+                }
+            }
+        }
+    } else if (event->mask & IN_DELETE) {
+        if (fd == _fd_v4l || fd == _fd_snd) {
+            DeviceChange();
+        }
+    } else if (event->mask & IN_DELETE_SELF) {
+        if (fd == _fd_v4l) {
+            inotify_rm_watch(_fd_v4l, _wd_v4l);
+            _wd_v4l = -1;
+        } else if (fd == _fd_snd) {
+            inotify_rm_watch(_fd_snd, _wd_snd);
+            _wd_snd = -1;
+        } else {
+            assert(false);
+        }
+    }
+}
+
+int DeviceInfoLinux::EventCheck(int fd)
+{
+    struct timeval timeout;
+    fd_set rfds;
+
+    timeout.tv_sec = 0;
+    timeout.tv_usec = 100000;
+
+    FD_ZERO(&rfds);
+    FD_SET(fd, &rfds);
+
+    return select(fd+1, &rfds, NULL, NULL, &timeout);
+}
+
+int DeviceInfoLinux::HandleEvents(int fd)
+{
+    char buffer[BUF_LEN];
+
+    ssize_t r = read(fd, buffer, BUF_LEN);
+
+    if (r <= 0) {
+        return r;
+    }
+
+    ssize_t buffer_i = 0;
+    inotify_event* pevent;
+    size_t eventSize;
+    int count = 0;
+
+    while (buffer_i < r)
+    {
+        pevent = (inotify_event *) (&buffer[buffer_i]);
+        eventSize = sizeof(inotify_event) + pevent->len;
+        char event[sizeof(inotify_event) + FILENAME_MAX + 1] // null-terminated
+            __attribute__ ((aligned(__alignof__(struct inotify_event))));
+
+        memcpy(event, pevent, eventSize);
+
+        HandleEvent((inotify_event*)(event), fd);
+
+        buffer_i += eventSize;
+        count++;
+    }
+
+    return count;
+}
+
+int DeviceInfoLinux::ProcessInotifyEvents()
+{
+    while (0 == _isShutdown.Value()) {
+        if (EventCheck(_fd_dev) > 0) {
+            if (HandleEvents(_fd_dev) < 0) {
+                break;
+            }
+        }
+        if (EventCheck(_fd_v4l) > 0) {
+            if (HandleEvents(_fd_v4l) < 0) {
+                break;
+            }
+        }
+        if (EventCheck(_fd_snd) > 0) {
+            if (HandleEvents(_fd_snd) < 0) {
+                break;
+            }
+        }
+    }
+    return 0;
+}
+
+bool DeviceInfoLinux::InotifyEventThread(void* obj)
+{
+    return static_cast<DeviceInfoLinux*> (obj)->InotifyProcess();
+}
+
+bool DeviceInfoLinux::InotifyProcess()
+{
+    _fd_v4l = inotify_init();
+    _fd_snd = inotify_init();
+    _fd_dev = inotify_init();
+    if (_fd_v4l >= 0 && _fd_snd >= 0 && _fd_dev >= 0) {
+        _wd_v4l = inotify_add_watch(_fd_v4l, "/dev/v4l/by-path/", IN_CREATE | IN_DELETE | IN_DELETE_SELF);
+        _wd_snd = inotify_add_watch(_fd_snd, "/dev/snd/by-path/", IN_CREATE | IN_DELETE | IN_DELETE_SELF);
+        _wd_dev = inotify_add_watch(_fd_dev, "/dev/", IN_CREATE);
+        ProcessInotifyEvents();
+
+        if (_wd_v4l >= 0) {
+          inotify_rm_watch(_fd_v4l, _wd_v4l);
+        }
+
+        if (_wd_snd >= 0) {
+          inotify_rm_watch(_fd_snd, _wd_snd);
+        }
+
+        if (_wd_dev >= 0) {
+          inotify_rm_watch(_fd_dev, _wd_dev);
+        }
+
+        close(_fd_v4l);
+        close(_fd_snd);
+        close(_fd_dev);
+        return true;
+    } else {
+        return false;
+    }
+}
+#endif
+
+DeviceInfoLinux::DeviceInfoLinux() : DeviceInfoImpl()
+#ifdef WEBRTC_LINUX
+    , _inotifyEventThread(new rtc::PlatformThread(
+                            InotifyEventThread, this, "InotifyEventThread"))
+    , _isShutdown(0)
+#endif
+{
+#ifdef WEBRTC_LINUX
+    if (_inotifyEventThread)
+    {
+        _inotifyEventThread->Start();
+        _inotifyEventThread->SetPriority(rtc::kHighPriority);
+    }
+}
+#endif
 
 int32_t DeviceInfoLinux::Init() {
   return 0;
 }
 
-DeviceInfoLinux::~DeviceInfoLinux() {}
+DeviceInfoLinux::~DeviceInfoLinux() {
+#ifdef WEBRTC_LINUX
+    ++_isShutdown;
+
+    if (_inotifyEventThread) {
+        _inotifyEventThread->Stop();
+        _inotifyEventThread = nullptr;
+    }
+#endif
+}
 
 uint32_t DeviceInfoLinux::NumberOfDevices() {
   RTC_LOG(LS_INFO) << __FUNCTION__;
 
   uint32_t count = 0;
   char device[20];
   int fd = -1;
 
@@ -56,26 +232,28 @@ uint32_t DeviceInfoLinux::NumberOfDevice
 }
 
 int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber,
                                        char* deviceNameUTF8,
                                        uint32_t deviceNameLength,
                                        char* deviceUniqueIdUTF8,
                                        uint32_t deviceUniqueIdUTF8Length,
                                        char* /*productUniqueIdUTF8*/,
-                                       uint32_t /*productUniqueIdUTF8Length*/) {
+                                       uint32_t /*productUniqueIdUTF8Length*/,
+                                       pid_t* /*pid*/) {
   RTC_LOG(LS_INFO) << __FUNCTION__;
 
   // Travel through /dev/video [0-63]
   uint32_t count = 0;
   char device[20];
   int fd = -1;
   bool found = false;
-  for (int n = 0; n < 64; n++) {
-    sprintf(device, "/dev/video%d", n);
+  int device_index;
+  for (device_index = 0; device_index < 64; device_index++) {
+    sprintf(device, "/dev/video%d", device_index);
     if ((fd = open(device, O_RDONLY)) != -1) {
       if (count == deviceNumber) {
         // Found the device
         found = true;
         break;
       } else {
         close(fd);
         count++;
@@ -114,18 +292,25 @@ int32_t DeviceInfoLinux::GetDeviceName(u
     if (deviceUniqueIdUTF8Length >= strlen((const char*)cap.bus_info)) {
       memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length);
       memcpy(deviceUniqueIdUTF8, cap.bus_info,
              strlen((const char*)cap.bus_info));
     } else {
       RTC_LOG(LS_INFO) << "buffer passed is too small";
       return -1;
     }
+  } else {
+    // if there's no bus info to use for uniqueId, invent one - and it has to be repeatable
+    if (snprintf(deviceUniqueIdUTF8,
+                 deviceUniqueIdUTF8Length, "fake_%u", device_index) >=
+        (int) deviceUniqueIdUTF8Length)
+    {
+      return -1;
+    }
   }
-
   return 0;
 }
 
 int32_t DeviceInfoLinux::CreateCapabilityMap(const char* deviceUniqueIdUTF8) {
   int fd;
   char device[32];
   bool found = false;
 
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.h
@@ -8,16 +8,23 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_LINUX_DEVICE_INFO_LINUX_H_
 #define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_LINUX_DEVICE_INFO_LINUX_H_
 
 #include "modules/video_capture/device_info_impl.h"
 #include "modules/video_capture/video_capture_impl.h"
+#ifdef WEBRTC_LINUX
+#include <memory>
+
+#include "rtc_base/platform_thread.h"
+#include "system_wrappers/include/atomic32.h"
+#include <sys/inotify.h>
+#endif
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 class DeviceInfoLinux: public DeviceInfoImpl
 {
 public:
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/video_capture_linux.cc
@@ -56,16 +56,23 @@ VideoCaptureModuleV4L2::VideoCaptureModu
 
 int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8) {
   int len = strlen((const char*)deviceUniqueIdUTF8);
   _deviceUniqueId = new (std::nothrow) char[len + 1];
   if (_deviceUniqueId) {
     memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
   }
 
+  int device_index;
+  if (sscanf(deviceUniqueIdUTF8,"fake_%d", &device_index) == 1)
+  {
+    _deviceId = device_index;
+    return 0;
+  }
+
   int fd;
   char device[32];
   bool found = false;
 
   /* detect /dev/video [0-63] entries */
   int n;
   for (n = 0; n < 64; n++) {
     sprintf(device, "/dev/video%d", n);
--- a/media/webrtc/trunk/webrtc/modules/video_capture/objc/device_info.mm
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/objc/device_info.mm
@@ -78,17 +78,18 @@ uint32_t DeviceInfoIos::NumberOfDevices(
 }
 
 int32_t DeviceInfoIos::GetDeviceName(uint32_t deviceNumber,
                                      char* deviceNameUTF8,
                                      uint32_t deviceNameUTF8Length,
                                      char* deviceUniqueIdUTF8,
                                      uint32_t deviceUniqueIdUTF8Length,
                                      char* productUniqueIdUTF8,
-                                     uint32_t productUniqueIdUTF8Length) {
+                                     uint32_t productUniqueIdUTF8Length,
+                                     pid_t* pid) {
   NSString* deviceName = [DeviceInfoIosObjC deviceNameForIndex:deviceNumber];
 
   NSString* deviceUniqueId = [DeviceInfoIosObjC deviceUniqueIdForIndex:deviceNumber];
 
   strncpy(deviceNameUTF8, [deviceName UTF8String], deviceNameUTF8Length);
   deviceNameUTF8[deviceNameUTF8Length - 1] = '\0';
 
   strncpy(deviceUniqueIdUTF8, deviceUniqueId.UTF8String, deviceUniqueIdUTF8Length);
--- a/media/webrtc/trunk/webrtc/modules/video_capture/test/video_capture_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/test/video_capture_unittest.cc
@@ -393,17 +393,18 @@ TEST_F(VideoCaptureExternalTest, TestExt
       webrtc::VideoType::kI420, test_frame_->width(), test_frame_->height());
   std::unique_ptr<uint8_t[]> test_buffer(new uint8_t[length]);
   webrtc::ExtractBuffer(*test_frame_, length, test_buffer.get());
   EXPECT_EQ(0, capture_input_interface_->IncomingFrame(test_buffer.get(),
       length, capture_callback_.capability(), 0));
   EXPECT_TRUE(capture_callback_.CompareLastFrame(*test_frame_));
 }
 
-TEST_F(VideoCaptureExternalTest, Rotation) {
+// Disabled, see Bug 1368816
+TEST_F(VideoCaptureExternalTest, DISABLED_Rotation) {
   EXPECT_EQ(0, capture_module_->SetCaptureRotation(webrtc::kVideoRotation_0));
   size_t length = webrtc::CalcBufferSize(
       webrtc::VideoType::kI420, test_frame_->width(), test_frame_->height());
   std::unique_ptr<uint8_t[]> test_buffer(new uint8_t[length]);
   webrtc::ExtractBuffer(*test_frame_, length, test_buffer.get());
   EXPECT_EQ(0, capture_input_interface_->IncomingFrame(test_buffer.get(),
     length, capture_callback_.capability(), 0));
   EXPECT_EQ(0, capture_module_->SetCaptureRotation(webrtc::kVideoRotation_90));
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture.h
@@ -6,45 +6,121 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
 #define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
 
+#include "modules/audio_processing/include/config.h"
 #include "api/video/video_rotation.h"
 #include "media/base/videosinkinterface.h"
 #include "modules/include/module.h"
 #include "modules/video_capture/video_capture_defines.h"
+#include <set>
+
+#if defined(ANDROID)
+#include <jni.h>
+#endif
 
 namespace webrtc {
 
+// Mozilla addition
+enum class CaptureDeviceType {
+  Camera = 0,
+  Screen = 1,
+  Application = 2,
+  Window = 3,
+  Browser = 4
+};
+// Mozilla addition
+
+struct CaptureDeviceInfo {
+  CaptureDeviceType type;
+
+  CaptureDeviceInfo() : type(CaptureDeviceType::Camera) {}
+  CaptureDeviceInfo(CaptureDeviceType t) : type(t) {}
+
+  static const ConfigOptionID identifier = ConfigOptionID::kCaptureDeviceInfo;
+  const char * TypeName() const
+  {
+    switch(type) {
+    case CaptureDeviceType::Camera: {
+      return "Camera";
+    }
+    case CaptureDeviceType::Screen: {
+      return "Screen";
+    }
+    case CaptureDeviceType::Application: {
+      return "Application";
+    }
+    case CaptureDeviceType::Window: {
+      return "Window";
+    }
+    case CaptureDeviceType::Browser: {
+      return "Browser";
+    }
+    }
+    assert(false);
+    return "UNKOWN-CaptureDeviceType!";
+  }
+};
+
+class VideoInputFeedBack
+{
+public:
+    virtual void OnDeviceChange() = 0;
+protected:
+    virtual ~VideoInputFeedBack(){}
+};
+
+#if defined(ANDROID) && !defined(WEBRTC_CHROMIUM_BUILD)
+  int32_t SetCaptureAndroidVM(JavaVM* javaVM);
+#endif
+
 class VideoCaptureModule: public rtc::RefCountInterface {
  public:
   // Interface for receiving information about available camera devices.
   class DeviceInfo {
    public:
     virtual uint32_t NumberOfDevices() = 0;
+    virtual int32_t Refresh() = 0;
+    virtual void DeviceChange() {
+      for (auto inputCallBack : _inputCallBacks) {
+        inputCallBack->OnDeviceChange();
+      }
+    }
+    virtual void RegisterVideoInputFeedBack(VideoInputFeedBack* callBack) {
+      _inputCallBacks.insert(callBack);
+    }
+
+    virtual void DeRegisterVideoInputFeedBack(VideoInputFeedBack* callBack) {
+      auto it = _inputCallBacks.find(callBack);
+      if (it != _inputCallBacks.end()) {
+        _inputCallBacks.erase(it);
+      }
+    }
 
     // Returns the available capture devices.
     // deviceNumber   - Index of capture device.
     // deviceNameUTF8 - Friendly name of the capture device.
     // deviceUniqueIdUTF8 - Unique name of the capture device if it exist.
     //                      Otherwise same as deviceNameUTF8.
     // productUniqueIdUTF8 - Unique product id if it exist.
     //                       Null terminated otherwise.
     virtual int32_t GetDeviceName(
         uint32_t deviceNumber,
         char* deviceNameUTF8,
         uint32_t deviceNameLength,
         char* deviceUniqueIdUTF8,
         uint32_t deviceUniqueIdUTF8Length,
         char* productUniqueIdUTF8 = 0,
-        uint32_t productUniqueIdUTF8Length = 0) = 0;
+        uint32_t productUniqueIdUTF8Length = 0,
+        pid_t* pid = 0) = 0;
 
 
     // Returns the number of capabilities this device.
     virtual int32_t NumberOfCapabilities(
         const char* deviceUniqueIdUTF8) = 0;
 
     // Gets the capabilities of the named device.
     virtual int32_t GetCapability(
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_defines.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_defines.h
@@ -10,16 +10,20 @@
 
 #ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
 #define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
 
 #include "api/video/video_frame.h"
 #include "modules/include/module_common_types.h"
 #include "typedefs.h"  // NOLINT(build/include)
 
+#ifdef XP_WIN
+typedef int pid_t;
+#endif
+
 namespace webrtc
 {
 // Defines
 #ifndef NULL
     #define NULL    0
 #endif
 
 enum {kVideoCaptureUniqueNameLength =1024}; //Max unique capture device name lenght
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_factory.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_factory.cc
@@ -11,29 +11,21 @@
 #include "modules/video_capture/video_capture_factory.h"
 
 #include "modules/video_capture/video_capture_impl.h"
 
 namespace webrtc {
 
 rtc::scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
     const char* deviceUniqueIdUTF8) {
-#if defined(WEBRTC_ANDROID)
-  return nullptr;
-#else
   return videocapturemodule::VideoCaptureImpl::Create(deviceUniqueIdUTF8);
-#endif
 }
 
 rtc::scoped_refptr<VideoCaptureModule> VideoCaptureFactory::Create(
     VideoCaptureExternal*& externalCapture) {
   return videocapturemodule::VideoCaptureImpl::Create(externalCapture);
 }
 
 VideoCaptureModule::DeviceInfo* VideoCaptureFactory::CreateDeviceInfo() {
-#if defined(WEBRTC_ANDROID)
-  return nullptr;
-#else
   return videocapturemodule::VideoCaptureImpl::CreateDeviceInfo();
-#endif
 }
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/video_capture_impl.cc
@@ -80,48 +80,59 @@ int32_t VideoCaptureImpl::RotationInDegr
   return -1;
 }
 
 VideoCaptureImpl::VideoCaptureImpl()
     : _deviceUniqueId(NULL),
       _requestedCapability(),
       _lastProcessTimeNanos(rtc::TimeNanos()),
       _lastFrameRateCallbackTimeNanos(rtc::TimeNanos()),
-      _dataCallBack(NULL),
       _lastProcessFrameTimeNanos(rtc::TimeNanos()),
       _rotateFrame(kVideoRotation_0),
       apply_rotation_(false) {
   _requestedCapability.width = kDefaultWidth;
   _requestedCapability.height = kDefaultHeight;
   _requestedCapability.maxFPS = 30;
   _requestedCapability.videoType = VideoType::kI420;
   memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
 }
 
 VideoCaptureImpl::~VideoCaptureImpl() {
-  DeRegisterCaptureDataCallback();
   if (_deviceUniqueId)
     delete[] _deviceUniqueId;
 }
 
 void VideoCaptureImpl::RegisterCaptureDataCallback(
     rtc::VideoSinkInterface<VideoFrame>* dataCallBack) {
   rtc::CritScope cs(&_apiCs);
-  _dataCallBack = dataCallBack;
+  _dataCallBacks.insert(dataCallBack);
 }
 
-void VideoCaptureImpl::DeRegisterCaptureDataCallback() {
+void VideoCaptureImpl::DeRegisterCaptureDataCallback(
+    rtc::VideoSinkInterface<VideoFrame>* dataCallBack) {
   rtc::CritScope cs(&_apiCs);
-  _dataCallBack = NULL;
+  auto it = _dataCallBacks.find(dataCallBack);
+  if (it != _dataCallBacks.end()) {
+    _dataCallBacks.erase(it);
+  }
 }
+
+int32_t VideoCaptureImpl::StopCaptureIfAllClientsClose() {
+  if (_dataCallBacks.empty()) {
+    return StopCapture();
+  } else {
+    return 0;
+  }
+}
+
 int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) {
   UpdateFrameCount();  // frame count used for local frame rate callback.
 
-  if (_dataCallBack) {
-    _dataCallBack->OnFrame(captureFrame);
+  for (auto dataCallBack : _dataCallBacks) {
+    dataCallBack->OnFrame(captureFrame);
   }
 
   return 0;
 }
 
 int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame,
                                         size_t videoFrameLength,
                                         const VideoCaptureCapability& frameInfo,
@@ -144,23 +155,21 @@ int32_t VideoCaptureImpl::IncomingFrame(
   int stride_y = width;
   int stride_uv = (width + 1) / 2;
   int target_width = width;
   int target_height = height;
 
   // SetApplyRotation doesn't take any lock. Make a local copy here.
   bool apply_rotation = apply_rotation_;
 
-  if (apply_rotation) {
-    // Rotating resolution when for 90/270 degree rotations.
-    if (_rotateFrame == kVideoRotation_90 ||
-        _rotateFrame == kVideoRotation_270) {
-      target_width = abs(height);
-      target_height = width;
-    }
+  if (apply_rotation &&
+      (_rotateFrame == kVideoRotation_90 ||
+       _rotateFrame == kVideoRotation_270)) {
+    target_width = abs(height);
+    target_height = width;
   }
 
   // Setting absolute height (in case it was negative).
   // In Windows, the image starts bottom left, instead of top left.
   // Setting a negative source height, inverts the image (within LibYuv).
 
   // TODO(nisse): Use a pool?
   rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
@@ -186,26 +195,33 @@ int32_t VideoCaptureImpl::IncomingFrame(
 
   const int conversionResult = libyuv::ConvertToI420(
       videoFrame, videoFrameLength, buffer.get()->MutableDataY(),
       buffer.get()->StrideY(), buffer.get()->MutableDataU(),
       buffer.get()->StrideU(), buffer.get()->MutableDataV(),
       buffer.get()->StrideV(), 0, 0,  // No Cropping
       width, height, target_width, target_height, rotation_mode,
       ConvertVideoType(frameInfo.videoType));
-  if (conversionResult < 0) {
+  if (conversionResult != 0) {
     RTC_LOG(LS_ERROR) << "Failed to convert capture frame from type "
                       << static_cast<int>(frameInfo.videoType) << "to I420.";
     return -1;
   }
 
   VideoFrame captureFrame(buffer, 0, rtc::TimeMillis(),
                           !apply_rotation ? _rotateFrame : kVideoRotation_0);
   captureFrame.set_ntp_time_ms(captureTime);
 
+  // This is one ugly hack to let CamerasParent know what rotation
+  // the frame was captured at. Note that this goes against the intended
+  // meaning of rotation of the frame (how to rotate it before rendering).
+  // We do this so CamerasChild can scale to the proper dimensions
+  // later on in the pipe.
+  captureFrame.set_rotation(_rotateFrame);
+
   DeliverCapturedFrame(captureFrame);
 
   return 0;
 }
 
 int32_t VideoCaptureImpl::SetCaptureRotation(VideoRotation rotation) {
   rtc::CritScope cs(&_apiCs);
   _rotateFrame = rotation;
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.cc
@@ -12,34 +12,73 @@
 
 #include <ios>  // std::hex
 
 #include "modules/video_capture/video_capture_config.h"
 #include "modules/video_capture/windows/help_functions_ds.h"
 #include "rtc_base/logging.h"
 
 #include <Dvdmedia.h>
-#include <Streams.h>
+#include <dbt.h>
+#include <ks.h>
 
 namespace webrtc {
 namespace videocapturemodule {
 
+LRESULT CALLBACK WndProc(HWND hWnd, UINT uiMsg, WPARAM wParam, LPARAM lParam)
+{
+    DeviceInfoDS* pParent;
+    if (uiMsg == WM_CREATE)
+    {
+        pParent = (DeviceInfoDS*)((LPCREATESTRUCT)lParam)->lpCreateParams;
+        SetWindowLongPtr(hWnd, GWLP_USERDATA, (LONG_PTR)pParent);
+    }
+    else if (uiMsg == WM_DESTROY)
+    {
+        SetWindowLongPtr(hWnd, GWLP_USERDATA, NULL);
+    }
+    else if (uiMsg == WM_DEVICECHANGE)
+    {
+        pParent = (DeviceInfoDS*)GetWindowLongPtr(hWnd, GWLP_USERDATA);
+        if (pParent)
+        {
+            pParent->DeviceChange();
+        }
+    }
+    return DefWindowProc(hWnd, uiMsg, wParam, lParam);
+}
+
+void _FreeMediaType(AM_MEDIA_TYPE& mt)
+{
+    if (mt.cbFormat != 0)
+    {
+        CoTaskMemFree((PVOID)mt.pbFormat);
+        mt.cbFormat = 0;
+        mt.pbFormat = NULL;
+    }
+    if (mt.pUnk != NULL)
+    {
+        // pUnk should not be used.
+        mt.pUnk->Release();
+        mt.pUnk = NULL;
+    }
+}
+
 // static
 DeviceInfoDS* DeviceInfoDS::Create() {
   DeviceInfoDS* dsInfo = new DeviceInfoDS();
   if (!dsInfo || dsInfo->Init() != 0) {
     delete dsInfo;
     dsInfo = NULL;
   }
   return dsInfo;
 }
 
 DeviceInfoDS::DeviceInfoDS()
-    : _dsDevEnum(NULL),
-      _dsMonikerDevEnum(NULL),
+    : DeviceInfoImpl(), _dsDevEnum(NULL),
       _CoUninitializeIsRequired(true) {
   // 1) Initialize the COM library (make Windows load the DLLs).
   //
   // CoInitializeEx must be called at least once, and is usually called only
   // once, for each thread that uses the COM library. Multiple calls to
   // CoInitializeEx by the same thread are allowed as long as they pass the same
   // concurrency flag, but subsequent valid calls return S_FALSE. To close the
   // COM library gracefully on a thread, each successful call to CoInitializeEx,
@@ -73,71 +112,92 @@ DeviceInfoDS::DeviceInfoDS()
       // Details: hr = 0x80010106 <=> "Cannot change thread mode after it is
       // set".
       //
       RTC_LOG(LS_INFO) << __FUNCTION__
                        << ": CoInitializeEx(NULL, COINIT_APARTMENTTHREADED)"
                        << " => RPC_E_CHANGED_MODE, error 0x" << std::hex << hr;
     }
   }
+
+  _hInstance = reinterpret_cast<HINSTANCE>(GetModuleHandle(NULL));
+  _wndClass = {0};
+  _wndClass.lpfnWndProc = &WndProc;
+  _wndClass.lpszClassName = TEXT("DeviceInfoDS");
+  _wndClass.hInstance = _hInstance;
+
+  if (RegisterClass(&_wndClass))
+  {
+    _hwnd = CreateWindow(_wndClass.lpszClassName, NULL, 0, CW_USEDEFAULT,
+                         CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, NULL,
+                         NULL, _hInstance, this);
+  }
 }
 
 DeviceInfoDS::~DeviceInfoDS() {
-  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
   RELEASE_AND_CLEAR(_dsDevEnum);
   if (_CoUninitializeIsRequired) {
     CoUninitialize();
   }
+  if (_hwnd != NULL)
+  {
+    DestroyWindow(_hwnd);
+  }
+  UnregisterClass(_wndClass.lpszClassName, _hInstance);
 }
 
 int32_t DeviceInfoDS::Init() {
   HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
                                 IID_ICreateDevEnum, (void**)&_dsDevEnum);
   if (hr != NOERROR) {
     RTC_LOG(LS_INFO) << "Failed to create CLSID_SystemDeviceEnum, error 0x"
                      << std::hex << hr;
     return -1;
   }
   return 0;
 }
 uint32_t DeviceInfoDS::NumberOfDevices() {
   ReadLockScoped cs(_apiLock);
-  return GetDeviceInfo(0, 0, 0, 0, 0, 0, 0);
+  return GetDeviceInfo(0, 0, 0, 0, 0, 0, 0, 0);
 }
 
 int32_t DeviceInfoDS::GetDeviceName(uint32_t deviceNumber,
                                     char* deviceNameUTF8,
                                     uint32_t deviceNameLength,
                                     char* deviceUniqueIdUTF8,
                                     uint32_t deviceUniqueIdUTF8Length,
                                     char* productUniqueIdUTF8,
-                                    uint32_t productUniqueIdUTF8Length) {
+                                    uint32_t productUniqueIdUTF8Length,
+                                    pid_t* pid)
+{
   ReadLockScoped cs(_apiLock);
   const int32_t result = GetDeviceInfo(
       deviceNumber, deviceNameUTF8, deviceNameLength, deviceUniqueIdUTF8,
-      deviceUniqueIdUTF8Length, productUniqueIdUTF8, productUniqueIdUTF8Length);
+      deviceUniqueIdUTF8Length, productUniqueIdUTF8, productUniqueIdUTF8Length,
+      pid);
   return result > (int32_t)deviceNumber ? 0 : -1;
 }
 
 int32_t DeviceInfoDS::GetDeviceInfo(uint32_t deviceNumber,
                                     char* deviceNameUTF8,
                                     uint32_t deviceNameLength,
                                     char* deviceUniqueIdUTF8,
                                     uint32_t deviceUniqueIdUTF8Length,
                                     char* productUniqueIdUTF8,
-                                    uint32_t productUniqueIdUTF8Length)
-
+                                    uint32_t productUniqueIdUTF8Length,
+                                    pid_t* pid)
 {
   // enumerate all video capture devices
-  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+  IEnumMoniker* _dsMonikerDevEnum = NULL;
   HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                                  &_dsMonikerDevEnum, 0);
   if (hr != NOERROR) {
     RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
                      << std::hex << hr << ". No webcam exist?";
+    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
     return 0;
   }
 
   _dsMonikerDevEnum->Reset();
   ULONG cFetched;
   IMoniker* pM;
   int index = 0;
   while (S_OK == _dsMonikerDevEnum->Next(1, &pM, &cFetched)) {
@@ -160,16 +220,17 @@ int32_t DeviceInfoDS::GetDeviceInfo(uint
             int convResult = 0;
             if (deviceNameLength > 0) {
               convResult = WideCharToMultiByte(CP_UTF8, 0, varName.bstrVal, -1,
                                                (char*)deviceNameUTF8,
                                                deviceNameLength, NULL, NULL);
               if (convResult == 0) {
                 RTC_LOG(LS_INFO) << "Failed to convert device name to UTF8, "
                                  << "error = " << GetLastError();
+                RELEASE_AND_CLEAR(_dsMonikerDevEnum);
                 return -1;
               }
             }
             if (deviceUniqueIdUTF8Length > 0) {
               hr = pBag->Read(L"DevicePath", &varName, 0);
               if (FAILED(hr)) {
                 strncpy_s((char*)deviceUniqueIdUTF8, deviceUniqueIdUTF8Length,
                           (char*)deviceNameUTF8, convResult);
@@ -179,16 +240,17 @@ int32_t DeviceInfoDS::GetDeviceInfo(uint
               } else {
                 convResult = WideCharToMultiByte(
                     CP_UTF8, 0, varName.bstrVal, -1, (char*)deviceUniqueIdUTF8,
                     deviceUniqueIdUTF8Length, NULL, NULL);
                 if (convResult == 0) {
                   RTC_LOG(LS_INFO)
                       << "Failed to convert device "
                       << "name to UTF8, error = " << GetLastError();
+                  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
                   return -1;
                 }
                 if (productUniqueIdUTF8 && productUniqueIdUTF8Length > 0) {
                   GetProductId(deviceUniqueIdUTF8, productUniqueIdUTF8,
                                productUniqueIdUTF8Length);
                 }
               }
             }
@@ -197,38 +259,40 @@ int32_t DeviceInfoDS::GetDeviceInfo(uint
         }
       }
       VariantClear(&varName);
       pBag->Release();
       pM->Release();
     }
   }
   if (deviceNameLength) {
-    RTC_LOG(LS_INFO) << __FUNCTION__ << " " << deviceNameUTF8;
+    RTC_LOG(LS_INFO) << __FUNCTION__ << ": deviceName: " << deviceNameUTF8;
   }
+  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
   return index;
 }
 
 IBaseFilter* DeviceInfoDS::GetDeviceFilter(const char* deviceUniqueIdUTF8,
                                            char* productUniqueIdUTF8,
                                            uint32_t productUniqueIdUTF8Length) {
   const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen(
       (char*)deviceUniqueIdUTF8);  // UTF8 is also NULL terminated
   if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) {
     RTC_LOG(LS_INFO) << "Device name too long";
     return NULL;
   }
 
   // enumerate all video capture devices
-  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
+  IEnumMoniker* _dsMonikerDevEnum = NULL;
   HRESULT hr = _dsDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                                  &_dsMonikerDevEnum, 0);
   if (hr != NOERROR) {
     RTC_LOG(LS_INFO) << "Failed to enumerate CLSID_SystemDeviceEnum, error 0x"
                      << std::hex << hr << ". No webcam exist?";
+    RELEASE_AND_CLEAR(_dsMonikerDevEnum);
     return 0;
   }
   _dsMonikerDevEnum->Reset();
   ULONG cFetched;
   IMoniker* pM;
 
   IBaseFilter* captureFilter = NULL;
   bool deviceFound = false;
@@ -274,16 +338,17 @@ IBaseFilter* DeviceInfoDS::GetDeviceFilt
           }
         }
       }
       VariantClear(&varName);
       pBag->Release();
       pM->Release();
     }
   }
+  RELEASE_AND_CLEAR(_dsMonikerDevEnum);
   return captureFilter;
 }
 
 int32_t DeviceInfoDS::GetWindowsCapability(
     const int32_t capabilityIndex,
     VideoCaptureCapabilityWindows& windowsCapability) {
   ReadLockScoped cs(_apiLock);
 
@@ -292,17 +357,16 @@ int32_t DeviceInfoDS::GetWindowsCapabili
     return -1;
   }
 
   windowsCapability = _captureCapabilitiesWindows[capabilityIndex];
   return 0;
 }
 
 int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8)
-
 {
   // Reset old capability list
   _captureCapabilities.clear();
 
   const int32_t deviceUniqueIdUTF8Length =
       (int32_t)strlen((char*)deviceUniqueIdUTF8);
   if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) {
     RTC_LOG(LS_INFO) << "Device name too long";
@@ -366,17 +430,18 @@ int32_t DeviceInfoDS::CreateCapabilityMa
   // been seen having problem with MJPEG and FORMAT_VideoInfo2 Interlace flag is
   // only supported in FORMAT_VideoInfo2
   bool supportFORMAT_VideoInfo2 = false;
   bool supportFORMAT_VideoInfo = false;
   bool foundInterlacedFormat = false;
   GUID preferedVideoFormat = FORMAT_VideoInfo;
   for (int32_t tmp = 0; tmp < count; ++tmp) {
     hr = streamConfig->GetStreamCaps(tmp, &pmt, reinterpret_cast<BYTE*>(&caps));
-    if (!FAILED(hr)) {
+    // Bug 1181265 - perhaps a helper dll returns success with nullptr
+    if (!FAILED(hr) && pmt) {
       if (pmt->majortype == MEDIATYPE_Video &&
           pmt->formattype == FORMAT_VideoInfo2) {
         RTC_LOG(LS_INFO) << "Device support FORMAT_VideoInfo2";
         supportFORMAT_VideoInfo2 = true;
         VIDEOINFOHEADER2* h =
             reinterpret_cast<VIDEOINFOHEADER2*>(pmt->pbFormat);
         assert(h);
         foundInterlacedFormat |=
@@ -431,45 +496,50 @@ int32_t DeviceInfoDS::CreateCapabilityMa
         capability.height = h->bmiHeader.biHeight;
         capability.interlaced =
             h->dwInterlaceFlags &
             (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly);
         avgTimePerFrame = h->AvgTimePerFrame;
       }
 
       if (hrVC == S_OK) {
-        LONGLONG* frameDurationList;
+        LONGLONG* frameDurationList = NULL;
         LONGLONG maxFPS;
         long listSize;
         SIZE size;
         size.cx = capability.width;
         size.cy = capability.height;
 
         // GetMaxAvailableFrameRate doesn't return max frame rate always
         // eg: Logitech Notebook. This may be due to a bug in that API
         // because GetFrameRateList array is reversed in the above camera. So
         // a util method written. Can't assume the first value will return
         // the max fps.
         hrVC = videoControlConfig->GetFrameRateList(
             outputCapturePin, tmp, size, &listSize, &frameDurationList);
 
         // On some odd cameras, you may get a 0 for duration.
         // GetMaxOfFrameArray returns the lowest duration (highest FPS)
-        if (hrVC == S_OK && listSize > 0 &&
+        // Initialize and check the returned list for null since
+        // some broken drivers don't modify it.
+        if (hrVC == S_OK && listSize > 0 && frameDurationList &&
             0 != (maxFPS = GetMaxOfFrameArray(frameDurationList, listSize))) {
           capability.maxFPS = static_cast<int>(10000000 / maxFPS);
           capability.supportFrameRateControl = true;
         } else  // use existing method
         {
           RTC_LOG(LS_INFO) << "GetMaxAvailableFrameRate NOT SUPPORTED";
           if (avgTimePerFrame > 0)
             capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
           else
             capability.maxFPS = 0;
         }
+        if (frameDurationList) {
+          CoTaskMemFree((PVOID)frameDurationList); // NULL not safe
+        }
       } else  // use existing method in case IAMVideoControl is not supported
       {
         if (avgTimePerFrame > 0)
           capability.maxFPS = static_cast<int>(10000000 / avgTimePerFrame);
         else
           capability.maxFPS = 0;
       }
 
@@ -505,27 +575,26 @@ int32_t DeviceInfoDS::CreateCapabilityMa
         RTC_LOG(LS_INFO) << "Device support HDYC.";
         capability.videoType = VideoType::kUYVY;
       } else {
         WCHAR strGuid[39];
         StringFromGUID2(pmt->subtype, strGuid, 39);
         RTC_LOG(LS_WARNING)
             << "Device support unknown media type " << strGuid << ", width "
             << capability.width << ", height " << capability.height;
-        continue;
       }
 
       _captureCapabilities.push_back(capability);
       _captureCapabilitiesWindows.push_back(capability);
       RTC_LOG(LS_INFO) << "Camera capability, width:" << capability.width
                        << " height:" << capability.height
                        << " type:" << static_cast<int>(capability.videoType)
                        << " fps:" << capability.maxFPS;
     }
-    DeleteMediaType(pmt);
+    _FreeMediaType(*pmt);
     pmt = NULL;
   }
   RELEASE_AND_CLEAR(streamConfig);
   RELEASE_AND_CLEAR(videoControlConfig);
   RELEASE_AND_CLEAR(outputCapturePin);
   RELEASE_AND_CLEAR(captureDevice);  // Release the capture device
 
   // Store the new used device name
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_ds.h
@@ -9,17 +9,18 @@
  */
 
 #ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
 #define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_DEVICE_INFO_DS_H_
 
 #include "modules/video_capture/device_info_impl.h"
 #include "modules/video_capture/video_capture_impl.h"
 
-#include <Dshow.h>
+#include <dshow.h>
+#include <windows.h>
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 struct VideoCaptureCapabilityWindows: public VideoCaptureCapability
 {
     uint32_t directShowCapabilityIndex;
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_mf.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/device_info_mf.cc
@@ -8,17 +8,17 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "modules/video_capture/windows/device_info_mf.h"
 
 namespace webrtc {
 namespace videocapturemodule {
 
-DeviceInfoMF::DeviceInfoMF() {
+DeviceInfoMF::DeviceInfoMF() : DeviceInfoImpl() {
 }
 
 DeviceInfoMF::~DeviceInfoMF() {
 }
 
 int32_t DeviceInfoMF::Init() {
   return -1;
 }
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.cc
@@ -12,17 +12,17 @@
 
 #include <ios>  // std::hex
 
 #include "modules/video_capture/windows/help_functions_ds.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/platform_thread.h"
 
-#include <Dvdmedia.h>  // VIDEOINFOHEADER2
+#include <dvdmedia.h>  // VIDEOINFOHEADER2
 #include <initguid.h>
 
 #define DELETE_RESET(p) \
   {                     \
     delete (p);         \
     (p) = NULL;         \
   }
 
@@ -34,41 +34,46 @@ DEFINE_GUID(CLSID_SINKFILTER,
             0xbf,
             0x15,
             0xd5,
             0xe2,
             0xce,
             0x12,
             0xc3);
 
+using namespace mozilla::media;
+using namespace mozilla;
+
 namespace webrtc {
 namespace videocapturemodule {
 
 typedef struct tagTHREADNAME_INFO {
   DWORD dwType;      // must be 0x1000
   LPCSTR szName;     // pointer to name (in user addr space)
   DWORD dwThreadID;  // thread ID (-1=caller thread)
   DWORD dwFlags;     // reserved for future use, must be zero
 } THREADNAME_INFO;
 
-CaptureInputPin::CaptureInputPin(IN TCHAR* szName,
+CaptureInputPin::CaptureInputPin(int32_t moduleId,
+                                 IN TCHAR * szName,
                                  IN CaptureSinkFilter* pFilter,
-                                 IN CCritSec* pLock,
+                                 IN CriticalSection * pLock,
                                  OUT HRESULT* pHr,
                                  IN LPCWSTR pszName)
-    : CBaseInputPin(szName, pFilter, pLock, pHr, pszName),
+    : BaseInputPin(szName, pFilter, pLock, pHr, pszName),
       _requestedCapability(),
       _resultingCapability() {
+  _moduleId=moduleId;
   _threadHandle = NULL;
 }
 
 CaptureInputPin::~CaptureInputPin() {}
 
 HRESULT
-CaptureInputPin::GetMediaType(IN int iPosition, OUT CMediaType* pmt) {
+CaptureInputPin::GetMediaType(IN int iPosition, OUT MediaType* pmt) {
   // reset the thread handle
   _threadHandle = NULL;
 
   if (iPosition < 0)
     return E_INVALIDARG;
 
   VIDEOINFOHEADER* pvi =
       (VIDEOINFOHEADER*)pmt->AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
@@ -148,17 +153,17 @@ CaptureInputPin::GetMediaType(IN int iPo
   RTC_LOG(LS_INFO) << "GetMediaType position " << iPosition << ", width "
                    << _requestedCapability.width << ", height "
                    << _requestedCapability.height << ", biCompression 0x"
                    << std::hex << pvi->bmiHeader.biCompression;
   return NOERROR;
 }
 
 HRESULT
-CaptureInputPin::CheckMediaType(IN const CMediaType* pMediaType) {
+CaptureInputPin::CheckMediaType(IN const MediaType* pMediaType) {
   // reset the thread handle
   _threadHandle = NULL;
 
   const GUID* type = pMediaType->Type();
   if (*type != MEDIATYPE_Video)
     return E_INVALIDARG;
 
   const GUID* formatType = pMediaType->FormatType();
@@ -279,68 +284,87 @@ CaptureInputPin::CheckMediaType(IN const
   }
   return E_INVALIDARG;
 }
 
 HRESULT
 CaptureInputPin::Receive(IN IMediaSample* pIMediaSample) {
   HRESULT hr = S_OK;
 
-  RTC_DCHECK(m_pFilter);
+  RTC_DCHECK(mFilter);
   RTC_DCHECK(pIMediaSample);
 
   // get the thread handle of the delivering thread inc its priority
   if (_threadHandle == NULL) {
     HANDLE handle = GetCurrentThread();
     SetThreadPriority(handle, THREAD_PRIORITY_HIGHEST);
     _threadHandle = handle;
+    // See http://msdn.microsoft.com/en-us/library/xcb2z8hs(VS.71).aspx for details on the code
+    // in this function. Name of article is "Setting a Thread Name (Unmanaged)".
 
-    rtc::SetCurrentThreadName("webrtc_video_capture");
+    THREADNAME_INFO info;
+    info.dwType = 0x1000;
+    info.szName = "capture_thread";
+    info.dwThreadID = (DWORD)-1;
+    info.dwFlags = 0;
+
+    __try
+    {
+        RaiseException( 0x406D1388, 0, sizeof(info)/sizeof(DWORD),
+                        (DWORD_PTR*)&info );
+    }
+    __except (EXCEPTION_CONTINUE_EXECUTION)
+    {
+    }
   }
 
-  reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->LockReceive();
-  hr = CBaseInputPin::Receive(pIMediaSample);
+  reinterpret_cast<CaptureSinkFilter*>(mFilter)->LockReceive();
+  hr = BaseInputPin::Receive(pIMediaSample);
 
   if (SUCCEEDED(hr)) {
-    const LONG length = pIMediaSample->GetActualDataLength();
-    RTC_DCHECK(length >= 0);
+    const int32_t length = pIMediaSample->GetActualDataLength();
 
     unsigned char* pBuffer = NULL;
     if (S_OK != pIMediaSample->GetPointer(&pBuffer)) {
-      reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->UnlockReceive();
+      reinterpret_cast <CaptureSinkFilter *>(mFilter)->UnlockReceive();
       return S_FALSE;
     }
 
     // NOTE: filter unlocked within Send call
-    reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->ProcessCapturedFrame(
-        pBuffer, static_cast<size_t>(length), _resultingCapability);
+    reinterpret_cast <CaptureSinkFilter *> (mFilter)->ProcessCapturedFrame(
+        pBuffer,length,_resultingCapability);
   } else {
-    reinterpret_cast<CaptureSinkFilter*>(m_pFilter)->UnlockReceive();
+    reinterpret_cast<CaptureSinkFilter*>(mFilter)->UnlockReceive();
   }
 
   return hr;
 }
 
 // called under LockReceive
 HRESULT CaptureInputPin::SetMatchingMediaType(
     const VideoCaptureCapability& capability) {
   _requestedCapability = capability;
   _resultingCapability = VideoCaptureCapability();
   return S_OK;
 }
 //  ----------------------------------------------------------------------------
 CaptureSinkFilter::CaptureSinkFilter(IN TCHAR* tszName,
                                      IN LPUNKNOWN punk,
                                      OUT HRESULT* phr,
-                                     VideoCaptureExternal& captureObserver)
-    : CBaseFilter(tszName, punk, &m_crtFilter, CLSID_SINKFILTER),
+                              VideoCaptureExternal& captureObserver,
+                              int32_t moduleId)
+    : BaseFilter(tszName, CLSID_SINKFILTER),
+      m_crtFilter("CaptureSinkFilter::m_crtFilter"),
+      m_crtRecv("CaptureSinkFilter::m_crtRecv"),
       m_pInput(NULL),
-      _captureObserver(captureObserver) {
+      _captureObserver(captureObserver),
+      _moduleId(moduleId) {
   (*phr) = S_OK;
-  m_pInput = new CaptureInputPin(NAME("VideoCaptureInputPin"), this,
+  m_pInput = new CaptureInputPin(moduleId, L"VideoCaptureInputPin",
+                                 this,
                                  &m_crtFilter, phr, L"VideoCapture");
   if (m_pInput == NULL || FAILED(*phr)) {
     (*phr) = FAILED(*phr) ? (*phr) : E_OUTOFMEMORY;
     goto cleanup;
   }
 cleanup:
   return;
 }
@@ -348,76 +372,76 @@ cleanup:
 CaptureSinkFilter::~CaptureSinkFilter() {
   delete m_pInput;
 }
 
 int CaptureSinkFilter::GetPinCount() {
   return 1;
 }
 
-CBasePin* CaptureSinkFilter::GetPin(IN int Index) {
-  CBasePin* pPin;
+BasePin* CaptureSinkFilter::GetPin(IN int Index) {
+  BasePin* pPin;
   LockFilter();
   if (Index == 0) {
     pPin = m_pInput;
   } else {
     pPin = NULL;
   }
   UnlockFilter();
   return pPin;
 }
 
 STDMETHODIMP CaptureSinkFilter::Pause() {
   LockReceive();
   LockFilter();
-  if (m_State == State_Stopped) {
+  if (mState == State_Stopped) {
     //  change the state, THEN activate the input pin
-    m_State = State_Paused;
+    mState = State_Paused;
     if (m_pInput && m_pInput->IsConnected()) {
       m_pInput->Active();
     }
     if (m_pInput && !m_pInput->IsConnected()) {
-      m_State = State_Running;
+      mState = State_Running;
     }
-  } else if (m_State == State_Running) {
-    m_State = State_Paused;
+  } else if (mState == State_Running) {
+    mState = State_Paused;
   }
   UnlockFilter();
   UnlockReceive();
   return S_OK;
 }
 
 STDMETHODIMP CaptureSinkFilter::Stop() {
   LockReceive();
   LockFilter();
 
   //  set the state
-  m_State = State_Stopped;
+  mState = State_Stopped;
 
   //  inactivate the pins
   if (m_pInput)
     m_pInput->Inactive();
 
   UnlockFilter();
   UnlockReceive();
   return S_OK;
 }
 
 void CaptureSinkFilter::SetFilterGraph(IGraphBuilder* graph) {
   LockFilter();
-  m_pGraph = graph;
+  mGraph = graph;
   UnlockFilter();
 }
 
 void CaptureSinkFilter::ProcessCapturedFrame(
     unsigned char* pBuffer,
-    size_t length,
+    int32_t length,
     const VideoCaptureCapability& frameInfo) {
   //  we have the receiver lock
-  if (m_State == State_Running) {
+  if (mState == State_Running) {
     _captureObserver.IncomingFrame(pBuffer, length, frameInfo);
 
     // trying to hold it since it's only a memcpy
     // IMPROVEMENT if this work move critsect
     UnlockReceive();
     return;
   }
   UnlockReceive();
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/sink_filter_ds.h
@@ -6,90 +6,100 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
 #define MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
 
-#include <Streams.h> // Include base DS filter header files
-
 #include "modules/video_capture/video_capture_defines.h"
+#include "BaseInputPin.h"
+#include "BaseFilter.h"
+#include "MediaType.h"
 
 namespace webrtc
 {
 namespace videocapturemodule
 {
 //forward declaration
 
 class CaptureSinkFilter;
 /**
  *	input pin for camera input
  *
  */
-class CaptureInputPin: public CBaseInputPin
+class CaptureInputPin: public mozilla::media::BaseInputPin
 {
 public:
+    int32_t _moduleId;
+
     VideoCaptureCapability _requestedCapability;
     VideoCaptureCapability _resultingCapability;
     HANDLE _threadHandle;
 
-    CaptureInputPin(IN TCHAR* szName,
+    CaptureInputPin(int32_t moduleId,
+                    IN TCHAR* szName,
                     IN CaptureSinkFilter* pFilter,
-                    IN CCritSec * pLock,
+                    IN mozilla::CriticalSection * pLock,
                     OUT HRESULT * pHr,
                     IN LPCWSTR pszName);
     virtual ~CaptureInputPin();
 
-    HRESULT GetMediaType (IN int iPos, OUT CMediaType * pmt);
-    HRESULT CheckMediaType (IN const CMediaType * pmt);
+    HRESULT GetMediaType (IN int iPos, OUT mozilla::media::MediaType * pmt);
+    HRESULT CheckMediaType (IN const mozilla::media::MediaType * pmt);
     STDMETHODIMP Receive (IN IMediaSample *);
     HRESULT SetMatchingMediaType(const VideoCaptureCapability& capability);
 };
 
-class CaptureSinkFilter: public CBaseFilter
+class CaptureSinkFilter: public mozilla::media::BaseFilter
 {
 
 public:
     CaptureSinkFilter(IN TCHAR * tszName,
                       IN LPUNKNOWN punk,
                       OUT HRESULT * phr,
-                      VideoCaptureExternal& captureObserver);
+                      VideoCaptureExternal& captureObserver,
+                      int32_t moduleId);
     virtual ~CaptureSinkFilter();
 
     //  --------------------------------------------------------------------
     //  class methods
 
-    void ProcessCapturedFrame(unsigned char* pBuffer, size_t length,
+    void ProcessCapturedFrame(unsigned char* pBuffer, int32_t length,
                               const VideoCaptureCapability& frameInfo);
     //  explicit receiver lock aquisition and release
-    void LockReceive()  { m_crtRecv.Lock();}
-    void UnlockReceive() {m_crtRecv.Unlock();}
+    void LockReceive()  { m_crtRecv.Enter();}
+    void UnlockReceive() {m_crtRecv.Leave();}
     //  explicit filter lock aquisition and release
-    void LockFilter() {m_crtFilter.Lock();}
-    void UnlockFilter() { m_crtFilter.Unlock(); }
+    void LockFilter() {m_crtFilter.Enter();}
+    void UnlockFilter() { m_crtFilter.Leave(); }
     void SetFilterGraph(IGraphBuilder* graph); // Used if EVR
 
     //  --------------------------------------------------------------------
     //  COM interfaces
-DECLARE_IUNKNOWN    ;
+    STDMETHODIMP QueryInterface(REFIID aIId, void **aInterface)
+    {
+      return mozilla::media::BaseFilter::QueryInterface(aIId, aInterface);
+    }
+
     STDMETHODIMP SetMatchingMediaType(const VideoCaptureCapability& capability);
 
     //  --------------------------------------------------------------------
     //  CBaseFilter methods
     int GetPinCount ();
-    CBasePin * GetPin ( IN int Index);
+    mozilla::media::BasePin * GetPin ( IN int Index);
     STDMETHODIMP Pause ();
     STDMETHODIMP Stop ();
     STDMETHODIMP GetClassID ( OUT CLSID * pCLSID);
     //  --------------------------------------------------------------------
     //  class factory calls this
-    static CUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
+    static IUnknown * CreateInstance (IN LPUNKNOWN punk, OUT HRESULT * phr);
 private:
-    CCritSec m_crtFilter; //  filter lock
-    CCritSec m_crtRecv;  //  receiver lock; always acquire before filter lock
+    mozilla::CriticalSection m_crtFilter; //  filter lock
+    mozilla::CriticalSection m_crtRecv;  //  receiver lock; always acquire before filter lock
     CaptureInputPin * m_pInput;
     VideoCaptureExternal& _captureObserver;
+    int32_t _moduleId;
 };
 }  // namespace videocapturemodule
 }  // namespace webrtc
 #endif // MODULES_VIDEO_CAPTURE_MAIN_SOURCE_WINDOWS_SINK_FILTER_DS_H_
--- a/media/webrtc/trunk/webrtc/modules/video_capture/windows/video_capture_ds.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/windows/video_capture_ds.cc
@@ -15,17 +15,17 @@
 #include "modules/video_capture/windows/sink_filter_ds.h"
 #include "rtc_base/logging.h"
 
 #include <Dvdmedia.h>  // VIDEOINFOHEADER2
 
 namespace webrtc {
 namespace videocapturemodule {
 VideoCaptureDS::VideoCaptureDS()
-    : _captureFilter(NULL),
+    : VideoCaptureImpl(), _dsInfo(), _captureFilter(NULL),
       _graphBuilder(NULL),
       _mediaControl(NULL),
       _sinkFilter(NULL),
       _inputSendPin(NULL),
       _outputCapturePin(NULL),
       _dvFilter(NULL),
       _inputDvPin(NULL),
       _outputDvPin(NULL) {}
@@ -97,17 +97,17 @@ int32_t VideoCaptureDS::Init(const char*
   _outputCapturePin = GetOutputPin(_captureFilter, PIN_CATEGORY_CAPTURE);
   if (_outputCapturePin == NULL)
   {
     RTC_LOG(LS_ERROR) << "Failed to get output capture pin";
     return -1;
   }
 
   // Create the sink filte used for receiving Captured frames.
-  _sinkFilter = new CaptureSinkFilter(SINK_FILTER_NAME, NULL, &hr, *this);
+  _sinkFilter = new CaptureSinkFilter(SINK_FILTER_NAME, NULL, &hr, *this, 0);
   if (hr != S_OK) {
     RTC_LOG(LS_INFO) << "Failed to create send filter";
     return -1;
   }
   _sinkFilter->AddRef();
 
   hr = _graphBuilder->AddFilter(_sinkFilter, SINK_FILTER_NAME);
   if (FAILED(hr)) {
@@ -117,52 +117,48 @@ int32_t VideoCaptureDS::Init(const char*
   _inputSendPin = GetInputPin(_sinkFilter);
   if (_inputSendPin == NULL)
   {
     RTC_LOG(LS_ERROR) << "Failed to get input send pin";
     return -1;
   }
   // Temporary connect here.
   // This is done so that no one else can use the capture device.
-  if (SetCameraOutput(_requestedCapability) != 0) {
+  if (SetCameraOutputIfNeeded(_requestedCapability) != 0) {
     return -1;
   }
   hr = _mediaControl->Pause();
   if (FAILED(hr)) {
     RTC_LOG(LS_INFO)
         << "Failed to Pause the Capture device. Is it already occupied? " << hr;
     return -1;
   }
   RTC_LOG(LS_INFO) << "Capture device '" << deviceUniqueIdUTF8
                    << "' initialized.";
   return 0;
 }
 
 int32_t VideoCaptureDS::StartCapture(const VideoCaptureCapability& capability) {
   rtc::CritScope cs(&_apiCs);
 
-  if (capability != _requestedCapability) {
-    DisconnectGraph();
-
-    if (SetCameraOutput(capability) != 0) {
-      return -1;
-    }
+  if (SetCameraOutputIfNeeded(capability) != 0) {
+    return -1;
   }
   HRESULT hr = _mediaControl->Run();
   if (FAILED(hr)) {
     RTC_LOG(LS_INFO) << "Failed to start the Capture device.";
     return -1;
   }
   return 0;
 }
 
 int32_t VideoCaptureDS::StopCapture() {
   rtc::CritScope cs(&_apiCs);
 
-  HRESULT hr = _mediaControl->Pause();
+  HRESULT hr = _mediaControl->Stop();
   if (FAILED(hr)) {
     RTC_LOG(LS_INFO) << "Failed to stop the capture graph. " << hr;
     return -1;
   }
   return 0;
 }
 bool VideoCaptureDS::CaptureStarted() {
   OAFilterState state = 0;
@@ -173,36 +169,54 @@ bool VideoCaptureDS::CaptureStarted() {
   RTC_LOG(LS_INFO) << "CaptureStarted " << state;
   return state == State_Running;
 }
 int32_t VideoCaptureDS::CaptureSettings(VideoCaptureCapability& settings) {
   settings = _requestedCapability;
   return 0;
 }
 
-int32_t VideoCaptureDS::SetCameraOutput(
-    const VideoCaptureCapability& requestedCapability) {
+int32_t VideoCaptureDS::SetCameraOutputIfNeeded(
+    const VideoCaptureCapability& requestedCapability)
+{
   // Get the best matching capability
   VideoCaptureCapability capability;
   int32_t capabilityIndex;
 
   // Store the new requested size
   _requestedCapability = requestedCapability;
   // Match the requested capability with the supported.
   if ((capabilityIndex = _dsInfo.GetBestMatchedCapability(
            _deviceUniqueId, _requestedCapability, capability)) < 0) {
     return -1;
   }
+
+  if (capability != _activeCapability) {
+    DisconnectGraph();
+    // Store the new mode the camera actually selected
+    _activeCapability = capability;
+  } else {
+    // Camera selected the same mode, nothing to do
+    return 0;
+  }
+
   // Reduce the frame rate if possible.
   if (capability.maxFPS > requestedCapability.maxFPS) {
     capability.maxFPS = requestedCapability.maxFPS;
   } else if (capability.maxFPS <= 0) {
     capability.maxFPS = 30;
   }
 
+    return SetCameraOutput(capability, capabilityIndex);
+}
+
+
+int32_t VideoCaptureDS::SetCameraOutput(const VideoCaptureCapability& capability,
+                                        int32_t capabilityIndex)
+{
   // Convert it to the windows capability index since they are not nexessary
   // the same
   VideoCaptureCapabilityWindows windowsCapability;
   if (_dsInfo.GetWindowsCapability(capabilityIndex, windowsCapability) != 0) {
     return -1;
   }
 
   IAMStreamConfig* streamConfig = NULL;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codec_database.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codec_database.cc
@@ -214,17 +214,16 @@ bool VCMCodecDataBase::RequiresEncoderRe
     return true;
 
   // Does not check startBitrate or maxFramerate
   if (new_send_codec.codecType != send_codec_.codecType ||
       strcmp(new_send_codec.plName, send_codec_.plName) != 0 ||
       new_send_codec.plType != send_codec_.plType ||
       new_send_codec.width != send_codec_.width ||
       new_send_codec.height != send_codec_.height ||
-      new_send_codec.resolution_divisor != send_codec_.resolution_divisor ||
       new_send_codec.maxBitrate != send_codec_.maxBitrate ||
       new_send_codec.minBitrate != send_codec_.minBitrate ||
       new_send_codec.qpMax != send_codec_.qpMax ||
       new_send_codec.numberOfSimulcastStreams !=
           send_codec_.numberOfSimulcastStreams ||
       new_send_codec.mode != send_codec_.mode) {
     return true;
   }
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -330,16 +330,17 @@ void VP8EncoderImpl::SetStreamState(bool
   send_stream_[stream_idx] = send_stream;
 }
 
 void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
                                          int num_temporal_layers,
                                          const VideoCodec& codec) {
   RTC_DCHECK(codec.VP8().tl_factory != nullptr);
   const TemporalLayersFactory* tl_factory = codec.VP8().tl_factory;
+  RTC_DCHECK(temporal_layers_.empty());
   if (num_streams == 1) {
     temporal_layers_.emplace_back(
         tl_factory->Create(0, num_temporal_layers, tl0_pic_idx_[0]));
     temporal_layers_checkers_.emplace_back(
         tl_factory->CreateChecker(0, num_temporal_layers, tl0_pic_idx_[0]));
   } else {
     for (int i = 0; i < num_streams; ++i) {
       RTC_CHECK_GT(num_temporal_layers, 0);
@@ -361,17 +362,17 @@ int VP8EncoderImpl::InitEncode(const Vid
   }
   if (inst->maxFramerate < 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   // allow zero to represent an unspecified maxBitRate
   if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
-  if (inst->width <= 1 || inst->height <= 1) {
+  if (inst->width < 1 || inst->height < 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   if (number_of_cores < 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
   if (inst->VP8().automaticResizeOn && inst->numberOfSimulcastStreams > 1) {
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
@@ -430,18 +431,19 @@ int VP8EncoderImpl::InitEncode(const Vid
     downsampling_factors_[number_of_streams - 1].num = 1;
     downsampling_factors_[number_of_streams - 1].den = 1;
   }
   for (int i = 0; i < number_of_streams; ++i) {
     // allocate memory for encoded image
     if (encoded_images_[i]._buffer != NULL) {
       delete[] encoded_images_[i]._buffer;
     }
+    // Reserve 100 extra bytes for overhead at small resolutions.
     encoded_images_[i]._size =
-        CalcBufferSize(VideoType::kI420, codec_.width, codec_.height);
+        CalcBufferSize(VideoType::kI420, codec_.width, codec_.height) + 100;
     encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
     encoded_images_[i]._completeFrame = true;
   }
   // populate encoder configuration with default values
   if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0],
                                    0)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
@@ -563,17 +565,19 @@ int VP8EncoderImpl::InitEncode(const Vid
     // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for
     // the y plane, but only half of it to the u and v planes.
     vpx_img_alloc(&raw_images_[i], VPX_IMG_FMT_I420,
                   inst->simulcastStream[stream_idx].width,
                   inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
     SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
     configurations_[i].rc_target_bitrate = stream_bitrates[stream_idx];
     temporal_layers_[stream_idx]->OnRatesUpdated(
-        stream_bitrates[stream_idx], inst->maxBitrate, inst->maxFramerate);
+      // here too - VP8 won't init if it thinks temporal layers have no bits
+      stream_bitrates[stream_idx] > 0 ? stream_bitrates[stream_idx] : inst->simulcastStream[stream_idx].minBitrate,
+      inst->maxBitrate, inst->maxFramerate);
     temporal_layers_[stream_idx]->UpdateConfiguration(&configurations_[i]);
   }
 
   return InitAndSetControlSettings();
 }
 
 int VP8EncoderImpl::SetCpuSpeed(int width, int height) {
 #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) \
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -72,16 +72,17 @@ VP9EncoderImpl::VP9EncoderImpl()
       rc_max_intra_target_(0),
       encoder_(nullptr),
       config_(nullptr),
       raw_(nullptr),
       input_image_(nullptr),
       frames_since_kf_(0),
       num_temporal_layers_(0),
       num_spatial_layers_(0),
+      num_cores_(0),
       is_flexible_mode_(false),
       frames_encoded_(0),
       // Use two spatial when screensharing with flexible mode.
       spatial_layer_(new ScreenshareLayersVP9(2)) {
   memset(&codec_, 0, sizeof(codec_));
   memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
 
   Random random(rtc::TimeMicros());
@@ -495,23 +496,16 @@ int VP9EncoderImpl::Encode(const VideoFr
   if (encoded_complete_callback_ == nullptr) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
   FrameType frame_type = kVideoFrameDelta;
   // We only support one stream at the moment.
   if (frame_types && frame_types->size() > 0) {
     frame_type = (*frame_types)[0];
   }
-  if (input_image.width() != codec_.width ||
-      input_image.height() != codec_.height) {
-    int ret = UpdateCodecFrameSize(input_image);
-    if (ret < 0) {
-      return ret;
-    }
-  }
   RTC_DCHECK_EQ(input_image.width(), raw_->d_w);
   RTC_DCHECK_EQ(input_image.height(), raw_->d_h);
 
   // Set input image for use in the callback.
   // This was necessary since you need some information from input_image.
   // You can save only the necessary information (such as timestamp) instead of
   // doing this.
   input_image_ = &input_image;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -117,16 +117,17 @@ class VP9EncoderImpl : public VP9Encoder
   vpx_image_t* raw_;
   vpx_svc_extra_cfg_t svc_params_;
   const VideoFrame* input_image_;
   GofInfoVP9 gof_;       // Contains each frame's temporal information for
                          // non-flexible mode.
   size_t frames_since_kf_;
   uint8_t num_temporal_layers_;
   uint8_t num_spatial_layers_;
+  uint8_t num_cores_;
 
   // Used for flexible mode.
   bool is_flexible_mode_;
   int64_t buffer_updated_at_frame_[kNumVp9Buffers];
   int64_t frames_encoded_;
   uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
   uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
   std::unique_ptr<ScreenshareLayersVP9> spatial_layer_;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/frame_buffer2_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/frame_buffer2_unittest.cc
@@ -465,35 +465,16 @@ TEST_F(TestFrameBuffer2, LastContinuousF
   EXPECT_EQ(pid, InsertFrame(pid + 1, 1, ts, true, pid));
   EXPECT_EQ(pid, InsertFrame(pid + 2, 0, ts, false, pid + 1));
   EXPECT_EQ(pid, InsertFrame(pid + 2, 1, ts, true, pid + 1));
   EXPECT_EQ(pid, InsertFrame(pid + 3, 0, ts, false, pid + 2));
   EXPECT_EQ(pid + 3, InsertFrame(pid + 1, 0, ts, false, pid));
   EXPECT_EQ(pid + 3, InsertFrame(pid + 3, 1, ts, true, pid + 2));
 }
 
-TEST_F(TestFrameBuffer2, ForwardJumps) {
-  EXPECT_EQ(5453, InsertFrame(5453, 0, 1, false));
-  ExtractFrame();
-  EXPECT_EQ(5454, InsertFrame(5454, 0, 1, false, 5453));
-  ExtractFrame();
-  EXPECT_EQ(15670, InsertFrame(15670, 0, 1, false));
-  ExtractFrame();
-  EXPECT_EQ(29804, InsertFrame(29804, 0, 1, false));
-  ExtractFrame();
-  EXPECT_EQ(29805, InsertFrame(29805, 0, 1, false, 29804));
-  ExtractFrame();
-  EXPECT_EQ(29806, InsertFrame(29806, 0, 1, false, 29805));
-  ExtractFrame();
-  EXPECT_EQ(33819, InsertFrame(33819, 0, 1, false));
-  ExtractFrame();
-  EXPECT_EQ(41248, InsertFrame(41248, 0, 1, false));
-  ExtractFrame();
-}
-
 TEST_F(TestFrameBuffer2, PictureIdJumpBack) {
   uint16_t pid = Rand();
   uint32_t ts = Rand();
 
   EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false));
   EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 0, ts + 1, false, pid));
   ExtractFrame();
   CheckFrame(0, pid, 0);
--- a/media/webrtc/trunk/webrtc/modules/video_coding/generic_decoder.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/generic_decoder.h
@@ -20,17 +20,17 @@
 #include "modules/video_coding/timing.h"
 #include "rtc_base/criticalsection.h"
 #include "rtc_base/thread_checker.h"
 
 namespace webrtc {
 
 class VCMReceiveCallback;
 
-enum { kDecoderFrameMemoryLength = 10 };
+enum { kDecoderFrameMemoryLength = 30 };
 
 struct VCMFrameInformation {
   int64_t renderTimeMs;
   int64_t decodeStartTimeMs;
   void* userData;
   VideoRotation rotation;
   VideoContentType content_type;
   EncodedImage::Timing timing;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/h264_sprop_parameter_sets_unittest.cc
@@ -13,17 +13,17 @@
 #include <stdint.h>
 
 #include <vector>
 
 #include "test/gtest.h"
 
 namespace webrtc {
 
-class H264SpropParameterSetsTest : public testing::Test {
+class H264SpropParameterSetsTest : public ::testing::Test {
  public:
   H264SpropParameterSets h264_sprop;
 };
 
 TEST_F(H264SpropParameterSetsTest, Base64DecodeSprop) {
   // Example sprop string from https://tools.ietf.org/html/rfc3984 .
   EXPECT_TRUE(h264_sprop.DecodeSprop("Z0IACpZTBYmI,aMljiA=="));
   static const std::vector<uint8_t> raw_sps{0x67, 0x42, 0x00, 0x0A, 0x96,
--- a/media/webrtc/trunk/webrtc/modules/video_coding/include/video_coding.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/include/video_coding.h
@@ -309,29 +309,16 @@ class VideoCodingModule : public Module 
   //
   // Input:
   //              - callback      : The callback to be registered in the VCM.
   //
   // Return value     : VCM_OK,     on success.
   //                    <0,         on error.
   virtual int32_t RegisterPacketRequestCallback(
       VCMPacketRequestCallback* callback) = 0;
- 
-  // Register a receive state change callback. This callback will be called when the
-  // module state has changed
-  //
-  // Input:
-  //      - callback      : The callback object to be used by the module when
-  //                        the receiver decode state changes.
-  //                        De-register with a NULL pointer.
-  //
-  // Return value      : VCM_OK, on success.
-  //                     < 0,         on error.
-  virtual int32_t RegisterReceiveStateCallback(
-      VCMReceiveStateCallback* callback) = 0;
 
   // Waits for the next frame in the jitter buffer to become complete
   // (waits no longer than maxWaitTimeMs), then passes it to the decoder for
   // decoding.
   // Should be called as often as possible to get the most out of the decoder.
   //
   // Return value      : VCM_OK, on success.
   //                     < 0,    on error.
--- a/media/webrtc/trunk/webrtc/modules/video_coding/include/video_coding_defines.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/include/video_coding_defines.h
@@ -142,27 +142,16 @@ class VCMPacketRequestCallback {
  public:
   virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
                                 uint16_t length) = 0;
 
  protected:
   virtual ~VCMPacketRequestCallback() {}
 };
 
-// Callback class used for telling the user about the state of the decoder & jitter buffer.
-//
-class VCMReceiveStateCallback {
- public:
-  virtual void ReceiveStateChange(VideoReceiveState state) = 0;
-
- protected:
-  virtual ~VCMReceiveStateCallback() {
-  }
-};
-
 class NackSender {
  public:
   virtual void SendNack(const std::vector<uint16_t>& sequence_numbers) = 0;
 
  protected:
   virtual ~NackSender() {}
 };
 
--- a/media/webrtc/trunk/webrtc/modules/video_coding/jitter_buffer.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/jitter_buffer.cc
@@ -568,16 +568,22 @@ VCMEncodedFrame* VCMJitterBuffer::Extrac
 
   // We have a frame - update the last decoded state and nack list.
   last_decoded_state_.SetState(frame);
   DropPacketsFromNackList(last_decoded_state_.sequence_num());
 
   if ((*frame).IsSessionComplete())
     UpdateAveragePacketsPerFrame(frame->NumPackets());
 
+  if (frame->Length() == 0) {
+    // Normally only if MakeDecodable() on an incomplete frame threw it all away
+    ReleaseFrame(frame);
+    return NULL;
+  }
+
   return frame;
 }
 
 // Release frame when done with decoding. Should never be used to release
 // frames from within the jitter buffer.
 void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) {
   RTC_CHECK(frame != nullptr);
   rtc::CritScope cs(&crit_sect_);
--- a/media/webrtc/trunk/webrtc/modules/video_coding/media_optimization.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/media_optimization.cc
@@ -144,15 +144,17 @@ void MediaOptimization::ProcessIncomingF
         incoming_frame_times_[0] - incoming_frame_times_[num - 1];
     incoming_frame_rate_ = 0.0;  // No frame rate estimate available.
     if (diff > 0) {
       incoming_frame_rate_ = nr_of_frames * 1000.0f / static_cast<float>(diff);
     }
   }
 }
 
+/* TODO: Fix CpuLoadState
 void MediaOptimization::SetCPULoadState(CPULoadState state) {
     CriticalSectionScoped lock(crit_sect_.get());
     loadstate_ = state;
 }
+*/
 
 }  // namespace media_optimization
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/video_coding/receiver.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/receiver.cc
@@ -208,17 +208,16 @@ VCMEncodedFrame* VCMReceiver::FrameForDe
   // Extract the frame from the jitter buffer and set the render time.
   VCMEncodedFrame* frame = jitter_buffer_.ExtractAndSetDecode(frame_timestamp);
   if (frame == NULL) {
     return NULL;
   }
   frame->SetRenderTime(render_time_ms);
   TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
                           "render_time", frame->RenderTimeMs());
-  UpdateReceiveState(*frame);
   if (!frame->Complete()) {
     // Update stats for incomplete frames.
     bool retransmitted = false;
     const int64_t last_packet_time_ms =
         jitter_buffer_.LastPacketTime(frame, &retransmitted);
     if (last_packet_time_ms >= 0 && !retransmitted) {
       // We don't want to include timestamps which have suffered from
       // retransmission here, since we compensate with extra retransmission
@@ -259,21 +258,16 @@ VCMNackMode VCMReceiver::NackMode() cons
   rtc::CritScope cs(&crit_sect_);
   return jitter_buffer_.nack_mode();
 }
 
 std::vector<uint16_t> VCMReceiver::NackList(bool* request_key_frame) {
   return jitter_buffer_.GetNackList(request_key_frame);
 }
 
-VideoReceiveState VCMReceiver::ReceiveState() const {
-  CriticalSectionScoped cs(crit_sect_);
-  return receiveState_;
-}
-
 void VCMReceiver::SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) {
   jitter_buffer_.SetDecodeErrorMode(decode_error_mode);
 }
 
 VCMDecodeErrorMode VCMReceiver::DecodeErrorMode() const {
   return jitter_buffer_.decode_error_mode();
 }
 
@@ -283,26 +277,14 @@ int VCMReceiver::SetMinReceiverDelay(int
     return -1;
   }
   max_video_delay_ms_ = desired_delay_ms + kMaxVideoDelayMs;
   // Initializing timing to the desired delay.
   timing_->set_min_playout_delay(desired_delay_ms);
   return 0;
 }
 
-void VCMReceiver::UpdateReceiveState(const VCMEncodedFrame& frame) {
-  if (frame.Complete() && frame.FrameType() == kVideoFrameKey) {
-    receiveState_ = kReceiveStateNormal;
-    return;
-  }
-  if (frame.MissingFrame() || !frame.Complete()) {
-    // State is corrupted
-    receiveState_ = kReceiveStateWaitingKey;
-  }
-  // state continues
-}
-
 void VCMReceiver::RegisterStatsCallback(
     VCMReceiveStatisticsCallback* callback) {
   jitter_buffer_.RegisterStatsCallback(callback);
 }
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/video_coding/rtp_frame_reference_finder.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/rtp_frame_reference_finder.cc
@@ -456,18 +456,21 @@ RtpFrameReferenceFinder::FrameDecision R
   gof_info_.erase(gof_info_.begin(), clean_gof_info_to);
 
   if (frame->frame_type() == kVideoFrameKey) {
     // When using GOF all keyframes must include the scalability structure.
     if (!codec_header.ss_data_available)
       RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure";
 
     frame->num_references = 0;
-    GofInfo info = gof_info_.find(codec_header.tl0_pic_idx)->second;
-    FrameReceivedVp9(frame->picture_id, &info);
+    auto gof_info_it = gof_info_.find(codec_header.tl0_pic_idx);
+    if (gof_info_it == gof_info_.end())
+      return kDrop;
+
+    FrameReceivedVp9(frame->picture_id, &gof_info_it->second);
     UnwrapPictureIds(frame);
     return kHandOff;
   }
 
   auto gof_info_it = gof_info_.find(
       (codec_header.temporal_idx == 0 && !codec_header.ss_data_available)
           ? codec_header.tl0_pic_idx - 1
           : codec_header.tl0_pic_idx);
@@ -528,18 +531,18 @@ RtpFrameReferenceFinder::FrameDecision R
 bool RtpFrameReferenceFinder::MissingRequiredFrameVp9(uint16_t picture_id,
                                                       const GofInfo& info) {
   size_t diff =
       ForwardDiff<uint16_t, kPicIdLength>(info.gof->pid_start, picture_id);
   size_t gof_idx = diff % info.gof->num_frames_in_gof;
   size_t temporal_idx = info.gof->temporal_idx[gof_idx];
 
   if (temporal_idx >= kMaxTemporalLayers) {
-    LOG(LS_WARNING) << "At most " << kMaxTemporalLayers << " temporal "
-                    << "layers are supported.";
+    RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers << " temporal "
+                        << "layers are supported.";
     return true;
   }
 
   // For every reference this frame has, check if there is a frame missing in
   // the interval (|ref_pid|, |picture_id|) in any of the lower temporal
   // layers. If so, we are missing a required frame.
   uint8_t num_references = info.gof->num_ref_pics[gof_idx];
   for (size_t i = 0; i < num_references; ++i) {
--- a/media/webrtc/trunk/webrtc/modules/video_coding/session_info.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/session_info.cc
@@ -195,17 +195,17 @@ size_t VCMSessionInfo::InsertBuffer(uint
     while (nalu_ptr + kLengthFieldLength <= packet_buffer + packet.sizeBytes) {
       size_t length = BufferToUWord16(nalu_ptr);
       if (nalu_ptr + kLengthFieldLength + length <= packet_buffer + packet.sizeBytes) {
         required_length +=
           length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
         nalu_ptr += kLengthFieldLength + length;
       } else {
         // Something is very wrong!
-        LOG(LS_ERROR) << "Failed to insert packet due to corrupt H264 STAP-A";
+        RTC_LOG(LS_ERROR) << "Failed to insert packet due to corrupt H264 STAP-A";
         return 0;
       }
     }
     ShiftSubsequentPackets(packet_it, required_length);
     nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
     uint8_t* frame_buffer_ptr = frame_buffer + offset;
     // we already know we won't go past end-of-buffer
     while (nalu_ptr + kLengthFieldLength <= packet_buffer + packet.sizeBytes) {
--- a/media/webrtc/trunk/webrtc/modules/video_coding/video_coding_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/video_coding_impl.cc
@@ -237,30 +237,37 @@ class VideoCodingModuleImpl : public Vid
     return receiver_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
                                      max_incomplete_time_ms);
   }
 
   void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) override {
     return receiver_.SetDecodeErrorMode(decode_error_mode);
   }
 
+  virtual void SetCPULoadState(CPULoadState state) override {
+    /* TODO: fix CPULoadState stuff...
+    return sender_.SetCPULoadState(state);
+    */
+  }
+
   int SetMinReceiverDelay(int desired_delay_ms) override {
     return receiver_.SetMinReceiverDelay(desired_delay_ms);
   }
 
   int32_t SetReceiveChannelParameters(int64_t rtt) override {
     return receiver_.SetReceiveChannelParameters(rtt);
   }
 
   void RegisterPostEncodeImageCallback(
       EncodedImageCallback* observer) override {
     post_encode_callback_.Register(observer);
   }
 
   void TriggerDecoderShutdown() override { receiver_.TriggerDecoderShutdown(); }
+  void Reset() override {receiver_.Reset(); }
 
  private:
   rtc::ThreadChecker construction_thread_;
   EncodedImageCallbackWrapper post_encode_callback_;
   vcm::VideoSender sender_;
   std::unique_ptr<VideoBitrateAllocator> rate_allocator_;
   std::unique_ptr<VCMTiming> timing_;
   vcm::VideoReceiver receiver_;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/video_coding_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/video_coding_impl.h
@@ -180,20 +180,23 @@ class VideoReceiver : public Module {
                        int max_incomplete_time_ms);
 
   void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode);
   int SetMinReceiverDelay(int desired_delay_ms);
 
   int32_t SetReceiveChannelParameters(int64_t rtt);
   int32_t SetVideoProtection(VCMVideoProtection videoProtection, bool enable);
 
+  void SetCPULoadState(CPULoadState state);
+
   int64_t TimeUntilNextProcess() override;
   void Process() override;
 
   void TriggerDecoderShutdown();
+  void Reset();
 
  protected:
   int32_t Decode(const webrtc::VCMEncodedFrame& frame)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(receive_crit_);
   int32_t RequestKeyFrame();
 
  private:
   rtc::ThreadChecker construction_thread_;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/video_receiver.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/video_receiver.cc
@@ -102,35 +102,16 @@ void VideoReceiver::Process() {
         if (_packetRequestCallback != nullptr) {
           _packetRequestCallback->ResendPackets(&nackList[0], nackList.size());
         }
       }
     }
   }
 }
 
-void VideoReceiver::SetReceiveState(VideoReceiveState state) {
-  if (state == _receiveState) {
-    return;
-  }
-  if (state == kReceiveStatePreemptiveNACK &&
-      (_receiveState == kReceiveStateWaitingKey ||
-       _receiveState == kReceiveStateDecodingWithErrors)) {
-    // invalid state transition - this lets us try to set it on NACK
-    // without worrying about the current state
-    return;
-  }
- _receiveState = state;
-
- rtc::CritScope cs(&process_crit_);
-  if (_receiveStateCallback != NULL) {
-    _receiveStateCallback->ReceiveStateChange(_receiveState);
-  }
-}
-
 int64_t VideoReceiver::TimeUntilNextProcess() {
   int64_t timeUntilNextProcess = _receiveStatsTimer.TimeUntilProcess();
   if (_receiver.NackMode() != kNoNack) {
     // We need a Process call more often if we are relying on
     // retransmissions
     timeUntilNextProcess =
         VCM_MIN(timeUntilNextProcess, _retransmissionTimer.TimeUntilProcess());
   }
@@ -232,16 +213,20 @@ int32_t VideoReceiver::RegisterPacketReq
   return VCM_OK;
 }
 
 void VideoReceiver::TriggerDecoderShutdown() {
   RTC_DCHECK(construction_thread_.CalledOnValidThread());
   _receiver.TriggerDecoderShutdown();
 }
 
+void VideoReceiver::Reset() {
+  _receiver.Reset();
+}
+
 // Decode next frame, blocking.
 // Should be called as often as possible to get the most out of the decoder.
 int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
   bool prefer_late_decoding = false;
   {
     // TODO(tommi): Chances are that this lock isn't required.
     rtc::CritScope cs(&receive_crit_);
     prefer_late_decoding = _codecDataBase.PrefersLateDecoding();
@@ -254,17 +239,16 @@ int32_t VideoReceiver::Decode(uint16_t m
     return VCM_FRAME_NOT_READY;
 
   {
     rtc::CritScope cs(&process_crit_);
     if (drop_frames_until_keyframe_) {
       // Still getting delta frames, schedule another keyframe request as if
       // decode failed.
       if (frame->FrameType() != kVideoFrameKey) {
-        LOG(LS_INFO) << "Dropping delta frame for receiver " << (void*) this;
         _scheduleKeyRequest = true;
         _receiver.ReleaseFrame(frame);
         return VCM_FRAME_NOT_READY;
       }
       drop_frames_until_keyframe_ = false;
     }
   }
 
@@ -382,17 +366,16 @@ int32_t VideoReceiver::IncomingPacket(co
   // TODO(holmer): Investigate if this somehow should use the key frame
   // request scheduling to throttle the requests.
   if (ret == VCM_FLUSH_INDICATOR) {
     {
       rtc::CritScope cs(&process_crit_);
       drop_frames_until_keyframe_ = true;
     }
     RequestKeyFrame();
-    SetReceiveState(kReceiveStateWaitingKey);
   } else if (ret < 0) {
     return ret;
   }
   return VCM_OK;
 }
 
 // Minimum playout delay (used for lip-sync). This is the minimum delay required
 // to sync with audio. Not included in  VideoCodingModule::Delay()
--- a/media/webrtc/trunk/webrtc/modules/video_coding/video_sender.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/video_sender.cc
@@ -171,17 +171,17 @@ int VideoSender::FrameRate(unsigned int*
 }
 
 EncoderParameters VideoSender::UpdateEncoderParameters(
     const EncoderParameters& params,
     VideoBitrateAllocator* bitrate_allocator,
     uint32_t target_bitrate_bps) {
   uint32_t video_target_rate_bps = _mediaOpt.SetTargetRates(target_bitrate_bps);
   uint32_t input_frame_rate = _mediaOpt.InputFrameRate();
-  if (input_frame_rate == 0)
+  if (input_frame_rate == 0 || input_frame_rate > current_codec_.maxFramerate)
     input_frame_rate = current_codec_.maxFramerate;
 
   BitrateAllocation bitrate_allocation;
   // Only call allocators if bitrate > 0 (ie, not suspended), otherwise they
   // might cap the bitrate to the min bitrate configured.
   if (target_bitrate_bps > 0) {
     if (bitrate_allocator) {
       bitrate_allocation = bitrate_allocator->GetAllocation(
@@ -388,15 +388,16 @@ int32_t VideoSender::IntraFrameRequest(s
 
 int32_t VideoSender::EnableFrameDropper(bool enable) {
   rtc::CritScope lock(&encoder_crit_);
   frame_dropper_enabled_ = enable;
   _mediaOpt.EnableFrameDropper(enable);
   return VCM_OK;
 }
 
+/* TODO: Fixup SetCPULoadState
 void VideoSender::SetCPULoadState(CPULoadState state) {
   rtc::CritScope lock(&encoder_crit_);
   _mediaOpt.SetCPULoadState(state);
 }
-
+*/
 }  // namespace vcm
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/rtc_base/base64.cc
+++ b/media/webrtc/trunk/webrtc/rtc_base/base64.cc
@@ -72,17 +72,17 @@ bool Base64::IsBase64Char(char ch) {
   return (('A' <= ch) && (ch <= 'Z')) || (('a' <= ch) && (ch <= 'z')) ||
          (('0' <= ch) && (ch <= '9')) || (ch == '+') || (ch == '/');
 }
 
 bool Base64::GetNextBase64Char(char ch, char* next_ch) {
   if (next_ch == nullptr) {
     return false;
   }
-  const char* p = strchr(Base64Table, ch);
+  const char* p = ::strchr(Base64Table, ch);
   if (!p)
     return false;
   ++p;
   *next_ch = (*p) ? *p : Base64Table[0];
   return true;
 }
 
 bool Base64::IsBase64Encoded(const std::string& str) {
--- a/media/webrtc/trunk/webrtc/rtc_base/basictypes.h
+++ b/media/webrtc/trunk/webrtc/rtc_base/basictypes.h
@@ -6,16 +6,17 @@
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #ifndef RTC_BASE_BASICTYPES_H_
 #define RTC_BASE_BASICTYPES_H_
 
+#include <sys/types.h> // for pid_t
 #include <stddef.h>  // for NULL, size_t
 #include <stdint.h>  // for uintptr_t and (u)int_t types.
 
 // Detect compiler is for x86 or x64.
 #if defined(__x86_64__) || defined(_M_X64) || \
     defined(__i386__) || defined(_M_IX86)
 #define CPU_X86 1
 #endif
--- a/media/webrtc/trunk/webrtc/rtc_base/byteorder.h
+++ b/media/webrtc/trunk/webrtc/rtc_base/byteorder.h
@@ -96,29 +96,52 @@ inline void SetBE16(void* memory, uint16
   *static_cast<uint16_t*>(memory) = htobe16(v);
 }
 
 inline void SetBE32(void* memory, uint32_t v) {
   *static_cast<uint32_t*>(memory) = htobe32(v);
 }
 
 inline void SetBE64(void* memory, uint64_t v) {
+#ifdef WEBRTC_WIN
+  //Mozilla: because we support Win7, htonll is not visible to us
+  Set8(memory, 0, static_cast<uint8_t>(v >> 56));
+  Set8(memory, 1, static_cast<uint8_t>(v >> 48));
+  Set8(memory, 2, static_cast<uint8_t>(v >> 40));
+  Set8(memory, 3, static_cast<uint8_t>(v >> 32));
+  Set8(memory, 4, static_cast<uint8_t>(v >> 24));
+  Set8(memory, 5, static_cast<uint8_t>(v >> 16));
+  Set8(memory, 6, static_cast<uint8_t>(v >> 8));
+  Set8(memory, 7, static_cast<uint8_t>(v >> 0));
+#else
   *static_cast<uint64_t*>(memory) = htobe64(v);
+#endif
 }
 
 inline uint16_t GetBE16(const void* memory) {
   return be16toh(*static_cast<const uint16_t*>(memory));
 }
 
 inline uint32_t GetBE32(const void* memory) {
   return be32toh(*static_cast<const uint32_t*>(memory));
 }
 
 inline uint64_t GetBE64(const void* memory) {
+#ifdef WEBRTC_WIN
+  return (static_cast<uint64_t>(Get8(memory, 0)) << 56) |
+         (static_cast<uint64_t>(Get8(memory, 1)) << 48) |
+         (static_cast<uint64_t>(Get8(memory, 2)) << 40) |
+         (static_cast<uint64_t>(Get8(memory, 3)) << 32) |
+         (static_cast<uint64_t>(Get8(memory, 4)) << 24) |
+         (static_cast<uint64_t>(Get8(memory, 5)) << 16) |
+         (static_cast<uint64_t>(Get8(memory, 6)) << 8) |
+         (static_cast<uint64_t>(Get8(memory, 7)) << 0);
+#else
   return be64toh(*static_cast<const uint64_t*>(memory));
+#endif
 }
 
 inline void SetLE16(void* memory, uint16_t v) {
   *static_cast<uint16_t*>(memory) = htole16(v);
 }
 
 inline void SetLE32(void* memory, uint32_t v) {
   *static_cast<uint32_t*>(memory) = htole32(v);
@@ -153,26 +176,36 @@ inline uint16_t HostToNetwork16(uint16_t
   return htobe16(n);
 }
 
 inline uint32_t HostToNetwork32(uint32_t n) {
   return htobe32(n);
 }
 
 inline uint64_t HostToNetwork64(uint64_t n) {
+#ifdef WEBRTC_WIN
+  uint64_t result;
+  SetBE64(&result, n);
+  return result;
+#else
   return htobe64(n);
+#endif
 }
 
 inline uint16_t NetworkToHost16(uint16_t n) {
   return be16toh(n);
 }
 
 inline uint32_t NetworkToHost32(uint32_t n) {
   return be32toh(n);
 }
 
 inline uint64_t NetworkToHost64(uint64_t n) {
+#ifdef WEBRTC_WIN
+  return GetBE64(&n);
+#else
   return be64toh(n);
+#endif
 }
 
 }  // namespace rtc
 
 #endif  // RTC_BASE_BYTEORDER_H_
--- a/media/webrtc/trunk/webrtc/rtc_base/platform_thread.cc
+++ b/media/webrtc/trunk/webrtc/rtc_base/platform_thread.cc
@@ -17,16 +17,25 @@
 
 #if defined(WEBRTC_LINUX)
 #include <sys/prctl.h>
 #include <sys/syscall.h>
 #endif
 
 namespace rtc {
 
+#if defined(WEBRTC_WIN)
+// For use in ThreadWindowsUI callbacks
+static UINT static_reg_windows_msg = RegisterWindowMessageW(L"WebrtcWindowsUIThreadEvent");
+// timer id used in delayed callbacks
+static const UINT_PTR kTimerId = 1;
+static const wchar_t kThisProperty[] = L"ThreadWindowsUIPtr";
+static const wchar_t kThreadWindow[] = L"WebrtcWindowsUIThread";
+#endif
+
 PlatformThreadId CurrentThreadId() {
   PlatformThreadId ret;
 #if defined(WEBRTC_WIN)
   ret = GetCurrentThreadId();
 #elif defined(WEBRTC_POSIX)
 #if defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
   ret = pthread_mach_thread_np(pthread_self());
 #elif defined(WEBRTC_ANDROID)
@@ -121,16 +130,69 @@ PlatformThread::~PlatformThread() {
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
 #if defined(WEBRTC_WIN)
   RTC_DCHECK(!thread_);
   RTC_DCHECK(!thread_id_);
 #endif  // defined(WEBRTC_WIN)
 }
 
 #if defined(WEBRTC_WIN)
+bool PlatformUIThread::InternalInit() {
+  // Create an event window for use in generating callbacks to capture
+  // objects.
+  CritScope scoped_lock(&cs_);
+  if (hwnd_ == NULL) {
+    WNDCLASSW wc;
+    HMODULE hModule = GetModuleHandle(NULL);
+    if (!GetClassInfoW(hModule, kThreadWindow, &wc)) {
+      ZeroMemory(&wc, sizeof(WNDCLASSW));
+      wc.hInstance = hModule;
+      wc.lpfnWndProc = EventWindowProc;
+      wc.lpszClassName = kThreadWindow;
+      RegisterClassW(&wc);
+    }
+    hwnd_ = CreateWindowW(kThreadWindow, L"",
+                          0, 0, 0, 0, 0,
+                          NULL, NULL, hModule, NULL);
+    RTC_DCHECK(hwnd_);
+    SetPropW(hwnd_, kThisProperty, this);
+
+    if (timeout_) {
+      // if someone set the timer before we started
+      RequestCallbackTimer(timeout_);
+    }
+  }
+  return !!hwnd_;
+}
+
+void PlatformUIThread::RequestCallback() {
+  RTC_DCHECK(hwnd_);
+  RTC_DCHECK(static_reg_windows_msg);
+  PostMessage(hwnd_, static_reg_windows_msg, 0, 0);
+}
+
+bool PlatformUIThread::RequestCallbackTimer(unsigned int milliseconds) {
+  CritScope scoped_lock(&cs_);
+  if (!hwnd_) {
+    // There is a condition that thread_ (PlatformUIThread) has been
+    // created but PlatformUIThread::Run() hasn't been run yet (hwnd_ is
+    // null while thread_ is not). If we do RTC_DCHECK(!thread_) here,
+    // it would lead to crash in this condition.
+
+    // set timer once thread starts
+  } else {
+    if (timerid_) {
+      KillTimer(hwnd_, timerid_);
+    }
+    timerid_ = SetTimer(hwnd_, kTimerId, milliseconds, NULL);
+  }
+  timeout_ = milliseconds;
+  return !!timerid_;
+}
+
 DWORD WINAPI PlatformThread::StartThread(void* param) {
   // The GetLastError() function only returns valid results when it is called
   // after a Win32 API function that returns a "failed" result. A crash dump
   // contains the result from GetLastError() and to make sure it does not
   // falsely report a Windows error we call SetLastError here.
   ::SetLastError(ERROR_SUCCESS);
   static_cast<PlatformThread*>(param)->Run();
   return 0;
@@ -200,16 +262,33 @@ void PlatformThread::Stop() {
   RTC_CHECK_EQ(0, pthread_join(thread_, nullptr));
   if (!run_function_)
     AtomicOps::ReleaseStore(&stop_flag_, 0);
   thread_ = 0;
 #endif  // defined(WEBRTC_WIN)
   spawned_thread_checker_.DetachFromThread();
 }
 
+#ifdef WEBRTC_WIN
+void PlatformUIThread::Stop() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  // Shut down the dispatch loop and let the background thread exit.
+  if (timerid_) {
+    KillTimer(hwnd_, timerid_);
+    timerid_ = 0;
+  }
+
+  PostMessage(hwnd_, WM_CLOSE, 0, 0);
+
+  hwnd_ = NULL;
+
+  PlatformThread::Stop();
+}
+#endif
+
 // TODO(tommi): Deprecate the loop behavior in PlatformThread.
 // * Introduce a new callback type that returns void.
 // * Remove potential for a busy loop in PlatformThread.
 // * Delegate the responsibility for how to stop the thread, to the
 //   implementation that actually uses the thread.
 // All implementations will need to be aware of how the thread should be stopped
 // and encouraging a busy polling loop, can be costly in terms of power and cpu.
 void PlatformThread::Run() {
@@ -269,16 +348,75 @@ void PlatformThread::Run() {
 #else
     static const struct timespec ts_null = {0};
     nanosleep(&ts_null, nullptr);
 #endif
   } while (!AtomicOps::AcquireLoad(&stop_flag_));
 #endif  // defined(WEBRTC_WIN)
 }
 
+#if defined(WEBRTC_WIN)
+void PlatformUIThread::Run() {
+  RTC_CHECK(InternalInit()); // always evaluates
+  do {
+    // The interface contract of Start/Stop is that for a successful call to
+    // Start, there should be at least one call to the run function.  So we
+    // call the function before checking |stop_|.
+    run_function_deprecated_(obj_);
+
+    // Alertable sleep to permit RaiseFlag to run and update |stop_|.
+    if (MsgWaitForMultipleObjectsEx(0, nullptr, INFINITE, QS_ALLINPUT,
+                                    MWMO_ALERTABLE | MWMO_INPUTAVAILABLE) ==
+        WAIT_OBJECT_0) {
+      MSG msg;
+      if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
+        if (msg.message == WM_QUIT) {
+          stop_ = true;
+          break;
+        }
+        TranslateMessage(&msg);
+        DispatchMessage(&msg);
+      }
+    }
+
+  } while (!stop_);
+}
+
+void PlatformUIThread::NativeEventCallback() {
+  if (!run_function_deprecated_) {
+    stop_ = true;
+    return;
+  }
+  run_function_deprecated_(obj_);
+}
+
+/* static */
+LRESULT CALLBACK
+PlatformUIThread::EventWindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam) {
+  if (uMsg == WM_DESTROY) {
+    RemovePropW(hwnd, kThisProperty);
+    PostQuitMessage(0);
+    return 0;
+  }
+
+  PlatformUIThread *twui = static_cast<PlatformUIThread*>(GetPropW(hwnd, kThisProperty));
+  if (!twui) {
+    return DefWindowProc(hwnd, uMsg, wParam, lParam);
+  }
+
+  if ((uMsg == static_reg_windows_msg && uMsg != WM_NULL) ||
+      (uMsg == WM_TIMER && wParam == kTimerId)) {
+    twui->NativeEventCallback();
+    return 0;
+  }
+
+  return DefWindowProc(hwnd, uMsg, wParam, lParam);
+}
+#endif
+
 bool PlatformThread::SetPriority(ThreadPriority priority) {
 #if RTC_DCHECK_IS_ON
   if (run_function_) {
     // The non-deprecated way of how this function gets called, is that it must
     // be called on the worker thread itself.
     RTC_DCHECK(!thread_checker_.CalledOnValidThread());
     RTC_DCHECK(spawned_thread_checker_.CalledOnValidThread());
   } else {
--- a/media/webrtc/trunk/webrtc/rtc_base/platform_thread.h
+++ b/media/webrtc/trunk/webrtc/rtc_base/platform_thread.h
@@ -64,61 +64,99 @@ class PlatformThread {
                  const char* thread_name,
                  ThreadPriority priority = kNormalPriority);
   virtual ~PlatformThread();
 
   const std::string& name() const { return name_; }
 
   // Spawns a thread and tries to set thread priority according to the priority
   // from when CreateThread was called.
-  void Start();
+  virtual void Start();
 
   bool IsRunning() const;
 
   // Returns an identifier for the worker thread that can be used to do
   // thread checks.
   PlatformThreadRef GetThreadRef() const;
 
   // Stops (joins) the spawned thread.
-  void Stop();
+  virtual void Stop();
 
   // Set the priority of the thread. Must be called when thread is running.
   // TODO(tommi): Make private and only allow public support via ctor.
   bool SetPriority(ThreadPriority priority);
 
  protected:
 #if defined(WEBRTC_WIN)
   // Exposed to derived classes to allow for special cases specific to Windows.
   bool QueueAPC(PAPCFUNC apc_function, ULONG_PTR data);
 #endif
 
- private:
-  void Run();
+  virtual void Run();
 
   ThreadRunFunctionDeprecated const run_function_deprecated_ = nullptr;
   ThreadRunFunction const run_function_ = nullptr;
   const ThreadPriority priority_ = kNormalPriority;
   void* const obj_;
   // TODO(pbos): Make sure call sites use string literals and update to a const
   // char* instead of a std::string.
   const std::string name_;
   rtc::ThreadChecker thread_checker_;
   rtc::ThreadChecker spawned_thread_checker_;
 #if defined(WEBRTC_WIN)
   static DWORD WINAPI StartThread(void* param);
 
   bool stop_ = false;
   HANDLE thread_ = nullptr;
   DWORD thread_id_ = 0;
+  CriticalSection cs_;
 #else
   static void* StartThread(void* param);
 
   // An atomic flag that we use to stop the thread. Only modified on the
   // controlling thread and checked on the worker thread.
   volatile int stop_flag_ = 0;
   pthread_t thread_ = 0;
 #endif  // defined(WEBRTC_WIN)
   RTC_DISALLOW_COPY_AND_ASSIGN(PlatformThread);
 };
 
+#if defined(WEBRTC_WIN)
+class PlatformUIThread : public PlatformThread {
+ public:
+  PlatformUIThread(ThreadRunFunctionDeprecated func, void* obj,
+                   const char* thread_name)
+      : PlatformThread(func, obj, thread_name),
+        hwnd_(nullptr),
+        timerid_(0),
+        timeout_(0) {
+  }
+  virtual ~PlatformUIThread() {}
+
+  void Stop() override;
+
+  /**
+   * Request an async callback soon.
+   */
+  void RequestCallback();
+
+  /**
+   * Request a recurring callback.
+   */
+  bool RequestCallbackTimer(unsigned int milliseconds);
+
+ protected:
+  void Run() override;
+
+ private:
+  static LRESULT CALLBACK EventWindowProc(HWND, UINT, WPARAM, LPARAM);
+  void NativeEventCallback();
+  bool InternalInit();
+
+  HWND hwnd_;
+  UINT_PTR timerid_;
+  unsigned int timeout_;
+};
+#endif
+
 }  // namespace rtc
 
 #endif  // RTC_BASE_PLATFORM_THREAD_H_
--- a/media/webrtc/trunk/webrtc/rtc_base/task_queue_libevent.cc
+++ b/media/webrtc/trunk/webrtc/rtc_base/task_queue_libevent.cc
@@ -10,17 +10,19 @@
 
 #include "rtc_base/task_queue.h"
 
 #include <fcntl.h>
 #include <signal.h>
 #include <string.h>
 #include <unistd.h>
 
-#include "base/third_party/libevent/event.h"
+#include "event2/event.h"
+#include "event2/event_compat.h"
+#include "event2/event_struct.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/numerics/safe_conversions.h"
 #include "rtc_base/platform_thread.h"
 #include "rtc_base/refcount.h"
 #include "rtc_base/refcountedobject.h"
 #include "rtc_base/task_queue.h"
 #include "rtc_base/task_queue_posix.h"
@@ -371,18 +373,18 @@ void TaskQueue::Impl::PostDelayedTask(st
                                       uint32_t milliseconds) {
   if (IsCurrent()) {
     TimerEvent* timer = new TimerEvent(std::move(task));
     EventAssign(&timer->ev, event_base_, -1, 0, &TaskQueue::Impl::RunTimer,
                 timer);
     QueueContext* ctx =
         static_cast<QueueContext*>(pthread_getspecific(GetQueuePtrTls()));
     ctx->pending_timers_.push_back(timer);
-    timeval tv = {rtc::dchecked_cast<int>(milliseconds / 1000),
-                  rtc::dchecked_cast<int>(milliseconds % 1000) * 1000};
+    timeval tv = {static_cast<time_t>(milliseconds) / 1000,
+                  static_cast<suseconds_t>((milliseconds % 1000) * 1000)};
     event_add(&timer->ev, &tv);
   } else {
     PostTask(std::unique_ptr<QueuedTask>(
         new SetTimerTask(std::move(task), milliseconds)));
   }
 }
 
 void TaskQueue::Impl::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
--- a/media/webrtc/trunk/webrtc/system_wrappers/source/clock.cc
+++ b/media/webrtc/trunk/webrtc/system_wrappers/source/clock.cc
@@ -12,17 +12,17 @@
 
 #if defined(_WIN32)
 
 // Windows needs to be included before mmsystem.h
 #include "rtc_base/win32.h"
 
 #include <MMSystem.h>
 
-#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_MAC))
+#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_MAC) || (defined WEBRTC_BSD))
 
 #include <sys/time.h>
 #include <time.h>
 
 #endif
 
 #include "rtc_base/criticalsection.h"
 #include "rtc_base/timeutils.h"
--- a/media/webrtc/trunk/webrtc/test/fuzzers/rtp_packet_fuzzer.cc
+++ b/media/webrtc/trunk/webrtc/test/fuzzers/rtp_packet_fuzzer.cc
@@ -104,12 +104,17 @@ void FuzzOneInput(const uint8_t* data, s
         packet.GetExtension<RepairedRtpStreamId>(&rsid);
         break;
       }
       case kRtpExtensionMid: {
         std::string mid;
         packet.GetExtension<RtpMid>(&mid);
         break;
       }
+      case kRtpExtensionCsrcAudioLevel: {
+        CsrcAudioLevelList levels;
+        packet.GetExtension<CsrcAudioLevel>(&levels);
+        break;
+      }
     }
   }
 }
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/video/rtp_video_stream_receiver.cc
+++ b/media/webrtc/trunk/webrtc/video/rtp_video_stream_receiver.cc
@@ -155,16 +155,20 @@ RtpVideoStreamReceiver::RtpVideoStreamRe
   if (IsRedEnabled()) {
     VideoCodec red_codec = {};
     red_codec.codecType = kVideoCodecRED;
     strncpy(red_codec.plName, "red", sizeof(red_codec.plName));
     red_codec.plType = config_.rtp.red_payload_type;
     RTC_CHECK(AddReceiveCodec(red_codec));
   }
 
+  rtp_rtcp_->SetTMMBRStatus(config_.rtp.tmmbr);
+
+  rtp_rtcp_->SetKeyFrameRequestMethod(config_.rtp.keyframe_method);
+
   if (config_.rtp.rtcp_xr.receiver_reference_time_report)
     rtp_rtcp_->SetRtcpXrRrtrStatus(true);
 
   // Stats callback for CNAME changes.
   rtp_rtcp_->RegisterRtcpStatisticsCallback(receive_stats_proxy);
 
   process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);
 
@@ -211,16 +215,20 @@ bool RtpVideoStreamReceiver::AddReceiveC
 uint32_t RtpVideoStreamReceiver::GetRemoteSsrc() const {
   return config_.rtp.remote_ssrc;
 }
 
 int RtpVideoStreamReceiver::GetCsrcs(uint32_t* csrcs) const {
   return rtp_receiver_->CSRCs(csrcs);
 }
 
+void RtpVideoStreamReceiver::GetRID(char rid[256]) const {
+  rtp_receiver_->GetRID(rid);
+}
+
 RtpReceiver* RtpVideoStreamReceiver::GetRtpReceiver() const {
   return rtp_receiver_.get();
 }
 
 int32_t RtpVideoStreamReceiver::OnReceivedPayloadData(
     const uint8_t* payload_data,
     size_t payload_size,
     const WebRtcRTPHeader* rtp_header) {
@@ -316,16 +324,38 @@ void RtpVideoStreamReceiver::OnRtpPacket
       int32_t time_offset;
       if (packet.GetExtension<TransmissionOffset>(&time_offset)) {
         ss << ", toffset: " << time_offset;
       }
       uint32_t send_time;
       if (packet.GetExtension<AbsoluteSendTime>(&send_time)) {
         ss << ", abs send time: " << send_time;
       }
+      StringRtpHeaderExtension rtp_stream_id;
+      if (packet.GetExtension<RtpStreamId>(&rtp_stream_id)) {
+        ss << ", rid: " << rtp_stream_id.data();
+      }
+      StringRtpHeaderExtension repaired_rtp_stream_id;
+      if (packet.GetExtension<RepairedRtpStreamId>(&repaired_rtp_stream_id)) {
+        ss << ", repaired rid: " << repaired_rtp_stream_id.data();
+      }
+      StringRtpHeaderExtension mid;
+      if (packet.GetExtension<RtpMid>(&mid)) {
+        ss << ", mid: " << mid.data();
+      }
+      CsrcAudioLevelList csrc_audio_levels;
+      if (packet.GetExtension<CsrcAudioLevel>(&csrc_audio_levels)) {
+        if (csrc_audio_levels.numAudioLevels) {
+          ss << ", csrc audio levels : {" << csrc_audio_levels.arrOfAudioLevels[0];
+          for (uint8_t i = 1; i < csrc_audio_levels.numAudioLevels; i++) {
+            ss << ", " << csrc_audio_levels.arrOfAudioLevels[i];
+          }
+          ss << "}";
+        }
+      }
       RTC_LOG(LS_INFO) << ss.str();
       last_packet_log_ms_ = now_ms;
     }
   }
 
   // TODO(nisse): Delete use of GetHeader, but needs refactoring of
   // ReceivePacket and IncomingPacket methods below.
   RTPHeader header;
--- a/media/webrtc/trunk/webrtc/video/rtp_video_stream_receiver.h
+++ b/media/webrtc/trunk/webrtc/video/rtp_video_stream_receiver.h
@@ -81,16 +81,17 @@ class RtpVideoStreamReceiver : public Rt
       video_coding::OnCompleteFrameCallback* complete_frame_callback,
       VCMTiming* timing);
   ~RtpVideoStreamReceiver();
 
   bool AddReceiveCodec(const VideoCodec& video_codec,
                        const std::map<std::string, std::string>& codec_params);
   uint32_t GetRemoteSsrc() const;
   int GetCsrcs(uint32_t* csrcs) const;
+  void GetRID(char rid[256]) const;
 
   RtpReceiver* GetRtpReceiver() const;
   RtpRtcp* rtp_rtcp() const { return rtp_rtcp_.get(); }
 
   void StartReceive();
   void StopReceive();
 
   bool DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length);
--- a/media/webrtc/trunk/webrtc/video/video_receive_stream.cc
+++ b/media/webrtc/trunk/webrtc/video/video_receive_stream.cc
@@ -165,16 +165,17 @@ void VideoReceiveStream::SetSync(Syncabl
   RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
   rtp_stream_sync_.ConfigureSync(audio_syncable);
 }
 
 void VideoReceiveStream::Start() {
   RTC_DCHECK_CALLED_SEQUENTIALLY(&worker_sequence_checker_);
   if (decode_thread_.IsRunning())
     return;
+  video_receiver_.Reset();
 
   bool protected_by_fec = config_.rtp.protected_by_flexfec ||
                           rtp_video_stream_receiver_.IsUlpfecEnabled();
 
   frame_buffer_->Start();
   call_stats_->RegisterStatsObserver(&rtp_video_stream_receiver_);
   call_stats_->RegisterStatsObserver(this);
 
@@ -453,10 +454,16 @@ bool VideoReceiveStream::Decode() {
     if (stream_is_active && !receiving_keyframe) {
       RTC_LOG(LS_WARNING) << "No decodable frame in " << wait_ms
                           << " ms, requesting keyframe.";
       RequestKeyFrame();
     }
   }
   return true;
 }
+
+bool
+VideoReceiveStream::GetRemoteRTCPSenderInfo(RTCPSenderInfo* sender_info) const {
+  return -1 != rtp_video_stream_receiver_.rtp_rtcp()->RemoteRTCPStat(sender_info);
+}
+
 }  // namespace internal
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/video/video_send_stream.cc
+++ b/media/webrtc/trunk/webrtc/video/video_send_stream.cc
@@ -590,16 +590,21 @@ VideoSendStream::VideoSendStream(
   ReconfigureVideoEncoder(std::move(encoder_config));
 }
 
 VideoSendStream::~VideoSendStream() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   RTC_DCHECK(!send_stream_);
 }
 
+CPULoadStateObserver* VideoSendStream::LoadStateObserver() {
+  // TODO: figure out CPULoadStateObserver stuff
+  return nullptr;
+}
+
 void VideoSendStream::Start() {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   RTC_LOG(LS_INFO) << "VideoSendStream::Start";
   VideoSendStreamImpl* send_stream = send_stream_.get();
   worker_queue_->PostTask([this, send_stream] {
     send_stream->Start();
     thread_sync_event_.Set();
   });
@@ -796,16 +801,21 @@ VideoSendStreamImpl::VideoSendStreamImpl
       RTC_CHECK_EQ(0, rtp_rtcp->RegisterSendRtpHeaderExtension(
                           StringToRtpExtensionType(extension), id));
     }
   }
 
   ConfigureProtection();
   ConfigureSsrcs();
 
+  // Configure the mid for each of the rtp modules
+  for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
+    rtp_rtcp->SetMID(config_->rtp.mid.c_str());
+  }
+
   // TODO(pbos): Should we set CNAME on all RTP modules?
   rtp_rtcp_modules_.front()->SetCNAME(config_->rtp.c_name.c_str());
 
   for (RtpRtcp* rtp_rtcp : rtp_rtcp_modules_) {
     rtp_rtcp->RegisterRtcpStatisticsCallback(stats_proxy_);
     rtp_rtcp->RegisterSendChannelRtpStatisticsCallback(stats_proxy_);
     rtp_rtcp->SetMaxRtpPacketSize(config_->rtp.max_packet_size);
     rtp_rtcp->RegisterVideoSendPayload(
--- a/media/webrtc/trunk/webrtc/video/video_send_stream.h
+++ b/media/webrtc/trunk/webrtc/video/video_send_stream.h
@@ -69,16 +69,18 @@ class VideoSendStream : public webrtc::V
 
   // webrtc::VideoSendStream implementation.
   void Start() override;
   void Stop() override;
 
   void SetSource(rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
                  const DegradationPreference& degradation_preference) override;
 
+  CPULoadStateObserver* LoadStateObserver() override;
+
   void ReconfigureVideoEncoder(VideoEncoderConfig) override;
   Stats GetStats() override;
 
   typedef std::map<uint32_t, RtpState> RtpStateMap;
   typedef std::map<uint32_t, RtpPayloadState> RtpPayloadStateMap;
 
   // Takes ownership of each file, is responsible for closing them later.
   // Calling this method will close and finalize any current logs.
--- a/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
@@ -3,34 +3,37 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "webrtc/modules/video_capture/video_capture_impl.h"
+#include "modules/video_capture/video_capture_impl.h"
 
 #include <stdlib.h>
 #include <string>
 
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/include/module_common_types.h"
-#include "webrtc/modules/video_capture/video_capture_config.h"
-#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/trace.h"
-#include "webrtc/base/trace_event.h"
-#include "webrtc/video_engine/desktop_capture_impl.h"
-#include "webrtc/modules/desktop_capture/desktop_frame.h"
-#include "webrtc/modules/desktop_capture/desktop_device_info.h"
-#include "webrtc/modules/desktop_capture/app_capturer.h"
-#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
-#include "webrtc/modules/video_capture/video_capture.h"
+#include "common_types.h"
+#include "api/video/i420_buffer.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "libyuv.h"  // NOLINT
+#include "modules/include/module_common_types.h"
+#include "modules/video_capture/video_capture_config.h"
+#include "system_wrappers/include/clock.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/trace_event.h"
+#include "video_engine/desktop_capture_impl.h"
+#include "modules/desktop_capture/desktop_frame.h"
+#include "modules/desktop_capture/desktop_device_info.h"
+#include "modules/desktop_capture/app_capturer.h"
+#include "modules/desktop_capture/desktop_capture_options.h"
+#include "modules/video_capture/video_capture.h"
 
 namespace webrtc {
 
 ScreenDeviceInfoImpl::ScreenDeviceInfoImpl(const int32_t id) : _id(id) {
 }
 
 ScreenDeviceInfoImpl::~ScreenDeviceInfoImpl(void) {
 }
@@ -237,38 +240,16 @@ VideoCaptureModule* DesktopCaptureImpl::
   return capture;
 }
 
 int32_t WindowDeviceInfoImpl::Init() {
   desktop_device_info_ = std::unique_ptr<DesktopDeviceInfo>(DesktopDeviceInfoImpl::Create());
   return 0;
 }
 
-int32_t DesktopCaptureImpl::AddRef() const {
-  return ++mRefCount;
-}
-int32_t DesktopCaptureImpl::Release() const {
-  assert(mRefCount > 0);
-  auto count = --mRefCount;
-  if (!count) {
-    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceVideoCapture, -1,
-                 "DesktopCapture self deleting (desktopCapture=0x%p)", this);
-
-    // Clear any pointers before starting destruction. Otherwise worker-
-    // threads will still have pointers to a partially destructed object.
-    // Example: AudioDeviceBuffer::RequestPlayoutData() can access a
-    // partially deconstructed |_ptrCbAudioTransport| during destruction
-    // if we don't call Terminate here.
-    //-> NG TODO Terminate();
-    delete this;
-    return count;
-  }
-  return mRefCount;
-}
-
 int32_t WindowDeviceInfoImpl::Refresh() {
   desktop_device_info_->Refresh();
   return 0;
 }
 
 uint32_t WindowDeviceInfoImpl::NumberOfDevices() {
   return desktop_device_info_->getWindowCount();
 }
@@ -430,85 +411,84 @@ int32_t DesktopCaptureImpl::Init(const c
   _deviceUniqueId = uniqueId;
 
   return 0;
 }
 
 DesktopCaptureImpl::DesktopCaptureImpl(const int32_t id)
   : _id(id),
     _deviceUniqueId(""),
-    _apiCs(*CriticalSectionWrapper::CreateCriticalSection()),
     _requestedCapability(),
-    _callBackCs(*CriticalSectionWrapper::CreateCriticalSection()),
     _rotateFrame(kVideoRotation_0),
     last_capture_time_(rtc::TimeNanos()/rtc::kNumNanosecsPerMillisec),
     // XXX Note that this won't capture drift!
     delta_ntp_internal_ms_(Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() -
                            last_capture_time_),
     time_event_(EventWrapper::Create()),
-    mRefCount(0),
 #if defined(_WIN32)
     capturer_thread_(new rtc::PlatformUIThread(Run, this, "ScreenCaptureThread")),
 #else
+#if defined(WEBRTC_LINUX)
+    capturer_thread_(nullptr),
+#else
     capturer_thread_(new rtc::PlatformThread(Run, this, "ScreenCaptureThread")),
 #endif
+#endif
     started_(false) {
   //-> TODO @@NG why is this crashing (seen on Linux)
   //-> capturer_thread_->SetPriority(rtc::kHighPriority);
   _requestedCapability.width = kDefaultWidth;
   _requestedCapability.height = kDefaultHeight;
   _requestedCapability.maxFPS = 30;
-  _requestedCapability.rawType = kVideoI420;
-  _requestedCapability.codecType = kVideoCodecUnknown;
+  _requestedCapability.videoType = kI420;
   memset(_incomingFrameTimesNanos, 0, sizeof(_incomingFrameTimesNanos));
 }
 
 DesktopCaptureImpl::~DesktopCaptureImpl() {
   time_event_->Set();
-  capturer_thread_->Stop();
-
-  delete &_callBackCs;
-  delete &_apiCs;
+  if (capturer_thread_) {
+    capturer_thread_->Stop();
+  }
 }
 
 void DesktopCaptureImpl::RegisterCaptureDataCallback(rtc::VideoSinkInterface<VideoFrame> *dataCallback)
 {
-  CriticalSectionScoped cs(&_apiCs);
-  CriticalSectionScoped cs2(&_callBackCs);
+  rtc::CritScope lock(&_apiCs);
+  rtc::CritScope lock2(&_callBackCs);
   _dataCallBacks.insert(dataCallback);
 }
 
 void DesktopCaptureImpl::DeRegisterCaptureDataCallback(
   rtc::VideoSinkInterface<VideoFrame> *dataCallback)
 {
-  CriticalSectionScoped cs(&_apiCs);
-  CriticalSectionScoped cs2(&_callBackCs);
+  rtc::CritScope lock(&_apiCs);
+  rtc::CritScope lock2(&_callBackCs);
   auto it = _dataCallBacks.find(dataCallback);
   if (it != _dataCallBacks.end()) {
     _dataCallBacks.erase(it);
   }
 }
 
 int32_t DesktopCaptureImpl::StopCaptureIfAllClientsClose() {
   if (_dataCallBacks.empty()) {
     return StopCapture();
   } else {
     return 0;
   }
 }
 
 int32_t DesktopCaptureImpl::DeliverCapturedFrame(webrtc::VideoFrame& captureFrame,
                                                  int64_t capture_time) {
-  UpdateFrameCount();  // frame count used for local frame rate callback.
+  UpdateFrameCount();  // frame count used for local frame rate callBack.
 
   // Set the capture time
   if (capture_time != 0) {
-    captureFrame.set_render_time_ms(capture_time - delta_ntp_internal_ms_);
+    captureFrame.set_timestamp_us(1000 * (capture_time - delta_ntp_internal_ms_));
   } else {
-    captureFrame.set_render_time_ms(rtc::TimeNanos()/rtc::kNumNanosecsPerMillisec);
+    captureFrame.set_timestamp_us(rtc::TimeMicros());
   }
 
   if (captureFrame.render_time_ms() == last_capture_time_) {
     // We don't allow the same capture time for two frames, drop this one.
     return -1;
   }
   last_capture_time_ = captureFrame.render_time_ms();
 
@@ -520,94 +500,73 @@ int32_t DesktopCaptureImpl::DeliverCaptu
 }
 
 // Copied from VideoCaptureImpl::IncomingFrame. See Bug 1038324
 int32_t DesktopCaptureImpl::IncomingFrame(uint8_t* videoFrame,
                                           size_t videoFrameLength,
                                           const VideoCaptureCapability& frameInfo,
                                           int64_t captureTime/*=0*/)
 {
-  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceVideoCapture, _id,
-               "IncomingFrame width %d, height %d", (int) frameInfo.width,
-               (int) frameInfo.height);
-
   int64_t startProcessTime = rtc::TimeNanos();
-
-  CriticalSectionScoped cs(&_callBackCs);
+  rtc::CritScope cs(&_callBackCs);
 
   const int32_t width = frameInfo.width;
   const int32_t height = frameInfo.height;
 
-  TRACE_EVENT1("webrtc", "VC::IncomingFrame", "capture_time", captureTime);
-
-  if (frameInfo.codecType == kVideoCodecUnknown) {
-    // Not encoded, convert to I420.
-    const VideoType commonVideoType =
-      RawVideoTypeToCommonVideoVideoType(frameInfo.rawType);
-
-    if (frameInfo.rawType != kVideoMJPEG &&
-        CalcBufferSize(commonVideoType, width,
-                       abs(height)) != videoFrameLength) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
-                   "Wrong incoming frame length.");
-      return -1;
-    }
-
-    int stride_y = width;
-    int stride_uv = (width + 1) / 2;
-    int target_width = width;
-    int target_height = abs(height);
-    // Rotating resolution when for 90/270 degree rotations.
-    if (_rotateFrame == kVideoRotation_90 || _rotateFrame == kVideoRotation_270)  {
-      target_height = width;
-      target_width = abs(height);
-    }
-
-    // Setting absolute height (in case it was negative).
-    // In Windows, the image starts bottom left, instead of top left.
-    // Setting a negative source height, inverts the image (within LibYuv).
-    rtc::scoped_refptr<webrtc::I420Buffer> buffer;
-    buffer = I420Buffer::Create(target_width, target_height, stride_y,
-                                stride_uv, stride_uv);
-    const int conversionResult = ConvertToI420(commonVideoType,
-                                               videoFrame,
-                                               0, 0,  // No cropping
-                                               width, height,
-                                               videoFrameLength,
-                                               _rotateFrame,
-                                               buffer.get());
-    webrtc::VideoFrame captureFrame(buffer, 0, 0, kVideoRotation_0);
-    if (conversionResult < 0) {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
-                   "Failed to convert capture frame from type %d to I420",
-                   frameInfo.rawType);
-      return -1;
-    }
-
-    DeliverCapturedFrame(captureFrame, captureTime);
-  } else {
-    assert(false);
+  // Not encoded, convert to I420.
+  if (frameInfo.videoType != VideoType::kMJPEG &&
+      CalcBufferSize(frameInfo.videoType, width, abs(height)) !=
+          videoFrameLength) {
+    RTC_LOG(LS_ERROR) << "Wrong incoming frame length.";
     return -1;
   }
 
+  int stride_y = width;
+  int stride_uv = (width + 1) / 2;
+
+  // Setting absolute height (in case it was negative).
+  // In Windows, the image starts bottom left, instead of top left.
+  // Setting a negative source height, inverts the image (within LibYuv).
+
+  // TODO(nisse): Use a pool?
+  rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
+      width, abs(height), stride_y, stride_uv, stride_uv);
+
+  const int conversionResult = libyuv::ConvertToI420(
+      videoFrame, videoFrameLength, buffer.get()->MutableDataY(),
+      buffer.get()->StrideY(), buffer.get()->MutableDataU(),
+      buffer.get()->StrideU(), buffer.get()->MutableDataV(),
+      buffer.get()->StrideV(), 0, 0,  // No Cropping
+      width, height, width, height, libyuv::kRotate0,
+      ConvertVideoType(frameInfo.videoType));
+  if (conversionResult != 0) {
+    RTC_LOG(LS_ERROR) << "Failed to convert capture frame from type "
+                      << static_cast<int>(frameInfo.videoType) << " to I420.";
+    return -1;
+  }
+
+  VideoFrame captureFrame(buffer, 0, rtc::TimeMillis(), kVideoRotation_0);
+  captureFrame.set_ntp_time_ms(captureTime);
+
+  DeliverCapturedFrame(captureFrame, captureTime);
+
   const int64_t processTime =
     (rtc::TimeNanos() - startProcessTime)/rtc::kNumNanosecsPerMillisec;
 
   if (processTime > 10) {
-    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCapture, _id,
-                 "Too long processing time of Incoming frame: %ums",
-                 (unsigned int) processTime);
+    RTC_LOG(LS_WARNING) << "Too long processing time of incoming frame: "
+                        << processTime << " ms";
   }
 
   return 0;
 }
 
 int32_t DesktopCaptureImpl::SetCaptureRotation(VideoRotation rotation) {
-  CriticalSectionScoped cs(&_apiCs);
-  CriticalSectionScoped cs2(&_callBackCs);
+  rtc::CritScope lock(&_apiCs);
+  rtc::CritScope lock2(&_callBackCs);
   _rotateFrame = rotation;
   return 0;
 }
 
 bool DesktopCaptureImpl::SetApplyRotation(bool enable) {
   return true;
 }
 
@@ -703,20 +662,17 @@ int32_t DesktopCaptureImpl::CaptureSetti
 
 void DesktopCaptureImpl::OnCaptureResult(DesktopCapturer::Result result,
                                          std::unique_ptr<DesktopFrame> frame) {
   if (frame.get() == nullptr) return;
   uint8_t * videoFrame = frame->data();
   VideoCaptureCapability frameInfo;
   frameInfo.width = frame->size().width();
   frameInfo.height = frame->size().height();
-  frameInfo.rawType = kVideoARGB;
-
-  // combine cursor in frame
-  // Latest WebRTC already support it by DesktopFrameWithCursor/DesktopAndCursorComposer.
+  frameInfo.videoType = VideoType::kARGB;
 
   size_t videoFrameLength = frameInfo.width * frameInfo.height * DesktopFrame::kBytesPerPixel;
   IncomingFrame(videoFrame, videoFrameLength, frameInfo);
 }
 
 void DesktopCaptureImpl::process() {
   DesktopRect desktop_rect;
   DesktopRegion desktop_region;
--- a/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.h
+++ b/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.h
@@ -13,32 +13,33 @@
 
 /*
  * video_capture_impl.h
  */
 
 #include <string>
 #include <memory>
 
-#include "webrtc/video_frame.h"
-#include "webrtc/base/scoped_ref_ptr.h"
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_capture/video_capture_config.h"
-#include "webrtc/modules/desktop_capture/shared_memory.h"
-#include "webrtc/base/platform_thread.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/modules/desktop_capture/desktop_device_info.h"
-#include "webrtc/modules/desktop_capture/desktop_and_cursor_composer.h"
+
+#include "api/video/video_frame.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_capture/video_capture_config.h"
+#include "modules/desktop_capture/shared_memory.h"
+#include "modules/desktop_capture/desktop_device_info.h"
+#include "modules/desktop_capture/desktop_and_cursor_composer.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "system_wrappers/include/event_wrapper.h"
 #include <set>
 
 using namespace webrtc::videocapturemodule;
 
 namespace webrtc {
 
-class CriticalSectionWrapper;
 class VideoCaptureEncodeInterface;
 
 
 //simulate deviceInfo interface for video engine, bridge screen/application and real screen/application device info
 
 class ScreenDeviceInfoImpl : public VideoCaptureModule::DeviceInfo {
 public:
   ScreenDeviceInfoImpl(const int32_t id);
@@ -164,61 +165,59 @@ class DesktopCaptureImpl: public Desktop
 {
 public:
   /* Create a screen capture modules object
    */
   static VideoCaptureModule* Create(const int32_t id, const char* uniqueId, const CaptureDeviceType type);
   static VideoCaptureModule::DeviceInfo* CreateDeviceInfo(const int32_t id, const CaptureDeviceType type);
 
   int32_t Init(const char* uniqueId, const CaptureDeviceType type);
-  //RefCounting for RefCountedModule
-  virtual int32_t AddRef() const override;
-  virtual int32_t Release() const override;
   //Call backs
-  virtual void RegisterCaptureDataCallback(rtc::VideoSinkInterface<VideoFrame> *dataCallback) override;
-  virtual void DeRegisterCaptureDataCallback(rtc::VideoSinkInterface<VideoFrame> *dataCallback) override;
-  virtual int32_t StopCaptureIfAllClientsClose() override;
+  void RegisterCaptureDataCallback(rtc::VideoSinkInterface<VideoFrame> *dataCallback) override;
+  void DeRegisterCaptureDataCallback(rtc::VideoSinkInterface<VideoFrame> *dataCallback) override;
+  int32_t StopCaptureIfAllClientsClose() override;
 
-  virtual int32_t SetCaptureRotation(VideoRotation rotation) override;
-  virtual bool SetApplyRotation(bool enable) override;
-  virtual bool GetApplyRotation() override { return true; }
+  int32_t SetCaptureRotation(VideoRotation rotation) override;
+  bool SetApplyRotation(bool enable) override;
+  bool GetApplyRotation() override { return true; }
 
-  virtual const char* CurrentDeviceName() const override;
+  const char* CurrentDeviceName() const override;
 
   // Implement VideoCaptureExternal
   // |capture_time| must be specified in the NTP time format in milliseconds.
-  virtual int32_t IncomingFrame(uint8_t* videoFrame,
-                                size_t videoFrameLength,
-                                const VideoCaptureCapability& frameInfo,
-                                int64_t captureTime = 0) override;
+  int32_t IncomingFrame(uint8_t* videoFrame,
+                        size_t videoFrameLength,
+                        const VideoCaptureCapability& frameInfo,
+                        int64_t captureTime = 0) override;
 
   // Platform dependent
-  virtual int32_t StartCapture(const VideoCaptureCapability& capability) override;
-  virtual int32_t StopCapture() override;
-  virtual bool CaptureStarted() override;
-  virtual int32_t CaptureSettings(VideoCaptureCapability& settings) override;
+  int32_t StartCapture(const VideoCaptureCapability& capability) override;
+  virtual bool FocusOnSelectedSource() override;
+  int32_t StopCapture() override;
+  bool CaptureStarted() override;
+  int32_t CaptureSettings(VideoCaptureCapability& settings) override;
 
 protected:
   DesktopCaptureImpl(const int32_t id);
   virtual ~DesktopCaptureImpl();
   int32_t DeliverCapturedFrame(webrtc::VideoFrame& captureFrame,
                                int64_t capture_time);
 
   static const uint32_t kMaxDesktopCaptureCpuUsage = 50; // maximum CPU usage in %
 
   int32_t _id; // Module ID
   std::string _deviceUniqueId; // current Device unique name;
-  CriticalSectionWrapper& _apiCs;
+  rtc::CriticalSection _apiCs;
   VideoCaptureCapability _requestedCapability; // Should be set by platform dependent code in StartCapture.
 
 private:
   void UpdateFrameCount();
   uint32_t CalculateFrameRate(int64_t now_ns);
 
-  CriticalSectionWrapper& _callBackCs;
+  rtc::CriticalSection _callBackCs;
 
   std::set<rtc::VideoSinkInterface<VideoFrame>*> _dataCallBacks;
 
   int64_t _incomingFrameTimesNanos[kFrameRateCountHistorySize];// timestamp for local captured frames
   VideoRotation _rotateFrame; //Set if the frame should be rotated by the capture module.
 
   // Used to make sure incoming timestamp is increasing for every frame.
   int64_t last_capture_time_;
@@ -240,15 +239,14 @@ public:
 private:
   std::unique_ptr<DesktopAndCursorComposer> desktop_capturer_cursor_composer_;
   std::unique_ptr<EventWrapper> time_event_;
 #if defined(_WIN32)
   std::unique_ptr<rtc::PlatformUIThread> capturer_thread_;
 #else
   std::unique_ptr<rtc::PlatformThread> capturer_thread_;
 #endif
-  mutable uint32_t mRefCount;
   bool started_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_DESKTOP_CAPTURE_MAIN_SOURCE_DESKTOP_CAPTURE_IMPL_H_
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.cc
@@ -24,16 +24,17 @@
 #include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
 #include "modules/audio_coding/codecs/audio_format_conversion.h"
 #include "modules/audio_device/include/audio_device.h"
 #include "modules/audio_processing/include/audio_processing.h"
 #include "modules/include/module_common_types.h"
 #include "modules/pacing/packet_router.h"
 #include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_packet_observer.h"
 #include "modules/rtp_rtcp/include/rtp_payload_registry.h"
 #include "modules/rtp_rtcp/include/rtp_receiver.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
 #include "modules/rtp_rtcp/source/rtp_receiver_strategy.h"
 #include "modules/utility/include/process_thread.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/criticalsection.h"
 #include "rtc_base/format_macros.h"
@@ -118,16 +119,231 @@ class RtcpRttStatsProxy final : public R
   }
 
  private:
   rtc::CriticalSection crit_;
   RtcpRttStats* rtcp_rtt_stats_ RTC_GUARDED_BY(crit_);
   RTC_DISALLOW_COPY_AND_ASSIGN(RtcpRttStatsProxy);
 };
 
+// Extend the default RTCP statistics struct with max_jitter, defined as the
+// maximum jitter value seen in an RTCP report block.
+struct ChannelStatistics : public RtcpStatistics {
+  ChannelStatistics() : rtcp(), max_jitter(0) {}
+
+  RtcpStatistics rtcp;
+  uint32_t max_jitter;
+};
+
+// Statistics callback, called at each generation of a new RTCP report block.
+class StatisticsProxy : public RtcpStatisticsCallback,
+   public RtcpPacketTypeCounterObserver {
+ public:
+  StatisticsProxy(uint32_t ssrc) : ssrc_(ssrc) {}
+  virtual ~StatisticsProxy() {}
+
+  void StatisticsUpdated(const RtcpStatistics& statistics,
+                         uint32_t ssrc) override {
+    rtc::CritScope cs(&stats_lock_);
+    if (ssrc != ssrc_)
+      return;
+
+    stats_.rtcp = statistics;
+    if (statistics.jitter > stats_.max_jitter) {
+      stats_.max_jitter = statistics.jitter;
+    }
+  }
+
+  void CNameChanged(const char* cname, uint32_t ssrc) override {}
+
+  void SetSSRC(uint32_t ssrc) {
+    rtc::CritScope cs(&stats_lock_);
+    ssrc_ = ssrc;
+    mReceiverReportDerivedStats.clear();
+    mInitialSequenceNumber.reset();
+  }
+
+  ChannelStatistics GetStats() {
+    rtc::CritScope cs(&stats_lock_);
+    return stats_;
+  }
+
+  // These can be created before reports are received so that information
+  // needed to derive certain stats (e.g. PacketsReceived) can be stored.
+  class ReceiverReportDerivedStats {
+  public:
+    // Event handler for incoming RTCP Receiver Reports
+    void UpdateWithReceiverReport(const RTCPReportBlock& aReceiverReport,
+                                  rtc::Optional<uint32_t> initialSequenceNum,
+                                  int64_t aRoundTripTime,
+                                  uint32_t aEncoderFrequencyHz,
+                                  int64_t aReceptionTime)
+    {
+      if (!mFirstExtendedSequenceNumber && initialSequenceNum) {
+        mFirstExtendedSequenceNumber = *initialSequenceNum;
+      }
+
+      // No initial sequence number available!
+      if (!mFirstExtendedSequenceNumber) {
+        RTC_LOG(LS_WARNING) <<
+                     "ReceiverReportDerivedStats::UpdateWithReceiverReport()"
+                     " called before a first sequence number is known to the"
+                     " StatisticsProxy";
+        // This is as good a guess as we can get if the initial
+        // sequence number is not known
+        mFirstExtendedSequenceNumber = static_cast<uint32_t>(
+            std::max<int64_t>(0, aReceiverReport.extended_highest_sequence_number -
+                                 aReceiverReport.packets_lost));
+      }
+
+      mReceiverSsrc = aReceiverReport.sender_ssrc;
+      mSenderSsrc = aReceiverReport.source_ssrc;
+      mLatestHighExtendedSequenceNumber = aReceiverReport.extended_highest_sequence_number;
+      mLatestReceiverReportReceptionTime = aReceptionTime;
+      mFractionOfPacketsLostInQ8 = aReceiverReport.fraction_lost;
+      mJitterInSamples = aReceiverReport.jitter;
+      mEncoderFrequencyHz = aEncoderFrequencyHz;
+      mCumulativePacketsLost = aReceiverReport.packets_lost;
+      mLastSenderReportTimestamp = aReceiverReport.last_sender_report_timestamp;
+      mDelaySinceLastSenderReport = aReceiverReport.delay_since_last_sender_report;
+      mRoundTripTime = aRoundTripTime;
+    }
+    bool HasReceivedReport() { return mFirstReceiverReportReceptionTime; }
+    // This is the SSRC of the entity sending the RTCP Receiver Reports.
+    // That is, it is the SSRC of the RTP receiver.
+    uint32_t mReceiverSsrc = 0;
+    // This is the SSRC of the entity receiving the RTCP Receiver Reports
+    // That is, it is the SSRC of the RTP sender.
+    uint32_t mSenderSsrc = 0;
+    // Reception time for the RTCP packet containing this data
+    // Only available if a receiver report has been received
+    int64_t mLatestReceiverReportReceptionTime = 0;
+    // Reception time for the first RTCP packet containing a
+    // Receiver Report matching mReceiverSsrc.
+    int64_t mFirstReceiverReportReceptionTime = 0;
+    // The total number of packets known to be lost
+    uint32_t mCumulativePacketsLost = 0;
+    // The RTP sender must record the first sequence number used
+    // so that number of packets received can be calculated from ...
+    uint32_t mFirstExtendedSequenceNumber = 0;
+    // The most recent sequence number seen by the receiver at the time
+    // Receiver Report was generated
+    uint32_t mLatestHighExtendedSequenceNumber = 0;
+    int64_t mRoundTripTime = 0;
+    // The amount of jitter measured in MS, derived from the
+    // RTCP reported jitter (measured in frames), and the
+    // effective playout frequency.
+    double JitterMs() const {
+      if (!mEncoderFrequencyHz) {
+        if (!mHasWarnedAboutNoFrequency) {
+          mHasWarnedAboutNoFrequency = true;
+          RTC_LOG(LS_WARNING) <<
+                  "ReceiverReportDerivedStats::JitterMs() called before"
+                  " the playout frequency is known.";
+        }
+        return 0;
+      }
+      return (mJitterInSamples * 1000) / mEncoderFrequencyHz;
+    }
+    // Fraction of packets lost
+    double FractionOfPacketsLost() const {
+      return (double) mFractionOfPacketsLostInQ8 / 256;
+    }
+    uint32_t PacketsReceived() const {
+      return static_cast<uint32_t>(std::max<int64_t>(0,
+        (int64_t) mLatestHighExtendedSequenceNumber -
+             (mFirstExtendedSequenceNumber + mCumulativePacketsLost)));
+    }
+  private:
+    // The ratio of packets lost to total packets sent expressed
+    // as the dividend in X / 256.
+    uint8_t mFractionOfPacketsLostInQ8 = 0;
+    // Jitter in the RTCP packet is in Time Units,
+    // which is the sample rate of the audio.
+    uint32_t mJitterInSamples = 0;
+    // Used to calculate the jitter
+    uint32_t mEncoderFrequencyHz = 0;
+    // Used to calculate the RTT
+    uint32_t mLastSenderReportTimestamp = 0;
+    // Used to calculate the RTT
+    uint32_t mDelaySinceLastSenderReport = 0;
+    // Only warn about jitter calculation once per instance
+    mutable bool mHasWarnedAboutNoFrequency = false;
+  };
+
+  void RtcpPacketTypesCounterUpdated(uint32_t ssrc,
+      const RtcpPacketTypeCounter& packet_counter) override {
+    rtc::CritScope cs(&stats_lock_);
+    if (ssrc != ssrc_) {
+      return;
+    }
+    packet_counter_ = packet_counter;
+ };
+
+ // Called when we receive RTCP receiver reports
+ void OnIncomingReceiverReports(const ReportBlockList & mReceiverReports,
+                                const int64_t aRoundTripTime,
+                                const int64_t aReceptionTime) {
+    if (!mReceiverReports.empty()) { // Don't lock if we have nothing to do.
+      rtc::CritScope cs(&stats_lock_);
+      for(const auto& report : mReceiverReports) {
+        // Creates a new report if necessary before updating
+        ReceiverReportDerivedStats newStats;
+        mReceiverReportDerivedStats.emplace(report.source_ssrc, newStats)
+          .first->second.UpdateWithReceiverReport(report,
+                                                  mInitialSequenceNumber,
+                                                  aRoundTripTime,
+                                                  mPlayoutFrequency,
+                                                  aReceptionTime);
+      }
+    }
+  }
+
+  void OnSendCodecFrequencyChanged(uint32_t aFrequency) {
+    rtc::CritScope cs(&stats_lock_);
+    mPlayoutFrequency = aFrequency;
+  }
+
+  void OnInitialSequenceNumberSet(uint32_t aSequenceNumber) {
+    rtc::CritScope cs(&stats_lock_);
+    mInitialSequenceNumber.emplace(aSequenceNumber);
+    mReceiverReportDerivedStats.clear();
+  }
+
+  const rtc::Optional<ReceiverReportDerivedStats>
+  GetReceiverReportDerivedStats(const uint32_t receiverSsrc) const {
+    rtc::CritScope cs(&stats_lock_);
+    const auto& it = mReceiverReportDerivedStats.find(receiverSsrc);
+    if (it != mReceiverReportDerivedStats.end()) {
+      return rtc::Optional<ReceiverReportDerivedStats>(it->second);
+    }
+    return rtc::Optional<ReceiverReportDerivedStats>();
+  }
+
+  void GetPacketTypeCounter(RtcpPacketTypeCounter& aPacketTypeCounter) {
+    rtc::CritScope cs(&stats_lock_);
+    aPacketTypeCounter = packet_counter_;
+  }
+
+ private:
+  // StatisticsUpdated calls are triggered from threads in the RTP module,
+  // while GetStats calls can be triggered from the public voice engine API,
+  // hence synchronization is needed.
+  rtc::CriticalSection stats_lock_;
+  uint32_t ssrc_;
+  ChannelStatistics stats_;
+  RtcpPacketTypeCounter packet_counter_;
+
+  // receiver report handling, maps ssrc -> stats
+  std::map<uint32_t, ReceiverReportDerivedStats> mReceiverReportDerivedStats;
+  // store initial sender sequence number
+  rtc::Optional<uint32_t> mInitialSequenceNumber;
+  uint32_t mPlayoutFrequency;
+ };
+
 class TransportFeedbackProxy : public TransportFeedbackObserver {
  public:
   TransportFeedbackProxy() : feedback_observer_(nullptr) {
     pacer_thread_.DetachFromThread();
     network_thread_.DetachFromThread();
   }
 
   void SetTransportFeedbackObserver(
@@ -289,16 +505,17 @@ class VoERtcpObserver : public RtcpBandw
     }
     int weighted_fraction_lost = 0;
     if (total_number_of_packets > 0) {
       weighted_fraction_lost =
           (fraction_lost_aggregate + total_number_of_packets / 2) /
           total_number_of_packets;
     }
     owner_->OnUplinkPacketLossRate(weighted_fraction_lost / 255.0f);
+    owner_->OnIncomingReceiverReports(report_blocks, rtt, now_ms);
   }
 
  private:
   Channel* owner_;
   // Maps remote side ssrc to extended highest sequence number received.
   std::map<uint32_t, uint32_t> extended_max_sequence_number_;
   rtc::CriticalSection crit_;
   RtcpBandwidthObserver* bandwidth_observer_ RTC_GUARDED_BY(crit_);
@@ -318,16 +535,71 @@ class Channel::ProcessAndEncodeAudioTask
     channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get());
     return true;
   }
 
   std::unique_ptr<AudioFrame> audio_frame_;
   Channel* const channel_;
 };
 
+void Channel::OnIncomingReceiverReports(const ReportBlockList& aReportBlocks,
+                                        const int64_t aRoundTripTime,
+                                        const int64_t aReceptionTime) {
+
+  statistics_proxy_->OnIncomingReceiverReports(aReportBlocks,
+                                               aRoundTripTime,
+                                               aReceptionTime);
+}
+
+bool Channel::GetRTCPReceiverStatistics(int64_t* timestamp,
+                                        uint32_t* jitterMs,
+                                        uint32_t* cumulativeLost,
+                                        uint32_t* packetsReceived,
+                                        uint64_t* bytesReceived,
+                                        double* packetsFractionLost,
+                                        int64_t* rtt) const {
+  uint32_t ssrc = _rtpRtcpModule->SSRC();
+
+  const auto& stats = statistics_proxy_->GetReceiverReportDerivedStats(ssrc);
+  if (!stats || !stats->PacketsReceived()) {
+    return false;
+  }
+  *timestamp = stats->mLatestReceiverReportReceptionTime;
+  *jitterMs = stats->JitterMs();
+  *cumulativeLost = stats->mCumulativePacketsLost;
+  *packetsReceived = stats->PacketsReceived();
+  *packetsFractionLost = stats->FractionOfPacketsLost();
+  *rtt = stats->mRoundTripTime;
+
+  // bytesReceived is only an estimate, which we derive from the locally
+  // generated RTCP sender reports, and the remotely generated receiver
+  // reports.
+  // There is an open issue in the spec as to if this should be included
+  // here where it is only a guess.
+  // https://github.com/w3c/webrtc-stats/issues/241
+  *bytesReceived = 0;
+  if (*packetsReceived) {
+    // GetDataCounters has internal CS lock within RtpSender
+    StreamDataCounters rtpCounters;
+    StreamDataCounters rtxCounters; // unused
+    _rtpRtcpModule->GetSendStreamDataCounters(&rtpCounters, &rtxCounters);
+    uint64_t sentPackets = rtpCounters.transmitted.packets;
+    if (sentPackets) {
+      uint64_t sentBytes = rtpCounters.MediaPayloadBytes();
+      *bytesReceived = sentBytes * (*packetsReceived) / sentPackets;
+    }
+  }
+
+  return true;
+}
+
+void Channel::SetRtpPacketObserver(RtpPacketObserver* observer) {
+  rtp_source_observer_ = observer;
+}
+
 int32_t Channel::SendData(FrameType frameType,
                           uint8_t payloadType,
                           uint32_t timeStamp,
                           const uint8_t* payloadData,
                           size_t payloadSize,
                           const RTPFragmentationHeader* fragmentation) {
   RTC_DCHECK_RUN_ON(encoder_queue_);
   if (_includeAudioLevelIndication) {
@@ -393,16 +665,19 @@ bool Channel::SendRtcp(const uint8_t* da
     return false;
   }
   return true;
 }
 
 void Channel::OnIncomingSSRCChanged(uint32_t ssrc) {
   // Update ssrc so that NTP for AV sync can be updated.
   _rtpRtcpModule->SetRemoteSSRC(ssrc);
+
+  // Update stats proxy to receive stats for new ssrc
+  statistics_proxy_->SetSSRC(ssrc);
 }
 
 void Channel::OnIncomingCSRCChanged(uint32_t CSRC, bool added) {
   // TODO(saza): remove.
 }
 
 int32_t Channel::OnInitializeDecoder(int payload_type,
                                      const SdpAudioFormat& audio_format,
@@ -429,16 +704,29 @@ int32_t Channel::OnReceivedPayloadData(c
   // Push the incoming payload (parsed and ready for decoding) into the ACM
   if (audio_coding_->IncomingPacket(payloadData, payloadSize, *rtpHeader) !=
       0) {
     RTC_LOG(LS_ERROR)
         << "Channel::OnReceivedPayloadData() unable to push data to the ACM";
     return -1;
   }
 
+  // Observe incoming packets for getContributingSources and
+  // getSynchronizationSources.
+  if (rtp_source_observer_) {
+    const auto playoutFrequency = audio_coding_->PlayoutFrequency();
+    uint32_t jitter = 0;
+    if (playoutFrequency > 0) {
+      const ChannelStatistics stats = statistics_proxy_->GetStats();
+      jitter = stats.rtcp.jitter / (playoutFrequency / 1000);
+    }
+    rtp_source_observer_->OnRtpPacket(rtpHeader,
+        webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds(), jitter);
+  }
+
   int64_t round_trip_time = 0;
   _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), &round_trip_time, NULL, NULL,
                       NULL);
 
   std::vector<uint16_t> nack_list = audio_coding_->GetNackList(round_trip_time);
   if (!nack_list.empty()) {
     // Can't use nack_list.data() since it's not supported by all
     // compilers.
@@ -462,19 +750,16 @@ bool Channel::OnRecoveredPacket(const ui
   return ReceivePacket(rtp_packet, rtp_packet_length, header);
 }
 
 AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
     int sample_rate_hz,
     AudioFrame* audio_frame) {
   audio_frame->sample_rate_hz_ = sample_rate_hz;
 
-  unsigned int ssrc;
-  RTC_CHECK_EQ(GetRemoteSSRC(ssrc), 0);
-  event_log_proxy_->Log(rtc::MakeUnique<RtcEventAudioPlayout>(ssrc));
   // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
   bool muted;
   if (audio_coding_->PlayoutData10Ms(audio_frame->sample_rate_hz_, audio_frame,
                                      &muted) == -1) {
     RTC_LOG(LS_ERROR) << "Channel::GetAudioFrame() PlayoutData10Ms() failed!";
     // In all likelihood, the audio in this frame is garbage. We return an
     // error so that the audio mixer module doesn't add it to the mix. As
     // a result, it won't be played out and the actions skipped here are
@@ -622,16 +907,17 @@ Channel::Channel(int32_t channelId,
       _transportPtr(NULL),
       input_mute_(false),
       previous_frame_muted_(false),
       _outputGain(1.0f),
       _includeAudioLevelIndication(false),
       transport_overhead_per_packet_(0),
       rtp_overhead_per_packet_(0),
       _outputSpeechType(AudioFrame::kNormalSpeech),
+      _current_sync_offset(0),
       rtcp_observer_(new VoERtcpObserver(this)),
       associate_send_channel_(ChannelOwner(nullptr)),
       pacing_enabled_(config.enable_voice_pacing),
       feedback_observer_proxy_(new TransportFeedbackProxy()),
       seq_num_allocator_proxy_(new TransportSequenceNumberProxy()),
       rtp_packet_sender_proxy_(new RtpPacketSenderProxy()),
       retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(),
                                                    kMaxRetransmissionWindowMs)),
@@ -658,16 +944,21 @@ Channel::Channel(int32_t channelId,
   }
   configuration.event_log = &(*event_log_proxy_);
   configuration.rtt_stats = &(*rtcp_rtt_stats_proxy_);
   configuration.retransmission_rate_limiter =
       retransmission_rate_limiter_.get();
 
   _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
   _rtpRtcpModule->SetSendingMediaStatus(false);
+
+  statistics_proxy_.reset(new StatisticsProxy(_rtpRtcpModule->SSRC()));
+  rtp_receive_statistics_->RegisterRtcpStatisticsCallback(
+    statistics_proxy_.get());
+  configuration.rtcp_packet_type_counter_observer = statistics_proxy_.get();
 }
 
 Channel::~Channel() {
   RTC_DCHECK(!channel_state_.Get().sending);
   RTC_DCHECK(!channel_state_.Get().playing);
 }
 
 int32_t Channel::Init() {
@@ -1136,45 +1427,54 @@ int Channel::SetSendTelephoneEventPayloa
           << "SetSendTelephoneEventPayloadType() failed to register "
              "send payload type";
       return -1;
     }
   }
   return 0;
 }
 
+int Channel::SetLocalMID(const char* mid) {
+  if (channel_state_.Get().sending) {
+    return -1;
+  }
+  _rtpRtcpModule->SetMID(mid);
+  return 0;
+}
+
 int Channel::SetLocalSSRC(unsigned int ssrc) {
   if (channel_state_.Get().sending) {
     RTC_LOG(LS_ERROR) << "SetLocalSSRC() already sending";
     return -1;
   }
   _rtpRtcpModule->SetSSRC(ssrc);
   return 0;
 }
-
+/*
 int Channel::GetRemoteSSRC(unsigned int& ssrc) {
   ssrc = rtp_receiver_->SSRC();
   return 0;
 }
-
+*/
 int Channel::SetSendAudioLevelIndicationStatus(bool enable, unsigned char id) {
   _includeAudioLevelIndication = enable;
   return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
 }
 
 int Channel::SetSendMIDStatus(bool enable, unsigned char id) {
-  return SetSendRtpHeaderExtension(enable, kRtpExtensionMId, id);
+  return SetSendRtpHeaderExtension(enable, kRtpExtensionMid, id);
 }
 
 int Channel::SetReceiveAudioLevelIndicationStatus(bool enable,
-                                                  unsigned char id) {
-  rtp_header_parser_->DeregisterRtpHeaderExtension(kRtpExtensionAudioLevel);
-  if (enable &&
-      !rtp_header_parser_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
-                                                      id)) {
+                                                  unsigned char id,
+                                                  bool isLevelSsrc) {
+  const webrtc::RTPExtensionType& rtpExt = isLevelSsrc ?
+      kRtpExtensionAudioLevel : kRtpExtensionCsrcAudioLevel;
+  rtp_header_parser_->DeregisterRtpHeaderExtension(rtpExt);
+  if (enable && !rtp_header_parser_->RegisterRtpHeaderExtension(rtpExt, id)) {
     return -1;
   }
   return 0;
 }
 
 void Channel::EnableSendTransportSequenceNumber(int id) {
   int ret =
       SetSendRtpHeaderExtension(true, kRtpExtensionTransportSequenceNumber, id);
@@ -1333,16 +1633,25 @@ int Channel::GetRTPStatistics(CallStatis
   // --- Timestamps
   {
     rtc::CritScope lock(&ts_stats_lock_);
     stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
   }
   return 0;
 }
 
+int Channel::GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats) {
+  if (_rtpRtcpModule->RTCP() == RtcpMode::kOff) {
+    return -1;
+  }
+
+  statistics_proxy_->GetPacketTypeCounter(stats);
+  return 0;
+}
+
 void Channel::SetNACKStatus(bool enable, int maxNumberOfPackets) {
   // None of these functions can fail.
   // If pacing is enabled we always store packets.
   if (!pacing_enabled_)
     _rtpRtcpModule->SetStorePacketsStatus(enable, maxNumberOfPackets);
   rtp_receive_statistics_->SetMaxReorderingThreshold(maxNumberOfPackets);
   if (enable)
     audio_coding_->EnableNack(maxNumberOfPackets);
@@ -1496,16 +1805,24 @@ ANAStats Channel::GetANAStatistics() con
   return audio_coding_->GetANAStats();
 }
 
 uint32_t Channel::GetDelayEstimate() const {
   rtc::CritScope lock(&video_sync_lock_);
   return audio_coding_->FilteredCurrentDelayMs() + playout_delay_ms_;
 }
 
+void Channel::GetDelayEstimates(int* jitter_buffer_delay_ms,
+                                int* playout_buffer_delay_ms,
+                                int* avsync_offset_ms) const {
+  rtc::CritScope lock(&video_sync_lock_);
+  *jitter_buffer_delay_ms = audio_coding_->FilteredCurrentDelayMs();
+  *playout_buffer_delay_ms = playout_delay_ms_;
+  *avsync_offset_ms = _current_sync_offset;
+}
 int Channel::SetMinimumPlayoutDelay(int delayMs) {
   if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
       (delayMs > kVoiceEngineMaxMinPlayoutDelayMs)) {
     RTC_LOG(LS_ERROR) << "SetMinimumPlayoutDelay() invalid min delay";
     return -1;
   }
   if (audio_coding_->SetMinimumPlayoutDelay(delayMs) != 0) {
     RTC_LOG(LS_ERROR)
--- a/media/webrtc/trunk/webrtc/voice_engine/channel.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel.h
@@ -41,16 +41,17 @@ namespace webrtc {
 
 class AudioDeviceModule;
 class PacketRouter;
 class ProcessThread;
 class RateLimiter;
 class ReceiveStatistics;
 class RemoteNtpTimeEstimator;
 class RtcEventLog;
+class RtpPacketObserver;
 class RTPPayloadRegistry;
 class RTPReceiverAudio;
 class RtpPacketReceived;
 class RtpRtcp;
 class RtpTransportControllerSendInterface;
 class TelephoneEventHandler;
 
 struct SenderInfo;
@@ -63,16 +64,19 @@ struct CallStatistics {
   int64_t rttMs;
   size_t bytesSent;
   int packetsSent;
   size_t bytesReceived;
   int packetsReceived;
   // The capture ntp time (in local timebase) of the first played out audio
   // frame.
   int64_t capture_start_ntp_time_ms_;
+
+  uint32_t rtcp_sender_packets_sent;
+  uint32_t rtcp_sender_octets_sent;
 };
 
 // See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details.
 struct ReportBlock {
   uint32_t sender_SSRC;  // SSRC of sender
   uint32_t source_SSRC;
   uint8_t fraction_lost;
   uint32_t cumulative_num_packets_lost;
@@ -82,16 +86,17 @@ struct ReportBlock {
   uint32_t delay_since_last_SR;
 };
 
 namespace voe {
 
 class RtcEventLogProxy;
 class RtcpRttStatsProxy;
 class RtpPacketSenderProxy;
+class StatisticsProxy;
 class TransportFeedbackProxy;
 class TransportSequenceNumberProxy;
 class VoERtcpObserver;
 
 // Helper class to simplify locking scheme for members that are accessed from
 // multiple threads.
 // Example: a member can be set on thread T1 and read by an internal audio
 // thread T2. Accessing the member via this class ensures that we are
@@ -211,39 +216,47 @@ class Channel
 
   // Stats.
   int GetNetworkStatistics(NetworkStatistics& stats);
   void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
   ANAStats GetANAStatistics() const;
 
   // Audio+Video Sync.
   uint32_t GetDelayEstimate() const;
+  void GetDelayEstimates(int* jitter_buffer_delay_ms,
+                         int* playout_buffer_delay_ms,
+                         int* avsync_offset_ms) const;
   int SetMinimumPlayoutDelay(int delayMs);
+  void SetCurrentSyncOffset(int offsetMs) { _current_sync_offset = offsetMs; }
   int GetPlayoutTimestamp(unsigned int& timestamp);
   int GetRtpRtcp(RtpRtcp** rtpRtcpModule, RtpReceiver** rtp_receiver) const;
 
   // DTMF.
   int SendTelephoneEventOutband(int event, int duration_ms);
   int SetSendTelephoneEventPayloadType(int payload_type, int payload_frequency);
 
   // RTP+RTCP
+  int SetLocalMID(const char* mid);
   int SetLocalSSRC(unsigned int ssrc);
   int SetSendAudioLevelIndicationStatus(bool enable, unsigned char id);
-  int SetReceiveAudioLevelIndicationStatus(bool enable, unsigned char id);
+  int SetSendMIDStatus(bool enable, unsigned char id);
+  int SetReceiveAudioLevelIndicationStatus(bool enable, unsigned char id, bool isLevelSsrc);
+  int SetReceiveCsrcAudioLevelIndicationStatus(bool enable, unsigned char id);
   void EnableSendTransportSequenceNumber(int id);
   void EnableReceiveTransportSequenceNumber(int id);
 
   void RegisterSenderCongestionControlObjects(
       RtpTransportControllerSendInterface* transport,
       RtcpBandwidthObserver* bandwidth_observer);
   void RegisterReceiverCongestionControlObjects(PacketRouter* packet_router);
   void ResetSenderCongestionControlObjects();
   void ResetReceiverCongestionControlObjects();
   void SetRTCPStatus(bool enable);
   int SetRTCP_CNAME(const char cName[256]);
+  int GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats);
   int GetRemoteRTCPReportBlocks(std::vector<ReportBlock>* report_blocks);
   int GetRTPStatistics(CallStatistics& stats);
   void SetNACKStatus(bool enable, int maxNumberOfPackets);
 
   // From AudioPacketizationCallback in the ACM
   int32_t SendData(FrameType frameType,
                    uint8_t payloadType,
                    uint32_t timeStamp,
@@ -258,16 +271,20 @@ class Channel
 
   // From RtpFeedback in the RTP/RTCP module
   int32_t OnInitializeDecoder(int payload_type,
                               const SdpAudioFormat& audio_format,
                               uint32_t rate) override;
   void OnIncomingSSRCChanged(uint32_t ssrc) override;
   void OnIncomingCSRCChanged(uint32_t CSRC, bool added) override;
 
+  void OnIncomingReceiverReports(const ReportBlockList& aReportBlocks,
+                                 const int64_t aRoundTripTime,
+                                 const int64_t aReceptionTime);
+
   // From Transport (called by the RTP/RTCP module)
   bool SendRtp(const uint8_t* data,
                size_t len,
                const PacketOptions& packet_options) override;
   bool SendRtcp(const uint8_t* data, size_t len) override;
 
   // From AudioMixer::Source.
   AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
@@ -314,32 +331,40 @@ class Channel
   void SetRtcEventLog(RtcEventLog* event_log);
 
   void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats);
   void SetTransportOverhead(size_t transport_overhead_per_packet);
 
   // From OverheadObserver in the RTP/RTCP module
   void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
 
+  bool GetRTCPReceiverStatistics(int64_t* timestamp,
+                                 uint32_t* jitterMs,
+                                 uint32_t* cumulativeLost,
+                                 uint32_t* packetsReceived,
+                                 uint64_t* bytesReceived,
+                                 double* packetsFractionLost,
+                                 int64_t* rtt) const;
+  virtual void SetRtpPacketObserver(RtpPacketObserver* observer);
+
   // The existence of this function alongside OnUplinkPacketLossRate is
   // a compromise. We want the encoder to be agnostic of the PLR source, but
   // we also don't want it to receive conflicting information from TWCC and
   // from RTCP-XR.
   void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
 
   void OnRecoverableUplinkPacketLossRate(float recoverable_packet_loss_rate);
 
   std::vector<RtpSource> GetSources() const {
     return rtp_receiver_->GetSources();
   }
 
  private:
   class ProcessAndEncodeAudioTask;
 
-  int GetRemoteSSRC(unsigned int& ssrc);
   void OnUplinkPacketLossRate(float packet_loss_rate);
   bool InputMute() const;
   bool OnRecoveredPacket(const uint8_t* packet, size_t packet_length);
 
   bool ReceivePacket(const uint8_t* packet,
                      size_t packet_length,
                      const RTPHeader& header);
   bool IsPacketInOrder(const RTPHeader& header) const;
@@ -371,16 +396,17 @@ class Channel
   ChannelState channel_state_;
 
   std::unique_ptr<voe::RtcEventLogProxy> event_log_proxy_;
   std::unique_ptr<voe::RtcpRttStatsProxy> rtcp_rtt_stats_proxy_;
 
   std::unique_ptr<RtpHeaderParser> rtp_header_parser_;
   std::unique_ptr<RTPPayloadRegistry> rtp_payload_registry_;
   std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+  std::unique_ptr<StatisticsProxy> statistics_proxy_;
   std::unique_ptr<RtpReceiver> rtp_receiver_;
   TelephoneEventHandler* telephone_event_handler_;
   std::unique_ptr<RtpRtcp> _rtpRtcpModule;
   std::unique_ptr<AudioCodingModule> audio_coding_;
   std::unique_ptr<AudioSinkInterface> audio_sink_;
   AudioLevel _outputAudioLevel;
   // Downsamples to the codec rate if necessary.
   PushResampler<int16_t> input_resampler_;
@@ -389,16 +415,17 @@ class Channel
   RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_);
 
   // Timestamp of the audio pulled from NetEq.
   rtc::Optional<uint32_t> jitter_buffer_playout_timestamp_;
 
   rtc::CriticalSection video_sync_lock_;
   uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_);
   uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_);
+  int _current_sync_offset;
   uint16_t send_sequence_number_;
 
   rtc::CriticalSection ts_stats_lock_;
 
   std::unique_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
   // The rtp timestamp of the first played out audio frame.
   int64_t capture_start_rtp_time_stamp_;
   // The capture ntp time (in local timebase) of the first played out audio
@@ -434,16 +461,18 @@ class Channel
   std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
   std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
   std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
   std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
 
   // TODO(ossu): Remove once GetAudioDecoderFactory() is no longer needed.
   rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
 
+  RtpPacketObserver* rtp_source_observer_ = nullptr;
+
   rtc::Optional<EncoderProps> cached_encoder_props_;
 
   rtc::ThreadChecker construction_thread_;
 
   const bool use_twcc_plr_for_ana_;
 
   rtc::CriticalSection encoder_queue_lock_;
 
--- a/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.cc
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.cc
@@ -43,16 +43,21 @@ void ChannelProxy::ModifyEncoder(
   channel()->ModifyEncoder(modifier);
 }
 
 void ChannelProxy::SetRTCPStatus(bool enable) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   channel()->SetRTCPStatus(enable);
 }
 
+void ChannelProxy::SetLocalMID(const char* mid) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel()->SetLocalMID(mid);
+}
+
 void ChannelProxy::SetLocalSSRC(uint32_t ssrc) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   int error = channel()->SetLocalSSRC(ssrc);
   RTC_DCHECK_EQ(0, error);
 }
 
 void ChannelProxy::SetRTCP_CNAME(const std::string& c_name) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
@@ -68,19 +73,33 @@ void ChannelProxy::SetNACKStatus(bool en
 }
 
 void ChannelProxy::SetSendAudioLevelIndicationStatus(bool enable, int id) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   int error = channel()->SetSendAudioLevelIndicationStatus(enable, id);
   RTC_DCHECK_EQ(0, error);
 }
 
-void ChannelProxy::SetReceiveAudioLevelIndicationStatus(bool enable, int id) {
+void ChannelProxy::SetReceiveAudioLevelIndicationStatus(bool enable, int id,
+                                                        bool isLevelSsrc) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel()->SetReceiveAudioLevelIndicationStatus(enable, id);
+  int error = channel()->SetReceiveAudioLevelIndicationStatus(enable, id,
+                                                              isLevelSsrc);
+  RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetReceiveCsrcAudioLevelIndicationStatus(bool enable, int id) {
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  int error = channel()->SetReceiveCsrcAudioLevelIndicationStatus(enable, id);
+  RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetSendMIDStatus(bool enable, int id) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  int error = channel()->SetSendMIDStatus(enable, id);
   RTC_DCHECK_EQ(0, error);
 }
 
 void ChannelProxy::EnableSendTransportSequenceNumber(int id) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   channel()->EnableSendTransportSequenceNumber(id);
 }
 
@@ -108,78 +127,147 @@ void ChannelProxy::ResetSenderCongestion
   channel()->ResetSenderCongestionControlObjects();
 }
 
 void ChannelProxy::ResetReceiverCongestionControlObjects() {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   channel()->ResetReceiverCongestionControlObjects();
 }
 
+bool ChannelProxy::GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats)
+{
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel()->GetRTCPPacketTypeCounters(stats) == 0;
+}
+
+bool ChannelProxy::GetRTCPReceiverStatistics(int64_t* timestamp,
+                                             uint32_t* jitterMs,
+                                             uint32_t* cumulativeLost,
+                                             uint32_t* packetsReceived,
+                                             uint64_t* bytesReceived,
+                                             double* packetsFractionLost,
+                                             int64_t* rtt) const {
+  // No thread check necessary, we are synchronizing on the lock in StatsProxy
+  return channel()->GetRTCPReceiverStatistics(timestamp,
+                                              jitterMs,
+                                              cumulativeLost,
+                                              packetsReceived,
+                                              bytesReceived,
+                                              packetsFractionLost,
+                                              rtt);
+}
+
 CallStatistics ChannelProxy::GetRTCPStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  // Since we (Mozilla) need to collect stats on STS, we can't
+  // use the thread-checker (which will want to be called on MainThread)
+  // without refactor of ExecuteStatsQuery_s().
+  // However, GetRTPStatistics internally locks in the SSRC()
+  // and statistician methods.
+
+  // RTC_DCHECK(thread_checker_.CalledOnValidThread());
   CallStatistics stats = {0};
   int error = channel()->GetRTPStatistics(stats);
   RTC_DCHECK_EQ(0, error);
   return stats;
 }
 
+int ChannelProxy::GetRTPStatistics(unsigned int& averageJitterMs,
+                                   unsigned int& cumulativeLost) const {
+  // Since we (Mozilla) need to collect stats on STS, we can't
+  // use the thread-checker (which will want to be called on MainThread)
+  // without refactor of ExecuteStatsQuery_s().
+  // However, GetRTPStatistics internally locks in the SSRC()
+  // and statistician methods.  PlayoutFrequency() should also be safe.
+  // statistics_proxy_->GetStats() also locks
+
+  CallStatistics stats;
+  int result = channel()->GetRTPStatistics(stats);
+  int32_t playoutFrequency = channel()->GetPlayoutFrequency() / 1000;
+  if (playoutFrequency) {
+    averageJitterMs = stats.jitterSamples / playoutFrequency;
+  }
+  cumulativeLost = stats.cumulativeLost;
+  return result;
+}
+
 std::vector<ReportBlock> ChannelProxy::GetRemoteRTCPReportBlocks() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   std::vector<webrtc::ReportBlock> blocks;
   int error = channel()->GetRemoteRTCPReportBlocks(&blocks);
   RTC_DCHECK_EQ(0, error);
   return blocks;
 }
 
 NetworkStatistics ChannelProxy::GetNetworkStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   NetworkStatistics stats = {0};
   int error = channel()->GetNetworkStatistics(stats);
   RTC_DCHECK_EQ(0, error);
   return stats;
 }
 
 AudioDecodingCallStats ChannelProxy::GetDecodingCallStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   AudioDecodingCallStats stats;
   channel()->GetDecodingCallStatistics(&stats);
   return stats;
 }
 
 ANAStats ChannelProxy::GetANAStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->GetANAStatistics();
 }
 
 int ChannelProxy::GetSpeechOutputLevel() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->GetSpeechOutputLevel();
 }
 
 int ChannelProxy::GetSpeechOutputLevelFullRange() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->GetSpeechOutputLevelFullRange();
 }
 
 double ChannelProxy::GetTotalOutputEnergy() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->GetTotalOutputEnergy();
 }
 
 double ChannelProxy::GetTotalOutputDuration() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->GetTotalOutputDuration();
 }
 
 uint32_t ChannelProxy::GetDelayEstimate() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
-             module_process_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+  //           module_process_thread_checker_.CalledOnValidThread());
   return channel()->GetDelayEstimate();
 }
 
+void ChannelProxy::GetDelayEstimates(int* jitter_buffer_delay_ms,
+                                     int* playout_buffer_delay_ms,
+                                     int* avsync_offset_ms) const {
+  //Called from AudioIPC thread
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+  //           module_process_thread_checker_.CalledOnValidThread());
+  return channel()->GetDelayEstimates(jitter_buffer_delay_ms,
+                                      playout_buffer_delay_ms,
+                                      avsync_offset_ms);
+}
+
 bool ChannelProxy::SetSendTelephoneEventPayloadType(int payload_type,
                                                     int payload_frequency) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->SetSendTelephoneEventPayloadType(payload_type,
                                                      payload_frequency) == 0;
 }
 
 bool ChannelProxy::SendTelephoneEventOutband(int event, int duration_ms) {
@@ -301,17 +389,18 @@ void ChannelProxy::SetMinimumPlayoutDela
 }
 
 void ChannelProxy::SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   channel()->SetRtcpRttStats(rtcp_rtt_stats);
 }
 
 bool ChannelProxy::GetRecCodec(CodecInst* codec_inst) const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  //Called on STS Thread to get stats
+  //RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->GetRecCodec(*codec_inst) == 0;
 }
 
 void ChannelProxy::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   channel()->OnTwccBasedUplinkPacketLossRate(packet_loss_rate);
 }
 
@@ -321,15 +410,20 @@ void ChannelProxy::OnRecoverableUplinkPa
   channel()->OnRecoverableUplinkPacketLossRate(recoverable_packet_loss_rate);
 }
 
 std::vector<RtpSource> ChannelProxy::GetSources() const {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return channel()->GetSources();
 }
 
+void ChannelProxy::SetRtpPacketObserver(RtpPacketObserver* observer) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel()->SetRtpPacketObserver(observer);
+}
+
 Channel* ChannelProxy::channel() const {
   RTC_DCHECK(channel_owner_.channel());
   return channel_owner_.channel();
 }
 
 }  // namespace voe
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/channel_proxy.h
@@ -27,16 +27,17 @@
 
 namespace webrtc {
 
 class AudioSinkInterface;
 class PacketRouter;
 class RtcEventLog;
 class RtcpBandwidthObserver;
 class RtcpRttStats;
+class RtpPacketObserver;
 class RtpPacketSender;
 class RtpPacketReceived;
 class RtpReceiver;
 class RtpRtcp;
 class RtpTransportControllerSendInterface;
 class Transport;
 class TransportFeedbackObserver;
 
@@ -56,42 +57,59 @@ class ChannelProxy : public RtpPacketSin
   virtual ~ChannelProxy();
 
   virtual bool SetEncoder(int payload_type,
                           std::unique_ptr<AudioEncoder> encoder);
   virtual void ModifyEncoder(
       rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
 
   virtual void SetRTCPStatus(bool enable);
+  virtual void SetLocalMID(const char* mid);
   virtual void SetLocalSSRC(uint32_t ssrc);
   virtual void SetRTCP_CNAME(const std::string& c_name);
   virtual void SetNACKStatus(bool enable, int max_packets);
   virtual void SetSendAudioLevelIndicationStatus(bool enable, int id);
-  virtual void SetReceiveAudioLevelIndicationStatus(bool enable, int id);
+  virtual void SetReceiveAudioLevelIndicationStatus(bool enable, int id,
+                                                    bool isLevelSsrc = true);
+  virtual void SetReceiveCsrcAudioLevelIndicationStatus(bool enable, int id);
+  virtual void SetSendMIDStatus(bool enable, int id);
   virtual void EnableSendTransportSequenceNumber(int id);
   virtual void EnableReceiveTransportSequenceNumber(int id);
   virtual void RegisterSenderCongestionControlObjects(
       RtpTransportControllerSendInterface* transport,
       RtcpBandwidthObserver* bandwidth_observer);
   virtual void RegisterReceiverCongestionControlObjects(
       PacketRouter* packet_router);
   virtual void ResetSenderCongestionControlObjects();
   virtual void ResetReceiverCongestionControlObjects();
+  virtual bool GetRTCPPacketTypeCounters(RtcpPacketTypeCounter& stats);
+  virtual bool GetRTCPReceiverStatistics(int64_t* timestamp,
+                                         uint32_t* jitterMs,
+                                         uint32_t* cumulativeLost,
+                                         uint32_t* packetsReceived,
+                                         uint64_t* bytesReceived,
+                                         double* packetsFractionLost,
+                                         int64_t* rtt) const;
   virtual CallStatistics GetRTCPStatistics() const;
+  virtual int GetRTPStatistics(unsigned int& averageJitterMs,
+                               unsigned int& cumulativeLost) const;
   virtual std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const;
   virtual NetworkStatistics GetNetworkStatistics() const;
   virtual AudioDecodingCallStats GetDecodingCallStatistics() const;
   virtual ANAStats GetANAStatistics() const;
   virtual int GetSpeechOutputLevel() const;
   virtual int GetSpeechOutputLevelFullRange() const;
   // See description of "totalAudioEnergy" in the WebRTC stats spec:
   // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
   virtual double GetTotalOutputEnergy() const;
   virtual double GetTotalOutputDuration() const;
   virtual uint32_t GetDelayEstimate() const;
+  virtual void GetDelayEstimates(int* jitter_buffer_delay_ms,
+                                 int* playout_buffer_delay_ms,
+                                 int* avsync_offset_ms) const;
   virtual bool SetSendTelephoneEventPayloadType(int payload_type,
                                                 int payload_frequency);
   virtual bool SendTelephoneEventOutband(int event, int duration_ms);
   virtual void SetBitrate(int bitrate_bps, int64_t probing_interval_ms);
   virtual void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
   virtual void SetSink(std::unique_ptr<AudioSinkInterface> sink);
   virtual void SetInputMute(bool muted);
   virtual void RegisterTransport(Transport* transport);
@@ -116,16 +134,18 @@ class ChannelProxy : public RtpPacketSin
   virtual void SetMinimumPlayoutDelay(int delay_ms);
   virtual void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats);
   virtual bool GetRecCodec(CodecInst* codec_inst) const;
   virtual void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
   virtual void OnRecoverableUplinkPacketLossRate(
       float recoverable_packet_loss_rate);
   virtual std::vector<webrtc::RtpSource> GetSources() const;
 
+  virtual void SetRtpPacketObserver(RtpPacketObserver* observer);
+
  private:
   Channel* channel() const;
 
   // Thread checkers document and lock usage of some methods on voe::Channel to
   // specific threads we know about. The goal is to eventually split up
   // voe::Channel into parts with single-threaded semantics, and thereby reduce
   // the need for locks.
   rtc::ThreadChecker worker_thread_checker_;
--- a/media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
+++ b/media/webrtc/trunk/webrtc/voice_engine/include/voe_base.h
@@ -59,16 +59,24 @@ class WEBRTC_DLLEXPORT VoiceEngine {
   // Note that if there are outstanding references held via other interfaces,
   // the voice engine instance will not actually be deleted until those
   // references have been released.
   static bool Delete(VoiceEngine*& voiceEngine);
 
  protected:
   VoiceEngine() {}
   ~VoiceEngine() {}
+
+ private:
+  // VS 2015 (others?) gets confused by a baseclass with no vtbl, and
+  // static_cast<VoiceEngineImpl*>(mVoiceEngine) produces a bad ptr.  It
+  // might also be related to the total size of the object.
+
+  // Add a virtual method to assuage the poor compiler.
+  virtual void DummyVS2015BugFix() {};
 };
 
 // VoEBase
 class WEBRTC_DLLEXPORT VoEBase {
  public:
   struct ChannelConfig {
     AudioCodingModule::Config acm_config;
     bool enable_voice_pacing = false;