Bug 1053130: Refactor MediaEngine video sources; also remove Snapshot function. r=jesup,alfredo
authorChai-hung Tai <ctai@mozilla.com>
Sun, 12 Oct 2014 23:37:37 -0400
changeset 234586 c033be95eb1f218b60182b9613c905b3f46a6450
parent 234585 f547cf19d10415162339116376904b94241874ca
child 234587 2d0a1d87170024ca2ef2d2a5400f9b8ffe143373
push id611
push userraliiev@mozilla.com
push dateMon, 05 Jan 2015 23:23:16 +0000
treeherdermozilla-release@345cd3b9c445 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjesup, alfredo
bugs1053130
milestone35.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1053130: Refactor MediaEngine video sources; also remove Snapshot function. r=jesup,alfredo
content/media/webrtc/MediaEngine.h
content/media/webrtc/MediaEngineCameraVideoSource.cpp
content/media/webrtc/MediaEngineCameraVideoSource.h
content/media/webrtc/MediaEngineDefault.cpp
content/media/webrtc/MediaEngineDefault.h
content/media/webrtc/MediaEngineGonkVideoSource.cpp
content/media/webrtc/MediaEngineGonkVideoSource.h
content/media/webrtc/MediaEngineTabVideoSource.cpp
content/media/webrtc/MediaEngineTabVideoSource.h
content/media/webrtc/MediaEngineWebRTC.cpp
content/media/webrtc/MediaEngineWebRTC.h
content/media/webrtc/MediaEngineWebRTCAudio.cpp
content/media/webrtc/MediaEngineWebRTCVideo.cpp
content/media/webrtc/moz.build
dom/media/MediaManager.cpp
--- a/content/media/webrtc/MediaEngine.h
+++ b/content/media/webrtc/MediaEngine.h
@@ -107,22 +107,16 @@ public:
   /* Start the device and add the track to the provided SourceMediaStream, with
    * the provided TrackID. You may start appending data to the track
    * immediately after. */
   virtual nsresult Start(SourceMediaStream*, TrackID) = 0;
 
   /* tell the source if there are any direct listeners attached */
   virtual void SetDirectListeners(bool) = 0;
 
-  /* Take a snapshot from this source. In the case of video this is a single
-   * image, and for audio, it is a snippet lasting aDuration milliseconds. The
-   * duration argument is ignored for a MediaEngineVideoSource.
-   */
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile) = 0;
-
   /* Called when the stream wants more data */
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
                           TrackTicks &aLastEndTime) = 0;
 
   /* Stop the device and release the corresponding MediaStream */
@@ -173,16 +167,18 @@ public:
       return true;
     }
   }
 
   /* It is an error to call Start() before an Allocate(), and Stop() before
    * a Start(). Only Allocate() may be called after a Deallocate(). */
 
 protected:
+  // Only class' own members can be initialized in constructor initializer list.
+  explicit MediaEngineSource(MediaEngineState aState) : mState(aState) {}
   MediaEngineState mState;
 };
 
 /**
  * Video source and friends.
  */
 class MediaEnginePrefs {
 public:
@@ -226,33 +222,40 @@ private:
   }
 };
 
 class MediaEngineVideoSource : public MediaEngineSource
 {
 public:
   virtual ~MediaEngineVideoSource() {}
 
-  virtual const MediaSourceType GetMediaSource() {
-      return MediaSourceType::Camera;
-  }
   /* This call reserves but does not start the device. */
   virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
                             const MediaEnginePrefs &aPrefs) = 0;
+protected:
+  explicit MediaEngineVideoSource(MediaEngineState aState)
+    : MediaEngineSource(aState) {}
+  MediaEngineVideoSource()
+    : MediaEngineSource(kReleased) {}
 };
 
 /**
  * Audio source and friends.
  */
 class MediaEngineAudioSource : public MediaEngineSource
 {
 public:
   virtual ~MediaEngineAudioSource() {}
 
   /* This call reserves but does not start the device. */
   virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
                             const MediaEnginePrefs &aPrefs) = 0;
+protected:
+  explicit MediaEngineAudioSource(MediaEngineState aState)
+    : MediaEngineSource(aState) {}
+  MediaEngineAudioSource()
+    : MediaEngineSource(kReleased) {}
 
 };
 
 }
 
 #endif /* MEDIAENGINE_H_ */
copy from content/media/webrtc/MediaEngineWebRTCVideo.cpp
copy to content/media/webrtc/MediaEngineCameraVideoSource.cpp
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineCameraVideoSource.cpp
@@ -1,350 +1,62 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "MediaEngineWebRTC.h"
-#include "Layers.h"
-#include "ImageTypes.h"
-#include "ImageContainer.h"
-#include "mozilla/layers/GrallocTextureClient.h"
-#include "nsMemory.h"
-#include "mtransport/runnable_utils.h"
-#include "MediaTrackConstraints.h"
+#include "MediaEngineCameraVideoSource.h"
 
-#ifdef MOZ_B2G_CAMERA
-#include "GrallocImages.h"
-#include "libyuv.h"
-#include "mozilla/Hal.h"
-#include "ScreenOrientation.h"
-using namespace mozilla::dom;
-#endif
 namespace mozilla {
 
-using namespace mozilla::gfx;
 using dom::ConstrainLongRange;
 using dom::ConstrainDoubleRange;
 using dom::MediaTrackConstraintSet;
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
 #define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
 #else
 #define LOG(msg)
 #define LOGFRAME(msg)
 #endif
 
-/**
- * Webrtc video source.
- */
-#ifndef MOZ_B2G_CAMERA
-NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
-#else
-NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
-NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-#endif
-
-// ViEExternalRenderer Callback.
-#ifndef MOZ_B2G_CAMERA
-int
-MediaEngineWebRTCVideoSource::FrameSizeChange(
-   unsigned int w, unsigned int h, unsigned int streams)
-{
-  mWidth = w;
-  mHeight = h;
-  LOG(("Video FrameSizeChange: %ux%u", w, h));
-  return 0;
+/* static */ bool
+MediaEngineCameraVideoSource::IsWithin(int32_t n, const ConstrainLongRange& aRange) {
+  return aRange.mMin <= n && n <= aRange.mMax;
 }
 
-// ViEExternalRenderer Callback. Process every incoming frame here.
-int
-MediaEngineWebRTCVideoSource::DeliverFrame(
-   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
-   void *handle)
-{
-  // mInSnapshotMode can only be set before the camera is turned on and
-  // the renderer is started, so this amounts to a 1-shot
-  if (mInSnapshotMode) {
-    // Set the condition variable to false and notify Snapshot().
-    MonitorAutoLock lock(mMonitor);
-    mInSnapshotMode = false;
-    lock.Notify();
-    return 0;
-  }
-
-  // Check for proper state.
-  if (mState != kStarted) {
-    LOG(("DeliverFrame: video not started"));
-    return 0;
-  }
-
-  if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
-    MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
-    return 0;
-  }
-
-  // Create a video frame and append it to the track.
-  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
-
-  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
-  uint8_t* frame = static_cast<uint8_t*> (buffer);
-  const uint8_t lumaBpp = 8;
-  const uint8_t chromaBpp = 4;
-
-  // Take lots of care to round up!
-  layers::PlanarYCbCrData data;
-  data.mYChannel = frame;
-  data.mYSize = IntSize(mWidth, mHeight);
-  data.mYStride = (mWidth * lumaBpp + 7)/ 8;
-  data.mCbCrStride = (mWidth * chromaBpp + 7) / 8;
-  data.mCbChannel = frame + mHeight * data.mYStride;
-  data.mCrChannel = data.mCbChannel + ((mHeight+1)/2) * data.mCbCrStride;
-  data.mCbCrSize = IntSize((mWidth+1)/ 2, (mHeight+1)/ 2);
-  data.mPicX = 0;
-  data.mPicY = 0;
-  data.mPicSize = IntSize(mWidth, mHeight);
-  data.mStereoMode = StereoMode::MONO;
-
-  videoImage->SetData(data);
-
-#ifdef DEBUG
-  static uint32_t frame_num = 0;
-  LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
-            mWidth, mHeight, time_stamp, render_time));
-#endif
-
-  // we don't touch anything in 'this' until here (except for snapshot,
-  // which has it's own lock)
-  MonitorAutoLock lock(mMonitor);
-
-  // implicitly releases last image
-  mImage = image.forget();
-
-  return 0;
-}
-#endif
-
-// Called if the graph thinks it's running out of buffered video; repeat
-// the last frame for whatever minimum period it think it needs.  Note that
-// this means that no *real* frame can be inserted during this period.
-void
-MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
-                                         SourceMediaStream *aSource,
-                                         TrackID aID,
-                                         StreamTime aDesiredTime,
-                                         TrackTicks &aLastEndTime)
-{
-  VideoSegment segment;
-
-  MonitorAutoLock lock(mMonitor);
-  // B2G does AddTrack, but holds kStarted until the hardware changes state.
-  // So mState could be kReleased here.  We really don't care about the state,
-  // though.
-
-  // Note: we're not giving up mImage here
-  nsRefPtr<layers::Image> image = mImage;
-  TrackTicks target = aSource->TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
-  TrackTicks delta = target - aLastEndTime;
-  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
-            (int64_t) target, (int64_t) delta, image ? "" : "<null>"));
-
-  // Bug 846188 We may want to limit incoming frames to the requested frame rate
-  // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
-  // cause issues.
-  // We may want to signal if the actual frame rate is below mMinFPS -
-  // cameras often don't return the requested frame rate especially in low
-  // light; we should consider surfacing this so that we can switch to a
-  // lower resolution (which may up the frame rate)
-
-  // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
-  // Doing so means a negative delta and thus messes up handling of the graph
-  if (delta > 0) {
-    // nullptr images are allowed
-    IntSize size(image ? mWidth : 0, image ? mHeight : 0);
-    segment.AppendFrame(image.forget(), delta, size);
-    // This can fail if either a) we haven't added the track yet, or b)
-    // we've removed or finished the track.
-    if (aSource->AppendToTrack(aID, &(segment))) {
-      aLastEndTime = target;
-    }
-  }
-}
-
-static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
+/* static */ bool
+MediaEngineCameraVideoSource::IsWithin(double n, const ConstrainDoubleRange& aRange) {
   return aRange.mMin <= n && n <= aRange.mMax;
 }
 
-static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
-  return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
+/* static */ int32_t
+MediaEngineCameraVideoSource::Clamp(int32_t n, const ConstrainLongRange& aRange) {
   return std::max(aRange.mMin, std::min(n, aRange.mMax));
 }
 
-static bool
-AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
+/* static */ bool
+MediaEngineCameraVideoSource::AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
   return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
 }
 
-static bool
-Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
+/* static */ bool
+MediaEngineCameraVideoSource::Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
   MOZ_ASSERT(AreIntersecting(aA, aB));
   aA.mMin = std::max(aA.mMin, aB.mMin);
   aA.mMax = std::min(aA.mMax, aB.mMax);
   return true;
 }
 
-static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
-                                 const webrtc::CaptureCapability& aCandidate) {
-  if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
-      !IsWithin(aCandidate.height, aConstraints.mHeight)) {
-    return false;
-  }
-  if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
-    return false;
-  }
-  return true;
-}
-
+// A special version of the algorithm for cameras that don't list capabilities.
 void
-MediaEngineWebRTCVideoSource::ChooseCapability(
-    const VideoTrackConstraintsN &aConstraints,
-    const MediaEnginePrefs &aPrefs)
-{
-#ifdef MOZ_B2G_CAMERA
-  return GuessCapability(aConstraints, aPrefs);
-#else
-  NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
-  int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
-  if (num <= 0) {
-    // Mac doesn't support capabilities.
-    return GuessCapability(aConstraints, aPrefs);
-  }
-
-  // The rest is the full algorithm for cameras that can list their capabilities.
-
-  LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
-       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
-  typedef nsTArray<uint8_t> SourceSet;
-
-  SourceSet candidateSet;
-  for (int i = 0; i < num; i++) {
-    candidateSet.AppendElement(i);
-  }
-
-  // Pick among capabilities: First apply required constraints.
-
-  for (uint32_t i = 0; i < candidateSet.Length();) {
-    webrtc::CaptureCapability cap;
-    mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
-                                      candidateSet[i], cap);
-    if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
-      candidateSet.RemoveElementAt(i);
-    } else {
-      ++i;
-    }
-  }
-
-  SourceSet tailSet;
-
-  // Then apply advanced (formerly known as optional) constraints.
-
-  if (aConstraints.mAdvanced.WasPassed()) {
-    auto &array = aConstraints.mAdvanced.Value();
-
-    for (uint32_t i = 0; i < array.Length(); i++) {
-      SourceSet rejects;
-      for (uint32_t j = 0; j < candidateSet.Length();) {
-        webrtc::CaptureCapability cap;
-        mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
-                                          candidateSet[j], cap);
-        if (!SatisfyConstraintSet(array[i], cap)) {
-          rejects.AppendElement(candidateSet[j]);
-          candidateSet.RemoveElementAt(j);
-        } else {
-          ++j;
-        }
-      }
-      (candidateSet.Length()? tailSet : candidateSet).MoveElementsFrom(rejects);
-    }
-  }
-
-  if (!candidateSet.Length()) {
-    candidateSet.AppendElement(0);
-  }
-
-  int prefWidth = aPrefs.GetWidth();
-  int prefHeight = aPrefs.GetHeight();
-
-  // Default is closest to available capability but equal to or below;
-  // otherwise closest above.  Since we handle the num=0 case above and
-  // take the first entry always, we can never exit uninitialized.
-
-  webrtc::CaptureCapability cap;
-  bool higher = true;
-  for (uint32_t i = 0; i < candidateSet.Length(); i++) {
-    mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
-                                      kMaxUniqueIdLength, candidateSet[i], cap);
-    if (higher) {
-      if (i == 0 ||
-          (mCapability.width > cap.width && mCapability.height > cap.height)) {
-        // closer than the current choice
-        mCapability = cap;
-        // FIXME: expose expected capture delay?
-      }
-      if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
-        higher = false;
-      }
-    } else {
-      if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
-          cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
-        continue;
-      }
-      if (mCapability.width < cap.width && mCapability.height < cap.height) {
-        mCapability = cap;
-        // FIXME: expose expected capture delay?
-      }
-    }
-    // Same resolution, maybe better format or FPS match
-    if (mCapability.width == cap.width && mCapability.height == cap.height) {
-      // FPS too low
-      if (cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
-        continue;
-      }
-      // Better match
-      if (cap.maxFPS < mCapability.maxFPS) {
-        mCapability = cap;
-      } else if (cap.maxFPS == mCapability.maxFPS) {
-        // Resolution and FPS the same, check format
-        if (cap.rawType == webrtc::RawVideoType::kVideoI420
-          || cap.rawType == webrtc::RawVideoType::kVideoYUY2
-          || cap.rawType == webrtc::RawVideoType::kVideoYV12) {
-          mCapability = cap;
-        }
-      }
-    }
-  }
-  LOG(("chose cap %dx%d @%dfps codec %d raw %d",
-       mCapability.width, mCapability.height, mCapability.maxFPS,
-       mCapability.codecType, mCapability.rawType));
-#endif
-}
-
-// A special version of the algorithm for cameras that don't list capabilities.
-
-void
-MediaEngineWebRTCVideoSource::GuessCapability(
-    const VideoTrackConstraintsN &aConstraints,
-    const MediaEnginePrefs &aPrefs)
+MediaEngineCameraVideoSource::GuessCapability(
+    const VideoTrackConstraintsN& aConstraints,
+    const MediaEnginePrefs& aPrefs)
 {
   LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
        aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
 
   // In short: compound constraint-ranges and use pref as ideal.
 
   ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
   ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
@@ -411,731 +123,27 @@ MediaEngineWebRTCVideoSource::GuessCapab
     }
   }
   mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
   LOG(("chose cap %dx%d @%dfps",
        mCapability.width, mCapability.height, mCapability.maxFPS));
 }
 
 void
-MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
+MediaEngineCameraVideoSource::GetName(nsAString& aName)
 {
   aName = mDeviceName;
 }
 
 void
-MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
+MediaEngineCameraVideoSource::GetUUID(nsAString& aUUID)
 {
   aUUID = mUniqueId;
 }
 
-nsresult
-MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
-                                       const MediaEnginePrefs &aPrefs)
-{
-  LOG((__FUNCTION__));
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-  if (mState == kReleased && mInitDone) {
-    ChooseCapability(aConstraints, aPrefs);
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::AllocImpl));
-    mCallbackMonitor.Wait();
-    if (mState != kAllocated) {
-      return NS_ERROR_FAILURE;
-    }
-  }
-#else
-  if (mState == kReleased && mInitDone) {
-    // Note: if shared, we don't allow a later opener to affect the resolution.
-    // (This may change depending on spec changes for Constraints/settings)
-
-    ChooseCapability(aConstraints, aPrefs);
-
-    if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
-                                           kMaxUniqueIdLength, mCaptureIndex)) {
-      return NS_ERROR_FAILURE;
-    }
-    mState = kAllocated;
-    LOG(("Video device %d allocated", mCaptureIndex));
-  } else if (mSources.IsEmpty()) {
-    LOG(("Video device %d reallocated", mCaptureIndex));
-  } else {
-    LOG(("Video device %d allocated shared", mCaptureIndex));
-  }
-#endif
-
-  return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Deallocate()
-{
-  LOG((__FUNCTION__));
-  if (mSources.IsEmpty()) {
-#ifdef MOZ_B2G_CAMERA
-    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-    if (mState != kStopped && mState != kAllocated) {
-      return NS_ERROR_FAILURE;
-    }
-#ifdef MOZ_B2G_CAMERA
-    // We do not register success callback here
-
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::DeallocImpl));
-    mCallbackMonitor.Wait();
-    if (mState != kReleased) {
-      return NS_ERROR_FAILURE;
-    }
-#elif XP_MACOSX
-    // Bug 829907 - on mac, in shutdown, the mainthread stops processing
-    // 'native' events, and the QTKit code uses events to the main native CFRunLoop
-    // in order to provide thread safety.  In order to avoid this locking us up,
-    // release the ViE capture device synchronously on MainThread (so the native
-    // event isn't needed).
-    // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
-    // XXX It might be nice to only do this if we're in shutdown...  Hard to be
-    // sure when that is though.
-    // Thread safety: a) we call this synchronously, and don't use ViECapture from
-    // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
-    // an exclusive object lock and deletes it in a critical section, so all in all
-    // this should be safe threadwise.
-    NS_DispatchToMainThread(WrapRunnable(mViECapture,
-                                         &webrtc::ViECapture::ReleaseCaptureDevice,
-                                         mCaptureIndex),
-                            NS_DISPATCH_SYNC);
-#else
-    mViECapture->ReleaseCaptureDevice(mCaptureIndex);
-#endif
-    mState = kReleased;
-    LOG(("Video device %d deallocated", mCaptureIndex));
-  } else {
-    LOG(("Video device %d deallocated but still in use", mCaptureIndex));
-  }
-  return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
-{
-  LOG((__FUNCTION__));
-#ifndef MOZ_B2G_CAMERA
-  int error = 0;
-#endif
-  if (!mInitDone || !aStream) {
-    return NS_ERROR_FAILURE;
-  }
-
-  mSources.AppendElement(aStream);
-
-  aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
-  aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
-
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-
-  if (mState == kStarted) {
-    return NS_OK;
-  }
-  mImageContainer = layers::LayerManager::CreateImageContainer();
-
-#ifdef MOZ_B2G_CAMERA
-  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                       &MediaEngineWebRTCVideoSource::StartImpl,
-                                       mCapability));
-  mCallbackMonitor.Wait();
-  if (mState != kStarted) {
-    return NS_ERROR_FAILURE;
-  }
-#else
-  mState = kStarted;
-  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
-  if (error == -1) {
-    return NS_ERROR_FAILURE;
-  }
-
-  error = mViERender->StartRender(mCaptureIndex);
-  if (error == -1) {
-    return NS_ERROR_FAILURE;
-  }
-
-  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
-    return NS_ERROR_FAILURE;
-  }
-#endif
-
-  return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
-{
-  LOG((__FUNCTION__));
-  if (!mSources.RemoveElement(aSource)) {
-    // Already stopped - this is allowed
-    return NS_OK;
-  }
-  if (!mSources.IsEmpty()) {
-    return NS_OK;
-  }
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-  if (mState != kStarted) {
-    return NS_ERROR_FAILURE;
-  }
-
-  {
-    MonitorAutoLock lock(mMonitor);
-    mState = kStopped;
-    aSource->EndTrack(aID);
-    // Drop any cached image so we don't start with a stale image on next
-    // usage
-    mImage = nullptr;
-  }
-#ifdef MOZ_B2G_CAMERA
-  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                       &MediaEngineWebRTCVideoSource::StopImpl));
-#else
-  mViERender->StopRender(mCaptureIndex);
-  mViERender->RemoveRenderer(mCaptureIndex);
-  mViECapture->StopCapture(mCaptureIndex);
-#endif
-
-  return NS_OK;
-}
-
 void
-MediaEngineWebRTCVideoSource::SetDirectListeners(bool aHasDirectListeners)
+MediaEngineCameraVideoSource::SetDirectListeners(bool aHasDirectListeners)
 {
   LOG((__FUNCTION__));
   mHasDirectListeners = aHasDirectListeners;
 }
 
-nsresult
-MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
-  return NS_ERROR_NOT_IMPLEMENTED;
-}
-
-/**
- * Initialization and Shutdown functions for the video source, called by the
- * constructor and destructor respectively.
- */
-
-void
-MediaEngineWebRTCVideoSource::Init()
-{
-#ifdef MOZ_B2G_CAMERA
-  nsAutoCString deviceName;
-  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
-  CopyUTF8toUTF16(deviceName, mDeviceName);
-  CopyUTF8toUTF16(deviceName, mUniqueId);
-#else
-  // fix compile warning for these being unused. (remove once used)
-  (void) mFps;
-  (void) mMinFps;
-
-  LOG((__FUNCTION__));
-  if (mVideoEngine == nullptr) {
-    return;
-  }
-
-  mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
-  if (mViEBase == nullptr) {
-    return;
-  }
-
-  // Get interfaces for capture, render for now
-  mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
-  mViERender = webrtc::ViERender::GetInterface(mVideoEngine);
-
-  if (mViECapture == nullptr || mViERender == nullptr) {
-    return;
-  }
-
-  char deviceName[kMaxDeviceNameLength];
-  char uniqueId[kMaxUniqueIdLength];
-  if (mViECapture->GetCaptureDevice(mCaptureIndex,
-                                    deviceName, kMaxDeviceNameLength,
-                                    uniqueId, kMaxUniqueIdLength)) {
-    return;
-  }
-
-  CopyUTF8toUTF16(deviceName, mDeviceName);
-  CopyUTF8toUTF16(uniqueId, mUniqueId);
-#endif
-
-  mInitDone = true;
-}
-
-void
-MediaEngineWebRTCVideoSource::Shutdown()
-{
-  LOG((__FUNCTION__));
-  if (!mInitDone) {
-    return;
-  }
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-  if (mState == kStarted) {
-    while (!mSources.IsEmpty()) {
-      Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
-    }
-    MOZ_ASSERT(mState == kStopped);
-  }
-
-  if (mState == kAllocated || mState == kStopped) {
-    Deallocate();
-  }
-#ifndef MOZ_B2G_CAMERA
-  mViECapture->Release();
-  mViERender->Release();
-  mViEBase->Release();
-#endif
-  mState = kReleased;
-  mInitDone = false;
-}
-
-void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
-  // NOTE: mCaptureIndex might have changed when allocated!
-  // Use aIndex to update information, but don't change mCaptureIndex!!
-#ifdef MOZ_B2G_CAMERA
-  // Caller looked up this source by uniqueId; since deviceName == uniqueId nothing else changes
-#else
-  // Caller looked up this source by uniqueId, so it shouldn't change
-  char deviceName[kMaxDeviceNameLength];
-  char uniqueId[kMaxUniqueIdLength];
-
-  if (mViECapture->GetCaptureDevice(aIndex,
-                                    deviceName, sizeof(deviceName),
-                                    uniqueId, sizeof(uniqueId))) {
-    return;
-  }
-
-  CopyUTF8toUTF16(deviceName, mDeviceName);
-#ifdef DEBUG
-  nsString temp;
-  CopyUTF8toUTF16(uniqueId, temp);
-  MOZ_ASSERT(temp.Equals(mUniqueId));
-#endif
-#endif
-}
-
-#ifdef MOZ_B2G_CAMERA
-
-// All these functions must be run on MainThread!
-void
-MediaEngineWebRTCVideoSource::AllocImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-
-  mCameraControl = ICameraControl::Create(mCaptureIndex);
-  if (mCameraControl) {
-    mState = kAllocated;
-    // Add this as a listener for CameraControl events. We don't need
-    // to explicitly remove this--destroying the CameraControl object
-    // in DeallocImpl() will do that for us.
-    mCameraControl->AddListener(this);
-  }
-
-  mCallbackMonitor.Notify();
-}
-
-void
-MediaEngineWebRTCVideoSource::DeallocImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  mCameraControl = nullptr;
-}
-
-// The same algorithm from bug 840244
-static int
-GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
-  int screenAngle = 0;
-  switch (aScreen) {
-    case eScreenOrientation_PortraitPrimary:
-      screenAngle = 0;
-      break;
-    case eScreenOrientation_PortraitSecondary:
-      screenAngle = 180;
-      break;
-   case eScreenOrientation_LandscapePrimary:
-      screenAngle = 90;
-      break;
-   case eScreenOrientation_LandscapeSecondary:
-      screenAngle = 270;
-      break;
-   default:
-      MOZ_ASSERT(false);
-      break;
-  }
-
-  int result;
-
-  if (aBackCamera) {
-    //back camera
-    result = (aCameraMountAngle - screenAngle + 360) % 360;
-  } else {
-    //front camera
-    result = (aCameraMountAngle + screenAngle) % 360;
-  }
-  return result;
-}
-
-// undefine to remove on-the-fly rotation support
-#define DYNAMIC_GUM_ROTATION
-
-void
-MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
-#ifdef DYNAMIC_GUM_ROTATION
-  if (mHasDirectListeners) {
-    // aka hooked to PeerConnection
-    MonitorAutoLock enter(mMonitor);
-    mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
-
-    LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
-         mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
-  }
-#endif
-
-  mOrientationChanged = true;
-}
-
-void
-MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  ICameraControl::Configuration config;
-  config.mMode = ICameraControl::kPictureMode;
-  config.mPreviewSize.width = aCapability.width;
-  config.mPreviewSize.height = aCapability.height;
-  mCameraControl->Start(&config);
-  mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
-
-  hal::RegisterScreenConfigurationObserver(this);
-}
-
-void
-MediaEngineWebRTCVideoSource::StopImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  hal::UnregisterScreenConfigurationObserver(this);
-  mCameraControl->Stop();
-}
-
-void
-MediaEngineWebRTCVideoSource::SnapshotImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-  mCameraControl->TakePicture();
-}
-
-void
-MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
-{
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-  if (aState == CameraControlListener::kHardwareClosed) {
-    // When the first CameraControl listener is added, it gets pushed
-    // the current state of the camera--normally 'closed'. We only
-    // pay attention to that state if we've progressed out of the
-    // allocated state.
-    if (mState != kAllocated) {
-      mState = kReleased;
-      mCallbackMonitor.Notify();
-    }
-  } else {
-    // Can't read this except on MainThread (ugh)
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::GetRotation));
-    mState = kStarted;
-    mCallbackMonitor.Notify();
-  }
-}
-
-void
-MediaEngineWebRTCVideoSource::GetRotation()
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  MonitorAutoLock enter(mMonitor);
-
-  mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
-  MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
-             mCameraAngle == 270);
-  hal::ScreenConfiguration config;
-  hal::GetCurrentScreenConfiguration(&config);
-
-  nsCString deviceName;
-  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
-  if (deviceName.EqualsASCII("back")) {
-    mBackCamera = true;
-  }
-
-  mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
-  LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
-       mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
-}
-
-void
-MediaEngineWebRTCVideoSource::OnUserError(UserContext aContext, nsresult aError)
-{
-  {
-    // Scope the monitor, since there is another monitor below and we don't want
-    // unexpected deadlock.
-    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-    mCallbackMonitor.Notify();
-  }
-
-  // A main thread runnable to send error code to all queued PhotoCallbacks.
-  class TakePhotoError : public nsRunnable {
-  public:
-    TakePhotoError(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
-                   nsresult aRv)
-      : mRv(aRv)
-    {
-      mCallbacks.SwapElements(aCallbacks);
-    }
-
-    NS_IMETHOD Run()
-    {
-      uint32_t callbackNumbers = mCallbacks.Length();
-      for (uint8_t i = 0; i < callbackNumbers; i++) {
-        mCallbacks[i]->PhotoError(mRv);
-      }
-      // PhotoCallback needs to dereference on main thread.
-      mCallbacks.Clear();
-      return NS_OK;
-    }
-
-  protected:
-    nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
-    nsresult mRv;
-  };
-
-  if (aContext == UserContext::kInTakePicture) {
-    MonitorAutoLock lock(mMonitor);
-    if (mPhotoCallbacks.Length()) {
-      NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
-    }
-  }
-}
-
-void
-MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
-{
-  // It needs to start preview because Gonk camera will stop preview while
-  // taking picture.
-  mCameraControl->StartPreview();
-
-  // Create a main thread runnable to generate a blob and call all current queued
-  // PhotoCallbacks.
-  class GenerateBlobRunnable : public nsRunnable {
-  public:
-    GenerateBlobRunnable(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
-                         uint8_t* aData,
-                         uint32_t aLength,
-                         const nsAString& aMimeType)
-    {
-      mCallbacks.SwapElements(aCallbacks);
-      mPhoto.AppendElements(aData, aLength);
-      mMimeType = aMimeType;
-    }
-
-    NS_IMETHOD Run()
-    {
-      nsRefPtr<dom::File> blob =
-        dom::File::CreateMemoryFile(nullptr, mPhoto.Elements(), mPhoto.Length(), mMimeType);
-      uint32_t callbackCounts = mCallbacks.Length();
-      for (uint8_t i = 0; i < callbackCounts; i++) {
-        nsRefPtr<dom::File> tempBlob = blob;
-        mCallbacks[i]->PhotoComplete(tempBlob.forget());
-      }
-      // PhotoCallback needs to dereference on main thread.
-      mCallbacks.Clear();
-      return NS_OK;
-    }
-
-    nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
-    nsTArray<uint8_t> mPhoto;
-    nsString mMimeType;
-  };
-
-  // All elements in mPhotoCallbacks will be swapped in GenerateBlobRunnable
-  // constructor. This captured image will be sent to all the queued
-  // PhotoCallbacks in this runnable.
-  MonitorAutoLock lock(mMonitor);
-  if (mPhotoCallbacks.Length()) {
-    NS_DispatchToMainThread(
-      new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
-  }
-}
-
-uint32_t
-MediaEngineWebRTCVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
-{
-  switch (aFormat) {
-  case HAL_PIXEL_FORMAT_RGBA_8888:
-    return libyuv::FOURCC_BGRA;
-  case HAL_PIXEL_FORMAT_YCrCb_420_SP:
-    return libyuv::FOURCC_NV21;
-  case HAL_PIXEL_FORMAT_YV12:
-    return libyuv::FOURCC_YV12;
-  default: {
-    LOG((" xxxxx Unknown pixel format %d", aFormat));
-    MOZ_ASSERT(false, "Unknown pixel format.");
-    return libyuv::FOURCC_ANY;
-    }
-  }
-}
-
-void
-MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
-  layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
-  android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
-  void *pMem = nullptr;
-  uint32_t size = aWidth * aHeight * 3 / 2;
-
-  graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
-
-  uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
-  // Create a video frame and append it to the track.
-  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
-  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
-  uint32_t dstWidth;
-  uint32_t dstHeight;
-
-  if (mRotation == 90 || mRotation == 270) {
-    dstWidth = aHeight;
-    dstHeight = aWidth;
-  } else {
-    dstWidth = aWidth;
-    dstHeight = aHeight;
-  }
-
-  uint32_t half_width = dstWidth / 2;
-  uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
-  libyuv::ConvertToI420(srcPtr, size,
-                        dstPtr, dstWidth,
-                        dstPtr + (dstWidth * dstHeight), half_width,
-                        dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
-                        0, 0,
-                        aWidth, aHeight,
-                        aWidth, aHeight,
-                        static_cast<libyuv::RotationMode>(mRotation),
-                        ConvertPixelFormatToFOURCC(graphicBuffer->getPixelFormat()));
-  graphicBuffer->unlock();
-
-  const uint8_t lumaBpp = 8;
-  const uint8_t chromaBpp = 4;
-
-  layers::PlanarYCbCrData data;
-  data.mYChannel = dstPtr;
-  data.mYSize = IntSize(dstWidth, dstHeight);
-  data.mYStride = dstWidth * lumaBpp / 8;
-  data.mCbCrStride = dstWidth * chromaBpp / 8;
-  data.mCbChannel = dstPtr + dstHeight * data.mYStride;
-  data.mCrChannel = data.mCbChannel +( dstHeight * data.mCbCrStride / 2);
-  data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
-  data.mPicX = 0;
-  data.mPicY = 0;
-  data.mPicSize = IntSize(dstWidth, dstHeight);
-  data.mStereoMode = StereoMode::MONO;
-
-  videoImage->SetDataNoCopy(data);
-
-  // implicitly releases last image
-  mImage = image.forget();
-}
-
-bool
-MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
-  {
-    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-    if (mState == kStopped) {
-      return false;
-    }
-  }
-
-  MonitorAutoLock enter(mMonitor);
-  // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
-  RotateImage(aImage, aWidth, aHeight);
-  if (mRotation != 0 && mRotation != 180) {
-    uint32_t temp = aWidth;
-    aWidth = aHeight;
-    aHeight = temp;
-  }
-  if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
-    mWidth = aWidth;
-    mHeight = aHeight;
-    LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
-  }
-
-  return true; // return true because we're accepting the frame
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::TakePhoto(PhotoCallback* aCallback)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  MonitorAutoLock lock(mMonitor);
-
-  // If other callback exists, that means there is a captured picture on the way,
-  // it doesn't need to TakePicture() again.
-  if (!mPhotoCallbacks.Length()) {
-    nsresult rv;
-    if (mOrientationChanged) {
-      UpdatePhotoOrientation();
-    }
-    rv = mCameraControl->TakePicture();
-    if (NS_FAILED(rv)) {
-      return rv;
-    }
-  }
-
-  mPhotoCallbacks.AppendElement(aCallback);
-
-  return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::UpdatePhotoOrientation()
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  hal::ScreenConfiguration config;
-  hal::GetCurrentScreenConfiguration(&config);
-
-  // The rotation angle is clockwise.
-  int orientation = 0;
-  switch (config.orientation()) {
-    case eScreenOrientation_PortraitPrimary:
-      orientation = 0;
-      break;
-    case eScreenOrientation_PortraitSecondary:
-      orientation = 180;
-      break;
-   case eScreenOrientation_LandscapePrimary:
-      orientation = 270;
-      break;
-   case eScreenOrientation_LandscapeSecondary:
-      orientation = 90;
-      break;
-  }
-
-  // Front camera is inverse angle comparing to back camera.
-  orientation = (mBackCamera ? orientation : (-orientation));
-
-  ICameraControlParameterSetAutoEnter batch(mCameraControl);
-  // It changes the orientation value in EXIF information only.
-  mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
-
-  mOrientationChanged = false;
-
-  return NS_OK;
-}
-
-#endif
-
-}
+} // namespace mozilla
copy from content/media/webrtc/MediaEngineWebRTC.h
copy to content/media/webrtc/MediaEngineCameraVideoSource.h
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineCameraVideoSource.h
@@ -1,462 +1,100 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef MEDIAENGINEWEBRTC_H_
-#define MEDIAENGINEWEBRTC_H_
-
-#include "prcvar.h"
-#include "prthread.h"
-#include "nsIThread.h"
-#include "nsIRunnable.h"
+#ifndef MediaEngineCameraVideoSource_h
+#define MediaEngineCameraVideoSource_h
 
-#include "mozilla/dom/File.h"
-#include "mozilla/Mutex.h"
-#include "mozilla/Monitor.h"
-#include "nsCOMPtr.h"
-#include "nsThreadUtils.h"
-#include "DOMMediaStream.h"
-#include "nsDirectoryServiceDefs.h"
-#include "nsComponentManagerUtils.h"
-#include "nsRefPtrHashtable.h"
-
-#include "VideoUtils.h"
 #include "MediaEngine.h"
-#include "VideoSegment.h"
-#include "AudioSegment.h"
-#include "StreamBuffer.h"
-#include "MediaStreamGraph.h"
+#include "MediaTrackConstraints.h"
 
-#include "MediaEngineWrapper.h"
-#include "mozilla/dom/MediaStreamTrackBinding.h"
-// WebRTC library includes follow
-#include "webrtc/common.h"
-// Audio Engine
-#include "webrtc/voice_engine/include/voe_base.h"
-#include "webrtc/voice_engine/include/voe_codec.h"
-#include "webrtc/voice_engine/include/voe_hardware.h"
-#include "webrtc/voice_engine/include/voe_network.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_volume_control.h"
-#include "webrtc/voice_engine/include/voe_external_media.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_call_report.h"
+#include "nsDirectoryServiceDefs.h"
 
-// Video Engine
 // conflicts with #include of scoped_ptr.h
 #undef FF
-#include "webrtc/video_engine/include/vie_base.h"
-#include "webrtc/video_engine/include/vie_codec.h"
-#include "webrtc/video_engine/include/vie_render.h"
 #include "webrtc/video_engine/include/vie_capture.h"
-#ifdef MOZ_B2G_CAMERA
-#include "CameraControlListener.h"
-#include "ICameraControl.h"
-#include "ImageContainer.h"
-#include "nsGlobalWindow.h"
-#include "prprf.h"
-#include "mozilla/Hal.h"
-#endif
-
-#include "NullTransport.h"
-#include "AudioOutputObserver.h"
 
 namespace mozilla {
 
-#ifdef MOZ_B2G_CAMERA
-class CameraAllocateRunnable;
-class GetCameraNameRunnable;
-#endif
-
-/**
- * The WebRTC implementation of the MediaEngine interface.
- *
- * On B2G platform, member data may accessed from different thread after construction:
- *
- * MediaThread:
- *   mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
- *   mImageContainer, mSources, mState, mImage
- *
- * MainThread:
- *   mCaptureIndex, mLastCapture, mState,  mWidth, mHeight,
- *
- * Where mWidth, mHeight, mImage, mPhotoCallbacks are protected by mMonitor
- *       mState is protected by mCallbackMonitor
- * Other variable is accessed only from single thread
- */
-class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
-                                   , public nsRunnable
-#ifdef MOZ_B2G_CAMERA
-                                   , public CameraControlListener
-                                   , public mozilla::hal::ScreenConfigurationObserver
-#else
-                                   , public webrtc::ExternalRenderer
-#endif
+class MediaEngineCameraVideoSource : public MediaEngineVideoSource
 {
 public:
-#ifdef MOZ_B2G_CAMERA
-  MediaEngineWebRTCVideoSource(int aIndex,
-                               MediaSourceType aMediaSource = MediaSourceType::Camera)
-    : mCameraControl(nullptr)
-    , mCallbackMonitor("WebRTCCamera.CallbackMonitor")
-    , mRotation(0)
-    , mBackCamera(false)
-    , mOrientationChanged(true) // Correct the orientation at first time takePhoto.
-    , mCaptureIndex(aIndex)
-    , mMediaSource(aMediaSource)
-    , mMonitor("WebRTCCamera.Monitor")
+  MediaEngineCameraVideoSource(int aIndex,
+                               const char* aMonitorName = "Camera.Monitor")
+    : MediaEngineVideoSource(kReleased)
+    , mMonitor(aMonitorName)
     , mWidth(0)
     , mHeight(0)
+    , mInitDone(false)
     , mHasDirectListeners(false)
-    , mInitDone(false)
-    , mInSnapshotMode(false)
-    , mSnapshotPath(nullptr)
-  {
-    mState = kReleased;
-    Init();
-  }
-#else
-  // ViEExternalRenderer.
-  virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
-  virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t,
-                           void *handle);
-  /**
-   * Does DeliverFrame() support a null buffer and non-null handle
-   * (video texture)?
-   * XXX Investigate!  Especially for Android/B2G
-   */
-  virtual bool IsTextureSupported() { return false; }
-
-  MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
-                               MediaSourceType aMediaSource = MediaSourceType::Camera)
-    : mVideoEngine(aVideoEnginePtr)
     , mCaptureIndex(aIndex)
     , mFps(-1)
-    , mMinFps(-1)
-    , mMediaSource(aMediaSource)
-    , mMonitor("WebRTCCamera.Monitor")
-    , mWidth(0)
-    , mHeight(0)
-    , mHasDirectListeners(false)
-    , mInitDone(false)
-    , mInSnapshotMode(false)
-    , mSnapshotPath(nullptr) {
-    MOZ_ASSERT(aVideoEnginePtr);
-    mState = kReleased;
-    Init();
-  }
-#endif
+  {}
+
 
-  virtual void GetName(nsAString&);
-  virtual void GetUUID(nsAString&);
-  virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
-                            const MediaEnginePrefs &aPrefs);
-  virtual nsresult Deallocate();
-  virtual nsresult Start(SourceMediaStream*, TrackID);
-  virtual nsresult Stop(SourceMediaStream*, TrackID);
-  virtual void SetDirectListeners(bool aHasListeners);
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
+  virtual void GetName(nsAString& aName) MOZ_OVERRIDE;
+  virtual void GetUUID(nsAString& aUUID) MOZ_OVERRIDE;
+  virtual void SetDirectListeners(bool aHasListeners) MOZ_OVERRIDE;
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
-  virtual void NotifyPull(MediaStreamGraph* aGraph,
-                          SourceMediaStream *aSource,
-                          TrackID aId,
-                          StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime);
+                          int32_t aPlayoutDelay) MOZ_OVERRIDE
+  {
+    return NS_OK;
+  };
 
-  virtual bool IsFake() {
+  virtual bool IsFake() MOZ_OVERRIDE
+  {
     return false;
   }
 
   virtual const MediaSourceType GetMediaSource() {
-    return mMediaSource;
+      return MediaSourceType::Camera;
   }
 
-#ifndef MOZ_B2G_CAMERA
-  NS_DECL_THREADSAFE_ISUPPORTS
-
-  nsresult TakePhoto(PhotoCallback* aCallback)
+  virtual nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE
   {
     return NS_ERROR_NOT_IMPLEMENTED;
   }
-#else
-  // We are subclassed from CameraControlListener, which implements a
-  // threadsafe reference-count for us.
-  NS_DECL_ISUPPORTS_INHERITED
-
-  void OnHardwareStateChange(HardwareState aState);
-  void GetRotation();
-  bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
-  void OnUserError(UserContext aContext, nsresult aError);
-  void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
-
-  void AllocImpl();
-  void DeallocImpl();
-  void StartImpl(webrtc::CaptureCapability aCapability);
-  void StopImpl();
-  void SnapshotImpl();
-  void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
-  uint32_t ConvertPixelFormatToFOURCC(int aFormat);
-  void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
-
-  nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
-
-  // It sets the correct photo orientation via camera parameter according to
-  // current screen orientation.
-  nsresult UpdatePhotoOrientation();
-
-#endif
-
-  // This runnable is for creating a temporary file on the main thread.
-  NS_IMETHODIMP
-  Run()
-  {
-    nsCOMPtr<nsIFile> tmp;
-    nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tmp));
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    tmp->Append(NS_LITERAL_STRING("webrtc_snapshot.jpeg"));
-    rv = tmp->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    mSnapshotPath = new nsString();
-    rv = tmp->GetPath(*mSnapshotPath);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    return NS_OK;
-  }
-
-  void Refresh(int aIndex);
 
 protected:
-  ~MediaEngineWebRTCVideoSource() { Shutdown(); }
+  ~MediaEngineCameraVideoSource() {}
 
-private:
-  // Initialize the needed Video engine interfaces.
-  void Init();
-  void Shutdown();
+  static bool IsWithin(int32_t n, const dom::ConstrainLongRange& aRange);
+  static bool IsWithin(double n, const dom::ConstrainDoubleRange& aRange);
+  static int32_t Clamp(int32_t n, const dom::ConstrainLongRange& aRange);
+  static bool AreIntersecting(const dom::ConstrainLongRange& aA,
+                              const dom::ConstrainLongRange& aB);
+  static bool Intersect(dom::ConstrainLongRange& aA, const dom::ConstrainLongRange& aB);
+  void GuessCapability(const VideoTrackConstraintsN& aConstraints,
+                       const MediaEnginePrefs& aPrefs);
 
   // Engine variables.
-#ifdef MOZ_B2G_CAMERA
-  mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
-  // This is only modified on MainThread (AllocImpl and DeallocImpl)
-  nsRefPtr<ICameraControl> mCameraControl;
-  nsCOMPtr<nsIDOMFile> mLastCapture;
-  nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
-
-  // These are protected by mMonitor below
-  int mRotation;
-  int mCameraAngle; // See dom/base/ScreenOrientation.h
-  bool mBackCamera;
-  bool mOrientationChanged; // True when screen rotates.
-#else
-  webrtc::VideoEngine* mVideoEngine; // Weak reference, don't free.
-  webrtc::ViEBase* mViEBase;
-  webrtc::ViECapture* mViECapture;
-  webrtc::ViERender* mViERender;
-#endif
-  webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
-
-  int mCaptureIndex;
-  int mFps; // Track rate (30 fps by default)
-  int mMinFps; // Min rate we want to accept
-  MediaSourceType mMediaSource; // source of media (camera | application | screen)
 
   // mMonitor protects mImage access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack() and
   // image changes).  Note that mSources is not accessed from other threads
   // for video and is not protected.
-  Monitor mMonitor; // Monitor for processing WebRTC frames.
-  int mWidth, mHeight;
+  // All the mMonitor accesses are from the child classes.
+  Monitor mMonitor; // Monitor for processing Camera frames.
   nsRefPtr<layers::Image> mImage;
   nsRefPtr<layers::ImageContainer> mImageContainer;
-  bool mHasDirectListeners;
+  int mWidth, mHeight; // protected with mMonitor on Gonk due to different threading
+  // end of data protected by mMonitor
 
-  nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
+  nsTArray<SourceMediaStream*> mSources; // When this goes empty, we shut down HW
 
   bool mInitDone;
-  bool mInSnapshotMode;
-  nsString* mSnapshotPath;
+  bool mHasDirectListeners;
+  int mCaptureIndex;
+  int mFps; // Track rate (30 fps by default)
+
+  webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
 
   nsString mDeviceName;
   nsString mUniqueId;
-
-  void ChooseCapability(const VideoTrackConstraintsN &aConstraints,
-                        const MediaEnginePrefs &aPrefs);
-
-  void GuessCapability(const VideoTrackConstraintsN &aConstraints,
-                       const MediaEnginePrefs &aPrefs);
 };
 
-class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
-                                     public webrtc::VoEMediaProcess
-{
-public:
-  MediaEngineWebRTCAudioSource(nsIThread *aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
-                               int aIndex, const char* name, const char* uuid)
-    : mSamples(0)
-    , mVoiceEngine(aVoiceEnginePtr)
-    , mMonitor("WebRTCMic.Monitor")
-    , mThread(aThread)
-    , mCapIndex(aIndex)
-    , mChannel(-1)
-    , mInitDone(false)
-    , mStarted(false)
-    , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
-    , mEchoCancel(webrtc::kEcDefault)
-    , mAGC(webrtc::kAgcDefault)
-    , mNoiseSuppress(webrtc::kNsDefault)
-    , mPlayoutDelay(0)
-    , mNullTransport(nullptr) {
-    MOZ_ASSERT(aVoiceEnginePtr);
-    mState = kReleased;
-    mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
-    mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
-    Init();
-  }
 
-  virtual void GetName(nsAString&);
-  virtual void GetUUID(nsAString&);
-
-  virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
-                            const MediaEnginePrefs &aPrefs);
-  virtual nsresult Deallocate();
-  virtual nsresult Start(SourceMediaStream*, TrackID);
-  virtual nsresult Stop(SourceMediaStream*, TrackID);
-  virtual void SetDirectListeners(bool aHasDirectListeners) {};
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay);
-
-  virtual void NotifyPull(MediaStreamGraph* aGraph,
-                          SourceMediaStream *aSource,
-                          TrackID aId,
-                          StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime);
-
-  virtual bool IsFake() {
-    return false;
-  }
-
-  virtual const MediaSourceType GetMediaSource() {
-    return MediaSourceType::Microphone;
-  }
-
-  virtual nsresult TakePhoto(PhotoCallback* aCallback)
-  {
-    return NS_ERROR_NOT_IMPLEMENTED;
-  }
-
-  // VoEMediaProcess.
-  void Process(int channel, webrtc::ProcessingTypes type,
-               int16_t audio10ms[], int length,
-               int samplingFreq, bool isStereo);
-
-  NS_DECL_THREADSAFE_ISUPPORTS
-
-protected:
-  ~MediaEngineWebRTCAudioSource() { Shutdown(); }
-
-  // mSamples is an int to avoid conversions when comparing/etc to
-  // samplingFreq & length. Making mSamples protected instead of private is a
-  // silly way to avoid -Wunused-private-field warnings when PR_LOGGING is not
-  // #defined. mSamples is not actually expected to be used by a derived class.
-  int mSamples;
-
-private:
-  void Init();
-  void Shutdown();
-
-  webrtc::VoiceEngine* mVoiceEngine;
-  ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
-  ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
-  ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
-  ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
-  ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport;
-
-  // mMonitor protects mSources[] access/changes, and transitions of mState
-  // from kStarted to kStopped (which are combined with EndTrack()).
-  // mSources[] is accessed from webrtc threads.
-  Monitor mMonitor;
-  nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
-  nsCOMPtr<nsIThread> mThread;
-  int mCapIndex;
-  int mChannel;
-  TrackID mTrackID;
-  bool mInitDone;
-  bool mStarted;
-
-  nsString mDeviceName;
-  nsString mDeviceUUID;
-
-  bool mEchoOn, mAgcOn, mNoiseOn;
-  webrtc::EcModes  mEchoCancel;
-  webrtc::AgcModes mAGC;
-  webrtc::NsModes  mNoiseSuppress;
-  int32_t mPlayoutDelay;
-
-  NullTransport *mNullTransport;
-};
-
-class MediaEngineWebRTC : public MediaEngine
-{
-public:
-  explicit MediaEngineWebRTC(MediaEnginePrefs &aPrefs);
-
-  // Clients should ensure to clean-up sources video/audio sources
-  // before invoking Shutdown on this class.
-  void Shutdown();
-
-  virtual void EnumerateVideoDevices(MediaSourceType,
-                                    nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
-  virtual void EnumerateAudioDevices(MediaSourceType,
-                                    nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
-private:
-  ~MediaEngineWebRTC() {
-    Shutdown();
-#ifdef MOZ_B2G_CAMERA
-    AsyncLatencyLogger::Get()->Release();
-#endif
-    gFarendObserver = nullptr;
-  }
-
-  nsCOMPtr<nsIThread> mThread;
-
-  Mutex mMutex;
-
-  // protected with mMutex:
-  webrtc::VideoEngine* mScreenEngine;
-  webrtc::VideoEngine* mBrowserEngine;
-  webrtc::VideoEngine* mWinEngine;
-  webrtc::VideoEngine* mAppEngine;
-  webrtc::VideoEngine* mVideoEngine;
-  webrtc::VoiceEngine* mVoiceEngine;
-
-  // specialized configurations
-  webrtc::Config mAppEngineConfig;
-  webrtc::Config mWinEngineConfig;
-  webrtc::Config mScreenEngineConfig;
-  webrtc::Config mBrowserEngineConfig;
-
-  // Need this to avoid unneccesary WebRTC calls while enumerating.
-  bool mVideoEngineInit;
-  bool mAudioEngineInit;
-  bool mScreenEngineInit;
-  bool mBrowserEngineInit;
-  bool mWinEngineInit;
-  bool mAppEngineInit;
-  bool mHasTabVideoSource;
-
-  // Store devices we've already seen in a hashtable for quick return.
-  // Maps UUID to MediaEngineSource (one set for audio, one for video).
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
-};
-
-}
-
-#endif /* NSMEDIAENGINEWEBRTC_H_ */
+} // namespace mozilla
+#endif // MediaEngineCameraVideoSource_h
--- a/content/media/webrtc/MediaEngineDefault.cpp
+++ b/content/media/webrtc/MediaEngineDefault.cpp
@@ -34,20 +34,22 @@ namespace mozilla {
 using namespace mozilla::gfx;
 
 NS_IMPL_ISUPPORTS(MediaEngineDefaultVideoSource, nsITimerCallback)
 /**
  * Default video source.
  */
 
 MediaEngineDefaultVideoSource::MediaEngineDefaultVideoSource()
-  : mTimer(nullptr), mMonitor("Fake video"), mCb(16), mCr(16)
+  : MediaEngineVideoSource(kReleased)
+  , mTimer(nullptr)
+  , mMonitor("Fake video")
+  , mCb(16), mCr(16)
 {
   mImageContainer = layers::LayerManager::CreateImageContainer();
-  mState = kReleased;
 }
 
 MediaEngineDefaultVideoSource::~MediaEngineDefaultVideoSource()
 {}
 
 void
 MediaEngineDefaultVideoSource::GetName(nsAString& aName)
 {
@@ -165,60 +167,16 @@ MediaEngineDefaultVideoSource::Stop(Sour
 
   aSource->EndTrack(aID);
   aSource->Finish();
 
   mState = kStopped;
   return NS_OK;
 }
 
-nsresult
-MediaEngineDefaultVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
-  *aFile = nullptr;
-
-#ifndef MOZ_WIDGET_ANDROID
-  return NS_ERROR_NOT_IMPLEMENTED;
-#else
-  nsAutoString filePath;
-  nsCOMPtr<nsIFilePicker> filePicker = do_CreateInstance("@mozilla.org/filepicker;1");
-  if (!filePicker)
-    return NS_ERROR_FAILURE;
-
-  nsXPIDLString title;
-  nsContentUtils::GetLocalizedString(nsContentUtils::eFORMS_PROPERTIES, "Browse", title);
-  int16_t mode = static_cast<int16_t>(nsIFilePicker::modeOpen);
-
-  nsresult rv = filePicker->Init(nullptr, title, mode);
-  NS_ENSURE_SUCCESS(rv, rv);
-  filePicker->AppendFilters(nsIFilePicker::filterImages);
-
-  // XXX - This API should be made async
-  int16_t dialogReturn;
-  rv = filePicker->Show(&dialogReturn);
-  NS_ENSURE_SUCCESS(rv, rv);
-  if (dialogReturn == nsIFilePicker::returnCancel) {
-    *aFile = nullptr;
-    return NS_OK;
-  }
-
-  nsCOMPtr<nsIFile> localFile;
-  filePicker->GetFile(getter_AddRefs(localFile));
-
-  if (!localFile) {
-    *aFile = nullptr;
-    return NS_OK;
-  }
-
-  nsCOMPtr<nsIDOMFile> domFile = dom::File::CreateFromFile(nullptr, localFile);
-  domFile.forget(aFile);
-  return NS_OK;
-#endif
-}
-
 NS_IMETHODIMP
 MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
 {
   // Update the target color
   if (mCr <= 16) {
     if (mCb < 240) {
       mCb++;
     } else {
@@ -346,19 +304,19 @@ private:
 };
 
 /**
  * Default audio source.
  */
 NS_IMPL_ISUPPORTS(MediaEngineDefaultAudioSource, nsITimerCallback)
 
 MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
-  : mTimer(nullptr)
+  : MediaEngineAudioSource(kReleased)
+  , mTimer(nullptr)
 {
-  mState = kReleased;
 }
 
 MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource()
 {}
 
 void
 MediaEngineDefaultAudioSource::GetName(nsAString& aName)
 {
@@ -450,22 +408,16 @@ MediaEngineDefaultAudioSource::Stop(Sour
 
   aSource->EndTrack(aID);
   aSource->Finish();
 
   mState = kStopped;
   return NS_OK;
 }
 
-nsresult
-MediaEngineDefaultAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
-   return NS_ERROR_NOT_IMPLEMENTED;
-}
-
 NS_IMETHODIMP
 MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
 {
   AudioSegment segment;
   nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
 
   mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);
--- a/content/media/webrtc/MediaEngineDefault.h
+++ b/content/media/webrtc/MediaEngineDefault.h
@@ -41,17 +41,16 @@ public:
   virtual void GetUUID(nsAString&);
 
   virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
                             const MediaEnginePrefs &aPrefs);
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual void SetDirectListeners(bool aHasDirectListeners) {};
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
                           int32_t aPlayoutDelay) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
@@ -106,17 +105,16 @@ public:
   virtual void GetUUID(nsAString&);
 
   virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
                             const MediaEnginePrefs &aPrefs);
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
   virtual void SetDirectListeners(bool aHasDirectListeners) {};
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
                           int32_t aPlayoutDelay) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
copy from content/media/webrtc/MediaEngineWebRTCVideo.cpp
copy to content/media/webrtc/MediaEngineGonkVideoSource.cpp
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineGonkVideoSource.cpp
@@ -1,151 +1,59 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "MediaEngineGonkVideoSource.h"
 
-#include "MediaEngineWebRTC.h"
-#include "Layers.h"
-#include "ImageTypes.h"
-#include "ImageContainer.h"
-#include "mozilla/layers/GrallocTextureClient.h"
-#include "nsMemory.h"
+#define LOG_TAG "MediaEngineGonkVideoSource"
+
+#include <utils/Log.h>
+
+#include "GrallocImages.h"
+#include "VideoUtils.h"
+#include "ScreenOrientation.h"
+
+#include "libyuv.h"
 #include "mtransport/runnable_utils.h"
-#include "MediaTrackConstraints.h"
 
-#ifdef MOZ_B2G_CAMERA
-#include "GrallocImages.h"
-#include "libyuv.h"
-#include "mozilla/Hal.h"
-#include "ScreenOrientation.h"
-using namespace mozilla::dom;
-#endif
 namespace mozilla {
 
+using namespace mozilla::dom;
 using namespace mozilla::gfx;
-using dom::ConstrainLongRange;
-using dom::ConstrainDoubleRange;
-using dom::MediaTrackConstraintSet;
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
 #define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
 #else
 #define LOG(msg)
 #define LOGFRAME(msg)
 #endif
 
-/**
- * Webrtc video source.
- */
-#ifndef MOZ_B2G_CAMERA
-NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
-#else
-NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
-NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-#endif
-
-// ViEExternalRenderer Callback.
-#ifndef MOZ_B2G_CAMERA
-int
-MediaEngineWebRTCVideoSource::FrameSizeChange(
-   unsigned int w, unsigned int h, unsigned int streams)
-{
-  mWidth = w;
-  mHeight = h;
-  LOG(("Video FrameSizeChange: %ux%u", w, h));
-  return 0;
-}
-
-// ViEExternalRenderer Callback. Process every incoming frame here.
-int
-MediaEngineWebRTCVideoSource::DeliverFrame(
-   unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
-   void *handle)
-{
-  // mInSnapshotMode can only be set before the camera is turned on and
-  // the renderer is started, so this amounts to a 1-shot
-  if (mInSnapshotMode) {
-    // Set the condition variable to false and notify Snapshot().
-    MonitorAutoLock lock(mMonitor);
-    mInSnapshotMode = false;
-    lock.Notify();
-    return 0;
-  }
-
-  // Check for proper state.
-  if (mState != kStarted) {
-    LOG(("DeliverFrame: video not started"));
-    return 0;
-  }
-
-  if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
-    MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
-    return 0;
-  }
-
-  // Create a video frame and append it to the track.
-  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
-
-  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
-  uint8_t* frame = static_cast<uint8_t*> (buffer);
-  const uint8_t lumaBpp = 8;
-  const uint8_t chromaBpp = 4;
-
-  // Take lots of care to round up!
-  layers::PlanarYCbCrData data;
-  data.mYChannel = frame;
-  data.mYSize = IntSize(mWidth, mHeight);
-  data.mYStride = (mWidth * lumaBpp + 7)/ 8;
-  data.mCbCrStride = (mWidth * chromaBpp + 7) / 8;
-  data.mCbChannel = frame + mHeight * data.mYStride;
-  data.mCrChannel = data.mCbChannel + ((mHeight+1)/2) * data.mCbCrStride;
-  data.mCbCrSize = IntSize((mWidth+1)/ 2, (mHeight+1)/ 2);
-  data.mPicX = 0;
-  data.mPicY = 0;
-  data.mPicSize = IntSize(mWidth, mHeight);
-  data.mStereoMode = StereoMode::MONO;
-
-  videoImage->SetData(data);
-
-#ifdef DEBUG
-  static uint32_t frame_num = 0;
-  LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
-            mWidth, mHeight, time_stamp, render_time));
-#endif
-
-  // we don't touch anything in 'this' until here (except for snapshot,
-  // which has it's own lock)
-  MonitorAutoLock lock(mMonitor);
-
-  // implicitly releases last image
-  mImage = image.forget();
-
-  return 0;
-}
-#endif
+// We are subclassed from CameraControlListener, which implements a
+// threadsafe reference-count for us.
+NS_IMPL_QUERY_INTERFACE(MediaEngineGonkVideoSource, nsISupports)
+NS_IMPL_ADDREF_INHERITED(MediaEngineGonkVideoSource, CameraControlListener)
+NS_IMPL_RELEASE_INHERITED(MediaEngineGonkVideoSource, CameraControlListener)
 
 // Called if the graph thinks it's running out of buffered video; repeat
-// the last frame for whatever minimum period it think it needs.  Note that
+// the last frame for whatever minimum period it thinks it needs. Note that
 // this means that no *real* frame can be inserted during this period.
 void
-MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
-                                         SourceMediaStream *aSource,
-                                         TrackID aID,
-                                         StreamTime aDesiredTime,
-                                         TrackTicks &aLastEndTime)
+MediaEngineGonkVideoSource::NotifyPull(MediaStreamGraph* aGraph,
+                                       SourceMediaStream* aSource,
+                                       TrackID aID,
+                                       StreamTime aDesiredTime,
+                                       TrackTicks& aLastEndTime)
 {
   VideoSegment segment;
 
   MonitorAutoLock lock(mMonitor);
   // B2G does AddTrack, but holds kStarted until the hardware changes state.
-  // So mState could be kReleased here.  We really don't care about the state,
+  // So mState could be kReleased here. We really don't care about the state,
   // though.
 
   // Note: we're not giving up mImage here
   nsRefPtr<layers::Image> image = mImage;
   TrackTicks target = aSource->TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
   TrackTicks delta = target - aLastEndTime;
   LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
             (int64_t) target, (int64_t) delta, image ? "" : "<null>"));
@@ -167,589 +75,196 @@ MediaEngineWebRTCVideoSource::NotifyPull
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     if (aSource->AppendToTrack(aID, &(segment))) {
       aLastEndTime = target;
     }
   }
 }
 
-static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
-  return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
-  return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
-  return std::max(aRange.mMin, std::min(n, aRange.mMax));
-}
-
-static bool
-AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
-  return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
-}
-
-static bool
-Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
-  MOZ_ASSERT(AreIntersecting(aA, aB));
-  aA.mMin = std::max(aA.mMin, aB.mMin);
-  aA.mMax = std::min(aA.mMax, aB.mMax);
-  return true;
-}
-
-static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
-                                 const webrtc::CaptureCapability& aCandidate) {
-  if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
-      !IsWithin(aCandidate.height, aConstraints.mHeight)) {
-    return false;
-  }
-  if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
-    return false;
-  }
-  return true;
-}
-
 void
-MediaEngineWebRTCVideoSource::ChooseCapability(
-    const VideoTrackConstraintsN &aConstraints,
-    const MediaEnginePrefs &aPrefs)
+MediaEngineGonkVideoSource::ChooseCapability(const VideoTrackConstraintsN& aConstraints,
+                                             const MediaEnginePrefs& aPrefs)
 {
-#ifdef MOZ_B2G_CAMERA
   return GuessCapability(aConstraints, aPrefs);
-#else
-  NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
-  int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
-  if (num <= 0) {
-    // Mac doesn't support capabilities.
-    return GuessCapability(aConstraints, aPrefs);
-  }
-
-  // The rest is the full algorithm for cameras that can list their capabilities.
-
-  LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
-       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
-  typedef nsTArray<uint8_t> SourceSet;
-
-  SourceSet candidateSet;
-  for (int i = 0; i < num; i++) {
-    candidateSet.AppendElement(i);
-  }
-
-  // Pick among capabilities: First apply required constraints.
-
-  for (uint32_t i = 0; i < candidateSet.Length();) {
-    webrtc::CaptureCapability cap;
-    mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
-                                      candidateSet[i], cap);
-    if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
-      candidateSet.RemoveElementAt(i);
-    } else {
-      ++i;
-    }
-  }
-
-  SourceSet tailSet;
-
-  // Then apply advanced (formerly known as optional) constraints.
-
-  if (aConstraints.mAdvanced.WasPassed()) {
-    auto &array = aConstraints.mAdvanced.Value();
-
-    for (uint32_t i = 0; i < array.Length(); i++) {
-      SourceSet rejects;
-      for (uint32_t j = 0; j < candidateSet.Length();) {
-        webrtc::CaptureCapability cap;
-        mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
-                                          candidateSet[j], cap);
-        if (!SatisfyConstraintSet(array[i], cap)) {
-          rejects.AppendElement(candidateSet[j]);
-          candidateSet.RemoveElementAt(j);
-        } else {
-          ++j;
-        }
-      }
-      (candidateSet.Length()? tailSet : candidateSet).MoveElementsFrom(rejects);
-    }
-  }
-
-  if (!candidateSet.Length()) {
-    candidateSet.AppendElement(0);
-  }
-
-  int prefWidth = aPrefs.GetWidth();
-  int prefHeight = aPrefs.GetHeight();
-
-  // Default is closest to available capability but equal to or below;
-  // otherwise closest above.  Since we handle the num=0 case above and
-  // take the first entry always, we can never exit uninitialized.
-
-  webrtc::CaptureCapability cap;
-  bool higher = true;
-  for (uint32_t i = 0; i < candidateSet.Length(); i++) {
-    mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
-                                      kMaxUniqueIdLength, candidateSet[i], cap);
-    if (higher) {
-      if (i == 0 ||
-          (mCapability.width > cap.width && mCapability.height > cap.height)) {
-        // closer than the current choice
-        mCapability = cap;
-        // FIXME: expose expected capture delay?
-      }
-      if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
-        higher = false;
-      }
-    } else {
-      if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
-          cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
-        continue;
-      }
-      if (mCapability.width < cap.width && mCapability.height < cap.height) {
-        mCapability = cap;
-        // FIXME: expose expected capture delay?
-      }
-    }
-    // Same resolution, maybe better format or FPS match
-    if (mCapability.width == cap.width && mCapability.height == cap.height) {
-      // FPS too low
-      if (cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
-        continue;
-      }
-      // Better match
-      if (cap.maxFPS < mCapability.maxFPS) {
-        mCapability = cap;
-      } else if (cap.maxFPS == mCapability.maxFPS) {
-        // Resolution and FPS the same, check format
-        if (cap.rawType == webrtc::RawVideoType::kVideoI420
-          || cap.rawType == webrtc::RawVideoType::kVideoYUY2
-          || cap.rawType == webrtc::RawVideoType::kVideoYV12) {
-          mCapability = cap;
-        }
-      }
-    }
-  }
-  LOG(("chose cap %dx%d @%dfps codec %d raw %d",
-       mCapability.width, mCapability.height, mCapability.maxFPS,
-       mCapability.codecType, mCapability.rawType));
-#endif
-}
-
-// A special version of the algorithm for cameras that don't list capabilities.
-
-void
-MediaEngineWebRTCVideoSource::GuessCapability(
-    const VideoTrackConstraintsN &aConstraints,
-    const MediaEnginePrefs &aPrefs)
-{
-  LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
-       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
-  // In short: compound constraint-ranges and use pref as ideal.
-
-  ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
-  ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
-
-  if (aConstraints.mAdvanced.WasPassed()) {
-    const auto& advanced = aConstraints.mAdvanced.Value();
-    for (uint32_t i = 0; i < advanced.Length(); i++) {
-      if (AreIntersecting(cWidth, advanced[i].mWidth) &&
-          AreIntersecting(cHeight, advanced[i].mHeight)) {
-        Intersect(cWidth, advanced[i].mWidth);
-        Intersect(cHeight, advanced[i].mHeight);
-      }
-    }
-  }
-  // Detect Mac HD cams and give them some love in the form of a dynamic default
-  // since that hardware switches between 4:3 at low res and 16:9 at higher res.
-  //
-  // Logic is: if we're relying on defaults in aPrefs, then
-  // only use HD pref when non-HD pref is too small and HD pref isn't too big.
-
-  bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
-                mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
-                (aPrefs.GetWidth() < cWidth.mMin ||
-                 aPrefs.GetHeight() < cHeight.mMin) &&
-                !(aPrefs.GetWidth(true) > cWidth.mMax ||
-                  aPrefs.GetHeight(true) > cHeight.mMax));
-  int prefWidth = aPrefs.GetWidth(macHD);
-  int prefHeight = aPrefs.GetHeight(macHD);
-
-  // Clamp width and height without distorting inherent aspect too much.
-
-  if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
-    // If both are within, we get the default (pref) aspect.
-    // If neither are within, we get the aspect of the enclosing constraint.
-    // Either are presumably reasonable (presuming constraints are sane).
-    mCapability.width = Clamp(prefWidth, cWidth);
-    mCapability.height = Clamp(prefHeight, cHeight);
-  } else {
-    // But if only one clips (e.g. width), the resulting skew is undesirable:
-    //       .------------.
-    //       | constraint |
-    //  .----+------------+----.
-    //  |    |            |    |
-    //  |pref|  result    |    |   prefAspect != resultAspect
-    //  |    |            |    |
-    //  '----+------------+----'
-    //       '------------'
-    //  So in this case, preserve prefAspect instead:
-    //  .------------.
-    //  | constraint |
-    //  .------------.
-    //  |pref        |             prefAspect is unchanged
-    //  '------------'
-    //  |            |
-    //  '------------'
-    if (IsWithin(prefWidth, cWidth)) {
-      mCapability.height = Clamp(prefHeight, cHeight);
-      mCapability.width = Clamp((mCapability.height * prefWidth) /
-                                prefHeight, cWidth);
-    } else {
-      mCapability.width = Clamp(prefWidth, cWidth);
-      mCapability.height = Clamp((mCapability.width * prefHeight) /
-                                 prefWidth, cHeight);
-    }
-  }
-  mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
-  LOG(("chose cap %dx%d @%dfps",
-       mCapability.width, mCapability.height, mCapability.maxFPS));
-}
-
-void
-MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
-{
-  aName = mDeviceName;
-}
-
-void
-MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
-{
-  aUUID = mUniqueId;
 }
 
 nsresult
-MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
-                                       const MediaEnginePrefs &aPrefs)
+MediaEngineGonkVideoSource::Allocate(const VideoTrackConstraintsN& aConstraints,
+                                     const MediaEnginePrefs& aPrefs)
 {
   LOG((__FUNCTION__));
-#ifdef MOZ_B2G_CAMERA
+
   ReentrantMonitorAutoEnter sync(mCallbackMonitor);
   if (mState == kReleased && mInitDone) {
     ChooseCapability(aConstraints, aPrefs);
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::AllocImpl));
+    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+                                         &MediaEngineGonkVideoSource::AllocImpl));
     mCallbackMonitor.Wait();
     if (mState != kAllocated) {
       return NS_ERROR_FAILURE;
     }
   }
-#else
-  if (mState == kReleased && mInitDone) {
-    // Note: if shared, we don't allow a later opener to affect the resolution.
-    // (This may change depending on spec changes for Constraints/settings)
-
-    ChooseCapability(aConstraints, aPrefs);
-
-    if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
-                                           kMaxUniqueIdLength, mCaptureIndex)) {
-      return NS_ERROR_FAILURE;
-    }
-    mState = kAllocated;
-    LOG(("Video device %d allocated", mCaptureIndex));
-  } else if (mSources.IsEmpty()) {
-    LOG(("Video device %d reallocated", mCaptureIndex));
-  } else {
-    LOG(("Video device %d allocated shared", mCaptureIndex));
-  }
-#endif
 
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCVideoSource::Deallocate()
+MediaEngineGonkVideoSource::Deallocate()
 {
   LOG((__FUNCTION__));
   if (mSources.IsEmpty()) {
-#ifdef MOZ_B2G_CAMERA
+
     ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
+
     if (mState != kStopped && mState != kAllocated) {
       return NS_ERROR_FAILURE;
     }
-#ifdef MOZ_B2G_CAMERA
+
     // We do not register success callback here
 
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::DeallocImpl));
+    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+                                         &MediaEngineGonkVideoSource::DeallocImpl));
     mCallbackMonitor.Wait();
     if (mState != kReleased) {
       return NS_ERROR_FAILURE;
     }
-#elif XP_MACOSX
-    // Bug 829907 - on mac, in shutdown, the mainthread stops processing
-    // 'native' events, and the QTKit code uses events to the main native CFRunLoop
-    // in order to provide thread safety.  In order to avoid this locking us up,
-    // release the ViE capture device synchronously on MainThread (so the native
-    // event isn't needed).
-    // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
-    // XXX It might be nice to only do this if we're in shutdown...  Hard to be
-    // sure when that is though.
-    // Thread safety: a) we call this synchronously, and don't use ViECapture from
-    // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
-    // an exclusive object lock and deletes it in a critical section, so all in all
-    // this should be safe threadwise.
-    NS_DispatchToMainThread(WrapRunnable(mViECapture,
-                                         &webrtc::ViECapture::ReleaseCaptureDevice,
-                                         mCaptureIndex),
-                            NS_DISPATCH_SYNC);
-#else
-    mViECapture->ReleaseCaptureDevice(mCaptureIndex);
-#endif
+
     mState = kReleased;
     LOG(("Video device %d deallocated", mCaptureIndex));
   } else {
     LOG(("Video device %d deallocated but still in use", mCaptureIndex));
   }
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineGonkVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
 {
   LOG((__FUNCTION__));
-#ifndef MOZ_B2G_CAMERA
-  int error = 0;
-#endif
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
   }
 
   mSources.AppendElement(aStream);
 
   aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 
-#ifdef MOZ_B2G_CAMERA
   ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
 
   if (mState == kStarted) {
     return NS_OK;
   }
   mImageContainer = layers::LayerManager::CreateImageContainer();
 
-#ifdef MOZ_B2G_CAMERA
-  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                       &MediaEngineWebRTCVideoSource::StartImpl,
+  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+                                       &MediaEngineGonkVideoSource::StartImpl,
                                        mCapability));
   mCallbackMonitor.Wait();
   if (mState != kStarted) {
     return NS_ERROR_FAILURE;
   }
-#else
-  mState = kStarted;
-  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
-  if (error == -1) {
-    return NS_ERROR_FAILURE;
-  }
-
-  error = mViERender->StartRender(mCaptureIndex);
-  if (error == -1) {
-    return NS_ERROR_FAILURE;
-  }
-
-  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
-    return NS_ERROR_FAILURE;
-  }
-#endif
 
   return NS_OK;
 }
 
 nsresult
-MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
+MediaEngineGonkVideoSource::Stop(SourceMediaStream* aSource, TrackID aID)
 {
   LOG((__FUNCTION__));
   if (!mSources.RemoveElement(aSource)) {
     // Already stopped - this is allowed
     return NS_OK;
   }
   if (!mSources.IsEmpty()) {
     return NS_OK;
   }
-#ifdef MOZ_B2G_CAMERA
+
   ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
+
   if (mState != kStarted) {
     return NS_ERROR_FAILURE;
   }
 
   {
     MonitorAutoLock lock(mMonitor);
     mState = kStopped;
     aSource->EndTrack(aID);
     // Drop any cached image so we don't start with a stale image on next
     // usage
     mImage = nullptr;
   }
-#ifdef MOZ_B2G_CAMERA
-  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                       &MediaEngineWebRTCVideoSource::StopImpl));
-#else
-  mViERender->StopRender(mCaptureIndex);
-  mViERender->RemoveRenderer(mCaptureIndex);
-  mViECapture->StopCapture(mCaptureIndex);
-#endif
+
+  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+                                       &MediaEngineGonkVideoSource::StopImpl));
 
   return NS_OK;
 }
 
-void
-MediaEngineWebRTCVideoSource::SetDirectListeners(bool aHasDirectListeners)
-{
-  LOG((__FUNCTION__));
-  mHasDirectListeners = aHasDirectListeners;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
-  return NS_ERROR_NOT_IMPLEMENTED;
-}
-
 /**
- * Initialization and Shutdown functions for the video source, called by the
- * constructor and destructor respectively.
- */
+ * Initialization and Shutdown functions for the video source, called by the
+ * constructor and destructor respectively.
+ */
 
 void
-MediaEngineWebRTCVideoSource::Init()
+MediaEngineGonkVideoSource::Init()
 {
-#ifdef MOZ_B2G_CAMERA
   nsAutoCString deviceName;
   ICameraControl::GetCameraName(mCaptureIndex, deviceName);
   CopyUTF8toUTF16(deviceName, mDeviceName);
   CopyUTF8toUTF16(deviceName, mUniqueId);
-#else
-  // fix compile warning for these being unused. (remove once used)
-  (void) mFps;
-  (void) mMinFps;
-
-  LOG((__FUNCTION__));
-  if (mVideoEngine == nullptr) {
-    return;
-  }
-
-  mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
-  if (mViEBase == nullptr) {
-    return;
-  }
-
-  // Get interfaces for capture, render for now
-  mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
-  mViERender = webrtc::ViERender::GetInterface(mVideoEngine);
-
-  if (mViECapture == nullptr || mViERender == nullptr) {
-    return;
-  }
-
-  char deviceName[kMaxDeviceNameLength];
-  char uniqueId[kMaxUniqueIdLength];
-  if (mViECapture->GetCaptureDevice(mCaptureIndex,
-                                    deviceName, kMaxDeviceNameLength,
-                                    uniqueId, kMaxUniqueIdLength)) {
-    return;
-  }
-
-  CopyUTF8toUTF16(deviceName, mDeviceName);
-  CopyUTF8toUTF16(uniqueId, mUniqueId);
-#endif
 
   mInitDone = true;
 }
 
 void
-MediaEngineWebRTCVideoSource::Shutdown()
+MediaEngineGonkVideoSource::Shutdown()
 {
   LOG((__FUNCTION__));
   if (!mInitDone) {
     return;
   }
-#ifdef MOZ_B2G_CAMERA
+
   ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
+
   if (mState == kStarted) {
     while (!mSources.IsEmpty()) {
       Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
     }
     MOZ_ASSERT(mState == kStopped);
   }
 
   if (mState == kAllocated || mState == kStopped) {
     Deallocate();
   }
-#ifndef MOZ_B2G_CAMERA
-  mViECapture->Release();
-  mViERender->Release();
-  mViEBase->Release();
-#endif
+
   mState = kReleased;
   mInitDone = false;
 }
 
-void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
-  // NOTE: mCaptureIndex might have changed when allocated!
-  // Use aIndex to update information, but don't change mCaptureIndex!!
-#ifdef MOZ_B2G_CAMERA
-  // Caller looked up this source by uniqueId; since deviceName == uniqueId nothing else changes
-#else
-  // Caller looked up this source by uniqueId, so it shouldn't change
-  char deviceName[kMaxDeviceNameLength];
-  char uniqueId[kMaxUniqueIdLength];
-
-  if (mViECapture->GetCaptureDevice(aIndex,
-                                    deviceName, sizeof(deviceName),
-                                    uniqueId, sizeof(uniqueId))) {
-    return;
-  }
-
-  CopyUTF8toUTF16(deviceName, mDeviceName);
-#ifdef DEBUG
-  nsString temp;
-  CopyUTF8toUTF16(uniqueId, temp);
-  MOZ_ASSERT(temp.Equals(mUniqueId));
-#endif
-#endif
-}
-
-#ifdef MOZ_B2G_CAMERA
-
 // All these functions must be run on MainThread!
 void
-MediaEngineWebRTCVideoSource::AllocImpl() {
+MediaEngineGonkVideoSource::AllocImpl() {
   MOZ_ASSERT(NS_IsMainThread());
   ReentrantMonitorAutoEnter sync(mCallbackMonitor);
 
   mCameraControl = ICameraControl::Create(mCaptureIndex);
   if (mCameraControl) {
     mState = kAllocated;
     // Add this as a listener for CameraControl events. We don't need
     // to explicitly remove this--destroying the CameraControl object
     // in DeallocImpl() will do that for us.
     mCameraControl->AddListener(this);
   }
-
   mCallbackMonitor.Notify();
 }
 
 void
-MediaEngineWebRTCVideoSource::DeallocImpl() {
+MediaEngineGonkVideoSource::DeallocImpl() {
   MOZ_ASSERT(NS_IsMainThread());
 
   mCameraControl = nullptr;
 }
 
 // The same algorithm from bug 840244
 static int
 GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
@@ -770,96 +285,91 @@ GetRotateAmount(ScreenOrientation aScree
    default:
       MOZ_ASSERT(false);
       break;
   }
 
   int result;
 
   if (aBackCamera) {
-    //back camera
+    // back camera
     result = (aCameraMountAngle - screenAngle + 360) % 360;
   } else {
-    //front camera
+    // front camera
     result = (aCameraMountAngle + screenAngle) % 360;
   }
   return result;
 }
 
 // undefine to remove on-the-fly rotation support
 #define DYNAMIC_GUM_ROTATION
 
 void
-MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
+MediaEngineGonkVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
 #ifdef DYNAMIC_GUM_ROTATION
   if (mHasDirectListeners) {
     // aka hooked to PeerConnection
     MonitorAutoLock enter(mMonitor);
     mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
 
     LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
          mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
   }
 #endif
 
   mOrientationChanged = true;
 }
 
 void
-MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
+MediaEngineGonkVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
   MOZ_ASSERT(NS_IsMainThread());
 
   ICameraControl::Configuration config;
   config.mMode = ICameraControl::kPictureMode;
   config.mPreviewSize.width = aCapability.width;
   config.mPreviewSize.height = aCapability.height;
   mCameraControl->Start(&config);
   mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
 
   hal::RegisterScreenConfigurationObserver(this);
 }
 
 void
-MediaEngineWebRTCVideoSource::StopImpl() {
+MediaEngineGonkVideoSource::StopImpl() {
   MOZ_ASSERT(NS_IsMainThread());
 
   hal::UnregisterScreenConfigurationObserver(this);
   mCameraControl->Stop();
 }
 
 void
-MediaEngineWebRTCVideoSource::SnapshotImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-  mCameraControl->TakePicture();
-}
-
-void
-MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
+MediaEngineGonkVideoSource::OnHardwareStateChange(HardwareState aState)
 {
   ReentrantMonitorAutoEnter sync(mCallbackMonitor);
   if (aState == CameraControlListener::kHardwareClosed) {
     // When the first CameraControl listener is added, it gets pushed
     // the current state of the camera--normally 'closed'. We only
     // pay attention to that state if we've progressed out of the
     // allocated state.
     if (mState != kAllocated) {
       mState = kReleased;
       mCallbackMonitor.Notify();
     }
   } else {
     // Can't read this except on MainThread (ugh)
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::GetRotation));
+    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+                                         &MediaEngineGonkVideoSource::GetRotation));
     mState = kStarted;
     mCallbackMonitor.Notify();
   }
 }
 
+
 void
-MediaEngineWebRTCVideoSource::GetRotation()
+MediaEngineGonkVideoSource::GetRotation()
 {
   MOZ_ASSERT(NS_IsMainThread());
   MonitorAutoLock enter(mMonitor);
 
   mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
   MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
              mCameraAngle == 270);
   hal::ScreenConfiguration config;
@@ -872,17 +382,17 @@ MediaEngineWebRTCVideoSource::GetRotatio
   }
 
   mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
   LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
        mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
 }
 
 void
-MediaEngineWebRTCVideoSource::OnUserError(UserContext aContext, nsresult aError)
+MediaEngineGonkVideoSource::OnUserError(UserContext aContext, nsresult aError)
 {
   {
     // Scope the monitor, since there is another monitor below and we don't want
     // unexpected deadlock.
     ReentrantMonitorAutoEnter sync(mCallbackMonitor);
     mCallbackMonitor.Notify();
   }
 
@@ -916,17 +426,17 @@ MediaEngineWebRTCVideoSource::OnUserErro
     MonitorAutoLock lock(mMonitor);
     if (mPhotoCallbacks.Length()) {
       NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
     }
   }
 }
 
 void
-MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
+MediaEngineGonkVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
 {
   // It needs to start preview because Gonk camera will stop preview while
   // taking picture.
   mCameraControl->StartPreview();
 
   // Create a main thread runnable to generate a blob and call all current queued
   // PhotoCallbacks.
   class GenerateBlobRunnable : public nsRunnable {
@@ -965,18 +475,80 @@ MediaEngineWebRTCVideoSource::OnTakePict
   // PhotoCallbacks in this runnable.
   MonitorAutoLock lock(mMonitor);
   if (mPhotoCallbacks.Length()) {
     NS_DispatchToMainThread(
       new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
   }
 }
 
+nsresult
+MediaEngineGonkVideoSource::TakePhoto(PhotoCallback* aCallback)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  MonitorAutoLock lock(mMonitor);
+
+  // If another callback already exists, a captured picture is on the way,
+  // so there is no need to call TakePicture() again.
+  if (!mPhotoCallbacks.Length()) {
+    nsresult rv;
+    if (mOrientationChanged) {
+      UpdatePhotoOrientation();
+    }
+    rv = mCameraControl->TakePicture();
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+  }
+
+  mPhotoCallbacks.AppendElement(aCallback);
+
+  return NS_OK;
+}
+
+nsresult
+MediaEngineGonkVideoSource::UpdatePhotoOrientation()
+{
+  MOZ_ASSERT(NS_IsMainThread());
+
+  hal::ScreenConfiguration config;
+  hal::GetCurrentScreenConfiguration(&config);
+
+  // The rotation angle is clockwise.
+  int orientation = 0;
+  switch (config.orientation()) {
+    case eScreenOrientation_PortraitPrimary:
+      orientation = 0;
+      break;
+    case eScreenOrientation_PortraitSecondary:
+      orientation = 180;
+      break;
+   case eScreenOrientation_LandscapePrimary:
+      orientation = 270;
+      break;
+   case eScreenOrientation_LandscapeSecondary:
+      orientation = 90;
+      break;
+  }
+
+  // The front camera uses the inverse angle compared to the back camera.
+  orientation = (mBackCamera ? orientation : (-orientation));
+
+  ICameraControlParameterSetAutoEnter batch(mCameraControl);
+  // It changes the orientation value in EXIF information only.
+  mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
+
+  mOrientationChanged = false;
+
+  return NS_OK;
+}
+
 uint32_t
-MediaEngineWebRTCVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
+MediaEngineGonkVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
 {
   switch (aFormat) {
   case HAL_PIXEL_FORMAT_RGBA_8888:
     return libyuv::FOURCC_BGRA;
   case HAL_PIXEL_FORMAT_YCrCb_420_SP:
     return libyuv::FOURCC_NV21;
   case HAL_PIXEL_FORMAT_YV12:
     return libyuv::FOURCC_YV12;
@@ -984,17 +556,17 @@ MediaEngineWebRTCVideoSource::ConvertPix
     LOG((" xxxxx Unknown pixel format %d", aFormat));
     MOZ_ASSERT(false, "Unknown pixel format.");
     return libyuv::FOURCC_ANY;
     }
   }
 }
 
 void
-MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
+MediaEngineGonkVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
   layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
   android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
   void *pMem = nullptr;
   uint32_t size = aWidth * aHeight * 3 / 2;
 
   graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
 
   uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
@@ -1044,17 +616,17 @@ MediaEngineWebRTCVideoSource::RotateImag
 
   videoImage->SetDataNoCopy(data);
 
   // implicitly releases last image
   mImage = image.forget();
 }
 
 bool
-MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
+MediaEngineGonkVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
   {
     ReentrantMonitorAutoEnter sync(mCallbackMonitor);
     if (mState == kStopped) {
       return false;
     }
   }
 
   MonitorAutoLock enter(mMonitor);
@@ -1069,73 +641,9 @@ MediaEngineWebRTCVideoSource::OnNewPrevi
     mWidth = aWidth;
     mHeight = aHeight;
     LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
   }
 
   return true; // return true because we're accepting the frame
 }
 
-nsresult
-MediaEngineWebRTCVideoSource::TakePhoto(PhotoCallback* aCallback)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  MonitorAutoLock lock(mMonitor);
-
-  // If other callback exists, that means there is a captured picture on the way,
-  // it doesn't need to TakePicture() again.
-  if (!mPhotoCallbacks.Length()) {
-    nsresult rv;
-    if (mOrientationChanged) {
-      UpdatePhotoOrientation();
-    }
-    rv = mCameraControl->TakePicture();
-    if (NS_FAILED(rv)) {
-      return rv;
-    }
-  }
-
-  mPhotoCallbacks.AppendElement(aCallback);
-
-  return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::UpdatePhotoOrientation()
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  hal::ScreenConfiguration config;
-  hal::GetCurrentScreenConfiguration(&config);
-
-  // The rotation angle is clockwise.
-  int orientation = 0;
-  switch (config.orientation()) {
-    case eScreenOrientation_PortraitPrimary:
-      orientation = 0;
-      break;
-    case eScreenOrientation_PortraitSecondary:
-      orientation = 180;
-      break;
-   case eScreenOrientation_LandscapePrimary:
-      orientation = 270;
-      break;
-   case eScreenOrientation_LandscapeSecondary:
-      orientation = 90;
-      break;
-  }
-
-  // Front camera is inverse angle comparing to back camera.
-  orientation = (mBackCamera ? orientation : (-orientation));
-
-  ICameraControlParameterSetAutoEnter batch(mCameraControl);
-  // It changes the orientation value in EXIF information only.
-  mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
-
-  mOrientationChanged = false;
-
-  return NS_OK;
-}
-
-#endif
-
-}
+} // namespace mozilla
copy from content/media/webrtc/MediaEngineWebRTC.h
copy to content/media/webrtc/MediaEngineGonkVideoSource.h
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineGonkVideoSource.h
@@ -1,462 +1,113 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef MEDIAENGINEWEBRTC_H_
-#define MEDIAENGINEWEBRTC_H_
-
-#include "prcvar.h"
-#include "prthread.h"
-#include "nsIThread.h"
-#include "nsIRunnable.h"
-
-#include "mozilla/dom/File.h"
-#include "mozilla/Mutex.h"
-#include "mozilla/Monitor.h"
-#include "nsCOMPtr.h"
-#include "nsThreadUtils.h"
-#include "DOMMediaStream.h"
-#include "nsDirectoryServiceDefs.h"
-#include "nsComponentManagerUtils.h"
-#include "nsRefPtrHashtable.h"
-
-#include "VideoUtils.h"
-#include "MediaEngine.h"
-#include "VideoSegment.h"
-#include "AudioSegment.h"
-#include "StreamBuffer.h"
-#include "MediaStreamGraph.h"
+#ifndef MediaEngineGonkVideoSource_h_
+#define MediaEngineGonkVideoSource_h_
 
-#include "MediaEngineWrapper.h"
-#include "mozilla/dom/MediaStreamTrackBinding.h"
-// WebRTC library includes follow
-#include "webrtc/common.h"
-// Audio Engine
-#include "webrtc/voice_engine/include/voe_base.h"
-#include "webrtc/voice_engine/include/voe_codec.h"
-#include "webrtc/voice_engine/include/voe_hardware.h"
-#include "webrtc/voice_engine/include/voe_network.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_volume_control.h"
-#include "webrtc/voice_engine/include/voe_external_media.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_call_report.h"
-
-// Video Engine
-// conflicts with #include of scoped_ptr.h
-#undef FF
-#include "webrtc/video_engine/include/vie_base.h"
-#include "webrtc/video_engine/include/vie_codec.h"
-#include "webrtc/video_engine/include/vie_render.h"
-#include "webrtc/video_engine/include/vie_capture.h"
-#ifdef MOZ_B2G_CAMERA
-#include "CameraControlListener.h"
-#include "ICameraControl.h"
-#include "ImageContainer.h"
-#include "nsGlobalWindow.h"
-#include "prprf.h"
-#include "mozilla/Hal.h"
+#ifndef MOZ_B2G_CAMERA
+#error MediaEngineGonkVideoSource is only available when MOZ_B2G_CAMERA is defined.
 #endif
 
-#include "NullTransport.h"
-#include "AudioOutputObserver.h"
+#include "CameraControlListener.h"
+#include "MediaEngineCameraVideoSource.h"
+
+#include "mozilla/Hal.h"
+#include "mozilla/ReentrantMonitor.h"
 
 namespace mozilla {
 
-#ifdef MOZ_B2G_CAMERA
-class CameraAllocateRunnable;
-class GetCameraNameRunnable;
-#endif
-
 /**
- * The WebRTC implementation of the MediaEngine interface.
+ * The Gonk (B2G) camera implementation of the MediaEngine video source interface.
  *
  * On B2G platform, member data may accessed from different thread after construction:
  *
  * MediaThread:
- *   mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
- *   mImageContainer, mSources, mState, mImage
+ * mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
+ * mSources, mImageContainer, mLastCapture.
  *
- * MainThread:
- *   mCaptureIndex, mLastCapture, mState,  mWidth, mHeight,
+ * CameraThread:
+ * mDOMCameraControl, mCaptureIndex, mCameraThread, mWindowId, mCameraManager,
+ * mNativeCameraControl, mPreviewStream, mState, mLastCapture, mWidth, mHeight
  *
- * Where mWidth, mHeight, mImage, mPhotoCallbacks are protected by mMonitor
- *       mState is protected by mCallbackMonitor
+ * Where mWidth, mHeight, mImage, mPhotoCallbacks, mRotation, mCameraAngle and
+ * mBackCamera are protected by mMonitor (in parent MediaEngineCameraVideoSource)
+ * mState and mLastCapture are protected by mCallbackMonitor
  * Other variable is accessed only from single thread
  */
-class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
-                                   , public nsRunnable
-#ifdef MOZ_B2G_CAMERA
-                                   , public CameraControlListener
-                                   , public mozilla::hal::ScreenConfigurationObserver
-#else
-                                   , public webrtc::ExternalRenderer
-#endif
+class MediaEngineGonkVideoSource : public MediaEngineCameraVideoSource
+                                 , public mozilla::hal::ScreenConfigurationObserver
+                                 , public CameraControlListener
 {
 public:
-#ifdef MOZ_B2G_CAMERA
-  MediaEngineWebRTCVideoSource(int aIndex,
-                               MediaSourceType aMediaSource = MediaSourceType::Camera)
-    : mCameraControl(nullptr)
-    , mCallbackMonitor("WebRTCCamera.CallbackMonitor")
+  NS_DECL_ISUPPORTS_INHERITED
+
+  MediaEngineGonkVideoSource(int aIndex)
+    : MediaEngineCameraVideoSource(aIndex, "GonkCamera.Monitor")
+    , mCameraControl(nullptr)
+    , mCallbackMonitor("GonkCamera.CallbackMonitor")
     , mRotation(0)
     , mBackCamera(false)
     , mOrientationChanged(true) // Correct the orientation at first time takePhoto.
-    , mCaptureIndex(aIndex)
-    , mMediaSource(aMediaSource)
-    , mMonitor("WebRTCCamera.Monitor")
-    , mWidth(0)
-    , mHeight(0)
-    , mHasDirectListeners(false)
-    , mInitDone(false)
-    , mInSnapshotMode(false)
-    , mSnapshotPath(nullptr)
-  {
-    mState = kReleased;
-    Init();
-  }
-#else
-  // ViEExternalRenderer.
-  virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
-  virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t,
-                           void *handle);
-  /**
-   * Does DeliverFrame() support a null buffer and non-null handle
-   * (video texture)?
-   * XXX Investigate!  Especially for Android/B2G
-   */
-  virtual bool IsTextureSupported() { return false; }
+    {
+      Init();
+    }
 
-  MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
-                               MediaSourceType aMediaSource = MediaSourceType::Camera)
-    : mVideoEngine(aVideoEnginePtr)
-    , mCaptureIndex(aIndex)
-    , mFps(-1)
-    , mMinFps(-1)
-    , mMediaSource(aMediaSource)
-    , mMonitor("WebRTCCamera.Monitor")
-    , mWidth(0)
-    , mHeight(0)
-    , mHasDirectListeners(false)
-    , mInitDone(false)
-    , mInSnapshotMode(false)
-    , mSnapshotPath(nullptr) {
-    MOZ_ASSERT(aVideoEnginePtr);
-    mState = kReleased;
-    Init();
-  }
-#endif
-
-  virtual void GetName(nsAString&);
-  virtual void GetUUID(nsAString&);
   virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
-                            const MediaEnginePrefs &aPrefs);
-  virtual nsresult Deallocate();
-  virtual nsresult Start(SourceMediaStream*, TrackID);
-  virtual nsresult Stop(SourceMediaStream*, TrackID);
-  virtual void SetDirectListeners(bool aHasListeners);
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
+                            const MediaEnginePrefs &aPrefs) MOZ_OVERRIDE;
+  virtual nsresult Deallocate() MOZ_OVERRIDE;
+  virtual nsresult Start(SourceMediaStream* aStream, TrackID aID) MOZ_OVERRIDE;
+  virtual nsresult Stop(SourceMediaStream* aSource, TrackID aID) MOZ_OVERRIDE;
   virtual void NotifyPull(MediaStreamGraph* aGraph,
-                          SourceMediaStream *aSource,
+                          SourceMediaStream* aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime);
-
-  virtual bool IsFake() {
-    return false;
-  }
-
-  virtual const MediaSourceType GetMediaSource() {
-    return mMediaSource;
-  }
-
-#ifndef MOZ_B2G_CAMERA
-  NS_DECL_THREADSAFE_ISUPPORTS
-
-  nsresult TakePhoto(PhotoCallback* aCallback)
-  {
-    return NS_ERROR_NOT_IMPLEMENTED;
-  }
-#else
-  // We are subclassed from CameraControlListener, which implements a
-  // threadsafe reference-count for us.
-  NS_DECL_ISUPPORTS_INHERITED
+                          TrackTicks& aLastEndTime) MOZ_OVERRIDE;
 
   void OnHardwareStateChange(HardwareState aState);
   void GetRotation();
   bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
   void OnUserError(UserContext aContext, nsresult aError);
   void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
 
   void AllocImpl();
   void DeallocImpl();
   void StartImpl(webrtc::CaptureCapability aCapability);
   void StopImpl();
-  void SnapshotImpl();
+  uint32_t ConvertPixelFormatToFOURCC(int aFormat);
   void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
-  uint32_t ConvertPixelFormatToFOURCC(int aFormat);
   void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
 
   nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
 
   // It sets the correct photo orientation via camera parameter according to
   // current screen orientation.
   nsresult UpdatePhotoOrientation();
 
-#endif
-
-  // This runnable is for creating a temporary file on the main thread.
-  NS_IMETHODIMP
-  Run()
+protected:
+  ~MediaEngineGonkVideoSource()
   {
-    nsCOMPtr<nsIFile> tmp;
-    nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tmp));
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    tmp->Append(NS_LITERAL_STRING("webrtc_snapshot.jpeg"));
-    rv = tmp->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    mSnapshotPath = new nsString();
-    rv = tmp->GetPath(*mSnapshotPath);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    return NS_OK;
+    Shutdown();
   }
-
-  void Refresh(int aIndex);
-
-protected:
-  ~MediaEngineWebRTCVideoSource() { Shutdown(); }
-
-private:
   // Initialize the needed Video engine interfaces.
   void Init();
   void Shutdown();
+  void ChooseCapability(const VideoTrackConstraintsN& aConstraints,
+                        const MediaEnginePrefs& aPrefs);
 
-  // Engine variables.
-#ifdef MOZ_B2G_CAMERA
   mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
   // This is only modified on MainThread (AllocImpl and DeallocImpl)
   nsRefPtr<ICameraControl> mCameraControl;
   nsCOMPtr<nsIDOMFile> mLastCapture;
+
+  // These are protected by mMonitor in parent class
   nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
-
-  // These are protected by mMonitor below
   int mRotation;
   int mCameraAngle; // See dom/base/ScreenOrientation.h
   bool mBackCamera;
   bool mOrientationChanged; // True when screen rotates.
-#else
-  webrtc::VideoEngine* mVideoEngine; // Weak reference, don't free.
-  webrtc::ViEBase* mViEBase;
-  webrtc::ViECapture* mViECapture;
-  webrtc::ViERender* mViERender;
-#endif
-  webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
-
-  int mCaptureIndex;
-  int mFps; // Track rate (30 fps by default)
-  int mMinFps; // Min rate we want to accept
-  MediaSourceType mMediaSource; // source of media (camera | application | screen)
-
-  // mMonitor protects mImage access/changes, and transitions of mState
-  // from kStarted to kStopped (which are combined with EndTrack() and
-  // image changes).  Note that mSources is not accessed from other threads
-  // for video and is not protected.
-  Monitor mMonitor; // Monitor for processing WebRTC frames.
-  int mWidth, mHeight;
-  nsRefPtr<layers::Image> mImage;
-  nsRefPtr<layers::ImageContainer> mImageContainer;
-  bool mHasDirectListeners;
-
-  nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
-
-  bool mInitDone;
-  bool mInSnapshotMode;
-  nsString* mSnapshotPath;
-
-  nsString mDeviceName;
-  nsString mUniqueId;
-
-  void ChooseCapability(const VideoTrackConstraintsN &aConstraints,
-                        const MediaEnginePrefs &aPrefs);
-
-  void GuessCapability(const VideoTrackConstraintsN &aConstraints,
-                       const MediaEnginePrefs &aPrefs);
 };
 
-class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
-                                     public webrtc::VoEMediaProcess
-{
-public:
-  MediaEngineWebRTCAudioSource(nsIThread *aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
-                               int aIndex, const char* name, const char* uuid)
-    : mSamples(0)
-    , mVoiceEngine(aVoiceEnginePtr)
-    , mMonitor("WebRTCMic.Monitor")
-    , mThread(aThread)
-    , mCapIndex(aIndex)
-    , mChannel(-1)
-    , mInitDone(false)
-    , mStarted(false)
-    , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
-    , mEchoCancel(webrtc::kEcDefault)
-    , mAGC(webrtc::kAgcDefault)
-    , mNoiseSuppress(webrtc::kNsDefault)
-    , mPlayoutDelay(0)
-    , mNullTransport(nullptr) {
-    MOZ_ASSERT(aVoiceEnginePtr);
-    mState = kReleased;
-    mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
-    mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
-    Init();
-  }
-
-  virtual void GetName(nsAString&);
-  virtual void GetUUID(nsAString&);
-
-  virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
-                            const MediaEnginePrefs &aPrefs);
-  virtual nsresult Deallocate();
-  virtual nsresult Start(SourceMediaStream*, TrackID);
-  virtual nsresult Stop(SourceMediaStream*, TrackID);
-  virtual void SetDirectListeners(bool aHasDirectListeners) {};
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay);
-
-  virtual void NotifyPull(MediaStreamGraph* aGraph,
-                          SourceMediaStream *aSource,
-                          TrackID aId,
-                          StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime);
-
-  virtual bool IsFake() {
-    return false;
-  }
-
-  virtual const MediaSourceType GetMediaSource() {
-    return MediaSourceType::Microphone;
-  }
-
-  virtual nsresult TakePhoto(PhotoCallback* aCallback)
-  {
-    return NS_ERROR_NOT_IMPLEMENTED;
-  }
-
-  // VoEMediaProcess.
-  void Process(int channel, webrtc::ProcessingTypes type,
-               int16_t audio10ms[], int length,
-               int samplingFreq, bool isStereo);
-
-  NS_DECL_THREADSAFE_ISUPPORTS
-
-protected:
-  ~MediaEngineWebRTCAudioSource() { Shutdown(); }
-
-  // mSamples is an int to avoid conversions when comparing/etc to
-  // samplingFreq & length. Making mSamples protected instead of private is a
-  // silly way to avoid -Wunused-private-field warnings when PR_LOGGING is not
-  // #defined. mSamples is not actually expected to be used by a derived class.
-  int mSamples;
-
-private:
-  void Init();
-  void Shutdown();
+} // namespace mozilla
 
-  webrtc::VoiceEngine* mVoiceEngine;
-  ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
-  ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
-  ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
-  ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
-  ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport;
-
-  // mMonitor protects mSources[] access/changes, and transitions of mState
-  // from kStarted to kStopped (which are combined with EndTrack()).
-  // mSources[] is accessed from webrtc threads.
-  Monitor mMonitor;
-  nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
-  nsCOMPtr<nsIThread> mThread;
-  int mCapIndex;
-  int mChannel;
-  TrackID mTrackID;
-  bool mInitDone;
-  bool mStarted;
-
-  nsString mDeviceName;
-  nsString mDeviceUUID;
-
-  bool mEchoOn, mAgcOn, mNoiseOn;
-  webrtc::EcModes  mEchoCancel;
-  webrtc::AgcModes mAGC;
-  webrtc::NsModes  mNoiseSuppress;
-  int32_t mPlayoutDelay;
-
-  NullTransport *mNullTransport;
-};
-
-class MediaEngineWebRTC : public MediaEngine
-{
-public:
-  explicit MediaEngineWebRTC(MediaEnginePrefs &aPrefs);
-
-  // Clients should ensure to clean-up sources video/audio sources
-  // before invoking Shutdown on this class.
-  void Shutdown();
-
-  virtual void EnumerateVideoDevices(MediaSourceType,
-                                    nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
-  virtual void EnumerateAudioDevices(MediaSourceType,
-                                    nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
-private:
-  ~MediaEngineWebRTC() {
-    Shutdown();
-#ifdef MOZ_B2G_CAMERA
-    AsyncLatencyLogger::Get()->Release();
-#endif
-    gFarendObserver = nullptr;
-  }
-
-  nsCOMPtr<nsIThread> mThread;
-
-  Mutex mMutex;
-
-  // protected with mMutex:
-  webrtc::VideoEngine* mScreenEngine;
-  webrtc::VideoEngine* mBrowserEngine;
-  webrtc::VideoEngine* mWinEngine;
-  webrtc::VideoEngine* mAppEngine;
-  webrtc::VideoEngine* mVideoEngine;
-  webrtc::VoiceEngine* mVoiceEngine;
-
-  // specialized configurations
-  webrtc::Config mAppEngineConfig;
-  webrtc::Config mWinEngineConfig;
-  webrtc::Config mScreenEngineConfig;
-  webrtc::Config mBrowserEngineConfig;
-
-  // Need this to avoid unneccesary WebRTC calls while enumerating.
-  bool mVideoEngineInit;
-  bool mAudioEngineInit;
-  bool mScreenEngineInit;
-  bool mBrowserEngineInit;
-  bool mWinEngineInit;
-  bool mAppEngineInit;
-  bool mHasTabVideoSource;
-
-  // Store devices we've already seen in a hashtable for quick return.
-  // Maps UUID to MediaEngineSource (one set for audio, one for video).
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
-};
-
-}
-
-#endif /* NSMEDIAENGINEWEBRTC_H_ */
+#endif // MediaEngineGonkVideoSource_h_
--- a/content/media/webrtc/MediaEngineTabVideoSource.cpp
+++ b/content/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -184,22 +184,16 @@ MediaEngineTabVideoSource::Start(mozilla
     runnable = new StartRunnable(this);
   NS_DispatchToMainThread(runnable);
   aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 
   return NS_OK;
 }
 
-nsresult
-MediaEngineTabVideoSource::Snapshot(uint32_t, nsIDOMFile**)
-{
-  return NS_OK;
-}
-
 void
 MediaEngineTabVideoSource::
 NotifyPull(MediaStreamGraph*, SourceMediaStream* aSource, mozilla::TrackID aID, mozilla::StreamTime aDesiredTime, mozilla::TrackTicks& aLastEndTime)
 {
   VideoSegment segment;
   MonitorAutoLock mon(mMonitor);
 
   // Note: we're not giving up mImage here
--- a/content/media/webrtc/MediaEngineTabVideoSource.h
+++ b/content/media/webrtc/MediaEngineTabVideoSource.h
@@ -20,17 +20,16 @@ class MediaEngineTabVideoSource : public
 
     virtual void GetName(nsAString_internal&);
     virtual void GetUUID(nsAString_internal&);
     virtual nsresult Allocate(const VideoTrackConstraintsN &,
                               const mozilla::MediaEnginePrefs&);
     virtual nsresult Deallocate();
     virtual nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID);
     virtual void SetDirectListeners(bool aHasDirectListeners) {};
-    virtual nsresult Snapshot(uint32_t, nsIDOMFile**);
     virtual void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, mozilla::TrackTicks&);
     virtual nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID);
     virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t);
     virtual bool IsFake();
     virtual const MediaSourceType GetMediaSource() {
       return MediaSourceType::Browser;
     }
 
--- a/content/media/webrtc/MediaEngineWebRTC.cpp
+++ b/content/media/webrtc/MediaEngineWebRTC.cpp
@@ -26,16 +26,21 @@ GetUserMediaLog()
 #include "nsITabSource.h"
 #include "MediaTrackConstraints.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidJNIWrapper.h"
 #include "AndroidBridge.h"
 #endif
 
+#ifdef MOZ_B2G_CAMERA
+#include "ICameraControl.h"
+#include "MediaEngineGonkVideoSource.h"
+#endif
+
 #undef LOG
 #define LOG(args) PR_LOG(GetUserMediaLog(), PR_LOG_DEBUG, args)
 
 namespace mozilla {
 
 MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
     : mMutex("mozilla::MediaEngineWebRTC")
     , mScreenEngine(nullptr)
@@ -68,17 +73,17 @@ MediaEngineWebRTC::MediaEngineWebRTC(Med
 
 void
 MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource,
                                          nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources)
 {
   // We spawn threads to handle gUM runnables, so we must protect the member vars
   MutexAutoLock lock(mMutex);
 
- #ifdef MOZ_B2G_CAMERA
+#ifdef MOZ_B2G_CAMERA
   if (aMediaSource != MediaSourceType::Camera) {
     // only supports camera sources
     return;
   }
 
   /**
    * We still enumerate every time, in case a new device was plugged in since
    * the last call. TODO: Verify that WebRTC actually does deal with hotplugging
@@ -96,23 +101,23 @@ MediaEngineWebRTC::EnumerateVideoDevices
 
   for (int i = 0; i < num; i++) {
     nsCString cameraName;
     result = ICameraControl::GetCameraName(i, cameraName);
     if (result != NS_OK) {
       continue;
     }
 
-    nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
+    nsRefPtr<MediaEngineVideoSource> vSource;
     NS_ConvertUTF8toUTF16 uuid(cameraName);
     if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
       // We've already seen this device, just append.
       aVSources->AppendElement(vSource.get());
     } else {
-      vSource = new MediaEngineWebRTCVideoSource(i, aMediaSource);
+      vSource = new MediaEngineGonkVideoSource(i);
       mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
       aVSources->AppendElement(vSource);
     }
   }
 
   return;
 #else
   ScopedCustomReleasePtr<webrtc::ViEBase> ptrViEBase;
@@ -251,21 +256,21 @@ MediaEngineWebRTC::EnumerateVideoDevices
 #endif
 
     if (uniqueId[0] == '\0') {
       // In case a device doesn't set uniqueId!
       strncpy(uniqueId, deviceName, sizeof(uniqueId));
       uniqueId[sizeof(uniqueId)-1] = '\0'; // strncpy isn't safe
     }
 
-    nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
+    nsRefPtr<MediaEngineVideoSource> vSource;
     NS_ConvertUTF8toUTF16 uuid(uniqueId);
     if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
       // We've already seen this device, just refresh and append.
-      vSource->Refresh(i);
+      static_cast<MediaEngineWebRTCVideoSource*>(vSource.get())->Refresh(i);
       aVSources->AppendElement(vSource.get());
     } else {
       vSource = new MediaEngineWebRTCVideoSource(videoEngine, i, aMediaSource);
       mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
       aVSources->AppendElement(vSource);
     }
   }
 
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -16,17 +16,17 @@
 #include "nsCOMPtr.h"
 #include "nsThreadUtils.h"
 #include "DOMMediaStream.h"
 #include "nsDirectoryServiceDefs.h"
 #include "nsComponentManagerUtils.h"
 #include "nsRefPtrHashtable.h"
 
 #include "VideoUtils.h"
-#include "MediaEngine.h"
+#include "MediaEngineCameraVideoSource.h"
 #include "VideoSegment.h"
 #include "AudioSegment.h"
 #include "StreamBuffer.h"
 #include "MediaStreamGraph.h"
 
 #include "MediaEngineWrapper.h"
 #include "mozilla/dom/MediaStreamTrackBinding.h"
 // WebRTC library includes follow
@@ -44,304 +44,147 @@
 
 // Video Engine
 // conflicts with #include of scoped_ptr.h
 #undef FF
 #include "webrtc/video_engine/include/vie_base.h"
 #include "webrtc/video_engine/include/vie_codec.h"
 #include "webrtc/video_engine/include/vie_render.h"
 #include "webrtc/video_engine/include/vie_capture.h"
-#ifdef MOZ_B2G_CAMERA
-#include "CameraControlListener.h"
-#include "ICameraControl.h"
-#include "ImageContainer.h"
-#include "nsGlobalWindow.h"
-#include "prprf.h"
-#include "mozilla/Hal.h"
-#endif
 
 #include "NullTransport.h"
 #include "AudioOutputObserver.h"
 
 namespace mozilla {
 
-#ifdef MOZ_B2G_CAMERA
-class CameraAllocateRunnable;
-class GetCameraNameRunnable;
-#endif
-
 /**
  * The WebRTC implementation of the MediaEngine interface.
- *
- * On B2G platform, member data may accessed from different thread after construction:
- *
- * MediaThread:
- *   mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
- *   mImageContainer, mSources, mState, mImage
- *
- * MainThread:
- *   mCaptureIndex, mLastCapture, mState,  mWidth, mHeight,
- *
- * Where mWidth, mHeight, mImage, mPhotoCallbacks are protected by mMonitor
- *       mState is protected by mCallbackMonitor
- * Other variable is accessed only from single thread
  */
-class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
-                                   , public nsRunnable
-#ifdef MOZ_B2G_CAMERA
-                                   , public CameraControlListener
-                                   , public mozilla::hal::ScreenConfigurationObserver
-#else
+class MediaEngineWebRTCVideoSource : public MediaEngineCameraVideoSource
                                    , public webrtc::ExternalRenderer
-#endif
 {
 public:
-#ifdef MOZ_B2G_CAMERA
-  MediaEngineWebRTCVideoSource(int aIndex,
-                               MediaSourceType aMediaSource = MediaSourceType::Camera)
-    : mCameraControl(nullptr)
-    , mCallbackMonitor("WebRTCCamera.CallbackMonitor")
-    , mRotation(0)
-    , mBackCamera(false)
-    , mOrientationChanged(true) // Correct the orientation at first time takePhoto.
-    , mCaptureIndex(aIndex)
-    , mMediaSource(aMediaSource)
-    , mMonitor("WebRTCCamera.Monitor")
-    , mWidth(0)
-    , mHeight(0)
-    , mHasDirectListeners(false)
-    , mInitDone(false)
-    , mInSnapshotMode(false)
-    , mSnapshotPath(nullptr)
-  {
-    mState = kReleased;
-    Init();
-  }
-#else
+  NS_DECL_THREADSAFE_ISUPPORTS
+
   // ViEExternalRenderer.
-  virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
-  virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t,
+  virtual int FrameSizeChange(unsigned int w, unsigned int h, unsigned int streams);
+  virtual int DeliverFrame(unsigned char* buffer,
+                           int size,
+                           uint32_t time_stamp,
+                           int64_t render_time,
                            void *handle);
   /**
    * Does DeliverFrame() support a null buffer and non-null handle
    * (video texture)?
    * XXX Investigate!  Especially for Android/B2G
    */
   virtual bool IsTextureSupported() { return false; }
 
   MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
                                MediaSourceType aMediaSource = MediaSourceType::Camera)
-    : mVideoEngine(aVideoEnginePtr)
-    , mCaptureIndex(aIndex)
-    , mFps(-1)
+    : MediaEngineCameraVideoSource(aIndex, "WebRTCCamera.Monitor")
+    , mVideoEngine(aVideoEnginePtr)
     , mMinFps(-1)
     , mMediaSource(aMediaSource)
-    , mMonitor("WebRTCCamera.Monitor")
-    , mWidth(0)
-    , mHeight(0)
-    , mHasDirectListeners(false)
-    , mInitDone(false)
-    , mInSnapshotMode(false)
-    , mSnapshotPath(nullptr) {
+  {
     MOZ_ASSERT(aVideoEnginePtr);
-    mState = kReleased;
     Init();
   }
-#endif
 
-  virtual void GetName(nsAString&);
-  virtual void GetUUID(nsAString&);
-  virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
-                            const MediaEnginePrefs &aPrefs);
+  virtual nsresult Allocate(const VideoTrackConstraintsN& aConstraints,
+                            const MediaEnginePrefs& aPrefs);
   virtual nsresult Deallocate();
   virtual nsresult Start(SourceMediaStream*, TrackID);
   virtual nsresult Stop(SourceMediaStream*, TrackID);
-  virtual void SetDirectListeners(bool aHasListeners);
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
-  virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
-                          bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
-                          SourceMediaStream *aSource,
+                          SourceMediaStream* aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime);
-
-  virtual bool IsFake() {
-    return false;
-  }
+                          TrackTicks& aLastEndTime);
 
   virtual const MediaSourceType GetMediaSource() {
     return mMediaSource;
   }
-
-#ifndef MOZ_B2G_CAMERA
-  NS_DECL_THREADSAFE_ISUPPORTS
-
-  nsresult TakePhoto(PhotoCallback* aCallback)
+  virtual nsresult TakePhoto(PhotoCallback* aCallback)
   {
     return NS_ERROR_NOT_IMPLEMENTED;
   }
-#else
-  // We are subclassed from CameraControlListener, which implements a
-  // threadsafe reference-count for us.
-  NS_DECL_ISUPPORTS_INHERITED
-
-  void OnHardwareStateChange(HardwareState aState);
-  void GetRotation();
-  bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
-  void OnUserError(UserContext aContext, nsresult aError);
-  void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
-
-  void AllocImpl();
-  void DeallocImpl();
-  void StartImpl(webrtc::CaptureCapability aCapability);
-  void StopImpl();
-  void SnapshotImpl();
-  void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
-  uint32_t ConvertPixelFormatToFOURCC(int aFormat);
-  void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
-
-  nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
-
-  // It sets the correct photo orientation via camera parameter according to
-  // current screen orientation.
-  nsresult UpdatePhotoOrientation();
-
-#endif
-
-  // This runnable is for creating a temporary file on the main thread.
-  NS_IMETHODIMP
-  Run()
-  {
-    nsCOMPtr<nsIFile> tmp;
-    nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tmp));
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    tmp->Append(NS_LITERAL_STRING("webrtc_snapshot.jpeg"));
-    rv = tmp->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    mSnapshotPath = new nsString();
-    rv = tmp->GetPath(*mSnapshotPath);
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    return NS_OK;
-  }
 
   void Refresh(int aIndex);
 
 protected:
   ~MediaEngineWebRTCVideoSource() { Shutdown(); }
 
 private:
   // Initialize the needed Video engine interfaces.
   void Init();
   void Shutdown();
 
   // Engine variables.
-#ifdef MOZ_B2G_CAMERA
-  mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
-  // This is only modified on MainThread (AllocImpl and DeallocImpl)
-  nsRefPtr<ICameraControl> mCameraControl;
-  nsCOMPtr<nsIDOMFile> mLastCapture;
-  nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
-
-  // These are protected by mMonitor below
-  int mRotation;
-  int mCameraAngle; // See dom/base/ScreenOrientation.h
-  bool mBackCamera;
-  bool mOrientationChanged; // True when screen rotates.
-#else
   webrtc::VideoEngine* mVideoEngine; // Weak reference, don't free.
   webrtc::ViEBase* mViEBase;
   webrtc::ViECapture* mViECapture;
   webrtc::ViERender* mViERender;
-#endif
   webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
 
-  int mCaptureIndex;
-  int mFps; // Track rate (30 fps by default)
   int mMinFps; // Min rate we want to accept
   MediaSourceType mMediaSource; // source of media (camera | application | screen)
 
-  // mMonitor protects mImage access/changes, and transitions of mState
-  // from kStarted to kStopped (which are combined with EndTrack() and
-  // image changes).  Note that mSources is not accessed from other threads
-  // for video and is not protected.
-  Monitor mMonitor; // Monitor for processing WebRTC frames.
-  int mWidth, mHeight;
-  nsRefPtr<layers::Image> mImage;
-  nsRefPtr<layers::ImageContainer> mImageContainer;
-  bool mHasDirectListeners;
-
-  nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
-
-  bool mInitDone;
-  bool mInSnapshotMode;
-  nsString* mSnapshotPath;
-
-  nsString mDeviceName;
-  nsString mUniqueId;
-
-  void ChooseCapability(const VideoTrackConstraintsN &aConstraints,
-                        const MediaEnginePrefs &aPrefs);
-
-  void GuessCapability(const VideoTrackConstraintsN &aConstraints,
-                       const MediaEnginePrefs &aPrefs);
+  static bool SatisfyConstraintSet(const dom::MediaTrackConstraintSet& aConstraints,
+                                   const webrtc::CaptureCapability& aCandidate);
+  void ChooseCapability(const VideoTrackConstraintsN& aConstraints,
+                        const MediaEnginePrefs& aPrefs);
 };
 
 class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
                                      public webrtc::VoEMediaProcess
 {
 public:
-  MediaEngineWebRTCAudioSource(nsIThread *aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
+  MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
                                int aIndex, const char* name, const char* uuid)
-    : mSamples(0)
+    : MediaEngineAudioSource(kReleased)
+    , mSamples(0)
     , mVoiceEngine(aVoiceEnginePtr)
     , mMonitor("WebRTCMic.Monitor")
     , mThread(aThread)
     , mCapIndex(aIndex)
     , mChannel(-1)
     , mInitDone(false)
     , mStarted(false)
     , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
     , mEchoCancel(webrtc::kEcDefault)
     , mAGC(webrtc::kAgcDefault)
     , mNoiseSuppress(webrtc::kNsDefault)
     , mPlayoutDelay(0)
     , mNullTransport(nullptr) {
     MOZ_ASSERT(aVoiceEnginePtr);
-    mState = kReleased;
     mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
     mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
     Init();
   }
 
-  virtual void GetName(nsAString&);
-  virtual void GetUUID(nsAString&);
+  virtual void GetName(nsAString& aName);
+  virtual void GetUUID(nsAString& aUUID);
 
-  virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
-                            const MediaEnginePrefs &aPrefs);
+  virtual nsresult Allocate(const AudioTrackConstraintsN& aConstraints,
+                            const MediaEnginePrefs& aPrefs);
   virtual nsresult Deallocate();
-  virtual nsresult Start(SourceMediaStream*, TrackID);
-  virtual nsresult Stop(SourceMediaStream*, TrackID);
+  virtual nsresult Start(SourceMediaStream* aStream, TrackID aID);
+  virtual nsresult Stop(SourceMediaStream* aSource, TrackID aID);
   virtual void SetDirectListeners(bool aHasDirectListeners) {};
-  virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
                           bool aNoiseOn, uint32_t aNoise,
                           int32_t aPlayoutDelay);
 
   virtual void NotifyPull(MediaStreamGraph* aGraph,
-                          SourceMediaStream *aSource,
+                          SourceMediaStream* aSource,
                           TrackID aId,
                           StreamTime aDesiredTime,
-                          TrackTicks &aLastEndTime);
+                          TrackTicks& aLastEndTime);
 
   virtual bool IsFake() {
     return false;
   }
 
   virtual const MediaSourceType GetMediaSource() {
     return MediaSourceType::Microphone;
   }
@@ -377,17 +220,17 @@ private:
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
   ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
   ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport;
 
   // mMonitor protects mSources[] access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack()).
   // mSources[] is accessed from webrtc threads.
   Monitor mMonitor;
-  nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
+  nsTArray<SourceMediaStream*> mSources; // When this goes empty, we shut down HW
   nsCOMPtr<nsIThread> mThread;
   int mCapIndex;
   int mChannel;
   TrackID mTrackID;
   bool mInitDone;
   bool mStarted;
 
   nsString mDeviceName;
@@ -400,17 +243,17 @@ private:
   int32_t mPlayoutDelay;
 
   NullTransport *mNullTransport;
 };
 
 class MediaEngineWebRTC : public MediaEngine
 {
 public:
-  explicit MediaEngineWebRTC(MediaEnginePrefs &aPrefs);
+  explicit MediaEngineWebRTC(MediaEnginePrefs& aPrefs);
 
   // Clients should ensure to clean-up sources video/audio sources
   // before invoking Shutdown on this class.
   void Shutdown();
 
   virtual void EnumerateVideoDevices(MediaSourceType,
                                     nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
   virtual void EnumerateAudioDevices(MediaSourceType,
@@ -448,15 +291,15 @@ private:
   bool mScreenEngineInit;
   bool mBrowserEngineInit;
   bool mWinEngineInit;
   bool mAppEngineInit;
   bool mHasTabVideoSource;
 
   // Store devices we've already seen in a hashtable for quick return.
   // Maps UUID to MediaEngineSource (one set for audio, one for video).
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
 };
 
 }
 
 #endif /* NSMEDIAENGINEWEBRTC_H_ */
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -401,22 +401,16 @@ MediaEngineWebRTCAudioSource::NotifyPull
 #ifdef DEBUG
   TrackTicks target = aSource->TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
   TrackTicks delta = target - aLastEndTime;
   LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
   aLastEndTime = target;
 #endif
 }
 
-nsresult
-MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
-   return NS_ERROR_NOT_IMPLEMENTED;
-}
-
 void
 MediaEngineWebRTCAudioSource::Init()
 {
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
 
   mVoEBase->Init();
 
   mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -6,23 +6,16 @@
 #include "Layers.h"
 #include "ImageTypes.h"
 #include "ImageContainer.h"
 #include "mozilla/layers/GrallocTextureClient.h"
 #include "nsMemory.h"
 #include "mtransport/runnable_utils.h"
 #include "MediaTrackConstraints.h"
 
-#ifdef MOZ_B2G_CAMERA
-#include "GrallocImages.h"
-#include "libyuv.h"
-#include "mozilla/Hal.h"
-#include "ScreenOrientation.h"
-using namespace mozilla::dom;
-#endif
 namespace mozilla {
 
 using namespace mozilla::gfx;
 using dom::ConstrainLongRange;
 using dom::ConstrainDoubleRange;
 using dom::MediaTrackConstraintSet;
 
 #ifdef PR_LOGGING
@@ -32,52 +25,35 @@ extern PRLogModuleInfo* GetMediaManagerL
 #else
 #define LOG(msg)
 #define LOGFRAME(msg)
 #endif
 
 /**
  * Webrtc video source.
  */
-#ifndef MOZ_B2G_CAMERA
-NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
-#else
-NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
-NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-#endif
 
-// ViEExternalRenderer Callback.
-#ifndef MOZ_B2G_CAMERA
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCVideoSource)
+
 int
 MediaEngineWebRTCVideoSource::FrameSizeChange(
    unsigned int w, unsigned int h, unsigned int streams)
 {
   mWidth = w;
   mHeight = h;
   LOG(("Video FrameSizeChange: %ux%u", w, h));
   return 0;
 }
 
 // ViEExternalRenderer Callback. Process every incoming frame here.
 int
 MediaEngineWebRTCVideoSource::DeliverFrame(
    unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
    void *handle)
 {
-  // mInSnapshotMode can only be set before the camera is turned on and
-  // the renderer is started, so this amounts to a 1-shot
-  if (mInSnapshotMode) {
-    // Set the condition variable to false and notify Snapshot().
-    MonitorAutoLock lock(mMonitor);
-    mInSnapshotMode = false;
-    lock.Notify();
-    return 0;
-  }
-
   // Check for proper state.
   if (mState != kStarted) {
     LOG(("DeliverFrame: video not started"));
     return 0;
   }
 
   if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
     MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
@@ -119,17 +95,16 @@ MediaEngineWebRTCVideoSource::DeliverFra
   // which has it's own lock)
   MonitorAutoLock lock(mMonitor);
 
   // implicitly releases last image
   mImage = image.forget();
 
   return 0;
 }
-#endif
 
 // Called if the graph thinks it's running out of buffered video; repeat
 // the last frame for whatever minimum period it think it needs.  Note that
 // this means that no *real* frame can be inserted during this period.
 void
 MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream *aSource,
                                          TrackID aID,
@@ -167,61 +142,34 @@ MediaEngineWebRTCVideoSource::NotifyPull
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     if (aSource->AppendToTrack(aID, &(segment))) {
       aLastEndTime = target;
     }
   }
 }
 
-static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
-  return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
-  return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
-  return std::max(aRange.mMin, std::min(n, aRange.mMax));
-}
-
-static bool
-AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
-  return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
-}
-
-static bool
-Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
-  MOZ_ASSERT(AreIntersecting(aA, aB));
-  aA.mMin = std::max(aA.mMin, aB.mMin);
-  aA.mMax = std::min(aA.mMax, aB.mMax);
-  return true;
-}
-
-static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
-                                 const webrtc::CaptureCapability& aCandidate) {
-  if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
-      !IsWithin(aCandidate.height, aConstraints.mHeight)) {
+/*static*/
+bool MediaEngineWebRTCVideoSource::SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
+                                                        const webrtc::CaptureCapability& aCandidate) {
+  if (!MediaEngineCameraVideoSource::IsWithin(aCandidate.width, aConstraints.mWidth) ||
+      !MediaEngineCameraVideoSource::IsWithin(aCandidate.height, aConstraints.mHeight)) {
     return false;
   }
-  if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
+  if (!MediaEngineCameraVideoSource::IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
     return false;
   }
   return true;
 }
 
 void
 MediaEngineWebRTCVideoSource::ChooseCapability(
     const VideoTrackConstraintsN &aConstraints,
     const MediaEnginePrefs &aPrefs)
 {
-#ifdef MOZ_B2G_CAMERA
-  return GuessCapability(aConstraints, aPrefs);
-#else
   NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
   int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
   if (num <= 0) {
     // Mac doesn't support capabilities.
     return GuessCapability(aConstraints, aPrefs);
   }
 
   // The rest is the full algorithm for cameras that can list their capabilities.
@@ -326,129 +274,23 @@ MediaEngineWebRTCVideoSource::ChooseCapa
           mCapability = cap;
         }
       }
     }
   }
   LOG(("chose cap %dx%d @%dfps codec %d raw %d",
        mCapability.width, mCapability.height, mCapability.maxFPS,
        mCapability.codecType, mCapability.rawType));
-#endif
-}
-
-// A special version of the algorithm for cameras that don't list capabilities.
-
-void
-MediaEngineWebRTCVideoSource::GuessCapability(
-    const VideoTrackConstraintsN &aConstraints,
-    const MediaEnginePrefs &aPrefs)
-{
-  LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
-       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
-  // In short: compound constraint-ranges and use pref as ideal.
-
-  ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
-  ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
-
-  if (aConstraints.mAdvanced.WasPassed()) {
-    const auto& advanced = aConstraints.mAdvanced.Value();
-    for (uint32_t i = 0; i < advanced.Length(); i++) {
-      if (AreIntersecting(cWidth, advanced[i].mWidth) &&
-          AreIntersecting(cHeight, advanced[i].mHeight)) {
-        Intersect(cWidth, advanced[i].mWidth);
-        Intersect(cHeight, advanced[i].mHeight);
-      }
-    }
-  }
-  // Detect Mac HD cams and give them some love in the form of a dynamic default
-  // since that hardware switches between 4:3 at low res and 16:9 at higher res.
-  //
-  // Logic is: if we're relying on defaults in aPrefs, then
-  // only use HD pref when non-HD pref is too small and HD pref isn't too big.
-
-  bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
-                mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
-                (aPrefs.GetWidth() < cWidth.mMin ||
-                 aPrefs.GetHeight() < cHeight.mMin) &&
-                !(aPrefs.GetWidth(true) > cWidth.mMax ||
-                  aPrefs.GetHeight(true) > cHeight.mMax));
-  int prefWidth = aPrefs.GetWidth(macHD);
-  int prefHeight = aPrefs.GetHeight(macHD);
-
-  // Clamp width and height without distorting inherent aspect too much.
-
-  if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
-    // If both are within, we get the default (pref) aspect.
-    // If neither are within, we get the aspect of the enclosing constraint.
-    // Either are presumably reasonable (presuming constraints are sane).
-    mCapability.width = Clamp(prefWidth, cWidth);
-    mCapability.height = Clamp(prefHeight, cHeight);
-  } else {
-    // But if only one clips (e.g. width), the resulting skew is undesirable:
-    //       .------------.
-    //       | constraint |
-    //  .----+------------+----.
-    //  |    |            |    |
-    //  |pref|  result    |    |   prefAspect != resultAspect
-    //  |    |            |    |
-    //  '----+------------+----'
-    //       '------------'
-    //  So in this case, preserve prefAspect instead:
-    //  .------------.
-    //  | constraint |
-    //  .------------.
-    //  |pref        |             prefAspect is unchanged
-    //  '------------'
-    //  |            |
-    //  '------------'
-    if (IsWithin(prefWidth, cWidth)) {
-      mCapability.height = Clamp(prefHeight, cHeight);
-      mCapability.width = Clamp((mCapability.height * prefWidth) /
-                                prefHeight, cWidth);
-    } else {
-      mCapability.width = Clamp(prefWidth, cWidth);
-      mCapability.height = Clamp((mCapability.width * prefHeight) /
-                                 prefWidth, cHeight);
-    }
-  }
-  mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
-  LOG(("chose cap %dx%d @%dfps",
-       mCapability.width, mCapability.height, mCapability.maxFPS));
-}
-
-void
-MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
-{
-  aName = mDeviceName;
-}
-
-void
-MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
-{
-  aUUID = mUniqueId;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
                                        const MediaEnginePrefs &aPrefs)
 {
   LOG((__FUNCTION__));
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-  if (mState == kReleased && mInitDone) {
-    ChooseCapability(aConstraints, aPrefs);
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::AllocImpl));
-    mCallbackMonitor.Wait();
-    if (mState != kAllocated) {
-      return NS_ERROR_FAILURE;
-    }
-  }
-#else
   if (mState == kReleased && mInitDone) {
     // Note: if shared, we don't allow a later opener to affect the resolution.
     // (This may change depending on spec changes for Constraints/settings)
 
     ChooseCapability(aConstraints, aPrefs);
 
     if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                            kMaxUniqueIdLength, mCaptureIndex)) {
@@ -456,42 +298,29 @@ MediaEngineWebRTCVideoSource::Allocate(c
     }
     mState = kAllocated;
     LOG(("Video device %d allocated", mCaptureIndex));
   } else if (mSources.IsEmpty()) {
     LOG(("Video device %d reallocated", mCaptureIndex));
   } else {
     LOG(("Video device %d allocated shared", mCaptureIndex));
   }
-#endif
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Deallocate()
 {
   LOG((__FUNCTION__));
   if (mSources.IsEmpty()) {
-#ifdef MOZ_B2G_CAMERA
-    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
     if (mState != kStopped && mState != kAllocated) {
       return NS_ERROR_FAILURE;
     }
-#ifdef MOZ_B2G_CAMERA
-    // We do not register success callback here
-
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::DeallocImpl));
-    mCallbackMonitor.Wait();
-    if (mState != kReleased) {
-      return NS_ERROR_FAILURE;
-    }
-#elif XP_MACOSX
+#ifdef XP_MACOSX
     // Bug 829907 - on mac, in shutdown, the mainthread stops processing
     // 'native' events, and the QTKit code uses events to the main native CFRunLoop
     // in order to provide thread safety.  In order to avoid this locking us up,
     // release the ViE capture device synchronously on MainThread (so the native
     // event isn't needed).
     // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
     // XXX It might be nice to only do this if we're in shutdown...  Hard to be
     // sure when that is though.
@@ -513,130 +342,82 @@ MediaEngineWebRTCVideoSource::Deallocate
   }
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
 {
   LOG((__FUNCTION__));
-#ifndef MOZ_B2G_CAMERA
   int error = 0;
-#endif
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
   }
 
   mSources.AppendElement(aStream);
 
   aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-
   if (mState == kStarted) {
     return NS_OK;
   }
   mImageContainer = layers::LayerManager::CreateImageContainer();
 
-#ifdef MOZ_B2G_CAMERA
-  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                       &MediaEngineWebRTCVideoSource::StartImpl,
-                                       mCapability));
-  mCallbackMonitor.Wait();
-  if (mState != kStarted) {
-    return NS_ERROR_FAILURE;
-  }
-#else
   mState = kStarted;
   error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
   if (error == -1) {
     return NS_ERROR_FAILURE;
   }
 
   error = mViERender->StartRender(mCaptureIndex);
   if (error == -1) {
     return NS_ERROR_FAILURE;
   }
 
   if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
     return NS_ERROR_FAILURE;
   }
-#endif
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
   LOG((__FUNCTION__));
   if (!mSources.RemoveElement(aSource)) {
     // Already stopped - this is allowed
     return NS_OK;
   }
   if (!mSources.IsEmpty()) {
     return NS_OK;
   }
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
   if (mState != kStarted) {
     return NS_ERROR_FAILURE;
   }
 
   {
     MonitorAutoLock lock(mMonitor);
     mState = kStopped;
     aSource->EndTrack(aID);
     // Drop any cached image so we don't start with a stale image on next
     // usage
     mImage = nullptr;
   }
-#ifdef MOZ_B2G_CAMERA
-  NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                       &MediaEngineWebRTCVideoSource::StopImpl));
-#else
   mViERender->StopRender(mCaptureIndex);
   mViERender->RemoveRenderer(mCaptureIndex);
   mViECapture->StopCapture(mCaptureIndex);
-#endif
 
   return NS_OK;
 }
 
 void
-MediaEngineWebRTCVideoSource::SetDirectListeners(bool aHasDirectListeners)
-{
-  LOG((__FUNCTION__));
-  mHasDirectListeners = aHasDirectListeners;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
-  return NS_ERROR_NOT_IMPLEMENTED;
-}
-
-/**
- * Initialization and Shutdown functions for the video source, called by the
- * constructor and destructor respectively.
- */
-
-void
 MediaEngineWebRTCVideoSource::Init()
 {
-#ifdef MOZ_B2G_CAMERA
-  nsAutoCString deviceName;
-  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
-  CopyUTF8toUTF16(deviceName, mDeviceName);
-  CopyUTF8toUTF16(deviceName, mUniqueId);
-#else
   // fix compile warning for these being unused. (remove once used)
   (void) mFps;
   (void) mMinFps;
 
   LOG((__FUNCTION__));
   if (mVideoEngine == nullptr) {
     return;
   }
@@ -659,483 +440,58 @@ MediaEngineWebRTCVideoSource::Init()
   if (mViECapture->GetCaptureDevice(mCaptureIndex,
                                     deviceName, kMaxDeviceNameLength,
                                     uniqueId, kMaxUniqueIdLength)) {
     return;
   }
 
   CopyUTF8toUTF16(deviceName, mDeviceName);
   CopyUTF8toUTF16(uniqueId, mUniqueId);
-#endif
 
   mInitDone = true;
 }
 
 void
 MediaEngineWebRTCVideoSource::Shutdown()
 {
   LOG((__FUNCTION__));
   if (!mInitDone) {
     return;
   }
-#ifdef MOZ_B2G_CAMERA
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
   if (mState == kStarted) {
     while (!mSources.IsEmpty()) {
       Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
     }
     MOZ_ASSERT(mState == kStopped);
   }
 
   if (mState == kAllocated || mState == kStopped) {
     Deallocate();
   }
-#ifndef MOZ_B2G_CAMERA
   mViECapture->Release();
   mViERender->Release();
   mViEBase->Release();
-#endif
   mState = kReleased;
   mInitDone = false;
 }
 
 void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
   // NOTE: mCaptureIndex might have changed when allocated!
   // Use aIndex to update information, but don't change mCaptureIndex!!
-#ifdef MOZ_B2G_CAMERA
-  // Caller looked up this source by uniqueId; since deviceName == uniqueId nothing else changes
-#else
   // Caller looked up this source by uniqueId, so it shouldn't change
   char deviceName[kMaxDeviceNameLength];
   char uniqueId[kMaxUniqueIdLength];
 
   if (mViECapture->GetCaptureDevice(aIndex,
                                     deviceName, sizeof(deviceName),
                                     uniqueId, sizeof(uniqueId))) {
     return;
   }
 
   CopyUTF8toUTF16(deviceName, mDeviceName);
 #ifdef DEBUG
   nsString temp;
   CopyUTF8toUTF16(uniqueId, temp);
   MOZ_ASSERT(temp.Equals(mUniqueId));
 #endif
-#endif
-}
-
-#ifdef MOZ_B2G_CAMERA
-
-// All these functions must be run on MainThread!
-void
-MediaEngineWebRTCVideoSource::AllocImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-
-  mCameraControl = ICameraControl::Create(mCaptureIndex);
-  if (mCameraControl) {
-    mState = kAllocated;
-    // Add this as a listener for CameraControl events. We don't need
-    // to explicitly remove this--destroying the CameraControl object
-    // in DeallocImpl() will do that for us.
-    mCameraControl->AddListener(this);
-  }
-
-  mCallbackMonitor.Notify();
-}
-
-void
-MediaEngineWebRTCVideoSource::DeallocImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  mCameraControl = nullptr;
-}
-
-// The same algorithm from bug 840244
-static int
-GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
-  int screenAngle = 0;
-  switch (aScreen) {
-    case eScreenOrientation_PortraitPrimary:
-      screenAngle = 0;
-      break;
-    case eScreenOrientation_PortraitSecondary:
-      screenAngle = 180;
-      break;
-   case eScreenOrientation_LandscapePrimary:
-      screenAngle = 90;
-      break;
-   case eScreenOrientation_LandscapeSecondary:
-      screenAngle = 270;
-      break;
-   default:
-      MOZ_ASSERT(false);
-      break;
-  }
-
-  int result;
-
-  if (aBackCamera) {
-    //back camera
-    result = (aCameraMountAngle - screenAngle + 360) % 360;
-  } else {
-    //front camera
-    result = (aCameraMountAngle + screenAngle) % 360;
-  }
-  return result;
-}
-
-// undefine to remove on-the-fly rotation support
-#define DYNAMIC_GUM_ROTATION
-
-void
-MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
-#ifdef DYNAMIC_GUM_ROTATION
-  if (mHasDirectListeners) {
-    // aka hooked to PeerConnection
-    MonitorAutoLock enter(mMonitor);
-    mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
-
-    LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
-         mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
-  }
-#endif
-
-  mOrientationChanged = true;
-}
-
-void
-MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  ICameraControl::Configuration config;
-  config.mMode = ICameraControl::kPictureMode;
-  config.mPreviewSize.width = aCapability.width;
-  config.mPreviewSize.height = aCapability.height;
-  mCameraControl->Start(&config);
-  mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
-
-  hal::RegisterScreenConfigurationObserver(this);
-}
-
-void
-MediaEngineWebRTCVideoSource::StopImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  hal::UnregisterScreenConfigurationObserver(this);
-  mCameraControl->Stop();
-}
-
-void
-MediaEngineWebRTCVideoSource::SnapshotImpl() {
-  MOZ_ASSERT(NS_IsMainThread());
-  mCameraControl->TakePicture();
-}
-
-void
-MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
-{
-  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-  if (aState == CameraControlListener::kHardwareClosed) {
-    // When the first CameraControl listener is added, it gets pushed
-    // the current state of the camera--normally 'closed'. We only
-    // pay attention to that state if we've progressed out of the
-    // allocated state.
-    if (mState != kAllocated) {
-      mState = kReleased;
-      mCallbackMonitor.Notify();
-    }
-  } else {
-    // Can't read this except on MainThread (ugh)
-    NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
-                                         &MediaEngineWebRTCVideoSource::GetRotation));
-    mState = kStarted;
-    mCallbackMonitor.Notify();
-  }
-}
-
-void
-MediaEngineWebRTCVideoSource::GetRotation()
-{
-  MOZ_ASSERT(NS_IsMainThread());
-  MonitorAutoLock enter(mMonitor);
-
-  mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
-  MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
-             mCameraAngle == 270);
-  hal::ScreenConfiguration config;
-  hal::GetCurrentScreenConfiguration(&config);
-
-  nsCString deviceName;
-  ICameraControl::GetCameraName(mCaptureIndex, deviceName);
-  if (deviceName.EqualsASCII("back")) {
-    mBackCamera = true;
-  }
-
-  mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
-  LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
-       mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
-}
-
-void
-MediaEngineWebRTCVideoSource::OnUserError(UserContext aContext, nsresult aError)
-{
-  {
-    // Scope the monitor, since there is another monitor below and we don't want
-    // unexpected deadlock.
-    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-    mCallbackMonitor.Notify();
-  }
-
-  // A main thread runnable to send error code to all queued PhotoCallbacks.
-  class TakePhotoError : public nsRunnable {
-  public:
-    TakePhotoError(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
-                   nsresult aRv)
-      : mRv(aRv)
-    {
-      mCallbacks.SwapElements(aCallbacks);
-    }
-
-    NS_IMETHOD Run()
-    {
-      uint32_t callbackNumbers = mCallbacks.Length();
-      for (uint8_t i = 0; i < callbackNumbers; i++) {
-        mCallbacks[i]->PhotoError(mRv);
-      }
-      // PhotoCallback needs to dereference on main thread.
-      mCallbacks.Clear();
-      return NS_OK;
-    }
-
-  protected:
-    nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
-    nsresult mRv;
-  };
-
-  if (aContext == UserContext::kInTakePicture) {
-    MonitorAutoLock lock(mMonitor);
-    if (mPhotoCallbacks.Length()) {
-      NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
-    }
-  }
 }
 
-void
-MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
-{
-  // It needs to start preview because Gonk camera will stop preview while
-  // taking picture.
-  mCameraControl->StartPreview();
-
-  // Create a main thread runnable to generate a blob and call all current queued
-  // PhotoCallbacks.
-  class GenerateBlobRunnable : public nsRunnable {
-  public:
-    GenerateBlobRunnable(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
-                         uint8_t* aData,
-                         uint32_t aLength,
-                         const nsAString& aMimeType)
-    {
-      mCallbacks.SwapElements(aCallbacks);
-      mPhoto.AppendElements(aData, aLength);
-      mMimeType = aMimeType;
-    }
-
-    NS_IMETHOD Run()
-    {
-      nsRefPtr<dom::File> blob =
-        dom::File::CreateMemoryFile(nullptr, mPhoto.Elements(), mPhoto.Length(), mMimeType);
-      uint32_t callbackCounts = mCallbacks.Length();
-      for (uint8_t i = 0; i < callbackCounts; i++) {
-        nsRefPtr<dom::File> tempBlob = blob;
-        mCallbacks[i]->PhotoComplete(tempBlob.forget());
-      }
-      // PhotoCallback needs to dereference on main thread.
-      mCallbacks.Clear();
-      return NS_OK;
-    }
-
-    nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
-    nsTArray<uint8_t> mPhoto;
-    nsString mMimeType;
-  };
-
-  // All elements in mPhotoCallbacks will be swapped in GenerateBlobRunnable
-  // constructor. This captured image will be sent to all the queued
-  // PhotoCallbacks in this runnable.
-  MonitorAutoLock lock(mMonitor);
-  if (mPhotoCallbacks.Length()) {
-    NS_DispatchToMainThread(
-      new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
-  }
-}
-
-uint32_t
-MediaEngineWebRTCVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
-{
-  switch (aFormat) {
-  case HAL_PIXEL_FORMAT_RGBA_8888:
-    return libyuv::FOURCC_BGRA;
-  case HAL_PIXEL_FORMAT_YCrCb_420_SP:
-    return libyuv::FOURCC_NV21;
-  case HAL_PIXEL_FORMAT_YV12:
-    return libyuv::FOURCC_YV12;
-  default: {
-    LOG((" xxxxx Unknown pixel format %d", aFormat));
-    MOZ_ASSERT(false, "Unknown pixel format.");
-    return libyuv::FOURCC_ANY;
-    }
-  }
-}
-
-void
-MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
-  layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
-  android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
-  void *pMem = nullptr;
-  uint32_t size = aWidth * aHeight * 3 / 2;
-
-  graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
-
-  uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
-  // Create a video frame and append it to the track.
-  nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
-  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
-  uint32_t dstWidth;
-  uint32_t dstHeight;
-
-  if (mRotation == 90 || mRotation == 270) {
-    dstWidth = aHeight;
-    dstHeight = aWidth;
-  } else {
-    dstWidth = aWidth;
-    dstHeight = aHeight;
-  }
-
-  uint32_t half_width = dstWidth / 2;
-  uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
-  libyuv::ConvertToI420(srcPtr, size,
-                        dstPtr, dstWidth,
-                        dstPtr + (dstWidth * dstHeight), half_width,
-                        dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
-                        0, 0,
-                        aWidth, aHeight,
-                        aWidth, aHeight,
-                        static_cast<libyuv::RotationMode>(mRotation),
-                        ConvertPixelFormatToFOURCC(graphicBuffer->getPixelFormat()));
-  graphicBuffer->unlock();
-
-  const uint8_t lumaBpp = 8;
-  const uint8_t chromaBpp = 4;
-
-  layers::PlanarYCbCrData data;
-  data.mYChannel = dstPtr;
-  data.mYSize = IntSize(dstWidth, dstHeight);
-  data.mYStride = dstWidth * lumaBpp / 8;
-  data.mCbCrStride = dstWidth * chromaBpp / 8;
-  data.mCbChannel = dstPtr + dstHeight * data.mYStride;
-  data.mCrChannel = data.mCbChannel +( dstHeight * data.mCbCrStride / 2);
-  data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
-  data.mPicX = 0;
-  data.mPicY = 0;
-  data.mPicSize = IntSize(dstWidth, dstHeight);
-  data.mStereoMode = StereoMode::MONO;
-
-  videoImage->SetDataNoCopy(data);
-
-  // implicitly releases last image
-  mImage = image.forget();
-}
-
-bool
-MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
-  {
-    ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-    if (mState == kStopped) {
-      return false;
-    }
-  }
-
-  MonitorAutoLock enter(mMonitor);
-  // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
-  RotateImage(aImage, aWidth, aHeight);
-  if (mRotation != 0 && mRotation != 180) {
-    uint32_t temp = aWidth;
-    aWidth = aHeight;
-    aHeight = temp;
-  }
-  if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
-    mWidth = aWidth;
-    mHeight = aHeight;
-    LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
-  }
-
-  return true; // return true because we're accepting the frame
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::TakePhoto(PhotoCallback* aCallback)
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  MonitorAutoLock lock(mMonitor);
-
-  // If other callback exists, that means there is a captured picture on the way,
-  // it doesn't need to TakePicture() again.
-  if (!mPhotoCallbacks.Length()) {
-    nsresult rv;
-    if (mOrientationChanged) {
-      UpdatePhotoOrientation();
-    }
-    rv = mCameraControl->TakePicture();
-    if (NS_FAILED(rv)) {
-      return rv;
-    }
-  }
-
-  mPhotoCallbacks.AppendElement(aCallback);
-
-  return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::UpdatePhotoOrientation()
-{
-  MOZ_ASSERT(NS_IsMainThread());
-
-  hal::ScreenConfiguration config;
-  hal::GetCurrentScreenConfiguration(&config);
-
-  // The rotation angle is clockwise.
-  int orientation = 0;
-  switch (config.orientation()) {
-    case eScreenOrientation_PortraitPrimary:
-      orientation = 0;
-      break;
-    case eScreenOrientation_PortraitSecondary:
-      orientation = 180;
-      break;
-   case eScreenOrientation_LandscapePrimary:
-      orientation = 270;
-      break;
-   case eScreenOrientation_LandscapeSecondary:
-      orientation = 90;
-      break;
-  }
-
-  // Front camera is inverse angle comparing to back camera.
-  orientation = (mBackCamera ? orientation : (-orientation));
-
-  ICameraControlParameterSetAutoEnter batch(mCameraControl);
-  // It changes the orientation value in EXIF information only.
-  mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
-
-  mOrientationChanged = false;
-
-  return NS_OK;
-}
-
-#endif
-
-}
+} // namespace mozilla
--- a/content/media/webrtc/moz.build
+++ b/content/media/webrtc/moz.build
@@ -3,40 +3,48 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 XPIDL_MODULE = 'content_webrtc'
 
 EXPORTS += [
     'MediaEngine.h',
+    'MediaEngineCameraVideoSource.h',
     'MediaEngineDefault.h',
     'MediaTrackConstraints.h',
 ]
 
 if CONFIG['MOZ_WEBRTC']:
     EXPORTS += ['AudioOutputObserver.h',
                 'MediaEngineWebRTC.h']
     UNIFIED_SOURCES += [
+        'MediaEngineCameraVideoSource.cpp',
         'MediaEngineTabVideoSource.cpp',
         'MediaEngineWebRTCAudio.cpp',
         'MediaEngineWebRTCVideo.cpp',
     ]
     # MediaEngineWebRTC.cpp needs to be built separately.
     SOURCES += [
         'MediaEngineWebRTC.cpp',
     ]
     LOCAL_INCLUDES += [
         '/dom/base',
         '/dom/camera',
         '/media/libyuv/include',
         '/media/webrtc/signaling/src/common',
         '/media/webrtc/signaling/src/common/browser_logging',
         '/media/webrtc/trunk',
     ]
+    # Gonk camera source.
+    if CONFIG['MOZ_B2G_CAMERA']:
+        EXPORTS += ['MediaEngineGonkVideoSource.h']
+        UNIFIED_SOURCES += [
+            'MediaEngineGonkVideoSource.cpp',
+        ]
 
 XPIDL_SOURCES += [
     'nsITabSource.idl'
 ]
 
 UNIFIED_SOURCES += [
     'MediaEngineDefault.cpp',
     'PeerIdentity.cpp',
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -1036,18 +1036,17 @@ static SourceSet *
   result->MoveElementsFrom(candidateSet);
   result->MoveElementsFrom(tailSet);
   return result.forget();
 }
 
 /**
  * Runs on a seperate thread and is responsible for enumerating devices.
  * Depending on whether a picture or stream was asked for, either
- * ProcessGetUserMedia or ProcessGetUserMediaSnapshot is called, and the results
- * are sent back to the DOM.
+ * ProcessGetUserMedia is called, and the results are sent back to the DOM.
  *
  * Do not run this on the main thread. The success and error callbacks *MUST*
  * be dispatched on the main thread!
  */
 class GetUserMediaTask : public Task
 {
 public:
   GetUserMediaTask(
@@ -1119,28 +1118,16 @@ public:
     // Was a device provided?
     if (!mDeviceChosen) {
       nsresult rv = SelectDevice(backend);
       if (rv != NS_OK) {
         return;
       }
     }
 
-    // It is an error if audio or video are requested along with picture.
-    if (mConstraints.mPicture &&
-        (IsOn(mConstraints.mAudio) || IsOn(mConstraints.mVideo))) {
-      Fail(NS_LITERAL_STRING("NOT_SUPPORTED_ERR"));
-      return;
-    }
-
-    if (mConstraints.mPicture) {
-      ProcessGetUserMediaSnapshot(mVideoDevice->GetSource(), 0);
-      return;
-    }
-
     // There's a bug in the permission code that can leave us with mAudio but no audio device
     ProcessGetUserMedia(((IsOn(mConstraints.mAudio) && mAudioDevice) ?
                          mAudioDevice->GetSource() : nullptr),
                         ((IsOn(mConstraints.mVideo) && mVideoDevice) ?
                          mVideoDevice->GetSource() : nullptr));
   }
 
   nsresult
@@ -1199,17 +1186,17 @@ public:
     return NS_OK;
   }
 
   nsresult
   SelectDevice(MediaEngine* backend)
   {
     MOZ_ASSERT(mSuccess);
     MOZ_ASSERT(mError);
-    if (mConstraints.mPicture || IsOn(mConstraints.mVideo)) {
+    if (IsOn(mConstraints.mVideo)) {
       VideoTrackConstraintsN constraints(GetInvariant(mConstraints.mVideo));
       ScopedDeletePtr<SourceSet> sources(GetSources(backend, constraints,
                                &MediaEngine::EnumerateVideoDevices));
 
       if (!sources->Length()) {
         Fail(NS_LITERAL_STRING("NO_DEVICES_FOUND"));
         return NS_ERROR_FAILURE;
       }
@@ -1276,48 +1263,16 @@ public:
     ));
 
     MOZ_ASSERT(!mSuccess);
     MOZ_ASSERT(!mError);
 
     return;
   }
 
-  /**
-   * Allocates a video device, takes a snapshot and returns a File via
-   * a SuccessRunnable or an error via the ErrorRunnable. Off the main thread.
-   */
-  void
-  ProcessGetUserMediaSnapshot(MediaEngineVideoSource* aSource, int aDuration)
-  {
-    MOZ_ASSERT(mSuccess);
-    MOZ_ASSERT(mError);
-    nsresult rv = aSource->Allocate(GetInvariant(mConstraints.mVideo), mPrefs);
-    if (NS_FAILED(rv)) {
-      Fail(NS_LITERAL_STRING("HARDWARE_UNAVAILABLE"));
-      return;
-    }
-
-    /**
-     * Display picture capture UI here before calling Snapshot() - Bug 748835.
-     */
-    nsCOMPtr<nsIDOMFile> file;
-    aSource->Snapshot(aDuration, getter_AddRefs(file));
-    aSource->Deallocate();
-
-    NS_DispatchToMainThread(new SuccessCallbackRunnable(
-      mSuccess, mError, file, mWindowID
-    ));
-
-    MOZ_ASSERT(!mSuccess);
-    MOZ_ASSERT(!mError);
-
-    return;
-  }
-
 private:
   MediaStreamConstraints mConstraints;
 
   nsCOMPtr<nsIDOMGetUserMediaSuccessCallback> mSuccess;
   nsCOMPtr<nsIDOMGetUserMediaErrorCallback> mError;
   uint64_t mWindowID;
   nsRefPtr<GetUserMediaCallbackMediaStreamListener> mListener;
   nsRefPtr<AudioDevice> mAudioDevice;
@@ -1589,45 +1544,16 @@ MediaManager::GetUserMedia(
 
   bool privileged = nsContentUtils::IsChromeDoc(aWindow->GetExtantDoc());
 
   nsCOMPtr<nsIDOMGetUserMediaSuccessCallback> onSuccess(aOnSuccess);
   nsCOMPtr<nsIDOMGetUserMediaErrorCallback> onError(aOnError);
 
   MediaStreamConstraints c(aConstraints); // copy
 
-  /**
-   * If we were asked to get a picture, before getting a snapshot, we check if
-   * the calling page is allowed to open a popup. We do this because
-   * {picture:true} will open a new "window" to let the user preview or select
-   * an image, on Android. The desktop UI for {picture:true} is TBD, at which
-   * may point we can decide whether to extend this test there as well.
-   */
-#if !defined(MOZ_WEBRTC)
-  if (c.mPicture && !privileged) {
-    if (aWindow->GetPopupControlState() > openControlled) {
-      nsCOMPtr<nsIPopupWindowManager> pm =
-        do_GetService(NS_POPUPWINDOWMANAGER_CONTRACTID);
-      if (!pm) {
-        return NS_OK;
-      }
-      uint32_t permission;
-      nsCOMPtr<nsIDocument> doc = aWindow->GetExtantDoc();
-      if (doc) {
-        pm->TestPermission(doc->NodePrincipal(), &permission);
-        if (permission == nsIPopupWindowManager::DENY_POPUP) {
-          aWindow->FirePopupBlockedEvent(doc, nullptr, EmptyString(),
-                                         EmptyString());
-          return NS_OK;
-        }
-      }
-    }
-  }
-#endif
-
   static bool created = false;
   if (!created) {
     // Force MediaManager to startup before we try to access it from other threads
     // Hack: should init singleton earlier unless it's expensive (mem or CPU)
     (void) MediaManager::Get();
 #ifdef MOZ_B2G
     // Initialize MediaPermissionManager before send out any permission request.
     (void) MediaPermissionManager::GetInstance();
@@ -1746,25 +1672,16 @@ MediaManager::GetUserMedia(
   }
 
 #ifdef MOZ_B2G_CAMERA
   if (mCameraManager == nullptr) {
     mCameraManager = nsDOMCameraManager::CreateInstance(aWindow);
   }
 #endif
 
-#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
-  if (c.mPicture) {
-    // ShowFilePickerForMimeType() must run on the Main Thread! (on Android)
-    // Note, GetUserMediaRunnableWrapper takes ownership of task.
-    NS_DispatchToMainThread(new GetUserMediaRunnableWrapper(task.forget()));
-    return NS_OK;
-  }
-#endif
-
   bool isLoop = false;
   nsCOMPtr<nsIURI> loopURI;
   nsresult rv = NS_NewURI(getter_AddRefs(loopURI), "about:loopconversation");
   NS_ENSURE_SUCCESS(rv, rv);
   rv = docURI->EqualsExceptRef(loopURI, &isLoop);
   NS_ENSURE_SUCCESS(rv, rv);
 
   if (isLoop) {