Backed out 3 changesets (bug 1421025, bug 1388219) for causing bug 1421706 on a CLOSED TREE.
author: Ryan VanderMeulen <ryanvm@gmail.com>
Wed, 29 Nov 2017 17:00:27 -0500
changeset 394145 91fc3a79606bdc7fb43fef12eff7e65b5b84c00e
parent 394144 698d4d2ed8c11406c4a6d4f82e6af9c5c49debaf
child 394217 60db335c172647ce1196d1f62aa066a003d8ffbf
push id: 32993
push user: ryanvm@gmail.com
push date: Wed, 29 Nov 2017 22:00:39 +0000
treeherder: mozilla-central@91fc3a79606b [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1421025, 1388219, 1421706
milestone: 59.0a1
backs out: 1a69438ec05f314572ff526291e0a1b0ea756953
213c2c200c08bee1cb84eb17a1b0ccb92dbb923c
341aaeb4ce69dbafbe10cc81d18e0aa1d49f3005
first release with
nightly linux32
91fc3a79606b / 59.0a1 / 20171129220149 / files
nightly linux64
91fc3a79606b / 59.0a1 / 20171129220149 / files
nightly mac
91fc3a79606b / 59.0a1 / 20171129220149 / files
nightly win32
91fc3a79606b / 59.0a1 / 20171129220149 / files
nightly win64
91fc3a79606b / 59.0a1 / 20171129220149 / files
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
releases
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 3 changesets (bug 1421025, bug 1388219) for causing bug 1421706 on a CLOSED TREE. Backed out changeset 1a69438ec05f (bug 1421025) Backed out changeset 213c2c200c08 (bug 1388219) Backed out changeset 341aaeb4ce69 (bug 1388219)
dom/media/systemservices/CamerasParent.cpp
dom/media/webrtc/MediaEngine.h
dom/media/webrtc/MediaEngineCameraVideoSource.cpp
dom/media/webrtc/MediaEngineCameraVideoSource.h
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.h
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaTrackConstraints.cpp
dom/media/webrtc/MediaTrackConstraints.h
media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
--- a/dom/media/systemservices/CamerasParent.cpp
+++ b/dom/media/systemservices/CamerasParent.cpp
@@ -47,17 +47,17 @@ ResolutionFeasibilityDistance(int32_t ca
 {
   // The purpose of this function is to find a smallest resolution
   // which is larger than all requested capabilities.
   // Then we can use down-scaling to fulfill each request.
   uint32_t distance;
   if (candidate >= requested) {
     distance = (candidate - requested) * 1000 / std::max(candidate, requested);
   } else {
-    distance = 10000 + (requested - candidate) *
+    distance = (UINT32_MAX / 2) + (requested - candidate) *
       1000 / std::max(candidate, requested);
   }
   return distance;
 }
 
 uint32_t
 FeasibilityDistance(int32_t candidate, int32_t requested)
 {
@@ -857,24 +857,24 @@ CamerasParent::RecvStartCapture(const Ca
           capability.width = ipcCaps.width();
           capability.height = ipcCaps.height();
           capability.maxFPS = ipcCaps.maxFPS();
           capability.expectedCaptureDelay = ipcCaps.expectedCaptureDelay();
           capability.rawType = static_cast<webrtc::RawVideoType>(ipcCaps.rawType());
           capability.codecType = static_cast<webrtc::VideoCodecType>(ipcCaps.codecType());
           capability.interlaced = ipcCaps.interlaced();
 
+          if (aCapEngine == CameraEngine) {
 #ifdef DEBUG
-          auto deviceUniqueID = sDeviceUniqueIDs.find(capnum);
-          MOZ_ASSERT(deviceUniqueID == sDeviceUniqueIDs.end());
+            auto deviceUniqueID = sDeviceUniqueIDs.find(capnum);
+            MOZ_ASSERT(deviceUniqueID == sDeviceUniqueIDs.end());
 #endif
-          sDeviceUniqueIDs.emplace(capnum, cap.VideoCapture()->CurrentDeviceName());
-          sAllRequestedCapabilities.emplace(capnum, capability);
+            sDeviceUniqueIDs.emplace(capnum, cap.VideoCapture()->CurrentDeviceName());
+            sAllRequestedCapabilities.emplace(capnum, capability);
 
-          if (aCapEngine == CameraEngine) {
             for (const auto &it : sDeviceUniqueIDs) {
               if (strcmp(it.second, cap.VideoCapture()->CurrentDeviceName()) == 0) {
                 capability.width = std::max(
                   capability.width, sAllRequestedCapabilities[it.first].width);
                 capability.height = std::max(
                   capability.height, sAllRequestedCapabilities[it.first].height);
                 capability.maxFPS = std::max(
                   capability.maxFPS, sAllRequestedCapabilities[it.first].maxFPS);
@@ -903,26 +903,16 @@ CamerasParent::RecvStartCapture(const Ca
                   candidateCapability.second.maxFPS, capability.maxFPS));
               if (distance < minDistance) {
                 minIdx = candidateCapability.first;;
                 minDistance = distance;
               }
             }
             MOZ_ASSERT(minIdx != -1);
             capability = candidateCapabilities->second[minIdx];
-          } else if (aCapEngine == ScreenEngine ||
-                     aCapEngine == BrowserEngine ||
-                     aCapEngine == WinEngine ||
-                     aCapEngine == AppEngine) {
-            for (const auto &it : sDeviceUniqueIDs) {
-              if (strcmp(it.second, cap.VideoCapture()->CurrentDeviceName()) == 0) {
-                capability.maxFPS = std::max(
-                  capability.maxFPS, sAllRequestedCapabilities[it.first].maxFPS);
-              }
-            }
           }
 
           error = cap.VideoCapture()->StartCapture(capability);
 
           if (!error) {
             cap.VideoCapture()->RegisterCaptureDataCallback(
               static_cast<rtc::VideoSinkInterface<webrtc::VideoFrame>*>(*cbh));
           }
@@ -954,24 +944,26 @@ CamerasParent::StopCapture(const Capture
 {
   if (auto engine = EnsureInitialized(aCapEngine)) {
     // we're removing elements, iterate backwards
     for (size_t i = mCallbacks.Length(); i > 0; i--) {
       if (mCallbacks[i - 1]->mCapEngine == aCapEngine &&
           mCallbacks[i - 1]->mStreamId == (uint32_t)capnum) {
 
         CallbackHelper* cbh = mCallbacks[i-1];
-        engine->WithEntry(capnum,[cbh, &capnum](VideoEngine::CaptureEntry& cap){
+        engine->WithEntry(capnum,[cbh, &capnum, &aCapEngine](VideoEngine::CaptureEntry& cap){
           if (cap.VideoCapture()) {
             cap.VideoCapture()->DeRegisterCaptureDataCallback(
               static_cast<rtc::VideoSinkInterface<webrtc::VideoFrame>*>(cbh));
             cap.VideoCapture()->StopCaptureIfAllClientsClose();
 
-            sDeviceUniqueIDs.erase(capnum);
-            sAllRequestedCapabilities.erase(capnum);
+            if (aCapEngine == CameraEngine) {
+              sDeviceUniqueIDs.erase(capnum);
+              sAllRequestedCapabilities.erase(capnum);
+            }
           }
         });
 
         delete mCallbacks[i - 1];
         mCallbacks.RemoveElementAt(i - 1);
         break;
       }
     }
--- a/dom/media/webrtc/MediaEngine.h
+++ b/dom/media/webrtc/MediaEngine.h
@@ -212,35 +212,30 @@ public:
   virtual bool GetScary() const { return false; };
 
   class AllocationHandle
   {
   public:
     NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AllocationHandle);
   protected:
     ~AllocationHandle() {}
-    static uint64_t sId;
   public:
     AllocationHandle(const dom::MediaTrackConstraints& aConstraints,
                      const mozilla::ipc::PrincipalInfo& aPrincipalInfo,
                      const MediaEnginePrefs& aPrefs,
                      const nsString& aDeviceId)
 
     : mConstraints(aConstraints),
       mPrincipalInfo(aPrincipalInfo),
       mPrefs(aPrefs),
-#ifdef MOZ_WEBRTC
-      mId(sId++),
-#endif
       mDeviceId(aDeviceId) {}
   public:
     NormalizedConstraints mConstraints;
     mozilla::ipc::PrincipalInfo mPrincipalInfo;
     MediaEnginePrefs mPrefs;
-    uint64_t mId;
     nsString mDeviceId;
   };
 
   /* Release the device back to the system. */
   virtual nsresult Deallocate(AllocationHandle* aHandle)
   {
     MOZ_ASSERT(aHandle);
     RefPtr<AllocationHandle> handle = aHandle;
@@ -366,17 +361,16 @@ protected:
    * aPrefs            - As passed in (in case of changes in about:config).
    * aDeviceId         - As passed in (origin dependent).
    * aOutBadConstraint - Result: nonzero if failed to apply. Name of culprit.
    */
 
   virtual nsresult
   UpdateSingleSource(const AllocationHandle* aHandle,
                      const NormalizedConstraints& aNetConstraints,
-                     const NormalizedConstraints& aNewConstraint,
                      const MediaEnginePrefs& aPrefs,
                      const nsString& aDeviceId,
                      const char** aOutBadConstraint) {
     return NS_ERROR_NOT_IMPLEMENTED;
   };
 
   /* ReevaluateAllocation - Call to change constraints for an allocation of
    * a single device. Manages allocation handles, calculates net constraints
@@ -395,42 +389,36 @@ protected:
                        NormalizedConstraints* aConstraintsUpdate,
                        const MediaEnginePrefs& aPrefs,
                        const nsString& aDeviceId,
                        const char** aOutBadConstraint)
   {
     // aHandle and/or aConstraintsUpdate may be nullptr (see below)
 
     AutoTArray<const NormalizedConstraints*, 10> allConstraints;
-    AutoTArray<const NormalizedConstraints*, 1> updatedConstraint;
     for (auto& registered : mRegisteredHandles) {
       if (aConstraintsUpdate && registered.get() == aHandle) {
         continue; // Don't count old constraints
       }
       allConstraints.AppendElement(&registered->mConstraints);
     }
     if (aConstraintsUpdate) {
       allConstraints.AppendElement(aConstraintsUpdate);
-      updatedConstraint.AppendElement(aConstraintsUpdate);
     } else if (aHandle) {
       // In the case of AddShareOfSingleSource, the handle isn't registered yet.
       allConstraints.AppendElement(&aHandle->mConstraints);
-      updatedConstraint.AppendElement(&aHandle->mConstraints);
-    } else {
-      updatedConstraint.AppendElements(allConstraints);
     }
 
     NormalizedConstraints netConstraints(allConstraints);
     if (netConstraints.mBadConstraint) {
       *aOutBadConstraint = netConstraints.mBadConstraint;
       return NS_ERROR_FAILURE;
     }
 
-    NormalizedConstraints newConstraint(updatedConstraint);
-    nsresult rv = UpdateSingleSource(aHandle, netConstraints, newConstraint, aPrefs, aDeviceId,
+    nsresult rv = UpdateSingleSource(aHandle, netConstraints, aPrefs, aDeviceId,
                                      aOutBadConstraint);
     if (NS_FAILED(rv)) {
       return rv;
     }
     if (aHandle && aConstraintsUpdate) {
       aHandle->mConstraints = *aConstraintsUpdate;
     }
     return NS_OK;
--- a/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.cpp
@@ -20,30 +20,20 @@ extern LogModule* GetMediaManagerLog();
 // guts for appending data to the MSG track
 bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
                                                  layers::Image* aImage,
                                                  TrackID aID,
                                                  StreamTime delta,
                                                  const PrincipalHandle& aPrincipalHandle)
 {
   MOZ_ASSERT(aSource);
-  MOZ_ASSERT(aImage);
-
-  if (!aImage) {
-    return 0;
-  }
 
   VideoSegment segment;
   RefPtr<layers::Image> image = aImage;
-  IntSize size = image->GetSize();
-
-  if (!size.width || !size.height) {
-    return 0;
-  }
-
+  IntSize size(image ? mWidth : 0, image ? mHeight : 0);
   segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
 
   // This is safe from any thread, and is safe if the track is Finished
   // or Destroyed.
   // This can fail if either a) we haven't added the track yet, or b)
   // we've removed or finished the track.
   return aSource->AppendToTrack(aID, &(segment));
 }
@@ -60,29 +50,16 @@ void
 MediaEngineCameraVideoSource::GetCapability(size_t aIndex,
                                             webrtc::CaptureCapability& aOut) const
 {
   MOZ_ASSERT(aIndex < mHardcodedCapabilities.Length());
   aOut = mHardcodedCapabilities.SafeElementAt(aIndex, webrtc::CaptureCapability());
 }
 
 uint32_t
-MediaEngineCameraVideoSource::GetDistance(
-    const webrtc::CaptureCapability& aCandidate,
-    const NormalizedConstraintSet &aConstraints,
-    const nsString& aDeviceId,
-    const DistanceCalculation aCalculate) const
-{
-  if (aCalculate == kFeasibility) {
-    return GetFeasibilityDistance(aCandidate, aConstraints, aDeviceId);
-  }
-  return GetFitnessDistance(aCandidate, aConstraints, aDeviceId);
-}
-
-uint32_t
 MediaEngineCameraVideoSource::GetFitnessDistance(
     const webrtc::CaptureCapability& aCandidate,
     const NormalizedConstraintSet &aConstraints,
     const nsString& aDeviceId) const
 {
   // Treat width|height|frameRate == 0 on capability as "can do any".
   // This allows for orthogonal capabilities that are not in discrete steps.
 
@@ -93,37 +70,16 @@ MediaEngineCameraVideoSource::GetFitness
                                                aConstraints.mWidth) : 0) +
     uint64_t(aCandidate.height? FitnessDistance(int32_t(aCandidate.height),
                                                 aConstraints.mHeight) : 0) +
     uint64_t(aCandidate.maxFPS? FitnessDistance(double(aCandidate.maxFPS),
                                                 aConstraints.mFrameRate) : 0);
   return uint32_t(std::min(distance, uint64_t(UINT32_MAX)));
 }
 
-uint32_t
-MediaEngineCameraVideoSource::GetFeasibilityDistance(
-    const webrtc::CaptureCapability& aCandidate,
-    const NormalizedConstraintSet &aConstraints,
-    const nsString& aDeviceId) const
-{
-  // Treat width|height|frameRate == 0 on capability as "can do any".
-  // This allows for orthogonal capabilities that are not in discrete steps.
-
-  uint64_t distance =
-    uint64_t(FitnessDistance(aDeviceId, aConstraints.mDeviceId)) +
-    uint64_t(FitnessDistance(mFacingMode, aConstraints.mFacingMode)) +
-    uint64_t(aCandidate.width? FeasibilityDistance(int32_t(aCandidate.width),
-                                               aConstraints.mWidth) : 0) +
-    uint64_t(aCandidate.height? FeasibilityDistance(int32_t(aCandidate.height),
-                                                aConstraints.mHeight) : 0) +
-    uint64_t(aCandidate.maxFPS? FeasibilityDistance(double(aCandidate.maxFPS),
-                                                aConstraints.mFrameRate) : 0);
-  return uint32_t(std::min(distance, uint64_t(UINT32_MAX)));
-}
-
 // Find best capability by removing inferiors. May leave >1 of equal distance
 
 /* static */ void
 MediaEngineCameraVideoSource::TrimLessFitCandidates(CapabilitySet& set) {
   uint32_t best = UINT32_MAX;
   for (auto& candidate : set) {
     if (best > candidate.mDistance) {
       best = candidate.mDistance;
@@ -257,19 +213,17 @@ MediaEngineCameraVideoSource::LogCapabil
                       uint32_t(sizeof(codec) / sizeof(*codec) - 1))],
        aDistance));
 }
 
 bool
 MediaEngineCameraVideoSource::ChooseCapability(
     const NormalizedConstraints &aConstraints,
     const MediaEnginePrefs &aPrefs,
-    const nsString& aDeviceId,
-    webrtc::CaptureCapability& aCapability,
-    const DistanceCalculation aCalculate)
+    const nsString& aDeviceId)
 {
   if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
     LOG(("ChooseCapability: prefs: %dx%d @%dfps",
          aPrefs.GetWidth(), aPrefs.GetHeight(),
          aPrefs.mFPS));
     LogConstraints(aConstraints);
     if (!aConstraints.mAdvanced.empty()) {
       LOG(("Advanced array[%zu]:", aConstraints.mAdvanced.size()));
@@ -287,17 +241,17 @@ MediaEngineCameraVideoSource::ChooseCapa
   }
 
   // First, filter capabilities by required constraints (min, max, exact).
 
   for (size_t i = 0; i < candidateSet.Length();) {
     auto& candidate = candidateSet[i];
     webrtc::CaptureCapability cap;
     GetCapability(candidate.mIndex, cap);
-    candidate.mDistance = GetDistance(cap, aConstraints, aDeviceId, aCalculate);
+    candidate.mDistance = GetFitnessDistance(cap, aConstraints, aDeviceId);
     LogCapability("Capability", cap, candidate.mDistance);
     if (candidate.mDistance == UINT32_MAX) {
       candidateSet.RemoveElementAt(i);
     } else {
       ++i;
     }
   }
 
@@ -309,17 +263,17 @@ MediaEngineCameraVideoSource::ChooseCapa
   // Filter further with all advanced constraints (that don't overconstrain).
 
   for (const auto &cs : aConstraints.mAdvanced) {
     CapabilitySet rejects;
     for (size_t i = 0; i < candidateSet.Length();) {
       auto& candidate = candidateSet[i];
       webrtc::CaptureCapability cap;
       GetCapability(candidate.mIndex, cap);
-      if (GetDistance(cap, cs, aDeviceId, aCalculate) == UINT32_MAX) {
+      if (GetFitnessDistance(cap, cs, aDeviceId) == UINT32_MAX) {
         rejects.AppendElement(candidate);
         candidateSet.RemoveElementAt(i);
       } else {
         ++i;
       }
     }
     if (!candidateSet.Length()) {
       candidateSet.AppendElements(Move(rejects));
@@ -340,39 +294,39 @@ MediaEngineCameraVideoSource::ChooseCapa
     prefs.mWidth.SetAsLong() = aPrefs.GetWidth();
     prefs.mHeight.SetAsLong() = aPrefs.GetHeight();
     prefs.mFrameRate.SetAsDouble() = aPrefs.mFPS;
     NormalizedConstraintSet normPrefs(prefs, false);
 
     for (auto& candidate : candidateSet) {
       webrtc::CaptureCapability cap;
       GetCapability(candidate.mIndex, cap);
-      candidate.mDistance = GetDistance(cap, normPrefs, aDeviceId, aCalculate);
+      candidate.mDistance = GetFitnessDistance(cap, normPrefs, aDeviceId);
     }
     TrimLessFitCandidates(candidateSet);
   }
 
   // Any remaining multiples all have the same distance, but may vary on
   // format. Some formats are more desirable for certain use like WebRTC.
   // E.g. I420 over RGB24 can remove a needless format conversion.
 
   bool found = false;
   for (auto& candidate : candidateSet) {
     webrtc::CaptureCapability cap;
     GetCapability(candidate.mIndex, cap);
     if (cap.rawType == webrtc::RawVideoType::kVideoI420 ||
         cap.rawType == webrtc::RawVideoType::kVideoYUY2 ||
         cap.rawType == webrtc::RawVideoType::kVideoYV12) {
-      aCapability = cap;
+      mCapability = cap;
       found = true;
       break;
     }
   }
   if (!found) {
-    GetCapability(candidateSet[0].mIndex, aCapability);
+    GetCapability(candidateSet[0].mIndex, mCapability);
   }
 
   LogCapability("Chosen capability", mCapability, sameDistance);
   return true;
 }
 
 void
 MediaEngineCameraVideoSource::SetName(nsString aName)
--- a/dom/media/webrtc/MediaEngineCameraVideoSource.h
+++ b/dom/media/webrtc/MediaEngineCameraVideoSource.h
@@ -19,29 +19,16 @@
 #include "webrtc/modules/video_capture/video_capture_defines.h"
 
 namespace webrtc {
   using CaptureCapability = VideoCaptureCapability;
 }
 
 namespace mozilla {
 
-// Fitness distance is defined in
-// https://www.w3.org/TR/2017/CR-mediacapture-streams-20171003/#dfn-selectsettings
-// The main difference of feasibility and fitness distance is that if the
-// constraint is required ('max', or 'exact'), and the settings dictionary's value
-// for the constraint does not satisfy the constraint, the fitness distance is
-// positive infinity. Given a continuous space of settings dictionaries comprising
-// all discrete combinations of dimension and frame-rate related properties,
-// the feasibility distance is still in keeping with the constraints algorithm.
-enum DistanceCalculation {
-  kFitness,
-  kFeasibility
-};
-
 class MediaEngineCameraVideoSource : public MediaEngineVideoSource
 {
 public:
   // Some subclasses use an index to track multiple instances.
   explicit MediaEngineCameraVideoSource(int aIndex,
                                         const char* aMonitorName = "Camera.Monitor")
     : MediaEngineVideoSource(kReleased)
     , mMonitor(aMonitorName)
@@ -94,40 +81,29 @@ protected:
   ~MediaEngineCameraVideoSource() {}
 
   // guts for appending data to the MSG track
   virtual bool AppendToTrack(SourceMediaStream* aSource,
                              layers::Image* aImage,
                              TrackID aID,
                              StreamTime delta,
                              const PrincipalHandle& aPrincipalHandle);
-  uint32_t GetDistance(const webrtc::CaptureCapability& aCandidate,
-                       const NormalizedConstraintSet &aConstraints,
-                       const nsString& aDeviceId,
-                       const DistanceCalculation aCalculate) const;
   uint32_t GetFitnessDistance(const webrtc::CaptureCapability& aCandidate,
                               const NormalizedConstraintSet &aConstraints,
                               const nsString& aDeviceId) const;
-  uint32_t GetFeasibilityDistance(const webrtc::CaptureCapability& aCandidate,
-                              const NormalizedConstraintSet &aConstraints,
-                              const nsString& aDeviceId) const;
   static void TrimLessFitCandidates(CapabilitySet& set);
   static void LogConstraints(const NormalizedConstraintSet& aConstraints);
   static void LogCapability(const char* aHeader,
                             const webrtc::CaptureCapability &aCapability,
                             uint32_t aDistance);
   virtual size_t NumCapabilities() const;
   virtual void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) const;
-  virtual bool ChooseCapability(
-    const NormalizedConstraints &aConstraints,
-    const MediaEnginePrefs &aPrefs,
-    const nsString& aDeviceId,
-    webrtc::CaptureCapability& aCapability,
-    const DistanceCalculation aCalculate
-  );
+  virtual bool ChooseCapability(const NormalizedConstraints &aConstraints,
+                                const MediaEnginePrefs &aPrefs,
+                                const nsString& aDeviceId);
   void SetName(nsString aName);
   void SetUUID(const char* aUUID);
   const nsCString& GetUUID() const; // protected access
 
   // Engine variables.
 
   // mMonitor protects mImage access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack() and
@@ -135,30 +111,25 @@ protected:
   // mMonitor also protects mSources[] and mPrincipalHandles[] access/changes.
   // mSources[] and mPrincipalHandles[] are accessed from webrtc threads.
 
   // All the mMonitor accesses are from the child classes.
   Monitor mMonitor; // Monitor for processing Camera frames.
   nsTArray<RefPtr<SourceMediaStream>> mSources; // When this goes empty, we shut down HW
   nsTArray<PrincipalHandle> mPrincipalHandles; // Directly mapped to mSources.
   RefPtr<layers::Image> mImage;
-  nsTArray<RefPtr<layers::Image>> mImages;
-  nsTArray<webrtc::CaptureCapability> mTargetCapabilities;
-  nsTArray<uint64_t> mHandleIds;
   RefPtr<layers::ImageContainer> mImageContainer;
   // end of data protected by mMonitor
 
   int mWidth, mHeight;
   bool mInitDone;
   int mCaptureIndex;
   TrackID mTrackID;
 
   webrtc::CaptureCapability mCapability;
-  webrtc::CaptureCapability mTargetCapability;
-  uint64_t mHandleId;
 
   mutable nsTArray<webrtc::CaptureCapability> mHardcodedCapabilities;
 private:
   nsString mDeviceName;
   nsCString mUniqueId;
   nsString mFacingMode;
 };
 
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -5,28 +5,23 @@
 
 #include "MediaEngineRemoteVideoSource.h"
 
 #include "mozilla/RefPtr.h"
 #include "VideoUtils.h"
 #include "nsIPrefService.h"
 #include "MediaTrackConstraints.h"
 #include "CamerasChild.h"
-#include "VideoFrameUtils.h"
-#include "webrtc/api/video/i420_buffer.h"
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 
 extern mozilla::LogModule* GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
 #define LOGFRAME(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 namespace mozilla {
 
-uint64_t MediaEngineCameraVideoSource::AllocationHandle::sId = 0;
-
 // These need a definition somewhere because template
 // code is allowed to take their address, and they aren't
 // guaranteed to have one without this.
 const unsigned int MediaEngineSource::kMaxDeviceNameLength;
 const unsigned int MediaEngineSource::kMaxUniqueIdLength;;
 
 using dom::ConstrainLongRange;
 
@@ -80,19 +75,16 @@ MediaEngineRemoteVideoSource::Shutdown()
     bool empty;
 
     while (1) {
       {
         MonitorAutoLock lock(mMonitor);
         empty = mSources.IsEmpty();
         if (empty) {
           MOZ_ASSERT(mPrincipalHandles.IsEmpty());
-          MOZ_ASSERT(mTargetCapabilities.IsEmpty());
-          MOZ_ASSERT(mHandleIds.IsEmpty());
-          MOZ_ASSERT(mImages.IsEmpty());
           break;
         }
         source = mSources[0];
       }
       Stop(source, kVideoTrack); // XXX change to support multiple tracks
     }
     MOZ_ASSERT(mState == kStopped);
   }
@@ -129,19 +121,16 @@ MediaEngineRemoteVideoSource::Allocate(
   if (NS_FAILED(rv)) {
     return rv;
   }
   if (mState == kStarted &&
       MOZ_LOG_TEST(GetMediaManagerLog(), mozilla::LogLevel::Debug)) {
     MonitorAutoLock lock(mMonitor);
     if (mSources.IsEmpty()) {
       MOZ_ASSERT(mPrincipalHandles.IsEmpty());
-      MOZ_ASSERT(mTargetCapabilities.IsEmpty());
-      MOZ_ASSERT(mHandleIds.IsEmpty());
-      MOZ_ASSERT(mImages.IsEmpty());
       LOG(("Video device %d reallocated", mCaptureIndex));
     } else {
       LOG(("Video device %d allocated shared", mCaptureIndex));
     }
   }
   return NS_OK;
 }
 
@@ -174,38 +163,30 @@ MediaEngineRemoteVideoSource::Start(Sour
 {
   LOG((__PRETTY_FUNCTION__));
   AssertIsOnOwningThread();
   if (!mInitDone || !aStream) {
     LOG(("No stream or init not done"));
     return NS_ERROR_FAILURE;
   }
 
-  mImageContainer =
-    layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
-
   {
     MonitorAutoLock lock(mMonitor);
     mSources.AppendElement(aStream);
     mPrincipalHandles.AppendElement(aPrincipalHandle);
-    mTargetCapabilities.AppendElement(mTargetCapability);
-    mHandleIds.AppendElement(mHandleId);
-    mImages.AppendElement(mImageContainer->CreatePlanarYCbCrImage());
-
     MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
-    MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
-    MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
-    MOZ_ASSERT(mSources.Length() == mImages.Length());
   }
 
   aStream->AddTrack(aID, 0, new VideoSegment(), SourceMediaStream::ADDTRACK_QUEUED);
 
   if (mState == kStarted) {
     return NS_OK;
   }
+  mImageContainer =
+    layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
 
   mState = kStarted;
   mTrackID = aID;
 
   if (mozilla::camera::GetChildAndCall(
     &mozilla::camera::CamerasChild::StartCapture,
     mCapEngine, mCaptureIndex, mCapability, this)) {
     LOG(("StartCapture failed"));
@@ -232,24 +213,18 @@ MediaEngineRemoteVideoSource::Stop(mozil
 
     size_t i = mSources.IndexOf(aSource);
     if (i == mSources.NoIndex) {
       // Already stopped - this is allowed
       return NS_OK;
     }
 
     MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
-    MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
-    MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
-    MOZ_ASSERT(mSources.Length() == mImages.Length());
     mSources.RemoveElementAt(i);
     mPrincipalHandles.RemoveElementAt(i);
-    mTargetCapabilities.RemoveElementAt(i);
-    mHandleIds.RemoveElementAt(i);
-    mImages.RemoveElementAt(i);
 
     aSource->EndTrack(aID);
 
     if (!mSources.IsEmpty()) {
       return NS_OK;
     }
     if (mState != kStarted) {
       return NS_ERROR_FAILURE;
@@ -282,84 +257,52 @@ MediaEngineRemoteVideoSource::Restart(Al
   return ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId,
                               aOutBadConstraint);
 }
 
 nsresult
 MediaEngineRemoteVideoSource::UpdateSingleSource(
     const AllocationHandle* aHandle,
     const NormalizedConstraints& aNetConstraints,
-    const NormalizedConstraints& aNewConstraint,
     const MediaEnginePrefs& aPrefs,
     const nsString& aDeviceId,
     const char** aOutBadConstraint)
 {
+  if (!ChooseCapability(aNetConstraints, aPrefs, aDeviceId)) {
+    *aOutBadConstraint = FindBadConstraint(aNetConstraints, *this, aDeviceId);
+    return NS_ERROR_FAILURE;
+  }
+
   switch (mState) {
     case kReleased:
       MOZ_ASSERT(aHandle);
-      mHandleId = aHandle->mId;
-      if (!ChooseCapability(aNetConstraints, aPrefs, aDeviceId, mCapability, kFitness)) {
-        *aOutBadConstraint = FindBadConstraint(aNetConstraints, *this, aDeviceId);
-        return NS_ERROR_FAILURE;
-      }
-      mTargetCapability = mCapability;
-
       if (camera::GetChildAndCall(&camera::CamerasChild::AllocateCaptureDevice,
                                   mCapEngine, GetUUID().get(),
                                   kMaxUniqueIdLength, mCaptureIndex,
                                   aHandle->mPrincipalInfo)) {
         return NS_ERROR_FAILURE;
       }
       mState = kAllocated;
       SetLastCapability(mCapability);
       LOG(("Video device %d allocated", mCaptureIndex));
       break;
 
     case kStarted:
-      {
-        size_t index = mHandleIds.NoIndex;
-        if (aHandle) {
-          mHandleId = aHandle->mId;
-          index = mHandleIds.IndexOf(mHandleId);
-        }
-
-        if (!ChooseCapability(aNewConstraint, aPrefs, aDeviceId, mTargetCapability,
-                              kFitness)) {
-          *aOutBadConstraint = FindBadConstraint(aNewConstraint, *this, aDeviceId);
+      if (mCapability != mLastCapability) {
+        camera::GetChildAndCall(&camera::CamerasChild::StopCapture,
+                                mCapEngine, mCaptureIndex);
+        if (camera::GetChildAndCall(&camera::CamerasChild::StartCapture,
+                                    mCapEngine, mCaptureIndex, mCapability,
+                                    this)) {
+          LOG(("StartCapture failed"));
           return NS_ERROR_FAILURE;
         }
-
-        if (index != mHandleIds.NoIndex) {
-          MonitorAutoLock lock(mMonitor);
-          mTargetCapabilities[index] = mTargetCapability;
-          MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
-          MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
-          MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
-          MOZ_ASSERT(mSources.Length() == mImages.Length());
-        }
-
-        if (!ChooseCapability(aNetConstraints, aPrefs, aDeviceId, mCapability,
-                              kFeasibility)) {
-          *aOutBadConstraint = FindBadConstraint(aNetConstraints, *this, aDeviceId);
-          return NS_ERROR_FAILURE;
-        }
-
-        if (mCapability != mLastCapability) {
-          camera::GetChildAndCall(&camera::CamerasChild::StopCapture,
-                                  mCapEngine, mCaptureIndex);
-          if (camera::GetChildAndCall(&camera::CamerasChild::StartCapture,
-                                      mCapEngine, mCaptureIndex, mCapability,
-                                      this)) {
-            LOG(("StartCapture failed"));
-            return NS_ERROR_FAILURE;
-          }
-          SetLastCapability(mCapability);
-        }
-        break;
+        SetLastCapability(mCapability);
       }
+      break;
 
     default:
       LOG(("Video device %d in ignored state %d", mCaptureIndex, mState));
       break;
   }
   return NS_OK;
 }
 
@@ -395,32 +338,28 @@ MediaEngineRemoteVideoSource::SetLastCap
 }
 
 void
 MediaEngineRemoteVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream* aSource,
                                          TrackID aID, StreamTime aDesiredTime,
                                          const PrincipalHandle& aPrincipalHandle)
 {
-  StreamTime delta = 0;
-  size_t i;
+  VideoSegment segment;
+
   MonitorAutoLock lock(mMonitor);
   if (mState != kStarted) {
     return;
   }
 
-  i = mSources.IndexOf(aSource);
-  if (i == mSources.NoIndex) {
-    return;
-  }
-
-  delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
+  StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
 
   if (delta > 0) {
-    AppendToTrack(aSource, mImages[i], aID, delta, aPrincipalHandle);
+    // nullptr images are allowed
+    AppendToTrack(aSource, mImage, aID, delta, aPrincipalHandle);
   }
 }
 
 void
 MediaEngineRemoteVideoSource::FrameSizeChange(unsigned int w, unsigned int h)
 {
   if ((mWidth < 0) || (mHeight < 0) ||
       (w !=  (unsigned int) mWidth) || (h != (unsigned int) mHeight)) {
@@ -433,137 +372,73 @@ MediaEngineRemoteVideoSource::FrameSizeC
       settings->mWidth.Value() = w;
       settings->mHeight.Value() = h;
       return NS_OK;
     }));
   }
 }
 
 int
-MediaEngineRemoteVideoSource::DeliverFrame(uint8_t* aBuffer,
+MediaEngineRemoteVideoSource::DeliverFrame(uint8_t* aBuffer ,
                                     const camera::VideoFrameProperties& aProps)
 {
-  MonitorAutoLock lock(mMonitor);
   // Check for proper state.
-  if (mState != kStarted || !mImageContainer) {
+  if (mState != kStarted) {
     LOG(("DeliverFrame: video not started"));
     return 0;
   }
 
   // Update the dimensions
   FrameSizeChange(aProps.width(), aProps.height());
 
-  MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
-  MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
-  MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
-  MOZ_ASSERT(mSources.Length() == mImages.Length());
-
-  for (uint32_t i = 0; i < mTargetCapabilities.Length(); i++ ) {
-    int32_t req_max_width = mTargetCapabilities[i].width & 0xffff;
-    int32_t req_max_height = mTargetCapabilities[i].height & 0xffff;
-    int32_t req_ideal_width = (mTargetCapabilities[i].width >> 16) & 0xffff;
-    int32_t req_ideal_height = (mTargetCapabilities[i].height >> 16) & 0xffff;
-
-    int32_t dest_max_width = std::min(req_max_width, mWidth);
-    int32_t dest_max_height = std::min(req_max_height, mHeight);
-    // This logic works for both camera and screen sharing case.
-    // for camera case, req_ideal_width and req_ideal_height is 0.
-    // The following snippet will set dst_width to dest_max_width and dst_height to dest_max_height
-    int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : mWidth, dest_max_width);
-    int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : mHeight, dest_max_height);
-
-    int dst_stride_y = dst_width;
-    int dst_stride_uv = (dst_width + 1) / 2;
-
-    camera::VideoFrameProperties properties;
-    uint8_t* frame;
-    bool needReScale = !((dst_width == mWidth && dst_height == mHeight) ||
-                         (dst_width > mWidth || dst_height > mHeight));
+  layers::PlanarYCbCrData data;
+  RefPtr<layers::PlanarYCbCrImage> image;
+  {
+    // We grab the lock twice, but don't hold it across the (long) CopyData
+    MonitorAutoLock lock(mMonitor);
+    if (!mImageContainer) {
+      LOG(("DeliverFrame() called after Stop()!"));
+      return 0;
+    }
+    // Create a video frame and append it to the track.
+    image = mImageContainer->CreatePlanarYCbCrImage();
 
-    if (!needReScale) {
-      dst_width = mWidth;
-      dst_height = mHeight;
-      frame = aBuffer;
-    } else {
-      rtc::scoped_refptr<webrtc::I420Buffer> i420Buffer;
-      i420Buffer = webrtc::I420Buffer::Create(mWidth, mHeight, mWidth,
-                                              (mWidth + 1) / 2, (mWidth + 1) / 2);
-
-      const int conversionResult = webrtc::ConvertToI420(webrtc::kI420,
-                                                         aBuffer,
-                                                         0, 0,  // No cropping
-                                                         mWidth, mHeight,
-                                                         mWidth * mHeight * 3 / 2,
-                                                         webrtc::kVideoRotation_0,
-                                                         i420Buffer.get());
-
-      webrtc::VideoFrame captureFrame(i420Buffer, 0, 0, webrtc::kVideoRotation_0);
-      if (conversionResult < 0) {
-        return 0;
-      }
-
-      rtc::scoped_refptr<webrtc::I420Buffer> scaledBuffer;
-      scaledBuffer = webrtc::I420Buffer::Create(dst_width, dst_height, dst_stride_y,
-                                                dst_stride_uv, dst_stride_uv);
-
-      scaledBuffer->CropAndScaleFrom(*captureFrame.video_frame_buffer().get());
-      webrtc::VideoFrame scaledFrame(scaledBuffer, 0, 0, webrtc::kVideoRotation_0);
-
-      VideoFrameUtils::InitFrameBufferProperties(scaledFrame, properties);
-      frame = new unsigned char[properties.bufferSize()];
-
-      if (!frame) {
-        return 0;
-      }
-
-      VideoFrameUtils::CopyVideoFrameBuffers(frame,
-                                             properties.bufferSize(), scaledFrame);
-    }
-
-    // Create a video frame and append it to the track.
-    RefPtr<layers::PlanarYCbCrImage> image = mImageContainer->CreatePlanarYCbCrImage();
-
+    uint8_t* frame = static_cast<uint8_t*> (aBuffer);
     const uint8_t lumaBpp = 8;
     const uint8_t chromaBpp = 4;
 
-    layers::PlanarYCbCrData data;
-
     // Take lots of care to round up!
     data.mYChannel = frame;
-    data.mYSize = IntSize(dst_width, dst_height);
-    data.mYStride = (dst_width * lumaBpp + 7) / 8;
-    data.mCbCrStride = (dst_width * chromaBpp + 7) / 8;
-    data.mCbChannel = frame + dst_height * data.mYStride;
-    data.mCrChannel = data.mCbChannel + ((dst_height + 1) / 2) * data.mCbCrStride;
-    data.mCbCrSize = IntSize((dst_width + 1) / 2, (dst_height + 1) / 2);
+    data.mYSize = IntSize(mWidth, mHeight);
+    data.mYStride = (mWidth * lumaBpp + 7)/ 8;
+    data.mCbCrStride = (mWidth * chromaBpp + 7) / 8;
+    data.mCbChannel = frame + mHeight * data.mYStride;
+    data.mCrChannel = data.mCbChannel + ((mHeight+1)/2) * data.mCbCrStride;
+    data.mCbCrSize = IntSize((mWidth+1)/ 2, (mHeight+1)/ 2);
     data.mPicX = 0;
     data.mPicY = 0;
-    data.mPicSize = IntSize(dst_width, dst_height);
+    data.mPicSize = IntSize(mWidth, mHeight);
     data.mStereoMode = StereoMode::MONO;
-
-    if (!image->CopyData(data)) {
-      MOZ_ASSERT(false);
-      return 0;
-    }
+  }
 
-    if (needReScale && frame) {
-      delete frame;
-      frame = nullptr;
-    }
+  if (!image->CopyData(data)) {
+    MOZ_ASSERT(false);
+    return 0;
+  }
 
+  MonitorAutoLock lock(mMonitor);
 #ifdef DEBUG
-    static uint32_t frame_num = 0;
-    LOGFRAME(("frame %d (%dx%d); timeStamp %u, ntpTimeMs %" PRIu64 ", renderTimeMs %" PRIu64,
-              frame_num++, mWidth, mHeight,
-              aProps.timeStamp(), aProps.ntpTimeMs(), aProps.renderTimeMs()));
+  static uint32_t frame_num = 0;
+  LOGFRAME(("frame %d (%dx%d); timeStamp %u, ntpTimeMs %" PRIu64 ", renderTimeMs %" PRIu64,
+            frame_num++, mWidth, mHeight,
+            aProps.timeStamp(), aProps.ntpTimeMs(), aProps.renderTimeMs()));
 #endif
 
-    // implicitly releases last image
-    mImages[i] = image.forget();
-  }
+  // implicitly releases last image
+  mImage = image.forget();
 
   // We'll push the frame into the MSG on the next NotifyPull. This will avoid
   // swamping the MSG with frames should it be taking longer than normal to run
   // an iteration.
 
   return 0;
 }
 
@@ -584,41 +459,38 @@ MediaEngineRemoteVideoSource::NumCapabil
   }
   return num;
 }
 
 bool
 MediaEngineRemoteVideoSource::ChooseCapability(
     const NormalizedConstraints &aConstraints,
     const MediaEnginePrefs &aPrefs,
-    const nsString& aDeviceId,
-    webrtc::CaptureCapability& aCapability,
-    const DistanceCalculation aCalculate)
+    const nsString& aDeviceId)
 {
   AssertIsOnOwningThread();
 
   switch(mMediaSource) {
     case dom::MediaSourceEnum::Screen:
     case dom::MediaSourceEnum::Window:
     case dom::MediaSourceEnum::Application: {
       FlattenedConstraints c(aConstraints);
       // The actual resolution to constrain around is not easy to find ahead of
       // time (and may in fact change over time), so as a hack, we push ideal
       // and max constraints down to desktop_capture_impl.cc and finish the
       // algorithm there.
-      aCapability.width =
-        (c.mWidth.mIdeal.valueOr(0) & 0xffff) << 16 | (c.mWidth.mMax & 0xffff);
-      aCapability.height =
-        (c.mHeight.mIdeal.valueOr(0) & 0xffff) << 16 | (c.mHeight.mMax & 0xffff);
-      aCapability.maxFPS =
-        c.mFrameRate.Clamp(c.mFrameRate.mIdeal.valueOr(aPrefs.mFPS));
+      mCapability.width = (c.mWidth.mIdeal.valueOr(0) & 0xffff) << 16 |
+                          (c.mWidth.mMax & 0xffff);
+      mCapability.height = (c.mHeight.mIdeal.valueOr(0) & 0xffff) << 16 |
+                           (c.mHeight.mMax & 0xffff);
+      mCapability.maxFPS = c.mFrameRate.Clamp(c.mFrameRate.mIdeal.valueOr(aPrefs.mFPS));
       return true;
     }
     default:
-      return MediaEngineCameraVideoSource::ChooseCapability(aConstraints, aPrefs, aDeviceId, aCapability, aCalculate);
+      return MediaEngineCameraVideoSource::ChooseCapability(aConstraints, aPrefs, aDeviceId);
   }
 
 }
 
 void
 MediaEngineRemoteVideoSource::GetCapability(size_t aIndex,
                                             webrtc::CaptureCapability& aOut) const
 {
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.h
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.h
@@ -79,22 +79,19 @@ public:
                   SourceMediaStream* aSource,
                   TrackID aId,
                   StreamTime aDesiredTime,
                   const PrincipalHandle& aPrincipalHandle) override;
   dom::MediaSourceEnum GetMediaSource() const override {
     return mMediaSource;
   }
 
-  bool ChooseCapability(
-    const NormalizedConstraints &aConstraints,
-    const MediaEnginePrefs &aPrefs,
-    const nsString& aDeviceId,
-    webrtc::CaptureCapability& aCapability,
-    const DistanceCalculation aCalculate) override;
+  bool ChooseCapability(const NormalizedConstraints &aConstraints,
+                        const MediaEnginePrefs &aPrefs,
+                        const nsString& aDeviceId) override;
 
   void Refresh(int aIndex);
 
   void Shutdown() override;
 
   bool GetScary() const override { return mScary; }
 
 protected:
@@ -105,17 +102,16 @@ private:
   void Init();
   size_t NumCapabilities() const override;
   void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) const override;
   void SetLastCapability(const webrtc::CaptureCapability& aCapability);
 
   nsresult
   UpdateSingleSource(const AllocationHandle* aHandle,
                      const NormalizedConstraints& aNetConstraints,
-                     const NormalizedConstraints& aNewConstraint,
                      const MediaEnginePrefs& aPrefs,
                      const nsString& aDeviceId,
                      const char** aOutBadConstraint) override;
 
   dom::MediaSourceEnum mMediaSource; // source of media (camera | application | screen)
   mozilla::camera::CaptureEngine mCapEngine;
 
   // To only restart camera when needed, we keep track previous settings.
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -561,17 +561,16 @@ public:
 
 protected:
   ~MediaEngineWebRTCMicrophoneSource() {}
 
 private:
   nsresult
   UpdateSingleSource(const AllocationHandle* aHandle,
                      const NormalizedConstraints& aNetConstraints,
-                     const NormalizedConstraints& aNewConstraint,
                      const MediaEnginePrefs& aPrefs,
                      const nsString& aDeviceId,
                      const char** aOutBadConstraint) override;
 
   void SetLastPrefs(const MediaEnginePrefs& aPrefs);
 
   // These allocate/configure and release the channel
   bool AllocChannel();
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -274,17 +274,16 @@ bool operator == (const MediaEnginePrefs
 {
   return !memcmp(&a, &b, sizeof(MediaEnginePrefs));
 };
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
     const AllocationHandle* aHandle,
     const NormalizedConstraints& aNetConstraints,
-    const NormalizedConstraints& aNewConstraint, /* Ignored */
     const MediaEnginePrefs& aPrefs,
     const nsString& aDeviceId,
     const char** aOutBadConstraint)
 {
   FlattenedConstraints c(aNetConstraints);
 
   MediaEnginePrefs prefs = aPrefs;
   prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
--- a/dom/media/webrtc/MediaTrackConstraints.cpp
+++ b/dom/media/webrtc/MediaTrackConstraints.cpp
@@ -412,38 +412,16 @@ MediaConstraintsHelper::FitnessDistance(
   }
   if (aN == aRange.mIdeal.valueOr(aN)) {
     return 0;
   }
   return uint32_t(ValueType((std::abs(aN - aRange.mIdeal.value()) * 1000) /
                             std::max(std::abs(aN), std::abs(aRange.mIdeal.value()))));
 }
 
-template<class ValueType, class NormalizedRange>
-/* static */ uint32_t
-MediaConstraintsHelper::FeasibilityDistance(ValueType aN,
-                                            const NormalizedRange& aRange)
-{
-  if (aRange.mMin > aN) {
-    return UINT32_MAX;
-  }
-  // We prefer larger resolution because now we support downscaling
-  if (aN == aRange.mIdeal.valueOr(aN)) {
-    return 0;
-  }
-
-  if (aN > aRange.mIdeal.value()) {
-    return uint32_t(ValueType((std::abs(aN - aRange.mIdeal.value()) * 1000) /
-      std::max(std::abs(aN), std::abs(aRange.mIdeal.value()))));
-  }
-
-  return 10000 + uint32_t(ValueType((std::abs(aN - aRange.mIdeal.value()) * 1000) /
-    std::max(std::abs(aN), std::abs(aRange.mIdeal.value()))));
-}
-
 // Fitness distance returned as integer math * 1000. Infinity = UINT32_MAX
 
 /* static */ uint32_t
 MediaConstraintsHelper::FitnessDistance(
     nsString aN,
     const NormalizedConstraintSet::StringRange& aParams)
 {
   if (!aParams.mExact.empty() && aParams.mExact.find(aN) == aParams.mExact.end()) {
--- a/dom/media/webrtc/MediaTrackConstraints.h
+++ b/dom/media/webrtc/MediaTrackConstraints.h
@@ -80,29 +80,22 @@ public:
     ValueType Clamp(ValueType n) const { return std::max(mMin, std::min(n, mMax)); }
     ValueType Get(ValueType defaultValue) const {
       return Clamp(mIdeal.valueOr(defaultValue));
     }
     bool Intersects(const Range& aOther) const {
       return mMax >= aOther.mMin && mMin <= aOther.mMax;
     }
     void Intersect(const Range& aOther) {
+      MOZ_ASSERT(Intersects(aOther));
       mMin = std::max(mMin, aOther.mMin);
-      if (Intersects(aOther)) {
-        mMax = std::min(mMax, aOther.mMax);
-      } else {
-        // If there is no intersection, we will down-scale or drop frame
-        mMax = std::max(mMax, aOther.mMax);
-      }
+      mMax = std::min(mMax, aOther.mMax);
     }
     bool Merge(const Range& aOther) {
-      if (strcmp(mName, "width") != 0 &&
-          strcmp(mName, "height") != 0 &&
-          strcmp(mName, "frameRate") != 0 &&
-          !Intersects(aOther)) {
+      if (!Intersects(aOther)) {
         return false;
       }
       Intersect(aOther);
 
       if (aOther.mIdeal.isSome()) {
         // Ideal values, as stored, may be outside their min max range, so use
         // clamped values in averaging, to avoid extreme outliers skewing results.
         if (mIdeal.isNothing()) {
@@ -299,18 +292,16 @@ struct FlattenedConstraints : public Nor
 
 // A helper class for MediaEngines
 
 class MediaConstraintsHelper
 {
 protected:
   template<class ValueType, class NormalizedRange>
   static uint32_t FitnessDistance(ValueType aN, const NormalizedRange& aRange);
-  template<class ValueType, class NormalizedRange>
-  static uint32_t FeasibilityDistance(ValueType aN, const NormalizedRange& aRange);
   static uint32_t FitnessDistance(nsString aN,
       const NormalizedConstraintSet::StringRange& aConstraint);
 
   static uint32_t
   GetMinimumFitnessDistance(const NormalizedConstraintSet &aConstraints,
                             const nsString& aDeviceId);
 
   template<class DeviceType>
--- a/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
+++ b/media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc
@@ -577,17 +577,55 @@ int32_t DesktopCaptureImpl::IncomingFram
     webrtc::VideoFrame captureFrame(buffer, 0, 0, kVideoRotation_0);
     if (conversionResult < 0) {
       WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
                    "Failed to convert capture frame from type %d to I420",
                    frameInfo.rawType);
       return -1;
     }
 
-    DeliverCapturedFrame(captureFrame, captureTime);
+    int32_t req_max_width = _requestedCapability.width & 0xffff;
+    int32_t req_max_height = _requestedCapability.height & 0xffff;
+    int32_t req_ideal_width = (_requestedCapability.width >> 16) & 0xffff;
+    int32_t req_ideal_height = (_requestedCapability.height >> 16) & 0xffff;
+
+    int32_t dest_max_width = std::min(req_max_width, target_width);
+    int32_t dest_max_height = std::min(req_max_height, target_height);
+    int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : target_width, dest_max_width);
+    int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : target_height, dest_max_height);
+
+    // scale to average of portrait and landscape
+    float scale_width = (float)dst_width / (float)target_width;
+    float scale_height = (float)dst_height / (float)target_height;
+    float scale = (scale_width + scale_height) / 2;
+    dst_width = (int)(scale * target_width);
+    dst_height = (int)(scale * target_height);
+
+    // if scaled rectangle exceeds max rectangle, scale to minimum of portrait and landscape
+    if (dst_width > dest_max_width || dst_height > dest_max_height) {
+      scale_width = (float)dest_max_width / (float)dst_width;
+      scale_height = (float)dest_max_height / (float)dst_height;
+      scale = std::min(scale_width, scale_height);
+      dst_width = (int)(scale * dst_width);
+      dst_height = (int)(scale * dst_height);
+    }
+
+    int dst_stride_y = dst_width;
+    int dst_stride_uv = (dst_width + 1) / 2;
+    if (dst_width == target_width && dst_height == target_height) {
+      DeliverCapturedFrame(captureFrame, captureTime);
+    } else {
+      rtc::scoped_refptr<webrtc::I420Buffer> buffer;
+      buffer = I420Buffer::Create(dst_width, dst_height, dst_stride_y,
+                                  dst_stride_uv, dst_stride_uv);
+
+      buffer->ScaleFrom(*captureFrame.video_frame_buffer().get());
+      webrtc::VideoFrame scaledFrame(buffer, 0, 0, kVideoRotation_0);
+      DeliverCapturedFrame(scaledFrame, captureTime);
+    }
   } else {
     assert(false);
     return -1;
   }
 
   const int64_t processTime =
     (rtc::TimeNanos() - startProcessTime)/rtc::kNumNanosecsPerMillisec;