Bug 1434861 - Simplify rescaling code in MediaEngineRemoteVideoSource::DeliverFrame. r=jib
author: Andreas Pehrson <pehrsons@mozilla.com>
Thu, 01 Feb 2018 16:06:19 +0100
changeset 402775 8932202ed0b9ada7c3fd14e0c69c9571744eb56a
parent 402774 fd7764a996d8af48a09bd4c2e587daaf64dc2085
child 402776 c6e90b3e141fc1d47bc3ffb0bb673b78e171030b
push id: 99659
push user: aciure@mozilla.com
push date: Wed, 07 Feb 2018 22:33:57 +0000
treeherder: mozilla-inbound@5ceb1098fef3 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: jib
bugs: 1434861
milestone: 60.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1434861 - Simplify rescaling code in MediaEngineRemoteVideoSource::DeliverFrame. r=jib Most importantly, this reduces the number of copies to 1 in the common case. In a case where we are rescaling because there are competing gUM requests this does two copies, where one is the crop-and-scale operation itself. In the worst case we do two allocations, but with a buffer pool and a recycling ImageContainer we allocate very rarely in practice. MozReview-Commit-ID: B0Et4wZol9n
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.h
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
@@ -8,17 +8,17 @@
 #include "AllocationHandle.h"
 #include "CamerasChild.h"
 #include "MediaManager.h"
 #include "MediaTrackConstraints.h"
 #include "mozilla/RefPtr.h"
 #include "nsIPrefService.h"
 #include "VideoFrameUtils.h"
 #include "VideoUtils.h"
-#include "webrtc/api/video/i420_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 
 mozilla::LogModule* GetMediaManagerLog();
 #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
 #define LOGFRAME(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 namespace mozilla {
 
@@ -34,16 +34,18 @@ MediaEngineRemoteVideoSource::MediaEngin
     camera::CaptureEngine aCapEngine,
     MediaSourceEnum aMediaSource,
     bool aScary)
   : mCaptureIndex(aIndex)
   , mMediaSource(aMediaSource)
   , mCapEngine(aCapEngine)
   , mScary(aScary)
   , mMutex("MediaEngineRemoteVideoSource::mMutex")
+  , mRescalingBufferPool(/* zero_initialize */ false,
+                         /* max_number_of_buffers */ 1)
   , mSettings(MakeAndAddRef<media::Refcountable<MediaTrackSettings>>())
 {
   MOZ_ASSERT(aMediaSource != MediaSourceEnum::Other);
   Init();
 }
 
 void
 MediaEngineRemoteVideoSource::Init()
@@ -227,18 +229,19 @@ MediaEngineRemoteVideoSource::Deallocate
     mStream = nullptr;
     mTrackID = TRACK_NONE;
     mPrincipal = PRINCIPAL_HANDLE_NONE;
     mState = kReleased;
   }
 
   // Stop() has stopped capture synchronously on the media thread before we get
   // here, so there are no longer any callbacks on an IPC thread accessing
-  // mImageContainer.
+  // mImageContainer or mRescalingBufferPool.
   mImageContainer = nullptr;
+  mRescalingBufferPool.Release();
 
   LOG(("Video device %d deallocated", mCaptureIndex));
 
   if (camera::GetChildAndCall(&camera::CamerasChild::ReleaseCaptureDevice,
                               mCapEngine, mCaptureIndex)) {
     MOZ_ASSERT_UNREACHABLE("Couldn't release allocated device");
   }
   return NS_OK;
@@ -460,103 +463,71 @@ MediaEngineRemoteVideoSource::DeliverFra
     MutexAutoLock lock(mMutex);
     MOZ_ASSERT(mState == kStarted);
     req_max_width = mCapability.width & 0xffff;
     req_max_height = mCapability.height & 0xffff;
     req_ideal_width = (mCapability.width >> 16) & 0xffff;
     req_ideal_height = (mCapability.height >> 16) & 0xffff;
   }
 
-  int32_t dest_max_width = std::min(req_max_width, aProps.width());
-  int32_t dest_max_height = std::min(req_max_height, aProps.height());
+  int32_t dst_max_width = std::min(req_max_width, aProps.width());
+  int32_t dst_max_height = std::min(req_max_height, aProps.height());
   // This logic works for both camera and screen sharing case.
   // for camera case, req_ideal_width and req_ideal_height is 0.
-  // The following snippet will set dst_width to dest_max_width and dst_height to dest_max_height
-  int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : aProps.width(), dest_max_width);
-  int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : aProps.height(), dest_max_height);
-
-  int dst_stride_y = dst_width;
-  int dst_stride_uv = (dst_width + 1) / 2;
-
-  camera::VideoFrameProperties properties;
-  UniquePtr<uint8_t []> frameBuf;
-  uint8_t* frame;
-  bool needReScale = (dst_width != aProps.width() ||
-                      dst_height != aProps.height()) &&
-                     dst_width <= aProps.width() &&
-                     dst_height <= aProps.height();
+  // The following snippet will set dst_width to dst_max_width and dst_height to dst_max_height
+  int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : aProps.width(), dst_max_width);
+  int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : aProps.height(), dst_max_height);
 
-  if (!needReScale) {
-    dst_width = aProps.width();
-    dst_height = aProps.height();
-    frame = aBuffer;
-  } else {
-    rtc::scoped_refptr<webrtc::I420Buffer> i420Buffer;
-    i420Buffer = webrtc::I420Buffer::Create(aProps.width(),
-                                            aProps.height(),
-                                            aProps.width(),
-                                            (aProps.width() + 1) / 2,
-                                            (aProps.width() + 1) / 2);
+  rtc::Callback0<void> callback_unused;
+  rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
+    new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
+      aProps.width(),
+      aProps.height(),
+      aBuffer,
+      aProps.yStride(),
+      aBuffer + aProps.yAllocatedSize(),
+      aProps.uStride(),
+      aBuffer + aProps.yAllocatedSize() + aProps.uAllocatedSize(),
+      aProps.vStride(),
+      callback_unused);
 
-    const int conversionResult = webrtc::ConvertToI420(webrtc::kI420,
-                                                       aBuffer,
-                                                       0, 0,  // No cropping
-                                                       aProps.width(), aProps.height(),
-                                                       aProps.width() * aProps.height() * 3 / 2,
-                                                       webrtc::kVideoRotation_0,
-                                                       i420Buffer.get());
-
-    webrtc::VideoFrame captureFrame(i420Buffer, 0, 0, webrtc::kVideoRotation_0);
-    if (conversionResult < 0) {
+  if ((dst_width != aProps.width() || dst_height != aProps.height()) &&
+      dst_width <= aProps.width() &&
+      dst_height <= aProps.height()) {
+    // Destination resolution is smaller than source buffer. We'll rescale.
+    rtc::scoped_refptr<webrtc::I420Buffer> scaledBuffer =
+      mRescalingBufferPool.CreateBuffer(dst_width, dst_height);
+    if (!scaledBuffer) {
+      MOZ_ASSERT_UNREACHABLE("We might fail to allocate a buffer, but with this "
+                             "being a recycling pool that shouldn't happen");
       return 0;
     }
-
-    rtc::scoped_refptr<webrtc::I420Buffer> scaledBuffer;
-    scaledBuffer = webrtc::I420Buffer::Create(dst_width, dst_height, dst_stride_y,
-                                              dst_stride_uv, dst_stride_uv);
-
-    scaledBuffer->CropAndScaleFrom(*captureFrame.video_frame_buffer().get());
-    webrtc::VideoFrame scaledFrame(scaledBuffer, 0, 0, webrtc::kVideoRotation_0);
-
-    VideoFrameUtils::InitFrameBufferProperties(scaledFrame, properties);
-    frameBuf.reset(new (fallible) uint8_t[properties.bufferSize()]);
-    frame = frameBuf.get();
-
-    if (!frame) {
-      return 0;
-    }
-
-    VideoFrameUtils::CopyVideoFrameBuffers(frame,
-                                           properties.bufferSize(), scaledFrame);
+    scaledBuffer->CropAndScaleFrom(*buffer);
+    buffer = scaledBuffer;
   }
 
-  // Create a video frame and append it to the track.
+  layers::PlanarYCbCrData data;
+  data.mYChannel = const_cast<uint8_t*>(buffer->DataY());
+  data.mYSize = IntSize(buffer->width(), buffer->height());
+  data.mYStride = buffer->StrideY();
+  MOZ_ASSERT(buffer->StrideU() == buffer->StrideV());
+  data.mCbCrStride = buffer->StrideU();
+  data.mCbChannel = const_cast<uint8_t*>(buffer->DataU());
+  data.mCrChannel = const_cast<uint8_t*>(buffer->DataV());
+  data.mCbCrSize = IntSize((buffer->width() + 1) / 2,
+                           (buffer->height() + 1) / 2);
+  data.mPicX = 0;
+  data.mPicY = 0;
+  data.mPicSize = IntSize(buffer->width(), buffer->height());
+
   RefPtr<layers::PlanarYCbCrImage> image =
     mImageContainer->CreatePlanarYCbCrImage();
-
-  const uint8_t lumaBpp = 8;
-  const uint8_t chromaBpp = 4;
-
-  layers::PlanarYCbCrData data;
-
-  // Take lots of care to round up!
-  data.mYChannel = frame;
-  data.mYSize = IntSize(dst_width, dst_height);
-  data.mYStride = (dst_width * lumaBpp + 7) / 8;
-  data.mCbCrStride = (dst_width * chromaBpp + 7) / 8;
-  data.mCbChannel = frame + dst_height * data.mYStride;
-  data.mCrChannel = data.mCbChannel + ((dst_height + 1) / 2) * data.mCbCrStride;
-  data.mCbCrSize = IntSize((dst_width + 1) / 2, (dst_height + 1) / 2);
-  data.mPicX = 0;
-  data.mPicY = 0;
-  data.mPicSize = IntSize(dst_width, dst_height);
-  data.mStereoMode = StereoMode::MONO;
-
   if (!image->CopyData(data)) {
-    MOZ_ASSERT(false);
+    MOZ_ASSERT_UNREACHABLE("We might fail to allocate a buffer, but with this "
+                           "being a recycling container that shouldn't happen");
     return 0;
   }
 
 #ifdef DEBUG
   static uint32_t frame_num = 0;
   LOGFRAME(("frame %d (%dx%d)->(%dx%d); timeStamp %u, ntpTimeMs %" PRIu64 ", renderTimeMs %" PRIu64,
             frame_num++, aProps.width(), aProps.height(), dst_width, dst_height,
             aProps.timeStamp(), aProps.ntpTimeMs(), aProps.renderTimeMs()));
--- a/dom/media/webrtc/MediaEngineRemoteVideoSource.h
+++ b/dom/media/webrtc/MediaEngineRemoteVideoSource.h
@@ -32,16 +32,17 @@
 #include "mozilla/dom/MediaStreamTrackBinding.h"
 
 // Camera Access via IPC
 #include "CamerasChild.h"
 
 #include "NullTransport.h"
 
 // WebRTC includes
+#include "webrtc/common_video/include/i420_buffer_pool.h"
 #include "webrtc/modules/video_capture/video_capture_defines.h"
 
 namespace webrtc {
 using CaptureCapability = VideoCaptureCapability;
 }
 
 namespace mozilla {
 
@@ -203,16 +204,20 @@ private:
   // Accessed in DeliverFrame() on the camera IPC thread, guaranteed to happen
   // after Start() and before the end of Stop().
   RefPtr<layers::ImageContainer> mImageContainer;
 
   // The latest frame delivered from the video capture backend.
   // Protected by mMutex.
   RefPtr<layers::Image> mImage;
 
+  // A buffer pool used to manage the temporary buffer used when rescaling
+  // incoming images. Cameras IPC thread only.
+  webrtc::I420BufferPool mRescalingBufferPool;
+
   // The intrinsic size of the latest captured image, so we can feed black
   // images of the same size while stopped.
   // Set under mMutex on the owning thread. Accessed under one of the two.
   gfx::IntSize mImageSize = gfx::IntSize(0, 0);
 
   // The current settings of this source.
   // Note that these may be different from the settings of the underlying device
   // since we scale frames to avoid fingerprinting.