Bug 911046 - Part 2: Support 'handle-using' video frames for WebRTC on B2G. r=jesup, ekr
author John Lin <jolin@mozilla.com>
Mon, 21 Apr 2014 23:41:00 +0200
changeset 198002 5f761a1905ca31ae767aa25eeba3554658cf06c8
parent 198001 29791e7f84ba8ba70e68cf3087a997425456b194
child 198003 091d1a5fd0aa7544c46c67d4810d8ee58ab11704
push id 3624
push user asasaki@mozilla.com
push date Mon, 09 Jun 2014 21:49:01 +0000
treeherder mozilla-beta@b1a5da15899a
reviewers jesup, ekr
bugs 911046
milestone 31.0a1
Bug 911046 - Part 2: Support 'handle-using' video frames for WebRTC on B2G. r=jesup, ekr
media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
media/webrtc/signaling/src/media-conduit/VideoConduit.h
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
media/webrtc/signaling/test/mediaconduit_unittests.cpp
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -8,16 +8,18 @@
 #include "nsISupportsImpl.h"
 #include "nsXPCOM.h"
 #include "nsDOMNavigationTiming.h"
 #include "mozilla/RefPtr.h"
 #include "CodecConfig.h"
 #include "VideoTypes.h"
 #include "MediaConduitErrors.h"
 
+#include "ImageContainer.h"
+
 #include <vector>
 
 namespace mozilla {
 /**
  * Abstract Interface for transporting RTP packets - audio/video
  * The consumers of this interface are responsible for passing in
  * the RTPfied media packets
  */
@@ -39,16 +41,30 @@ public:
    * @param data : RTCP Packet to be transported
    * @param len  : Length of the RTCP packet
    * @result     : NS_OK on success, NS_ERROR_FAILURE otherwise
    */
   virtual nsresult SendRtcpPacket(const void* data, int len) = 0;
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TransportInterface)
 };
 
+/**
+ * This class wraps an image object for the VideoRenderer::RenderVideoFrame()
+ * callback implementation to use for rendering.
+ */
+class ImageHandle
+{
+public:
+  ImageHandle(layers::Image* image) : mImage(image) {}
+
+  const RefPtr<layers::Image>& GetImage() const { return mImage; }
+
+private:
+  RefPtr<layers::Image> mImage;
+};
 
 /**
  * 1. Abstract renderer for video data
  * 2. This class acts as abstract interface between the video-engine and
  *    video-engine agnostic renderer implementation.
  * 3. Concrete implementation of this interface is responsible for
  *    processing and/or rendering the obtained raw video frame to appropriate
  *    output, say, <video>
@@ -70,26 +86,31 @@ class VideoRenderer
 
   /**
    * Callback Function reporting decoded I420 frame for processing.
    * @param buffer: pointer to decoded video frame
    * @param buffer_size: size of the decoded frame
    * @param time_stamp: Decoder timestamp, typically 90KHz as per RTP
    * @render_time: Wall-clock time at the decoder for synchronization
    *                purposes in milliseconds
-   * NOTE: It is the responsibility of the concrete implementations of this
-   * class to own copy of the frame if needed for time longer than scope of
-   * this callback.
+   * @handle: opaque handle to the image object of the decoded video frame.
+   * NOTE: If the decoded video frame is passed through |buffer|, it is the
+   * responsibility of the concrete implementations of this class to own a
+   * copy of the frame if it is needed beyond the scope of this callback.
    * Such implementations should be quick in processing the frames and return
    * immediately.
+   * On the other hand, if the decoded video frame is passed through |handle|,
+   * implementations should keep a reference to the (ref-counted) image object
+   * inside it until it is no longer needed.
    */
   virtual void RenderVideoFrame(const unsigned char* buffer,
                                 unsigned int buffer_size,
                                 uint32_t time_stamp,
-                                int64_t render_time) = 0;
+                                int64_t render_time,
+                                const ImageHandle& handle) = 0;
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoRenderer)
 };
 
 
 /**
  * Generic Interface for representing Audio/Video Session
  * MediaSession conduit is identified by 2 main components
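
A minimal sketch of a VideoRenderer implementation honoring the contract
documented above: buffer-delivered frames must be copied before the callback
returns, while handle-delivered frames only need a reference to the
ref-counted image. The MyRenderer class and the CopyI420Frame() helper are
hypothetical, for illustration only.

  class MyRenderer : public mozilla::VideoRenderer
  {
  public:
    virtual void RenderVideoFrame(const unsigned char* buffer,
                                  unsigned int buffer_size,
                                  uint32_t time_stamp,
                                  int64_t render_time,
                                  const mozilla::ImageHandle& handle)
    {
      if (buffer) {
        // The I420 data is only valid during this call; copy it now.
        CopyI420Frame(buffer, buffer_size);   // hypothetical helper
      } else {
        // Handle-delivered frame: holding the RefPtr keeps the
        // ref-counted image alive until it has been rendered.
        mImage = handle.GetImage();
      }
    }

  private:
    mozilla::RefPtr<mozilla::layers::Image> mImage;
  };
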
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
@@ -9,16 +9,17 @@
 #include "ccsdp.h"
 
 #include "VideoConduit.h"
 #include "AudioConduit.h"
 #include "nsThreadUtils.h"
 
 #include "LoadManager.h"
 
+#include "webrtc/common_video/interface/native_handle.h"
 #include "webrtc/video_engine/include/vie_errors.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidJNIWrapper.h"
 #endif
 
 #include <algorithm>
 #include <math.h>
@@ -1065,17 +1066,27 @@ WebrtcVideoConduit::DeliverFrame(unsigne
                                  uint32_t time_stamp,
                                  int64_t render_time,
                                  void *handle)
 {
   CSFLogDebug(logTag,  "%s Buffer Size %d", __FUNCTION__, buffer_size);
 
   if(mRenderer)
   {
-    mRenderer->RenderVideoFrame(buffer, buffer_size, time_stamp, render_time);
+    layers::Image* img = nullptr;
+    // |handle| should be a webrtc::NativeHandle if available.
+    if (handle) {
+      webrtc::NativeHandle* native_h = static_cast<webrtc::NativeHandle*>(handle);
+      // The native handle should contain a layers::Image.
+      img = static_cast<layers::Image*>(native_h->GetHandle());
+    }
+
+    const ImageHandle img_h(img);
+    mRenderer->RenderVideoFrame(buffer, buffer_size, time_stamp, render_time,
+                                img_h);
     return 0;
   }
 
   CSFLogError(logTag,  "%s Renderer is NULL  ", __FUNCTION__);
   return -1;
 }
 
 /**
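
For the producer side, a decoder that outputs graphic buffers would wrap its
layers::Image in a webrtc::NativeHandle subclass so that DeliverFrame() above
can unwrap it. The ImageNativeHandle class below is a hypothetical sketch;
the exact set of virtuals to override should be checked against
webrtc/common_video/interface/native_handle.h.

  // Hypothetical wrapper: owns a reference to the image and exposes it
  // as the opaque void* that WebrtcVideoConduit::DeliverFrame() unwraps.
  class ImageNativeHandle : public webrtc::NativeHandle
  {
  public:
    explicit ImageNativeHandle(mozilla::layers::Image* aImage)
      : mImage(aImage) {}

    virtual void* GetHandle() { return mImage.get(); }

  private:
    mozilla::RefPtr<mozilla::layers::Image> mImage;
  };
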
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -177,19 +177,26 @@ public:
   virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
 
   virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t,
                            void *handle);
 
   /**
    * Does DeliverFrame() support a null buffer and non-null handle
    * (video texture)?
-   * XXX Investigate!  Especially for Android/B2G
+   * B2G supports it (when using a HW video decoder with graphic buffer output).
+   * XXX Investigate!  Especially for Android
    */
-  virtual bool IsTextureSupported() { return false; }
+  virtual bool IsTextureSupported() {
+#ifdef WEBRTC_GONK
+    return true;
+#else
+    return false;
+#endif
+  }
 
   unsigned short SendingWidth() {
     return mSendingWidth;
   }
 
   unsigned short SendingHeight() {
     return mSendingHeight;
   }
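
The two DeliverFrame() calling conventions implied by this switch can be
summarized as in the sketch below (illustrative only; the engine queries
IsTextureSupported() internally, and the variable names are placeholders):

  if (conduit->IsTextureSupported()) {
    // B2G/gonk: |buffer| may be null and |handle| carries the decoded image.
    conduit->DeliverFrame(nullptr, 0, time_stamp, render_time, native_handle);
  } else {
    // Other platforms: always an I420 buffer, |handle| is null.
    conduit->DeliverFrame(i420_buffer, buffer_size, time_stamp, render_time,
                          nullptr);
  }
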
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -18,17 +18,17 @@
 #include "srtp.h"
 
 #ifdef MOZILLA_INTERNAL_API
 #include "VideoSegment.h"
 #include "Layers.h"
 #include "ImageTypes.h"
 #include "ImageContainer.h"
 #include "VideoUtils.h"
-#ifdef MOZ_WIDGET_GONK
+#ifdef WEBRTC_GONK
 #include "GrallocImages.h"
 #include "mozilla/layers/GrallocTextureClient.h"
 #endif
 #endif
 
 #include "nsError.h"
 #include "AudioSegment.h"
 #include "MediaSegment.h"
@@ -1105,17 +1105,17 @@ void MediaPipelineTransmit::PipelineList
   // We get passed duplicate frames every ~10ms even if there's no frame change!
   int32_t serial = img->GetSerial();
   if (serial == last_img_) {
     return;
   }
   last_img_ = serial;
 
   ImageFormat format = img->GetFormat();
-#ifdef MOZ_WIDGET_GONK
+#ifdef WEBRTC_GONK
   if (format == ImageFormat::GRALLOC_PLANAR_YCBCR) {
     layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(img);
     android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
     void *basePtr;
     graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &basePtr);
     conduit->SendVideoFrame(static_cast<unsigned char*>(basePtr),
                             (graphicBuffer->getWidth() * graphicBuffer->getHeight() * 3) / 2,
                             graphicBuffer->getWidth(),
@@ -1399,50 +1399,59 @@ MediaPipelineReceiveVideo::PipelineListe
   image_container_ = layers::LayerManager::CreateImageContainer();
 #endif
 }
 
 void MediaPipelineReceiveVideo::PipelineListener::RenderVideoFrame(
     const unsigned char* buffer,
     unsigned int buffer_size,
     uint32_t time_stamp,
-    int64_t render_time) {
+    int64_t render_time,
+    const RefPtr<layers::Image>& video_image) {
 #ifdef MOZILLA_INTERNAL_API
   ReentrantMonitorAutoEnter enter(monitor_);
 
-  // Create a video frame and append it to the track.
+  if (buffer) {
+    // Create a video frame using |buffer|.
 #ifdef MOZ_WIDGET_GONK
-  ImageFormat format = ImageFormat::GRALLOC_PLANAR_YCBCR;
+    ImageFormat format = ImageFormat::GRALLOC_PLANAR_YCBCR;
 #else
-  ImageFormat format = ImageFormat::PLANAR_YCBCR;
+    ImageFormat format = ImageFormat::PLANAR_YCBCR;
 #endif
-  nsRefPtr<layers::Image> image = image_container_->CreateImage(format);
-
-  layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-  uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*> (buffer));
-  const uint8_t lumaBpp = 8;
-  const uint8_t chromaBpp = 4;
+    nsRefPtr<layers::Image> image = image_container_->CreateImage(format);
+    layers::PlanarYCbCrImage* yuvImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
+    uint8_t* frame = const_cast<uint8_t*>(static_cast<const uint8_t*>(buffer));
+    const uint8_t lumaBpp = 8;
+    const uint8_t chromaBpp = 4;
 
-  layers::PlanarYCbCrData data;
-  data.mYChannel = frame;
-  data.mYSize = IntSize(width_, height_);
-  data.mYStride = width_ * lumaBpp/ 8;
-  data.mCbCrStride = width_ * chromaBpp / 8;
-  data.mCbChannel = frame + height_ * data.mYStride;
-  data.mCrChannel = data.mCbChannel + height_ * data.mCbCrStride / 2;
-  data.mCbCrSize = IntSize(width_/ 2, height_/ 2);
-  data.mPicX = 0;
-  data.mPicY = 0;
-  data.mPicSize = IntSize(width_, height_);
-  data.mStereoMode = StereoMode::MONO;
+    layers::PlanarYCbCrData yuvData;
+    yuvData.mYChannel = frame;
+    yuvData.mYSize = IntSize(width_, height_);
+    yuvData.mYStride = width_ * lumaBpp / 8;
+    yuvData.mCbCrStride = width_ * chromaBpp / 8;
+    yuvData.mCbChannel = frame + height_ * yuvData.mYStride;
+    yuvData.mCrChannel = yuvData.mCbChannel + height_ * yuvData.mCbCrStride / 2;
+    yuvData.mCbCrSize = IntSize(width_ / 2, height_ / 2);
+    yuvData.mPicX = 0;
+    yuvData.mPicY = 0;
+    yuvData.mPicSize = IntSize(width_, height_);
+    yuvData.mStereoMode = StereoMode::MONO;
 
-  videoImage->SetData(data);
+    yuvImage->SetData(yuvData);
 
-  image_ = image.forget();
-#endif
+    image_ = image.forget();
+  }
+#ifdef WEBRTC_GONK
+  else {
+    // The decoder produced a video frame that can be appended to the track directly.
+    MOZ_ASSERT(video_image);
+    image_ = video_image;
+  }
+#endif // WEBRTC_GONK
+#endif // MOZILLA_INTERNAL_API
 }
 
 void MediaPipelineReceiveVideo::PipelineListener::
 NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) {
   ReentrantMonitorAutoEnter enter(monitor_);
 
 #ifdef MOZILLA_INTERNAL_API
   nsRefPtr<layers::Image> image = image_;
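
As a sanity check on the plane arithmetic in the hunk above, a worked example
for a 640x480 frame:

  // mYStride    = 640 * 8 / 8             = 640 bytes per row of Y
  // mCbCrStride = 640 * 4 / 8             = 320 bytes per row of Cb/Cr
  // Cb plane starts at frame + 480 * 640  = frame + 307200
  // Cr plane starts at Cb + 480 * 320 / 2 = frame + 384000
  // total I420 size = 640 * 480 * 3 / 2   = 460800 bytes
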
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -638,19 +638,21 @@ class MediaPipelineReceiveVideo : public
                                  unsigned int height,
                                  unsigned int number_of_streams) {
       pipeline_->listener_->FrameSizeChange(width, height, number_of_streams);
     }
 
     virtual void RenderVideoFrame(const unsigned char* buffer,
                                   unsigned int buffer_size,
                                   uint32_t time_stamp,
-                                  int64_t render_time) {
+                                  int64_t render_time,
+                                  const ImageHandle& handle) {
       pipeline_->listener_->RenderVideoFrame(buffer, buffer_size, time_stamp,
-                                            render_time);
+                                             render_time,
+                                             handle.GetImage());
     }
 
    private:
     MediaPipelineReceiveVideo *pipeline_;  // Raw pointer to avoid cycles
   };
 
   // Separate class to allow ref counting
   class PipelineListener : public GenericReceiveListener {
@@ -673,18 +675,18 @@ class MediaPipelineReceiveVideo : public
 
       width_ = width;
       height_ = height;
     }
 
     void RenderVideoFrame(const unsigned char* buffer,
                           unsigned int buffer_size,
                           uint32_t time_stamp,
-                          int64_t render_time);
-
+                          int64_t render_time,
+                          const RefPtr<layers::Image>& video_image);
 
    private:
     int width_;
     int height_;
 #ifdef MOZILLA_INTERNAL_API
     nsRefPtr<layers::ImageContainer> image_container_;
     nsRefPtr<layers::Image> image_;
 #endif
--- a/media/webrtc/signaling/test/mediaconduit_unittests.cpp
+++ b/media/webrtc/signaling/test/mediaconduit_unittests.cpp
@@ -351,17 +351,18 @@ public:
   virtual ~DummyVideoTarget()
   {
   }
 
 
   void RenderVideoFrame(const unsigned char* buffer,
                         unsigned int buffer_size,
                         uint32_t time_stamp,
-                        int64_t render_time)
+                        int64_t render_time,
+                        const mozilla::ImageHandle& handle)
  {
   //write the frame to the file
   if(VerifyFrame(buffer, buffer_size) == 0)
   {
       vidStatsGlobal.numFramesRenderedSuccessfully++;
   } else
   {
       vidStatsGlobal.numFramesRenderedWrongly++;