Bug 761018 - GStreamer video buffer handling optimization; r=cdouble
author Alessandro Decina <alessandro.d@gmail.com>
Wed, 13 Mar 2013 16:11:15 -0400
changeset 124740 7c3e470dc0ebc1cbc52ab576b8b5a6a0fd1aad31
parent 124739 d09a98157090da0ccf0b7e15863f4ae5255adafd
child 124741 459afca0e3918c63780ea449c0d5bb77152d5abe
push id 24433
push user emorley@mozilla.com
push date Thu, 14 Mar 2013 12:21:10 +0000
reviewers cdouble
bugs 761018
milestone 22.0a1
content/media/MediaDecoderReader.cpp
content/media/MediaDecoderReader.h
content/media/gstreamer/GStreamerReader.cpp
content/media/gstreamer/GStreamerReader.h
gfx/layers/ImageContainer.cpp
gfx/layers/ImageContainer.h
--- a/content/media/MediaDecoderReader.cpp
+++ b/content/media/MediaDecoderReader.cpp
@@ -137,25 +137,26 @@ VideoData::VideoData(int64_t aOffset,
 VideoData::~VideoData()
 {
   MOZ_COUNT_DTOR(VideoData);
 }
 
 
 VideoData* VideoData::Create(VideoInfo& aInfo,
                              ImageContainer* aContainer,
+                             Image* aImage,
                              int64_t aOffset,
                              int64_t aTime,
                              int64_t aEndTime,
                              const YCbCrBuffer& aBuffer,
                              bool aKeyframe,
                              int64_t aTimecode,
                              nsIntRect aPicture)
 {
-  if (!aContainer) {
+  if (!aImage && !aContainer) {
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                          aTime,
                                          aEndTime,
                                          aKeyframe,
                                          aTimecode,
                                          aInfo.mDisplay));
@@ -199,24 +200,29 @@ VideoData* VideoData::Create(VideoInfo& 
                                        aEndTime,
                                        aKeyframe,
                                        aTimecode,
                                        aInfo.mDisplay));
   const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
   const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
   const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
 
-  // Currently our decoder only knows how to output to PLANAR_YCBCR
-  // format.
-  ImageFormat format[2] = {PLANAR_YCBCR, GRALLOC_PLANAR_YCBCR};
-  if (IsYV12Format(Y, Cb, Cr)) {
-    v->mImage = aContainer->CreateImage(format, 2);
+  if (!aImage) {
+    // Currently our decoder only knows how to output to PLANAR_YCBCR
+    // format.
+    ImageFormat format[2] = {PLANAR_YCBCR, GRALLOC_PLANAR_YCBCR};
+    if (IsYV12Format(Y, Cb, Cr)) {
+      v->mImage = aContainer->CreateImage(format, 2);
+    } else {
+      v->mImage = aContainer->CreateImage(format, 1);
+    }
   } else {
-    v->mImage = aContainer->CreateImage(format, 1);
+    v->mImage = aImage;
   }
+
   if (!v->mImage) {
     return nullptr;
   }
   NS_ASSERTION(v->mImage->GetFormat() == PLANAR_YCBCR ||
                v->mImage->GetFormat() == GRALLOC_PLANAR_YCBCR,
                "Wrong format?");
   PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());
 
@@ -232,20 +238,53 @@ VideoData* VideoData::Create(VideoInfo& 
   data.mCbSkip = Cb.mSkip;
   data.mCrSkip = Cr.mSkip;
   data.mPicX = aPicture.x;
   data.mPicY = aPicture.y;
   data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
   data.mStereoMode = aInfo.mStereoMode;
 
   videoImage->SetDelayedConversion(true);
-  videoImage->SetData(data);
+  if (!aImage) {
+    videoImage->SetData(data);
+  } else {
+    videoImage->SetDataNoCopy(data);
+  }
+
   return v.forget();
 }
 
+VideoData* VideoData::Create(VideoInfo& aInfo,
+                             ImageContainer* aContainer,
+                             int64_t aOffset,
+                             int64_t aTime,
+                             int64_t aEndTime,
+                             const YCbCrBuffer& aBuffer,
+                             bool aKeyframe,
+                             int64_t aTimecode,
+                             nsIntRect aPicture)
+{
+  return Create(aInfo, aContainer, nullptr, aOffset, aTime, aEndTime, aBuffer,
+                aKeyframe, aTimecode, aPicture);
+}
+
+VideoData* VideoData::Create(VideoInfo& aInfo,
+                             Image* aImage,
+                             int64_t aOffset,
+                             int64_t aTime,
+                             int64_t aEndTime,
+                             const YCbCrBuffer& aBuffer,
+                             bool aKeyframe,
+                             int64_t aTimecode,
+                             nsIntRect aPicture)
+{
+  return Create(aInfo, nullptr, aImage, aOffset, aTime, aEndTime, aBuffer,
+                aKeyframe, aTimecode, aPicture);
+}
+
 VideoData* VideoData::CreateFromImage(VideoInfo& aInfo,
                                       ImageContainer* aContainer,
                                       int64_t aOffset,
                                       int64_t aTime,
                                       int64_t aEndTime,
                                       const nsRefPtr<Image>& aImage,
                                       bool aKeyframe,
                                       int64_t aTimecode,
@@ -262,17 +301,17 @@ VideoData* VideoData::CreateFromImage(Vi
 }
 
 #ifdef MOZ_WIDGET_GONK
 VideoData* VideoData::Create(VideoInfo& aInfo,
                              ImageContainer* aContainer,
                              int64_t aOffset,
                              int64_t aTime,
                              int64_t aEndTime,
-                             mozilla::layers::GraphicBufferLocked *aBuffer,
+                             mozilla::layers::GraphicBufferLocked* aBuffer,
                              bool aKeyframe,
                              int64_t aTimecode,
                              nsIntRect aPicture)
 {
   if (!aContainer) {
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     nsAutoPtr<VideoData> v(new VideoData(aOffset,
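For context, a sketch of how a caller picks between the two new overloads (hypothetical call sites; the argument names mirror GStreamerReader::DecodeVideoFrame further down):

  // Copying variant: allocates a new Image through aContainer and memcpys
  // the planes out of 'b' (the SetData() path).
  VideoData* copied = VideoData::Create(mInfo, mDecoder->GetImageContainer(),
                                        offset, timestamp, nextTimestamp, b,
                                        isKeyframe, -1, mPicture);

  // Zero-copy variant: 'b' must alias memory owned by 'image', so only
  // SetDataNoCopy() runs and no plane copy happens.
  VideoData* wrapped = VideoData::Create(mInfo, image, offset, timestamp,
                                         nextTimestamp, b, isKeyframe, -1,
                                         mPicture);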
--- a/content/media/MediaDecoderReader.h
+++ b/content/media/MediaDecoderReader.h
@@ -127,38 +127,63 @@ public:
       uint32_t mStride;
       uint32_t mOffset;
       uint32_t mSkip;
     };
 
     Plane mPlanes[3];
   };
 
-  // Constructs a VideoData object. Makes a copy of YCbCr data in aBuffer.
-  // aTimecode is a codec specific number representing the timestamp of
-  // the frame of video data. Returns nullptr if an error occurs. This may
-  // indicate that memory couldn't be allocated to create the VideoData
-  // object, or it may indicate some problem with the input data (e.g.
-  // negative stride).
+  // Constructs a VideoData object. If aImage is NULL, creates a new Image
+  // holding a copy of the YCbCr data passed in aBuffer. If aImage is not NULL,
+  // it's stored as the underlying video image and aBuffer is assumed to point
+  // to memory within aImage so no copy is made. aTimecode is a codec specific
+  // number representing the timestamp of the frame of video data. Returns
+  // nullptr if an error occurs. This may indicate that memory couldn't be
+  // allocated to create the VideoData object, or it may indicate some problem
+  // with the input data (e.g. negative stride).
   static VideoData* Create(VideoInfo& aInfo,
                            ImageContainer* aContainer,
+                           Image* aImage,
+                           int64_t aOffset,
+                           int64_t aTime,
+                           int64_t aEndTime,
+                           const YCbCrBuffer &aBuffer,
+                           bool aKeyframe,
+                           int64_t aTimecode,
+                           nsIntRect aPicture);
+
+  // Variant that always makes a copy of aBuffer
+  static VideoData* Create(VideoInfo& aInfo,
+                           ImageContainer* aContainer,
+                           int64_t aOffset,
+                           int64_t aTime,
+                           int64_t aEndTime,
+                           const YCbCrBuffer &aBuffer,
+                           bool aKeyframe,
+                           int64_t aTimecode,
+                           nsIntRect aPicture);
+
+  // Variant to create a VideoData instance given an existing aImage
+  static VideoData* Create(VideoInfo& aInfo,
+                           Image* aImage,
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aEndTime,
                            const YCbCrBuffer &aBuffer,
                            bool aKeyframe,
                            int64_t aTimecode,
                            nsIntRect aPicture);
 
   static VideoData* Create(VideoInfo& aInfo,
                            ImageContainer* aContainer,
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aEndTime,
-                           layers::GraphicBufferLocked *aBuffer,
+                           layers::GraphicBufferLocked* aBuffer,
                            bool aKeyframe,
                            int64_t aTimecode,
                            nsIntRect aPicture);
 
   static VideoData* CreateFromImage(VideoInfo& aInfo,
                                     ImageContainer* aContainer,
                                     int64_t aOffset,
                                     int64_t aTime,
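To make the no-copy contract above concrete, a sketch of the precondition on aBuffer when aImage is given (hypothetical I420 plane offsets; 'data' stands for the pointer returned by AllocateAndGetNewBuffer()):

  // The planes must point into memory owned by 'image'; VideoData::Create
  // stores them via SetDataNoCopy() without copying.
  VideoData::YCbCrBuffer b;
  b.mPlanes[0].mData = data;                     // Y
  b.mPlanes[1].mData = data + ySize;             // Cb, after the Y plane
  b.mPlanes[2].mData = data + ySize + cbCrSize;  // Cr, after the Cb plane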
--- a/content/media/gstreamer/GStreamerReader.cpp
+++ b/content/media/gstreamer/GStreamerReader.cpp
@@ -22,45 +22,52 @@ using namespace layers;
 
 #ifdef PR_LOGGING
 extern PRLogModuleInfo* gMediaDecoderLog;
 #define LOG(type, msg) PR_LOG(gMediaDecoderLog, type, msg)
 #else
 #define LOG(type, msg)
 #endif
 
+extern bool
+IsYV12Format(const VideoData::YCbCrBuffer::Plane& aYPlane,
+             const VideoData::YCbCrBuffer::Plane& aCbPlane,
+             const VideoData::YCbCrBuffer::Plane& aCrPlane);
+
 static const int MAX_CHANNELS = 4;
 // Let the demuxer work in pull mode for short files
 static const int SHORT_FILE_SIZE = 1024 * 1024;
 // The default resource->Read() size when working in push mode
 static const int DEFAULT_SOURCE_READ_SIZE = 50 * 1024;
 
+G_DEFINE_BOXED_TYPE(BufferData, buffer_data, BufferData::Copy, BufferData::Free);
+
 typedef enum {
   GST_PLAY_FLAG_VIDEO         = (1 << 0),
   GST_PLAY_FLAG_AUDIO         = (1 << 1),
   GST_PLAY_FLAG_TEXT          = (1 << 2),
   GST_PLAY_FLAG_VIS           = (1 << 3),
   GST_PLAY_FLAG_SOFT_VOLUME   = (1 << 4),
   GST_PLAY_FLAG_NATIVE_AUDIO  = (1 << 5),
   GST_PLAY_FLAG_NATIVE_VIDEO  = (1 << 6),
   GST_PLAY_FLAG_DOWNLOAD      = (1 << 7),
   GST_PLAY_FLAG_BUFFERING     = (1 << 8),
   GST_PLAY_FLAG_DEINTERLACE   = (1 << 9),
   GST_PLAY_FLAG_SOFT_COLORBALANCE = (1 << 10)
 } PlayFlags;
 
 GStreamerReader::GStreamerReader(AbstractMediaDecoder* aDecoder)
   : MediaDecoderReader(aDecoder),
-  mPlayBin(NULL),
-  mBus(NULL),
-  mSource(NULL),
-  mVideoSink(NULL),
-  mVideoAppSink(NULL),
-  mAudioSink(NULL),
-  mAudioAppSink(NULL),
+  mPlayBin(nullptr),
+  mBus(nullptr),
+  mSource(nullptr),
+  mVideoSink(nullptr),
+  mVideoAppSink(nullptr),
+  mAudioSink(nullptr),
+  mAudioAppSink(nullptr),
   mFormat(GST_VIDEO_FORMAT_UNKNOWN),
   mVideoSinkBufferCount(0),
   mAudioSinkBufferCount(0),
   mGstThreadsMonitor("media.gst.threads"),
   mReachedEos(false),
   mByteOffset(0),
   mLastReportedByteOffset(0),
   fpsNum(0),
@@ -70,214 +77,218 @@ GStreamerReader::GStreamerReader(Abstrac
 
   mSrcCallbacks.need_data = GStreamerReader::NeedDataCb;
   mSrcCallbacks.enough_data = GStreamerReader::EnoughDataCb;
   mSrcCallbacks.seek_data = GStreamerReader::SeekDataCb;
 
   mSinkCallbacks.eos = GStreamerReader::EosCb;
   mSinkCallbacks.new_preroll = GStreamerReader::NewPrerollCb;
   mSinkCallbacks.new_buffer = GStreamerReader::NewBufferCb;
-  mSinkCallbacks.new_buffer_list = NULL;
+  mSinkCallbacks.new_buffer_list = nullptr;
 
   gst_segment_init(&mVideoSegment, GST_FORMAT_UNDEFINED);
   gst_segment_init(&mAudioSegment, GST_FORMAT_UNDEFINED);
 }
 
 GStreamerReader::~GStreamerReader()
 {
   MOZ_COUNT_DTOR(GStreamerReader);
   ResetDecode();
 
   if (mPlayBin) {
     gst_app_src_end_of_stream(mSource);
     if (mSource)
       gst_object_unref(mSource);
     gst_element_set_state(mPlayBin, GST_STATE_NULL);
     gst_object_unref(mPlayBin);
-    mPlayBin = NULL;
-    mVideoSink = NULL;
-    mVideoAppSink = NULL;
-    mAudioSink = NULL;
-    mAudioAppSink = NULL;
+    mPlayBin = nullptr;
+    mVideoSink = nullptr;
+    mVideoAppSink = nullptr;
+    mAudioSink = nullptr;
+    mAudioAppSink = nullptr;
     gst_object_unref(mBus);
-    mBus = NULL;
+    mBus = nullptr;
   }
 }
 
 nsresult GStreamerReader::Init(MediaDecoderReader* aCloneDonor)
 {
-  GError *error = NULL;
+  GError* error = nullptr;
   if (!gst_init_check(0, 0, &error)) {
     LOG(PR_LOG_ERROR, ("gst initialization failed: %s", error->message));
     g_error_free(error);
     return NS_ERROR_FAILURE;
   }
 
-  mPlayBin = gst_element_factory_make("playbin2", NULL);
-  if (mPlayBin == NULL) {
+  mPlayBin = gst_element_factory_make("playbin2", nullptr);
+  if (!mPlayBin) {
     LOG(PR_LOG_ERROR, ("couldn't create playbin2"));
     return NS_ERROR_FAILURE;
   }
-  g_object_set(mPlayBin, "buffer-size", 0, NULL);
+  g_object_set(mPlayBin, "buffer-size", 0, nullptr);
   mBus = gst_pipeline_get_bus(GST_PIPELINE(mPlayBin));
 
   mVideoSink = gst_parse_bin_from_description("capsfilter name=filter ! "
       "appsink name=videosink sync=true max-buffers=1 "
       "caps=video/x-raw-yuv,format=(fourcc)I420"
-      , TRUE, NULL);
+      , TRUE, nullptr);
   mVideoAppSink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(mVideoSink),
         "videosink"));
   gst_app_sink_set_callbacks(mVideoAppSink, &mSinkCallbacks,
-      (gpointer) this, NULL);
-  GstPad *sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
+      (gpointer) this, nullptr);
+  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
   gst_pad_add_event_probe(sinkpad,
       G_CALLBACK(&GStreamerReader::EventProbeCb), this);
+#if GST_VERSION_MICRO >= 36
+  gst_pad_set_bufferalloc_function(sinkpad, GStreamerReader::AllocateVideoBufferCb);
+#endif
+  gst_pad_set_element_private(sinkpad, this);
  gst_object_unref(sinkpad);
 
   mAudioSink = gst_parse_bin_from_description("capsfilter name=filter ! "
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
         "appsink name=audiosink sync=true caps=audio/x-raw-float,"
 #ifdef IS_LITTLE_ENDIAN
-        "channels={1,2},width=32,endianness=1234", TRUE, NULL);
+        "channels={1,2},width=32,endianness=1234", TRUE, nullptr);
 #else
-        "channels={1,2},width=32,endianness=4321", TRUE, NULL);
+        "channels={1,2},width=32,endianness=4321", TRUE, nullptr);
 #endif
 #else
         "appsink name=audiosink sync=true caps=audio/x-raw-int,"
 #ifdef IS_LITTLE_ENDIAN
-        "channels={1,2},width=16,endianness=1234", TRUE, NULL);
+        "channels={1,2},width=16,endianness=1234", TRUE, nullptr);
 #else
-        "channels={1,2},width=16,endianness=4321", TRUE, NULL);
+        "channels={1,2},width=16,endianness=4321", TRUE, nullptr);
 #endif
 #endif
   mAudioAppSink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(mAudioSink),
-        "audiosink"));
+                                                   "audiosink"));
   gst_app_sink_set_callbacks(mAudioAppSink, &mSinkCallbacks,
-      (gpointer) this, NULL);
+                             (gpointer) this, nullptr);
   sinkpad = gst_element_get_pad(GST_ELEMENT(mAudioAppSink), "sink");
   gst_pad_add_event_probe(sinkpad,
-      G_CALLBACK(&GStreamerReader::EventProbeCb), this);
+                          G_CALLBACK(&GStreamerReader::EventProbeCb), this);
   gst_object_unref(sinkpad);
 
   g_object_set(mPlayBin, "uri", "appsrc://",
-      "video-sink", mVideoSink,
-      "audio-sink", mAudioSink,
-      NULL);
+               "video-sink", mVideoSink,
+               "audio-sink", mAudioSink,
+               nullptr);
 
-  g_signal_connect(G_OBJECT(mPlayBin), "notify::source",
-    G_CALLBACK(GStreamerReader::PlayBinSourceSetupCb), this);
+  g_object_connect(mPlayBin, "signal::source-setup",
+                  GStreamerReader::PlayBinSourceSetupCb, this, nullptr);
 
   return NS_OK;
 }
 
-void GStreamerReader::PlayBinSourceSetupCb(GstElement *aPlayBin,
-                                             GParamSpec *pspec,
-                                             gpointer aUserData)
+void GStreamerReader::PlayBinSourceSetupCb(GstElement* aPlayBin,
+                                           GParamSpec* pspec,
+                                           gpointer aUserData)
 {
   GstElement *source;
-  GStreamerReader *reader = reinterpret_cast<GStreamerReader*>(aUserData);
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
 
   g_object_get(aPlayBin, "source", &source, NULL);
   reader->PlayBinSourceSetup(GST_APP_SRC(source));
 }
 
-void GStreamerReader::PlayBinSourceSetup(GstAppSrc *aSource)
+void GStreamerReader::PlayBinSourceSetup(GstAppSrc* aSource)
 {
   mSource = GST_APP_SRC(aSource);
-  gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, NULL);
+  gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, nullptr);
   MediaResource* resource = mDecoder->GetResource();
 
   /* do a short read to trigger a network request so that GetLength() below
    * returns something meaningful and not -1
    */
   char buf[512];
   unsigned int size = 0;
   resource->Read(buf, sizeof(buf), &size);
   resource->Seek(SEEK_SET, 0);
 
   /* now we should have a length */
-  int64_t len = resource->GetLength();
-  gst_app_src_set_size(mSource, len);
+  int64_t resourceLength = resource->GetLength();
+  gst_app_src_set_size(mSource, resourceLength);
   if (resource->IsDataCachedToEndOfResource(0) ||
-      (len != -1 && len <= SHORT_FILE_SIZE)) {
+      (resourceLength != -1 && resourceLength <= SHORT_FILE_SIZE)) {
     /* let the demuxer work in pull mode for local files (or very short files)
      * so that we get optimal seeking accuracy/performance
      */
-    LOG(PR_LOG_DEBUG, ("configuring random access, len %lld", len));
+    LOG(PR_LOG_DEBUG, ("configuring random access, len %lld", resourceLength));
     gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_RANDOM_ACCESS);
   } else {
     /* make the demuxer work in push mode so that seeking is kept to a minimum
      */
-    LOG(PR_LOG_DEBUG, ("configuring push mode, len %lld", len));
+    LOG(PR_LOG_DEBUG, ("configuring push mode, len %lld", resourceLength));
     gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_SEEKABLE);
   }
 }
 
 nsresult GStreamerReader::ReadMetadata(VideoInfo* aInfo,
-                                         MetadataTags** aTags)
+                                       MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   nsresult ret = NS_OK;
 
   /* We do 3 attempts here: decoding audio and video, decoding video only,
    * decoding audio only. This allows us to play streams that have one broken
    * stream but that are otherwise decodeable.
    */
   guint flags[3] = {GST_PLAY_FLAG_VIDEO|GST_PLAY_FLAG_AUDIO,
     static_cast<guint>(~GST_PLAY_FLAG_AUDIO), static_cast<guint>(~GST_PLAY_FLAG_VIDEO)};
   guint default_flags, current_flags;
-  g_object_get(mPlayBin, "flags", &default_flags, NULL);
+  g_object_get(mPlayBin, "flags", &default_flags, nullptr);
 
-  GstMessage *message = NULL;
+  GstMessage* message = nullptr;
   for (unsigned int i = 0; i < G_N_ELEMENTS(flags); i++) {
     current_flags = default_flags & flags[i];
-    g_object_set(G_OBJECT(mPlayBin), "flags", current_flags, NULL);
+    g_object_set(G_OBJECT(mPlayBin), "flags", current_flags, nullptr);
 
     /* reset filter caps to ANY */
-    GstCaps *caps = gst_caps_new_any();
-    GstElement *filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
-    g_object_set(filter, "caps", caps, NULL);
+    GstCaps* caps = gst_caps_new_any();
+    GstElement* filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
+    g_object_set(filter, "caps", caps, nullptr);
     gst_object_unref(filter);
 
     filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");
-    g_object_set(filter, "caps", caps, NULL);
+    g_object_set(filter, "caps", caps, nullptr);
     gst_object_unref(filter);
     gst_caps_unref(caps);
-    filter = NULL;
+    filter = nullptr;
 
     if (!(current_flags & GST_PLAY_FLAG_AUDIO))
       filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
     else if (!(current_flags & GST_PLAY_FLAG_VIDEO))
       filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");
 
     if (filter) {
       /* Little trick: set the target caps to "skip" so that playbin2 fails to
        * find a decoder for the stream we want to skip.
        */
-      GstCaps *filterCaps = gst_caps_new_simple ("skip", NULL);
-      g_object_set(filter, "caps", filterCaps, NULL);
+      GstCaps* filterCaps = gst_caps_new_simple ("skip", nullptr);
+      g_object_set(filter, "caps", filterCaps, nullptr);
       gst_caps_unref(filterCaps);
       gst_object_unref(filter);
     }
 
     /* start the pipeline */
     gst_element_set_state(mPlayBin, GST_STATE_PAUSED);
 
     /* Wait for ASYNC_DONE, which is emitted when the pipeline is built,
      * prerolled and ready to play. Also watch for errors.
      */
     message = gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
-       (GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR));
+                 (GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR));
     if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ERROR) {
-      GError *error;
-      gchar *debug;
+      GError* error;
+      gchar* debug;
 
       gst_message_parse_error(message, &error, &debug);
       LOG(PR_LOG_ERROR, ("read metadata error: %s: %s", error->message,
-            debug));
+                         debug));
       g_error_free(error);
       g_free(debug);
       gst_element_set_state(mPlayBin, GST_STATE_NULL);
       gst_message_unref(message);
       ret = NS_ERROR_FAILURE;
     } else {
       gst_message_unref(message);
       ret = NS_OK;
@@ -311,17 +322,17 @@ nsresult GStreamerReader::ReadMetadata(V
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     LOG(PR_LOG_DEBUG, ("returning duration %" GST_TIME_FORMAT,
           GST_TIME_ARGS (duration)));
     duration = GST_TIME_AS_USECONDS (duration);
     mDecoder->SetMediaDuration(duration);
   }
 
   int n_video = 0, n_audio = 0;
-  g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, NULL);
+  g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, nullptr);
   mInfo.mHasVideo = n_video != 0;
   mInfo.mHasAudio = n_audio != 0;
 
   *aInfo = mInfo;
 
   *aTags = nullptr;
 
   /* set the pipeline to PLAYING so that it starts decoding and queueing data in
@@ -354,17 +365,17 @@ nsresult GStreamerReader::ResetDecode()
 void GStreamerReader::NotifyBytesConsumed()
 {
   NS_ASSERTION(mByteOffset >= mLastReportedByteOffset,
       "current byte offset less than prev offset");
   mDecoder->NotifyBytesConsumed(mByteOffset - mLastReportedByteOffset);
   mLastReportedByteOffset = mByteOffset;
 }
 
-bool GStreamerReader::WaitForDecodedData(int *aCounter)
+bool GStreamerReader::WaitForDecodedData(int* aCounter)
 {
   ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
 
   /* Report consumed bytes from here as we can't do it from gst threads */
   NotifyBytesConsumed();
   while(*aCounter == 0) {
     if (mReachedEos) {
       return false;
@@ -381,60 +392,60 @@ bool GStreamerReader::DecodeAudioData()
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
 
   if (!WaitForDecodedData(&mAudioSinkBufferCount)) {
     mAudioQueue.Finish();
     return false;
   }
 
-  GstBuffer *buffer = gst_app_sink_pull_buffer(mAudioAppSink);
+  GstBuffer* buffer = gst_app_sink_pull_buffer(mAudioAppSink);
   int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
   timestamp = gst_segment_to_stream_time(&mAudioSegment,
       GST_FORMAT_TIME, timestamp);
   timestamp = GST_TIME_AS_USECONDS(timestamp);
   int64_t duration = 0;
   if (GST_CLOCK_TIME_IS_VALID(GST_BUFFER_DURATION(buffer)))
     duration = GST_TIME_AS_USECONDS(GST_BUFFER_DURATION(buffer));
 
   int64_t offset = GST_BUFFER_OFFSET(buffer);
   unsigned int size = GST_BUFFER_SIZE(buffer);
   int32_t frames = (size / sizeof(AudioDataValue)) / mInfo.mAudioChannels;
   ssize_t outSize = static_cast<size_t>(size / sizeof(AudioDataValue));
   nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outSize]);
   memcpy(data, GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE(buffer));
-  AudioData *audio = new AudioData(offset, timestamp, duration,
+  AudioData* audio = new AudioData(offset, timestamp, duration,
       frames, data.forget(), mInfo.mAudioChannels);
 
   mAudioQueue.Push(audio);
   gst_buffer_unref(buffer);
 
   return true;
 }
 
 bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                          int64_t aTimeThreshold)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
 
-  GstBuffer *buffer = NULL;
+  GstBuffer* buffer = nullptr;
   int64_t timestamp, nextTimestamp;
   while (true)
   {
     if (!WaitForDecodedData(&mVideoSinkBufferCount)) {
       mVideoQueue.Finish();
       break;
     }
     mDecoder->NotifyDecodedFrames(0, 1);
 
     buffer = gst_app_sink_pull_buffer(mVideoAppSink);
     bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DISCONT);
     if ((aKeyFrameSkip && !isKeyframe)) {
       gst_buffer_unref(buffer);
-      buffer = NULL;
+      buffer = nullptr;
       continue;
     }
 
     timestamp = GST_BUFFER_TIMESTAMP(buffer);
     {
       ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
       timestamp = gst_segment_to_stream_time(&mVideoSegment,
           GST_FORMAT_TIME, timestamp);
@@ -448,28 +459,55 @@ bool GStreamerReader::DecodeVideoFrame(b
       /* add 1-frame duration */
       nextTimestamp += gst_util_uint64_scale(GST_USECOND, fpsNum, fpsDen);
 
     if (timestamp < aTimeThreshold) {
       LOG(PR_LOG_DEBUG, ("skipping frame %" GST_TIME_FORMAT
             " threshold %" GST_TIME_FORMAT,
             GST_TIME_ARGS(timestamp), GST_TIME_ARGS(aTimeThreshold)));
       gst_buffer_unref(buffer);
-      buffer = NULL;
+      buffer = nullptr;
       continue;
     }
 
     break;
   }
 
-  if (buffer == NULL)
+  if (!buffer)
     /* no more frames */
     return false;
 
-  guint8 *data = GST_BUFFER_DATA(buffer);
+  nsRefPtr<PlanarYCbCrImage> image;
+#if GST_VERSION_MICRO >= 36
+  const GstStructure* structure = gst_buffer_get_qdata(buffer,
+      g_quark_from_string("moz-reader-data"));
+  const GValue* value = structure ? gst_structure_get_value(structure, "image")
+                                  : nullptr;
+  if (value) {
+    BufferData* data = reinterpret_cast<BufferData*>(g_value_get_boxed(value));
+    image = data->mImage;
+  }
+#endif
+
+  if (!image) {
+    /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fall back to
+     * allocating a PlanarYCbCrImage-backed GstBuffer here and memcpy.
+     */
+    GstBuffer* tmp = nullptr;
+    AllocateVideoBufferFull(nullptr, GST_BUFFER_OFFSET(buffer),
+        GST_BUFFER_SIZE(buffer), nullptr, &tmp, image);
+
+    /* copy */
+    gst_buffer_copy_metadata(tmp, buffer, GST_BUFFER_COPY_ALL);
+    memcpy(GST_BUFFER_DATA(tmp), GST_BUFFER_DATA(buffer),
+        GST_BUFFER_SIZE(tmp));
+    gst_buffer_unref(buffer);
+    buffer = tmp;
+  }
+
+  guint8* data = GST_BUFFER_DATA(buffer);
 
   int width = mPicture.width;
   int height = mPicture.height;
   GstVideoFormat format = mFormat;
 
   VideoData::YCbCrBuffer b;
   for(int i = 0; i < 3; i++) {
     b.mPlanes[i].mData = data + gst_video_format_get_component_offset(format, i,
@@ -482,25 +520,19 @@ bool GStreamerReader::DecodeVideoFrame(b
     b.mPlanes[i].mOffset = 0;
     b.mPlanes[i].mSkip = 0;
   }
 
   bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer,
       GST_BUFFER_FLAG_DELTA_UNIT);
   /* XXX ? */
   int64_t offset = 0;
-  VideoData *video = VideoData::Create(mInfo,
-                                       mDecoder->GetImageContainer(),
-                                       offset,
-                                       timestamp,
-                                       nextTimestamp,
-                                       b,
-                                       isKeyframe,
-                                       -1,
-                                       mPicture);
+  VideoData* video = VideoData::Create(mInfo, image, offset,
+                                       timestamp, nextTimestamp, b,
+                                       isKeyframe, -1, mPicture);
   mVideoQueue.Push(video);
   gst_buffer_unref(buffer);
 
   return true;
 }
 
 nsresult GStreamerReader::Seek(int64_t aTarget,
                                  int64_t aStartTime,
@@ -527,34 +559,33 @@ nsresult GStreamerReader::GetBuffered(Ti
                                       int64_t aStartTime)
 {
   if (!mInfo.mHasVideo && !mInfo.mHasAudio) {
     return NS_OK;
   }
 
   GstFormat format = GST_FORMAT_TIME;
   MediaResource* resource = mDecoder->GetResource();
-  gint64 resourceLength = resource->GetLength();
   nsTArray<MediaByteRange> ranges;
   resource->GetCachedRanges(ranges);
 
   if (mDecoder->OnStateMachineThread())
     /* Report the position from here while buffering as we can't report it from
      * the gstreamer threads that are actually reading from the resource
      */
     NotifyBytesConsumed();
 
   if (resource->IsDataCachedToEndOfResource(0)) {
     /* fast path for local or completely cached files */
     gint64 duration = 0;
 
     duration = QueryDuration();
     double end = (double) duration / GST_MSECOND;
     LOG(PR_LOG_DEBUG, ("complete range [0, %f] for [0, %li]",
-          end, resourceLength));
+          end, resource->GetLength()));
     aBuffered->Add(0, end);
     return NS_OK;
   }
 
   for(uint32_t index = 0; index < ranges.Length(); index++) {
     int64_t startOffset = ranges[index].mStart;
     int64_t endOffset = ranges[index].mEnd;
     gint64 startTime, endTime;
@@ -564,51 +595,53 @@ nsresult GStreamerReader::GetBuffered(Ti
       continue;
     if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
       endOffset, &format, &endTime) || format != GST_FORMAT_TIME)
       continue;
 
     double start = (double) GST_TIME_AS_USECONDS (startTime) / GST_MSECOND;
     double end = (double) GST_TIME_AS_USECONDS (endTime) / GST_MSECOND;
     LOG(PR_LOG_DEBUG, ("adding range [%f, %f] for [%li %li] size %li",
-          start, end, startOffset, endOffset, resourceLength));
+          start, end, startOffset, endOffset, resource->GetLength()));
     aBuffered->Add(start, end);
   }
 
   return NS_OK;
 }
 
 void GStreamerReader::ReadAndPushData(guint aLength)
 {
   MediaResource* resource = mDecoder->GetResource();
   NS_ASSERTION(resource, "Decoder has no media resource");
   nsresult rv = NS_OK;
 
-  GstBuffer *buffer = gst_buffer_new_and_alloc(aLength);
-  guint8 *data = GST_BUFFER_DATA(buffer);
+  GstBuffer* buffer = gst_buffer_new_and_alloc(aLength);
+  guint8* data = GST_BUFFER_DATA(buffer);
   uint32_t size = 0, bytesRead = 0;
   while(bytesRead < aLength) {
     rv = resource->Read(reinterpret_cast<char*>(data + bytesRead),
         aLength - bytesRead, &size);
     if (NS_FAILED(rv) || size == 0)
       break;
 
     bytesRead += size;
   }
 
   GST_BUFFER_SIZE(buffer) = bytesRead;
   mByteOffset += bytesRead;
 
   GstFlowReturn ret = gst_app_src_push_buffer(mSource, gst_buffer_ref(buffer));
-  if (ret != GST_FLOW_OK)
+  if (ret != GST_FLOW_OK) {
     LOG(PR_LOG_ERROR, ("ReadAndPushData push ret %s", gst_flow_get_name(ret)));
+  }
 
-  if (GST_BUFFER_SIZE (buffer) < aLength)
+  if (GST_BUFFER_SIZE (buffer) < aLength) {
     /* If we read less than what we wanted, we reached the end */
     gst_app_src_end_of_stream(mSource);
+  }
 
   gst_buffer_unref(buffer);
 }
 
 int64_t GStreamerReader::QueryDuration()
 {
   gint64 duration = 0;
   GstFormat format = GST_FORMAT_TIME;
@@ -627,91 +660,95 @@ int64_t GStreamerReader::QueryDuration()
     // We decoded more than the reported duration (which could be estimated)
     LOG(PR_LOG_DEBUG, ("mDuration > duration"));
     duration = mDecoder->mDuration;
   }*/
 
   return duration;
 }
 
-void GStreamerReader::NeedDataCb(GstAppSrc *aSrc,
-                                   guint aLength,
-                                   gpointer aUserData)
+void GStreamerReader::NeedDataCb(GstAppSrc* aSrc,
+                                 guint aLength,
+                                 gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   reader->NeedData(aSrc, aLength);
 }
 
-void GStreamerReader::NeedData(GstAppSrc *aSrc, guint aLength)
+void GStreamerReader::NeedData(GstAppSrc* aSrc, guint aLength)
 {
-  if (aLength == -1)
+  if (aLength == static_cast<guint>(-1))
     aLength = DEFAULT_SOURCE_READ_SIZE;
   ReadAndPushData(aLength);
 }
 
-void GStreamerReader::EnoughDataCb(GstAppSrc *aSrc, gpointer aUserData)
+void GStreamerReader::EnoughDataCb(GstAppSrc* aSrc, gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   reader->EnoughData(aSrc);
 }
 
-void GStreamerReader::EnoughData(GstAppSrc *aSrc)
+void GStreamerReader::EnoughData(GstAppSrc* aSrc)
 {
 }
 
-gboolean GStreamerReader::SeekDataCb(GstAppSrc *aSrc,
-                                       guint64 aOffset,
-                                       gpointer aUserData)
+gboolean GStreamerReader::SeekDataCb(GstAppSrc* aSrc,
+                                     guint64 aOffset,
+                                     gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   return reader->SeekData(aSrc, aOffset);
 }
 
-gboolean GStreamerReader::SeekData(GstAppSrc *aSrc, guint64 aOffset)
+gboolean GStreamerReader::SeekData(GstAppSrc* aSrc, guint64 aOffset)
 {
   ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
   MediaResource* resource = mDecoder->GetResource();
+  int64_t resourceLength = resource->GetLength();
 
-  if (gst_app_src_get_size(mSource) == -1)
+  if (gst_app_src_get_size(mSource) == -1) {
     /* It's possible that we didn't know the length when we initialized mSource
      * but maybe we do now
      */
-    gst_app_src_set_size(mSource, resource->GetLength());
+    gst_app_src_set_size(mSource, resourceLength);
+  }
 
   nsresult rv = NS_ERROR_FAILURE;
-  if (aOffset < resource->GetLength())
+  if (aOffset < static_cast<guint64>(resourceLength)) {
     rv = resource->Seek(SEEK_SET, aOffset);
+  }
 
-  if (NS_SUCCEEDED(rv))
+  if (NS_SUCCEEDED(rv)) {
     mByteOffset = mLastReportedByteOffset = aOffset;
-  else
+  } else {
     LOG(PR_LOG_ERROR, ("seek at %lu failed", aOffset));
+  }
 
   return NS_SUCCEEDED(rv);
 }
 
-gboolean GStreamerReader::EventProbeCb(GstPad *aPad,
-                                         GstEvent *aEvent,
+gboolean GStreamerReader::EventProbeCb(GstPad* aPad,
+                                         GstEvent* aEvent,
                                          gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   return reader->EventProbe(aPad, aEvent);
 }
 
-gboolean GStreamerReader::EventProbe(GstPad *aPad, GstEvent *aEvent)
+gboolean GStreamerReader::EventProbe(GstPad* aPad, GstEvent* aEvent)
 {
-  GstElement *parent = GST_ELEMENT(gst_pad_get_parent(aPad));
+  GstElement* parent = GST_ELEMENT(gst_pad_get_parent(aPad));
   switch(GST_EVENT_TYPE(aEvent)) {
     case GST_EVENT_NEWSEGMENT:
     {
       gboolean update;
       gdouble rate;
       GstFormat format;
       gint64 start, stop, position;
-      GstSegment *segment;
+      GstSegment* segment;
 
       /* Store the segments so we can convert timestamps to stream time, which
        * is what the upper layers sync on.
        */
       ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
       gst_event_parse_new_segment(aEvent, &update, &rate, &format,
           &start, &stop, &position);
       if (parent == GST_ELEMENT(mVideoAppSink))
@@ -729,67 +766,129 @@ gboolean GStreamerReader::EventProbe(Gst
     default:
       break;
   }
   gst_object_unref(parent);
 
   return TRUE;
 }
 
-GstFlowReturn GStreamerReader::NewPrerollCb(GstAppSink *aSink,
+GstFlowReturn GStreamerReader::AllocateVideoBufferFull(GstPad* aPad,
+                                                       guint64 aOffset,
+                                                       guint aSize,
+                                                       GstCaps* aCaps,
+                                                       GstBuffer** aBuf,
+                                                       nsRefPtr<PlanarYCbCrImage>& aImage)
+{
+  /* allocate an image using the container */
+  ImageContainer* container = mDecoder->GetImageContainer();
+  ImageFormat format = PLANAR_YCBCR;
+  PlanarYCbCrImage* img = static_cast<PlanarYCbCrImage*>(container->CreateImage(&format, 1).get());
+  nsRefPtr<PlanarYCbCrImage> image = dont_AddRef(img);
+
+  /* prepare a GstBuffer pointing to the underlying PlanarYCbCrImage buffer */
+  GstBuffer* buf = gst_buffer_new();
+  GST_BUFFER_SIZE(buf) = aSize;
+  /* allocate the actual YUV buffer */
+  GST_BUFFER_DATA(buf) = image->AllocateAndGetNewBuffer(aSize);
+
+  aImage = image;
+
+#if GST_VERSION_MICRO >= 36
+  /* create a GBoxed handle to hold the image */
+  BufferData* data = new BufferData(image);
+
+  /* store it in a GValue so we can put it in a GstStructure */
+  GValue value = {0,};
+  g_value_init(&value, buffer_data_get_type());
+  g_value_take_boxed(&value, data);
+
+  /* store the value in the structure */
+  GstStructure* structure = gst_structure_new("moz-reader-data", nullptr);
+  gst_structure_take_value(structure, "image", &value);
+
+  /* and attach the structure to the buffer */
+  gst_buffer_set_qdata(buf, g_quark_from_string("moz-reader-data"), structure);
+#endif
+
+  *aBuf = buf;
+  return GST_FLOW_OK;
+}
+
+GstFlowReturn GStreamerReader::AllocateVideoBufferCb(GstPad* aPad,
+                                                     guint64 aOffset,
+                                                     guint aSize,
+                                                     GstCaps* aCaps,
+                                                     GstBuffer** aBuf)
+{
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(gst_pad_get_element_private(aPad));
+  return reader->AllocateVideoBuffer(aPad, aOffset, aSize, aCaps, aBuf);
+}
+
+GstFlowReturn GStreamerReader::AllocateVideoBuffer(GstPad* aPad,
+                                                   guint64 aOffset,
+                                                   guint aSize,
+                                                   GstCaps* aCaps,
+                                                   GstBuffer** aBuf)
+{
+  nsRefPtr<PlanarYCbCrImage> image;
+  return AllocateVideoBufferFull(aPad, aOffset, aSize, aCaps, aBuf, image);
+}
+
+GstFlowReturn GStreamerReader::NewPrerollCb(GstAppSink* aSink,
                                               gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
 
   if (aSink == reader->mVideoAppSink)
     reader->VideoPreroll();
   else
     reader->AudioPreroll();
   return GST_FLOW_OK;
 }
 
 void GStreamerReader::AudioPreroll()
 {
   /* The first audio buffer has reached the audio sink. Get rate and channels */
   LOG(PR_LOG_DEBUG, ("Audio preroll"));
-  GstPad *sinkpad = gst_element_get_pad(GST_ELEMENT(mAudioAppSink), "sink");
-  GstCaps *caps = gst_pad_get_negotiated_caps(sinkpad);
-  GstStructure *s = gst_caps_get_structure(caps, 0);
+  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mAudioAppSink), "sink");
+  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
+  GstStructure* s = gst_caps_get_structure(caps, 0);
   mInfo.mAudioRate = mInfo.mAudioChannels = 0;
-  gst_structure_get_int(s, "rate", (gint *) &mInfo.mAudioRate);
-  gst_structure_get_int(s, "channels", (gint *) &mInfo.mAudioChannels);
+  gst_structure_get_int(s, "rate", (gint*) &mInfo.mAudioRate);
+  gst_structure_get_int(s, "channels", (gint*) &mInfo.mAudioChannels);
   NS_ASSERTION(mInfo.mAudioRate != 0, ("audio rate is zero"));
   NS_ASSERTION(mInfo.mAudioChannels != 0, ("audio channels is zero"));
   NS_ASSERTION(mInfo.mAudioChannels > 0 && mInfo.mAudioChannels <= MAX_CHANNELS,
       "invalid audio channels number");
   mInfo.mHasAudio = true;
   gst_caps_unref(caps);
   gst_object_unref(sinkpad);
 }
 
 void GStreamerReader::VideoPreroll()
 {
   /* The first video buffer has reached the video sink. Get width and height */
   LOG(PR_LOG_DEBUG, ("Video preroll"));
-  GstPad *sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
-  GstCaps *caps = gst_pad_get_negotiated_caps(sinkpad);
+  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
+  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
   gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
-  GstStructure *structure = gst_caps_get_structure(caps, 0);
+  GstStructure* structure = gst_caps_get_structure(caps, 0);
   gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
   NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");
   mInfo.mDisplay = nsIntSize(mPicture.width, mPicture.height);
   mInfo.mHasVideo = true;
   gst_caps_unref(caps);
   gst_object_unref(sinkpad);
 }
 
-GstFlowReturn GStreamerReader::NewBufferCb(GstAppSink *aSink,
-                                             gpointer aUserData)
+GstFlowReturn GStreamerReader::NewBufferCb(GstAppSink* aSink,
+                                           gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
 
   if (aSink == reader->mVideoAppSink)
     reader->NewVideoBuffer();
   else
     reader->NewAudioBuffer();
 
   return GST_FLOW_OK;
 }
@@ -810,23 +909,23 @@ void GStreamerReader::NewAudioBuffer()
   ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
   /* We have a new audio buffer queued in the audio sink. Increment the counter
    * and notify the decode thread potentially blocked in DecodeAudioData
    */
   mAudioSinkBufferCount++;
   mon.NotifyAll();
 }
 
-void GStreamerReader::EosCb(GstAppSink *aSink, gpointer aUserData)
+void GStreamerReader::EosCb(GstAppSink* aSink, gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   reader->Eos(aSink);
 }
 
-void GStreamerReader::Eos(GstAppSink *aSink)
+void GStreamerReader::Eos(GstAppSink* aSink)
 {
   /* We reached the end of the stream */
   {
     ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
     /* Potentially unblock DecodeVideoFrame and DecodeAudioData */
     mReachedEos = true;
     mon.NotifyAll();
   }
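Summing up the zero-copy path added in this file: in GStreamer 0.10 an upstream element requests output buffers from the downstream pad with gst_pad_alloc_buffer(), which invokes the bufferalloc function installed on the appsink pad. A sketch of that flow (the decoder element and its source pad are hypothetical):

  /* Upstream video decoder asking downstream for an output buffer: */
  GstBuffer* outbuf = nullptr;
  GstFlowReturn ret = gst_pad_alloc_buffer(decoderSrcPad, offset, size,
                                           caps, &outbuf);
  /* With the hook installed in Init(), this lands in AllocateVideoBufferCb(),
   * which recovers the reader via gst_pad_get_element_private() and returns
   * a GstBuffer whose GST_BUFFER_DATA is image->AllocateAndGetNewBuffer().
   * The decoder writes YUV directly into gfx-owned memory, and
   * DecodeVideoFrame() later recovers the image from the buffer's qdata,
   * falling back to a memcpy only when upstream skipped pad allocation. */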
--- a/content/media/gstreamer/GStreamerReader.h
+++ b/content/media/gstreamer/GStreamerReader.h
@@ -4,16 +4,17 @@
 
 #if !defined(GStreamerReader_h_)
 #define GStreamerReader_h_
 
 #include <gst/gst.h>
 #include <gst/app/gstappsrc.h>
 #include <gst/app/gstappsink.h>
 #include <gst/video/video.h>
+#include <map>
 #include "MediaDecoderReader.h"
 
 namespace mozilla {
 
 namespace dom {
 class TimeRanges;
 }
 
@@ -44,75 +45,87 @@ public:
 
   virtual bool HasVideo() {
     return mInfo.mHasVideo;
   }
 
 private:
 
   void ReadAndPushData(guint aLength);
-  bool WaitForDecodedData(int *counter);
+  bool WaitForDecodedData(int* counter);
   void NotifyBytesConsumed();
   int64_t QueryDuration();
 
   /* Gst callbacks */
 
   /* Called on the source-setup signal emitted by playbin. Used to
    * configure appsrc .
    */
-  static void PlayBinSourceSetupCb(GstElement *aPlayBin,
-                                   GParamSpec *pspec,
+  static void PlayBinSourceSetupCb(GstElement* aPlayBin,
+                                   GParamSpec* pspec,
                                    gpointer aUserData);
-  void PlayBinSourceSetup(GstAppSrc *aSource);
+  void PlayBinSourceSetup(GstAppSrc* aSource);
 
   /* Called from appsrc when we need to read more data from the resource */
-  static void NeedDataCb(GstAppSrc *aSrc, guint aLength, gpointer aUserData);
-  void NeedData(GstAppSrc *aSrc, guint aLength);
+  static void NeedDataCb(GstAppSrc* aSrc, guint aLength, gpointer aUserData);
+  void NeedData(GstAppSrc* aSrc, guint aLength);
 
   /* Called when appsrc has enough data and we can stop reading */
-  static void EnoughDataCb(GstAppSrc *aSrc, gpointer aUserData);
-  void EnoughData(GstAppSrc *aSrc);
+  static void EnoughDataCb(GstAppSrc* aSrc, gpointer aUserData);
+  void EnoughData(GstAppSrc* aSrc);
 
   /* Called when a seek is issued on the pipeline */
-  static gboolean SeekDataCb(GstAppSrc *aSrc,
+  static gboolean SeekDataCb(GstAppSrc* aSrc,
                              guint64 aOffset,
                              gpointer aUserData);
-  gboolean SeekData(GstAppSrc *aSrc, guint64 aOffset);
+  gboolean SeekData(GstAppSrc* aSrc, guint64 aOffset);
 
   /* Called when events reach the sinks. See inline comments */
-  static gboolean EventProbeCb(GstPad *aPad, GstEvent *aEvent, gpointer aUserData);
-  gboolean EventProbe(GstPad *aPad, GstEvent *aEvent);
+  static gboolean EventProbeCb(GstPad* aPad, GstEvent* aEvent, gpointer aUserData);
+  gboolean EventProbe(GstPad* aPad, GstEvent* aEvent);
+
+  /* Called when elements in the video branch of the pipeline call
+   * gst_pad_alloc_buffer(). Used to provide PlanarYCbCrImage backed GstBuffers
+   * to the pipeline so that a memory copy can be avoided when handling YUV
+   * buffers from the pipeline to the gfx side.
+   */
+  static GstFlowReturn AllocateVideoBufferCb(GstPad* aPad, guint64 aOffset, guint aSize,
+                                             GstCaps* aCaps, GstBuffer** aBuf);
+  GstFlowReturn AllocateVideoBufferFull(GstPad* aPad, guint64 aOffset, guint aSize,
+                                        GstCaps* aCaps, GstBuffer** aBuf,
+                                        nsRefPtr<layers::PlanarYCbCrImage>& aImage);
+  GstFlowReturn AllocateVideoBuffer(GstPad* aPad, guint64 aOffset, guint aSize,
+                                    GstCaps* aCaps, GstBuffer** aBuf);
 
   /* Called when the pipeline is prerolled, that is when at start or after a
    * seek, the first audio and video buffers are queued in the sinks.
    */
-  static GstFlowReturn NewPrerollCb(GstAppSink *aSink, gpointer aUserData);
+  static GstFlowReturn NewPrerollCb(GstAppSink* aSink, gpointer aUserData);
   void VideoPreroll();
   void AudioPreroll();
 
   /* Called when buffers reach the sinks */
-  static GstFlowReturn NewBufferCb(GstAppSink *aSink, gpointer aUserData);
+  static GstFlowReturn NewBufferCb(GstAppSink* aSink, gpointer aUserData);
   void NewVideoBuffer();
   void NewAudioBuffer();
 
   /* Called at end of stream, when decoding has finished */
-  static void EosCb(GstAppSink *aSink, gpointer aUserData);
-  void Eos(GstAppSink *aSink);
+  static void EosCb(GstAppSink* aSink, gpointer aUserData);
+  void Eos(GstAppSink* aSink);
 
-  GstElement *mPlayBin;
-  GstBus *mBus;
-  GstAppSrc *mSource;
+  GstElement* mPlayBin;
+  GstBus* mBus;
+  GstAppSrc* mSource;
   /* video sink bin */
-  GstElement *mVideoSink;
+  GstElement* mVideoSink;
   /* the actual video app sink */
-  GstAppSink *mVideoAppSink;
+  GstAppSink* mVideoAppSink;
   /* audio sink bin */
-  GstElement *mAudioSink;
+  GstElement* mAudioSink;
   /* the actual audio app sink */
-  GstAppSink *mAudioAppSink;
+  GstAppSink* mAudioAppSink;
   GstVideoFormat mFormat;
   nsIntRect mPicture;
   int mVideoSinkBufferCount;
   int mAudioSinkBufferCount;
   GstAppSrcCallbacks mSrcCallbacks;
   GstAppSinkCallbacks mSinkCallbacks;
   /* monitor used to synchronize access to shared state between gstreamer
    * threads and other gecko threads */
@@ -130,11 +143,26 @@ private:
   /* offset we've reached reading from the source */
   gint64 mByteOffset;
   /* the last offset we reported with NotifyBytesConsumed */
   gint64 mLastReportedByteOffset;
   int fpsNum;
   int fpsDen;
 };
 
+class BufferData {
+public:
+  BufferData(layers::PlanarYCbCrImage* aImage) : mImage(aImage) {}
+
+  static void* Copy(void* aData) {
+    return new BufferData(reinterpret_cast<BufferData*>(aData)->mImage);
+  }
+
+  static void Free(void* aData) {
+    delete reinterpret_cast<BufferData*>(aData);
+  }
+
+  nsRefPtr<layers::PlanarYCbCrImage> mImage;
+};
+
 } // namespace mozilla
 
 #endif
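For reference, the G_DEFINE_BOXED_TYPE(BufferData, buffer_data, ...) line in GStreamerReader.cpp works because BufferData::Copy and BufferData::Free match GLib's GBoxedCopyFunc/GBoxedFreeFunc signatures; the macro expands to roughly this sketch (simplified; the real expansion is thread-safe):

  static GType
  buffer_data_get_type(void)
  {
    static GType type = 0;
    if (!type) {
      type = g_boxed_type_register_static(g_intern_static_string("BufferData"),
                                          (GBoxedCopyFunc) BufferData::Copy,
                                          (GBoxedFreeFunc) BufferData::Free);
    }
    return type;
  }

Since mImage is an nsRefPtr, copying and freeing a boxed BufferData simply adds and drops a reference, which is what keeps the PlanarYCbCrImage alive while the GstBuffer travels through the pipeline.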
--- a/gfx/layers/ImageContainer.cpp
+++ b/gfx/layers/ImageContainer.cpp
@@ -449,17 +449,17 @@ PlanarYCbCrImage::CopyData(const Data& a
 {
   mData = aData;
 
   // update buffer size
   mBufferSize = mData.mCbCrStride * mData.mCbCrSize.height * 2 +
                 mData.mYStride * mData.mYSize.height;
 
   // get new buffer
-  mBuffer = AllocateBuffer(mBufferSize); 
+  mBuffer = AllocateBuffer(mBufferSize);
   if (!mBuffer)
     return;
 
   mData.mYChannel = mBuffer;
   mData.mCbChannel = mData.mYChannel + mData.mYStride * mData.mYSize.height;
   mData.mCrChannel = mData.mCbChannel + mData.mCbCrStride * mData.mCbCrSize.height;
 
   CopyPlane(mData.mYChannel, aData.mYChannel,
@@ -481,16 +481,34 @@ PlanarYCbCrImage::SetData(const Data &aD
 gfxASurface::gfxImageFormat
 PlanarYCbCrImage::GetOffscreenFormat()
 {
   return mOffscreenFormat != gfxASurface::ImageFormatUnknown ?
     gfxPlatform::GetPlatform()->GetOffscreenFormat() :
     mOffscreenFormat;
 }
 
+void
+PlanarYCbCrImage::SetDataNoCopy(const Data &aData)
+{
+  mData = aData;
+  mSize = aData.mPicSize;
+}
+
+uint8_t*
+PlanarYCbCrImage::AllocateAndGetNewBuffer(uint32_t aSize)
+{
+  // update buffer size
+  mBufferSize = aSize;
+
+  // get new buffer
+  mBuffer = AllocateBuffer(mBufferSize);
+  return mBuffer;
+}
+
 already_AddRefed<gfxASurface>
 PlanarYCbCrImage::GetAsSurface()
 {
   if (mSurface) {
     nsRefPtr<gfxASurface> result = mSurface.get();
     return result.forget();
   }
 
--- a/gfx/layers/ImageContainer.h
+++ b/gfx/layers/ImageContainer.h
@@ -684,16 +684,30 @@ public:
 
   /**
    * This makes a copy of the data buffers, in order to support functioning
    * in all different layer managers.
    */
   virtual void SetData(const Data& aData);
 
   /**
+   * This doesn't make a copy of the data buffers. It can be used when mBuffer
+   * has been pre-allocated with AllocateAndGetNewBuffer(aSize); SetDataNoCopy
+   * then only updates the picture size, plane pointers and other fields in
+   * mData. The GStreamer media backend uses this to decode directly into
+   * PlanarYCbCrImage(s).
+   */
+  virtual void SetDataNoCopy(const Data &aData);
+
+  /**
+   * This allocates and returns a new buffer of aSize bytes, which the image
+   * keeps as its backing buffer (mBuffer).
+   */
+  virtual uint8_t* AllocateAndGetNewBuffer(uint32_t aSize);
+
+  /**
    * Ask this Image to not convert YUV to RGB during SetData, and make
    * the original data available through GetData. This is optional,
    * and not all PlanarYCbCrImages will support it.
    */
   virtual void SetDelayedConversion(bool aDelayed) { }
 
   /**
    * Grab the original YUV data. This is optional.
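Finally, a sketch of how the two members added above are meant to pair up (hypothetical strides and sizes; this mirrors what GStreamerReader::AllocateVideoBufferFull and VideoData::Create do with them):

  // 1. Allocate the backing store up front so a producer can write into it.
  uint8_t* buffer = image->AllocateAndGetNewBuffer(size);
  // ... the producer (e.g. a video decoder) fills 'buffer' with YUV data ...

  // 2. Describe the planes in terms of that same memory; nothing is copied.
  PlanarYCbCrImage::Data data;
  data.mYChannel = buffer;
  data.mCbChannel = data.mYChannel + yStride * ySize.height;
  data.mCrChannel = data.mCbChannel + cbCrStride * cbCrSize.height;
  image->SetDataNoCopy(data);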