Bug 1344649 - part 2: let VideoData::CreateFromImage() accept only necessary parameters. r=jya a=gchang
authorJohn Lin <jolin@mozilla.com>
Thu, 09 Mar 2017 12:06:24 +0800
changeset 395229 2ce9976ffff3f4acea9cfa1fe03c73445145fed7
parent 395228 da006962bafd0fa519419ae1bb2f26ae8dc7b6b9
child 395230 28d1b6af43b76327a0fd0c8e76e0ed57d204c70b
push id1468
push userasasaki@mozilla.com
push dateMon, 05 Jun 2017 19:31:07 +0000
treeherdermozilla-release@0641fc6ee9d1 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjya, gchang
bugs1344649
milestone54.0a2
Bug 1344649 - part 2: let VideoData::CreateFromImage() accept only necessary parameters. r=jya a=gchang VideoData doesn't care about anything in aInfo except the display size, and aPicture is unused. MozReview-Commit-ID: IBqq8Rm8dM4
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/android/AndroidMediaReader.cpp
dom/media/ipc/VideoDecoderChild.cpp
dom/media/platforms/android/RemoteDataDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -428,31 +428,30 @@ VideoData::CreateAndCopyData(const Video
                       argb_buffer, size.width * 4,
                       size.width, size.height);
 
   return v.forget();
 }
 
 /* static */
 already_AddRefed<VideoData>
-VideoData::CreateFromImage(const VideoInfo& aInfo,
+VideoData::CreateFromImage(const IntSize& aDisplay,
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aDuration,
                            const RefPtr<Image>& aImage,
                            bool aKeyframe,
-                           int64_t aTimecode,
-                           const IntRect& aPicture)
+                           int64_t aTimecode)
 {
   RefPtr<VideoData> v(new VideoData(aOffset,
                                     aTime,
                                     aDuration,
                                     aKeyframe,
                                     aTimecode,
-                                    aInfo.mDisplay,
+                                    aDisplay,
                                     0));
   v->mImage = aImage;
   return v.forget();
 }
 
 MediaRawData::MediaRawData()
   : MediaData(RAW_DATA, 0)
   , mCrypto(mCryptoInternal)
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -513,24 +513,23 @@ public:
     int64_t aTime,
     int64_t aDuration,
     layers::TextureClient* aBuffer,
     bool aKeyframe,
     int64_t aTimecode,
     const IntRect& aPicture);
 
   static already_AddRefed<VideoData> CreateFromImage(
-    const VideoInfo& aInfo,
+    const IntSize& aDisplay,
     int64_t aOffset,
     int64_t aTime,
     int64_t aDuration,
     const RefPtr<Image>& aImage,
     bool aKeyframe,
-    int64_t aTimecode,
-    const IntRect& aPicture);
+    int64_t aTimecode);
 
   // Initialize PlanarYCbCrImage. Only When aCopyData is true,
   // video data is copied to PlanarYCbCrImage.
   static bool SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
                                   const VideoInfo& aInfo,
                                   const YCbCrBuffer& aBuffer,
                                   const IntRect& aPicture,
                                   bool aCopyData);
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -167,36 +167,23 @@ bool AndroidMediaReader::DecodeVideoFram
       return true;
 
     currentImage = bufferCallback.GetImage();
     int64_t pos = mDecoder->GetResource()->Tell();
     IntRect picture = mPicture;
 
     RefPtr<VideoData> v;
     if (currentImage) {
-      gfx::IntSize frameSize = currentImage->GetSize();
-      if (frameSize.width != mInitialFrame.width ||
-          frameSize.height != mInitialFrame.height) {
-        // Frame size is different from what the container reports. This is legal,
-        // and we will preserve the ratio of the crop rectangle as it
-        // was reported relative to the picture size reported by the container.
-        picture.x = (mPicture.x * frameSize.width) / mInitialFrame.width;
-        picture.y = (mPicture.y * frameSize.height) / mInitialFrame.height;
-        picture.width = (frameSize.width * mPicture.width) / mInitialFrame.width;
-        picture.height = (frameSize.height * mPicture.height) / mInitialFrame.height;
-      }
-
-      v = VideoData::CreateFromImage(mInfo.mVideo,
+      v = VideoData::CreateFromImage(mInfo.mVideo.mDisplay,
                                      pos,
                                      frame.mTimeUs,
                                      1, // We don't know the duration yet.
                                      currentImage,
                                      frame.mKeyFrame,
-                                     -1,
-                                     picture);
+                                     -1);
     } else {
       // Assume YUV
       VideoData::YCbCrBuffer b;
       b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData);
       b.mPlanes[0].mStride = frame.Y.mStride;
       b.mPlanes[0].mHeight = frame.Y.mHeight;
       b.mPlanes[0].mWidth = frame.Y.mWidth;
       b.mPlanes[0].mOffset = frame.Y.mOffset;
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -34,31 +34,29 @@ VideoDecoderChild::~VideoDecoderChild()
   AssertOnManagerThread();
   mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
 }
 
 mozilla::ipc::IPCResult
 VideoDecoderChild::RecvOutput(const VideoDataIPDL& aData)
 {
   AssertOnManagerThread();
-  VideoInfo info(aData.display().width, aData.display().height);
 
   // The Image here creates a TextureData object that takes ownership
   // of the SurfaceDescriptor, and is responsible for making sure that
   // it gets deallocated.
   RefPtr<Image> image = new GPUVideoImage(GetManager(), aData.sd(), aData.frameSize());
 
-  RefPtr<VideoData> video = VideoData::CreateFromImage(info,
+  RefPtr<VideoData> video = VideoData::CreateFromImage(aData.display(),
                                                        aData.base().offset(),
                                                        aData.base().time(),
                                                        aData.base().duration(),
                                                        image,
                                                        aData.base().keyframe(),
-                                                       aData.base().timecode(),
-                                                       IntRect());
+                                                       aData.base().timecode());
   mDecodedData.AppendElement(Move(video));
   return IPC_OK();
 }
 
 mozilla::ipc::IPCResult
 VideoDecoderChild::RecvInputExhausted()
 {
   AssertOnManagerThread();
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -181,21 +181,19 @@ public:
       if (size > 0) {
         MutexAutoLock lock(mDecoder->mMutex);
 
         RefPtr<layers::Image> img = new SurfaceTextureImage(
           mDecoder->mSurfaceTexture.get(), mDecoder->mConfig.mImage,
           gl::OriginPos::BottomLeft);
 
         RefPtr<VideoData> v = VideoData::CreateFromImage(
-          mDecoder->mConfig, offset, presentationTimeUs, durationUs,
+          mDecoder->mConfig.mDisplay, offset, presentationTimeUs, durationUs,
           img, !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
-          presentationTimeUs,
-          gfx::IntRect(0, 0, mDecoder->mConfig.mDisplay.width,
-                       mDecoder->mConfig.mDisplay.height));
+          presentationTimeUs);
 
         v->SetListener(Move(releaseSample));
 
         mDecoder->Output(v);
       }
 
       if (isEOS) {
         mDecoder->DrainComplete();
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -347,20 +347,16 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
     }
   }
 
   // Where our resulting image will end up.
   RefPtr<MediaData> data;
   // Bounds.
   VideoInfo info;
   info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
-  gfx::IntRect visible = gfx::IntRect(0,
-                                      0,
-                                      mPictureWidth,
-                                      mPictureHeight);
 
   if (useNullSample) {
     data = new NullData(aFrameRef.byte_offset,
                         aFrameRef.composition_timestamp.ToMicroseconds(),
                         aFrameRef.duration.ToMicroseconds());
   } else if (mUseSoftwareImages) {
     size_t width = CVPixelBufferGetWidth(aImage);
     size_t height = CVPixelBufferGetHeight(aImage);
@@ -400,16 +396,21 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
     buffer.mPlanes[2].mData =
       static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
     buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
     buffer.mPlanes[2].mWidth = (width+1) / 2;
     buffer.mPlanes[2].mHeight = (height+1) / 2;
     buffer.mPlanes[2].mOffset = 1;
     buffer.mPlanes[2].mSkip = 1;
 
+    gfx::IntRect visible = gfx::IntRect(0,
+                                        0,
+                                        mPictureWidth,
+                                        mPictureHeight);
+
     // Copy the image data into our own format.
     data =
       VideoData::CreateAndCopyData(info,
                                    mImageContainer,
                                    aFrameRef.byte_offset,
                                    aFrameRef.composition_timestamp.ToMicroseconds(),
                                    aFrameRef.duration.ToMicroseconds(),
                                    buffer,
@@ -423,24 +424,23 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
     IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
     MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
 
     RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
 
     RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);
 
     data =
-      VideoData::CreateFromImage(info,
+      VideoData::CreateFromImage(info.mDisplay,
                                  aFrameRef.byte_offset,
                                  aFrameRef.composition_timestamp.ToMicroseconds(),
                                  aFrameRef.duration.ToMicroseconds(),
                                  image.forget(),
                                  aFrameRef.is_sync_point,
-                                 aFrameRef.decode_timestamp.ToMicroseconds(),
-                                 visible);
+                                 aFrameRef.decode_timestamp.ToMicroseconds());
 #else
     MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
 #endif
   }
 
   if (!data) {
     NS_ERROR("Couldn't create VideoData for frame");
     MonitorAutoLock mon(mMonitor);
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -872,24 +872,23 @@ WMFVideoMFTManager::CreateBasicVideoFram
 
   VideoData::SetVideoDataToImage(image,
                                  mVideoInfo,
                                  b,
                                  pictureRegion,
                                  false);
 
   RefPtr<VideoData> v =
-    VideoData::CreateFromImage(mVideoInfo,
+    VideoData::CreateFromImage(mVideoInfo.mDisplay,
                                aStreamOffset,
                                pts.ToMicroseconds(),
                                duration.ToMicroseconds(),
                                image.forget(),
                                false,
-                               -1,
-                               pictureRegion);
+                               -1);
 
   v.forget(aOutVideoData);
   return S_OK;
 }
 
 HRESULT
 WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
                                         int64_t aStreamOffset,
@@ -911,24 +910,23 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
                                   getter_AddRefs(image));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   NS_ENSURE_TRUE(image, E_FAIL);
 
   media::TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
   media::TimeUnit duration = GetSampleDuration(aSample);
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
-  RefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
+  RefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo.mDisplay,
                                                    aStreamOffset,
                                                    pts.ToMicroseconds(),
                                                    duration.ToMicroseconds(),
                                                    image.forget(),
                                                    false,
-                                                   -1,
-                                                   pictureRegion);
+                                                   -1);
 
   NS_ENSURE_TRUE(v, E_FAIL);
   v.forget(aOutVideoData);
 
   return S_OK;
 }
 
 // Blocks until decoded sample is produced by the deoder.