Bug 944117 - updated WebM demuxer to surface alpha information. r?jya draft
author karo <kkoorts@mozilla.com>
Mon, 21 Nov 2016 16:59:25 +1300
changeset 442605 d7d2d1a680ca532acb910b7a6754659c6ead7a79
parent 438112 47e0584afe0ab0b867412189c610b302b6ba0ea7
child 537841 c5dd1f3f44e9283304e81b289870ebf76321074a
push id 36754
push user bmo:kkoorts@mozilla.com
push date Tue, 22 Nov 2016 22:06:07 +0000
reviewers jya
bugs 944117
milestone 52.0a1
Bug 944117 - updated WebM demuxer to surface alpha information. r?jya

MozReview-Commit-ID: JjTu7ddetzj
dom/media/ADTSDemuxer.cpp
dom/media/MP3Demuxer.cpp
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/MediaInfo.h
dom/media/flac/FlacDemuxer.cpp
dom/media/fmp4/MP4Demuxer.cpp
dom/media/gmp/GMPAudioHost.cpp
dom/media/gmp/GMPCDMProxy.cpp
dom/media/gmp/widevine-adapter/WidevineVideoDecoder.cpp
dom/media/gtest/TestMP3Demuxer.cpp
dom/media/ipc/VideoDecoderChild.cpp
dom/media/platforms/agnostic/OpusDecoder.cpp
dom/media/platforms/agnostic/TheoraDecoder.cpp
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/VorbisDecoder.cpp
dom/media/platforms/agnostic/WAVDecoder.cpp
dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/omx/OmxDataDecoder.cpp
dom/media/platforms/wmf/WMFAudioMFTManager.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/wave/WaveDemuxer.cpp
dom/media/webm/WebMDemuxer.cpp
media/libstagefright/binding/Adts.cpp
media/libstagefright/binding/AnnexB.cpp
media/libstagefright/binding/H264.cpp
media/libstagefright/binding/Index.cpp
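
Most of the patch is a mechanical rename of MediaRawData::Size() to DataSize(), making room for the new AlphaData()/AlphaSize() accessors; the substantive changes are in MediaData.{h,cpp}, MediaInfo.h and WebMDemuxer.cpp below. As a rough sketch of how the pieces are meant to fit together (hypothetical caller code, not part of this patch; only the MediaRawData and VideoInfo additions shown in the diff are real):

    // Hypothetical consumer of the new alpha-aware sample API.
    void HandleSample(const mozilla::MediaRawData* aSample)
    {
      const uint8_t* yuv = aSample->Data();    // main (YUV) payload
      size_t yuvSize = aSample->DataSize();    // was Size() before this patch

      // AlphaSize() is 0 when the sample carries no alpha side data.
      if (aSample->AlphaSize()) {
        const uint8_t* alpha = aSample->AlphaData();
        // ... hand yuv + alpha to an alpha-capable decoder ...
      }
    }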
--- a/dom/media/ADTSDemuxer.cpp
+++ b/dom/media/ADTSDemuxer.cpp
@@ -732,17 +732,17 @@ ADTSTrackDemuxer::GetNextFrame(const adt
   nsAutoPtr<MediaRawDataWriter> frameWriter(frame->CreateWriter());
   if (!frameWriter->SetSize(length)) {
     ADTSLOG("GetNext() Exit failed to allocated media buffer");
     return nullptr;
   }
 
   const uint32_t read = Read(frameWriter->Data(), offset, length);
   if (read != length) {
-    ADTSLOG("GetNext() Exit read=%u frame->Size()=%u", read, frame->Size());
+    ADTSLOG("GetNext() Exit read=%u frame->DataSize()=%u", read, frame->DataSize());
     return nullptr;
   }
 
   UpdateState(aFrame);
 
   frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
   frame->mDuration = Duration(1).ToMicroseconds();
   frame->mTimecode = frame->mTime;
--- a/dom/media/MP3Demuxer.cpp
+++ b/dom/media/MP3Demuxer.cpp
@@ -557,36 +557,36 @@ MP3TrackDemuxer::GetNextFrame(const Medi
   frame->mOffset = aRange.mStart;
 
   nsAutoPtr<MediaRawDataWriter> frameWriter(frame->CreateWriter());
   if (!frameWriter->SetSize(aRange.Length())) {
     MP3LOG("GetNext() Exit failed to allocated media buffer");
     return nullptr;
   }
 
-  const uint32_t read = Read(frameWriter->Data(), frame->mOffset, frame->Size());
+  const uint32_t read = Read(frameWriter->Data(), frame->mOffset, frame->DataSize());
 
   if (read != aRange.Length()) {
-    MP3LOG("GetNext() Exit read=%u frame->Size()=%u", read, frame->Size());
+    MP3LOG("GetNext() Exit read=%u frame->DataSize()=%u", read, frame->DataSize());
     return nullptr;
   }
 
   UpdateState(aRange);
 
   frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
   frame->mDuration = Duration(1).ToMicroseconds();
   frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
 
   MOZ_ASSERT(frame->mTime >= 0);
   MOZ_ASSERT(frame->mDuration > 0);
 
   if (mNumParsedFrames == 1) {
     // First frame parsed, let's read VBR info if available.
-    ByteReader reader(frame->Data(), frame->Size());
+    ByteReader reader(frame->Data(), frame->DataSize());
     mParser.ParseVBRHeader(&reader);
     mFirstFrameOffset = frame->mOffset;
   }
 
   MP3LOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
           " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
           " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
           mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -388,32 +388,43 @@ MediaRawData::MediaRawData()
 
 MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize)
   : MediaData(RAW_DATA, 0)
   , mCrypto(mCryptoInternal)
   , mBuffer(aData, aSize)
 {
 }
 
+MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize, const uint8_t* aAlphaData, size_t aAlphaSize)
+  : MediaData(RAW_DATA, 0)
+  , mCrypto(mCryptoInternal)
+  , mBuffer(aData, aSize)
+  , mAlphaBuffer(aAlphaData, aAlphaSize)
+{
+}
+
 already_AddRefed<MediaRawData>
 MediaRawData::Clone() const
 {
   RefPtr<MediaRawData> s = new MediaRawData;
   s->mTimecode = mTimecode;
   s->mTime = mTime;
   s->mDuration = mDuration;
   s->mOffset = mOffset;
   s->mKeyframe = mKeyframe;
   s->mExtraData = mExtraData;
   s->mCryptoInternal = mCryptoInternal;
   s->mTrackInfo = mTrackInfo;
   s->mEOS = mEOS;
   if (!s->mBuffer.Append(mBuffer.Data(), mBuffer.Length())) {
     return nullptr;
   }
+  if (!s->mAlphaBuffer.Append(mAlphaBuffer.Data(), mAlphaBuffer.Length())) {
+    return nullptr;
+  }
   return s.forget();
 }
 
 MediaRawData::~MediaRawData()
 {
 }
 
 size_t
@@ -464,12 +475,12 @@ uint8_t*
 MediaRawDataWriter::Data()
 {
   return mTarget->mBuffer.Data();
 }
 
 size_t
 MediaRawDataWriter::Size()
 {
-  return mTarget->Size();
+  return mTarget->DataSize();
 }
 
 } // namespace mozilla
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -613,24 +613,28 @@ private:
   bool EnsureSize(size_t aSize);
   MediaRawData* mTarget;
 };
 
 class MediaRawData : public MediaData {
 public:
   MediaRawData();
   MediaRawData(const uint8_t* aData, size_t mSize);
+  MediaRawData(const uint8_t* aData, size_t mSize, const uint8_t* aAlphaData, size_t mAlphaSize);
 
   // Pointer to data or null if not-yet allocated
   const uint8_t* Data() const { return mBuffer.Data(); }
+  // Pointer to alpha data or null if not-yet allocated
+  const uint8_t* AlphaData() const { return mAlphaBuffer.Data(); }
   // Size of buffer.
-  size_t Size() const { return mBuffer.Length(); }
+  size_t DataSize() const { return mBuffer.Length(); }
+  size_t AlphaSize() const { return mAlphaBuffer.Length(); }
   size_t ComputedSizeOfIncludingThis() const
   {
-    return sizeof(*this) + mBuffer.ComputedSizeOfExcludingThis();
+    return sizeof(*this) + mBuffer.ComputedSizeOfExcludingThis() + mAlphaBuffer.ComputedSizeOfExcludingThis();
   }
 
   const CryptoSample& mCrypto;
   RefPtr<MediaByteBuffer> mExtraData;
 
   // Used by the Vorbis decoder and Ogg demuxer.
   // Indicates that this is the last packet of the stream.
   bool mEOS = false;
@@ -645,16 +649,17 @@ public:
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
 
 protected:
   ~MediaRawData();
 
 private:
   friend class MediaRawDataWriter;
   AlignedByteBuffer mBuffer;
+  AlignedByteBuffer mAlphaBuffer;
   CryptoSample mCryptoInternal;
   MediaRawData(const MediaRawData&); // Not implemented
 };
 
   // MediaByteBuffer is a ref counted infallible TArray.
 class MediaByteBuffer : public nsTArray<uint8_t> {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaByteBuffer);
   MediaByteBuffer() = default;
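
For reference, a minimal sketch of constructing and cloning an alpha-carrying sample through the new four-argument constructor (illustrative only; yuvData/alphaData and their sizes are placeholders, not names from the tree):

    // yuvData/alphaData stand in for an encoded VP8/VP9 frame and its alpha plane.
    RefPtr<MediaRawData> sample =
      new MediaRawData(yuvData, yuvSize, alphaData, alphaSize);

    // Clone() now copies mAlphaBuffer as well, so the alpha plane survives
    // duplication of the sample.
    RefPtr<MediaRawData> copy = sample->Clone();
    MOZ_ASSERT(copy->AlphaSize() == sample->AlphaSize());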
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -197,28 +197,30 @@ public:
     : TrackInfo(kVideoTrack, NS_LITERAL_STRING("2"), NS_LITERAL_STRING("main"),
                 EmptyString(), EmptyString(), true, 2)
     , mDisplay(aSize)
     , mStereoMode(StereoMode::MONO)
     , mImage(aSize)
     , mCodecSpecificConfig(new MediaByteBuffer)
     , mExtraData(new MediaByteBuffer)
     , mRotation(kDegree_0)
+    , mAlphaPresent(false)
     , mImageRect(nsIntRect(nsIntPoint(), aSize))
   {
   }
 
   VideoInfo(const VideoInfo& aOther)
     : TrackInfo(aOther)
     , mDisplay(aOther.mDisplay)
     , mStereoMode(aOther.mStereoMode)
     , mImage(aOther.mImage)
     , mCodecSpecificConfig(aOther.mCodecSpecificConfig)
     , mExtraData(aOther.mExtraData)
     , mRotation(aOther.mRotation)
+    , mAlphaPresent(aOther.mAlphaPresent)
     , mImageRect(aOther.mImageRect)
   {
   }
 
   bool IsValid() const override
   {
     return mDisplay.width > 0 && mDisplay.height > 0;
   }
@@ -300,16 +302,19 @@ public:
 
   RefPtr<MediaByteBuffer> mCodecSpecificConfig;
   RefPtr<MediaByteBuffer> mExtraData;
 
   // Describing how many degrees video frames should be rotated in clock-wise to
   // get correct view.
   Rotation mRotation;
 
+  // Indicates whether or not frames may contain alpha information.
+  bool mAlphaPresent;
+
 private:
   // mImage may be cropped; currently only used with the WebM container.
   // A negative width or height indicate that no cropping is to occur.
   nsIntRect mImageRect;
 };
 
 class AudioInfo : public TrackInfo {
 public:
--- a/dom/media/flac/FlacDemuxer.cpp
+++ b/dom/media/flac/FlacDemuxer.cpp
@@ -971,17 +971,17 @@ FlacTrackDemuxer::GetNextFrame(const fla
   nsAutoPtr<MediaRawDataWriter> frameWriter(frame->CreateWriter());
   if (!frameWriter->SetSize(size)) {
     LOG("GetNext() Exit failed to allocated media buffer");
     return nullptr;
   }
 
   const uint32_t read = Read(frameWriter->Data(), offset, size);
   if (read != size) {
-    LOG("GetNextFrame() Exit read=%u frame->Size=%u", read, frame->Size());
+    LOG("GetNextFrame() Exit read=%u frame->DataSize=%u", read, frame->DataSize());
     return nullptr;
   }
 
   frame->mTime = aFrame.Time().ToMicroseconds();
   frame->mDuration = aFrame.Duration().ToMicroseconds();
   frame->mTimecode = frame->mTime;
   frame->mOffset = aFrame.Offset();
   frame->mKeyframe = true;
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -295,17 +295,17 @@ MP4TrackDemuxer::Seek(media::TimeUnit aT
 
   // Check what time we actually seeked to.
   RefPtr<MediaRawData> sample;
   do {
     sample = GetNextSample();
     if (!sample) {
       return SeekPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
     }
-    if (!sample->Size()) {
+    if (!sample->DataSize()) {
       // This sample can't be decoded, continue searching.
       continue;
     }
     if (sample->mKeyframe) {
       mQueuedSample = sample;
       seekTime = mQueuedSample->mTime;
     }
   } while (!mQueuedSample);
@@ -377,17 +377,17 @@ MP4TrackDemuxer::GetSamples(int32_t aNum
     MOZ_ASSERT(mQueuedSample->mKeyframe,
                "mQueuedSample must be a keyframe");
     samples->mSamples.AppendElement(mQueuedSample);
     mQueuedSample = nullptr;
     aNumSamples--;
   }
   RefPtr<MediaRawData> sample;
   while (aNumSamples && (sample = GetNextSample())) {
-    if (!sample->Size()) {
+    if (!sample->DataSize()) {
       continue;
     }
     samples->mSamples.AppendElement(sample);
     aNumSamples--;
   }
 
   if (samples->mSamples.IsEmpty()) {
     return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
--- a/dom/media/gmp/GMPAudioHost.cpp
+++ b/dom/media/gmp/GMPAudioHost.cpp
@@ -35,17 +35,17 @@ GMPAudioSamplesImpl::GMPAudioSamplesImpl
 GMPAudioSamplesImpl::GMPAudioSamplesImpl(MediaRawData* aSample,
                                          uint32_t aChannels,
                                          uint32_t aRate)
  : mFormat(kGMPAudioEncodedSamples)
  , mTimeStamp(aSample->mTime)
  , mChannels(aChannels)
  , mRate(aRate)
 {
-  mBuffer.AppendElements(aSample->Data(), aSample->Size());
+  mBuffer.AppendElements(aSample->Data(), aSample->DataSize());
   if (aSample->mCrypto.mValid) {
     mCrypto = new GMPEncryptedBufferDataImpl(aSample->mCrypto);
   }
 }
 
 GMPAudioSamplesImpl::~GMPAudioSamplesImpl()
 {
 }
--- a/dom/media/gmp/GMPCDMProxy.cpp
+++ b/dom/media/gmp/GMPCDMProxy.cpp
@@ -688,17 +688,17 @@ GMPCDMProxy::gmp_Decrypt(RefPtr<DecryptJ
 
   if (!mCDM) {
     aJob->PostResult(AbortedErr);
     return;
   }
 
   aJob->mId = ++mDecryptionJobCount;
   nsTArray<uint8_t> data;
-  data.AppendElements(aJob->mSample->Data(), aJob->mSample->Size());
+  data.AppendElements(aJob->mSample->Data(), aJob->mSample->DataSize());
   mCDM->Decrypt(aJob->mId, aJob->mSample->mCrypto, data);
   mDecryptionJobs.AppendElement(aJob.forget());
 }
 
 void
 GMPCDMProxy::gmp_Decrypted(uint32_t aId,
                            DecryptStatus aResult,
                            const nsTArray<uint8_t>& aDecryptedData)
@@ -730,24 +730,24 @@ GMPCDMProxy::DecryptJob::PostResult(Decr
   nsTArray<uint8_t> empty;
   PostResult(aResult, empty);
 }
 
 void
 GMPCDMProxy::DecryptJob::PostResult(DecryptStatus aResult,
                                     const nsTArray<uint8_t>& aDecryptedData)
 {
-  if (aDecryptedData.Length() != mSample->Size()) {
+  if (aDecryptedData.Length() != mSample->DataSize()) {
     NS_WARNING("CDM returned incorrect number of decrypted bytes");
   }
   if (aResult == Ok) {
     nsAutoPtr<MediaRawDataWriter> writer(mSample->CreateWriter());
     PodCopy(writer->Data(),
             aDecryptedData.Elements(),
-            std::min<size_t>(aDecryptedData.Length(), mSample->Size()));
+            std::min<size_t>(aDecryptedData.Length(), mSample->DataSize()));
   } else if (aResult == NoKeyErr) {
     NS_WARNING("CDM returned NoKeyErr");
     // We still have the encrypted sample, so we can re-enqueue it to be
     // decrypted again once the key is usable again.
   } else {
     nsAutoCString str("CDM returned decode failure DecryptStatus=");
     str.AppendInt(aResult);
     NS_WARNING(str.get());
--- a/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.cpp
+++ b/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.cpp
@@ -116,17 +116,17 @@ WidevineVideoDecoder::Decode(GMPVideoEnc
   if (mCodecType == kGMPVideoCodecH264) {
     // Convert input from AVCC, which GMPAPI passes in, to AnnexB, which
     // Chromium uses internally.
     mp4_demuxer::AnnexB::ConvertSampleToAnnexB(raw);
   }
 
   const GMPEncryptedBufferMetadata* crypto = aInputFrame->GetDecryptionData();
   nsTArray<SubsampleEntry> subsamples;
-  InitInputBuffer(crypto, aInputFrame->TimeStamp(), raw->Data(), raw->Size(), sample, subsamples);
+  InitInputBuffer(crypto, aInputFrame->TimeStamp(), raw->Data(), raw->DataSize(), sample, subsamples);
 
   // For keyframes, ConvertSampleToAnnexB will stick the AnnexB extra data
   // at the start of the input. So we need to account for that as clear data
   // in the subsamples.
   if (raw->mKeyframe && !subsamples.IsEmpty() && mCodecType == kGMPVideoCodecH264) {
     subsamples[0].clear_bytes += mAnnexB->Length();
   }
 
--- a/dom/media/gtest/TestMP3Demuxer.cpp
+++ b/dom/media/gtest/TestMP3Demuxer.cpp
@@ -328,17 +328,17 @@ TEST_F(MP3DemuxerTest, FrameParsing) {
 
     while (frameData) {
       if (static_cast<int64_t>(target.mSyncOffsets.size()) > numFrames) {
         // Test sync offsets.
         EXPECT_EQ(target.mSyncOffsets[numFrames], frameData->mOffset);
       }
 
       ++numFrames;
-      parsedLength += frameData->Size();
+      parsedLength += frameData->DataSize();
 
       const auto& frame = target.mDemuxer->LastFrame();
       const auto& header = frame.Header();
       ASSERT_TRUE(header.IsValid());
 
       numSamples += header.SamplesPerFrame();
 
       EXPECT_EQ(target.mMPEGLayer, header.Layer());
--- a/dom/media/ipc/VideoDecoderChild.cpp
+++ b/dom/media/ipc/VideoDecoderChild.cpp
@@ -181,22 +181,22 @@ VideoDecoderChild::Input(MediaRawData* a
   if (!mCanSend) {
     return;
   }
 
   // TODO: It would be nice to add an allocator method to
   // MediaDataDecoder so that the demuxer could write directly
   // into shmem rather than requiring a copy here.
   Shmem buffer;
-  if (!AllocShmem(aSample->Size(), Shmem::SharedMemory::TYPE_BASIC, &buffer)) {
+  if (!AllocShmem(aSample->DataSize(), Shmem::SharedMemory::TYPE_BASIC, &buffer)) {
     mCallback->Error(NS_ERROR_DOM_MEDIA_DECODE_ERR);
     return;
   }
 
-  memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
+  memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->DataSize());
 
   MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset,
                                         aSample->mTime,
                                         aSample->mTimecode,
                                         aSample->mDuration,
                                         aSample->mFrames,
                                         aSample->mKeyframe),
                           buffer);
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -185,23 +185,23 @@ OpusDataDecoder::DoDecode(MediaRawData* 
   if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
     // We are starting a new block.
     mFrames = 0;
     mLastFrameTime = Some(aSample->mTime);
   }
 
   // Maximum value is 63*2880, so there's no chance of overflow.
   int32_t frames_number = opus_packet_get_nb_frames(aSample->Data(),
-                                                    aSample->Size());
+                                                    aSample->DataSize());
   if (frames_number <= 0) {
     OPUS_DEBUG("Invalid packet header: r=%ld length=%ld",
-               frames_number, aSample->Size());
+               frames_number, aSample->DataSize());
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                        RESULT_DETAIL("Invalid packet header: r=%d length=%u",
-                                     frames_number, uint32_t(aSample->Size())));
+                                     frames_number, uint32_t(aSample->DataSize())));
   }
 
   int32_t samples = opus_packet_get_samples_per_frame(aSample->Data(),
                                            opus_int32(mOpusParser->mRate));
 
 
   // A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
   int32_t frames = frames_number*samples;
@@ -214,21 +214,21 @@ OpusDataDecoder::DoDecode(MediaRawData* 
   AlignedAudioBuffer buffer(frames * channels);
   if (!buffer) {
     return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
   }
 
   // Decode to the appropriate sample type.
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
   int ret = opus_multistream_decode_float(mOpusDecoder,
-                                          aSample->Data(), aSample->Size(),
+                                          aSample->Data(), aSample->DataSize(),
                                           buffer.get(), frames, false);
 #else
   int ret = opus_multistream_decode(mOpusDecoder,
-                                    aSample->Data(), aSample->Size(),
+                                    aSample->Data(), aSample->DataSize(),
                                     buffer.get(), frames, false);
 #endif
   if (ret < 0) {
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                        RESULT_DETAIL("Opus decoding error:%d", ret));
   }
   NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
   CheckedInt64 startTime = aSample->mTime;
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -124,17 +124,17 @@ TheoraDecoder::DoDecodeHeader(const unsi
 }
 
 MediaResult
 TheoraDecoder::DoDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
   const unsigned char* aData = aSample->Data();
-  size_t aLength = aSample->Size();
+  size_t aLength = aSample->DataSize();
 
   bool bos = mPacketCount == 0;
   ogg_packet pkt = InitTheoraPacket(aData, aLength, bos, false, aSample->mTimecode, mPacketCount++);
 
   int ret = th_decode_packetin(mTheoraDecoderContext, &pkt, nullptr);
   if (ret == 0 || ret == TH_DUPFRAME) {
     th_ycbcr_buffer ycbcr;
     th_decode_ycbcr_out(mTheoraDecoderContext, ycbcr);
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -101,25 +101,25 @@ MediaResult
 VPXDecoder::DoDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 #if defined(DEBUG)
   vpx_codec_stream_info_t si;
   PodZero(&si);
   si.sz = sizeof(si);
   if (mCodec == Codec::VP8) {
-    vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), aSample->Data(), aSample->Size(), &si);
+    vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), aSample->Data(), aSample->DataSize(), &si);
   } else if (mCodec == Codec::VP9) {
-    vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aSample->Data(), aSample->Size(), &si);
+    vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aSample->Data(), aSample->DataSize(), &si);
   }
   NS_ASSERTION(bool(si.is_kf) == aSample->mKeyframe,
                "VPX Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
 #endif
 
-  if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) {
+  if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->DataSize(), nullptr, 0)) {
     LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
     return MediaResult(
       NS_ERROR_DOM_MEDIA_DECODE_ERR,
       RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r)));
   }
 
   vpx_codec_iter_t  iter = nullptr;
   vpx_image_t      *img;
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -147,17 +147,17 @@ VorbisDataDecoder::ProcessDecode(MediaRa
 }
 
 MediaResult
 VorbisDataDecoder::DoDecode(MediaRawData* aSample)
 {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
   const unsigned char* aData = aSample->Data();
-  size_t aLength = aSample->Size();
+  size_t aLength = aSample->DataSize();
   int64_t aOffset = aSample->mOffset;
   uint64_t aTstampUsecs = aSample->mTime;
   int64_t aTotalFrames = 0;
 
   MOZ_ASSERT(mPacketCount >= 3);
 
   if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
     // We are starting a new block.
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -71,17 +71,17 @@ WaveDataDecoder::Input(MediaRawData* aSa
   } else {
     mCallback->InputExhausted();
   }
 }
 
 MediaResult
 WaveDataDecoder::DoDecode(MediaRawData* aSample)
 {
-  size_t aLength = aSample->Size();
+  size_t aLength = aSample->DataSize();
   ByteReader aReader(aSample->Data(), aLength);
   int64_t aOffset = aSample->mOffset;
   uint64_t aTstampUsecs = aSample->mTime;
 
   int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
 
   AlignedAudioBuffer buffer(frames * mInfo.mChannels);
   if (!buffer) {
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -187,17 +187,17 @@ GMPVideoDecoder::CreateFrame(MediaRawDat
   GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
   if (GMP_FAILED(err)) {
     mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                  RESULT_DETAIL("Host::CreateFrame:%x", err)));
     return nullptr;
   }
 
   GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp));
-  err = frame->CreateEmptyFrame(aSample->Size());
+  err = frame->CreateEmptyFrame(aSample->DataSize());
   if (GMP_FAILED(err)) {
     mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                  RESULT_DETAIL("GMPVideoEncodedFrame::CreateEmptyFrame:%x", err)));
     return nullptr;
   }
 
   memcpy(frame->Buffer(), aSample->Data(), frame->Size());
 
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -119,17 +119,17 @@ CopyAndPackAudio(AVFrame* aFrame, uint32
 
 MediaResult
 FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
 {
   AVPacket packet;
   mLib->av_init_packet(&packet);
 
   packet.data = const_cast<uint8_t*>(aSample->Data());
-  packet.size = aSample->Size();
+  packet.size = aSample->DataSize();
 
   if (!PrepareFrame()) {
     return MediaResult(
       NS_ERROR_OUT_OF_MEMORY,
       RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
   }
 
   int64_t samplePosition = aSample->mOffset;
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -167,17 +167,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
   bool gotFrame = false;
   return DoDecode(aSample, &gotFrame);
 }
 
 MediaResult
 FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
 {
   uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
-  size_t inputSize = aSample->Size();
+  size_t inputSize = aSample->DataSize();
 
 #if LIBAVCODEC_VERSION_MAJOR >= 54
   if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
 #if LIBAVCODEC_VERSION_MAJOR >= 55
       || mCodecID == AV_CODEC_ID_VP9
 #endif
       )) {
     while (inputSize) {
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -453,25 +453,25 @@ OmxDataDecoder::FillAndEmptyBuffers()
     RefPtr<BufferData> inbuf = FindAvailableBuffer(OMX_DirInput);
     if (!inbuf) {
       LOG("no input buffer!");
       break;
     }
 
     RefPtr<MediaRawData> data = mMediaRawDatas[0];
     // Buffer size should large enough for raw data.
-    MOZ_RELEASE_ASSERT(inbuf->mBuffer->nAllocLen >= data->Size());
+    MOZ_RELEASE_ASSERT(inbuf->mBuffer->nAllocLen >= data->DataSize());
 
-    memcpy(inbuf->mBuffer->pBuffer, data->Data(), data->Size());
-    inbuf->mBuffer->nFilledLen = data->Size();
+    memcpy(inbuf->mBuffer->pBuffer, data->Data(), data->DataSize());
+    inbuf->mBuffer->nFilledLen = data->DataSize();
     inbuf->mBuffer->nOffset = 0;
-    inbuf->mBuffer->nFlags = inbuf->mBuffer->nAllocLen > data->Size() ?
+    inbuf->mBuffer->nFlags = inbuf->mBuffer->nAllocLen > data->DataSize() ?
                              OMX_BUFFERFLAG_ENDOFFRAME : 0;
     inbuf->mBuffer->nTimeStamp = data->mTime;
-    if (data->Size()) {
+    if (data->DataSize()) {
       inbuf->mRawData = mMediaRawDatas[0];
     } else {
        LOG("send EOS buffer");
       inbuf->mBuffer->nFlags |= OMX_BUFFERFLAG_EOS;
     }
 
     LOG("feed sample %p to omx component, len %d, flag %X", data.get(),
         inbuf->mBuffer->nFilledLen, inbuf->mBuffer->nFlags);
--- a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -188,17 +188,17 @@ WMFAudioMFTManager::Init()
 
   return true;
 }
 
 HRESULT
 WMFAudioMFTManager::Input(MediaRawData* aSample)
 {
   return mDecoder->Input(aSample->Data(),
-                         uint32_t(aSample->Size()),
+                         uint32_t(aSample->DataSize()),
                          aSample->mTime);
 }
 
 HRESULT
 WMFAudioMFTManager::UpdateOutputType()
 {
   HRESULT hr;
 
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -534,17 +534,17 @@ WMFVideoMFTManager::Input(MediaRawData* 
   }
 
   if (!mDecoder) {
     // This can happen during shutdown.
     return E_FAIL;
   }
 
   HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
-                                           uint32_t(aSample->Size()),
+                                           uint32_t(aSample->DataSize()),
                                            aSample->mTime,
                                            &mLastInput);
   NS_ENSURE_TRUE(SUCCEEDED(hr) && mLastInput != nullptr, hr);
 
   mLastDuration = aSample->mDuration;
   mLastTime = aSample->mTime;
   mSamplesCount++;
 
--- a/dom/media/wave/WaveDemuxer.cpp
+++ b/dom/media/wave/WaveDemuxer.cpp
@@ -513,17 +513,17 @@ WAVTrackDemuxer::GetNextChunk(const Medi
 
   nsAutoPtr<MediaRawDataWriter> chunkWriter(datachunk->CreateWriter());
   if (!chunkWriter->SetSize(aRange.Length())) {
     return nullptr;
   }
 
   const uint32_t read = Read(chunkWriter->Data(),
                              datachunk->mOffset,
-                             datachunk->Size());
+                             datachunk->DataSize());
 
   if (read != aRange.Length()) {
     return nullptr;
   }
 
   UpdateState(aRange);
   ++mNumParsedChunks;
   ++mChunkIndex;
@@ -558,17 +558,17 @@ WAVTrackDemuxer::GetFileHeader(const Med
 
   nsAutoPtr<MediaRawDataWriter> headerWriter(fileHeader->CreateWriter());
   if (!headerWriter->SetSize(aRange.Length())) {
     return nullptr;
   }
 
   const uint32_t read = Read(headerWriter->Data(),
                              fileHeader->mOffset,
-                             fileHeader->Size());
+                             fileHeader->DataSize());
 
   if (read != aRange.Length()) {
     return nullptr;
   }
 
   UpdateState(aRange);
 
   return fileHeader.forget();
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -353,16 +353,17 @@ WebMDemuxer::ReadMetadata()
       if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
         // Video track's frame sizes will overflow. Ignore the video track.
         continue;
       }
 
       mVideoTrack = track;
       mHasVideo = true;
 
+      mInfo.mVideo.mAlphaPresent = params.alpha_mode;
       mInfo.mVideo.mDisplay = displaySize;
       mInfo.mVideo.mImage = frameSize;
       mInfo.mVideo.SetImageRect(pictureRect);
 
       switch (params.stereo_mode) {
         case NESTEGG_VIDEO_MONO:
           mInfo.mVideo.mStereoMode = StereoMode::MONO;
           break;
@@ -621,16 +622,22 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
   for (uint32_t i = 0; i < count; ++i) {
     unsigned char* data;
     size_t length;
     r = nestegg_packet_data(holder->Packet(), i, &data, &length);
     if (r == -1) {
       WEBM_DEBUG("nestegg_packet_data failed r=%d", r);
       return false;
     }
+    unsigned char* alphaData;
+    size_t alphaLength = 0;
+    // Check packets for alpha information if the file has declared that alpha frames may be present.
+    if (mInfo.mVideo.mAlphaPresent == true) {
+      nestegg_packet_additional_data(holder->Packet(), 1, &alphaData, &alphaLength);
+    }
     bool isKeyframe = false;
     if (aType == TrackInfo::kAudioTrack) {
       isKeyframe = true;
     } else if (aType == TrackInfo::kVideoTrack) {
       if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED) {
         // Packet is encrypted, can't peek, use packet info
         isKeyframe = nestegg_packet_has_keyframe(holder->Packet()) == NESTEGG_PACKET_HAS_KEYFRAME_TRUE;
       } else {
@@ -658,17 +665,22 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
           mLastSeenFrameWidth = Some(si.w);
           mLastSeenFrameHeight = Some(si.h);
         }
       }
     }
 
     WEBM_DEBUG("push sample tstamp: %ld next_tstamp: %ld length: %ld kf: %d",
                tstamp, next_tstamp, length, isKeyframe);
-    RefPtr<MediaRawData> sample = new MediaRawData(data, length);
+    RefPtr<MediaRawData> sample;
+    if (mInfo.mVideo.mAlphaPresent == true && alphaLength != 0) {
+      sample = new MediaRawData(data, length, alphaData, alphaLength);
+    } else {
+      sample = new MediaRawData(data, length);
+    }
     sample->mTimecode = tstamp;
     sample->mTime = tstamp;
     sample->mDuration = next_tstamp - tstamp;
     sample->mOffset = holder->Offset();
     sample->mKeyframe = isKeyframe;
     if (discardPadding && i == count - 1) {
       uint8_t c[8];
       BigEndian::writeInt64(&c[0], discardPadding);
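
nestegg_packet_additional_data() (used in the hunk above) looks up the packet's BlockAdditional stream with the given ID; ID 1 is where WebM carries the encoded VP8/VP9 alpha plane. It may leave the output pointer untouched when the packet has no such side data, which is why alphaLength is pre-initialised to 0 and re-checked before the alpha-aware constructor is chosen. A slightly more defensive variant of the same lookup (a sketch, not part of the patch) would also check the return value:

    unsigned char* alphaData = nullptr;
    size_t alphaLength = 0;
    // BlockAddID 1 carries the encoded alpha plane for VP8/VP9 in WebM.
    if (mInfo.mVideo.mAlphaPresent &&
        nestegg_packet_additional_data(holder->Packet(), 1,
                                       &alphaData, &alphaLength) == -1) {
      alphaLength = 0;  // No alpha side data on this packet.
    }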
--- a/media/libstagefright/binding/Adts.cpp
+++ b/media/libstagefright/binding/Adts.cpp
@@ -33,17 +33,17 @@ Adts::GetFrequencyIndex(uint32_t aSample
 }
 
 bool
 Adts::ConvertSample(uint16_t aChannelCount, int8_t aFrequencyIndex,
                     int8_t aProfile, MediaRawData* aSample)
 {
   static const int kADTSHeaderSize = 7;
 
-  size_t newSize = aSample->Size() + kADTSHeaderSize;
+  size_t newSize = aSample->DataSize() + kADTSHeaderSize;
 
   // ADTS header uses 13 bits for packet size.
   if (newSize >= (1 << 13) || aChannelCount > 15 ||
       aFrequencyIndex < 0 || aProfile < 1 || aProfile > 4) {
     return false;
   }
 
   Array<uint8_t, kADTSHeaderSize> header;
@@ -59,17 +59,17 @@ Adts::ConvertSample(uint16_t aChannelCou
   nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
   if (!writer->Prepend(&header[0], ArrayLength(header))) {
     return false;
   }
 
   if (aSample->mCrypto.mValid) {
     if (aSample->mCrypto.mPlainSizes.Length() == 0) {
       writer->mCrypto.mPlainSizes.AppendElement(kADTSHeaderSize);
-      writer->mCrypto.mEncryptedSizes.AppendElement(aSample->Size() - kADTSHeaderSize);
+      writer->mCrypto.mEncryptedSizes.AppendElement(aSample->DataSize() - kADTSHeaderSize);
     } else {
       writer->mCrypto.mPlainSizes[0] += kADTSHeaderSize;
     }
   }
 
   return true;
 }
 }
--- a/media/libstagefright/binding/AnnexB.cpp
+++ b/media/libstagefright/binding/AnnexB.cpp
@@ -26,22 +26,22 @@ AnnexB::ConvertSampleToAnnexB(mozilla::M
     return true;
   }
   MOZ_ASSERT(aSample->Data());
 
   if (!ConvertSampleTo4BytesAVCC(aSample)) {
     return false;
   }
 
-  if (aSample->Size() < 4) {
+  if (aSample->DataSize() < 4) {
     // Nothing to do, it's corrupted anyway.
     return true;
   }
 
-  ByteReader reader(aSample->Data(), aSample->Size());
+  ByteReader reader(aSample->Data(), aSample->DataSize());
 
   mozilla::Vector<uint8_t> tmp;
   ByteWriter writer(tmp);
 
   while (reader.Remaining() >= 4) {
     uint32_t nalLen = reader.ReadU32();
     const uint8_t* p = reader.Read(nalLen);
 
@@ -221,17 +221,17 @@ AnnexB::ConvertSampleToAVCC(mozilla::Med
   }
   if (!IsAnnexB(aSample)) {
     // Not AnnexB, nothing to convert.
     return true;
   }
 
   mozilla::Vector<uint8_t> nalu;
   ByteWriter writer(nalu);
-  ByteReader reader(aSample->Data(), aSample->Size());
+  ByteReader reader(aSample->Data(), aSample->DataSize());
 
   ParseNALUnits(writer, reader);
   nsAutoPtr<MediaRawDataWriter> samplewriter(aSample->CreateWriter());
   return samplewriter->Replace(nalu.begin(), nalu.length());
 }
 
 already_AddRefed<mozilla::MediaByteBuffer>
 AnnexB::ExtractExtraData(const mozilla::MediaRawData* aSample)
@@ -260,17 +260,17 @@ AnnexB::ExtractExtraData(const mozilla::
   int nalLenSize;
   if (IsAVCC(aSample)) {
     nalLenSize = ((*aSample->mExtraData)[4] & 3) + 1;
   } else {
     // We do not have an extradata, assume it's AnnexB converted to AVCC via
     // ConvertSampleToAVCC.
     nalLenSize = 4;
   }
-  ByteReader reader(aSample->Data(), aSample->Size());
+  ByteReader reader(aSample->Data(), aSample->DataSize());
 
   // Find SPS and PPS NALUs in AVCC data
   while (reader.Remaining() > nalLenSize) {
     uint32_t nalLen;
     switch (nalLenSize) {
       case 1: nalLen = reader.ReadU8();  break;
       case 2: nalLen = reader.ReadU16(); break;
       case 3: nalLen = reader.ReadU24(); break;
@@ -340,17 +340,17 @@ AnnexB::ConvertSampleTo4BytesAVCC(mozill
 
   int nalLenSize = ((*aSample->mExtraData)[4] & 3) + 1;
 
   if (nalLenSize == 4) {
     return true;
   }
   mozilla::Vector<uint8_t> dest;
   ByteWriter writer(dest);
-  ByteReader reader(aSample->Data(), aSample->Size());
+  ByteReader reader(aSample->Data(), aSample->DataSize());
   while (reader.Remaining() > nalLenSize) {
     uint32_t nalLen;
     switch (nalLenSize) {
       case 1: nalLen = reader.ReadU8();  break;
       case 2: nalLen = reader.ReadU16(); break;
       case 3: nalLen = reader.ReadU24(); break;
       case 4: nalLen = reader.ReadU32(); break;
     }
@@ -363,24 +363,24 @@ AnnexB::ConvertSampleTo4BytesAVCC(mozill
   }
   nsAutoPtr<MediaRawDataWriter> samplewriter(aSample->CreateWriter());
   return samplewriter->Replace(dest.begin(), dest.length());
 }
 
 bool
 AnnexB::IsAVCC(const mozilla::MediaRawData* aSample)
 {
-  return aSample->Size() >= 3 && aSample->mExtraData &&
+  return aSample->DataSize() >= 3 && aSample->mExtraData &&
     aSample->mExtraData->Length() >= 7 && (*aSample->mExtraData)[0] == 1;
 }
 
 bool
 AnnexB::IsAnnexB(const mozilla::MediaRawData* aSample)
 {
-  if (aSample->Size() < 4) {
+  if (aSample->DataSize() < 4) {
     return false;
   }
   uint32_t header = mozilla::BigEndian::readUint32(aSample->Data());
   return header == 0x00000001 || (header >> 8) == 0x000001;
 }
 
 bool
 AnnexB::CompareExtraData(const mozilla::MediaByteBuffer* aExtraData1,
--- a/media/libstagefright/binding/H264.cpp
+++ b/media/libstagefright/binding/H264.cpp
@@ -494,17 +494,17 @@ H264::GetFrameType(const mozilla::MediaR
   if (!AnnexB::IsAVCC(aSample)) {
     // We must have a valid AVCC frame with extradata.
     return FrameType::INVALID;
   }
   MOZ_ASSERT(aSample->Data());
 
   int nalLenSize = ((*aSample->mExtraData)[4] & 3) + 1;
 
-  ByteReader reader(aSample->Data(), aSample->Size());
+  ByteReader reader(aSample->Data(), aSample->DataSize());
 
   while (reader.Remaining() >= nalLenSize) {
     uint32_t nalLen;
     switch (nalLenSize) {
       case 1: nalLen = reader.ReadU8();  break;
       case 2: nalLen = reader.ReadU16(); break;
       case 3: nalLen = reader.ReadU24(); break;
       case 4: nalLen = reader.ReadU32(); break;
--- a/media/libstagefright/binding/Index.cpp
+++ b/media/libstagefright/binding/Index.cpp
@@ -107,18 +107,18 @@ already_AddRefed<MediaRawData> SampleIte
 
   nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
   // Do the blocking read
   if (!writer->SetSize(s->mByteRange.Length())) {
     return nullptr;
   }
 
   size_t bytesRead;
-  if (!mIndex->mSource->ReadAt(sample->mOffset, writer->Data(), sample->Size(),
-                               &bytesRead) || bytesRead != sample->Size()) {
+  if (!mIndex->mSource->ReadAt(sample->mOffset, writer->Data(), sample->DataSize(),
+                               &bytesRead) || bytesRead != sample->DataSize()) {
     return nullptr;
   }
 
   if (!s->mCencRange.IsEmpty()) {
     MoofParser* parser = mIndex->mMoofParser.get();
 
     if (!parser || !parser->mSinf.IsValid()) {
       return nullptr;
@@ -150,17 +150,17 @@ already_AddRefed<MediaRawData> SampleIte
 
       for (size_t i = 0; i < count; i++) {
         writer->mCrypto.mPlainSizes.AppendElement(reader.ReadU16());
         writer->mCrypto.mEncryptedSizes.AppendElement(reader.ReadU32());
       }
     } else {
       // No subsample information means the entire sample is encrypted.
       writer->mCrypto.mPlainSizes.AppendElement(0);
-      writer->mCrypto.mEncryptedSizes.AppendElement(sample->Size());
+      writer->mCrypto.mEncryptedSizes.AppendElement(sample->DataSize());
     }
   }
 
   Next();
 
   return sample.forget();
 }