Bug 1240630: [ffmpeg] P4. Rename FFmpegH264Decoder into FFmpegVideoDecoder. r=kentuckyfriedtakahe
☠☠ backed out by 1f7787efce87 ☠ ☠
authorJean-Yves Avenard <jyavenard@mozilla.com>
Tue, 19 Jan 2016 22:20:01 +1100
changeset 280765 2360ccbf3aaa50a42b2f7ceaaec474cc968e7683
parent 280764 1f5356e9679ef984b9029d99b73b9e27308755d5
child 280766 4ab338518d021d46de2198ef499d74a1bc77617b
push id29922
push usercbook@mozilla.com
push dateThu, 21 Jan 2016 10:51:00 +0000
treeherdermozilla-central@977d78a8dd78 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerskentuckyfriedtakahe
bugs1240630
milestone46.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1240630: [ffmpeg] P4. Rename FFmpegH264Decoder into FFmpegVideoDecoder. r=kentuckyfriedtakahe The days we used to only be able to use the FFmpeg decoder for H264 are long gone. It can do H264, VP6, VP8 and VP9.
dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
dom/media/platforms/ffmpeg/FFmpegH264Decoder.cpp
dom/media/platforms/ffmpeg/FFmpegH264Decoder.h
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
dom/media/platforms/ffmpeg/ffmpeg57/moz.build
dom/media/platforms/ffmpeg/ffvpx/moz.build
dom/media/platforms/ffmpeg/libav53/moz.build
dom/media/platforms/ffmpeg/libav54/moz.build
dom/media/platforms/ffmpeg/libav55/moz.build
--- a/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
+++ b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
@@ -4,17 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef __FFmpegDecoderModule_h__
 #define __FFmpegDecoderModule_h__
 
 #include "PlatformDecoderModule.h"
 #include "FFmpegAudioDecoder.h"
-#include "FFmpegH264Decoder.h"
+#include "FFmpegVideoDecoder.h"
 
 namespace mozilla
 {
 
 template <int V>
 class FFmpegDecoderModule : public PlatformDecoderModule
 {
 public:
@@ -32,18 +32,18 @@ public:
   already_AddRefed<MediaDataDecoder>
   CreateVideoDecoder(const VideoInfo& aConfig,
                      layers::LayersBackend aLayersBackend,
                      layers::ImageContainer* aImageContainer,
                      FlushableTaskQueue* aVideoTaskQueue,
                      MediaDataDecoderCallback* aCallback) override
   {
     RefPtr<MediaDataDecoder> decoder =
-      new FFmpegH264Decoder<V>(aVideoTaskQueue, aCallback, aConfig,
-                               aImageContainer);
+      new FFmpegVideoDecoder<V>(aVideoTaskQueue, aCallback, aConfig,
+                                aImageContainer);
     return decoder.forget();
   }
 
   already_AddRefed<MediaDataDecoder>
   CreateAudioDecoder(const AudioInfo& aConfig,
                      FlushableTaskQueue* aAudioTaskQueue,
                      MediaDataDecoderCallback* aCallback) override
   {
@@ -58,17 +58,17 @@ public:
 
   bool SupportsMimeType(const nsACString& aMimeType) const override
   {
 #ifdef USING_MOZFFVPX
     AVCodecID audioCodec = AV_CODEC_ID_NONE;
 #else
     AVCodecID audioCodec = FFmpegAudioDecoder<V>::GetCodecId(aMimeType);
 #endif
-    AVCodecID videoCodec = FFmpegH264Decoder<V>::GetCodecId(aMimeType);
+    AVCodecID videoCodec = FFmpegVideoDecoder<V>::GetCodecId(aMimeType);
     if (audioCodec == AV_CODEC_ID_NONE && videoCodec == AV_CODEC_ID_NONE) {
       return false;
     }
     AVCodecID codec = audioCodec != AV_CODEC_ID_NONE ? audioCodec : videoCodec;
     return !!FFmpegDataDecoder<V>::FindAVCodec(codec);
   }
 
   ConversionRequired
deleted file mode 100644
--- a/dom/media/platforms/ffmpeg/FFmpegH264Decoder.cpp
+++ /dev/null
@@ -1,395 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "mozilla/TaskQueue.h"
-
-#include "nsThreadUtils.h"
-#include "nsAutoPtr.h"
-#include "ImageContainer.h"
-
-#include "MediaInfo.h"
-
-#include "FFmpegH264Decoder.h"
-#include "FFmpegLog.h"
-#include "mozilla/PodOperations.h"
-
-#include "libavutil/pixfmt.h"
-#if LIBAVCODEC_VERSION_MAJOR < 54
-#define AVPixelFormat PixelFormat
-#define AV_PIX_FMT_YUV420P PIX_FMT_YUV420P
-#define AV_PIX_FMT_YUVJ420P PIX_FMT_YUVJ420P
-#define AV_PIX_FMT_YUV444P PIX_FMT_YUV444P
-#define AV_PIX_FMT_NONE PIX_FMT_NONE
-#endif
-
-typedef mozilla::layers::Image Image;
-typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
-
-namespace mozilla
-{
-
-/**
- * FFmpeg calls back to this function with a list of pixel formats it supports.
- * We choose a pixel format that we support and return it.
- * For now, we just look for YUV420P, YUVJ420P and YUV444 as those are the only
- * only non-HW accelerated format supported by FFmpeg's H264 and VP9 decoder.
- */
-static AVPixelFormat
-ChoosePixelFormat(AVCodecContext* aCodecContext, const AVPixelFormat* aFormats)
-{
-  FFMPEG_LOG("Choosing FFmpeg pixel format for video decoding.");
-  for (; *aFormats > -1; aFormats++) {
-    switch (*aFormats) {
-      case AV_PIX_FMT_YUV444P:
-        FFMPEG_LOG("Requesting pixel format YUV444P.");
-        return AV_PIX_FMT_YUV444P;
-      case AV_PIX_FMT_YUV420P:
-      case AV_PIX_FMT_YUVJ420P:
-        FFMPEG_LOG("Requesting pixel format YUV420P.");
-        return AV_PIX_FMT_YUV420P;
-      default:
-        break;
-    }
-  }
-
-  NS_WARNING("FFmpeg does not share any supported pixel formats.");
-  return AV_PIX_FMT_NONE;
-}
-
-FFmpegH264Decoder<LIBAV_VER>::PtsCorrectionContext::PtsCorrectionContext()
-  : mNumFaultyPts(0)
-  , mNumFaultyDts(0)
-  , mLastPts(INT64_MIN)
-  , mLastDts(INT64_MIN)
-{
-}
-
-int64_t
-FFmpegH264Decoder<LIBAV_VER>::PtsCorrectionContext::GuessCorrectPts(int64_t aPts, int64_t aDts)
-{
-  int64_t pts = AV_NOPTS_VALUE;
-
-  if (aDts != int64_t(AV_NOPTS_VALUE)) {
-    mNumFaultyDts += aDts <= mLastDts;
-    mLastDts = aDts;
-  }
-  if (aPts != int64_t(AV_NOPTS_VALUE)) {
-    mNumFaultyPts += aPts <= mLastPts;
-    mLastPts = aPts;
-  }
-  if ((mNumFaultyPts <= mNumFaultyDts || aDts == int64_t(AV_NOPTS_VALUE)) &&
-      aPts != int64_t(AV_NOPTS_VALUE)) {
-    pts = aPts;
-  } else {
-    pts = aDts;
-  }
-  return pts;
-}
-
-void
-FFmpegH264Decoder<LIBAV_VER>::PtsCorrectionContext::Reset()
-{
-  mNumFaultyPts = 0;
-  mNumFaultyDts = 0;
-  mLastPts = INT64_MIN;
-  mLastDts = INT64_MIN;
-}
-
-FFmpegH264Decoder<LIBAV_VER>::FFmpegH264Decoder(
-  FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
-  const VideoInfo& aConfig,
-  ImageContainer* aImageContainer)
-  : FFmpegDataDecoder(aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
-  , mImageContainer(aImageContainer)
-  , mDisplay(aConfig.mDisplay)
-  , mImage(aConfig.mImage)
-  , mCodecParser(nullptr)
-{
-  MOZ_COUNT_CTOR(FFmpegH264Decoder);
-  // Use a new MediaByteBuffer as the object will be modified during initialization.
-  mExtraData = new MediaByteBuffer;
-  mExtraData->AppendElements(*aConfig.mExtraData);
-}
-
-RefPtr<MediaDataDecoder::InitPromise>
-FFmpegH264Decoder<LIBAV_VER>::Init()
-{
-  if (NS_FAILED(InitDecoder())) {
-    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
-  }
-
-  return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
-}
-
-void
-FFmpegH264Decoder<LIBAV_VER>::InitCodecContext()
-{
-  mCodecContext->width = mImage.width;
-  mCodecContext->height = mImage.height;
-
-  // We use the same logic as libvpx in determining the number of threads to use
-  // so that we end up behaving in the same fashion when using ffmpeg as
-  // we would otherwise cause various crashes (see bug 1236167)
-  int decode_threads = 1;
-  if (mDisplay.width >= 2048) {
-    decode_threads = 8;
-  } else if (mDisplay.width >= 1024) {
-    decode_threads = 4;
-  } else if (mDisplay.width >= 320) {
-    decode_threads = 2;
-  }
-
-  decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors());
-  mCodecContext->thread_count = decode_threads;
-  if (decode_threads > 1) {
-    mCodecContext->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
-  }
-
-  // FFmpeg will call back to this to negotiate a video pixel format.
-  mCodecContext->get_format = ChoosePixelFormat;
-
-  mCodecParser = AV_CALL(av_parser_init(mCodecID));
-  if (mCodecParser) {
-    mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
-  }
-}
-
-FFmpegH264Decoder<LIBAV_VER>::DecodeResult
-FFmpegH264Decoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample)
-{
-  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-
-  uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
-  size_t inputSize = aSample->Size();
-
-#if LIBAVCODEC_VERSION_MAJOR >= 54
-  if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
-#if LIBAVCODEC_VERSION_MAJOR >= 55
-      || mCodecID == AV_CODEC_ID_VP9
-#endif
-      )) {
-    bool gotFrame = false;
-    while (inputSize) {
-      uint8_t* data;
-      int size;
-      int len = AV_CALL(av_parser_parse2(mCodecParser, mCodecContext, &data, &size,
-                                         inputData, inputSize,
-                                         aSample->mTime, aSample->mTimecode,
-                                         aSample->mOffset));
-      if (size_t(len) > inputSize) {
-        mCallback->Error();
-        return DecodeResult::DECODE_ERROR;
-      }
-      inputData += len;
-      inputSize -= len;
-      if (size) {
-        switch (DoDecodeFrame(aSample, data, size)) {
-          case DecodeResult::DECODE_ERROR:
-            return DecodeResult::DECODE_ERROR;
-          case DecodeResult::DECODE_FRAME:
-            gotFrame = true;
-            break;
-          default:
-            break;
-        }
-      }
-    }
-    return gotFrame ? DecodeResult::DECODE_FRAME : DecodeResult::DECODE_NO_FRAME;
-  }
-#endif
-  return DoDecodeFrame(aSample, inputData, inputSize);
-}
-
-FFmpegH264Decoder<LIBAV_VER>::DecodeResult
-FFmpegH264Decoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample,
-                                            uint8_t* aData, int aSize)
-{
-  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-
-  AVPacket packet;
-  AV_CALL(av_init_packet(&packet));
-
-  packet.data = aData;
-  packet.size = aSize;
-  packet.dts = aSample->mTimecode;
-  packet.pts = aSample->mTime;
-  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
-  packet.pos = aSample->mOffset;
-
-  // LibAV provides no API to retrieve the decoded sample's duration.
-  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
-  // As such we instead use a map using the dts as key that we will retrieve
-  // later.
-  // The map will have a typical size of 16 entry.
-  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);
-
-  if (!PrepareFrame()) {
-    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
-    mCallback->Error();
-    return DecodeResult::DECODE_ERROR;
-  }
-
-  // Required with old version of FFmpeg/LibAV
-  mFrame->reordered_opaque = AV_NOPTS_VALUE;
-
-  int decoded;
-  int bytesConsumed =
-    AV_CALL(avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet));
-
-  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
-             "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
-             "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
-             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
-             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);
-
-  if (bytesConsumed < 0) {
-    NS_WARNING("FFmpeg video decoder error.");
-    mCallback->Error();
-    return DecodeResult::DECODE_ERROR;
-  }
-
-  // If we've decoded a frame then we need to output it
-  if (decoded) {
-    int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
-    FFMPEG_LOG("Got one frame output with pts=%lld opaque=%lld",
-               pts, mCodecContext->reordered_opaque);
-    // Retrieve duration from dts.
-    // We use the first entry found matching this dts (this is done to
-    // handle damaged file with multiple frames with the same dts)
-
-    int64_t duration;
-    if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
-      NS_WARNING("Unable to retrieve duration from map");
-      duration = aSample->mDuration;
-      // dts are probably incorrectly reported ; so clear the map as we're
-      // unlikely to find them in the future anyway. This also guards
-      // against the map becoming extremely big.
-      mDurationMap.Clear();
-    }
-
-    VideoInfo info;
-    info.mDisplay = mDisplay;
-
-    VideoData::YCbCrBuffer b;
-    b.mPlanes[0].mData = mFrame->data[0];
-    b.mPlanes[1].mData = mFrame->data[1];
-    b.mPlanes[2].mData = mFrame->data[2];
-
-    b.mPlanes[0].mStride = mFrame->linesize[0];
-    b.mPlanes[1].mStride = mFrame->linesize[1];
-    b.mPlanes[2].mStride = mFrame->linesize[2];
-
-    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
-    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
-    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
-
-    b.mPlanes[0].mWidth = mFrame->width;
-    b.mPlanes[0].mHeight = mFrame->height;
-    if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
-      b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
-      b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
-    } else {
-      b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
-      b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
-    }
-
-    RefPtr<VideoData> v = VideoData::Create(info,
-                                              mImageContainer,
-                                              aSample->mOffset,
-                                              pts,
-                                              duration,
-                                              b,
-                                              !!mFrame->key_frame,
-                                              -1,
-                                              mImage);
-    if (!v) {
-      NS_WARNING("image allocation error.");
-      mCallback->Error();
-      return DecodeResult::DECODE_ERROR;
-    }
-    mCallback->Output(v);
-    return DecodeResult::DECODE_FRAME;
-  }
-  return DecodeResult::DECODE_NO_FRAME;
-}
-
-void
-FFmpegH264Decoder<LIBAV_VER>::DecodeFrame(MediaRawData* aSample)
-{
-  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-
-  if (DoDecodeFrame(aSample) != DecodeResult::DECODE_ERROR &&
-      mTaskQueue->IsEmpty()) {
-    mCallback->InputExhausted();
-  }
-}
-
-nsresult
-FFmpegH264Decoder<LIBAV_VER>::Input(MediaRawData* aSample)
-{
-  nsCOMPtr<nsIRunnable> runnable(
-    NS_NewRunnableMethodWithArg<RefPtr<MediaRawData>>(
-      this, &FFmpegH264Decoder<LIBAV_VER>::DecodeFrame,
-      RefPtr<MediaRawData>(aSample)));
-  mTaskQueue->Dispatch(runnable.forget());
-
-  return NS_OK;
-}
-
-void
-FFmpegH264Decoder<LIBAV_VER>::ProcessDrain()
-{
-  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
-  RefPtr<MediaRawData> empty(new MediaRawData());
-  while (DoDecodeFrame(empty) == DecodeResult::DECODE_FRAME) {
-  }
-  mCallback->DrainComplete();
-}
-
-void
-FFmpegH264Decoder<LIBAV_VER>::ProcessFlush()
-{
-  mPtsContext.Reset();
-  mDurationMap.Clear();
-  FFmpegDataDecoder::ProcessFlush();
-}
-
-FFmpegH264Decoder<LIBAV_VER>::~FFmpegH264Decoder()
-{
-  MOZ_COUNT_DTOR(FFmpegH264Decoder);
-  if (mCodecParser) {
-    AV_CALL(av_parser_close(mCodecParser));
-    mCodecParser = nullptr;
-  }
-}
-
-AVCodecID
-FFmpegH264Decoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType)
-{
-  if (aMimeType.EqualsLiteral("video/avc") || aMimeType.EqualsLiteral("video/mp4")) {
-    return AV_CODEC_ID_H264;
-  }
-
-  if (aMimeType.EqualsLiteral("video/x-vnd.on2.vp6")) {
-    return AV_CODEC_ID_VP6F;
-  }
-
-#if LIBAVCODEC_VERSION_MAJOR >= 54
-  if (aMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
-    return AV_CODEC_ID_VP8;
-  }
-#endif
-
-#if LIBAVCODEC_VERSION_MAJOR >= 55
-  if (aMimeType.EqualsLiteral("video/webm; codecs=vp9")) {
-    return AV_CODEC_ID_VP9;
-  }
-#endif
-
-  return AV_CODEC_ID_NONE;
-}
-
-} // namespace mozilla
deleted file mode 100644
--- a/dom/media/platforms/ffmpeg/FFmpegH264Decoder.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef __FFmpegH264Decoder_h__
-#define __FFmpegH264Decoder_h__
-
-#include "FFmpegDataDecoder.h"
-#include "mozilla/Pair.h"
-#include "nsTArray.h"
-
-namespace mozilla
-{
-
-template <int V>
-class FFmpegH264Decoder : public FFmpegDataDecoder<V>
-{
-};
-
-template <>
-class FFmpegH264Decoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
-{
-  typedef mozilla::layers::Image Image;
-  typedef mozilla::layers::ImageContainer ImageContainer;
-
-  enum DecodeResult {
-    DECODE_FRAME,
-    DECODE_NO_FRAME,
-    DECODE_ERROR
-  };
-
-public:
-  FFmpegH264Decoder(FlushableTaskQueue* aTaskQueue,
-                    MediaDataDecoderCallback* aCallback,
-                    const VideoInfo& aConfig,
-                    ImageContainer* aImageContainer);
-  virtual ~FFmpegH264Decoder();
-
-  RefPtr<InitPromise> Init() override;
-  nsresult Input(MediaRawData* aSample) override;
-  void ProcessDrain() override;
-  void ProcessFlush() override;
-  void InitCodecContext() override;
-  static AVCodecID GetCodecId(const nsACString& aMimeType);
-
-private:
-  void DecodeFrame(MediaRawData* aSample);
-  DecodeResult DoDecodeFrame(MediaRawData* aSample);
-  DecodeResult DoDecodeFrame(MediaRawData* aSample, uint8_t* aData, int aSize);
-  void DoDrain();
-  void OutputDelayedFrames();
-
-  /**
-   * This method allocates a buffer for FFmpeg's decoder, wrapped in an Image.
-   * Currently it only supports Planar YUV420, which appears to be the only
-   * non-hardware accelerated image format that FFmpeg's H264 decoder is
-   * capable of outputting.
-   */
-  int AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
-                                 AVFrame* aFrame);
-
-  RefPtr<ImageContainer> mImageContainer;
-  nsIntSize mDisplay;
-  nsIntRect mImage;
-
-  // Parser used for VP8 and VP9 decoding.
-  AVCodecParserContext* mCodecParser;
-
-  class PtsCorrectionContext {
-  public:
-    PtsCorrectionContext();
-    int64_t GuessCorrectPts(int64_t aPts, int64_t aDts);
-    void Reset();
-
-  private:
-    int64_t mNumFaultyPts; /// Number of incorrect PTS values so far
-    int64_t mNumFaultyDts; /// Number of incorrect DTS values so far
-    int64_t mLastPts;       /// PTS of the last frame
-    int64_t mLastDts;       /// DTS of the last frame
-  };
-
-  PtsCorrectionContext mPtsContext;
-
-  class DurationMap {
-  public:
-    typedef Pair<int64_t, int64_t> DurationElement;
-
-    // Insert Dts and Duration pair at the end of our map.
-    void Insert(int64_t aDts, int64_t aDuration)
-    {
-      mMap.AppendElement(MakePair(aDts, aDuration));
-    }
-    // Sets aDuration matching aDts and remove it from the map if found.
-    // The element returned is the first one found.
-    // Returns true if found, false otherwise.
-    bool Find(int64_t aDts, int64_t& aDuration)
-    {
-      for (uint32_t i = 0; i < mMap.Length(); i++) {
-        DurationElement& element = mMap[i];
-        if (element.first() == aDts) {
-          aDuration = element.second();
-          mMap.RemoveElementAt(i);
-          return true;
-        }
-      }
-      return false;
-    }
-    // Remove all elements of the map.
-    void Clear()
-    {
-      mMap.Clear();
-    }
-
-  private:
-    nsAutoTArray<DurationElement, 16> mMap;
-  };
-
-  DurationMap mDurationMap;
-};
-
-} // namespace mozilla
-
-#endif // __FFmpegH264Decoder_h__
new file mode 100644
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -0,0 +1,395 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/TaskQueue.h"
+
+#include "nsThreadUtils.h"
+#include "nsAutoPtr.h"
+#include "ImageContainer.h"
+
+#include "MediaInfo.h"
+
+#include "FFmpegVideoDecoder.h"
+#include "FFmpegLog.h"
+#include "mozilla/PodOperations.h"
+
+#include "libavutil/pixfmt.h"
+#if LIBAVCODEC_VERSION_MAJOR < 54
+#define AVPixelFormat PixelFormat
+#define AV_PIX_FMT_YUV420P PIX_FMT_YUV420P
+#define AV_PIX_FMT_YUVJ420P PIX_FMT_YUVJ420P
+#define AV_PIX_FMT_YUV444P PIX_FMT_YUV444P
+#define AV_PIX_FMT_NONE PIX_FMT_NONE
+#endif
+
+typedef mozilla::layers::Image Image;
+typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
+
+namespace mozilla
+{
+
+/**
+ * FFmpeg calls back to this function with a list of pixel formats it supports.
+ * We choose a pixel format that we support and return it.
+ * For now, we just look for YUV420P, YUVJ420P and YUV444P as those are the
+ * only non-HW accelerated formats supported by FFmpeg's H264 and VP9 decoders.
+ */
+static AVPixelFormat
+ChoosePixelFormat(AVCodecContext* aCodecContext, const AVPixelFormat* aFormats)
+{
+  FFMPEG_LOG("Choosing FFmpeg pixel format for video decoding.");
+  for (; *aFormats > -1; aFormats++) {
+    switch (*aFormats) {
+      case AV_PIX_FMT_YUV444P:
+        FFMPEG_LOG("Requesting pixel format YUV444P.");
+        return AV_PIX_FMT_YUV444P;
+      case AV_PIX_FMT_YUV420P:
+      case AV_PIX_FMT_YUVJ420P:
+        FFMPEG_LOG("Requesting pixel format YUV420P.");
+        return AV_PIX_FMT_YUV420P;
+      default:
+        break;
+    }
+  }
+
+  NS_WARNING("FFmpeg does not share any supported pixel formats.");
+  return AV_PIX_FMT_NONE;
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::PtsCorrectionContext()
+  : mNumFaultyPts(0)
+  , mNumFaultyDts(0)
+  , mLastPts(INT64_MIN)
+  , mLastDts(INT64_MIN)
+{
+}
+
+int64_t
+FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::GuessCorrectPts(int64_t aPts, int64_t aDts)
+{
+  int64_t pts = AV_NOPTS_VALUE;
+
+  if (aDts != int64_t(AV_NOPTS_VALUE)) {
+    mNumFaultyDts += aDts <= mLastDts;
+    mLastDts = aDts;
+  }
+  if (aPts != int64_t(AV_NOPTS_VALUE)) {
+    mNumFaultyPts += aPts <= mLastPts;
+    mLastPts = aPts;
+  }
+  if ((mNumFaultyPts <= mNumFaultyDts || aDts == int64_t(AV_NOPTS_VALUE)) &&
+      aPts != int64_t(AV_NOPTS_VALUE)) {
+    pts = aPts;
+  } else {
+    pts = aDts;
+  }
+  return pts;
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::Reset()
+{
+  mNumFaultyPts = 0;
+  mNumFaultyDts = 0;
+  mLastPts = INT64_MIN;
+  mLastDts = INT64_MIN;
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(
+  FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
+  const VideoInfo& aConfig,
+  ImageContainer* aImageContainer)
+  : FFmpegDataDecoder(aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
+  , mImageContainer(aImageContainer)
+  , mDisplay(aConfig.mDisplay)
+  , mImage(aConfig.mImage)
+  , mCodecParser(nullptr)
+{
+  MOZ_COUNT_CTOR(FFmpegVideoDecoder);
+  // Use a new MediaByteBuffer as the object will be modified during initialization.
+  mExtraData = new MediaByteBuffer;
+  mExtraData->AppendElements(*aConfig.mExtraData);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+FFmpegVideoDecoder<LIBAV_VER>::Init()
+{
+  if (NS_FAILED(InitDecoder())) {
+    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
+  }
+
+  return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::InitCodecContext()
+{
+  mCodecContext->width = mImage.width;
+  mCodecContext->height = mImage.height;
+
+  // We use the same logic as libvpx in determining the number of threads to use
+  // so that we end up behaving in the same fashion when using ffmpeg as
+  // we would otherwise cause various crashes (see bug 1236167)
+  int decode_threads = 1;
+  if (mDisplay.width >= 2048) {
+    decode_threads = 8;
+  } else if (mDisplay.width >= 1024) {
+    decode_threads = 4;
+  } else if (mDisplay.width >= 320) {
+    decode_threads = 2;
+  }
+
+  decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors());
+  mCodecContext->thread_count = decode_threads;
+  if (decode_threads > 1) {
+    mCodecContext->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
+  }
+
+  // FFmpeg will call back to this to negotiate a video pixel format.
+  mCodecContext->get_format = ChoosePixelFormat;
+
+  mCodecParser = AV_CALL(av_parser_init(mCodecID));
+  if (mCodecParser) {
+    mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+  }
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::DecodeResult
+FFmpegVideoDecoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample)
+{
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+  uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
+  size_t inputSize = aSample->Size();
+
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+  if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+      || mCodecID == AV_CODEC_ID_VP9
+#endif
+      )) {
+    bool gotFrame = false;
+    while (inputSize) {
+      uint8_t* data;
+      int size;
+      int len = AV_CALL(av_parser_parse2(mCodecParser, mCodecContext, &data, &size,
+                                         inputData, inputSize,
+                                         aSample->mTime, aSample->mTimecode,
+                                         aSample->mOffset));
+      if (size_t(len) > inputSize) {
+        mCallback->Error();
+        return DecodeResult::DECODE_ERROR;
+      }
+      inputData += len;
+      inputSize -= len;
+      if (size) {
+        switch (DoDecodeFrame(aSample, data, size)) {
+          case DecodeResult::DECODE_ERROR:
+            return DecodeResult::DECODE_ERROR;
+          case DecodeResult::DECODE_FRAME:
+            gotFrame = true;
+            break;
+          default:
+            break;
+        }
+      }
+    }
+    return gotFrame ? DecodeResult::DECODE_FRAME : DecodeResult::DECODE_NO_FRAME;
+  }
+#endif
+  return DoDecodeFrame(aSample, inputData, inputSize);
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::DecodeResult
+FFmpegVideoDecoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample,
+                                             uint8_t* aData, int aSize)
+{
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+  AVPacket packet;
+  AV_CALL(av_init_packet(&packet));
+
+  packet.data = aData;
+  packet.size = aSize;
+  packet.dts = aSample->mTimecode;
+  packet.pts = aSample->mTime;
+  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
+  packet.pos = aSample->mOffset;
+
+  // LibAV provides no API to retrieve the decoded sample's duration.
+  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
+  // As such we instead use a map using the dts as key that we will retrieve
+  // later.
+  // The map will have a typical size of 16 entry.
+  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);
+
+  if (!PrepareFrame()) {
+    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
+    mCallback->Error();
+    return DecodeResult::DECODE_ERROR;
+  }
+
+  // Required with old version of FFmpeg/LibAV
+  mFrame->reordered_opaque = AV_NOPTS_VALUE;
+
+  int decoded;
+  int bytesConsumed =
+    AV_CALL(avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet));
+
+  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
+             "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
+             "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
+             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
+             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);
+
+  if (bytesConsumed < 0) {
+    NS_WARNING("FFmpeg video decoder error.");
+    mCallback->Error();
+    return DecodeResult::DECODE_ERROR;
+  }
+
+  // If we've decoded a frame then we need to output it
+  if (decoded) {
+    int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
+    FFMPEG_LOG("Got one frame output with pts=%lld opaque=%lld",
+               pts, mCodecContext->reordered_opaque);
+    // Retrieve duration from dts.
+    // We use the first entry found matching this dts (this is done to
+    // handle damaged file with multiple frames with the same dts)
+
+    int64_t duration;
+    if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
+      NS_WARNING("Unable to retrieve duration from map");
+      duration = aSample->mDuration;
+      // dts are probably incorrectly reported ; so clear the map as we're
+      // unlikely to find them in the future anyway. This also guards
+      // against the map becoming extremely big.
+      mDurationMap.Clear();
+    }
+
+    VideoInfo info;
+    info.mDisplay = mDisplay;
+
+    VideoData::YCbCrBuffer b;
+    b.mPlanes[0].mData = mFrame->data[0];
+    b.mPlanes[1].mData = mFrame->data[1];
+    b.mPlanes[2].mData = mFrame->data[2];
+
+    b.mPlanes[0].mStride = mFrame->linesize[0];
+    b.mPlanes[1].mStride = mFrame->linesize[1];
+    b.mPlanes[2].mStride = mFrame->linesize[2];
+
+    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
+    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
+    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
+
+    b.mPlanes[0].mWidth = mFrame->width;
+    b.mPlanes[0].mHeight = mFrame->height;
+    if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
+      b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
+      b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
+    } else {
+      b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
+      b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
+    }
+
+    RefPtr<VideoData> v = VideoData::Create(info,
+                                              mImageContainer,
+                                              aSample->mOffset,
+                                              pts,
+                                              duration,
+                                              b,
+                                              !!mFrame->key_frame,
+                                              -1,
+                                              mImage);
+    if (!v) {
+      NS_WARNING("image allocation error.");
+      mCallback->Error();
+      return DecodeResult::DECODE_ERROR;
+    }
+    mCallback->Output(v);
+    return DecodeResult::DECODE_FRAME;
+  }
+  return DecodeResult::DECODE_NO_FRAME;
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::DecodeFrame(MediaRawData* aSample)
+{
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+  if (DoDecodeFrame(aSample) != DecodeResult::DECODE_ERROR &&
+      mTaskQueue->IsEmpty()) {
+    mCallback->InputExhausted();
+  }
+}
+
+nsresult
+FFmpegVideoDecoder<LIBAV_VER>::Input(MediaRawData* aSample)
+{
+  nsCOMPtr<nsIRunnable> runnable(
+    NS_NewRunnableMethodWithArg<RefPtr<MediaRawData>>(
+      this, &FFmpegVideoDecoder<LIBAV_VER>::DecodeFrame,
+      RefPtr<MediaRawData>(aSample)));
+  mTaskQueue->Dispatch(runnable.forget());
+
+  return NS_OK;
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
+{
+  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+  RefPtr<MediaRawData> empty(new MediaRawData());
+  while (DoDecodeFrame(empty) == DecodeResult::DECODE_FRAME) {
+  }
+  mCallback->DrainComplete();
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush()
+{
+  mPtsContext.Reset();
+  mDurationMap.Clear();
+  FFmpegDataDecoder::ProcessFlush();
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::~FFmpegVideoDecoder()
+{
+  MOZ_COUNT_DTOR(FFmpegVideoDecoder);
+  if (mCodecParser) {
+    AV_CALL(av_parser_close(mCodecParser));
+    mCodecParser = nullptr;
+  }
+}
+
+AVCodecID
+FFmpegVideoDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType)
+{
+  if (aMimeType.EqualsLiteral("video/avc") || aMimeType.EqualsLiteral("video/mp4")) {
+    return AV_CODEC_ID_H264;
+  }
+
+  if (aMimeType.EqualsLiteral("video/x-vnd.on2.vp6")) {
+    return AV_CODEC_ID_VP6F;
+  }
+
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+  if (aMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
+    return AV_CODEC_ID_VP8;
+  }
+#endif
+
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+  if (aMimeType.EqualsLiteral("video/webm; codecs=vp9")) {
+    return AV_CODEC_ID_VP9;
+  }
+#endif
+
+  return AV_CODEC_ID_NONE;
+}
+
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegVideoDecoder_h__
+#define __FFmpegVideoDecoder_h__
+
+#include "FFmpegDataDecoder.h"
+#include "mozilla/Pair.h"
+#include "nsTArray.h"
+
+namespace mozilla
+{
+
+template <int V>
+class FFmpegVideoDecoder : public FFmpegDataDecoder<V>
+{
+};
+
+template <>
+class FFmpegVideoDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
+{
+  typedef mozilla::layers::Image Image;
+  typedef mozilla::layers::ImageContainer ImageContainer;
+
+  enum DecodeResult {
+    DECODE_FRAME,
+    DECODE_NO_FRAME,
+    DECODE_ERROR
+  };
+
+public:
+  FFmpegVideoDecoder(FlushableTaskQueue* aTaskQueue,
+                     MediaDataDecoderCallback* aCallback,
+                     const VideoInfo& aConfig,
+                     ImageContainer* aImageContainer);
+  virtual ~FFmpegVideoDecoder();
+
+  RefPtr<InitPromise> Init() override;
+  nsresult Input(MediaRawData* aSample) override;
+  void ProcessDrain() override;
+  void ProcessFlush() override;
+  void InitCodecContext() override;
+  static AVCodecID GetCodecId(const nsACString& aMimeType);
+
+private:
+  void DecodeFrame(MediaRawData* aSample);
+  DecodeResult DoDecodeFrame(MediaRawData* aSample);
+  DecodeResult DoDecodeFrame(MediaRawData* aSample, uint8_t* aData, int aSize);
+  void DoDrain();
+  void OutputDelayedFrames();
+
+  /**
+   * This method allocates a buffer for FFmpeg's decoder, wrapped in an Image.
+   * Currently it only supports Planar YUV420, which appears to be the only
+   * non-hardware accelerated image format that FFmpeg's video decoders are
+   * capable of outputting.
+   */
+  int AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
+                                 AVFrame* aFrame);
+
+  RefPtr<ImageContainer> mImageContainer;
+  nsIntSize mDisplay;
+  nsIntRect mImage;
+
+  // Parser used for VP8 and VP9 decoding.
+  AVCodecParserContext* mCodecParser;
+
+  class PtsCorrectionContext {
+  public:
+    PtsCorrectionContext();
+    int64_t GuessCorrectPts(int64_t aPts, int64_t aDts);
+    void Reset();
+
+  private:
+    int64_t mNumFaultyPts; /// Number of incorrect PTS values so far
+    int64_t mNumFaultyDts; /// Number of incorrect DTS values so far
+    int64_t mLastPts;       /// PTS of the last frame
+    int64_t mLastDts;       /// DTS of the last frame
+  };
+
+  PtsCorrectionContext mPtsContext;
+
+  class DurationMap {
+  public:
+    typedef Pair<int64_t, int64_t> DurationElement;
+
+    // Insert Dts and Duration pair at the end of our map.
+    void Insert(int64_t aDts, int64_t aDuration)
+    {
+      mMap.AppendElement(MakePair(aDts, aDuration));
+    }
+    // Sets aDuration matching aDts and remove it from the map if found.
+    // The element returned is the first one found.
+    // Returns true if found, false otherwise.
+    bool Find(int64_t aDts, int64_t& aDuration)
+    {
+      for (uint32_t i = 0; i < mMap.Length(); i++) {
+        DurationElement& element = mMap[i];
+        if (element.first() == aDts) {
+          aDuration = element.second();
+          mMap.RemoveElementAt(i);
+          return true;
+        }
+      }
+      return false;
+    }
+    // Remove all elements of the map.
+    void Clear()
+    {
+      mMap.Clear();
+    }
+
+  private:
+    nsAutoTArray<DurationElement, 16> mMap;
+  };
+
+  DurationMap mDurationMap;
+};
+
+} // namespace mozilla
+
+#endif // __FFmpegVideoDecoder_h__
--- a/dom/media/platforms/ffmpeg/ffmpeg57/moz.build
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/moz.build
@@ -3,17 +3,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     '../FFmpegAudioDecoder.cpp',
     '../FFmpegDataDecoder.cpp',
     '../FFmpegDecoderModule.cpp',
-    '../FFmpegH264Decoder.cpp',
+    '../FFmpegVideoDecoder.cpp',
 ]
 LOCAL_INCLUDES += [
     '..',
     'include',
 ]
 
 if CONFIG['GNU_CXX']:
   CXXFLAGS += [ '-Wno-deprecated-declarations' ]
--- a/dom/media/platforms/ffmpeg/ffvpx/moz.build
+++ b/dom/media/platforms/ffmpeg/ffvpx/moz.build
@@ -7,17 +7,17 @@
 LOCAL_INCLUDES += ['/xpcom/build']
 EXPORTS += [
     'FFVPXRuntimeLinker.h',
 ]
 
 UNIFIED_SOURCES += [
     '../FFmpegDataDecoder.cpp',
     '../FFmpegDecoderModule.cpp',
-    '../FFmpegH264Decoder.cpp',
+    '../FFmpegVideoDecoder.cpp',
 ]
 SOURCES += [
     'FFVPXRuntimeLinker.cpp',
 ]
 LOCAL_INCLUDES += [
     '..',
     '../ffmpeg57/include',
 ]
--- a/dom/media/platforms/ffmpeg/libav53/moz.build
+++ b/dom/media/platforms/ffmpeg/libav53/moz.build
@@ -3,17 +3,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     '../FFmpegAudioDecoder.cpp',
     '../FFmpegDataDecoder.cpp',
     '../FFmpegDecoderModule.cpp',
-    '../FFmpegH264Decoder.cpp',
+    '../FFmpegVideoDecoder.cpp',
 ]
 LOCAL_INCLUDES += [
     '..',
     'include',
 ]
 
 FINAL_LIBRARY = 'xul'
 
--- a/dom/media/platforms/ffmpeg/libav54/moz.build
+++ b/dom/media/platforms/ffmpeg/libav54/moz.build
@@ -3,17 +3,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     '../FFmpegAudioDecoder.cpp',
     '../FFmpegDataDecoder.cpp',
     '../FFmpegDecoderModule.cpp',
-    '../FFmpegH264Decoder.cpp',
+    '../FFmpegVideoDecoder.cpp',
 ]
 LOCAL_INCLUDES += [
     '..',
     'include',
 ]
 
 FINAL_LIBRARY = 'xul'
 
--- a/dom/media/platforms/ffmpeg/libav55/moz.build
+++ b/dom/media/platforms/ffmpeg/libav55/moz.build
@@ -3,17 +3,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     '../FFmpegAudioDecoder.cpp',
     '../FFmpegDataDecoder.cpp',
     '../FFmpegDecoderModule.cpp',
-    '../FFmpegH264Decoder.cpp',
+    '../FFmpegVideoDecoder.cpp',
 ]
 LOCAL_INCLUDES += [
     '..',
     'include',
 ]
 
 if CONFIG['GNU_CXX']:
   CXXFLAGS += [ '-Wno-deprecated-declarations' ]