Bug 1530774 - Part 3. Remove decoder support for producing paletted frames. r=tnikkel
author: Andrew Osmond <aosmond@mozilla.com>
Fri, 15 Mar 2019 13:29:02 -0400
changeset 523590 52390d9090fbd8d46b00ea29034e7039511ff8a4
parent 523589 6dd55ee8961128d0c9e314d60e7509604e7548b2
child 523591 8962b8d9b7a673288218ed9087102032d7cf015b
push id: 11265
push user: ffxbld-merge
push date: Mon, 13 May 2019 10:53:39 +0000
treeherder: mozilla-beta@77e0fe8dbdd3
reviewers: tnikkel
bugs: 1530774
milestone: 68.0a1
Bug 1530774 - Part 3. Remove decoder support for producing paletted frames. r=tnikkel

Differential Revision: https://phabricator.services.mozilla.com/D23716
image/AnimationParams.h
image/AnimationSurfaceProvider.cpp
image/Decoder.cpp
image/Decoder.h
image/DecoderFlags.h
image/RasterImage.cpp
image/SurfaceFilters.h
image/SurfacePipe.cpp
image/SurfacePipeFactory.h
image/decoders/nsBMPDecoder.cpp
image/decoders/nsGIFDecoder2.cpp
image/decoders/nsGIFDecoder2.h
image/decoders/nsJPEGDecoder.cpp
image/decoders/nsPNGDecoder.cpp
image/decoders/nsWebPDecoder.cpp
image/imgFrame.cpp
image/imgFrame.h
image/test/gtest/Common.cpp
image/test/gtest/TestAnimationFrameBuffer.cpp
image/test/gtest/TestBlendAnimationFilter.cpp
image/test/gtest/TestDecoders.cpp
image/test/gtest/TestFrameAnimator.cpp
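
The hunks below remove the frame-rect and palette-depth parameters from Decoder::AllocateFrame, so every frame, animation frames included, is now allocated at the full output size in 32-bit BGRA/BGRX. As a rough illustration of the memory trade-off that decode-time blending and frame recycling are meant to offset (all sizes below are assumptions for the example, not values from the patch):

// Rough cost comparison: a paletted GIF frame stored one byte per pixel over
// its own frame rect plus a palette, whereas a full BGRA frame stores four
// bytes per pixel over the whole image. Illustrative stand-in, not patch code.
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  const size_t imageW = 400, imageH = 300;  // assumed full image size
  const size_t frameW = 120, frameH = 80;   // assumed partial frame rect
  const size_t paletteEntries = 256;

  size_t paletted = frameW * frameH * sizeof(uint8_t) +
                    paletteEntries * sizeof(uint32_t);   // old scheme: 10624
  size_t fullBGRA = imageW * imageH * sizeof(uint32_t);  // new scheme: 480000

  std::cout << "paletted: " << paletted << " bytes, full BGRA: " << fullBGRA
            << " bytes\n";
  return 0;
}
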
--- a/image/AnimationParams.h
+++ b/image/AnimationParams.h
@@ -28,16 +28,25 @@ enum class DisposalMethod : int8_t {
   CLEAR_ALL = -1,   // Clear the whole image, revealing what's underneath.
   NOT_SPECIFIED,    // Leave the frame and let the new frame draw on top.
   KEEP,             // Leave the frame and let the new frame draw on top.
   CLEAR,            // Clear the frame's area, revealing what's underneath.
   RESTORE_PREVIOUS  // Restore the previous (composited) frame.
 };
 
 struct AnimationParams {
+  AnimationParams(const gfx::IntRect& aBlendRect, const FrameTimeout& aTimeout,
+                  uint32_t aFrameNum, BlendMethod aBlendMethod,
+                  DisposalMethod aDisposalMethod)
+      : mBlendRect(aBlendRect),
+        mTimeout(aTimeout),
+        mFrameNum(aFrameNum),
+        mBlendMethod(aBlendMethod),
+        mDisposalMethod(aDisposalMethod) {}
+
   gfx::IntRect mBlendRect;
   FrameTimeout mTimeout;
   uint32_t mFrameNum;
   BlendMethod mBlendMethod;
   DisposalMethod mDisposalMethod;
 };
 
 }  // namespace image
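
The constructor added above exists so callers can build the struct through Maybe<AnimationParams>::emplace, as the GIF and WebP decoders do later in this patch; emplace forwards its arguments to a constructor, which a plain aggregate lacks before C++20's parenthesized aggregate initialization. A minimal self-contained stand-in of the same pattern, using std::optional and simplified placeholder types rather than the Gecko ones:

#include <cstdint>
#include <optional>

struct Rect { int x, y, width, height; };
enum class Blend : int8_t { SOURCE, OVER };
enum class Disposal : int8_t { KEEP, CLEAR };

struct Params {
  // Without this constructor, optional<Params>::emplace(...) with multiple
  // arguments would not compile in C++17.
  Params(const Rect& aBlendRect, uint32_t aTimeoutMs, uint32_t aFrameNum,
         Blend aBlend, Disposal aDisposal)
      : mBlendRect(aBlendRect),
        mTimeoutMs(aTimeoutMs),
        mFrameNum(aFrameNum),
        mBlend(aBlend),
        mDisposal(aDisposal) {}

  Rect mBlendRect;
  uint32_t mTimeoutMs;
  uint32_t mFrameNum;
  Blend mBlend;
  Disposal mDisposal;
};

int main() {
  std::optional<Params> animParams;  // stays empty for first-frame-only decodes
  const bool firstFrameOnly = false;
  if (!firstFrameOnly) {
    // Mirrors the emplace calls in nsGIFDecoder2 and nsWebPDecoder below.
    animParams.emplace(Rect{0, 0, 16, 16}, 100u, 0u, Blend::OVER,
                       Disposal::KEEP);
  }
  return animParams.has_value() ? 0 : 1;
}
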
--- a/image/AnimationSurfaceProvider.cpp
+++ b/image/AnimationSurfaceProvider.cpp
@@ -26,29 +26,22 @@ AnimationSurfaceProvider::AnimationSurfa
       mDecodingMutex("AnimationSurfaceProvider::mDecoder"),
       mDecoder(aDecoder.get()),
       mFramesMutex("AnimationSurfaceProvider::mFrames") {
   MOZ_ASSERT(!mDecoder->IsMetadataDecode(),
              "Use MetadataDecodingTask for metadata decodes");
   MOZ_ASSERT(!mDecoder->IsFirstFrameDecode(),
              "Use DecodedSurfaceProvider for single-frame image decodes");
 
-  // We may produce paletted surfaces for GIF which means the frames are smaller
-  // than one would expect.
-  size_t pixelSize = !aDecoder->ShouldBlendAnimation() &&
-                             aDecoder->GetType() == DecoderType::GIF
-                         ? sizeof(uint8_t)
-                         : sizeof(uint32_t);
-
   // Calculate how many frames we need to decode in this animation before we
   // enter decode-on-demand mode.
   IntSize frameSize = aSurfaceKey.Size();
   size_t threshold =
       (size_t(gfxPrefs::ImageAnimatedDecodeOnDemandThresholdKB()) * 1024) /
-      (pixelSize * frameSize.width * frameSize.height);
+      (sizeof(uint32_t) * frameSize.width * frameSize.height);
   size_t batch = gfxPrefs::ImageAnimatedDecodeOnDemandBatchSize();
 
   mFrames.reset(
       new AnimationFrameRetainedBuffer(threshold, batch, aCurrentFrame));
 }
 
 AnimationSurfaceProvider::~AnimationSurfaceProvider() {
   DropImageReference();
@@ -408,23 +401,18 @@ void AnimationSurfaceProvider::RequestFr
   if (mFrames->MayDiscard() || mFrames->IsRecycling()) {
     MOZ_ASSERT_UNREACHABLE("Already replaced frame queue!");
     return;
   }
 
   auto oldFrameQueue =
       static_cast<AnimationFrameRetainedBuffer*>(mFrames.get());
 
-  // We only recycle if it is a full frame. Partial frames may be sized
-  // differently from each other. We do not support recycling with WebRender
-  // and shared surfaces at this time as there is additional synchronization
-  // required to know when it is safe to recycle.
   MOZ_ASSERT(!mDecoder->GetFrameRecycler());
-  if (gfxPrefs::ImageAnimatedDecodeOnDemandRecycle() &&
-      mDecoder->ShouldBlendAnimation()) {
+  if (gfxPrefs::ImageAnimatedDecodeOnDemandRecycle()) {
     mFrames.reset(new AnimationFrameRecyclingQueue(std::move(*oldFrameQueue)));
     mDecoder->SetFrameRecycler(this);
   } else {
     mFrames.reset(new AnimationFrameDiscardingQueue(std::move(*oldFrameQueue)));
   }
 }
 
 void AnimationSurfaceProvider::AnnounceSurfaceAvailable() {
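
With paletted GIF frames gone, the decode-on-demand threshold above always assumes four bytes per pixel. A small worked example of that arithmetic (the pref value and frame size are assumptions for illustration only):

#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  const size_t thresholdKB = 20480;        // assumed value of the threshold pref
  const size_t width = 400, height = 300;  // assumed frame size

  // Every retained frame now costs sizeof(uint32_t) bytes per pixel.
  const size_t thresholdFrames =
      (thresholdKB * 1024) / (sizeof(uint32_t) * width * height);

  // 20480 * 1024 / (4 * 400 * 300) = 43 frames retained before the provider
  // switches to decode-on-demand.
  std::cout << thresholdFrames << " frames\n";
  return 0;
}
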
--- a/image/Decoder.cpp
+++ b/image/Decoder.cpp
@@ -41,18 +41,16 @@ class MOZ_STACK_CLASS AutoRecordDecoderT
  private:
   Decoder* mDecoder;
   TimeStamp mStartTime;
 };
 
 Decoder::Decoder(RasterImage* aImage)
     : mImageData(nullptr),
       mImageDataLength(0),
-      mColormap(nullptr),
-      mColormapSize(0),
       mImage(aImage),
       mFrameRecycler(nullptr),
       mProgress(NoProgress),
       mFrameCount(0),
       mLoopLength(FrameTimeout::Zero()),
       mDecoderFlags(DefaultDecoderFlags()),
       mSurfaceFlags(DefaultSurfaceFlags()),
       mInitialized(false),
@@ -248,30 +246,26 @@ DecoderFinalStatus Decoder::FinalStatus(
 DecoderTelemetry Decoder::Telemetry() const {
   MOZ_ASSERT(mIterator);
   return DecoderTelemetry(SpeedHistogram(),
                           mIterator ? mIterator->ByteCount() : 0,
                           mIterator ? mIterator->ChunkCount() : 0, mDecodeTime);
 }
 
 nsresult Decoder::AllocateFrame(const gfx::IntSize& aOutputSize,
-                                const gfx::IntRect& aFrameRect,
                                 gfx::SurfaceFormat aFormat,
-                                uint8_t aPaletteDepth,
                                 const Maybe<AnimationParams>& aAnimParams) {
-  mCurrentFrame =
-      AllocateFrameInternal(aOutputSize, aFrameRect, aFormat, aPaletteDepth,
-                            aAnimParams, std::move(mCurrentFrame));
+  mCurrentFrame = AllocateFrameInternal(aOutputSize, aFormat, aAnimParams,
+                                        std::move(mCurrentFrame));
 
   if (mCurrentFrame) {
     mHasFrameToTake = true;
 
     // Gather the raw pointers the decoders will use.
     mCurrentFrame->GetImageData(&mImageData, &mImageDataLength);
-    mCurrentFrame->GetPaletteData(&mColormap, &mColormapSize);
 
     // We should now be on |aFrameNum|. (Note that we're comparing the frame
     // number, which is zero-based, with the frame count, which is one-based.)
     MOZ_ASSERT_IF(aAnimParams, aAnimParams->mFrameNum + 1 == mFrameCount);
 
     // If we're past the first frame, PostIsAnimated() should've been called.
     MOZ_ASSERT_IF(mFrameCount > 1, HasAnimation());
 
@@ -279,73 +273,66 @@ nsresult Decoder::AllocateFrame(const gf
     MOZ_ASSERT(!mInFrame, "Starting new frame but not done with old one!");
     mInFrame = true;
   }
 
   return mCurrentFrame ? NS_OK : NS_ERROR_FAILURE;
 }
 
 RawAccessFrameRef Decoder::AllocateFrameInternal(
-    const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect,
-    SurfaceFormat aFormat, uint8_t aPaletteDepth,
+    const gfx::IntSize& aOutputSize, SurfaceFormat aFormat,
     const Maybe<AnimationParams>& aAnimParams,
     RawAccessFrameRef&& aPreviousFrame) {
   if (HasError()) {
     return RawAccessFrameRef();
   }
 
   uint32_t frameNum = aAnimParams ? aAnimParams->mFrameNum : 0;
   if (frameNum != mFrameCount) {
     MOZ_ASSERT_UNREACHABLE("Allocating frames out of order");
     return RawAccessFrameRef();
   }
 
-  if (aOutputSize.width <= 0 || aOutputSize.height <= 0 ||
-      aFrameRect.Width() <= 0 || aFrameRect.Height() <= 0) {
+  if (aOutputSize.width <= 0 || aOutputSize.height <= 0) {
     NS_WARNING("Trying to add frame with zero or negative size");
     return RawAccessFrameRef();
   }
 
   if (frameNum == 1) {
     MOZ_ASSERT(aPreviousFrame, "Must provide a previous frame when animated");
     aPreviousFrame->SetRawAccessOnly();
   }
 
   if (frameNum > 0) {
-    if (ShouldBlendAnimation()) {
-      if (aPreviousFrame->GetDisposalMethod() !=
-          DisposalMethod::RESTORE_PREVIOUS) {
-        // If the new restore frame is the direct previous frame, then we know
-        // the dirty rect is composed only of the current frame's blend rect and
-        // the restore frame's clear rect (if applicable) which are handled in
-        // filters.
-        mRestoreFrame = std::move(aPreviousFrame);
-        mRestoreDirtyRect.SetBox(0, 0, 0, 0);
-      } else {
-        // We only need the previous frame's dirty rect, because while there may
-        // have been several frames between us and mRestoreFrame, the only areas
-        // that changed are the restore frame's clear rect, the current frame
-        // blending rect, and the previous frame's blending rect. All else is
-        // forgotten due to us restoring the same frame again.
-        mRestoreDirtyRect = aPreviousFrame->GetBoundedBlendRect();
-      }
+    if (aPreviousFrame->GetDisposalMethod() !=
+        DisposalMethod::RESTORE_PREVIOUS) {
+      // If the new restore frame is the direct previous frame, then we know
+      // the dirty rect is composed only of the current frame's blend rect and
+      // the restore frame's clear rect (if applicable) which are handled in
+      // filters.
+      mRestoreFrame = std::move(aPreviousFrame);
+      mRestoreDirtyRect.SetBox(0, 0, 0, 0);
+    } else {
+      // We only need the previous frame's dirty rect, because while there may
+      // have been several frames between us and mRestoreFrame, the only areas
+      // that changed are the restore frame's clear rect, the current frame
+      // blending rect, and the previous frame's blending rect. All else is
+      // forgotten due to us restoring the same frame again.
+      mRestoreDirtyRect = aPreviousFrame->GetBoundedBlendRect();
     }
   }
 
   RawAccessFrameRef ref;
 
   // If we have a frame recycler, it must be for an animated image producing
   // full frames. If the higher layers are discarding frames because of the
   // memory footprint, then the recycler will allow us to reuse the buffers.
   // Each frame should be the same size and have mostly the same properties.
   if (mFrameRecycler) {
-    MOZ_ASSERT(ShouldBlendAnimation());
-    MOZ_ASSERT(aPaletteDepth == 0);
     MOZ_ASSERT(aAnimParams);
-    MOZ_ASSERT(aFrameRect.IsEqualEdges(IntRect(IntPoint(0, 0), aOutputSize)));
 
     ref = mFrameRecycler->RecycleFrame(mRecycleRect);
     if (ref) {
       // If the recycled frame is actually the current restore frame, we cannot
       // use it. If the next restore frame is the new frame we are creating, in
       // theory we could reuse it, but we would need to store the restore frame
       // animation parameters elsewhere. For now we just drop it.
       bool blocked = ref.get() == mRestoreFrame.get();
@@ -363,19 +350,18 @@ RawAccessFrameRef Decoder::AllocateFrame
   // Produce a new frame to store the data.
   if (!ref) {
     // There is no underlying data to reuse, so reset the recycle rect to be
     // the full frame, to ensure the restore frame is fully copied.
     mRecycleRect = IntRect(IntPoint(0, 0), aOutputSize);
 
     bool nonPremult = bool(mSurfaceFlags & SurfaceFlags::NO_PREMULTIPLY_ALPHA);
     auto frame = MakeNotNull<RefPtr<imgFrame>>();
-    if (NS_FAILED(frame->InitForDecoder(
-            aOutputSize, aFrameRect, aFormat, aPaletteDepth, nonPremult,
-            aAnimParams, ShouldBlendAnimation(), bool(mFrameRecycler)))) {
+    if (NS_FAILED(frame->InitForDecoder(aOutputSize, aFormat, nonPremult,
+                                        aAnimParams, bool(mFrameRecycler)))) {
       NS_WARNING("imgFrame::Init should succeed");
       return RawAccessFrameRef();
     }
 
     ref = frame->RawAccessRef();
     if (!ref) {
       frame->Abort();
       return RawAccessFrameRef();
--- a/image/Decoder.h
+++ b/image/Decoder.h
@@ -267,24 +267,16 @@ class Decoder {
   /**
    * Should we stop decoding after the first frame?
    */
   bool IsFirstFrameDecode() const {
     return bool(mDecoderFlags & DecoderFlags::FIRST_FRAME_ONLY);
   }
 
   /**
-   * Should blend the current frame with the previous frames to produce a
-   * complete frame instead of a partial frame for animated images.
-   */
-  bool ShouldBlendAnimation() const {
-    return bool(mDecoderFlags & DecoderFlags::BLEND_ANIMATION);
-  }
-
-  /**
    * @return the number of complete animation frames which have been decoded so
    * far, if it has changed since the last call to TakeCompleteFrameCount();
    * otherwise, returns Nothing().
    */
   Maybe<uint32_t> TakeCompleteFrameCount();
 
   // The number of frames we have, including anything in-progress. Thus, this
   // is only 0 if we haven't begun any frames.
@@ -413,30 +405,21 @@ class Decoder {
    * current frame we are producing for its animation parameters.
    */
   imgFrame* GetCurrentFrame() { return mCurrentFrame.get(); }
 
   /**
    * For use during decoding only. Allows the BlendAnimationFilter to get the
    * frame it should be pulling the previous frame data from.
    */
-  const RawAccessFrameRef& GetRestoreFrameRef() const {
-    MOZ_ASSERT(ShouldBlendAnimation());
-    return mRestoreFrame;
-  }
+  const RawAccessFrameRef& GetRestoreFrameRef() const { return mRestoreFrame; }
 
-  const gfx::IntRect& GetRestoreDirtyRect() const {
-    MOZ_ASSERT(ShouldBlendAnimation());
-    return mRestoreDirtyRect;
-  }
+  const gfx::IntRect& GetRestoreDirtyRect() const { return mRestoreDirtyRect; }
 
-  const gfx::IntRect& GetRecycleRect() const {
-    MOZ_ASSERT(ShouldBlendAnimation());
-    return mRecycleRect;
-  }
+  const gfx::IntRect& GetRecycleRect() const { return mRecycleRect; }
 
   const gfx::IntRect& GetFirstFrameRefreshArea() const {
     return mFirstFrameRefreshArea;
   }
 
   bool HasFrameToTake() const { return mHasFrameToTake; }
   void ClearHasFrameToTake() {
     MOZ_ASSERT(mHasFrameToTake);
@@ -537,22 +520,19 @@ class Decoder {
   // May not be called mid-frame.
   //
   // For animated images, specify the loop count. -1 means loop forever, 0
   // means a single iteration, stopping on the last frame.
   void PostDecodeDone(int32_t aLoopCount = 0);
 
   /**
    * Allocates a new frame, making it our current frame if successful.
-   *
-   * If a non-paletted frame is desired, pass 0 for aPaletteDepth.
    */
   nsresult AllocateFrame(const gfx::IntSize& aOutputSize,
-                         const gfx::IntRect& aFrameRect,
-                         gfx::SurfaceFormat aFormat, uint8_t aPaletteDepth = 0,
+                         gfx::SurfaceFormat aFormat,
                          const Maybe<AnimationParams>& aAnimParams = Nothing());
 
  private:
   /// Report that an error was encountered while decoding.
   void PostError();
 
   /**
    * CompleteDecode() finishes up the decoding process after Decode() determines
@@ -567,28 +547,25 @@ class Decoder {
     if (mFrameCount == 0) {
       return 0;
     }
 
     return mInFrame ? mFrameCount - 1 : mFrameCount;
   }
 
   RawAccessFrameRef AllocateFrameInternal(
-      const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect,
-      gfx::SurfaceFormat aFormat, uint8_t aPaletteDepth,
+      const gfx::IntSize& aOutputSize, gfx::SurfaceFormat aFormat,
       const Maybe<AnimationParams>& aAnimParams,
       RawAccessFrameRef&& aPreviousFrame);
 
  protected:
   Maybe<Downscaler> mDownscaler;
 
-  uint8_t* mImageData;  // Pointer to image data in either Cairo or 8bit format
+  uint8_t* mImageData;  // Pointer to image data in BGRA/X
   uint32_t mImageDataLength;
-  uint32_t* mColormap;  // Current colormap to be used in Cairo format
-  uint32_t mColormapSize;
 
  private:
   RefPtr<RasterImage> mImage;
   Maybe<SourceBufferIterator> mIterator;
   IDecoderFrameRecycler* mFrameRecycler;
 
   // The current frame the decoder is producing.
   RawAccessFrameRef mCurrentFrame;
--- a/image/DecoderFlags.h
+++ b/image/DecoderFlags.h
@@ -26,24 +26,16 @@ enum class DecoderFlags : uint8_t {
 
   /**
    * By default, a surface is considered substitutable. That means callers are
    * willing to accept a less than ideal match to display. If a caller requires
    * a specific size and won't accept alternatives, then this flag should be
    * set.
    */
   CANNOT_SUBSTITUTE = 1 << 4,
-
-  /**
-   * By default, an animation decoder will produce partial frames that need to
-   * be combined with the previously displayed/composited frame by FrameAnimator
-   * to produce a complete frame. If this flag is set, the decoder will perform
-   * this blending at decode time, and the frames produced are complete.
-   */
-  BLEND_ANIMATION = 1 << 5
 };
 MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(DecoderFlags)
 
 /**
  * @return the default set of decode flags.
  */
 inline DecoderFlags DefaultDecoderFlags() { return DecoderFlags(); }
 
--- a/image/RasterImage.cpp
+++ b/image/RasterImage.cpp
@@ -1191,18 +1191,16 @@ bool RasterImage::Decode(const IntSize& 
     surfaceFlags &= ~SurfaceFlags::NO_PREMULTIPLY_ALPHA;
   }
 
   // Create a decoder.
   RefPtr<IDecodingTask> task;
   nsresult rv;
   bool animated = mAnimationState && aPlaybackType == PlaybackType::eAnimated;
   if (animated) {
-    decoderFlags |= DecoderFlags::BLEND_ANIMATION;
-
     size_t currentFrame = mAnimationState->GetCurrentAnimationFrameIndex();
     rv = DecoderFactory::CreateAnimationDecoder(
         mDecoderType, WrapNotNull(this), mSourceBuffer, mSize, decoderFlags,
         surfaceFlags, currentFrame, getter_AddRefs(task));
   } else {
     rv = DecoderFactory::CreateDecoder(
         mDecoderType, WrapNotNull(this), mSourceBuffer, mSize, aSize,
         decoderFlags, surfaceFlags, getter_AddRefs(task));
@@ -1343,17 +1341,17 @@ ImgDrawResult RasterImage::DrawInternal(
   bool frameIsFinished = aSurface->IsFinished();
 
 #ifdef DEBUG
   NotifyDrawingObservers();
 #endif
 
   // By now we may have a frame with the requested size. If not, we need to
   // adjust the drawing parameters accordingly.
-  IntSize finalSize = aSurface->GetImageSize();
+  IntSize finalSize = aSurface->GetSize();
   bool couldRedecodeForBetterFrame = false;
   if (finalSize != aSize) {
     gfx::Size scale(double(aSize.width) / finalSize.width,
                     double(aSize.height) / finalSize.height);
     aContext->Multiply(gfxMatrix::Scaling(scale.width, scale.height));
     region.Scale(1.0 / scale.width, 1.0 / scale.height);
 
     couldRedecodeForBetterFrame = CanDownscaleDuringDecode(aSize, aFlags);
--- a/image/SurfaceFilters.h
+++ b/image/SurfaceFilters.h
@@ -355,21 +355,16 @@ class BlendAnimationFilter final : publi
   template <typename... Rest>
   nsresult Configure(const BlendAnimationConfig& aConfig,
                      const Rest&... aRest) {
     nsresult rv = mNext.Configure(aRest...);
     if (NS_FAILED(rv)) {
       return rv;
     }
 
-    if (!aConfig.mDecoder || !aConfig.mDecoder->ShouldBlendAnimation()) {
-      MOZ_ASSERT_UNREACHABLE("Expected image decoder that is blending!");
-      return NS_ERROR_INVALID_ARG;
-    }
-
     imgFrame* currentFrame = aConfig.mDecoder->GetCurrentFrame();
     if (!currentFrame) {
       MOZ_ASSERT_UNREACHABLE("Decoder must have current frame!");
       return NS_ERROR_FAILURE;
     }
 
     mFrameRect = mUnclampedFrameRect = currentFrame->GetBlendRect();
     gfx::IntSize outputSize = mNext.InputSize();
@@ -413,17 +408,17 @@ class BlendAnimationFilter final : publi
     // is a full frame and uses source blending, there is no need to consider
     // the disposal method of the previous frame.
     gfx::IntRect dirtyRect(outputRect);
     gfx::IntRect clearRect;
     if (!fullFrame || blendMethod != BlendMethod::SOURCE) {
       const RawAccessFrameRef& restoreFrame =
           aConfig.mDecoder->GetRestoreFrameRef();
       if (restoreFrame) {
-        MOZ_ASSERT(restoreFrame->GetImageSize() == outputSize);
+        MOZ_ASSERT(restoreFrame->GetSize() == outputSize);
         MOZ_ASSERT(restoreFrame->IsFinished());
 
         // We can safely use this pointer without holding a RawAccessFrameRef
         // because the decoder will keep it alive for us.
         mBaseFrameStartPtr = restoreFrame.Data();
         MOZ_ASSERT(mBaseFrameStartPtr);
 
         gfx::IntRect restoreBlendRect = restoreFrame->GetBoundedBlendRect();
--- a/image/SurfacePipe.cpp
+++ b/image/SurfacePipe.cpp
@@ -49,30 +49,24 @@ uint8_t* AbstractSurfaceSink::DoAdvanceR
                          IntRect(0, invalidY, InputSize().width, 1));
 
   mRow = min(uint32_t(InputSize().height), mRow + 1);
 
   return mRow < uint32_t(InputSize().height) ? GetRowPointer() : nullptr;
 }
 
 nsresult SurfaceSink::Configure(const SurfaceConfig& aConfig) {
-  // For non-paletted surfaces, the surface size is just the output size.
   IntSize surfaceSize = aConfig.mOutputSize;
 
-  // Non-paletted surfaces should not have frame rects, so we just pass
-  // AllocateFrame() a frame rect which covers the entire surface.
-  IntRect frameRect(0, 0, surfaceSize.width, surfaceSize.height);
-
   // Allocate the frame.
   // XXX(seth): Once every Decoder subclass uses SurfacePipe, we probably want
   // to allocate the frame directly here and get rid of Decoder::AllocateFrame
   // altogether.
-  nsresult rv = aConfig.mDecoder->AllocateFrame(
-      surfaceSize, frameRect, aConfig.mFormat,
-      /* aPaletteDepth */ 0, aConfig.mAnimParams);
+  nsresult rv = aConfig.mDecoder->AllocateFrame(surfaceSize, aConfig.mFormat,
+                                                aConfig.mAnimParams);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   mImageData = aConfig.mDecoder->mImageData;
   mImageDataLength = aConfig.mDecoder->mImageDataLength;
   mFlipVertically = aConfig.mFlipVertically;
 
--- a/image/SurfacePipeFactory.h
+++ b/image/SurfacePipeFactory.h
@@ -54,20 +54,16 @@ enum class SurfacePipeFlags {
 
   FLIP_VERTICALLY = 1 << 2,  // If set, flip the image vertically.
 
   PROGRESSIVE_DISPLAY = 1 << 3,  // If set, we expect the image to be displayed
                                  // progressively. This enables features that
                                  // result in a better user experience for
                                  // progressive display but which may be more
                                  // computationally expensive.
-
-  BLEND_ANIMATION = 1 << 4  // If set, produce the next full frame of an
-                            // animation instead of a partial frame to be
-                            // blended later.
 };
 MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(SurfacePipeFlags)
 
 class SurfacePipeFactory {
  public:
   /**
    * Creates and initializes a normal (i.e., non-paletted) SurfacePipe.
    *
@@ -97,34 +93,31 @@ class SurfacePipeFactory {
     const bool deinterlace = bool(aFlags & SurfacePipeFlags::DEINTERLACE);
     const bool flipVertically =
         bool(aFlags & SurfacePipeFlags::FLIP_VERTICALLY);
     const bool progressiveDisplay =
         bool(aFlags & SurfacePipeFlags::PROGRESSIVE_DISPLAY);
     const bool downscale = aInputSize != aOutputSize;
     const bool removeFrameRect = !aFrameRect.IsEqualEdges(
         nsIntRect(0, 0, aInputSize.width, aInputSize.height));
-    const bool blendAnimation =
-        bool(aFlags & SurfacePipeFlags::BLEND_ANIMATION);
+    const bool blendAnimation = aAnimParams.isSome();
 
     // Don't interpolate if we're sure we won't show this surface to the user
     // until it's completely decoded. The final pass of an ADAM7 image doesn't
     // need interpolation, so we only need to interpolate if we'll be displaying
     // the image while it's still being decoded.
     const bool adam7Interpolate =
         bool(aFlags & SurfacePipeFlags::ADAM7_INTERPOLATE) &&
         progressiveDisplay;
 
     if (deinterlace && adam7Interpolate) {
       MOZ_ASSERT_UNREACHABLE("ADAM7 deinterlacing is handled by libpng");
       return Nothing();
     }
 
-    MOZ_ASSERT_IF(blendAnimation, aAnimParams);
-
     // Construct configurations for the SurfaceFilters. Note that the order of
     // these filters is significant. We want to deinterlace or interpolate raw
     // input rows, before any other transformations, and we want to remove the
     // frame rect (which may involve adding blank rows or columns to the image)
     // before any downscaling, so that the new rows and columns are taken into
     // account.
     DeinterlacingConfig<uint32_t> deinterlacingConfig{progressiveDisplay};
     ADAM7InterpolatingConfig interpolatingConfig;
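
Since the BLEND_ANIMATION pipe flag is gone, whether the pipe blends is inferred from whether animation parameters were supplied at all, matching the aAnimParams.isSome() check above. A minimal stand-in for that control-flow change, with simplified placeholder names rather than the real SurfacePipeFactory API:

#include <cstdint>
#include <iostream>
#include <optional>

struct AnimParams { uint32_t frameNum; };

enum class PipeFlags : uint8_t { None = 0, Deinterlace = 1 << 0 };

void CreatePipe(const std::optional<AnimParams>& animParams, PipeFlags) {
  // Mirrors `const bool blendAnimation = aAnimParams.isSome();`
  const bool blendAnimation = animParams.has_value();
  std::cout << (blendAnimation ? "blending animation frames\n"
                               : "single full-frame decode\n");
}

int main() {
  CreatePipe(std::nullopt, PipeFlags::None);          // first-frame-only decode
  CreatePipe(AnimParams{1}, PipeFlags::Deinterlace);  // animated decode
  return 0;
}
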
--- a/image/decoders/nsBMPDecoder.cpp
+++ b/image/decoders/nsBMPDecoder.cpp
@@ -652,19 +652,19 @@ LexerTransition<nsBMPDecoder::State> nsB
     mColors = MakeUnique<ColorTableEntry[]>(256);
     memset(mColors.get(), 0, 256 * sizeof(ColorTableEntry));
 
     // OS/2 Bitmaps have no padding byte.
     mBytesPerColor = (mH.mBIHSize == InfoHeaderLength::WIN_V2) ? 3 : 4;
   }
 
   MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
-  nsresult rv = AllocateFrame(
-      OutputSize(), FullOutputFrame(),
-      mMayHaveTransparency ? SurfaceFormat::B8G8R8A8 : SurfaceFormat::B8G8R8X8);
+  nsresult rv = AllocateFrame(OutputSize(), mMayHaveTransparency
+                                                ? SurfaceFormat::B8G8R8A8
+                                                : SurfaceFormat::B8G8R8X8);
   if (NS_FAILED(rv)) {
     return Transition::TerminateFailure();
   }
   MOZ_ASSERT(mImageData, "Should have a buffer now");
 
   if (mDownscaler) {
     // BMPs store their rows in reverse order, so the downscaler needs to
     // reverse them again when writing its output. Unless the height is
--- a/image/decoders/nsGIFDecoder2.cpp
+++ b/image/decoders/nsGIFDecoder2.cpp
@@ -81,16 +81,18 @@ static const uint8_t PACKED_FIELDS_TABLE
 
 nsGIFDecoder2::nsGIFDecoder2(RasterImage* aImage)
     : Decoder(aImage),
       mLexer(Transition::To(State::GIF_HEADER, GIF_HEADER_LEN),
              Transition::TerminateSuccess()),
       mOldColor(0),
       mCurrentFrameIndex(-1),
       mColorTablePos(0),
+      mColormap(nullptr),
+      mColormapSize(0),
       mColorMask('\0'),
       mGIFOpen(false),
       mSawTransparency(false) {
   // Clear out the structure, excluding the arrays.
   memset(&mGIFStruct, 0, sizeof(mGIFStruct));
 }
 
 nsGIFDecoder2::~nsGIFDecoder2() { free(mGIFStruct.local_colormap); }
@@ -159,64 +161,44 @@ bool nsGIFDecoder2::CheckForTransparency
 }
 
 //******************************************************************************
 nsresult nsGIFDecoder2::BeginImageFrame(const IntRect& aFrameRect,
                                         uint16_t aDepth, bool aIsInterlaced) {
   MOZ_ASSERT(HasSize());
 
   bool hasTransparency = CheckForTransparency(aFrameRect);
-  bool blendAnimation = ShouldBlendAnimation();
 
   // Make sure there's no animation if we're downscaling.
   MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation());
 
-  AnimationParams animParams{
-      aFrameRect, FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time),
-      uint32_t(mGIFStruct.images_decoded), BlendMethod::OVER,
-      DisposalMethod(mGIFStruct.disposal_method)};
+  Maybe<AnimationParams> animParams;
+  if (!IsFirstFrameDecode()) {
+    animParams.emplace(aFrameRect,
+                       FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time),
+                       uint32_t(mGIFStruct.images_decoded), BlendMethod::OVER,
+                       DisposalMethod(mGIFStruct.disposal_method));
+  }
 
   SurfacePipeFlags pipeFlags =
       aIsInterlaced ? SurfacePipeFlags::DEINTERLACE : SurfacePipeFlags();
 
   gfx::SurfaceFormat format;
   if (mGIFStruct.images_decoded == 0) {
     // The first frame may be displayed progressively.
     pipeFlags |= SurfacePipeFlags::PROGRESSIVE_DISPLAY;
 
     format =
         hasTransparency ? SurfaceFormat::B8G8R8A8 : SurfaceFormat::B8G8R8X8;
   } else {
     format = SurfaceFormat::B8G8R8A8;
   }
 
-  if (blendAnimation) {
-    pipeFlags |= SurfacePipeFlags::BLEND_ANIMATION;
-  }
-
-  Maybe<SurfacePipe> pipe;
-  if (mGIFStruct.images_decoded == 0 || blendAnimation) {
-    // The first frame is always decoded into an RGB surface.
-    pipe = SurfacePipeFactory::CreateSurfacePipe(this, Size(), OutputSize(),
-                                                 aFrameRect, format,
-                                                 Some(animParams), pipeFlags);
-  } else {
-    // This is an animation frame (and not the first). To minimize the memory
-    // usage of animations, the image data is stored in paletted form.
-    //
-    // We should never use paletted surfaces with a draw target directly, so
-    // the only practical difference between B8G8R8A8 and B8G8R8X8 is the
-    // cleared pixel value if we get truncated. We want 0 in that case to
-    // ensure it is an acceptable value for the color map as was the case
-    // historically.
-    MOZ_ASSERT(Size() == OutputSize());
-    pipe = SurfacePipeFactory::CreatePalettedSurfacePipe(
-        this, Size(), aFrameRect, format, aDepth, Some(animParams), pipeFlags);
-  }
-
+  Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
+      this, Size(), OutputSize(), aFrameRect, format, animParams, pipeFlags);
   mCurrentFrameIndex = mGIFStruct.images_decoded;
 
   if (!pipe) {
     mPipe = SurfacePipe();
     return NS_ERROR_FAILURE;
   }
 
   mPipe = std::move(*pipe);
@@ -250,16 +232,18 @@ void nsGIFDecoder2::EndImageFrame() {
   PostFrameStop(opacity);
 
   // Reset the transparent pixel
   if (mOldColor) {
     mColormap[mGIFStruct.tpixel] = mOldColor;
     mOldColor = 0;
   }
 
+  mColormap = nullptr;
+  mColormapSize = 0;
   mCurrentFrameIndex = -1;
 }
 
 template <typename PixelSize>
 PixelSize nsGIFDecoder2::ColormapIndexToPixel(uint8_t aIndex) {
   MOZ_ASSERT(sizeof(PixelSize) == sizeof(uint32_t));
 
   // Retrieve the next color, clamping to the size of the colormap.
@@ -869,21 +853,16 @@ LexerTransition<nsGIFDecoder2::State> ns
       int64_t(frameRect.Width()) * int64_t(frameRect.Height());
 
   if (haveLocalColorTable) {
     // We have a local color table, so prepare to read it into the palette of
     // the current frame.
     mGIFStruct.local_colormap_size = 1 << depth;
 
     if (!mColormap) {
-      // Allocate a buffer to store the local color tables. This could be if the
-      // first frame has a local color table, or for subsequent frames when
-      // blending the animation during decoding.
-      MOZ_ASSERT(mGIFStruct.images_decoded == 0 || ShouldBlendAnimation());
-
       // Ensure our current colormap buffer is large enough to hold the new one.
       mColormapSize = sizeof(uint32_t) << realDepth;
       if (mGIFStruct.local_colormap_buffer_size < mColormapSize) {
         if (mGIFStruct.local_colormap) {
           free(mGIFStruct.local_colormap);
         }
         mGIFStruct.local_colormap_buffer_size = mColormapSize;
         mGIFStruct.local_colormap =
@@ -1014,28 +993,21 @@ LexerTransition<nsGIFDecoder2::State> ns
     const char* aData, size_t aLength) {
   const uint8_t* data = reinterpret_cast<const uint8_t*>(aData);
   size_t length = aLength;
 
   while (mGIFStruct.pixels_remaining > 0 &&
          (length > 0 || mGIFStruct.bits >= mGIFStruct.codesize)) {
     size_t bytesRead = 0;
 
-    auto result =
-        mGIFStruct.images_decoded == 0 || ShouldBlendAnimation()
-            ? mPipe.WritePixelBlocks<uint32_t>(
-                  [&](uint32_t* aPixelBlock, int32_t aBlockSize) {
-                    return YieldPixels<uint32_t>(data, length, &bytesRead,
-                                                 aPixelBlock, aBlockSize);
-                  })
-            : mPipe.WritePixelBlocks<uint8_t>(
-                  [&](uint8_t* aPixelBlock, int32_t aBlockSize) {
-                    return YieldPixels<uint8_t>(data, length, &bytesRead,
-                                                aPixelBlock, aBlockSize);
-                  });
+    auto result = mPipe.WritePixelBlocks<uint32_t>(
+        [&](uint32_t* aPixelBlock, int32_t aBlockSize) {
+          return YieldPixels<uint32_t>(data, length, &bytesRead, aPixelBlock,
+                                       aBlockSize);
+        });
 
     if (MOZ_UNLIKELY(bytesRead > length)) {
       MOZ_ASSERT_UNREACHABLE("Overread?");
       bytesRead = length;
     }
 
     // Advance our position in the input based upon what YieldPixel() consumed.
     data += bytesRead;
--- a/image/decoders/nsGIFDecoder2.h
+++ b/image/decoders/nsGIFDecoder2.h
@@ -138,16 +138,18 @@ class nsGIFDecoder2 : public Decoder {
   // The frame number of the currently-decoding frame when we're in the middle
   // of decoding it, and -1 otherwise.
   int32_t mCurrentFrameIndex;
 
   // When we're reading in the global or local color table, this records our
   // current position - i.e., the offset into which the next byte should be
   // written.
   size_t mColorTablePos;
+  uint32_t* mColormap;  // Current colormap to be used in Cairo format
+  uint32_t mColormapSize;
 
   uint8_t mColorMask;  // Apply this to the pixel to keep within colormap
   bool mGIFOpen;
   bool mSawTransparency;
 
   gif_struct mGIFStruct;
 
   SurfacePipe mPipe;  /// The SurfacePipe used to write to the output surface.
--- a/image/decoders/nsJPEGDecoder.cpp
+++ b/image/decoders/nsJPEGDecoder.cpp
@@ -380,18 +380,17 @@ LexerTransition<nsJPEGDecoder::State> ns
       // when not doing a progressive decode.
       mInfo.buffered_image =
           mDecodeStyle == PROGRESSIVE && jpeg_has_multiple_scans(&mInfo);
 
       /* Used to set up image size so arrays can be allocated */
       jpeg_calc_output_dimensions(&mInfo);
 
       MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
-      nsresult rv = AllocateFrame(OutputSize(), FullOutputFrame(),
-                                  SurfaceFormat::B8G8R8X8);
+      nsresult rv = AllocateFrame(OutputSize(), SurfaceFormat::B8G8R8X8);
       if (NS_FAILED(rv)) {
         mState = JPEG_ERROR;
         MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                 ("} (could not initialize image frame)"));
         return Transition::TerminateFailure();
       }
 
       MOZ_ASSERT(mImageData, "Should have a buffer now");
--- a/image/decoders/nsPNGDecoder.cpp
+++ b/image/decoders/nsPNGDecoder.cpp
@@ -190,17 +190,17 @@ nsresult nsPNGDecoder::CreateFrame(const
   // Make sure there's no animation or padding if we're downscaling.
   MOZ_ASSERT_IF(Size() != OutputSize(), mNumFrames == 0);
   MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation());
   MOZ_ASSERT_IF(Size() != OutputSize(),
                 transparency != TransparencyType::eFrameRect);
 
   Maybe<AnimationParams> animParams;
 #ifdef PNG_APNG_SUPPORTED
-  if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
+  if (!IsFirstFrameDecode() && png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
     mAnimInfo = AnimFrameInfo(mPNG, mInfo);
 
     if (mAnimInfo.mDispose == DisposalMethod::CLEAR) {
       // We may have to display the background under this image during
       // animation playback, so we regard it as transparent.
       PostHasTransparency();
     }
 
@@ -217,20 +217,16 @@ nsresult nsPNGDecoder::CreateFrame(const
                                    ? SurfacePipeFlags::ADAM7_INTERPOLATE
                                    : SurfacePipeFlags();
 
   if (mNumFrames == 0) {
     // The first frame may be displayed progressively.
     pipeFlags |= SurfacePipeFlags::PROGRESSIVE_DISPLAY;
   }
 
-  if (ShouldBlendAnimation()) {
-    pipeFlags |= SurfacePipeFlags::BLEND_ANIMATION;
-  }
-
   Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
       this, Size(), OutputSize(), aFrameInfo.mFrameRect, mFormat, animParams,
       pipeFlags);
 
   if (!pipe) {
     mPipe = SurfacePipe();
     return NS_ERROR_FAILURE;
   }
@@ -534,17 +530,18 @@ void nsPNGDecoder::info_callback(png_str
 
   png_bytep trans = nullptr;
   int num_trans = 0;
 
   nsPNGDecoder* decoder =
       static_cast<nsPNGDecoder*>(png_get_progressive_ptr(png_ptr));
 
   if (decoder->mGotInfoCallback) {
-    MOZ_LOG(sPNGLog, LogLevel::Warning, ("libpng called info_callback more than once\n"));
+    MOZ_LOG(sPNGLog, LogLevel::Warning,
+            ("libpng called info_callback more than once\n"));
     return;
   }
 
   decoder->mGotInfoCallback = true;
 
   // Always decode to 24-bit RGB or 32-bit RGBA
   png_get_IHDR(png_ptr, info_ptr, &width, &height, &bit_depth, &color_type,
                &interlace_type, &compression_type, &filter_type);
--- a/image/decoders/nsWebPDecoder.cpp
+++ b/image/decoders/nsWebPDecoder.cpp
@@ -226,26 +226,23 @@ nsresult nsWebPDecoder::CreateFrame(cons
     MOZ_LOG(sWebPLog, LogLevel::Error,
             ("[this=%p] nsWebPDecoder::CreateFrame -- create decoder error\n",
              this));
     return NS_ERROR_FAILURE;
   }
 
   SurfacePipeFlags pipeFlags = SurfacePipeFlags();
 
-  if (ShouldBlendAnimation()) {
-    pipeFlags |= SurfacePipeFlags::BLEND_ANIMATION;
+  Maybe<AnimationParams> animParams;
+  if (!IsFirstFrameDecode()) {
+    animParams.emplace(aFrameRect, mTimeout, mCurrentFrame, mBlend, mDisposal);
   }
 
-  AnimationParams animParams{aFrameRect, mTimeout, mCurrentFrame, mBlend,
-                             mDisposal};
-
   Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
-      this, Size(), OutputSize(), aFrameRect, mFormat, Some(animParams),
-      pipeFlags);
+      this, Size(), OutputSize(), aFrameRect, mFormat, animParams, pipeFlags);
   if (!pipe) {
     MOZ_LOG(sWebPLog, LogLevel::Error,
             ("[this=%p] nsWebPDecoder::CreateFrame -- no pipe\n", this));
     return NS_ERROR_FAILURE;
   }
 
   mFrameRect = aFrameRect;
   mPipe = std::move(*pipe);
--- a/image/imgFrame.cpp
+++ b/image/imgFrame.cpp
@@ -91,22 +91,20 @@ static bool ShouldUseHeap(const IntSize&
   if (bufferSize < gfxPrefs::ImageMemVolatileMinThresholdKB()) {
     return true;
   }
 
   return false;
 }
 
 static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
-    const IntSize& size, SurfaceFormat format, bool aIsAnimated = false,
-    bool aIsFullFrame = true) {
+    const IntSize& size, SurfaceFormat format, bool aIsAnimated = false) {
   int32_t stride = VolatileSurfaceStride(size, format);
 
-  if (gfxVars::GetUseWebRenderOrDefault() && gfxPrefs::ImageMemShared() &&
-      aIsFullFrame) {
+  if (gfxVars::GetUseWebRenderOrDefault() && gfxPrefs::ImageMemShared()) {
     RefPtr<SourceSurfaceSharedData> newSurf = new SourceSurfaceSharedData();
     if (newSurf->Init(size, stride, format)) {
       return newSurf.forget();
     }
   } else if (ShouldUseHeap(size, stride, aIsAnimated)) {
     RefPtr<SourceSurfaceAlignedRawData> newSurf =
         new SourceSurfaceAlignedRawData();
     if (newSurf->Init(size, format, false, 0, stride)) {
@@ -171,202 +169,137 @@ static bool ClearSurface(DataSourceSurfa
     // Otherwise, it's allocated via mmap and refers to a zeroed page and will
     // be COW once it's written to.
     memset(data, 0, stride * aSize.height);
   }
 
   return true;
 }
 
-static bool AllowedImageAndFrameDimensions(const nsIntSize& aImageSize,
-                                           const nsIntRect& aFrameRect) {
-  if (!SurfaceCache::IsLegalSize(aImageSize)) {
-    return false;
-  }
-  if (!SurfaceCache::IsLegalSize(aFrameRect.Size())) {
-    return false;
-  }
-  nsIntRect imageRect(0, 0, aImageSize.width, aImageSize.height);
-  if (!imageRect.Contains(aFrameRect)) {
-    NS_WARNING("Animated image frame does not fit inside bounds of image");
-  }
-  return true;
-}
-
 imgFrame::imgFrame()
     : mMonitor("imgFrame"),
       mDecoded(0, 0, 0, 0),
       mLockCount(0),
       mRecycleLockCount(0),
       mAborted(false),
       mFinished(false),
       mOptimizable(false),
       mShouldRecycle(false),
       mTimeout(FrameTimeout::FromRawMilliseconds(100)),
       mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
       mBlendMethod(BlendMethod::OVER),
       mFormat(SurfaceFormat::UNKNOWN),
-      mPalettedImageData(nullptr),
-      mPaletteDepth(0),
-      mNonPremult(false),
-      mIsFullFrame(false) {}
+      mNonPremult(false) {}
 
 imgFrame::~imgFrame() {
 #ifdef DEBUG
   MonitorAutoLock lock(mMonitor);
   MOZ_ASSERT(mAborted || AreAllPixelsWritten());
   MOZ_ASSERT(mAborted || mFinished);
 #endif
-
-  free(mPalettedImageData);
-  mPalettedImageData = nullptr;
 }
 
 nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
-                                  const nsIntRect& aRect, SurfaceFormat aFormat,
-                                  uint8_t aPaletteDepth, bool aNonPremult,
+                                  SurfaceFormat aFormat, bool aNonPremult,
                                   const Maybe<AnimationParams>& aAnimParams,
-                                  bool aIsFullFrame, bool aShouldRecycle) {
+                                  bool aShouldRecycle) {
   // Assert for properties that should be verified by decoders,
   // warn for properties related to bad content.
-  if (!AllowedImageAndFrameDimensions(aImageSize, aRect)) {
+  if (!SurfaceCache::IsLegalSize(aImageSize)) {
     NS_WARNING("Should have legal image size");
     mAborted = true;
     return NS_ERROR_FAILURE;
   }
 
   mImageSize = aImageSize;
-  mFrameRect = aRect;
 
   // May be updated shortly after InitForDecoder by BlendAnimationFilter
   // because it needs to take into consideration the previous frames to
   // properly calculate. We start with the whole frame as dirty.
-  mDirtyRect = aRect;
+  mDirtyRect = GetRect();
 
   if (aAnimParams) {
     mBlendRect = aAnimParams->mBlendRect;
     mTimeout = aAnimParams->mTimeout;
     mBlendMethod = aAnimParams->mBlendMethod;
     mDisposalMethod = aAnimParams->mDisposalMethod;
-    mIsFullFrame = aAnimParams->mFrameNum == 0 || aIsFullFrame;
   } else {
-    mBlendRect = aRect;
-    mIsFullFrame = true;
-  }
-
-  // We only allow a non-trivial frame rect (i.e., a frame rect that doesn't
-  // cover the entire image) for paletted animation frames. We never draw those
-  // frames directly; we just use FrameAnimator to composite them and produce a
-  // BGRA surface that we actually draw. We enforce this here to make sure that
-  // imgFrame::Draw(), which is responsible for drawing all other kinds of
-  // frames, never has to deal with a non-trivial frame rect.
-  if (aPaletteDepth == 0 &&
-      !mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize))) {
-    MOZ_ASSERT_UNREACHABLE(
-        "Creating a non-paletted imgFrame with a "
-        "non-trivial frame rect");
-    return NS_ERROR_FAILURE;
+    mBlendRect = GetRect();
   }
 
   if (aShouldRecycle) {
     // If we are recycling then we should always use BGRA for the underlying
     // surface because if we use BGRX, the next frame composited into the
     // surface could be BGRA and cause rendering problems.
-    MOZ_ASSERT(mIsFullFrame);
-    MOZ_ASSERT(aPaletteDepth == 0);
     MOZ_ASSERT(aAnimParams);
     mFormat = SurfaceFormat::B8G8R8A8;
   } else {
     mFormat = aFormat;
   }
 
-  mPaletteDepth = aPaletteDepth;
   mNonPremult = aNonPremult;
   mShouldRecycle = aShouldRecycle;
 
-  if (aPaletteDepth != 0) {
-    // We're creating for a paletted image.
-    if (aPaletteDepth > 8) {
-      NS_WARNING("Should have legal palette depth");
-      NS_ERROR("This Depth is not supported");
-      mAborted = true;
-      return NS_ERROR_FAILURE;
-    }
+  MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");
 
-    // Use the fallible allocator here. Paletted images always use 1 byte per
-    // pixel, so calculating the amount of memory we need is straightforward.
-    size_t dataSize = PaletteDataLength() + mFrameRect.Area();
-    mPalettedImageData =
-        static_cast<uint8_t*>(calloc(dataSize, sizeof(uint8_t)));
-    if (!mPalettedImageData) {
-      NS_WARNING("Call to calloc for paletted image data should succeed");
-    }
-    NS_ENSURE_TRUE(mPalettedImageData, NS_ERROR_OUT_OF_MEMORY);
-  } else {
-    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");
+  bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
+  mRawSurface = AllocateBufferForImage(mImageSize, mFormat, postFirstFrame);
+  if (!mRawSurface) {
+    mAborted = true;
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
 
-    bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
-    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat,
-                                         postFirstFrame, mIsFullFrame);
-    if (!mRawSurface) {
+  if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
+      aAnimParams) {
+    mBlankRawSurface = AllocateBufferForImage(mImageSize, mFormat);
+    if (!mBlankRawSurface) {
       mAborted = true;
       return NS_ERROR_OUT_OF_MEMORY;
     }
+  }
 
-    if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
-        aAnimParams) {
-      mBlankRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
-      if (!mBlankRawSurface) {
-        mAborted = true;
-        return NS_ERROR_OUT_OF_MEMORY;
-      }
-    }
+  mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
+  if (!mLockedSurface) {
+    NS_WARNING("Failed to create LockedSurface");
+    mAborted = true;
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
 
-    mLockedSurface =
-        CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
-    if (!mLockedSurface) {
-      NS_WARNING("Failed to create LockedSurface");
+  if (mBlankRawSurface) {
+    mBlankLockedSurface =
+        CreateLockedSurface(mBlankRawSurface, mImageSize, mFormat);
+    if (!mBlankLockedSurface) {
+      NS_WARNING("Failed to create BlankLockedSurface");
       mAborted = true;
       return NS_ERROR_OUT_OF_MEMORY;
     }
+  }
 
-    if (mBlankRawSurface) {
-      mBlankLockedSurface =
-          CreateLockedSurface(mBlankRawSurface, mFrameRect.Size(), mFormat);
-      if (!mBlankLockedSurface) {
-        NS_WARNING("Failed to create BlankLockedSurface");
-        mAborted = true;
-        return NS_ERROR_OUT_OF_MEMORY;
-      }
-    }
+  if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
+    NS_WARNING("Could not clear allocated buffer");
+    mAborted = true;
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
 
-    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
-      NS_WARNING("Could not clear allocated buffer");
+  if (mBlankRawSurface) {
+    if (!GreenSurface(mBlankRawSurface, mImageSize, mFormat)) {
+      NS_WARNING("Could not clear allocated blank buffer");
       mAborted = true;
       return NS_ERROR_OUT_OF_MEMORY;
     }
-
-    if (mBlankRawSurface) {
-      if (!GreenSurface(mBlankRawSurface, mFrameRect.Size(), mFormat)) {
-        NS_WARNING("Could not clear allocated blank buffer");
-        mAborted = true;
-        return NS_ERROR_OUT_OF_MEMORY;
-      }
-    }
   }
 
   return NS_OK;
 }
 
 nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
   // We want to recycle this frame, but there is no guarantee that consumers are
   // done with it in a timely manner. Let's ensure they are done with it first.
   MonitorAutoLock lock(mMonitor);
 
-  MOZ_ASSERT(mIsFullFrame);
   MOZ_ASSERT(mLockCount > 0);
   MOZ_ASSERT(mLockedSurface);
 
   if (!mShouldRecycle) {
     // This frame either was never marked as recyclable, or the flag was cleared
     // for a caller which does not support recycling.
     return NS_ERROR_NOT_AVAILABLE;
   }
@@ -412,17 +345,17 @@ nsresult imgFrame::InitForDecoderRecycle
       timeout -= delta;
     }
   }
 
   mBlendRect = aAnimParams.mBlendRect;
   mTimeout = aAnimParams.mTimeout;
   mBlendMethod = aAnimParams.mBlendMethod;
   mDisposalMethod = aAnimParams.mDisposalMethod;
-  mDirtyRect = mFrameRect;
+  mDirtyRect = GetRect();
 
   return NS_OK;
 }
 
 nsresult imgFrame::InitWithDrawable(
     gfxDrawable* aDrawable, const nsIntSize& aSize, const SurfaceFormat aFormat,
     SamplingFilter aSamplingFilter, uint32_t aImageFlags,
     gfx::BackendType aBackend, DrawTarget* aTargetDT) {
@@ -430,82 +363,78 @@ nsresult imgFrame::InitWithDrawable(
   // warn for properties related to bad content.
   if (!SurfaceCache::IsLegalSize(aSize)) {
     NS_WARNING("Should have legal image size");
     mAborted = true;
     return NS_ERROR_FAILURE;
   }
 
   mImageSize = aSize;
-  mFrameRect = IntRect(IntPoint(0, 0), aSize);
-
   mFormat = aFormat;
-  mPaletteDepth = 0;
 
   RefPtr<DrawTarget> target;
 
   bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
   if (canUseDataSurface) {
     // It's safe to use data surfaces for content on this platform, so we can
     // get away with using volatile buffers.
     MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");
 
-    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
+    mRawSurface = AllocateBufferForImage(mImageSize, mFormat);
     if (!mRawSurface) {
       mAborted = true;
       return NS_ERROR_OUT_OF_MEMORY;
     }
 
-    mLockedSurface =
-        CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
+    mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
     if (!mLockedSurface) {
       NS_WARNING("Failed to create LockedSurface");
       mAborted = true;
       return NS_ERROR_OUT_OF_MEMORY;
     }
 
-    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
+    if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
       NS_WARNING("Could not clear allocated buffer");
       mAborted = true;
       return NS_ERROR_OUT_OF_MEMORY;
     }
 
     target = gfxPlatform::CreateDrawTargetForData(
-        mLockedSurface->GetData(), mFrameRect.Size(), mLockedSurface->Stride(),
+        mLockedSurface->GetData(), mImageSize, mLockedSurface->Stride(),
         mFormat);
   } else {
     // We can't use data surfaces for content, so we'll create an offscreen
     // surface instead.  This means if someone later calls RawAccessRef(), we
     // may have to do an expensive readback, but we warned callers about that in
     // the documentation for this method.
     MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");
 
     if (aTargetDT && !gfxVars::UseWebRender()) {
-      target = aTargetDT->CreateSimilarDrawTarget(mFrameRect.Size(), mFormat);
+      target = aTargetDT->CreateSimilarDrawTarget(mImageSize, mFormat);
     } else {
       if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
         target = gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
-            aBackend, mFrameRect.Size(), mFormat);
+            aBackend, mImageSize, mFormat);
       } else {
         target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
-            mFrameRect.Size(), mFormat);
+            mImageSize, mFormat);
       }
     }
   }
 
   if (!target || !target->IsValid()) {
     mAborted = true;
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   // Draw using the drawable the caller provided.
   RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
   MOZ_ASSERT(ctx);  // Already checked the draw target above.
-  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mFrameRect.Size()),
-                             ImageRegion::Create(ThebesRect(mFrameRect)),
+  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mImageSize),
+                             ImageRegion::Create(ThebesRect(GetRect())),
                              mFormat, aSamplingFilter, aImageFlags);
 
   if (canUseDataSurface && !mLockedSurface) {
     NS_WARNING("Failed to create VolatileDataSourceSurface");
     mAborted = true;
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
@@ -552,17 +481,17 @@ nsresult imgFrame::Optimize(DrawTarget* 
   if (ShutdownTracker::ShutdownHasStarted()) {
     return NS_OK;
   }
 
   if (gDisableOptimize) {
     return NS_OK;
   }
 
-  if (mPalettedImageData || mOptSurface) {
+  if (mOptSurface) {
     return NS_OK;
   }
 
   // XXX(seth): It's currently unclear if there's any reason why we can't
   // optimize non-premult surfaces. We should look into removing this.
   if (mNonPremult) {
     return NS_OK;
   }
@@ -655,23 +584,16 @@ bool imgFrame::Draw(gfxContext* aContext
                     float aOpacity) {
   AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);
 
   MOZ_ASSERT(NS_IsMainThread());
   NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
   NS_ASSERTION(!aRegion.IsRestricted() ||
                    !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
                "We must be allowed to sample *some* source pixels!");
-  MOZ_ASSERT(mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize)),
-             "Directly drawing an image with a non-trivial frame rect!");
-
-  if (mPalettedImageData) {
-    MOZ_ASSERT_UNREACHABLE("Directly drawing a paletted image!");
-    return false;
-  }
 
   // Perform the draw and freeing of the surface outside the lock. We want to
   // avoid contention with the decoder if we can. The surface may also attempt
   // to relock the monitor if it is freed (e.g. RecyclingSourceSurface).
   RefPtr<SourceSurface> surf;
   SurfaceWithFormat surfaceResult;
   ImageRegion region(aRegion);
   gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);
@@ -728,166 +650,126 @@ nsresult imgFrame::ImageUpdated(const ns
   return ImageUpdatedInternal(aUpdateRect);
 }
 
 nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {
   mMonitor.AssertCurrentThreadOwns();
 
   // Clamp to the frame rect to ensure that decoder bugs don't result in a
   // decoded rect that extends outside the bounds of the frame rect.
-  IntRect updateRect = mFrameRect.Intersect(aUpdateRect);
+  IntRect updateRect = aUpdateRect.Intersect(GetRect());
   if (updateRect.IsEmpty()) {
     return NS_OK;
   }
 
   mDecoded.UnionRect(mDecoded, updateRect);
 
-  // Paletted images cannot invalidate.
-  if (mPalettedImageData) {
-    return NS_OK;
-  }
-
   // Update our invalidation counters for any consumers watching for changes
   // in the surface.
   if (mRawSurface) {
     mRawSurface->Invalidate(updateRect);
   }
   if (mLockedSurface && mRawSurface != mLockedSurface) {
     mLockedSurface->Invalidate(updateRect);
   }
   return NS_OK;
 }
 
 void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                       bool aFinalize /* = true */) {
   MonitorAutoLock lock(mMonitor);
   MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
 
-  if (mPalettedImageData) {
-    ImageUpdatedInternal(mFrameRect);
-  } else if (!mDecoded.IsEqualEdges(mFrameRect)) {
+  IntRect frameRect(GetRect());
+  if (!mDecoded.IsEqualEdges(frameRect)) {
     // The decoder should have produced rows starting from either the bottom or
     // the top of the image. We need to calculate the region for which we have
     // not yet invalidated.
-    IntRect delta(0, 0, mFrameRect.width, 0);
+    IntRect delta(0, 0, frameRect.width, 0);
     if (mDecoded.y == 0) {
       delta.y = mDecoded.height;
-      delta.height = mFrameRect.height - mDecoded.height;
-    } else if (mDecoded.y + mDecoded.height == mFrameRect.height) {
-      delta.height = mFrameRect.height - mDecoded.y;
+      delta.height = frameRect.height - mDecoded.height;
+    } else if (mDecoded.y + mDecoded.height == frameRect.height) {
+      delta.height = frameRect.height - mDecoded.y;
     } else {
       MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
-      delta = mFrameRect;
+      delta = frameRect;
     }
 
     ImageUpdatedInternal(delta);
   }
 
-  MOZ_ASSERT(mDecoded.IsEqualEdges(mFrameRect));
+  MOZ_ASSERT(mDecoded.IsEqualEdges(frameRect));
 
   if (aFinalize) {
     FinalizeSurfaceInternal();
   }
 
   mFinished = true;
 
   // The image is now complete, wake up anyone who's waiting.
   mMonitor.NotifyAll();
 }
 
 uint32_t imgFrame::GetImageBytesPerRow() const {
   mMonitor.AssertCurrentThreadOwns();
 
   if (mRawSurface) {
-    return mFrameRect.Width() * BytesPerPixel(mFormat);
-  }
-
-  if (mPaletteDepth) {
-    return mFrameRect.Width();
+    return mImageSize.width * BytesPerPixel(mFormat);
   }
 
   return 0;
 }
 
 uint32_t imgFrame::GetImageDataLength() const {
-  return GetImageBytesPerRow() * mFrameRect.Height();
+  return GetImageBytesPerRow() * mImageSize.height;
 }
 
 void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
   MonitorAutoLock lock(mMonitor);
   GetImageDataInternal(aData, aLength);
 }
 
 void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
   mMonitor.AssertCurrentThreadOwns();
   MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
+  MOZ_ASSERT(mLockedSurface);
 
   if (mLockedSurface) {
     // TODO: This is okay for now because we only realloc shared surfaces on
     // the main thread after decoding has finished, but if animations want to
     // read frame data off the main thread, we will need to reconsider this.
     *aData = mLockedSurface->GetData();
     MOZ_ASSERT(
         *aData,
         "mLockedSurface is non-null, but GetData is null in GetImageData");
-  } else if (mPalettedImageData) {
-    *aData = mPalettedImageData + PaletteDataLength();
-    MOZ_ASSERT(
-        *aData,
-        "mPalettedImageData is non-null, but result is null in GetImageData");
   } else {
-    MOZ_ASSERT(
-        false,
-        "Have neither mLockedSurface nor mPalettedImageData in GetImageData");
     *aData = nullptr;
   }
 
   *aLength = GetImageDataLength();
 }
 
 uint8_t* imgFrame::GetImageData() const {
   uint8_t* data;
   uint32_t length;
   GetImageData(&data, &length);
   return data;
 }
 
-bool imgFrame::GetIsPaletted() const { return mPalettedImageData != nullptr; }
-
-void imgFrame::GetPaletteData(uint32_t** aPalette, uint32_t* length) const {
-  AssertImageDataLocked();
-
-  if (!mPalettedImageData) {
-    *aPalette = nullptr;
-    *length = 0;
-  } else {
-    *aPalette = (uint32_t*)mPalettedImageData;
-    *length = PaletteDataLength();
-  }
-}
-
-uint32_t* imgFrame::GetPaletteData() const {
-  uint32_t* data;
-  uint32_t length;
-  GetPaletteData(&data, &length);
-  return data;
-}
-
 uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
   MonitorAutoLock lock(mMonitor);
 
   MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
   if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
     return nullptr;
   }
 
   uint8_t* data;
-  if (mPalettedImageData) {
-    data = mPalettedImageData;
-  } else if (mLockedSurface) {
+  if (mLockedSurface) {
     data = mLockedSurface->GetData();
   } else {
     data = nullptr;
   }
 
   // If the raw data is still available, we should get a valid pointer for it.
   if (!data) {
     MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
@@ -994,17 +876,17 @@ already_AddRefed<SourceSurface> imgFrame
   }
 
   MOZ_ASSERT(!mShouldRecycle, "Should recycle but no locked surface!");
 
   if (!mRawSurface) {
     return nullptr;
   }
 
-  return CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
+  return CreateLockedSurface(mRawSurface, mImageSize, mFormat);
 }
 
 void imgFrame::Abort() {
   MonitorAutoLock lock(mMonitor);
 
   mAborted = true;
 
   // Wake up anyone who's waiting.
@@ -1032,27 +914,24 @@ void imgFrame::WaitUntilFinished() const
 
     // Not complete yet, so we'll have to wait.
     mMonitor.Wait();
   }
 }
 
 bool imgFrame::AreAllPixelsWritten() const {
   mMonitor.AssertCurrentThreadOwns();
-  return mDecoded.IsEqualInterior(mFrameRect);
+  return mDecoded.IsEqualInterior(GetRect());
 }
 
 void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                       const AddSizeOfCb& aCallback) const {
   MonitorAutoLock lock(mMonitor);
 
   AddSizeOfCbData metadata;
-  if (mPalettedImageData) {
-    metadata.heap += aMallocSizeOf(mPalettedImageData);
-  }
   if (mLockedSurface) {
     metadata.heap += aMallocSizeOf(mLockedSurface);
   }
   if (mOptSurface) {
     metadata.heap += aMallocSizeOf(mOptSurface);
   }
   if (mRawSurface) {
     metadata.heap += aMallocSizeOf(mRawSurface);
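
With mFrameRect gone, imgFrame::Finish() derives the remaining invalidation purely from mDecoded and the full image bounds. The following standalone sketch mirrors that calculation; the helper name is illustrative and not part of the patch, and IntRect/IntPoint/IntSize are the mozilla::gfx types used throughout imgFrame:

// Sketch only: the band Finish() still needs to pass to ImageUpdatedInternal(),
// given the rows already recorded in mDecoded.
static IntRect RemainingInvalidation(const IntRect& aDecoded,
                                     const IntSize& aImageSize) {
  IntRect delta(0, 0, aImageSize.width, 0);
  if (aDecoded.y == 0) {
    // Top-down decode: the rows below mDecoded have not been invalidated yet.
    delta.y = aDecoded.height;
    delta.height = aImageSize.height - aDecoded.height;
  } else if (aDecoded.y + aDecoded.height == aImageSize.height) {
    // Bottom-up decode: invalidate a band starting at row 0; its union with
    // mDecoded (a bounding rect) covers the whole frame.
    delta.height = aImageSize.height - aDecoded.y;
  } else {
    // Decoders only update from the top or the bottom; treat anything else as
    // the whole frame.
    delta = IntRect(IntPoint(), aImageSize);
  }
  return delta;
}
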
--- a/image/imgFrame.h
+++ b/image/imgFrame.h
@@ -45,21 +45,20 @@ class imgFrame {
   /**
    * Initialize this imgFrame with an empty surface and prepare it for being
    * written to by a decoder.
    *
    * This is appropriate for use with decoded images, but it should not be used
    * when drawing content into an imgFrame, as it may use a different graphics
    * backend than normal content drawing.
    */
-  nsresult InitForDecoder(const nsIntSize& aImageSize, const nsIntRect& aRect,
-                          SurfaceFormat aFormat, uint8_t aPaletteDepth,
+  nsresult InitForDecoder(const nsIntSize& aImageSize, SurfaceFormat aFormat,
                           bool aNonPremult,
                           const Maybe<AnimationParams>& aAnimParams,
-                          bool aIsFullFrame, bool aShouldRecycle);
+                          bool aShouldRecycle);
 
   /**
    * Reinitialize this imgFrame with the new parameters, but otherwise retain
    * the underlying buffer.
    *
    * This is appropriate for use with animated images, where the decoder was
    * given an IDecoderFrameRecycler object which may yield a recycled imgFrame
    * that was discarded to save memory.
@@ -156,42 +155,34 @@ class imgFrame {
   void WaitUntilFinished() const;
 
   /**
    * Returns the number of bytes per pixel this imgFrame requires.  This is a
    * worst-case value that does not take into account the effects of format
    * changes caused by Optimize(), since an imgFrame is not optimized throughout
    * its lifetime.
    */
-  uint32_t GetBytesPerPixel() const { return GetIsPaletted() ? 1 : 4; }
+  uint32_t GetBytesPerPixel() const { return 4; }
 
-  const IntSize& GetImageSize() const { return mImageSize; }
-  const IntRect& GetRect() const { return mFrameRect; }
-  IntSize GetSize() const { return mFrameRect.Size(); }
+  const IntSize& GetSize() const { return mImageSize; }
+  IntRect GetRect() const { return IntRect(IntPoint(0, 0), mImageSize); }
   const IntRect& GetBlendRect() const { return mBlendRect; }
   IntRect GetBoundedBlendRect() const {
-    return mBlendRect.Intersect(mFrameRect);
+    return mBlendRect.Intersect(GetRect());
   }
   FrameTimeout GetTimeout() const { return mTimeout; }
   BlendMethod GetBlendMethod() const { return mBlendMethod; }
   DisposalMethod GetDisposalMethod() const { return mDisposalMethod; }
   bool FormatHasAlpha() const { return mFormat == SurfaceFormat::B8G8R8A8; }
   void GetImageData(uint8_t** aData, uint32_t* length) const;
   uint8_t* GetImageData() const;
 
-  bool GetIsPaletted() const;
-  void GetPaletteData(uint32_t** aPalette, uint32_t* length) const;
-  uint32_t* GetPaletteData() const;
-  uint8_t GetPaletteDepth() const { return mPaletteDepth; }
-
   const IntRect& GetDirtyRect() const { return mDirtyRect; }
   void SetDirtyRect(const IntRect& aDirtyRect) { mDirtyRect = aDirtyRect; }
 
-  bool IsFullFrame() const { return mIsFullFrame; }
-
   void SetOptimizable();
 
   void FinalizeSurface();
   already_AddRefed<SourceSurface> GetSourceSurface();
 
   struct AddSizeOfCbData {
     AddSizeOfCbData()
         : heap(0), nonHeap(0), handles(0), index(0), externalId(0) {}
@@ -235,20 +226,16 @@ class imgFrame {
   /**
    * @param aTemporary  If true, it will assume the caller does not require a
    *                    wrapping RecycleSourceSurface to protect the underlying
    *                    surface from recycling. The reference to the surface
    *                    must be freed before releasing the main thread context.
    */
   already_AddRefed<SourceSurface> GetSourceSurfaceInternal(bool aTemporary);
 
-  uint32_t PaletteDataLength() const {
-    return mPaletteDepth ? (size_t(1) << mPaletteDepth) * sizeof(uint32_t) : 0;
-  }
-
   struct SurfaceWithFormat {
     RefPtr<gfxDrawable> mDrawable;
     SurfaceFormat mFormat;
     SurfaceWithFormat() : mFormat(SurfaceFormat::UNKNOWN) {}
     SurfaceWithFormat(gfxDrawable* aDrawable, SurfaceFormat aFormat)
         : mDrawable(aDrawable), mFormat(aFormat) {}
     SurfaceWithFormat(SurfaceWithFormat&& aOther)
         : mDrawable(std::move(aOther.mDrawable)), mFormat(aOther.mFormat) {}
@@ -315,27 +302,16 @@ class imgFrame {
 
   //////////////////////////////////////////////////////////////////////////////
   // Effectively const data, only mutated in the Init methods.
   //////////////////////////////////////////////////////////////////////////////
 
   //! The size of the buffer we are decoding to.
   IntSize mImageSize;
 
-  //! XXX(aosmond): This means something different depending on the context. We
-  //!               should correct this.
-  //!
-  //! There are several different contexts for mFrameRect:
-  //! - If for non-animated image, it will be originate at (0, 0) and matches
-  //!   the dimensions of mImageSize.
-  //! - If for an APNG, it also matches the above.
-  //! - If for a GIF which is producing full frames, it matches the above.
-  //! - If for a GIF which is producing partial frames, it matches mBlendRect.
-  IntRect mFrameRect;
-
   //! The contents for the frame, as represented in the encoded image. This may
   //! differ from mImageSize because it may be a partial frame. For the first
   //! frame, this means we need to shift the data in place, and for animated
   //! frames, it likely needs to be combined with a previous frame to get the
   //! full contents.
   IntRect mBlendRect;
 
   //! This is the region that has changed between this frame and the previous
@@ -345,45 +321,33 @@ class imgFrame {
 
   //! The timeout for this frame.
   FrameTimeout mTimeout;
 
   DisposalMethod mDisposalMethod;
   BlendMethod mBlendMethod;
   SurfaceFormat mFormat;
 
-  // The palette and image data for images that are paletted, since Cairo
-  // doesn't support these images.
-  // The paletted data comes first, then the image data itself.
-  // Total length is PaletteDataLength() + GetImageDataLength().
-  uint8_t* mPalettedImageData;
-  uint8_t mPaletteDepth;
-
   bool mNonPremult;
-
-  //! True if the frame has all of the data stored in it, false if it needs to
-  //! be combined with another frame (e.g. the previous frame) to be complete.
-  bool mIsFullFrame;
 };
 
 /**
  * A reference to an imgFrame that holds the imgFrame's surface in memory,
  * allowing drawing. If you have a DrawableFrameRef |ref| and |if (ref)| returns
  * true, then calls to Draw() and GetSourceSurface() are guaranteed to succeed.
  */
 class DrawableFrameRef final {
   typedef gfx::DataSourceSurface DataSourceSurface;
 
  public:
   DrawableFrameRef() {}
 
   explicit DrawableFrameRef(imgFrame* aFrame) : mFrame(aFrame) {
     MOZ_ASSERT(aFrame);
     MonitorAutoLock lock(aFrame->mMonitor);
-    MOZ_ASSERT(!aFrame->GetIsPaletted(), "Paletted must use RawAccessFrameRef");
 
     if (aFrame->mRawSurface) {
       mRef.emplace(aFrame->mRawSurface, DataSourceSurface::READ);
       if (!mRef->IsMapped()) {
         mFrame = nullptr;
         mRef.reset();
       }
     } else {
@@ -427,20 +391,20 @@ class DrawableFrameRef final {
 
   RefPtr<imgFrame> mFrame;
   Maybe<DataSourceSurface::ScopedMap> mRef;
 };
 
 /**
  * A reference to an imgFrame that holds the imgFrame's surface in memory in a
  * format appropriate for access as raw data. If you have a RawAccessFrameRef
- * |ref| and |if (ref)| is true, then calls to GetImageData() and
- * GetPaletteData() are guaranteed to succeed. This guarantee is stronger than
- * DrawableFrameRef, so everything that a valid DrawableFrameRef guarantees is
- * also guaranteed by a valid RawAccessFrameRef.
+ * |ref| and |if (ref)| is true, then calls to GetImageData() are guaranteed
+ * to succeed. This guarantee is stronger than DrawableFrameRef, so everything
+ * that a valid DrawableFrameRef guarantees is also guaranteed by a valid
+ * RawAccessFrameRef.
  *
  * This may be considerably more expensive than is necessary just for drawing,
  * so only use this when you need to read or write the raw underlying image data
  * that the imgFrame holds.
  *
  * Once all an imgFrame's RawAccessFrameRefs go out of scope, new
  * RawAccessFrameRefs cannot be created.
  */
@@ -502,17 +466,16 @@ class RawAccessFrameRef final {
     if (mFrame) {
       mFrame->UnlockImageData();
     }
     mFrame = nullptr;
     mData = nullptr;
   }
 
   uint8_t* Data() const { return mData; }
-  uint32_t PaletteDataLength() const { return mFrame->PaletteDataLength(); }
 
  private:
   RawAccessFrameRef(const RawAccessFrameRef& aOther) = delete;
   RawAccessFrameRef& operator=(const RawAccessFrameRef& aOther) = delete;
 
   RefPtr<imgFrame> mFrame;
   uint8_t* mData;
 };
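
With the palette accessors removed, a RawAccessFrameRef always exposes 32bpp BGRA data and there is no longer a palette prefix to skip over. A minimal usage sketch, assuming "frame" is an imgFrame that has already been initialized and decoded (the variable names are illustrative, not code from this patch):

// Sketch only: raw pixel access after this change.
RawAccessFrameRef ref = frame->RawAccessRef();
if (ref) {
  uint8_t* pixels = ref.Data();         // BGRA data, GetBytesPerPixel() == 4
  uint32_t length = 0;
  uint8_t* data = nullptr;
  frame->GetImageData(&data, &length);  // length == width * height * 4
  // Read or write pixels directly; no PaletteDataLength() offset is needed.
}
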
--- a/image/test/gtest/Common.cpp
+++ b/image/test/gtest/Common.cpp
@@ -350,17 +350,17 @@ void CheckGeneratedSurface(SourceSurface
   const int32_t heightBelow = surfaceSize.height - aRect.YMost();
   EXPECT_TRUE(RectIsSolidColor(
       aSurface, IntRect(0, aRect.YMost(), surfaceSize.width, heightBelow),
       aOuterColor, aFuzz));
 }
 
 void CheckGeneratedPalettedImage(Decoder* aDecoder, const IntRect& aRect) {
   RawAccessFrameRef currentFrame = aDecoder->GetCurrentFrameRef();
-  IntSize imageSize = currentFrame->GetImageSize();
+  IntSize imageSize = currentFrame->GetSize();
 
   // This diagram shows how the surface is divided into regions that the code
   // below tests for the correct content. The output rect is the bounds of the
   // region labeled 'C'.
   //
   // +---------------------------+
   // |             A             |
   // +---------+--------+--------+
--- a/image/test/gtest/TestAnimationFrameBuffer.cpp
+++ b/image/test/gtest/TestAnimationFrameBuffer.cpp
@@ -13,19 +13,18 @@ using namespace mozilla::image;
 
 static already_AddRefed<imgFrame> CreateEmptyFrame(
     const IntSize& aSize = IntSize(1, 1),
     const IntRect& aFrameRect = IntRect(0, 0, 1, 1), bool aCanRecycle = true) {
   RefPtr<imgFrame> frame = new imgFrame();
   AnimationParams animParams{aFrameRect, FrameTimeout::Forever(),
                              /* aFrameNum */ 1, BlendMethod::OVER,
                              DisposalMethod::NOT_SPECIFIED};
-  nsresult rv = frame->InitForDecoder(aSize, IntRect(IntPoint(0, 0), aSize),
-                                      SurfaceFormat::B8G8R8A8, 0, false,
-                                      Some(animParams), true, aCanRecycle);
+  nsresult rv = frame->InitForDecoder(aSize, SurfaceFormat::B8G8R8A8, false,
+                                      Some(animParams), aCanRecycle);
   EXPECT_TRUE(NS_SUCCEEDED(rv));
   RawAccessFrameRef frameRef = frame->RawAccessRef();
   frame->SetRawAccessOnly();
   // Normally the blend animation filter would set the dirty rect, but since
   // we aren't producing an actual animation here, we need to fake it.
   frame->SetDirtyRect(aFrameRect);
   frame->Finish();
   return frame.forget();
--- a/image/test/gtest/TestBlendAnimationFilter.cpp
+++ b/image/test/gtest/TestBlendAnimationFilter.cpp
@@ -17,17 +17,17 @@
 
 using namespace mozilla;
 using namespace mozilla::gfx;
 using namespace mozilla::image;
 
 static already_AddRefed<Decoder> CreateTrivialBlendingDecoder() {
   gfxPrefs::GetSingleton();
   DecoderType decoderType = DecoderFactory::GetDecoderType("image/gif");
-  DecoderFlags decoderFlags = DecoderFlags::BLEND_ANIMATION;
+  DecoderFlags decoderFlags = DefaultDecoderFlags();
   SurfaceFlags surfaceFlags = DefaultSurfaceFlags();
   auto sourceBuffer = MakeNotNull<RefPtr<SourceBuffer>>();
   return DecoderFactory::CreateAnonymousDecoder(
       decoderType, sourceBuffer, Nothing(), decoderFlags, surfaceFlags);
 }
 
 template <typename Func>
 RawAccessFrameRef WithBlendAnimationFilter(Decoder* aDecoder,
--- a/image/test/gtest/TestDecoders.cpp
+++ b/image/test/gtest/TestDecoders.cpp
@@ -340,17 +340,17 @@ static void WithSingleChunkAnimationDeco
   RefPtr<IDecodingTask> task = DecoderFactory::CreateMetadataDecoder(
       decoderType, rasterImage, sourceBuffer);
   ASSERT_TRUE(task != nullptr);
 
   // Run the metadata decoder synchronously.
   task->Run();
 
   // Create a decoder.
-  DecoderFlags decoderFlags = DecoderFlags::BLEND_ANIMATION;
+  DecoderFlags decoderFlags = DefaultDecoderFlags();
   SurfaceFlags surfaceFlags = DefaultSurfaceFlags();
   RefPtr<Decoder> decoder = DecoderFactory::CreateAnonymousDecoder(
       decoderType, sourceBuffer, Nothing(), decoderFlags, surfaceFlags);
   ASSERT_TRUE(decoder != nullptr);
 
   // Create an AnimationSurfaceProvider which will manage the decoding process
   // and make this decoder's output available in the surface cache.
   SurfaceKey surfaceKey = RasterSurfaceKey(aTestCase.mOutputSize, surfaceFlags,
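
Outside imgFrame itself, the caller-facing change is simply that nothing needs to request blend-on-decode anymore; it is the only behaviour. A hedged before/after sketch of the flag setup (the commented "before" line reflects the removed test code above, not a new API):

// Before this patch, tests opted in explicitly:
//   DecoderFlags decoderFlags = DecoderFlags::BLEND_ANIMATION;
// After it, the default flags are sufficient:
DecoderFlags decoderFlags = DefaultDecoderFlags();
SurfaceFlags surfaceFlags = DefaultSurfaceFlags();
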
--- a/image/test/gtest/TestFrameAnimator.cpp
+++ b/image/test/gtest/TestFrameAnimator.cpp
@@ -37,17 +37,17 @@ static void CheckFrameAnimatorBlendResul
       aImage->GetFrame(imgIContainer::FRAME_CURRENT, imgIContainer::FLAG_NONE);
   ASSERT_TRUE(surface != nullptr);
   CheckGeneratedSurface(surface, IntRect(0, 0, 50, 50), BGRAColor::Green(),
                         BGRAColor::Red());
 }
 
 template <typename Func>
 static void WithFrameAnimatorDecode(const ImageTestCase& aTestCase,
-                                    bool aBlendFilter, Func aResultChecker) {
+                                    Func aResultChecker) {
   // Create an image.
   RefPtr<Image> image = ImageFactory::CreateAnonymousImage(
       nsDependentCString(aTestCase.mMimeType));
   ASSERT_TRUE(!image->HasError());
 
   NotNull<RefPtr<RasterImage>> rasterImage =
       WrapNotNull(static_cast<RasterImage*>(image.get()));
 
@@ -75,61 +75,44 @@ static void WithFrameAnimatorDecode(cons
 
   // Run the metadata decoder synchronously.
   task->Run();
   task = nullptr;
 
   // Create an AnimationSurfaceProvider which will manage the decoding process
   // and make this decoder's output available in the surface cache.
   DecoderFlags decoderFlags = DefaultDecoderFlags();
-  if (aBlendFilter) {
-    decoderFlags |= DecoderFlags::BLEND_ANIMATION;
-  }
   SurfaceFlags surfaceFlags = DefaultSurfaceFlags();
   rv = DecoderFactory::CreateAnimationDecoder(
       decoderType, rasterImage, sourceBuffer, aTestCase.mSize, decoderFlags,
       surfaceFlags, 0, getter_AddRefs(task));
   EXPECT_EQ(rv, NS_OK);
   ASSERT_TRUE(task != nullptr);
 
   // Run the full decoder synchronously.
   task->Run();
 
   // Call the lambda to verify the expected results.
   aResultChecker(rasterImage.get());
 }
 
-static void CheckFrameAnimatorBlend(const ImageTestCase& aTestCase,
-                                    bool aBlendFilter) {
-  WithFrameAnimatorDecode(aTestCase, aBlendFilter, [&](RasterImage* aImage) {
+static void CheckFrameAnimatorBlend(const ImageTestCase& aTestCase) {
+  WithFrameAnimatorDecode(aTestCase, [&](RasterImage* aImage) {
     CheckFrameAnimatorBlendResults(aTestCase, aImage);
   });
 }
 
 class ImageFrameAnimator : public ::testing::Test {
  protected:
   AutoInitializeImageLib mInit;
 };
 
-TEST_F(ImageFrameAnimator, BlendGIFWithAnimator) {
-  CheckFrameAnimatorBlend(BlendAnimatedGIFTestCase(), /* aBlendFilter */ false);
-}
-
 TEST_F(ImageFrameAnimator, BlendGIFWithFilter) {
-  CheckFrameAnimatorBlend(BlendAnimatedGIFTestCase(), /* aBlendFilter */ true);
-}
-
-TEST_F(ImageFrameAnimator, BlendPNGWithAnimator) {
-  CheckFrameAnimatorBlend(BlendAnimatedPNGTestCase(), /* aBlendFilter */ false);
+  CheckFrameAnimatorBlend(BlendAnimatedGIFTestCase());
 }
 
 TEST_F(ImageFrameAnimator, BlendPNGWithFilter) {
-  CheckFrameAnimatorBlend(BlendAnimatedPNGTestCase(), /* aBlendFilter */ true);
-}
-
-TEST_F(ImageFrameAnimator, BlendWebPWithAnimator) {
-  CheckFrameAnimatorBlend(BlendAnimatedWebPTestCase(),
-                          /* aBlendFilter */ false);
+  CheckFrameAnimatorBlend(BlendAnimatedPNGTestCase());
 }
 
 TEST_F(ImageFrameAnimator, BlendWebPWithFilter) {
-  CheckFrameAnimatorBlend(BlendAnimatedWebPTestCase(), /* aBlendFilter */ true);
+  CheckFrameAnimatorBlend(BlendAnimatedWebPTestCase());
 }
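
With the aBlendFilter parameter gone, each animated format needs only one test per case. As an illustration of the simplified entry point, a hypothetical additional format would be covered like this (BlendAnimatedAVIFTestCase is not a test case added by this patch):

TEST_F(ImageFrameAnimator, BlendAVIFWithFilter) {
  // Hypothetical test case name, shown only to illustrate the single
  // CheckFrameAnimatorBlend() call now required per format.
  CheckFrameAnimatorBlend(BlendAnimatedAVIFTestCase());
}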