Bug 1079627 (Part 2) - Add SourceBuffer. r=tn
☠☠ backed out by cb26891d69e9 ☠☠
author Seth Fowler <seth@mozilla.com>
date Mon, 12 Jan 2015 01:20:23 -0800
changeset 249144 44b622a479b60c6ecd1883a84b782e54a44221a7
parent 249143 c86c43915254b769d8b63cdf997a159d32905055
child 249145 e7add8446221fcce51f580f37cd779a42c876e3e
reviewers tn
bugs 1079627
milestone 37.0a1
image/src/SourceBuffer.cpp
image/src/SourceBuffer.h
image/src/SurfaceCache.cpp
image/src/SurfaceCache.h
image/src/moz.build
new file mode 100644
--- /dev/null
+++ b/image/src/SourceBuffer.cpp
@@ -0,0 +1,460 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "SourceBuffer.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include "mozilla/Likely.h"
+#include "MainThreadUtils.h"
+#include "SurfaceCache.h"
+
+using std::max;
+using std::min;
+
+namespace mozilla {
+namespace image {
+
+//////////////////////////////////////////////////////////////////////////////
+// SourceBufferIterator implementation.
+//////////////////////////////////////////////////////////////////////////////
+
+SourceBufferIterator::State
+SourceBufferIterator::AdvanceOrScheduleResume(IResumable* aConsumer)
+{
+  MOZ_ASSERT(mOwner);
+  return mOwner->AdvanceIteratorOrScheduleResume(*this, aConsumer);
+}
+
+bool
+SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const
+{
+  MOZ_ASSERT(mOwner);
+  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// SourceBuffer implementation.
+//////////////////////////////////////////////////////////////////////////////
+
+SourceBuffer::SourceBuffer()
+  : mMutex("image::SourceBuffer")
+{ }
+
+nsresult
+SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk)
+{
+  mMutex.AssertCurrentThreadOwns();
+
+#ifdef DEBUG
+  if (mChunks.Length() > 0) {
+    NS_WARNING("Appending an extra chunk for SourceBuffer");
+  }
+#endif
+
+  if (MOZ_UNLIKELY(!aChunk)) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  if (MOZ_UNLIKELY(!mChunks.AppendElement(Move(*aChunk)))) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  return NS_OK;
+}
+
+Maybe<SourceBuffer::Chunk>
+SourceBuffer::CreateChunk(size_t aCapacity)
+{
+  if (MOZ_UNLIKELY(aCapacity == 0)) {
+    MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
+    return Nothing();
+  }
+
+  // Protect against overflow.
+  if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
+    return Nothing();
+  }
+
+  // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
+  // size of a page).
+  size_t roundedCapacity =
+    (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
+  MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
+  MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");
+
+  // Use the size of the SurfaceCache as an additional heuristic to avoid
+  // allocating huge buffers. Generally images do not get smaller when decoded,
+  // so if we can't store the source data in the SurfaceCache, we assume that
+  // there's no way we'll be able to store the decoded version.
+  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(roundedCapacity))) {
+    return Nothing();
+  }
+
+  return Some(Chunk(roundedCapacity));
+}
+
+size_t
+SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity)
+{
+  mMutex.AssertCurrentThreadOwns();
+
+  // We grow the source buffer using a Fibonacci growth rate.
+
+  size_t length = mChunks.Length();
+
+  if (length == 0) {
+    return aMinCapacity;
+  }
+
+  if (length == 1) {
+    return max(2 * mChunks[0].Capacity(), aMinCapacity);
+  }
+
+  return max(mChunks[length - 1].Capacity() + mChunks[length - 2].Capacity(),
+             aMinCapacity);
+}
+
+void
+SourceBuffer::AddWaitingConsumer(IResumable* aConsumer)
+{
+  mMutex.AssertCurrentThreadOwns();
+
+  MOZ_ASSERT(!mStatus, "Waiting when we're complete?");
+
+  if (MOZ_UNLIKELY(NS_IsMainThread())) {
+    NS_WARNING("SourceBuffer consumer on the main thread needed to wait");
+  }
+
+  mWaitingConsumers.AppendElement(aConsumer);
+}
+
+void
+SourceBuffer::ResumeWaitingConsumers()
+{
+  mMutex.AssertCurrentThreadOwns();
+
+  if (mWaitingConsumers.Length() == 0) {
+    return;
+  }
+
+  for (uint32_t i = 0 ; i < mWaitingConsumers.Length() ; ++i) {
+    mWaitingConsumers[i]->Resume();
+  }
+
+  mWaitingConsumers.Clear();
+}
+
+nsresult
+SourceBuffer::ExpectLength(size_t aExpectedLength)
+{
+  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");
+
+  MutexAutoLock lock(mMutex);
+
+  if (MOZ_UNLIKELY(mStatus)) {
+    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
+    return NS_OK;
+  }
+
+  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
+    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
+    return NS_OK;
+  }
+
+  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aExpectedLength))))) {
+    return HandleError(NS_ERROR_OUT_OF_MEMORY);
+  }
+
+  return NS_OK;
+}
+
+nsresult
+SourceBuffer::Append(const char* aData, size_t aLength)
+{
+  MOZ_ASSERT(aData, "Should have a buffer");
+  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");
+
+  size_t currentChunkCapacity = 0;
+  size_t currentChunkLength = 0;
+  char* currentChunkData = nullptr;
+  size_t currentChunkRemaining = 0;
+  size_t forCurrentChunk = 0;
+  size_t forNextChunk = 0;
+  size_t nextChunkCapacity = 0;
+
+  {
+    MutexAutoLock lock(mMutex);
+
+    if (MOZ_UNLIKELY(mStatus)) {
+      // This SourceBuffer is already complete; ignore further data.
+      return NS_ERROR_FAILURE;
+    }
+
+    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
+      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
+        return HandleError(NS_ERROR_OUT_OF_MEMORY);
+      }
+    }
+
+    // Copy out the current chunk's information so we can release the lock.
+    // Note that this wouldn't be safe if multiple producers were allowed!
+    Chunk& currentChunk = mChunks.LastElement();
+    currentChunkCapacity = currentChunk.Capacity();
+    currentChunkLength = currentChunk.Length();
+    currentChunkData = currentChunk.Data();
+
+    // Partition this data between the current chunk and the next chunk.
+    // (Because we always allocate a chunk big enough to fit everything passed
+    // to Append, we'll never need more than those two chunks to store
+    // everything.)
+    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
+    forCurrentChunk = min(aLength, currentChunkRemaining);
+    forNextChunk = aLength - forCurrentChunk;
+
+    // If we'll need another chunk, determine what its capacity should be while
+    // we still hold the lock.
+    nextChunkCapacity = forNextChunk > 0
+                      ? FibonacciCapacityWithMinimum(forNextChunk)
+                      : 0;
+  }
+
+  // Write everything we can fit into the current chunk.
+  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
+  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);
+
+  // If there's something left, create a new chunk and write it there.
+  Maybe<Chunk> nextChunk;
+  if (forNextChunk > 0) {
+    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");
+    nextChunk = CreateChunk(nextChunkCapacity);
+    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
+      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
+      nextChunk->AddLength(forNextChunk);
+    }
+  }
+
+  // Update shared data structures.
+  {
+    MutexAutoLock lock(mMutex);
+
+    // Update the length of the current chunk.
+    Chunk& currentChunk = mChunks.LastElement();
+    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
+    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
+               "Multiple producers?");
+
+    currentChunk.AddLength(forCurrentChunk);
+
+    // If we created a new chunk, add it to the series.
+    if (forNextChunk > 0) {
+      if (MOZ_UNLIKELY(!nextChunk)) {
+        return HandleError(NS_ERROR_OUT_OF_MEMORY);
+      }
+
+      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(nextChunk))))) {
+        return HandleError(NS_ERROR_OUT_OF_MEMORY);
+      }
+    }
+
+    // Resume any waiting readers now that there's new data.
+    ResumeWaitingConsumers();
+  }
+
+  return NS_OK;
+}
+
+void
+SourceBuffer::Complete(nsresult aStatus)
+{
+  MutexAutoLock lock(mMutex);
+
+  if (MOZ_UNLIKELY(mStatus)) {
+    MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
+    return;
+  }
+
+  if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
+    // It's illegal to succeed without writing anything.
+    aStatus = NS_ERROR_FAILURE;
+  }
+
+  mStatus = Some(aStatus);
+
+  // Resume any waiting consumers now that we're complete.
+  ResumeWaitingConsumers();
+}
+
+bool
+SourceBuffer::IsComplete()
+{
+  MutexAutoLock lock(mMutex);
+  return bool(mStatus);
+}
+
+size_t
+SourceBuffer::SizeOfIncludingThisWithComputedFallback(MallocSizeOf
+                                                        aMallocSizeOf) const
+{
+  MutexAutoLock lock(mMutex);
+
+  size_t n = aMallocSizeOf(this);
+  n += mChunks.SizeOfExcludingThis(aMallocSizeOf);
+
+  for (uint32_t i = 0 ; i < mChunks.Length() ; ++i) {
+    size_t chunkSize = aMallocSizeOf(mChunks[i].Data());
+
+    if (chunkSize == 0) {
+      // We're on a platform where moz_malloc_size_of always returns 0.
+      chunkSize = mChunks[i].Capacity();
+    }
+
+    n += chunkSize;
+  }
+
+  return n;
+}
+
+bool
+SourceBuffer::RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
+                                         size_t aBytes) const
+{
+  MutexAutoLock lock(mMutex);
+
+  // If we're not complete, we always say no.
+  if (!mStatus) {
+    return false;
+  }
+
+  // If the iterator's at the end, the answer is trivial.
+  if (!aIterator.HasMore()) {
+    return true;
+  }
+
+  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
+  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
+  size_t iteratorLength = aIterator.mData.mIterating.mLength;
+
+  // Include the bytes the iterator is currently pointing to in the limit, so
+  // that the current chunk doesn't have to be a special case.
+  size_t bytes = aBytes + iteratorOffset + iteratorLength;
+
+  // Count the length over all of our chunks, starting with the one that the
+  // iterator is currently pointing to. (This is O(N), but N is expected to be
+  // ~1, so it doesn't seem worth caching the length separately.)
+  size_t lengthSoFar = 0;
+  for (uint32_t i = iteratorChunk ; i < mChunks.Length() ; ++i) {
+    lengthSoFar += mChunks[i].Length();
+    if (lengthSoFar > bytes) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+SourceBufferIterator::State
+SourceBuffer::AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
+                                              IResumable* aConsumer)
+{
+  MutexAutoLock lock(mMutex);
+
+  if (MOZ_UNLIKELY(!aIterator.HasMore())) {
+    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
+    return SourceBufferIterator::COMPLETE;
+  }
+
+  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
+    // This SourceBuffer is complete due to an error; all reads fail.
+    return aIterator.SetComplete(*mStatus);
+  }
+
+  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
+    // We haven't gotten an initial chunk yet.
+    AddWaitingConsumer(aConsumer);
+    return aIterator.SetWaiting();
+  }
+
+  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
+  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());
+
+  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
+  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
+                       aIterator.mData.mIterating.mLength;
+  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
+  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());
+
+  if (iteratorEnd < currentChunk.Length()) {
+    // There's more data in the current chunk.
+    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
+                              iteratorEnd, currentChunk.Length() - iteratorEnd);
+  }
+
+  if (iteratorEnd == currentChunk.Capacity() &&
+      !IsLastChunk(iteratorChunkIdx)) {
+    // Advance to the next chunk.
+    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
+    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
+                              nextChunk.Length());
+  }
+
+  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");
+
+  if (mStatus) {
+    // There's no more data and this SourceBuffer completed successfully.
+    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
+    return aIterator.SetComplete(*mStatus);
+  }
+
+  // We're not complete, but there's no more data right now. Arrange to wake up
+  // the consumer when we get more data.
+  AddWaitingConsumer(aConsumer);
+  return aIterator.SetWaiting();
+}
+
+nsresult
+SourceBuffer::HandleError(nsresult aError)
+{
+  MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
+  MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY,
+             "Unexpected error; may want to notify waiting readers, which "
+             "HandleError currently doesn't do");
+
+  mMutex.AssertCurrentThreadOwns();
+
+  NS_WARNING("SourceBuffer encountered an unrecoverable error");
+
+  // Record the error.
+  mStatus = Some(aError);
+
+  // Drop our references to waiting readers.
+  mWaitingConsumers.Clear();
+
+  return *mStatus;
+}
+
+bool
+SourceBuffer::IsEmpty()
+{
+  mMutex.AssertCurrentThreadOwns();
+  return mChunks.Length() == 0 ||
+         mChunks[0].Length() == 0;
+}
+
+bool
+SourceBuffer::IsLastChunk(uint32_t aChunk)
+{
+  mMutex.AssertCurrentThreadOwns();
+  return aChunk + 1 == mChunks.Length();
+}
+
+} // namespace image
+} // namespace mozilla
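
CreateChunk() above rounds each requested capacity up to a multiple of MIN_CHUNK_CAPACITY and refuses allocations the SurfaceCache couldn't hold, while FibonacciCapacityWithMinimum() sizes each additional chunk as the sum of the previous two. The arithmetic is easy to check in isolation; the following standalone sketch mirrors only that arithmetic (the helper names and sample values are invented for illustration, and it assumes the 4096-byte MIN_CHUNK_CAPACITY defined in SourceBuffer.h):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustration only -- a standalone mirror of the capacity arithmetic in
// SourceBuffer::CreateChunk() and SourceBuffer::FibonacciCapacityWithMinimum().
static const size_t kMinChunkCapacity = 4096;  // Assumed equal to MIN_CHUNK_CAPACITY.

// Round a requested capacity up to the next multiple of the minimum chunk size.
// The bitmask trick requires the minimum to be a power of two (4096 is).
size_t RoundUpCapacity(size_t aCapacity)
{
  return (aCapacity + kMinChunkCapacity - 1) & ~(kMinChunkCapacity - 1);
}

// Capacity for the next chunk: the sum of the previous two chunks' capacities,
// but never less than the number of bytes we need to store right now.
size_t NextCapacity(size_t aPrev, size_t aPrevPrev, size_t aMinCapacity)
{
  return std::max(aPrev + aPrevPrev, aMinCapacity);
}

int main()
{
  // A 10000-byte request becomes a 12288-byte (three-page) chunk.
  std::printf("%zu\n", RoundUpCapacity(10000));            // 12288
  // With 12288- and 24576-byte chunks already allocated, the next chunk grows
  // to 36864 bytes unless the caller needs even more than that.
  std::printf("%zu\n", NextCapacity(24576, 12288, 4096));  // 36864
  return 0;
}

Append() relies on the same policy: because the next chunk's capacity is always at least as large as the bytes left over from the current chunk, a single Append() call never needs more than two chunks.
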
new file mode 100644
--- /dev/null
+++ b/image/src/SourceBuffer.h
@@ -0,0 +1,364 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * SourceBuffer is a single producer, multiple consumer data structure used for
+ * storing image source (compressed) data.
+ */
+
+#ifndef MOZILLA_IMAGELIB_SOURCEBUFFER_H_
+#define MOZILLA_IMAGELIB_SOURCEBUFFER_H_
+
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Move.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "nsRefPtr.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+namespace image {
+
+class SourceBuffer;
+
+/**
+ * IResumable is an interface for classes that can schedule themselves to resume
+ * their work later. An implementation of IResumable generally should post a
+ * runnable to some event target which continues the work of the task.
+ */
+struct IResumable
+{
+  MOZ_DECLARE_REFCOUNTED_TYPENAME(IResumable)
+
+  // Subclasses may or may not be XPCOM classes, so we just require that they
+  // implement AddRef and Release.
+  NS_IMETHOD_(MozExternalRefCountType) AddRef(void) = 0;
+  NS_IMETHOD_(MozExternalRefCountType) Release(void) = 0;
+
+  virtual void Resume() = 0;
+
+protected:
+  virtual ~IResumable() { }
+};
+
+/**
+ * SourceBufferIterator is a class that allows consumers of image source data to
+ * read the contents of a SourceBuffer sequentially.
+ *
+ * Consumers can advance through the SourceBuffer by calling
+ * AdvanceOrScheduleResume() repeatedly. After every advance, they should
+ * check the return value, which will tell them the iterator's new state.
+ *
+ * If WAITING is returned, AdvanceOrScheduleResume() has arranged
+ * to call the consumer's Resume() method later, so the consumer should save its
+ * state if needed and stop running.
+ *
+ * If the iterator's new state is READY, then the consumer can call Data() and
+ * Length() to read new data from the SourceBuffer.
+ *
+ * Finally, in the COMPLETE state the consumer can call CompletionStatus() to
+ * get the status passed to SourceBuffer::Complete().
+ */
+class SourceBufferIterator MOZ_FINAL
+{
+public:
+  enum State {
+    START,    // The iterator is at the beginning of the buffer.
+    READY,    // The iterator is pointing to new data.
+    WAITING,  // The iterator is blocked and the caller must yield.
+    COMPLETE  // The iterator is pointing to the end of the buffer.
+  };
+
+  explicit SourceBufferIterator(SourceBuffer* aOwner)
+    : mOwner(aOwner)
+    , mState(START)
+  {
+    MOZ_ASSERT(aOwner);
+    mData.mIterating.mChunk = 0;
+    mData.mIterating.mData = nullptr;
+    mData.mIterating.mOffset = 0;
+    mData.mIterating.mLength = 0;
+  }
+
+  SourceBufferIterator(SourceBufferIterator&& aOther)
+    : mOwner(Move(aOther.mOwner))
+    , mState(aOther.mState)
+    , mData(aOther.mData)
+  { }
+
+  SourceBufferIterator& operator=(SourceBufferIterator&& aOther)
+  {
+    mOwner = Move(aOther.mOwner);
+    mState = aOther.mState;
+    mData = aOther.mData;
+    return *this;
+  }
+
+  /**
+   * Returns true if there are no more than @aBytes remaining in the
+   * SourceBuffer. If the SourceBuffer is not yet complete, returns false.
+   */
+  bool RemainingBytesIsNoMoreThan(size_t aBytes) const;
+
+  /**
+   * Advances the iterator through the SourceBuffer if possible. If not,
+   * arranges to call the @aConsumer's Resume() method when more data is
+   * available.
+   */
+  State AdvanceOrScheduleResume(IResumable* aConsumer);
+
+  /// If at the end, returns the status passed to SourceBuffer::Complete().
+  nsresult CompletionStatus() const
+  {
+    MOZ_ASSERT(mState == COMPLETE, "Calling CompletionStatus() in the wrong state");
+    return mState == COMPLETE ? mData.mAtEnd.mStatus : NS_OK;
+  }
+
+  /// If we're ready to read, returns a pointer to the new data.
+  const char* Data() const
+  {
+    MOZ_ASSERT(mState == READY, "Calling Data() in the wrong state");
+    return mState == READY ? mData.mIterating.mData + mData.mIterating.mOffset
+                           : nullptr;
+  }
+
+  /// If we're ready to read, returns the length of the new data.
+  size_t Length() const
+  {
+    MOZ_ASSERT(mState == READY, "Calling Length() in the wrong state");
+    return mState == READY ? mData.mIterating.mLength : 0;
+  }
+
+private:
+  friend class SourceBuffer;
+
+  SourceBufferIterator(const SourceBufferIterator&) = delete;
+  SourceBufferIterator& operator=(const SourceBufferIterator&) = delete;
+
+  bool HasMore() const { return mState != COMPLETE; }
+
+  State SetReady(uint32_t aChunk, const char* aData,
+                size_t aOffset, size_t aLength)
+  {
+    MOZ_ASSERT(mState != COMPLETE);
+    mData.mIterating.mChunk = aChunk;
+    mData.mIterating.mData = aData;
+    mData.mIterating.mOffset = aOffset;
+    mData.mIterating.mLength = aLength;
+    return mState = READY;
+  }
+
+  State SetWaiting()
+  {
+    MOZ_ASSERT(mState != COMPLETE);
+    MOZ_ASSERT(mState != WAITING, "Did we get a spurious wakeup somehow?");
+    return mState = WAITING;
+  }
+
+  State SetComplete(nsresult aStatus)
+  {
+    mData.mAtEnd.mStatus = aStatus;
+    return mState = COMPLETE;
+  }
+
+  nsRefPtr<SourceBuffer> mOwner;
+
+  State mState;
+
+  /**
+   * This union contains our iteration state if we're still iterating (for
+   * states START, READY, and WAITING) and the status the SourceBuffer was
+   * completed with if we're in state COMPLETE.
+   */
+  union {
+    struct {
+      uint32_t mChunk;
+      const char* mData;
+      size_t mOffset;
+      size_t mLength;
+    } mIterating;
+    struct {
+      nsresult mStatus;
+    } mAtEnd;
+  } mData;
+};
+
+/**
+ * SourceBuffer is a parallel data structure used for storing image source
+ * (compressed) data.
+ *
+ * SourceBuffer is a single producer, multiple consumer data structure. The
+ * single producer calls Append() to append data to the buffer. In parallel,
+ * multiple consumers can call Iterator(), which returns a SourceBufferIterator
+ * that they can use to iterate through the buffer. The SourceBufferIterator
+ * returns a series of pointers which remain stable for the lifetime of the
+ * SourceBuffer, and the data they point to is immutable, ensuring that the
+ * producer never interferes with the consumers.
+ *
+ * In order to avoid blocking, SourceBuffer works with SourceBufferIterator to
+ * keep a list of consumers which are waiting for new data, and to resume them
+ * when the producer appends more. All consumers must implement the IResumable
+ * interface to make this possible.
+ *
+ * XXX(seth): We should add support for compacting a SourceBuffer. To do this,
+ * we need to have SourceBuffer keep track of how many live
+ * SourceBufferIterators point to it. When the SourceBuffer is complete and no
+ * live SourceBufferIterators for it remain, we can compact its contents into a
+ * single chunk.
+ */
+class SourceBuffer MOZ_FINAL
+{
+public:
+  MOZ_DECLARE_REFCOUNTED_TYPENAME(image::SourceBuffer)
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(image::SourceBuffer)
+
+  SourceBuffer();
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Producer methods.
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * If the producer knows how long the source data will be, it should call
+   * ExpectLength, which enables SourceBuffer to preallocate its buffer.
+   */
+  nsresult ExpectLength(size_t aExpectedLength);
+
+  /// Append the provided data to the buffer.
+  nsresult Append(const char* aData, size_t aLength);
+
+  /**
+   * Mark the buffer complete, with a status that will be available to
+   * consumers. Further calls to Append() are forbidden after Complete().
+   */
+  void Complete(nsresult aStatus);
+
+  /// Returns true if the buffer is complete.
+  bool IsComplete();
+
+  /// Memory reporting.
+  size_t SizeOfIncludingThisWithComputedFallback(MallocSizeOf) const;
+
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Consumer methods.
+  //////////////////////////////////////////////////////////////////////////////
+
+  /// Returns an iterator to this SourceBuffer.
+  SourceBufferIterator Iterator() { return SourceBufferIterator(this); }
+
+
+private:
+  friend class SourceBufferIterator;
+
+  ~SourceBuffer() { }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Chunk type and chunk-related methods.
+  //////////////////////////////////////////////////////////////////////////////
+
+  class Chunk
+  {
+  public:
+    explicit Chunk(size_t aCapacity)
+      : mCapacity(aCapacity)
+      , mLength(0)
+      , mData(MakeUnique<char[]>(mCapacity))
+    {
+      MOZ_ASSERT(aCapacity > 0, "Creating zero-capacity chunk");
+    }
+
+    Chunk(Chunk&& aOther)
+      : mCapacity(aOther.mCapacity)
+      , mLength(aOther.mLength)
+      , mData(Move(aOther.mData))
+    {
+      aOther.mCapacity = aOther.mLength = 0;
+    }
+
+    Chunk& operator=(Chunk&& aOther)
+    {
+      mCapacity = aOther.mCapacity;
+      mLength = aOther.mLength;
+      mData = Move(aOther.mData);
+      aOther.mCapacity = aOther.mLength = 0;
+      return *this;
+    }
+
+    bool AllocationFailed() const { return !mData; }
+    size_t Capacity() const { return mCapacity; }
+    size_t Length() const { return mLength; }
+    char* Data() const { return mData.get(); }
+
+    void AddLength(size_t aAdditionalLength)
+    {
+      MOZ_ASSERT(mLength + aAdditionalLength <= mCapacity);
+      mLength += aAdditionalLength;
+    }
+
+  private:
+    Chunk(const Chunk&) = delete;
+    Chunk& operator=(const Chunk&) = delete;
+
+    size_t mCapacity;
+    size_t mLength;
+    UniquePtr<char[]> mData;
+  };
+
+  nsresult AppendChunk(Maybe<Chunk>&& aChunk);
+  Maybe<Chunk> CreateChunk(size_t aCapacity);
+  size_t FibonacciCapacityWithMinimum(size_t aMinCapacity);
+
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Iterator / consumer methods.
+  //////////////////////////////////////////////////////////////////////////////
+
+  void AddWaitingConsumer(IResumable* aConsumer);
+  void ResumeWaitingConsumers();
+
+  typedef SourceBufferIterator::State State;
+
+  State AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
+                                        IResumable* aConsumer);
+  bool RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
+                                  size_t aBytes) const;
+
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Helper methods.
+  //////////////////////////////////////////////////////////////////////////////
+
+  nsresult HandleError(nsresult aError);
+  bool IsEmpty();
+  bool IsLastChunk(uint32_t aChunk);
+
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Member variables.
+  //////////////////////////////////////////////////////////////////////////////
+
+  static const size_t MIN_CHUNK_CAPACITY = 4096;
+
+  /// All private members are protected by mMutex.
+  mutable Mutex mMutex;
+
+  /// The data in this SourceBuffer, stored as a series of Chunks.
+  FallibleTArray<Chunk> mChunks;
+
+  /// Consumers which are waiting to be notified when new data is available.
+  nsTArray<nsRefPtr<IResumable>> mWaitingConsumers;
+
+  /// If present, marks this SourceBuffer complete with the given final status.
+  Maybe<nsresult> mStatus;
+};
+
+} // namespace image
+} // namespace mozilla
+
+#endif // MOZILLA_IMAGELIB_SOURCEBUFFER_H_
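
The header comments above spell out the producer/consumer contract, but no caller appears in this patch. Here is a rough sketch of how the two sides might drive the API: the DecodeTask class, its Parse()/OnComplete() helpers, and ProduceInto() are hypothetical stand-ins, error handling is elided, and a real consumer would typically post its work to a decode thread rather than running inline.

#include "SourceBuffer.h"

using namespace mozilla;
using namespace mozilla::image;

// Hypothetical consumer: parses whatever data is available and gets resumed
// (via IResumable::Resume) when the producer appends more.
class DecodeTask : public IResumable
{
public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodeTask)

  explicit DecodeTask(SourceBuffer* aBuffer)
    : mIterator(aBuffer->Iterator())
  { }

  // Called by SourceBuffer when new data arrives; a real implementation would
  // re-dispatch to its own thread instead of running inline.
  virtual void Resume() MOZ_OVERRIDE { Run(); }

  void Run()
  {
    while (true) {
      switch (mIterator.AdvanceOrScheduleResume(this)) {
        case SourceBufferIterator::READY:
          // New data is available at Data() / Length().
          Parse(mIterator.Data(), mIterator.Length());
          break;
        case SourceBufferIterator::WAITING:
          // We've been registered as a waiting consumer; save state and yield.
          return;
        case SourceBufferIterator::COMPLETE:
          // The status is whatever the producer passed to Complete().
          OnComplete(mIterator.CompletionStatus());
          return;
        default:
          MOZ_ASSERT_UNREACHABLE("Unexpected iterator state");
          return;
      }
    }
  }

private:
  virtual ~DecodeTask() { }

  void Parse(const char* aData, size_t aLength) { /* consume aLength bytes */ }
  void OnComplete(nsresult aStatus) { /* finish decoding */ }

  SourceBufferIterator mIterator;
};

// Hypothetical producer side: preallocate when the length is known, append
// data as it streams in, then mark the buffer complete exactly once.
void ProduceInto(SourceBuffer* aBuffer, const char* aData, size_t aLength)
{
  aBuffer->ExpectLength(aLength);   // Optional; avoids chunk reallocation.
  aBuffer->Append(aData, aLength);  // May be called repeatedly as data arrives.
  aBuffer->Complete(NS_OK);         // Append() is forbidden after this point.
}

The WAITING case is the important one: AdvanceOrScheduleResume() has already added the task to the SourceBuffer's waiting-consumer list under the lock, so the consumer just returns and will be woken by Resume() after the next Append() or Complete().
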
--- a/image/src/SurfaceCache.cpp
+++ b/image/src/SurfaceCache.cpp
@@ -819,16 +819,26 @@ SurfaceCache::CanHold(const IntSize& aSi
   if (!sInstance) {
     return false;
   }
 
   Cost cost = ComputeCost(aSize);
   return sInstance->CanHold(cost);
 }
 
+/* static */ bool
+SurfaceCache::CanHold(size_t aSize)
+{
+  if (!sInstance) {
+    return false;
+  }
+
+  return sInstance->CanHold(aSize);
+}
+
 /* static */ void
 SurfaceCache::LockImage(Image* aImageKey)
 {
   if (sInstance) {
     MutexAutoLock lock(sInstance->GetMutex());
     return sInstance->LockImage(aImageKey);
   }
 }
--- a/image/src/SurfaceCache.h
+++ b/image/src/SurfaceCache.h
@@ -231,16 +231,17 @@ struct SurfaceCache
    * Use CanHold() to avoid the need to create a temporary surface when we know
    * for sure the cache can't hold it.
    *
    * @param aSize  The dimensions of a surface in pixels.
    *
    * @return false if the surface cache can't hold a surface of that size.
    */
   static bool CanHold(const IntSize& aSize);
+  static bool CanHold(size_t aSize);
 
   /**
    * Locks an image, preventing any of that image's surfaces from expiring
    * unless they have a transient lifetime.
    *
    * Regardless of locking, any of an image's surfaces may be removed using
    * RemoveSurface(), and all of an image's surfaces are removed by
    * RemoveImage(), whether the image is locked or not.
--- a/image/src/moz.build
+++ b/image/src/moz.build
@@ -28,16 +28,17 @@ UNIFIED_SOURCES += [
     'ImageOps.cpp',
     'ImageWrapper.cpp',
     'imgFrame.cpp',
     'imgTools.cpp',
     'MultipartImage.cpp',
     'OrientedImage.cpp',
     'ScriptedNotificationObserver.cpp',
     'ShutdownTracker.cpp',
+    'SourceBuffer.cpp',
     'SurfaceCache.cpp',
     'SVGDocumentWrapper.cpp',
     'VectorImage.cpp',
 ]
 
 # These files can't be unified because of ImageLogging.h #include order issues.
 SOURCES += [
     'imgLoader.cpp',