Bug 913819 - HTTP cache v2: preload several chunks in advance in input stream to speed up reading, r=honzab
author:    Michal Novotny <michal.novotny@gmail.com>
date:      Fri, 02 May 2014 20:15:14 +0200
changeset: 181769 8a6a5bfdedc6b0abcf64100cbb8d4eeada5ef180
parent:    181768 c8d9cc05d899ab1265c8ef9a1cfb6c2218d5dfb3
child:     181770 c69333201bc72986792810b9606ee4db5023e8d8
push id:   272
push user: pvanderbeken@mozilla.com
push date: Mon, 05 May 2014 16:31:18 +0000
reviewers: honzab
bugs:      913819
milestone: 32.0a1
Bug 913819 - HTTP cache v2: preload several chunks in advance in input stream to speed up reading, r=honzab
netwerk/cache2/CacheFile.cpp
netwerk/cache2/CacheFile.h
netwerk/cache2/CacheFileInputStream.cpp
netwerk/cache2/CacheFileInputStream.h
netwerk/cache2/CacheFileOutputStream.cpp
netwerk/cache2/CacheObserver.cpp
netwerk/cache2/CacheObserver.h
--- a/netwerk/cache2/CacheFile.cpp
+++ b/netwerk/cache2/CacheFile.cpp
@@ -15,16 +15,17 @@
 #include "nsComponentManagerUtils.h"
 #include "nsProxyRelease.h"
 
 // When CACHE_CHUNKS is defined we always cache unused chunks in mCacheChunks.
 // When it is not defined, we always release the chunks ASAP, i.e. we cache
 // unused chunks only when:
 //  - CacheFile is memory-only
 //  - CacheFile is still waiting for the handle
+//  - the chunk is preloaded
 
 //#define CACHE_CHUNKS
 
 namespace mozilla {
 namespace net {
 
 class NotifyCacheFileListenerEvent : public nsRunnable {
 public:
@@ -176,16 +177,17 @@ CacheFile::CacheFile()
   : mLock("CacheFile.mLock")
   , mOpeningFile(false)
   , mReady(false)
   , mMemoryOnly(false)
   , mOpenAsMemoryOnly(false)
   , mDataAccessed(false)
   , mDataIsDirty(false)
   , mWritingMetadata(false)
+  , mPreloadWithoutInputStreams(true)
   , mStatus(NS_OK)
   , mDataSize(-1)
   , mOutput(nullptr)
 {
   LOG(("CacheFile::CacheFile() [this=%p]", this));
 }
 
 CacheFile::~CacheFile()
@@ -311,17 +313,17 @@ nsresult
 CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk *aChunk)
 {
   CacheFileAutoLock lock(this);
 
   nsresult rv;
 
   uint32_t index = aChunk->Index();
 
-  LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08x, chunk=%p, idx=%d]",
+  LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08x, chunk=%p, idx=%u]",
        this, aResult, aChunk, index));
 
   if (NS_FAILED(aResult)) {
     SetError(aResult);
     CacheFileIOManager::DoomFile(mHandle, nullptr);
   }
 
   if (HaveChunkListeners(index)) {
@@ -334,17 +336,17 @@ CacheFile::OnChunkRead(nsresult aResult,
 
 nsresult
 CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
 {
   CacheFileAutoLock lock(this);
 
   nsresult rv;
 
-  LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08x, chunk=%p, idx=%d]",
+  LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08x, chunk=%p, idx=%u]",
        this, aResult, aChunk, aChunk->Index()));
 
   MOZ_ASSERT(!mMemoryOnly);
   MOZ_ASSERT(!mOpeningFile);
   MOZ_ASSERT(mHandle);
 
   if (NS_FAILED(aResult)) {
     SetError(aResult);
@@ -368,35 +370,27 @@ CacheFile::OnChunkWritten(nsresult aResu
 
   if (aChunk->mRefCnt != 2) {
     LOG(("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p,"
          " refcnt=%d]", this, aChunk, aChunk->mRefCnt.get()));
 
     return NS_OK;
   }
 
-#ifdef CACHE_CHUNKS
+  bool keepChunk = false;
   if (NS_SUCCEEDED(aResult)) {
-    LOG(("CacheFile::OnChunkWritten() - Caching unused chunk [this=%p, "
-         "chunk=%p]", this, aChunk));
+    keepChunk = ShouldKeepChunk(aChunk->Index());
+    LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
+         keepChunk ? "Caching" : "Releasing", this, aChunk));
   } else {
-    LOG(("CacheFile::OnChunkWritten() - Removing failed chunk [this=%p, "
+    LOG(("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
          "chunk=%p]", this, aChunk));
   }
-#else
-  LOG(("CacheFile::OnChunkWritten() - Releasing %s chunk [this=%p, chunk=%p]",
-       NS_SUCCEEDED(aResult) ? "unused" : "failed", this, aChunk));
-#endif
 
-  RemoveChunkInternal(aChunk,
-#ifdef CACHE_CHUNKS
-                      NS_SUCCEEDED(aResult));
-#else
-                      false);
-#endif
+  RemoveChunkInternal(aChunk, keepChunk);
 
   WriteMetadataIfNeededLocked();
 
   return NS_OK;
 }
 
 nsresult
 CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
@@ -575,16 +569,19 @@ CacheFile::OnMetadataRead(nsresult aResu
 
   bool isNew = false;
   if (NS_SUCCEEDED(aResult)) {
     mReady = true;
     mDataSize = mMetadata->Offset();
     if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
       isNew = true;
       mMetadata->MarkDirty();
+    } else {
+      CacheFileAutoLock lock(this);
+      PreloadChunks(0);
     }
 
     InitIndexEntry();
   }
 
   nsCOMPtr<CacheFileListener> listener;
   mListener.swap(listener);
   listener->OnFileReady(aResult, isNew);
@@ -666,16 +663,21 @@ CacheFile::OpenInputStream(nsIInputStrea
 
   if (!mReady) {
     LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
          this));
 
     return NS_ERROR_NOT_AVAILABLE;
   }
 
+  // Once we open an input stream we no longer allow preloading of chunks
+  // without an input stream, i.e. we no longer keep the first few chunks
+  // preloaded when the last input stream is closed.
+  mPreloadWithoutInputStreams = false;
+
   CacheFileInputStream *input = new CacheFileInputStream(this);
 
   LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
        input, this));
 
   mInputs.AppendElement(input);
   NS_ADDREF(input);
 
@@ -797,22 +799,17 @@ CacheFile::ThrowMemoryCachedData()
     // entries from being purged.
 
     LOG(("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
          "entry is still opening the file [this=%p]", this));
 
     return NS_ERROR_ABORT;
   }
 
-#ifdef CACHE_CHUNKS
   mCachedChunks.Clear();
-#else
-  // If we don't cache all chunks, mCachedChunks must be empty.
-  MOZ_ASSERT(mCachedChunks.Count() == 0);
-#endif
 
   return NS_OK;
 }
 
 nsresult
 CacheFile::GetElement(const char *aKey, char **_retval)
 {
   CacheFileAutoLock lock(this);
@@ -981,82 +978,97 @@ void
 CacheFile::ReleaseOutsideLock(nsISupports *aObject)
 {
   AssertOwnsLock();
 
   mObjsToRelease.AppendElement(aObject);
 }
 
 nsresult
-CacheFile::GetChunk(uint32_t aIndex, bool aWriter,
+CacheFile::GetChunk(uint32_t aIndex, ECallerType aCaller,
                     CacheFileChunkListener *aCallback, CacheFileChunk **_retval)
 {
   CacheFileAutoLock lock(this);
-  return GetChunkLocked(aIndex, aWriter, aCallback, _retval);
+  return GetChunkLocked(aIndex, aCaller, aCallback, _retval);
 }
 
 nsresult
-CacheFile::GetChunkLocked(uint32_t aIndex, bool aWriter,
+CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
                           CacheFileChunkListener *aCallback,
                           CacheFileChunk **_retval)
 {
   AssertOwnsLock();
 
-  LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%d, writer=%d, listener=%p]",
-       this, aIndex, aWriter, aCallback));
+  LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
+       this, aIndex, aCaller, aCallback));
 
   MOZ_ASSERT(mReady);
   MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
-  MOZ_ASSERT((aWriter && !aCallback) || (!aWriter && aCallback));
+  MOZ_ASSERT((aCaller == READER && aCallback) ||
+             (aCaller == WRITER && !aCallback) ||
+             (aCaller == PRELOADER && !aCallback));
+
+  // Preload chunks from disk when this is a disk-backed entry and the caller
+  // is a reader.
+  bool preload = !mMemoryOnly && (aCaller == READER);
 
   nsresult rv;
 
   nsRefPtr<CacheFileChunk> chunk;
   if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
     LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
          chunk.get(), this));
 
+    // Preloader calls this method to preload only non-loaded chunks.
+    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
+
     // We might get failed chunk between releasing the lock in
     // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
     rv = chunk->GetStatus();
     if (NS_FAILED(rv)) {
       SetError(rv);
       LOG(("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
            "[this=%p]", this));
       return rv;
     }
 
-    if (chunk->IsReady() || aWriter) {
+    if (chunk->IsReady() || aCaller == WRITER) {
       chunk.swap(*_retval);
-    }
-    else {
+    } else {
       rv = QueueChunkListener(aIndex, aCallback);
       NS_ENSURE_SUCCESS(rv, rv);
     }
 
+    if (preload) {
+      PreloadChunks(aIndex + 1);
+    }
+
     return NS_OK;
   }
 
   if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
-#ifndef CACHE_CHUNKS
-    // We don't cache all chunks, so we must not have handle and we must be
-    // either waiting for the handle, or this is memory-only entry.
-    MOZ_ASSERT(!mHandle && (mMemoryOnly || mOpeningFile));
-#endif
     LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
          chunk.get(), this));
 
+    // Preloader calls this method to preload only non-loaded chunks.
+    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");
+
     mChunks.Put(aIndex, chunk);
     mCachedChunks.Remove(aIndex);
     chunk->mFile = this;
     chunk->mRemovingChunk = false;
 
     MOZ_ASSERT(chunk->IsReady());
 
     chunk.swap(*_retval);
+
+    if (preload) {
+      PreloadChunks(aIndex + 1);
+    }
+
     return NS_OK;
   }
 
   int64_t off = aIndex * kChunkSize;
 
   if (off < mDataSize) {
     // We cannot be here if this is memory only entry since the chunk must exist
     MOZ_ASSERT(!mMemoryOnly);
@@ -1080,28 +1092,30 @@ CacheFile::GetChunkLocked(uint32_t aInde
     rv = chunk->Read(mHandle, std::min(static_cast<uint32_t>(mDataSize - off),
                      static_cast<uint32_t>(kChunkSize)),
                      mMetadata->GetHash(aIndex), this);
     if (NS_WARN_IF(NS_FAILED(rv))) {
       RemoveChunkInternal(chunk, false);
       return rv;
     }
 
-    if (aWriter) {
+    if (aCaller == WRITER) {
       chunk.swap(*_retval);
-    }
-    else {
+    } else if (aCaller != PRELOADER) {
       rv = QueueChunkListener(aIndex, aCallback);
       NS_ENSURE_SUCCESS(rv, rv);
     }
 
+    if (preload) {
+      PreloadChunks(aIndex + 1);
+    }
+
     return NS_OK;
-  }
-  else if (off == mDataSize) {
-    if (aWriter) {
+  } else if (off == mDataSize) {
+    if (aCaller == WRITER) {
       // this listener is going to write to the chunk
       chunk = new CacheFileChunk(this, aIndex);
       mChunks.Put(aIndex, chunk);
 
       LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
            chunk.get(), this));
 
       chunk->InitNew(this);
@@ -1110,19 +1124,18 @@ CacheFile::GetChunkLocked(uint32_t aInde
       if (HaveChunkListeners(aIndex)) {
         rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
         NS_ENSURE_SUCCESS(rv, rv);
       }
 
       chunk.swap(*_retval);
       return NS_OK;
     }
-  }
-  else {
-    if (aWriter) {
+  } else {
+    if (aCaller == WRITER) {
       // this chunk was requested by writer, but we need to fill the gap first
 
       // Fill with zero the last chunk if it is incomplete
       if (mDataSize % kChunkSize) {
         rv = PadChunkWithZeroes(mDataSize / kChunkSize);
         NS_ENSURE_SUCCESS(rv, rv);
 
         MOZ_ASSERT(!(mDataSize % kChunkSize));
@@ -1132,75 +1145,161 @@ CacheFile::GetChunkLocked(uint32_t aInde
 
       if (mMemoryOnly) {
         // We need to create all missing CacheFileChunks if this is memory-only
         // entry
         for (uint32_t i = startChunk ; i < aIndex ; i++) {
           rv = PadChunkWithZeroes(i);
           NS_ENSURE_SUCCESS(rv, rv);
         }
-      }
-      else {
+      } else {
         // We don't need to create CacheFileChunk for other empty chunks unless
         // there is some input stream waiting for this chunk.
 
         if (startChunk != aIndex) {
           // Make sure the file contains zeroes at the end of the file
           rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle,
                                                       startChunk * kChunkSize,
                                                       aIndex * kChunkSize,
                                                       nullptr);
           NS_ENSURE_SUCCESS(rv, rv);
         }
 
         for (uint32_t i = startChunk ; i < aIndex ; i++) {
           if (HaveChunkListeners(i)) {
             rv = PadChunkWithZeroes(i);
             NS_ENSURE_SUCCESS(rv, rv);
-          }
-          else {
+          } else {
             mMetadata->SetHash(i, kEmptyChunkHash);
             mDataSize = (i + 1) * kChunkSize;
           }
         }
       }
 
       MOZ_ASSERT(mDataSize == off);
-      rv = GetChunkLocked(aIndex, true, nullptr, getter_AddRefs(chunk));
+      rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
       NS_ENSURE_SUCCESS(rv, rv);
 
       chunk.swap(*_retval);
       return NS_OK;
     }
   }
 
+  // We can get here only if the caller is a reader, since a writer always
+  // creates a new chunk above and the preloader requests only chunks that are
+  // not loaded but do exist.
+  MOZ_ASSERT(aCaller == READER, "Unexpected!");
+
   if (mOutput) {
     // the chunk doesn't exist but mOutput may create it
     rv = QueueChunkListener(aIndex, aCallback);
     NS_ENSURE_SUCCESS(rv, rv);
-  }
-  else {
+  } else {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   return NS_OK;
 }
 
+void
+CacheFile::PreloadChunks(uint32_t aIndex)
+{
+  AssertOwnsLock();
+
+  uint32_t limit = aIndex + CacheObserver::PreloadChunkCount();
+
+  for (uint32_t i = aIndex; i < limit; ++i) {
+    int64_t off = i * kChunkSize;
+
+    if (off >= mDataSize) {
+      // This chunk is beyond EOF.
+      return;
+    }
+
+    if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
+      // This chunk is already in memory or is being read right now.
+      continue;
+    }
+
+    LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
+         this, i));
+
+    nsRefPtr<CacheFileChunk> chunk;
+    GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
+    // We've checked above that this chunk isn't loaded, so GetChunkLocked()
+    // must not return one.
+    MOZ_ASSERT(!chunk);
+  }
+}
+
+bool
+CacheFile::ShouldKeepChunk(uint32_t aIndex)
+{
+  AssertOwnsLock();
+
+#ifdef CACHE_CHUNKS
+  // We cache all chunks.
+  return true;
+#else
+  // Cache the chunk if this is a memory-only entry or we have no handle yet.
+  if (mMemoryOnly || mOpeningFile) {
+    return true;
+  }
+
+  uint32_t preloadChunkCount = CacheObserver::PreloadChunkCount();
+
+  if (preloadChunkCount == 0) {
+    // Preloading of chunks is disabled
+    return false;
+  }
+
+  if (mPreloadWithoutInputStreams && aIndex < preloadChunkCount) {
+    // We don't have any input stream yet, but it is likely that some will be
+    // opened soon. Keep the first preloadChunkCount chunks in memory.
+    return true;
+  }
+
+  // Check whether this chunk should be considered a preloaded chunk for any
+  // existing input stream.
+
+  // maxPos is the position of the last byte in the given chunk
+  int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;
+
+  // minPos is the position of the first byte in a chunk that precedes the given
+  // chunk by PreloadChunkCount chunks
+  int64_t minPos;
+  if (preloadChunkCount >= aIndex) {
+    minPos = 0;
+  } else {
+    minPos = static_cast<int64_t>(aIndex - preloadChunkCount) * kChunkSize;
+  }
+
+  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+    int64_t inputPos = mInputs[i]->GetPosition();
+    if (inputPos >= minPos && inputPos <= maxPos) {
+      return true;
+    }
+  }
+
+  return false;
+#endif
+}
+
 nsresult
 CacheFile::RemoveChunk(CacheFileChunk *aChunk)
 {
   nsresult rv;
 
   // Avoid lock reentrancy by increasing the RefCnt
   nsRefPtr<CacheFileChunk> chunk = aChunk;
 
   {
     CacheFileAutoLock lock(this);
 
-    LOG(("CacheFile::RemoveChunk() [this=%p, chunk=%p, idx=%d]",
+    LOG(("CacheFile::RemoveChunk() [this=%p, chunk=%p, idx=%u]",
          this, aChunk, aChunk->Index()));
 
     MOZ_ASSERT(mReady);
     MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) ||
                (!mHandle && mMemoryOnly && !mOpeningFile) ||
                (!mHandle && !mMemoryOnly && mOpeningFile));
 
     if (aChunk->mRefCnt != 2) {
@@ -1222,17 +1321,17 @@ CacheFile::RemoveChunk(CacheFileChunk *a
       ChunkListeners *listeners;
       mChunkListeners.Get(chunk->Index(), &listeners);
       MOZ_ASSERT(!listeners);
     }
 #endif
 
     if (NS_FAILED(mStatus)) {
       // Don't write any chunk to disk since this entry will be doomed
-      LOG(("CacheFile::RemoveChunk() - Removing chunk because of status "
+      LOG(("CacheFile::RemoveChunk() - Releasing chunk because of status "
            "[this=%p, chunk=%p, mStatus=0x%08x]", this, chunk.get(), mStatus));
 
       RemoveChunkInternal(chunk, false);
       return mStatus;
     }
 
     if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
       LOG(("CacheFile::RemoveChunk() - Writing dirty chunk to the disk "
@@ -1257,37 +1356,21 @@ CacheFile::RemoveChunk(CacheFileChunk *a
 
         // chunk needs to be released under the lock to be able to rely on
         // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
         chunk = nullptr;
         return NS_OK;
       }
     }
 
-#ifdef CACHE_CHUNKS
-    LOG(("CacheFile::RemoveChunk() - Caching unused chunk [this=%p, chunk=%p]",
-         this, chunk.get()));
-#else
-    if (mMemoryOnly || mOpeningFile) {
-      LOG(("CacheFile::RemoveChunk() - Caching unused chunk [this=%p, chunk=%p,"
-           " reason=%s]", this, chunk.get(),
-           mMemoryOnly ? "memory-only" : "opening-file"));
-    } else {
-      LOG(("CacheFile::RemoveChunk() - Releasing unused chunk [this=%p, "
-           "chunk=%p]", this, chunk.get()));
-    }
-#endif
+    bool keepChunk = ShouldKeepChunk(aChunk->Index());
+    LOG(("CacheFile::RemoveChunk() - %s unused chunk [this=%p, chunk=%p]",
+         keepChunk ? "Caching" : "Releasing", this, chunk.get()));
 
-    RemoveChunkInternal(chunk,
-#ifdef CACHE_CHUNKS
-                        true);
-#else
-                        // Cache the chunk only when we have a reason to do so
-                        mMemoryOnly || mOpeningFile);
-#endif
+    RemoveChunkInternal(chunk, keepChunk);
 
     if (!mMemoryOnly)
       WriteMetadataIfNeededLocked();
   }
 
   return NS_OK;
 }
 
@@ -1316,16 +1399,20 @@ CacheFile::RemoveInput(CacheFileInputStr
   found = mInputs.RemoveElement(aInput);
   MOZ_ASSERT(found);
 
   ReleaseOutsideLock(static_cast<nsIInputStream*>(aInput));
 
   if (!mMemoryOnly)
     WriteMetadataIfNeededLocked();
 
+  // If the input didn't read all the data, some preloaded chunks might be left
+  // that won't be used anymore.
+  mCachedChunks.Enumerate(&CacheFile::CleanUpPreloadedChunks, this);
+
   return NS_OK;
 }
 
 nsresult
 CacheFile::RemoveOutput(CacheFileOutputStream *aOutput)
 {
   AssertOwnsLock();
 
@@ -1354,17 +1441,17 @@ CacheFile::RemoveOutput(CacheFileOutputS
 nsresult
 CacheFile::NotifyChunkListener(CacheFileChunkListener *aCallback,
                                nsIEventTarget *aTarget,
                                nsresult aResult,
                                uint32_t aChunkIdx,
                                CacheFileChunk *aChunk)
 {
   LOG(("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
-       "rv=0x%08x, idx=%d, chunk=%p]", this, aCallback, aTarget, aResult,
+       "rv=0x%08x, idx=%u, chunk=%p]", this, aCallback, aTarget, aResult,
        aChunkIdx, aChunk));
 
   nsresult rv;
   nsRefPtr<NotifyChunkListenerEvent> ev;
   ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk);
   if (aTarget)
     rv = aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
   else
@@ -1373,17 +1460,17 @@ CacheFile::NotifyChunkListener(CacheFile
 
   return NS_OK;
 }
 
 nsresult
 CacheFile::QueueChunkListener(uint32_t aIndex,
                               CacheFileChunkListener *aCallback)
 {
-  LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%d, listener=%p]",
+  LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]",
        this, aIndex, aCallback));
 
   AssertOwnsLock();
 
   MOZ_ASSERT(aCallback);
 
   ChunkListenerItem *item = new ChunkListenerItem();
   item->mTarget = NS_GetCurrentThread();
@@ -1398,17 +1485,17 @@ CacheFile::QueueChunkListener(uint32_t a
   listeners->mItems.AppendElement(item);
   return NS_OK;
 }
 
 nsresult
 CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
                                 CacheFileChunk *aChunk)
 {
-  LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%d, rv=0x%08x, "
+  LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08x, "
        "chunk=%p]", this, aIndex, aResult, aChunk));
 
   AssertOwnsLock();
 
   nsresult rv, rv2;
 
   ChunkListeners *listeners;
   mChunkListeners.Get(aIndex, &listeners);
@@ -1563,17 +1650,17 @@ CacheFile::PostWriteTimer()
 
 PLDHashOperator
 CacheFile::WriteAllCachedChunks(const uint32_t& aIdx,
                                 nsRefPtr<CacheFileChunk>& aChunk,
                                 void* aClosure)
 {
   CacheFile *file = static_cast<CacheFile*>(aClosure);
 
-  LOG(("CacheFile::WriteAllCachedChunks() [this=%p, idx=%d, chunk=%p]",
+  LOG(("CacheFile::WriteAllCachedChunks() [this=%p, idx=%u, chunk=%p]",
        file, aIdx, aChunk.get()));
 
   file->mChunks.Put(aIdx, aChunk);
   aChunk->mFile = file;
   aChunk->mRemovingChunk = false;
 
   MOZ_ASSERT(aChunk->IsReady());
 
@@ -1586,17 +1673,17 @@ CacheFile::WriteAllCachedChunks(const ui
 PLDHashOperator
 CacheFile::FailListenersIfNonExistentChunk(
   const uint32_t& aIdx,
   nsAutoPtr<ChunkListeners>& aListeners,
   void* aClosure)
 {
   CacheFile *file = static_cast<CacheFile*>(aClosure);
 
-  LOG(("CacheFile::FailListenersIfNonExistentChunk() [this=%p, idx=%d]",
+  LOG(("CacheFile::FailListenersIfNonExistentChunk() [this=%p, idx=%u]",
        file, aIdx));
 
   nsRefPtr<CacheFileChunk> chunk;
   file->mChunks.Get(aIdx, getter_AddRefs(chunk));
   if (chunk) {
     MOZ_ASSERT(!chunk->IsReady());
     return PL_DHASH_NEXT;
   }
@@ -1616,38 +1703,57 @@ CacheFile::FailUpdateListeners(
   const uint32_t& aIdx,
   nsRefPtr<CacheFileChunk>& aChunk,
   void* aClosure)
 {
 #ifdef PR_LOGGING
   CacheFile *file = static_cast<CacheFile*>(aClosure);
 #endif
 
-  LOG(("CacheFile::FailUpdateListeners() [this=%p, idx=%d]",
+  LOG(("CacheFile::FailUpdateListeners() [this=%p, idx=%u]",
        file, aIdx));
 
   if (aChunk->IsReady()) {
     aChunk->NotifyUpdateListeners();
   }
 
   return PL_DHASH_NEXT;
 }
 
+PLDHashOperator
+CacheFile::CleanUpPreloadedChunks(const uint32_t& aIdx,
+                                  nsRefPtr<CacheFileChunk>& aChunk,
+                                  void* aClosure)
+{
+  CacheFile *file = static_cast<CacheFile*>(aClosure);
+
+  LOG(("CacheFile::CleanUpPreloadedChunks() [this=%p, idx=%u, chunk=%p]", file,
+       aIdx, aChunk.get()));
+
+  if (file->ShouldKeepChunk(aIdx)) {
+    LOG(("CacheFile::CleanUpPreloadedChunks() - Keeping chunk"));
+    return PL_DHASH_NEXT;
+  }
+
+  LOG(("CacheFile::CleanUpPreloadedChunks() - Removing chunk"));
+  return PL_DHASH_REMOVE;
+}
+
 nsresult
 CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx)
 {
   AssertOwnsLock();
 
   // This method is used to pad last incomplete chunk with zeroes or create
   // a new chunk full of zeroes
   MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);
 
   nsresult rv;
   nsRefPtr<CacheFileChunk> chunk;
-  rv = GetChunkLocked(aChunkIdx, true, nullptr, getter_AddRefs(chunk));
+  rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
   NS_ENSURE_SUCCESS(rv, rv);
 
   LOG(("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
        " [this=%p]", aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));
 
   chunk->EnsureBufSize(kChunkSize);
   memset(chunk->BufForWriting() + chunk->DataSize(), 0, kChunkSize - chunk->DataSize());
 
--- a/netwerk/cache2/CacheFile.h
+++ b/netwerk/cache2/CacheFile.h
@@ -116,22 +116,32 @@ private:
 
   virtual ~CacheFile();
 
   void     Lock();
   void     Unlock();
   void     AssertOwnsLock() const;
   void     ReleaseOutsideLock(nsISupports *aObject);
 
-  nsresult GetChunk(uint32_t aIndex, bool aWriter,
+  enum ECallerType {
+    READER    = 0,
+    WRITER    = 1,
+    PRELOADER = 2
+  };
+
+  nsresult GetChunk(uint32_t aIndex, ECallerType aCaller,
                     CacheFileChunkListener *aCallback,
                     CacheFileChunk **_retval);
-  nsresult GetChunkLocked(uint32_t aIndex, bool aWriter,
+  nsresult GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
                           CacheFileChunkListener *aCallback,
                           CacheFileChunk **_retval);
+
+  void     PreloadChunks(uint32_t aIndex);
+  bool     ShouldKeepChunk(uint32_t aIndex);
+
   nsresult RemoveChunk(CacheFileChunk *aChunk);
   void     RemoveChunkInternal(CacheFileChunk *aChunk, bool aCacheChunk);
 
   nsresult RemoveInput(CacheFileInputStream *aInput);
   nsresult RemoveOutput(CacheFileOutputStream *aOutput);
   nsresult NotifyChunkListener(CacheFileChunkListener *aCallback,
                                nsIEventTarget *aTarget,
                                nsresult aResult,
@@ -157,30 +167,36 @@ private:
                            const uint32_t& aIdx,
                            nsAutoPtr<mozilla::net::ChunkListeners>& aListeners,
                            void* aClosure);
 
   static PLDHashOperator FailUpdateListeners(const uint32_t& aIdx,
                                              nsRefPtr<CacheFileChunk>& aChunk,
                                              void* aClosure);
 
+  static PLDHashOperator CleanUpPreloadedChunks(
+                           const uint32_t& aIdx,
+                           nsRefPtr<CacheFileChunk>& aChunk,
+                           void* aClosure);
+
   nsresult PadChunkWithZeroes(uint32_t aChunkIdx);
 
   void SetError(nsresult aStatus);
 
   nsresult InitIndexEntry();
 
   mozilla::Mutex mLock;
   bool           mOpeningFile;
   bool           mReady;
   bool           mMemoryOnly;
   bool           mOpenAsMemoryOnly;
   bool           mDataAccessed;
   bool           mDataIsDirty;
   bool           mWritingMetadata;
+  bool           mPreloadWithoutInputStreams;
   nsresult       mStatus;
   int64_t        mDataSize;
   nsCString      mKey;
 
   nsRefPtr<CacheFileHandle>    mHandle;
   nsRefPtr<CacheFileMetadata>  mMetadata;
   nsCOMPtr<CacheFileListener>  mListener;
   nsCOMPtr<CacheFileIOListener>   mDoomAfterOpenListener;
--- a/netwerk/cache2/CacheFileInputStream.cpp
+++ b/netwerk/cache2/CacheFileInputStream.cpp
@@ -538,17 +538,18 @@ CacheFileInputStream::EnsureCorrectChunk
   if (mListeningForChunk == static_cast<int64_t>(chunkIdx)) {
     // We're already waiting for this chunk
     LOG(("CacheFileInputStream::EnsureCorrectChunk() - Already listening for "
          "chunk %lld [this=%p]", mListeningForChunk, this));
 
     return;
   }
 
-  rv = mFile->GetChunkLocked(chunkIdx, false, this, getter_AddRefs(mChunk));
+  rv = mFile->GetChunkLocked(chunkIdx, CacheFile::READER, this,
+                             getter_AddRefs(mChunk));
   if (NS_FAILED(rv)) {
     LOG(("CacheFileInputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
          "[this=%p, idx=%d, rv=0x%08x]", this, chunkIdx, rv));
     if (rv != NS_ERROR_NOT_AVAILABLE) {
       // We store the error in mStatus, so we can propagate it later to consumer
       // in Read(), Available() etc. We need to handle NS_ERROR_NOT_AVAILABLE
       // differently since it is returned when the requested chunk is not
       // available and there is no writer that could create it, i.e. it means
--- a/netwerk/cache2/CacheFileInputStream.h
+++ b/netwerk/cache2/CacheFileInputStream.h
@@ -33,16 +33,18 @@ public:
   NS_IMETHOD OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk);
   NS_IMETHOD OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
                               CacheFileChunk *aChunk);
   NS_IMETHOD OnChunkUpdated(CacheFileChunk *aChunk);
 
   // Memory reporting
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
 
+  int64_t GetPosition() const { return mPos; };
+
 private:
   virtual ~CacheFileInputStream();
 
   void ReleaseChunk();
   void EnsureCorrectChunk(bool aReleaseOnly);
   void CanRead(int64_t *aCanRead, const char **aBuf);
   void NotifyListener();
   void MaybeNotifyListener();
--- a/netwerk/cache2/CacheFileOutputStream.cpp
+++ b/netwerk/cache2/CacheFileOutputStream.cpp
@@ -340,17 +340,18 @@ CacheFileOutputStream::EnsureCorrectChun
       ReleaseChunk();
     }
   }
 
   if (aReleaseOnly)
     return;
 
   nsresult rv;
-  rv = mFile->GetChunkLocked(chunkIdx, true, nullptr, getter_AddRefs(mChunk));
+  rv = mFile->GetChunkLocked(chunkIdx, CacheFile::WRITER, nullptr,
+                             getter_AddRefs(mChunk));
   if (NS_FAILED(rv)) {
     LOG(("CacheFileOutputStream::EnsureCorrectChunk() - GetChunkLocked failed. "
          "[this=%p, idx=%d, rv=0x%08x]", this, chunkIdx, rv));
     mStatus = rv;
   }
 }
 
 void
--- a/netwerk/cache2/CacheObserver.cpp
+++ b/netwerk/cache2/CacheObserver.cpp
@@ -51,16 +51,19 @@ int32_t CacheObserver::sMemoryCacheCapac
 int32_t CacheObserver::sAutoMemoryCacheCapacity = -1;
 
 static uint32_t const kDefaultDiskCacheCapacity = 250 * 1024; // 250 MB
 uint32_t CacheObserver::sDiskCacheCapacity = kDefaultDiskCacheCapacity;
 
 static bool const kDefaultSmartCacheSizeEnabled = false;
 bool CacheObserver::sSmartCacheSizeEnabled = kDefaultSmartCacheSizeEnabled;
 
+static uint32_t const kDefaultPreloadChunkCount = 4;
+uint32_t CacheObserver::sPreloadChunkCount = kDefaultPreloadChunkCount;
+
 static uint32_t const kDefaultMaxMemoryEntrySize = 4 * 1024; // 4 MB
 uint32_t CacheObserver::sMaxMemoryEntrySize = kDefaultMaxMemoryEntrySize;
 
 static uint32_t const kDefaultMaxDiskEntrySize = 50 * 1024; // 50 MB
 uint32_t CacheObserver::sMaxDiskEntrySize = kDefaultMaxDiskEntrySize;
 
 static uint32_t const kDefaultCompressionLevel = 1;
 uint32_t CacheObserver::sCompressionLevel = kDefaultCompressionLevel;
@@ -137,16 +140,19 @@ CacheObserver::AttachToPreferences()
   mozilla::Preferences::AddUintVarCache(
     &sDiskCacheCapacity, "browser.cache.disk.capacity", kDefaultDiskCacheCapacity);
   mozilla::Preferences::AddBoolVarCache(
     &sSmartCacheSizeEnabled, "browser.cache.disk.smart_size.enabled", kDefaultSmartCacheSizeEnabled);
   mozilla::Preferences::AddIntVarCache(
     &sMemoryCacheCapacity, "browser.cache.memory.capacity", kDefaultMemoryCacheCapacity);
 
   mozilla::Preferences::AddUintVarCache(
+    &sPreloadChunkCount, "browser.cache.disk.preload_chunk_count", kDefaultPreloadChunkCount);
+
+  mozilla::Preferences::AddUintVarCache(
     &sMaxDiskEntrySize, "browser.cache.disk.max_entry_size", kDefaultMaxDiskEntrySize);
   mozilla::Preferences::AddUintVarCache(
     &sMaxMemoryEntrySize, "browser.cache.memory.max_entry_size", kDefaultMaxMemoryEntrySize);
 
   // http://mxr.mozilla.org/mozilla-central/source/netwerk/cache/nsCacheEntryDescriptor.cpp#367
   mozilla::Preferences::AddUintVarCache(
     &sCompressionLevel, "browser.cache.compression_level", kDefaultCompressionLevel);
 
--- a/netwerk/cache2/CacheObserver.h
+++ b/netwerk/cache2/CacheObserver.h
@@ -35,16 +35,18 @@ class CacheObserver : public nsIObserver
   static uint32_t const MetadataMemoryLimit() // result in bytes.
     { return sMetadataMemoryLimit << 10; }
   static uint32_t const MemoryCacheCapacity(); // result in bytes.
   static uint32_t const DiskCacheCapacity() // result in bytes.
     { return sDiskCacheCapacity << 10; }
   static void SetDiskCacheCapacity(uint32_t); // parameter in bytes.
   static bool const SmartCacheSizeEnabled()
     { return sSmartCacheSizeEnabled; }
+  static uint32_t const PreloadChunkCount()
+    { return sPreloadChunkCount; }
   static uint32_t const MaxMemoryEntrySize() // result in bytes.
     { return sMaxMemoryEntrySize << 10; }
   static uint32_t const MaxDiskEntrySize() // result in bytes.
     { return sMaxDiskEntrySize << 10; }
   static uint32_t const CompressionLevel()
     { return sCompressionLevel; }
   static uint32_t const HalfLifeSeconds()
     { return sHalfLifeHours * 60 * 60; }
@@ -66,16 +68,17 @@ private:
   static uint32_t sUseNewCache;
   static bool sUseMemoryCache;
   static bool sUseDiskCache;
   static uint32_t sMetadataMemoryLimit;
   static int32_t sMemoryCacheCapacity;
   static int32_t sAutoMemoryCacheCapacity;
   static uint32_t sDiskCacheCapacity;
   static bool sSmartCacheSizeEnabled;
+  static uint32_t sPreloadChunkCount;
   static uint32_t sMaxMemoryEntrySize;
   static uint32_t sMaxDiskEntrySize;
   static uint32_t sCompressionLevel;
   static uint32_t sHalfLifeHours;
   static int32_t sHalfLifeExperiment;
   static bool sSanitizeOnShutdown;
   static bool sClearCacheOnShutdown;
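
The read-ahead window introduced here is controlled by the new
browser.cache.disk.preload_chunk_count preference wired up above. With the
default of 4 and cache2's 256 kB chunks (the chunk size is an assumption of
this note, not part of the patch), a reader keeps at most roughly 1 MB of
upcoming data in memory. A hypothetical user.js tweak for experimenting with
the window, not something this patch ships, could look like:

// user.js sketch: read ahead 8 chunks instead of the default 4.
user_pref("browser.cache.disk.preload_chunk_count", 8);
// A value of 0 disables preloading entirely, per ShouldKeepChunk() above.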