Bug 1066726 - Concurrent HTTP cache read and write issues. r=michal, r=jduell, a=sledru
author Honza Bambas <honzab.moz@firemni.cz>
Tue, 16 Sep 2014 15:51:50 +0200
changeset 216781 8a1cffa4c130
parent 216780 6c39ccb686a5
child 216782 1e3320340bd2
push id 3910
push user ryanvm@gmail.com
push date 2014-09-18 14:48 +0000
treeherder mozilla-beta@af1dbe183e3d
reviewers michal, jduell, sledru
bugs 1066726
milestone 33.0
netwerk/cache2/CacheFile.cpp
netwerk/protocol/http/nsHttpChannel.cpp
netwerk/protocol/http/nsHttpChannel.h
netwerk/test/unit/test_cache2-28-concurrent_read_resumable_entry_size_zero.js
netwerk/test/unit/test_cache2-29-concurrent_read_non-resumable_entry_size_zero.js
netwerk/test/unit/test_partial_response_entry_size_smart_shrink.js
netwerk/test/unit/xpcshell.ini
--- a/netwerk/cache2/CacheFile.cpp
+++ b/netwerk/cache2/CacheFile.cpp
@@ -673,16 +673,29 @@ CacheFile::OpenInputStream(nsIInputStrea
 
   if (!mReady) {
     LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
          this));
 
     return NS_ERROR_NOT_AVAILABLE;
   }
 
+  if (NS_FAILED(mStatus)) {
+    LOG(("CacheFile::OpenInputStream() - CacheFile is in a failure state "
+         "[this=%p, status=0x%08x]", this, mStatus));
+
+    // Don't allow opening the input stream when this CacheFile is in
+    // a failed state.  This is the only way to protect consumers correctly
+    // from reading a broken entry.  When the file is in the failed state,
+    // it's also doomed, so reopening the entry won't make any difference -
+    // the data will remain inaccessible.  Note that for files that are merely
+    // doomed, we must still allow reading the data.
+    return mStatus;
+  }
+
   // Once we open input stream we no longer allow preloading of chunks without
   // input stream, i.e. we will no longer keep first few chunks preloaded when
   // the last input stream is closed.
   mPreloadWithoutInputStreams = false;
 
   CacheFileInputStream *input = new CacheFileInputStream(this);
 
   LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
@@ -1560,16 +1573,23 @@ CacheFile::RemoveOutput(CacheFileOutputS
   mOutput = nullptr;
 
   // Cancel all queued chunk and update listeners that cannot be satisfied
   NotifyListenersAboutOutputRemoval();
 
   if (!mMemoryOnly)
     WriteMetadataIfNeededLocked();
 
+  // Make sure the CacheFile status is set to a failure when the output stream
+  // is closed with a fatal error.  This way we correctly propagate the
+  // failure state of this entry to end consumers without any race window.
+  if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) && aStatus != NS_BASE_STREAM_CLOSED) {
+    mStatus = aStatus;
+  }
+
   // Notify close listener as the last action
   aOutput->NotifyCloseListener();
 
   Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
                         StatusToTelemetryEnum(aStatus));
 
   return NS_OK;
 }
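Taken together, the two CacheFile.cpp hunks form a simple latch-and-report pattern. A condensed, illustrative sketch (names mirror the patch, but this is not the real CacheFile class):

// Writer latches the first fatal error; readers are refused afterwards.
struct CacheFileLatchSketch {
  nsresult mStatus = NS_OK;

  // Writer side (RemoveOutput): remember the first fatal error reported by
  // the output stream.  NS_BASE_STREAM_CLOSED is an ordinary close, not a
  // failure worth latching.
  void NoteOutputClosed(nsresult aStatus) {
    if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) &&
        aStatus != NS_BASE_STREAM_CLOSED) {
      mStatus = aStatus;
    }
  }

  // Reader side (OpenInputStream): refuse to hand out an input stream once
  // the entry has failed, so no consumer can observe the broken data.
  nsresult OpenInputSketch() const {
    return NS_FAILED(mStatus) ? mStatus : NS_OK;
  }
};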
--- a/netwerk/protocol/http/nsHttpChannel.cpp
+++ b/netwerk/protocol/http/nsHttpChannel.cpp
@@ -898,23 +898,23 @@ nsHttpChannel::CallOnStartRequest()
             }
         }
     }
 
     if (mResponseHead && mResponseHead->ContentCharset().IsEmpty())
         mResponseHead->SetContentCharset(mContentCharsetHint);
 
     if (mResponseHead && mCacheEntry) {
-        // If we have a cache entry, set its predicted size to ContentLength to
+        // If we have a cache entry, set its predicted size to TotalEntitySize to
         // avoid caching an entry that will exceed the max size limit.
         rv = mCacheEntry->SetPredictedDataSize(
-            mResponseHead->ContentLength());
+            mResponseHead->TotalEntitySize());
         if (NS_ERROR_FILE_TOO_BIG == rv) {
-          mCacheEntry = nullptr;
-          LOG(("  entry too big, throwing away"));
+          // Don't throw the entry away; we will need it later.
+          LOG(("  entry too big"));
         } else {
           NS_ENSURE_SUCCESS(rv, rv);
         }
     }
 
     LOG(("  calling mListener->OnStartRequest\n"));
     if (mListener) {
         rv = mListener->OnStartRequest(this, mListenerContext);
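The switch from ContentLength() to TotalEntitySize() matters when the response is a 206: Content-Length covers only the returned range, while the total after the slash in Content-Range covers the whole entity the cache entry will eventually hold. A rough standalone illustration of that intent (a sketch of the idea, not the nsHttpResponseHead implementation):

#include <cstdint>

// E.g. "Content-Range: bytes 100-2047/2048" with "Content-Length: 1948":
// the entry will eventually hold 2048 bytes, so that is the size the
// predicted-size limit check has to see.  contentRangeTotal is -1 when the
// response carries no Content-Range header.
int64_t PredictedEntitySizeSketch(int64_t contentLength, int64_t contentRangeTotal)
{
  return contentRangeTotal >= 0 ? contentRangeTotal : contentLength;
}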
@@ -2054,22 +2054,23 @@ nsHttpChannel::IsResumable(int64_t parti
            (partialLen > 0 || ignoreMissingPartialLen) &&
            !hasContentEncoding &&
            mCachedResponseHead->IsResumable() &&
            !mCustomConditionalRequest &&
            !mCachedResponseHead->NoStore();
 }
 
 nsresult
-nsHttpChannel::MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength)
+nsHttpChannel::MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength,
+                                          bool ignoreMissingPartialLen)
 {
     // Be pessimistic
     mIsPartialRequest = false;
 
-    if (!IsResumable(partialLen, contentLength))
+    if (!IsResumable(partialLen, contentLength, ignoreMissingPartialLen))
       return NS_ERROR_NOT_RESUMABLE;
 
     // looks like a partial entry we can reuse; add If-Range
     // and Range headers.
     nsresult rv = SetupByteRangeRequest(partialLen);
     if (NS_FAILED(rv)) {
         // Make the request unconditional again.
         mRequestHead.ClearHeader(nsHttp::Range);
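SetupByteRangeRequest() (called just above, body not shown in this hunk) is what turns the reusable partial entry into a conditional range request. A self-contained sketch of the headers such a request carries; the helper name is made up, and the real channel code sets these on mRequestHead rather than building a raw string:

#include <cstdint>
#include <string>

// Continue the transfer at partialLen and pin the validator: if the entity
// changed on the server, If-Range makes it answer 200 with the full body
// instead of a mismatched 206.
std::string BuildRangeHeadersSketch(int64_t partialLen, const std::string &validator)
{
  return "Range: bytes=" + std::to_string(partialLen) + "-\r\n"
         "If-Range: " + validator + "\r\n";
}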
@@ -5131,17 +5132,19 @@ nsHttpChannel::OnStopRequest(nsIRequest 
                 // mayhemer TODO - we have to restart read from cache here at the size offset
                 MOZ_ASSERT(false);
                 LOG(("  cache entry write is still in progress, but we just "
                      "finished reading the cache entry"));
             }
             else if (contentLength != int64_t(-1) && contentLength != size) {
                 LOG(("  concurrent cache entry write has been interrupted"));
                 mCachedResponseHead = Move(mResponseHead);
-                rv = MaybeSetupByteRangeRequest(size, contentLength);
+                // Ignore zero partial length because we also want to resume when
+                // no data at all has been read from the cache.
+                rv = MaybeSetupByteRangeRequest(size, contentLength, true);
                 if (NS_SUCCEEDED(rv) && mIsPartialRequest) {
                     // Prevent read from cache again
                     mCachedContentIsValid = 0;
                     mCachedContentIsPartial = 1;
 
                     // Perform the range request
                     rv = ContinueConnect();
                     if (NS_SUCCEEDED(rv)) {
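The OnStopRequest() change above handles the concurrent cache writer having stopped before delivering the full entity. A simplified, standalone sketch of that recovery decision; the non-resumable branch reflects the behaviour exercised by the new tests below and is an assumption, not code taken from this hunk:

#include <cstdint>

enum class Recovery { Nothing, RangeRequest, FullRefetch };

// size = bytes actually read from the cache so far, contentLength = what the
// cached response headers promised.  The channel passes
// ignoreMissingPartialLen=true above precisely so that size == 0 (nothing
// read yet) still qualifies for resumption.
Recovery DecideRecoverySketch(int64_t size, int64_t contentLength, bool resumable)
{
  if (contentLength == -1 || contentLength == size) {
    return Recovery::Nothing;       // writer finished, or length unknown
  }
  if (resumable) {
    return Recovery::RangeRequest;  // continue from 'size' via Range/If-Range
  }
  return Recovery::FullRefetch;     // non-resumable content is re-read whole
}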
--- a/netwerk/protocol/http/nsHttpChannel.h
+++ b/netwerk/protocol/http/nsHttpChannel.h
@@ -321,17 +321,18 @@ private:
     // and ensure the transaction is updated to use it.
     void UpdateAggregateCallbacks();
 
     static bool HasQueryString(nsHttpRequestHead::ParsedMethodType method, nsIURI * uri);
     bool ResponseWouldVary(nsICacheEntry* entry) const;
     bool MustValidateBasedOnQueryUrl() const;
     bool IsResumable(int64_t partialLen, int64_t contentLength,
                      bool ignoreMissingPartialLen = false) const;
-    nsresult MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength);
+    nsresult MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength,
+                                        bool ignoreMissingPartialLen = false);
     nsresult SetupByteRangeRequest(int64_t partialLen);
     nsresult OpenCacheInputStream(nsICacheEntry* cacheEntry, bool startBuffering,
                                   bool checkingAppCacheEntry);
 
 private:
     nsCOMPtr<nsISupports>             mSecurityInfo;
     nsCOMPtr<nsICancelable>           mProxyRequest;
 
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_cache2-28-concurrent_read_resumable_entry_size_zero.js
@@ -0,0 +1,72 @@
+/*
+
+Checks whether concurrent cache read/write works when the write is interrupted because of max-entry-size limits.
+This test uses a resumable response.
+- with a profile, set max-entry-size to 0
+- first channel makes a request for a resumable response
+- second channel makes a request for the same resource, concurrent read happens
+- first channel sets the predicted data size on the entry, which gets the entry doomed
+- second channel now must engage the interrupted-concurrent-write algorithm and read the content again from the network
+- both channels must deliver full content w/o errors
+
+*/
+
+Cu.import("resource://testing-common/httpd.js");
+Cu.import("resource://gre/modules/Services.jsm");
+
+XPCOMUtils.defineLazyGetter(this, "URL", function() {
+  return "http://localhost:" + httpServer.identity.primaryPort;
+});
+
+var httpServer = null;
+
+function make_channel(url, callback, ctx) {
+  var ios = Cc["@mozilla.org/network/io-service;1"].
+            getService(Ci.nsIIOService);
+  return ios.newChannel(url, "", null);
+}
+
+const responseBody = "response body";
+
+function contentHandler(metadata, response)
+{
+  response.setHeader("Content-Type", "text/plain");
+  response.setHeader("ETag", "Just testing");
+  response.setHeader("Cache-Control", "max-age=99999");
+  response.setHeader("Accept-Ranges", "bytes");
+  response.setHeader("Content-Length", "" + responseBody.length);
+  if (metadata.hasHeader("If-Range")) {
+    response.setStatusLine(metadata.httpVersion, 206, "Partial Content");
+    response.setHeader("Content-Range", "0-12/13");
+  }
+  response.bodyOutputStream.write(responseBody, responseBody.length);
+}
+
+function run_test()
+{
+  do_get_profile();
+
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 0);
+
+  httpServer = new HttpServer();
+  httpServer.registerPathHandler("/content", contentHandler);
+  httpServer.start(-1);
+
+  var chan1 = make_channel(URL + "/content");
+  chan1.asyncOpen(new ChannelListener(firstTimeThrough, null), null);
+  var chan2 = make_channel(URL + "/content");
+  chan2.asyncOpen(new ChannelListener(secondTimeThrough, null), null);
+
+  do_test_pending();
+}
+
+function firstTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBody);
+}
+
+function secondTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBody);
+  httpServer.stop(do_test_finished);
+}
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_cache2-29-concurrent_read_non-resumable_entry_size_zero.js
@@ -0,0 +1,72 @@
+/*
+
+Checks whether concurrent cache read/write works when the write is interrupted because of max-entry-size limits.
+This test uses a non-resumable response.
+- with a profile, set max-entry-size to 0
+- first channel makes a request for a non-resumable (chunked) response
+- second channel makes a request for the same resource, concurrent read is bypassed (non-resumable response)
+- first channel writes the first bytes to the cache output stream, but that fails because of the max-entry-size limit and the entry is doomed
+- cache entry output stream is closed
+- second channel gets the entry, opening the input stream must fail
+- second channel must read the content again from the network
+- both channels must deliver full content w/o errors
+
+*/
+
+Cu.import("resource://testing-common/httpd.js");
+Cu.import("resource://gre/modules/Services.jsm");
+
+XPCOMUtils.defineLazyGetter(this, "URL", function() {
+  return "http://localhost:" + httpServer.identity.primaryPort;
+});
+
+var httpServer = null;
+
+function make_channel(url, callback, ctx) {
+  var ios = Cc["@mozilla.org/network/io-service;1"].
+            getService(Ci.nsIIOService);
+  return ios.newChannel(url, "", null);
+}
+
+const responseBody = "c\r\ndata reached\r\n3\r\nhej\r\n0\r\n\r\n";
+const responseBodyDecoded = "data reachedhej";
+
+function contentHandler(metadata, response)
+{
+  response.seizePower();
+  response.write("HTTP/1.1 200 OK\r\n");
+  response.write("Content-Type: text/plain\r\n");
+  response.write("Transfer-Encoding: chunked\r\n");
+  response.write("\r\n");
+  response.write(responseBody);
+  response.finish();
+}
+
+function run_test()
+{
+  do_get_profile();
+
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 0);
+
+  httpServer = new HttpServer();
+  httpServer.registerPathHandler("/content", contentHandler);
+  httpServer.start(-1);
+
+  var chan1 = make_channel(URL + "/content");
+  chan1.asyncOpen(new ChannelListener(firstTimeThrough, null, CL_ALLOW_UNKNOWN_CL), null);
+  var chan2 = make_channel(URL + "/content");
+  chan2.asyncOpen(new ChannelListener(secondTimeThrough, null, CL_ALLOW_UNKNOWN_CL), null);
+
+  do_test_pending();
+}
+
+function firstTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBodyDecoded);
+}
+
+function secondTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBodyDecoded);
+  httpServer.stop(do_test_finished);
+}
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_partial_response_entry_size_smart_shrink.js
@@ -0,0 +1,79 @@
+/*
+
+  This is only a crash test.  We load partial content and cache it.  Then we shrink the limit
+  on the size of a single cache entry so that the next request for the rest of the content
+  will hit that limit and doom/remove the entry.  We change the limit manually, but in reality
+  it is changed by the cache smart-size logic.
+
+*/
+
+Cu.import("resource://testing-common/httpd.js");
+Cu.import("resource://gre/modules/Services.jsm");
+
+XPCOMUtils.defineLazyGetter(this, "URL", function() {
+  return "http://localhost:" + httpServer.identity.primaryPort;
+});
+
+var httpServer = null;
+
+function make_channel(url, callback, ctx) {
+  var ios = Cc["@mozilla.org/network/io-service;1"].
+            getService(Ci.nsIIOService);
+  return ios.newChannel(url, "", null);
+}
+
+// Build a ~2 kB response: 8 characters doubled 8 times (8 * 2^8 = 2048 bytes)
+var responseBody = "response";
+for (var i = 0; i < 8; ++i) responseBody += responseBody;
+
+function contentHandler(metadata, response)
+{
+  response.setHeader("Content-Type", "text/plain", false);
+  response.setHeader("ETag", "range");
+  response.setHeader("Accept-Ranges", "bytes");
+  response.setHeader("Cache-Control", "max-age=360000");
+
+  if (!metadata.hasHeader("If-Range")) {
+    response.setHeader("Content-Length", responseBody.length + "");
+    response.processAsync();
+    var slice = responseBody.slice(0, 100);
+    response.bodyOutputStream.write(slice, slice.length);
+    response.finish();
+  } else {
+    var slice = responseBody.slice(100);
+    response.setStatusLine(metadata.httpVersion, 206, "Partial Content");
+    response.setHeader("Content-Range",
+      (responseBody.length - slice.length).toString() + "-" +
+      (responseBody.length - 1).toString() + "/" +
+      (responseBody.length).toString());
+
+    response.setHeader("Content-Length", slice.length + "");
+    response.bodyOutputStream.write(slice, slice.length);
+  }
+}
+
+function run_test()
+{
+  httpServer = new HttpServer();
+  httpServer.registerPathHandler("/content", contentHandler);
+  httpServer.start(-1);
+
+  var chan = make_channel(URL + "/content");
+  chan.asyncOpen(new ChannelListener(firstTimeThrough, null, CL_EXPECT_LATE_FAILURE), null);
+  do_test_pending();
+}
+
+function firstTimeThrough(request, buffer)
+{
+  // Change the single cache entry limit to 1 kB.  This emulates a smart-size change.
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 1);
+
+  var chan = make_channel(URL + "/content");
+  chan.asyncOpen(new ChannelListener(finish_test, null), null);
+}
+
+function finish_test(request, buffer)
+{
+  do_check_eq(buffer, responseBody);
+  httpServer.stop(do_test_finished);
+}
--- a/netwerk/test/unit/xpcshell.ini
+++ b/netwerk/test/unit/xpcshell.ini
@@ -64,16 +64,19 @@ skip-if = os == "android"
 [test_cache2-23-read-over-chunk.js]
 [test_cache2-24-exists.js]
 # Bug 675039, comment 6: "The difference is that the memory cache is disabled in Armv6 builds."
 skip-if = os == "android"
 [test_cache2-25-chunk-memory-limit.js]
 [test_cache2-26-no-outputstream-open.js]
 # GC, that this patch is dependent on, doesn't work well on Android.
 skip-if = os == "android"
+[test_cache2-28-concurrent_read_resumable_entry_size_zero.js]
+[test_cache2-29-concurrent_read_non-resumable_entry_size_zero.js]
+[test_partial_response_entry_size_smart_shrink.js]
 [test_304_responses.js]
 # Bug 675039: test hangs on Android-armv6 
 skip-if = os == "android"
 [test_cacheForOfflineUse_no-store.js]
 [test_307_redirect.js]
 [test_NetUtil.js]
 [test_URIs.js]
 [test_aboutblank.js]