Bug 1274818 - Early return from nsHttpChannel::CallOnStartRequest on its second call, r=dragana
author Dragana Damjanovic <dd.mozilla@gmail.com>
Fri, 15 Jul 2016 12:14:00 +0200
changeset 330779 c0503b9ac96a6f83d94c1e9b922d6e783586a51c
parent 330778 cb72efabe34e4397e3c413fa495860b203f1f938
child 330780 61f5cb9b36df99327afc958e6a34b0c8ac876e48
push id 9858
push user jlund@mozilla.com
push date Mon, 01 Aug 2016 14:37:10 +0000
treeherder mozilla-aurora@203106ef6cb6
reviewers dragana
bugs 1274818
milestone 50.0a1
Bug 1274818 - Early return from nsHttpChannel::CallOnStartRequest on its second call, r=dragana
netwerk/protocol/http/nsHttpChannel.cpp
netwerk/protocol/http/nsHttpChannel.h
netwerk/test/unit/test_cache2-29c-concurrent_read_half-interrupted.js
netwerk/test/unit/test_cache2-29d-concurrent_read_half-corrupted-206.js
netwerk/test/unit/test_cache2-29e-non-206-response.js
netwerk/test/unit/xpcshell.ini
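In outline, the patch latches mOnStartRequestCalled and leaves mConcurrentCacheAccess set after an interrupted concurrent cache read, so that a second entry into CallOnStartRequest (e.g. after a failed range request or an external cancel) just returns the channel status instead of notifying the listener again. The following is a minimal standalone sketch of that guard pattern only; it is not Gecko code, and the types, values, and helper names are simplified stand-ins.

// Standalone model of the early-return guard (illustrative; not Gecko code).
#include <cstdio>

typedef int nsresult;                        // stand-in for the real nsresult
static const nsresult NS_OK = 0;
static const nsresult NS_ERROR_CORRUPTED_CONTENT = -1;  // stand-in value

struct Listener {
    void OnStartRequest() { std::puts("listener got OnStartRequest"); }
};

struct Channel {
    Listener mListener;
    bool mOnStartRequestCalled = false;      // latched on the first call
    bool mConcurrentCacheAccess = false;     // left set after interrupted read
    nsresult mStatus = NS_OK;

    nsresult CallOnStartRequest() {
        if (mOnStartRequestCalled) {
            // Second call: only expected after an interrupted concurrent
            // cache read; report the status, do not notify the listener again.
            return mStatus;
        }
        mListener.OnStartRequest();
        mOnStartRequestCalled = true;
        return NS_OK;
    }
};

int main() {
    Channel chan;
    chan.mConcurrentCacheAccess = true;
    chan.CallOnStartRequest();                  // notifies the listener once
    chan.mStatus = NS_ERROR_CORRUPTED_CONTENT;  // e.g. unexpected range bytes
    std::printf("second call returned %d\n", chan.CallOnStartRequest());
    return 0;
}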
--- a/netwerk/protocol/http/nsHttpChannel.cpp
+++ b/netwerk/protocol/http/nsHttpChannel.cpp
@@ -250,17 +250,17 @@ nsHttpChannel::nsHttpChannel()
     , mCustomConditionalRequest(false)
     , mFallingBack(false)
     , mWaitingForRedirectCallback(false)
     , mRequestTimeInitialized(false)
     , mCacheEntryIsReadOnly(false)
     , mCacheEntryIsWriteOnly(false)
     , mCacheEntriesToWaitFor(0)
     , mHasQueryString(0)
-    , mConcurentCacheAccess(0)
+    , mConcurrentCacheAccess(0)
     , mIsPartialRequest(0)
     , mHasAutoRedirectVetoNotifier(0)
     , mPinCacheContent(0)
     , mIsPackagedAppResource(0)
     , mIsCorsPreflightDone(0)
     , mStronglyFramed(false)
     , mPushedStream(nullptr)
     , mLocalBlocklist(false)
@@ -940,16 +940,30 @@ nsHttpChannel::CallOnStartRequest()
     MOZ_RELEASE_ASSERT(!(mRequireCORSPreflight &&
                          mInterceptCache != INTERCEPTED) ||
                        mIsCorsPreflightDone,
                        "CORS preflight must have been finished by the time we "
                        "call OnStartRequest");
 
     nsresult rv;
 
+    if (mOnStartRequestCalled) {
+        // This can only happen when a range request, loading the rest of the
+        // data after an interrupted concurrent cache read, asynchronously
+        // failed, e.g. the response range bytes are not as expected or this
+        // channel has been externally canceled.
+        //
+        // It's legal to bypass CallOnStartRequest for that case since we've
+        // already called OnStartRequest on our listener and also added all
+        // content converters before.
+        MOZ_ASSERT(mConcurrentCacheAccess);
+        LOG(("CallOnStartRequest already invoked before"));
+        return mStatus;
+    }
+
     mTracingEnabled = false;
 
     // Allow consumers to override our content type
     if (mLoadFlags & LOAD_CALL_CONTENT_SNIFFERS) {
         // NOTE: We can have both a txn pump and a cache pump when the cache
         // content is partial. In that case, we need to read from the cache,
         // because that's the one that has the initial contents. If that fails
         // then give the transaction pump a shot.
@@ -1031,16 +1045,17 @@ nsHttpChannel::CallOnStartRequest()
         MOZ_ASSERT(!mOnStartRequestCalled,
                    "We should not call OsStartRequest twice");
         rv = mListener->OnStartRequest(this, mListenerContext);
         mOnStartRequestCalled = true;
         if (NS_FAILED(rv))
             return rv;
     } else {
         NS_WARNING("OnStartRequest skipped because of null listener");
+        mOnStartRequestCalled = true;
     }
 
     // Install stream converter if required.
     // If we use unknownDecoder, stream converters will be installed later (in
     // nsUnknownDecoder) after OnStartRequest is called for the real listener.
     if (!unknownDecoderStarted) {
       nsCOMPtr<nsIStreamListener> listener;
       nsISupports *ctxt = mListenerContext;
@@ -1060,17 +1075,17 @@ nsHttpChannel::CallOnStartRequest()
 
     // if this channel is for a download, close off access to the cache.
     if (mCacheEntry && mChannelIsForDownload) {
         mCacheEntry->AsyncDoom(nullptr);
 
         // We must keep the cache entry in case of partial request.
         // Concurrent access is the same, we need the entry in
         // OnStopRequest.
-        if (!mCachedContentIsPartial && !mConcurentCacheAccess)
+        if (!mCachedContentIsPartial && !mConcurrentCacheAccess)
             CloseCacheEntry(false);
     }
 
     if (!mCanceled) {
         // create offline cache entry if offline caching was requested
         if (ShouldUpdateOfflineCacheEntry()) {
             LOG(("writing to the offline cache"));
             rv = InitOfflineCacheEntry();
@@ -1704,16 +1719,22 @@ nsHttpChannel::ProcessResponse()
         if (NS_SUCCEEDED(mResponseHead->GetHeader(nsHttp::Set_Cookie, cookie))) {
             SetCookie(cookie.get());
         }
         if ((httpStatus < 500) && (httpStatus != 421)) {
             ProcessAltService();
         }
     }
 
+    if (mConcurrentCacheAccess && mCachedContentIsPartial && httpStatus != 206) {
+        LOG(("  only expecting 206 when doing partial request during "
+             "interrupted cache concurrent read"));
+        return NS_ERROR_CORRUPTED_CONTENT;
+    }
+
     // handle unused username and password in url (see bug 232567)
     if (httpStatus != 401 && httpStatus != 407) {
         if (!mAuthRetryPending)
             mAuthProvider->CheckForSuperfluousAuth();
         if (mCanceled)
             return CallOnStartRequest();
 
         // reset the authentication's current continuation state because our
@@ -2732,17 +2753,17 @@ nsHttpChannel::ProcessPartialContent()
              "206 has different total entity size than the content length "
              "of the original partially cached entity.\n", this));
 
         mCacheEntry->AsyncDoom(nullptr);
         Cancel(NS_ERROR_CORRUPTED_CONTENT);
         return CallOnStartRequest();
     }
 
-    if (mConcurentCacheAccess) {
+    if (mConcurrentCacheAccess) {
         // We started to read cached data sooner than its write has been done.
         // But the concurrent write has not finished completely, so we had to
         // do a range request.  Now let the content coming from the network
         // be presented to consumers and also stored to the cache entry.
 
         rv = InstallCacheListener(mLogicalOffset);
         if (NS_FAILED(rv)) return rv;
 
@@ -2773,19 +2794,23 @@ nsHttpChannel::ProcessPartialContent()
 
     rv = UpdateExpirationTime();
     if (NS_FAILED(rv)) return rv;
 
     // notify observers interested in looking at a response that has been
     // merged with any cached headers (http-on-examine-merged-response).
     gHttpHandler->OnExamineMergedResponse(this);
 
-    if (mConcurentCacheAccess) {
+    if (mConcurrentCacheAccess) {
         mCachedContentIsPartial = false;
-        mConcurentCacheAccess = 0;
+        // Leave the mConcurrentCacheAccess flag set; we use it to prevent
+        // a duplicate OnStartRequest call on the target listener in case
+        // this channel is canceled before it gets its OnStartRequest from
+        // the http transaction.
+
         // Now we continue reading the network response.
     } else {
         // the cached content is valid, although incomplete.
         mCachedContentIsValid = true;
         rv = ReadFromCache(false);
     }
 
     return rv;
@@ -3083,17 +3108,17 @@ IsSubRangeRequest(nsHttpRequestHead &aRe
 
 nsresult
 nsHttpChannel::OpenCacheEntry(bool isHttps)
 {
     // Handle correctly mCacheEntriesToWaitFor
     AutoCacheWaitFlags waitFlags(this);
 
     // Drop this flag here
-    mConcurentCacheAccess = 0;
+    mConcurrentCacheAccess = 0;
 
     nsresult rv;
 
     mLoadedFromApplicationCache = false;
     mHasQueryString = HasQueryString(mRequestHead.ParsedMethod(), mURI);
 
     LOG(("nsHttpChannel::OpenCacheEntry [this=%p]", this));
 
@@ -3487,17 +3512,17 @@ nsHttpChannel::OnCacheEntryCheck(nsICach
             // Ignore !(size > 0) from the resumability condition
             if (!IsResumable(size, contentLength, true)) {
                 LOG(("  wait for entry completion, "
                      "response is not resumable"));
 
                 wantCompleteEntry = true;
             }
             else {
-                mConcurentCacheAccess = 1;
+                mConcurrentCacheAccess = 1;
             }
         }
         else if (contentLength != int64_t(-1) && contentLength != size) {
             LOG(("Cached data size does not match the Content-Length header "
                  "[content-length=%lld size=%lld]\n", contentLength, size));
 
             rv = MaybeSetupByteRangeRequest(size, contentLength);
             mCachedContentIsPartial = NS_SUCCEEDED(rv) && mIsPartialRequest;
@@ -3735,26 +3760,26 @@ nsHttpChannel::OnCacheEntryCheck(nsICach
         // the cached content must not be weakly framed or marked immutable
         //
         // do not override conditional headers when consumer has defined its own
         if (!mCachedResponseHead->NoStore() &&
             (mRequestHead.IsGet() || mRequestHead.IsHead()) &&
             !mCustomConditionalRequest && !weaklyFramed && !isImmutable &&
             (mCachedResponseHead->Status() < 400)) {
 
-            if (mConcurentCacheAccess) {
+            if (mConcurrentCacheAccess) {
                 // In case of concurrent read and also validation request we
                 // must wait for the current writer to close the output stream
                 // first.  Otherwise, when the writer's job would have been interrupted
                 // before all the data were downloaded, we'd have to do a range request
                 // which would be a second request in line during this channel's
                 // life-time.  nsHttpChannel is not designed to do that, so rather
                 // turn off concurrent read and wait for entry's completion.
                 // Then only re-validation or range-re-validation request will go out.
-                mConcurentCacheAccess = 0;
+                mConcurrentCacheAccess = 0;
                 // This will cause that OnCacheEntryCheck is called again with the same
                 // entry after the writer is done.
                 wantCompleteEntry = true;
             } else {
                 nsAutoCString val;
                 // Add If-Modified-Since header if a Last-Modified was given
                 // and we are allowed to do this (see bugs 510359 and 269303)
                 if (canAddImsHeader) {
@@ -4524,17 +4549,17 @@ nsHttpChannel::InitCacheEntry()
     mCacheEntry->SetMetaDataElement("strongly-framed", "0");
 
     rv = AddCacheEntryHeaders(mCacheEntry);
     if (NS_FAILED(rv)) return rv;
 
     mInitedCacheEntry = true;
 
     // Don't perform the check when writing (doesn't make sense)
-    mConcurentCacheAccess = 0;
+    mConcurrentCacheAccess = 0;
 
     return NS_OK;
 }
 
 void
 nsHttpChannel::UpdateInhibitPersistentCachingFlag()
 {
     // The no-store directive within the 'Cache-Control:' header indicates
@@ -6238,17 +6263,17 @@ nsHttpChannel::OnStopRequest(nsIRequest 
             if (request == mCachePump) {
                 bool streamDone;
                 status = OnDoneReadingPartialCacheEntry(&streamDone);
                 if (NS_SUCCEEDED(status) && !streamDone)
                     return status;
                 // otherwise, fall through and fire OnStopRequest...
             }
             else if (request == mTransactionPump) {
-                MOZ_ASSERT(mConcurentCacheAccess);
+                MOZ_ASSERT(mConcurrentCacheAccess);
             }
             else
                 NS_NOTREACHED("unexpected request");
         }
        // Do not leave the transaction in a suspended state in error cases.
         if (NS_FAILED(status) && mTransaction)
             gHttpHandler->CancelTransaction(mTransaction, status);
     }
@@ -6341,17 +6366,17 @@ nsHttpChannel::OnStopRequest(nsIRequest 
             mResponseHead && mResponseHead->Status() == 101) {
             gHttpHandler->ConnMgr()->CompleteUpgrade(stickyConn,
                                                      mUpgradeProtocolCallback);
         }
     }
 
     // if needed, check cache entry has all data we expect
     if (mCacheEntry && mCachePump &&
-        mConcurentCacheAccess && contentComplete) {
+        mConcurrentCacheAccess && contentComplete) {
         int64_t size, contentLength;
         nsresult rv = CheckPartial(mCacheEntry, &size, &contentLength);
         if (NS_SUCCEEDED(rv)) {
             if (size == int64_t(-1)) {
                 // mayhemer TODO - we have to restart read from cache here at the size offset
                 MOZ_ASSERT(false);
                 LOG(("  cache entry write is still in progress, but we just "
                      "finished reading the cache entry"));
--- a/netwerk/protocol/http/nsHttpChannel.h
+++ b/netwerk/protocol/http/nsHttpChannel.h
@@ -522,17 +522,17 @@ private:
     uint32_t                          mCacheEntryIsWriteOnly : 1;
     // see WAIT_FOR_* constants above
     uint32_t                          mCacheEntriesToWaitFor : 2;
     uint32_t                          mHasQueryString : 1;
     // whether cache entry data write was in progress during cache entry check
     // when true, after we finish read from cache we must check all data
     // had been loaded from cache. If not, then an error has to be propagated
     // to the consumer.
-    uint32_t                          mConcurentCacheAccess : 1;
+    uint32_t                          mConcurrentCacheAccess : 1;
     // whether the request is set up to be byte-range
     uint32_t                          mIsPartialRequest : 1;
     // true iff there is AutoRedirectVetoNotifier on the stack
     uint32_t                          mHasAutoRedirectVetoNotifier : 1;
     // consumers set this to true to use cache pinning, this has effect
     // only when the channel is in an app context (load context has an appid)
     uint32_t                          mPinCacheContent : 1;
     // Whether fetching the content is meant to be handled by the
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_cache2-29c-concurrent_read_half-interrupted.js
@@ -0,0 +1,91 @@
+/*
+
+Checks that concurrent cache read/write works when the write is interrupted because of max-entry-size limits.
+This is an enhancement of the 29a test; it checks that concurrency is resumed when the first channel is interrupted
+in the middle of reading and the second channel has already consumed some content from the cache entry.
+This test uses a resumable response.
+- with a profile, set max-entry-size to 1 (=1024 bytes)
+- first channel makes a request for a resumable response
+- second channel makes a request for the same resource, concurrent read happens
+- first channel sets the predicted data size on the entry with every chunk; the entry is doomed at 1024 bytes
+- second channel now must engage the interrupted concurrent write algorithm and read the rest of the content from the network
+- both channels must deliver the full content w/o errors
+
+*/
+
+Cu.import("resource://testing-common/httpd.js");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/NetUtil.jsm");
+
+
+XPCOMUtils.defineLazyGetter(this, "URL", function() {
+  return "http://localhost:" + httpServer.identity.primaryPort;
+});
+
+var httpServer = null;
+
+function make_channel(url, callback, ctx) {
+  return NetUtil.newChannel({uri: url, loadUsingSystemPrincipal: true});
+}
+
+// need something bigger than 1024 bytes
+const responseBody =
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
+
+function contentHandler(metadata, response)
+{
+  response.setHeader("Content-Type", "text/plain");
+  response.setHeader("ETag", "Just testing");
+  response.setHeader("Cache-Control", "max-age=99999");
+  response.setHeader("Accept-Ranges", "bytes");
+  response.setHeader("Content-Length", "" + responseBody.length);
+  if (metadata.hasHeader("If-Range")) {
+    response.setStatusLine(metadata.httpVersion, 206, "Partial Content");
+
+    let len = responseBody.length;
+    response.setHeader("Content-Range", "0-" + (len - 1) + "/" + len);
+  }
+  response.bodyOutputStream.write(responseBody, responseBody.length);
+}
+
+function run_test()
+{
+  // Static check
+  do_check_true(responseBody.length > 1024);
+
+  do_get_profile();
+
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 1);
+
+  httpServer = new HttpServer();
+  httpServer.registerPathHandler("/content", contentHandler);
+  httpServer.start(-1);
+
+  var chan1 = make_channel(URL + "/content");
+  chan1.asyncOpen2(new ChannelListener(firstTimeThrough, null));
+  var chan2 = make_channel(URL + "/content");
+  chan2.asyncOpen2(new ChannelListener(secondTimeThrough, null));
+
+  do_test_pending();
+}
+
+function firstTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBody);
+}
+
+function secondTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBody);
+  httpServer.stop(do_test_finished);
+}
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_cache2-29d-concurrent_read_half-corrupted-206.js
@@ -0,0 +1,90 @@
+/*
+
+Checks that concurrent cache read/write works when the write is interrupted because of max-entry-size limits.
+This is an enhancement of the 29c test; it checks that a corrupted 206 response is correctly handled (no crashes or assertion failures).
+This test uses a resumable response.
+- with a profile, set max-entry-size to 1 (=1024 bytes)
+- first channel makes a request for a resumable response
+- second channel makes a request for the same resource, concurrent read happens
+- first channel sets the predicted data size on the entry with every chunk; the entry is doomed at 1024 bytes
+- second channel now must engage the interrupted concurrent write algorithm and read the rest of the content from the network
+- the response to the range request is broken (bad Content-Range header)
+- the first channel must deliver the full content w/o errors
+- the second channel must correctly fail
+
+*/
+
+Cu.import("resource://testing-common/httpd.js");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/NetUtil.jsm");
+
+
+XPCOMUtils.defineLazyGetter(this, "URL", function() {
+  return "http://localhost:" + httpServer.identity.primaryPort;
+});
+
+var httpServer = null;
+
+function make_channel(url, callback, ctx) {
+  return NetUtil.newChannel({uri: url, loadUsingSystemPrincipal: true});
+}
+
+// need something bigger than 1024 bytes
+const responseBody =
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
+
+function contentHandler(metadata, response)
+{
+  response.setHeader("Content-Type", "text/plain");
+  response.setHeader("ETag", "Just testing");
+  response.setHeader("Cache-Control", "max-age=99999");
+  response.setHeader("Accept-Ranges", "bytes");
+  response.setHeader("Content-Length", "" + responseBody.length);
+  if (metadata.hasHeader("If-Range")) {
+    response.setStatusLine(metadata.httpVersion, 206, "Partial Content");
+    // Deliberately broken response header to trigger corrupted content error on the second channel
+    response.setHeader("Content-Range", "0-1/2");
+  }
+  response.bodyOutputStream.write(responseBody, responseBody.length);
+}
+
+function run_test()
+{
+  // Static check
+  do_check_true(responseBody.length > 1024);
+
+  do_get_profile();
+
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 1);
+
+  httpServer = new HttpServer();
+  httpServer.registerPathHandler("/content", contentHandler);
+  httpServer.start(-1);
+
+  var chan1 = make_channel(URL + "/content");
+  chan1.asyncOpen2(new ChannelListener(firstTimeThrough, null));
+  var chan2 = make_channel(URL + "/content");
+  chan2.asyncOpen2(new ChannelListener(secondTimeThrough, null, CL_EXPECT_FAILURE));
+
+  do_test_pending();
+}
+
+function firstTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBody);
+}
+
+function secondTimeThrough(request, buffer)
+{
+  httpServer.stop(do_test_finished);
+}
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_cache2-29e-non-206-response.js
@@ -0,0 +1,85 @@
+/*
+
+Checks that concurrent cache read/write works when the write is interrupted because of max-entry-size limits.
+This is an enhancement of the 29c test; it checks that a plain 200 (non-206) response to the range request is correctly handled (no crashes or assertion failures).
+This test uses a resumable response.
+- with a profile, set max-entry-size to 1 (=1024 bytes)
+- first channel makes a request for a resumable response
+- second channel makes a request for the same resource, concurrent read happens
+- first channel sets the predicted data size on the entry with every chunk; the entry is doomed at 1024 bytes
+- second channel now must engage the interrupted concurrent write algorithm and read the rest of the content from the network
+- the response to the range request is plain 200
+- the first channel must deliver the full content w/o errors
+- the second channel must correctly fail
+
+*/
+
+Cu.import("resource://testing-common/httpd.js");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/NetUtil.jsm");
+
+
+XPCOMUtils.defineLazyGetter(this, "URL", function() {
+  return "http://localhost:" + httpServer.identity.primaryPort;
+});
+
+var httpServer = null;
+
+function make_channel(url, callback, ctx) {
+  return NetUtil.newChannel({uri: url, loadUsingSystemPrincipal: true});
+}
+
+// need something bigger than 1024 bytes
+const responseBody =
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +
+  "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
+
+function contentHandler(metadata, response)
+{
+  response.setHeader("Content-Type", "text/plain");
+  response.setHeader("ETag", "Just testing");
+  response.setHeader("Cache-Control", "max-age=99999");
+  response.setHeader("Accept-Ranges", "bytes");
+  response.setHeader("Content-Length", "" + responseBody.length);
+  response.bodyOutputStream.write(responseBody, responseBody.length);
+}
+
+function run_test()
+{
+  // Static check
+  do_check_true(responseBody.length > 1024);
+
+  do_get_profile();
+
+  Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 1);
+
+  httpServer = new HttpServer();
+  httpServer.registerPathHandler("/content", contentHandler);
+  httpServer.start(-1);
+
+  var chan1 = make_channel(URL + "/content");
+  chan1.asyncOpen2(new ChannelListener(firstTimeThrough, null));
+  var chan2 = make_channel(URL + "/content");
+  chan2.asyncOpen2(new ChannelListener(secondTimeThrough, null, CL_EXPECT_FAILURE));
+
+  do_test_pending();
+}
+
+function firstTimeThrough(request, buffer)
+{
+  do_check_eq(buffer, responseBody);
+}
+
+function secondTimeThrough(request, buffer)
+{
+  httpServer.stop(do_test_finished);
+}
--- a/netwerk/test/unit/xpcshell.ini
+++ b/netwerk/test/unit/xpcshell.ini
@@ -67,16 +67,19 @@ skip-if = os == "android"
 [test_cache2-28-last-access-attrs.js]
 # This test will be fixed in bug 1067931
 skip-if = true
 [test_cache2-28a-OPEN_SECRETLY.js]
 # This test will be fixed in bug 1067931
 skip-if = true
 [test_cache2-29a-concurrent_read_resumable_entry_size_zero.js]
 [test_cache2-29b-concurrent_read_non-resumable_entry_size_zero.js]
+[test_cache2-29c-concurrent_read_half-interrupted.js]
+[test_cache2-29d-concurrent_read_half-corrupted-206.js]
+[test_cache2-29e-non-206-response.js]
 [test_cache2-30a-entry-pinning.js]
 [test_cache2-30b-pinning-storage-clear.js]
 [test_cache2-30c-pinning-deferred-doom.js]
 [test_cache2-30d-pinning-WasEvicted-API.js]
 [test_partial_response_entry_size_smart_shrink.js]
 [test_304_responses.js]
 [test_421.js]
 [test_cacheForOfflineUse_no-store.js]