Bug 651100 - Browser becomes unresponsive while watching embedded Youtube <video>. r=bjarne a=sheriff
author: Michal Novotny <michal.novotny@gmail.com>
date: Fri, 20 May 2011 19:27:11 +0200
changeset: 70027 f7fc3beccae7c00ddf8500fede6c64259669284c
parent: 70026 1a645ea075fc9fba940b2c03baef039834ed7e07
child: 70028 1106d349833d4de10519efa668ec469788ace565
push id: 1
push user: root
push date: Mon, 20 Oct 2014 17:29:22 +0000
reviewers: bjarne, sheriff
bugs: 651100
milestone: 6.0a1
netwerk/cache/nsDiskCacheDevice.cpp
netwerk/cache/nsDiskCacheMap.cpp
netwerk/cache/nsDiskCacheMap.h
netwerk/cache/nsDiskCacheStreams.cpp
netwerk/test/unit/test_bug651100.js
netwerk/test/unit/xpcshell.ini
--- a/netwerk/cache/nsDiskCacheDevice.cpp
+++ b/netwerk/cache/nsDiskCacheDevice.cpp
@@ -858,18 +858,20 @@ nsDiskCacheDevice::OnDataSizeChange(nsCa
 #endif
             nsCacheService::DoomEntry(entry);
         NS_ASSERTION(NS_SUCCEEDED(rv),"DoomEntry() failed.");
         return NS_ERROR_ABORT;
     }
 
     PRUint32  sizeK = ((entry->DataSize() + 0x03FF) >> 10); // round up to next 1k
 
-    NS_ASSERTION(sizeK <= USHRT_MAX, "data size out of range");
-    NS_ASSERTION(newSizeK <= USHRT_MAX, "data size out of range");
+    // In total count we ignore anything over kMaxDataSizeK (bug #651100), so
+    // the target capacity should be calculated the same way.
+    if (sizeK > kMaxDataSizeK) sizeK = kMaxDataSizeK;
+    if (newSizeK > kMaxDataSizeK) newSizeK = kMaxDataSizeK;
 
     // pre-evict entries to make space for new data
     PRUint32  targetCapacity = mCacheCapacity > (newSizeK - sizeK)
                              ? mCacheCapacity - (newSizeK - sizeK)
                              : 0;
     EvictDiskCacheEntries(targetCapacity);
     
     return NS_OK;
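
For illustration only (not part of the patch): a minimal standalone sketch of the size arithmetic in OnDataSizeChange above, assuming kMaxDataSizeK = 0xFFFF as defined later in nsDiskCacheMap.h and capacities measured in KiB. It shows how a byte count is rounded up to whole KiB, clamped, and then used to derive the eviction target.

// Standalone sketch (illustration only): rounding, clamping and target-capacity
// arithmetic as used in nsDiskCacheDevice::OnDataSizeChange. All sizes in KiB.
#include <cstdint>
#include <cstdio>

static const uint32_t kMaxDataSizeK = 0xFFFF;      // 65535 KiB, as in the patch

// Round a byte count up to whole KiB and clamp it to what the 16-bit
// file-size field in the cache record can represent.
static uint32_t ClampedSizeK(uint64_t bytes)
{
    uint64_t sizeK = (bytes + 0x03FF) >> 10;       // round up to next 1k
    return sizeK > kMaxDataSizeK ? kMaxDataSizeK : (uint32_t)sizeK;
}

int main()
{
    const uint32_t cacheCapacityK = 50000;         // same value the test sets via prefs
    uint32_t sizeK    = ClampedSizeK(10ull * 1024 * 1024);  // current data: 10 MiB -> 10240
    uint32_t newSizeK = ClampedSizeK(65ull * 1024 * 1024);  // new data: 65 MiB -> clamped to 65535

    // pre-evict entries to make space for the growth
    uint32_t growthK = newSizeK - sizeK;
    uint32_t targetCapacityK = cacheCapacityK > growthK ? cacheCapacityK - growthK : 0;

    printf("sizeK=%u newSizeK=%u targetCapacityK=%u\n",
           (unsigned)sizeK, (unsigned)newSizeK, (unsigned)targetCapacityK);
    return 0;                                      // prints 10240, 65535, 0
}
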
--- a/netwerk/cache/nsDiskCacheMap.cpp
+++ b/netwerk/cache/nsDiskCacheMap.cpp
@@ -575,18 +575,20 @@ nsDiskCacheMap::EvictRecords( nsDiskCach
     PRUint32  tempRank[kBuckets];
     int       bucketIndex = 0;
     
     // copy eviction rank array
     for (bucketIndex = 0; bucketIndex < kBuckets; ++bucketIndex)
         tempRank[bucketIndex] = mHeader.mEvictionRank[bucketIndex];
 
     // Maximum number of iterations determined by number of records
-    // as a safety limiter for the loop
-    for (int n = 0; n < mHeader.mEntryCount; ++n) {
+    // as a safety limiter for the loop. Use a copy of mHeader.mEntryCount since
+    // the value could decrease if some entry is evicted.
+    PRInt32 entryCount = mHeader.mEntryCount;
+    for (int n = 0; n < entryCount; ++n) {
     
         // find bucket with highest eviction rank
         PRUint32    rank  = 0;
         for (int i = 0; i < kBuckets; ++i) {
             if (rank < tempRank[i]) {
                 rank = tempRank[i];
                 bucketIndex = i;
             }
@@ -824,17 +826,16 @@ nsDiskCacheMap::WriteDiskCacheEntry(nsDi
 
     // Deallocate old storage if necessary    
     if (binding->mRecord.MetaLocationInitialized()) {
         // we have existing storage
 
         if ((binding->mRecord.MetaFile() == 0) &&
             (fileIndex == 0)) {  // keeping the separate file
             // just decrement total
-            // XXX if bindRecord.MetaFileSize == USHRT_MAX, stat the file to see how big it is
             DecrementTotalSize(binding->mRecord.MetaFileSize());
             NS_ASSERTION(binding->mRecord.MetaFileGeneration() == binding->mGeneration,
                          "generations out of sync");
         } else {
             rv = DeleteStorage(&binding->mRecord, nsDiskCache::kMetaData);
             NS_ENSURE_SUCCESS(rv, rv);
         }
     }
@@ -872,24 +873,25 @@ nsDiskCacheMap::WriteDiskCacheEntry(nsDi
             // try next block file
             fileIndex++;
         }
     }
 
     if (fileIndex == 0) {
         // Write entry data to separate file
         PRUint32 metaFileSizeK = ((size + 0x03FF) >> 10); // round up to nearest 1k
-        nsCOMPtr<nsILocalFile> localFile;
-        
-        // XXX handle metaFileSizeK > USHRT_MAX
+        if (metaFileSizeK > kMaxDataSizeK)
+            metaFileSizeK = kMaxDataSizeK;
+
         binding->mRecord.SetMetaFileGeneration(binding->mGeneration);
         binding->mRecord.SetMetaFileSize(metaFileSizeK);
         rv = UpdateRecord(&binding->mRecord);
         NS_ENSURE_SUCCESS(rv, rv);
 
+        nsCOMPtr<nsILocalFile> localFile;
         rv = GetLocalFileForDiskCacheRecord(&binding->mRecord,
                                             nsDiskCache::kMetaData,
                                             PR_TRUE,
                                             getter_AddRefs(localFile));
         NS_ENSURE_SUCCESS(rv, rv);
         
         // open the file
         PRFileDesc * fd;
@@ -899,17 +901,17 @@ nsDiskCacheMap::WriteDiskCacheEntry(nsDi
 
         // write the file
         PRInt32 bytesWritten = PR_Write(fd, diskEntry, size);
         
         PRStatus err = PR_Close(fd);
         if ((bytesWritten != (PRInt32)size) || (err != PR_SUCCESS)) {
             return NS_ERROR_UNEXPECTED;
         }
-        // XXX handle metaFileSizeK == USHRT_MAX
+
         IncrementTotalSize(metaFileSizeK);
     }
 
     return rv;
 }
 
 
 nsresult
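
For illustration only (not part of the patch): a tiny sketch of why EvictRecords now copies mHeader.mEntryCount into a local before looping. If the live field is used as the bound and evictions decrement it while the loop runs, the loop terminates early; a snapshot keeps the safety limit fixed at the original count.

// Sketch: snapshotting a loop bound that the loop body itself shrinks.
#include <cstdint>
#include <cstdio>

struct Header { int32_t mEntryCount; };

int main()
{
    Header header = { 8 };

    // Using the live field as the bound: each simulated eviction lowers the
    // bound, so the loop stops after only half of the intended iterations.
    int liveIterations = 0;
    for (int32_t n = 0; n < header.mEntryCount; ++n) {
        --header.mEntryCount;                     // stands in for an eviction
        ++liveIterations;
    }

    header.mEntryCount = 8;

    // What the patch does: copy the count first and use the copy as the bound.
    int snapshotIterations = 0;
    int32_t entryCount = header.mEntryCount;
    for (int32_t n = 0; n < entryCount; ++n) {
        --header.mEntryCount;
        ++snapshotIterations;
    }

    printf("live bound: %d iterations, snapshot bound: %d iterations\n",
           liveIterations, snapshotIterations);   // 4 vs. 8
    return 0;
}
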
--- a/netwerk/cache/nsDiskCacheMap.h
+++ b/netwerk/cache/nsDiskCacheMap.h
@@ -73,19 +73,19 @@ struct nsDiskCacheEntry;
  *    0000 0000 0000 0000 0000 0000 1111 1111 : eFileGenerationMask
  *
  *  File Selector:
  *      0 = separate file on disk
  *      1 = 256 byte block file
  *      2 = 1k block file
  *      3 = 4k block file
  *
- *  eFileSizeMask note:  Files larger than 64 MiB have zero size stored in the
- *                       location.  The file itself must be examined to determine
- *                       its actual size.  (XXX This is broken in places -darin)
+ *  eFileSizeMask note:  Files larger than 65535 KiB have this limit stored in
+ *                       the location.  The file itself must be examined to
+ *                       determine its actual size if necessary.
  *
  *****************************************************************************/
 
 /*
   We have 3 block files with roughly the same max size (32MB)
     1 - block size 256B, number of blocks 131072
     2 - block size  1kB, number of blocks  32768
     3 - block size  4kB, number of blocks   8192
@@ -94,20 +94,28 @@ struct nsDiskCacheEntry;
 #define SIZE_SHIFT(idx)            (2 * ((idx) - 1))
 #define BLOCK_SIZE_FOR_INDEX(idx)  ((idx) ? (256    << SIZE_SHIFT(idx)) : 0)
 #define BITMAP_SIZE_FOR_INDEX(idx) ((idx) ? (131072 >> SIZE_SHIFT(idx)) : 0)
 
 // Min and max values for the number of records in the DiskCachemap
 #define kMinRecordCount    512
 
 #define kSeparateFile      0
-// #define must always  be <= 65535KB, or overflow. See bug 443067 Comment 8
 #define kMaxDataFileSize   5 * 1024 * 1024  // 5 MB (in bytes) 
 #define kBuckets           (1 << 5)    // must be a power of 2!
 
+// Maximum size in K which can be stored in the location (see eFileSizeMask).
+// Both data and metadata can be larger, but only up to kMaxDataSizeK can be
+// counted into total cache size. I.e. if there are entries where either data or
+// metadata is larger than kMaxDataSizeK, the total cache size will be
+// inaccurate (smaller) than the actual cache size. The alternative is to stat
+// the files to find the real size, which was decided against for performance
+// reasons. See bug #651100 comment #21.
+#define kMaxDataSizeK      0xFFFF
+
 // preallocate up to 1MB of separate cache file
 #define kPreallocateLimit  1 * 1024 * 1024
 
 class nsDiskCacheRecord {
 
 private:
     PRUint32    mHashNumber;
     PRUint32    mEvictionRank;
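
For illustration only (not part of the patch): a sketch of why 0xFFFF KiB is the hard ceiling. The separate-file size lives in a 16-bit field of the record location word (see the eFileSizeMask note above); the exact packing is simplified here, but the point is that 66560 KiB (a 65 MiB entry) cannot be represented, so the patch stores the 65535 KiB limit instead of letting the value truncate.

// Sketch: a 16-bit KiB size field and the kMaxDataSizeK ceiling.
#include <cstdint>
#include <cstdio>

static const uint32_t kMaxDataSizeK = 0xFFFF;     // largest value a 16-bit field holds

static uint16_t StoreFileSizeK(uint32_t sizeK)
{
    // Anything above 65535 KiB (~64 MiB) does not fit; store the limit itself.
    return (uint16_t)(sizeK > kMaxDataSizeK ? kMaxDataSizeK : sizeK);
}

int main()
{
    uint32_t bigEntryK = (65u * 1024 * 1024 + 0x03FF) >> 10;   // 65 MiB -> 66560 KiB

    printf("raw 16-bit truncation: %u KiB\n", (unsigned)(uint16_t)bigEntryK);     // 1024 (silently wrong)
    printf("clamped per the patch: %u KiB\n", (unsigned)StoreFileSizeK(bigEntryK)); // 65535
    return 0;
}
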
--- a/netwerk/cache/nsDiskCacheStreams.cpp
+++ b/netwerk/cache/nsDiskCacheStreams.cpp
@@ -645,18 +645,22 @@ nsDiskCacheStreamIO::Write( const char *
 
 void
 nsDiskCacheStreamIO::UpdateFileSize()
 {
     NS_ASSERTION(mFD, "nsDiskCacheStreamIO::UpdateFileSize should not have been called");
     
     nsDiskCacheRecord * record = &mBinding->mRecord;
     const PRUint32      oldSizeK  = record->DataFileSize();
-    const PRUint32      newSizeK  = (mStreamEnd + 0x03FF) >> 10;
-    
+    PRUint32            newSizeK  = (mStreamEnd + 0x03FF) >> 10;
+
+    // make sure the size won't overflow (bug #651100)
+    if (newSizeK > kMaxDataSizeK)
+        newSizeK = kMaxDataSizeK;
+
     if (newSizeK == oldSizeK)  return;
     
     record->SetDataFileSize(newSizeK);
 
     // update cache size totals
     nsDiskCacheMap * cacheMap = mDevice->CacheMap();
     cacheMap->DecrementTotalSize(oldSizeK);       // decrement old size
     cacheMap->IncrementTotalSize(newSizeK);       // increment new size
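
For illustration only (not part of the patch): a sketch of how clamping in UpdateFileSize keeps the running total honest. Because the record and the total-size bookkeeping both use the same clamped value, the later DecrementTotalSize removes exactly what IncrementTotalSize added, so deleting an oversized entry brings the total back to zero, which is what test_bug651100.js below verifies.

// Sketch: clamped bookkeeping stays balanced across update and delete.
#include <cstdint>
#include <cstdio>

static const uint32_t kMaxDataSizeK = 0xFFFF;

static uint32_t Clamp(uint32_t sizeK)
{
    return sizeK > kMaxDataSizeK ? kMaxDataSizeK : sizeK;
}

struct CacheMapSketch {
    uint32_t totalK = 0;
    void IncrementTotalSize(uint32_t k) { totalK += k; }
    void DecrementTotalSize(uint32_t k) { totalK = totalK > k ? totalK - k : 0; }
};

int main()
{
    CacheMapSketch map;
    uint32_t recordSizeK = 0;                     // value held in the 16-bit record field

    // The stream grows to 65 MiB: UpdateFileSize clamps before touching totals.
    uint32_t newSizeK = Clamp((65u * 1024 * 1024 + 0x03FF) >> 10);   // 65535
    map.DecrementTotalSize(recordSizeK);          // remove old (clamped) size
    map.IncrementTotalSize(newSizeK);             // add new (clamped) size
    recordSizeK = newSizeK;

    // The entry is later doomed and deleted: the same clamped value comes back
    // out of the record, so the total returns to zero instead of drifting.
    map.DecrementTotalSize(recordSizeK);
    printf("total after delete: %u KiB\n", (unsigned)map.totalK);    // 0
    return 0;
}
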
new file mode 100644
--- /dev/null
+++ b/netwerk/test/unit/test_bug651100.js
@@ -0,0 +1,166 @@
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+const Cr = Components.results;
+
+var _CSvc;
+function get_cache_service() {
+  if (_CSvc)
+    return _CSvc;
+
+  return _CSvc = Cc["@mozilla.org/network/cache-service;1"].
+                 getService(Ci.nsICacheService);
+}
+
+function get_ostream_for_entry(key, asFile, entryRef)
+{
+  var cache = get_cache_service();
+  var session = cache.createSession(
+                  "HTTP",
+                  asFile ? Ci.nsICache.STORE_ON_DISK_AS_FILE
+                         : Ci.nsICache.STORE_ON_DISK,
+                  Ci.nsICache.STREAM_BASED);
+  var cacheEntry = session.openCacheEntry(key, Ci.nsICache.ACCESS_WRITE, true);
+  var oStream = cacheEntry.openOutputStream(0);
+  entryRef.value = cacheEntry;
+  return oStream;
+}
+
+function gen_1MiB()
+{
+  var i;
+  var data="x";
+  for (i=0 ; i < 20 ; i++)
+    data+=data;
+  return data;
+}
+
+function write_and_check(str, data, len)
+{
+  var written = str.write(data, len);
+  if (written != len) {
+    do_throw("str.write has not written all data!\n" +
+             "  Expected: " + len  + "\n" +
+             "  Actual: " + written + "\n");
+  }
+}
+
+function write_big_datafile()
+{
+  var entry = {};
+  var oStr = get_ostream_for_entry("bigdata", true, entry);
+  var data = gen_1MiB();
+
+  // >64MiB
+  var i;
+  for (i=0 ; i<65 ; i++)
+    write_and_check(oStr, data, data.length);
+
+  oStr.close();
+  entry.value.close();
+}
+
+function write_and_doom_big_metafile()
+{
+  var entry = {};
+  var oStr = get_ostream_for_entry("bigmetadata", true, entry);
+  var data = gen_1MiB();
+
+  // >64MiB
+  var i;
+  for (i=0 ; i<65 ; i++)
+    entry.value.setMetaDataElement("metadata_"+i, data);
+
+  write_and_check(oStr, data, data.length);
+
+  oStr.close();
+  entry.value.doom();
+  entry.value.close();
+}
+
+function check_cache_size() {
+  var diskDeviceVisited;
+  var visitor = {
+    visitDevice: function (deviceID, deviceInfo) {
+      if (deviceID == "disk") {
+        diskDeviceVisited = true;
+        do_check_eq(deviceInfo.totalSize, 0)
+      }
+      return false;
+    },
+    visitEntry: function (deviceID, entryInfo) {
+      do_throw("unexpected call to visitEntry");
+    }
+  };
+
+  var cs = get_cache_service();
+  diskDeviceVisited = false;
+  cs.visitEntries(visitor);
+  do_check_true(diskDeviceVisited);
+}
+
+var _continue_with = null;
+function sync_with_cache_IO_thread(aFunc)
+{
+  do_check_eq(sync_with_cache_IO_thread_cb.listener, null);
+  sync_with_cache_IO_thread_cb.listener = aFunc;
+  var cache = get_cache_service();
+  var session = cache.createSession(
+                  "HTTP",
+                  Ci.nsICache.STORE_ON_DISK,
+                  Ci.nsICache.STREAM_BASED);
+  var cacheEntry = session.asyncOpenCacheEntry(
+                     "nonexistententry",
+                     Ci.nsICache.ACCESS_READ,
+                     sync_with_cache_IO_thread_cb);
+}
+
+var sync_with_cache_IO_thread_cb = {
+  listener: null,
+
+  onCacheEntryAvailable: function oCEA(descriptor, accessGranted, status) {
+    do_check_eq(status, Cr.NS_ERROR_CACHE_KEY_NOT_FOUND);
+    cb = this.listener;
+    this.listener = null;
+    do_timeout(0, cb);
+  }
+};
+
+
+function run_test() {
+  var prefBranch = Cc["@mozilla.org/preferences-service;1"].
+                     getService(Ci.nsIPrefBranch);
+  prefBranch.setIntPref("browser.cache.disk.capacity", 50000);
+
+  do_get_profile();
+
+  // clear the cache
+  get_cache_service().evictEntries(Ci.nsICache.STORE_ANYWHERE);
+
+  // write an entry with data > 64MiB
+  write_big_datafile();
+
+  // DoomEntry() is called when the cache is full, but the data is really
+  // deleted (and the cache size updated) on the background thread when the
+  // entry is deactivated. We need to sync with the cache IO thread before we
+  // continue with the test.
+  sync_with_cache_IO_thread(run_test_2);
+
+  do_test_pending();
+}
+
+function run_test_2()
+{
+  check_cache_size();
+
+  // write an entry with metadata > 64MiB
+  write_and_doom_big_metafile();
+
+  sync_with_cache_IO_thread(run_test_3);
+}
+
+function run_test_3()
+{
+  check_cache_size();
+
+  do_test_finished();
+}
--- a/netwerk/test/unit/xpcshell.ini
+++ b/netwerk/test/unit/xpcshell.ini
@@ -52,16 +52,17 @@ tail =
 [test_bug543805.js]
 [test_bug553970.js]
 [test_bug561276.js]
 [test_bug580508.js]
 [test_bug586908.js]
 [test_bug588389.js]
 [test_bug596443.js]
 [test_bug652761.js]
+[test_bug651100.js]
 [test_cacheflags.js]
 [test_channel_close.js]
 [test_compareURIs.js]
 [test_content_sniffer.js]
 [test_cookie_header.js]
 [test_data_protocol.js]
 [test_dns_service.js]
 [test_event_sink.js]