Bug 891238. Don't resume reading from a paused connection if we still have plenty of data cached. This could save power. r=cpearce
author Robert O'Callahan <robert@ocallahan.org>
Fri, 19 Jul 2013 21:00:19 +1200
changeset 139365 8e995398c00adf7b3a0893332c00c44d875b964c
parent 139364 9e7d1f4496ffec08ad3a0cd72df54c8f320b6fed
child 139366 70669c459d460ed94912e987df77b38ff24a2961
push id 1
push user root
push date Mon, 20 Oct 2014 17:29:22 +0000
reviewers cpearce
bugs 891238
milestone 25.0a1
Bug 891238. Don't resume reading from a paused connection if we still have plenty of data cached. This could save power. r=cpearce
content/media/MediaCache.cpp
--- a/content/media/MediaCache.cpp
+++ b/content/media/MediaCache.cpp
@@ -50,16 +50,20 @@ static const uint32_t REPLAY_PENALTY_FAC
 
 // When looking for a reusable block, scan forward this many blocks
 // from the desired "best" block location to look for free blocks,
 // before we resort to scanning the whole cache. The idea is to try to
 // store runs of stream blocks close-to-consecutively in the cache if we
 // can.
 static const uint32_t FREE_BLOCK_SCAN_LIMIT = 16;
 
+// Try to save power by not resuming paused reads if the stream won't need new
+// data within this time interval in the future
+static const uint32_t CACHE_POWERSAVE_WAKEUP_LOW_THRESHOLD_MS = 10000;
+
 #ifdef DEBUG
 // Turn this on to do very expensive cache state validation
 // #define DEBUG_VERIFY_CACHE
 #endif
 
 // There is at most one media cache (although that could quite easily be
 // relaxed if we wanted to manage multiple caches with independent
 // size limits).
@@ -1014,40 +1018,44 @@ MediaCache::Update()
 #ifdef DEBUG
     mInUpdate = true;
 #endif
 
     int32_t maxBlocks = GetMaxBlocks();
     TimeStamp now = TimeStamp::Now();
 
     int32_t freeBlockCount = mFreeBlocks.GetCount();
-    // Try to trim back the cache to its desired maximum size. The cache may
-    // have overflowed simply due to data being received when we have
-    // no blocks in the main part of the cache that are free or lower
-    // priority than the new data. The cache can also be overflowing because
-    // the media.cache_size preference was reduced.
-    // First, figure out what the least valuable block in the cache overflow
-    // is. We don't want to replace any blocks in the main part of the
-    // cache whose expected time of next use is earlier or equal to that.
-    // If we allow that, we can effectively end up discarding overflowing
-    // blocks (by moving an overflowing block to the main part of the cache,
-    // and then overwriting it with another overflowing block), and we try
-    // to avoid that since it requires HTTP seeks.
-    // We also use this loop to eliminate overflowing blocks from
-    // freeBlockCount.
     TimeDuration latestPredictedUseForOverflow = 0;
-    for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks;
-         --blockIndex) {
-      if (IsBlockFree(blockIndex)) {
-        // Don't count overflowing free blocks in our free block count
-        --freeBlockCount;
-        continue;
+    if (mIndex.Length() > uint32_t(maxBlocks)) {
+      // Try to trim back the cache to its desired maximum size. The cache may
+      // have overflowed simply due to data being received when we have
+      // no blocks in the main part of the cache that are free or lower
+      // priority than the new data. The cache can also be overflowing because
+      // the media.cache_size preference was reduced.
+      // First, figure out what the least valuable block in the cache overflow
+      // is. We don't want to replace any blocks in the main part of the
+      // cache whose expected time of next use is earlier or equal to that.
+      // If we allow that, we can effectively end up discarding overflowing
+      // blocks (by moving an overflowing block to the main part of the cache,
+      // and then overwriting it with another overflowing block), and we try
+      // to avoid that since it requires HTTP seeks.
+      // We also use this loop to eliminate overflowing blocks from
+      // freeBlockCount.
+      for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks;
+           --blockIndex) {
+        if (IsBlockFree(blockIndex)) {
+          // Don't count overflowing free blocks in our free block count
+          --freeBlockCount;
+          continue;
+        }
+        TimeDuration predictedUse = PredictNextUse(now, blockIndex);
+        latestPredictedUseForOverflow = std::max(latestPredictedUseForOverflow, predictedUse);
       }
-      TimeDuration predictedUse = PredictNextUse(now, blockIndex);
-      latestPredictedUseForOverflow = std::max(latestPredictedUseForOverflow, predictedUse);
+    } else {
+      freeBlockCount += maxBlocks - mIndex.Length();
     }
 
     // Now try to move overflowing blocks to the main part of the cache.
     for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks;
          --blockIndex) {
       if (IsBlockFree(blockIndex))
         continue;
 
@@ -1185,31 +1193,39 @@ MediaCache::Update()
         // free). So stop reading ahead now.
         LOG(PR_LOG_DEBUG, ("Stream %p throttling non-seekable readahead", stream));
         enableReading = false;
       } else if (mIndex.Length() > uint32_t(maxBlocks)) {
         // We're in the process of bringing the cache size back to the
         // desired limit, so don't bring in more data yet
         LOG(PR_LOG_DEBUG, ("Stream %p throttling to reduce cache size", stream));
         enableReading = false;
-      } else if (freeBlockCount > 0 || mIndex.Length() < uint32_t(maxBlocks)) {
-        // Free blocks in the cache, so keep reading
-        LOG(PR_LOG_DEBUG, ("Stream %p reading since there are free blocks", stream));
-        enableReading = true;
-      } else if (latestNextUse <= TimeDuration(0)) {
-        // No reusable blocks, so can't read anything
-        LOG(PR_LOG_DEBUG, ("Stream %p throttling due to no reusable blocks", stream));
-        enableReading = false;
       } else {
-        // Read ahead if the data we expect to read is more valuable than
-        // the least valuable block in the main part of the cache
         TimeDuration predictedNewDataUse = PredictNextUseForIncomingData(stream);
-        LOG(PR_LOG_DEBUG, ("Stream %p predict next data in %f, current worst block is %f",
-            stream, predictedNewDataUse.ToSeconds(), latestNextUse.ToSeconds()));
-        enableReading = predictedNewDataUse < latestNextUse;
+
+        if (stream->mCacheSuspended &&
+            predictedNewDataUse.ToMilliseconds() > CACHE_POWERSAVE_WAKEUP_LOW_THRESHOLD_MS) {
+          // Don't need data for a while, so don't bother waking up the stream
+          LOG(PR_LOG_DEBUG, ("Stream %p avoiding wakeup since more data is not needed", stream));
+          enableReading = false;
+        } else if (freeBlockCount > 0) {
+          // Free blocks in the cache, so keep reading
+          LOG(PR_LOG_DEBUG, ("Stream %p reading since there are free blocks", stream));
+          enableReading = true;
+        } else if (latestNextUse <= TimeDuration(0)) {
+          // No reusable blocks, so can't read anything
+          LOG(PR_LOG_DEBUG, ("Stream %p throttling due to no reusable blocks", stream));
+          enableReading = false;
+        } else {
+          // Read ahead if the data we expect to read is more valuable than
+          // the least valuable block in the main part of the cache
+          LOG(PR_LOG_DEBUG, ("Stream %p predict next data in %f, current worst block is %f",
+              stream, predictedNewDataUse.ToSeconds(), latestNextUse.ToSeconds()));
+          enableReading = predictedNewDataUse < latestNextUse;
+        }
       }
 
       if (enableReading) {
         for (uint32_t j = 0; j < i; ++j) {
           MediaCacheStream* other = mStreams[j];
           if (other->mResourceID == stream->mResourceID &&
               !other->mClient->IsSuspended() &&
               other->mChannelOffset/BLOCK_SIZE == desiredOffset/BLOCK_SIZE) {