Bug 1350177 - Refactor a thread-safe ExpirationTracker for the use in SurfaceCache. r=froydnj, r=mystor, r=aosmond, a=gchang
author: Bevis Tseng <btseng@mozilla.com>
Mon, 10 Apr 2017 15:08:42 +0800
changeset 395804 c4eb93e5c5d4aa0ac3377e9c6b77993498d17acf
parent 395803 a3d016aadda6f2eef67f12e2f707d55d2242748a
child 395805 c189e40723ec806949e569d1c003df65da34d2cb
push id: 1468
push user: asasaki@mozilla.com
push date: Mon, 05 Jun 2017 19:31:07 +0000
treeherder: mozilla-release@0641fc6ee9d1
reviewers: froydnj, mystor, aosmond, gchang
bugs: 1350177
milestone: 54.0a2
docshell/shistory/nsSHEntryShared.h
image/SurfaceCache.cpp
xpcom/ds/nsExpirationTracker.h
--- a/docshell/shistory/nsSHEntryShared.h
+++ b/docshell/shistory/nsSHEntryShared.h
@@ -41,24 +41,24 @@ public:
   static void Shutdown();
 
   nsSHEntryShared();
 
   NS_DECL_ISUPPORTS
   NS_DECL_NSIMUTATIONOBSERVER
   NS_DECL_NSIBFCACHEENTRY
 
+  nsExpirationState *GetExpirationState() { return &mExpirationState; }
+
 private:
   ~nsSHEntryShared();
 
   friend class nsSHEntry;
 
   friend class HistoryTracker;
-  friend class nsExpirationTracker<nsSHEntryShared, 3>;
-  nsExpirationState *GetExpirationState() { return &mExpirationState; }
 
   static already_AddRefed<nsSHEntryShared> Duplicate(nsSHEntryShared* aEntry);
 
   void RemoveFromExpirationTracker();
   void Expire();
   nsresult SyncPresentationState();
   void DropPresentationState();
 
--- a/image/SurfaceCache.cpp
+++ b/image/SurfaceCache.cpp
@@ -418,30 +418,32 @@ private:
 
     UnregisterWeakMemoryReporter(this);
   }
 
 public:
   void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
 
   InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider,
-                       bool                       aSetAvailable)
+                       bool                       aSetAvailable,
+                       const StaticMutexAutoLock& aAutoLock)
   {
     // If this is a duplicate surface, refuse to replace the original.
     // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
     // twice. We'll make this more efficient in bug 1185137.
     LookupResult result = Lookup(aProvider->GetImageKey(),
                                  aProvider->GetSurfaceKey(),
+                                 aAutoLock,
                                  /* aMarkUsed = */ false);
     if (MOZ_UNLIKELY(result)) {
       return InsertOutcome::FAILURE_ALREADY_PRESENT;
     }
 
     if (result.Type() == MatchType::PENDING) {
-      RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey());
+      RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock);
     }
 
     MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND ||
                result.Type() == MatchType::PENDING,
                "A LookupResult with no surface should be NOT_FOUND or PENDING");
 
     // If this is bigger than we can hold after discarding everything we can,
     // refuse to cache it.
@@ -451,17 +453,17 @@ public:
       return InsertOutcome::FAILURE;
     }
 
     // Remove elements in order of cost until we can fit this in the cache. Note
     // that locked surfaces aren't in mCosts, so we never remove them here.
     while (cost > mAvailableCost) {
       MOZ_ASSERT(!mCosts.IsEmpty(),
                  "Removed everything and it still won't fit");
-      Remove(mCosts.LastElement().Surface());
+      Remove(mCosts.LastElement().Surface(), aAutoLock);
     }
 
     // Locate the appropriate per-image cache. If there's not an existing cache
     // for this image, create it.
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aProvider->GetImageKey());
     if (!cache) {
       cache = new ImageSurfaceCache;
       mImageCaches.Put(aProvider->GetImageKey(), cache);
@@ -483,75 +485,78 @@ public:
       if (!surface->IsLocked()) {
         return InsertOutcome::FAILURE;
       }
     }
 
     // Insert.
     MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost");
     cache->Insert(surface);
-    StartTracking(surface);
+    StartTracking(surface, aAutoLock);
 
     return InsertOutcome::SUCCESS;
   }
 
-  void Remove(NotNull<CachedSurface*> aSurface)
+  void Remove(NotNull<CachedSurface*> aSurface,
+              const StaticMutexAutoLock& aAutoLock)
   {
     ImageKey imageKey = aSurface->GetImageKey();
 
     RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
     MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache");
 
     // If the surface was not a placeholder, tell its image that we discarded it.
     if (!aSurface->IsPlaceholder()) {
       static_cast<Image*>(imageKey)->OnSurfaceDiscarded();
     }
 
-    StopTracking(aSurface);
+    StopTracking(aSurface, aAutoLock);
     cache->Remove(aSurface);
 
     // Remove the per-image cache if it's unneeded now. (Keep it if the image is
     // locked, since the per-image cache is where we store that state.)
     if (cache->IsEmpty() && !cache->IsLocked()) {
       mImageCaches.Remove(imageKey);
     }
   }
 
-  void StartTracking(NotNull<CachedSurface*> aSurface)
+  void StartTracking(NotNull<CachedSurface*> aSurface,
+                     const StaticMutexAutoLock& aAutoLock)
   {
     CostEntry costEntry = aSurface->GetCostEntry();
     MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost,
                "Cost too large and the caller didn't catch it");
 
     mAvailableCost -= costEntry.GetCost();
 
     if (aSurface->IsLocked()) {
       mLockedCost += costEntry.GetCost();
       MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?");
     } else {
       mCosts.InsertElementSorted(costEntry);
       // This may fail during XPCOM shutdown, so we need to ensure the object is
       // tracked before calling RemoveObject in StopTracking.
-      mExpirationTracker.AddObject(aSurface);
+      mExpirationTracker.AddObjectLocked(aSurface, aAutoLock);
     }
   }
 
-  void StopTracking(NotNull<CachedSurface*> aSurface)
+  void StopTracking(NotNull<CachedSurface*> aSurface,
+                    const StaticMutexAutoLock& aAutoLock)
   {
     CostEntry costEntry = aSurface->GetCostEntry();
 
     if (aSurface->IsLocked()) {
       MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance");
       mLockedCost -= costEntry.GetCost();
       // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n).
       MOZ_ASSERT(!mCosts.Contains(costEntry),
                  "Shouldn't have a cost entry for a locked surface");
     } else {
       if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) {
-        mExpirationTracker.RemoveObject(aSurface);
+        mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock);
       } else {
         // Our call to AddObject must have failed in StartTracking; most likely
         // we're in XPCOM shutdown right now.
         NS_ASSERTION(ShutdownTracker::ShutdownHasStarted(),
                      "Not expiration-tracking an unlocked surface!");
       }
 
       DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
@@ -560,16 +565,17 @@ public:
 
     mAvailableCost += costEntry.GetCost();
     MOZ_ASSERT(mAvailableCost <= mMaxCost,
                "More available cost than we started with");
   }
 
   LookupResult Lookup(const ImageKey    aImageKey,
                       const SurfaceKey& aSurfaceKey,
+                      const StaticMutexAutoLock& aAutoLock,
                       bool aMarkUsed = true)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       // No cached surfaces for this image.
       return LookupResult(MatchType::NOT_FOUND);
     }
 
@@ -582,31 +588,32 @@ public:
     if (surface->IsPlaceholder()) {
       return LookupResult(MatchType::PENDING);
     }
 
     DrawableSurface drawableSurface = surface->GetDrawableSurface();
     if (!drawableSurface) {
       // The surface was released by the operating system. Remove the cache
       // entry as well.
-      Remove(WrapNotNull(surface));
+      Remove(WrapNotNull(surface), aAutoLock);
       return LookupResult(MatchType::NOT_FOUND);
     }
 
     if (aMarkUsed) {
-      MarkUsed(WrapNotNull(surface), WrapNotNull(cache));
+      MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock);
     }
 
     MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
                "Lookup() not returning an exact match?");
     return LookupResult(Move(drawableSurface), MatchType::EXACT);
   }
 
   LookupResult LookupBestMatch(const ImageKey         aImageKey,
-                               const SurfaceKey&      aSurfaceKey)
+                               const SurfaceKey&      aSurfaceKey,
+                               const StaticMutexAutoLock& aAutoLock)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       // No cached surfaces for this image.
       return LookupResult(MatchType::NOT_FOUND);
     }
 
     // Repeatedly look up the best match, trying again if the resulting surface
@@ -627,165 +634,167 @@ public:
 
       drawableSurface = surface->GetDrawableSurface();
       if (drawableSurface) {
         break;
       }
 
       // The surface was released by the operating system. Remove the cache
       // entry as well.
-      Remove(WrapNotNull(surface));
+      Remove(WrapNotNull(surface), aAutoLock);
     }
 
     MOZ_ASSERT_IF(matchType == MatchType::EXACT,
                   surface->GetSurfaceKey() == aSurfaceKey);
     MOZ_ASSERT_IF(matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND ||
                   matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING,
       surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() &&
       surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() &&
       surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags());
 
     if (matchType == MatchType::EXACT) {
-      MarkUsed(WrapNotNull(surface), WrapNotNull(cache));
+      MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock);
     }
 
     return LookupResult(Move(drawableSurface), matchType);
   }
 
   bool CanHold(const Cost aCost) const
   {
     return aCost <= mMaxCost;
   }
 
   size_t MaximumCapacity() const
   {
     return size_t(mMaxCost);
   }
 
-  void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider)
+  void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
+                        const StaticMutexAutoLock& aAutoLock)
   {
     if (!aProvider->Availability().IsPlaceholder()) {
       MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
       return;
     }
 
     // Reinsert the provider, requesting that Insert() mark it available. This
     // may or may not succeed, depending on whether some other decoder has
     // beaten us to the punch and inserted a non-placeholder version of this
     // surface first, but it's fine either way.
     // XXX(seth): This could be implemented more efficiently; we should be able
     // to just update our data structures without reinserting.
-    Insert(aProvider, /* aSetAvailable = */ true);
+    Insert(aProvider, /* aSetAvailable = */ true, aAutoLock);
   }
 
   void LockImage(const ImageKey aImageKey)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       cache = new ImageSurfaceCache;
       mImageCaches.Put(aImageKey, cache);
     }
 
     cache->SetLocked(true);
 
     // We don't relock this image's existing surfaces right away; instead, the
     // image should arrange for Lookup() to touch them if they are still useful.
   }
 
-  void UnlockImage(const ImageKey aImageKey)
+  void UnlockImage(const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache || !cache->IsLocked()) {
       return;  // Already unlocked.
     }
 
     cache->SetLocked(false);
-    DoUnlockSurfaces(WrapNotNull(cache));
+    DoUnlockSurfaces(WrapNotNull(cache), aAutoLock);
   }
 
-  void UnlockEntries(const ImageKey aImageKey)
+  void UnlockEntries(const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache || !cache->IsLocked()) {
       return;  // Already unlocked.
     }
 
     // (Note that we *don't* unlock the per-image cache here; that's the
     // difference between this and UnlockImage.)
-    DoUnlockSurfaces(WrapNotNull(cache));
+    DoUnlockSurfaces(WrapNotNull(cache), aAutoLock);
   }
 
-  void RemoveImage(const ImageKey aImageKey)
+  void RemoveImage(const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       return;  // No cached surfaces for this image, so nothing to do.
     }
 
     // Discard all of the cached surfaces for this image.
     // XXX(seth): This is O(n^2) since for each item in the cache we are
     // removing an element from the costs array. Since n is expected to be
     // small, performance should be good, but if usage patterns change we should
     // change the data structure used for mCosts.
     for (auto iter = cache->ConstIter(); !iter.Done(); iter.Next()) {
-      StopTracking(WrapNotNull(iter.UserData()));
+      StopTracking(WrapNotNull(iter.UserData()), aAutoLock);
     }
 
     // The per-image cache isn't needed anymore, so remove it as well.
     // This implicitly unlocks the image if it was locked.
     mImageCaches.Remove(aImageKey);
   }
 
-  void DiscardAll()
+  void DiscardAll(const StaticMutexAutoLock& aAutoLock)
   {
     // Remove in order of cost because mCosts is an array and the other data
     // structures are all hash tables. Note that locked surfaces are not
     // removed, since they aren't present in mCosts.
     while (!mCosts.IsEmpty()) {
-      Remove(mCosts.LastElement().Surface());
+      Remove(mCosts.LastElement().Surface(), aAutoLock);
     }
   }
 
-  void DiscardForMemoryPressure()
+  void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock)
   {
     // Compute our discardable cost. Since locked surfaces aren't discardable,
     // we exclude them.
     const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost;
     MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up");
 
     // Our target is to raise our available cost by (1 / mDiscardFactor) of our
     // discardable cost - in other words, we want to end up with about
     // (discardableCost / mDiscardFactor) fewer bytes stored in the surface
     // cache after we're done.
     const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor);
 
     if (targetCost > mMaxCost - mLockedCost) {
       MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard");
-      DiscardAll();
+      DiscardAll(aAutoLock);
       return;
     }
 
     // Discard surfaces until we've reduced our cost to our target cost.
     while (mAvailableCost < targetCost) {
       MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done");
-      Remove(mCosts.LastElement().Surface());
+      Remove(mCosts.LastElement().Surface(), aAutoLock);
     }
   }
 
-  void LockSurface(NotNull<CachedSurface*> aSurface)
+  void LockSurface(NotNull<CachedSurface*> aSurface,
+                   const StaticMutexAutoLock& aAutoLock)
   {
     if (aSurface->IsPlaceholder() || aSurface->IsLocked()) {
       return;
     }
 
-    StopTracking(aSurface);
+    StopTracking(aSurface, aAutoLock);
 
     // Lock the surface. This can fail.
     aSurface->SetLocked(true);
-    StartTracking(aSurface);
+    StartTracking(aSurface, aAutoLock);
   }
 
   NS_IMETHOD
   CollectReports(nsIHandleReportCallback* aHandleReport,
                  nsISupports*             aData,
                  bool                     aAnonymize) override
   {
     StaticMutexAutoLock lock(sInstanceMutex);
@@ -842,83 +851,92 @@ private:
   // means that the result would be meaningless: another thread could insert a
   // surface or lock an image at any time.
   bool CanHoldAfterDiscarding(const Cost aCost) const
   {
     return aCost <= mMaxCost - mLockedCost;
   }
 
   void MarkUsed(NotNull<CachedSurface*> aSurface,
-                NotNull<ImageSurfaceCache*> aCache)
+                NotNull<ImageSurfaceCache*> aCache,
+                const StaticMutexAutoLock& aAutoLock)
   {
     if (aCache->IsLocked()) {
-      LockSurface(aSurface);
+      LockSurface(aSurface, aAutoLock);
     } else {
-      mExpirationTracker.MarkUsed(aSurface);
+      mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock);
     }
   }
 
-  void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache)
+  void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache,
+                        const StaticMutexAutoLock& aAutoLock)
   {
     // Unlock all the surfaces the per-image cache is holding.
     for (auto iter = aCache->ConstIter(); !iter.Done(); iter.Next()) {
       NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());
       if (surface->IsPlaceholder() || !surface->IsLocked()) {
         continue;
       }
-      StopTracking(surface);
+      StopTracking(surface, aAutoLock);
       surface->SetLocked(false);
-      StartTracking(surface);
+      StartTracking(surface, aAutoLock);
     }
   }
 
   void RemoveEntry(const ImageKey    aImageKey,
-                   const SurfaceKey& aSurfaceKey)
+                   const SurfaceKey& aSurfaceKey,
+                   const StaticMutexAutoLock& aAutoLock)
   {
     RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
     if (!cache) {
       return;  // No cached surfaces for this image.
     }
 
     RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey);
     if (!surface) {
       return;  // Lookup in the per-image cache missed.
     }
 
-    Remove(WrapNotNull(surface));
+    Remove(WrapNotNull(surface), aAutoLock);
   }
 
-  struct SurfaceTracker : public nsExpirationTracker<CachedSurface, 2>
+  struct SurfaceTracker : public ExpirationTrackerImpl<CachedSurface, 2,
+                                                       StaticMutex,
+                                                       StaticMutexAutoLock>
   {
     explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS)
-      : nsExpirationTracker<CachedSurface, 2>(aSurfaceCacheExpirationTimeMS,
-                                              "SurfaceTracker")
+      : ExpirationTrackerImpl<CachedSurface, 2,
+                              StaticMutex, StaticMutexAutoLock>(
+          aSurfaceCacheExpirationTimeMS, "SurfaceTracker")
     { }
 
   protected:
-    virtual void NotifyExpired(CachedSurface* aSurface) override
+    void NotifyExpiredLocked(CachedSurface* aSurface,
+                             const StaticMutexAutoLock& aAutoLock) override
     {
-      StaticMutexAutoLock lock(sInstanceMutex);
-      if (sInstance) {
-        sInstance->Remove(WrapNotNull(aSurface));
-      }
+      sInstance->Remove(WrapNotNull(aSurface), aAutoLock);
+    }
+
+    StaticMutex& GetMutex() override
+    {
+      return sInstanceMutex;
     }
   };
 
   struct MemoryPressureObserver : public nsIObserver
   {
     NS_DECL_ISUPPORTS
 
     NS_IMETHOD Observe(nsISupports*,
                        const char* aTopic,
                        const char16_t*) override
     {
       StaticMutexAutoLock lock(sInstanceMutex);
       if (sInstance && strcmp(aTopic, "memory-pressure") == 0) {
-        sInstance->DiscardForMemoryPressure();
+        sInstance->DiscardForMemoryPressure(lock);
       }
       return NS_OK;
     }
 
   private:
     virtual ~MemoryPressureObserver() { }
   };
 
@@ -1010,40 +1028,40 @@ SurfaceCache::Shutdown()
 SurfaceCache::Lookup(const ImageKey         aImageKey,
                      const SurfaceKey&      aSurfaceKey)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (!sInstance) {
     return LookupResult(MatchType::NOT_FOUND);
   }
 
-  return sInstance->Lookup(aImageKey, aSurfaceKey);
+  return sInstance->Lookup(aImageKey, aSurfaceKey, lock);
 }
 
 /* static */ LookupResult
 SurfaceCache::LookupBestMatch(const ImageKey         aImageKey,
                               const SurfaceKey&      aSurfaceKey)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (!sInstance) {
     return LookupResult(MatchType::NOT_FOUND);
   }
 
-  return sInstance->LookupBestMatch(aImageKey, aSurfaceKey);
+  return sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock);
 }
 
 /* static */ InsertOutcome
 SurfaceCache::Insert(NotNull<ISurfaceProvider*> aProvider)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (!sInstance) {
     return InsertOutcome::FAILURE;
   }
 
-  return sInstance->Insert(aProvider, /* aSetAvailable = */ false);
+  return sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock);
 }
 
 /* static */ bool
 SurfaceCache::CanHold(const IntSize& aSize, uint32_t aBytesPerPixel /* = 4 */)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (!sInstance) {
     return false;
@@ -1067,61 +1085,61 @@ SurfaceCache::CanHold(size_t aSize)
 /* static */ void
 SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (!sInstance) {
     return;
   }
 
-  sInstance->SurfaceAvailable(aProvider);
+  sInstance->SurfaceAvailable(aProvider, lock);
 }
 
 /* static */ void
 SurfaceCache::LockImage(const ImageKey aImageKey)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (sInstance) {
     return sInstance->LockImage(aImageKey);
   }
 }
 
 /* static */ void
 SurfaceCache::UnlockImage(const ImageKey aImageKey)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (sInstance) {
-    return sInstance->UnlockImage(aImageKey);
+    return sInstance->UnlockImage(aImageKey, lock);
   }
 }
 
 /* static */ void
 SurfaceCache::UnlockEntries(const ImageKey aImageKey)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (sInstance) {
-    return sInstance->UnlockEntries(aImageKey);
+    return sInstance->UnlockEntries(aImageKey, lock);
   }
 }
 
 /* static */ void
 SurfaceCache::RemoveImage(const ImageKey aImageKey)
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (sInstance) {
-    sInstance->RemoveImage(aImageKey);
+    sInstance->RemoveImage(aImageKey, lock);
   }
 }
 
 /* static */ void
 SurfaceCache::DiscardAll()
 {
   StaticMutexAutoLock lock(sInstanceMutex);
   if (sInstance) {
-    sInstance->DiscardAll();
+    sInstance->DiscardAll(lock);
   }
 }
 
 /* static */ void
 SurfaceCache::CollectSizeOfSurfaces(const ImageKey                  aImageKey,
                                     nsTArray<SurfaceMemoryCounter>& aCounters,
                                     MallocSizeOf                    aMallocSizeOf)
 {
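
The SurfaceCache changes above apply a "proof of lock" idiom: each internal method now takes a const StaticMutexAutoLock& argument, so it can only be reached by a caller that already holds sInstanceMutex, and the lock token is forwarded to the tracker's *Locked methods instead of being re-acquired. A minimal sketch of the idiom follows; the Cache, sMutex, DoWork, and PublicEntryPoint names are hypothetical and not part of this patch.

#include "mozilla/StaticMutex.h"

static mozilla::StaticMutex sMutex;

class Cache
{
public:
  // Only reachable while sMutex is held: the sole way to construct a
  // StaticMutexAutoLock is to lock the mutex.
  void DoWork(const mozilla::StaticMutexAutoLock& aAutoLock)
  {
    // ... operate on shared state without re-locking (and without deadlock) ...
  }
};

static Cache* sInstance = nullptr;

void PublicEntryPoint()
{
  mozilla::StaticMutexAutoLock lock(sMutex);  // acquire once at the boundary
  if (sInstance) {
    sInstance->DoWork(lock);                  // pass the lock as evidence
  }
}
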
--- a/xpcom/ds/nsExpirationTracker.h
+++ b/xpcom/ds/nsExpirationTracker.h
@@ -8,18 +8,20 @@
 #define NSEXPIRATIONTRACKER_H_
 
 #include "mozilla/Logging.h"
 #include "nsTArray.h"
 #include "nsITimer.h"
 #include "nsCOMPtr.h"
 #include "nsAutoPtr.h"
 #include "nsComponentManagerUtils.h"
+#include "nsIEventTarget.h"
 #include "nsIObserver.h"
 #include "nsIObserverService.h"
+#include "nsThreadUtils.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/Services.h"
 
 /**
  * Data used to track the expiration state of an object. We promise that this
  * is 32 bits so that objects that includes this as a field can pad and align
  * efficiently.
  */
@@ -37,17 +39,21 @@ struct nsExpirationState
   /**
    * The generation that this object belongs to, or NOT_TRACKED.
    */
   uint32_t mGeneration:4;
   uint32_t mIndexInGeneration:28;
 };
 
 /**
- * nsExpirationTracker can track the lifetimes and usage of a large number of
+ * ExpirationTracker classes:
+ * - ExpirationTrackerImpl (Thread-safe class)
+ * - nsExpirationTracker (Main-thread only class)
+ *
+ * These classes can track the lifetimes and usage of a large number of
  * objects, and send a notification some window of time after a live object was
  * last used. This is very useful when you manage a large number of objects
  * and want to flush some after they haven't been used for a while.
  * nsExpirationTracker is designed to be very space and time efficient.
  *
  * The type parameter T is the object type that we will track pointers to. T
  * must include an accessible method GetExpirationState() that returns a
  * pointer to an nsExpirationState associated with the object (preferably,
@@ -66,81 +72,95 @@ struct nsExpirationState
  * are stored in a cyclic array; when a timer interrupt fires, we advance
  * the current generation pointer to effectively age all objects very efficiently.
  * By storing information in each object about its generation and index within its
  * generation array, we make removal of objects from a generation very cheap.
  *
  * Future work:
  * -- Add a method to change the timer period?
  */
-template<class T, uint32_t K>
-class nsExpirationTracker
+
+/**
+ * Base class for ExpirationTracker implementations.
+ *
+ * The nsExpirationTracker class below is a specialization intended for
+ * trackers that are only accessed on the main thread.
+ *
+ * To create a thread-safe tracker, derive a subclass from this base class and
+ * provide the Mutex and AutoLock types to be used.
+ *
+ */
+template<typename T, uint32_t K, typename Mutex, typename AutoLock>
+class ExpirationTrackerImpl
 {
 public:
   /**
    * Initialize the tracker.
    * @param aTimerPeriod the timer period in milliseconds. The guarantees
    * provided by the tracker are defined in terms of this period. If the
    * period is zero, then we don't use a timer and rely on someone calling
-   * AgeOneGeneration explicitly.
+   * AgeOneGenerationLocked explicitly.
    */
-  explicit nsExpirationTracker(uint32_t aTimerPeriod, const char* aName)
+  ExpirationTrackerImpl(uint32_t aTimerPeriod, const char* aName)
     : mTimerPeriod(aTimerPeriod)
     , mNewestGeneration(0)
     , mInAgeOneGeneration(false)
     , mName(aName)
   {
     static_assert(K >= 2 && K <= nsExpirationState::NOT_TRACKED,
                   "Unsupported number of generations (must be 2 <= K <= 15)");
+    MOZ_ASSERT(NS_IsMainThread());
     mObserver = new ExpirationTrackerObserver();
     mObserver->Init(this);
   }
-  virtual ~nsExpirationTracker()
+
+  virtual ~ExpirationTrackerImpl()
   {
+    MOZ_ASSERT(NS_IsMainThread());
     if (mTimer) {
       mTimer->Cancel();
     }
     mObserver->Destroy();
   }
 
   /**
    * Add an object to be tracked. It must not already be tracked. It will
    * be added to the newest generation, i.e., as if it was just used.
    * @return an error on out-of-memory
    */
-  nsresult AddObject(T* aObj)
+  nsresult AddObjectLocked(T* aObj, const AutoLock& aAutoLock)
   {
     nsExpirationState* state = aObj->GetExpirationState();
     NS_ASSERTION(!state->IsTracked(),
                  "Tried to add an object that's already tracked");
     nsTArray<T*>& generation = mGenerations[mNewestGeneration];
     uint32_t index = generation.Length();
     if (index > nsExpirationState::MAX_INDEX_IN_GENERATION) {
       NS_WARNING("More than 256M elements tracked, this is probably a problem");
       return NS_ERROR_OUT_OF_MEMORY;
     }
     if (index == 0) {
       // We might need to start the timer
-      nsresult rv = CheckStartTimer();
+      nsresult rv = CheckStartTimerLocked(aAutoLock);
       if (NS_FAILED(rv)) {
         return rv;
       }
     }
     if (!generation.AppendElement(aObj)) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
     state->mGeneration = mNewestGeneration;
     state->mIndexInGeneration = index;
     return NS_OK;
   }
 
   /**
    * Remove an object from the tracker. It must currently be tracked.
    */
-  void RemoveObject(T* aObj)
+  void RemoveObjectLocked(T* aObj, const AutoLock& aAutoLock)
   {
     nsExpirationState* state = aObj->GetExpirationState();
     NS_ASSERTION(state->IsTracked(), "Tried to remove an object that's not tracked");
     nsTArray<T*>& generation = mGenerations[state->mGeneration];
     uint32_t index = state->mIndexInGeneration;
     NS_ASSERTION(generation.Length() > index &&
                  generation[index] == aObj, "Object is lying about its index");
     // Move the last object to fill the hole created by removing aObj
@@ -156,95 +176,96 @@ public:
     // thrash by incessantly creating and destroying timers if someone
     // kept adding and removing an object from the tracker.
   }
 
   /**
    * Notify that an object has been used.
    * @return an error if we lost the object from the tracker...
    */
-  nsresult MarkUsed(T* aObj)
+  nsresult MarkUsedLocked(T* aObj, const AutoLock& aAutoLock)
   {
     nsExpirationState* state = aObj->GetExpirationState();
     if (mNewestGeneration == state->mGeneration) {
       return NS_OK;
     }
-    RemoveObject(aObj);
-    return AddObject(aObj);
+    RemoveObjectLocked(aObj, aAutoLock);
+    return AddObjectLocked(aObj, aAutoLock);
   }
 
   /**
    * The timer calls this, but it can also be manually called if you want
-   * to age objects "artifically". This can result in calls to NotifyExpired.
+   * to age objects "artificially". This can result in calls to NotifyExpiredLocked.
    */
-  void AgeOneGeneration()
+  void AgeOneGenerationLocked(const AutoLock& aAutoLock)
   {
     if (mInAgeOneGeneration) {
       NS_WARNING("Can't reenter AgeOneGeneration from NotifyExpired");
       return;
     }
 
     mInAgeOneGeneration = true;
     uint32_t reapGeneration =
       mNewestGeneration > 0 ? mNewestGeneration - 1 : K - 1;
     nsTArray<T*>& generation = mGenerations[reapGeneration];
     // The following is rather tricky. We have to cope with objects being
     // removed from this generation either because of a call to RemoveObject
-    // (or indirectly via MarkUsed) inside NotifyExpired. Fortunately no
-    // objects can be added to this generation because it's not the newest
+    // (or indirectly via MarkUsedLocked) inside NotifyExpiredLocked. Fortunately
+    // no objects can be added to this generation because it's not the newest
     // generation. We depend on the fact that RemoveObject can only cause
     // the indexes of objects in this generation to *decrease*, not increase.
     // So if we start from the end and work our way backwards we are guaranteed
     // to see each object at least once.
     size_t index = generation.Length();
     for (;;) {
       // Objects could have been removed so index could be outside
       // the array
       index = XPCOM_MIN(index, generation.Length());
       if (index == 0) {
         break;
       }
       --index;
-      NotifyExpired(generation[index]);
+      NotifyExpiredLocked(generation[index], aAutoLock);
     }
     // Any leftover objects from reapGeneration just end up in the new
     // newest-generation. This is bad form, though, so warn if there are any.
     if (!generation.IsEmpty()) {
       NS_WARNING("Expired objects were not removed or marked used");
     }
     // Free excess memory used by the generation array, since we probably
     // just removed most or all of its elements.
     generation.Compact();
     mNewestGeneration = reapGeneration;
     mInAgeOneGeneration = false;
   }
 
   /**
-   * This just calls AgeOneGeneration K times. Under normal circumstances this
-   * will result in all objects getting NotifyExpired called on them, but
-   * if NotifyExpired itself marks some objects as used, then those objects
-   * might not expire. This would be a good thing to call if we get into
+   * This just calls AgeOneGenerationLocked K times. Under normal circumstances
+   * this will result in all objects getting NotifyExpiredLocked called on them,
+   * but if NotifyExpiredLocked itself marks some objects as used, then those
+   * objects might not expire. This would be a good thing to call if we get into
    * a critically-low memory situation.
    */
-  void AgeAllGenerations()
+  void AgeAllGenerationsLocked(const AutoLock& aAutoLock)
   {
     uint32_t i;
     for (i = 0; i < K; ++i) {
-      AgeOneGeneration();
+      AgeOneGenerationLocked(aAutoLock);
     }
   }
 
   class Iterator
   {
   private:
-    nsExpirationTracker<T, K>* mTracker;
+    ExpirationTrackerImpl<T, K, Mutex, AutoLock>* mTracker;
     uint32_t mGeneration;
     uint32_t mIndex;
   public:
-    explicit Iterator(nsExpirationTracker<T, K>* aTracker)
+    Iterator(ExpirationTrackerImpl<T, K, Mutex, AutoLock>* aTracker,
+             AutoLock& aAutoLock)
       : mTracker(aTracker)
       , mGeneration(0)
       , mIndex(0)
     {
     }
 
     T* Next()
     {
@@ -258,72 +279,75 @@ public:
         mIndex = 0;
       }
       return nullptr;
     }
   };
 
   friend class Iterator;
 
-  bool IsEmpty()
+  bool IsEmptyLocked(const AutoLock& aAutoLock)
   {
     for (uint32_t i = 0; i < K; ++i) {
       if (!mGenerations[i].IsEmpty()) {
         return false;
       }
     }
     return true;
   }
 
 protected:
   /**
    * This must be overridden to catch notifications. It is called whenever
    * we detect that an object has not been used for at least (K-1)*mTimerPeriod
    * milliseconds. If timer events are not delayed, it will be called within
-   * roughly K*mTimerPeriod milliseconds after the last use. (Unless AgeOneGeneration
-   * or AgeAllGenerations have been called to accelerate the aging process.)
+   * roughly K*mTimerPeriod milliseconds after the last use.
+   * (Unless AgeOneGenerationLocked or AgeAllGenerationsLocked have been called
+   * to accelerate the aging process.)
    *
    * NOTE: These bounds ignore delays in timer firings due to actual work being
    * performed by the browser. We use a slack timer so there is always at least
    * mTimerPeriod milliseconds between firings, which gives us (K-1)*mTimerPeriod
    * as a pretty solid lower bound. The upper bound is rather loose, however.
    * If the maximum amount by which any given timer firing is delayed is D, then
-   * the upper bound before NotifyExpired is called is K*(mTimerPeriod + D).
+   * the upper bound before NotifyExpiredLocked is called is K*(mTimerPeriod + D).
    *
-   * The NotifyExpired call is expected to remove the object from the tracker,
+   * The NotifyExpiredLocked call is expected to remove the object from the tracker,
    * but it need not. The object (or other objects) could be "resurrected"
-   * by calling MarkUsed() on them, or they might just not be removed.
+   * by calling MarkUsedLocked() on them, or they might just not be removed.
    * Any objects left over that have not been resurrected or removed
    * are placed in the new newest-generation, but this is considered "bad form"
    * and should be avoided (we'll issue a warning). (This recycling counts
    * as "a use" for the purposes of the expiry guarantee above...)
    *
    * For robustness and simplicity, we allow objects to be notified more than
    * once here in the same timer tick.
    */
-  virtual void NotifyExpired(T* aObj) = 0;
+  virtual void NotifyExpiredLocked(T*, const AutoLock&) = 0;
+
+  virtual Mutex& GetMutex() = 0;
 
 private:
   class ExpirationTrackerObserver;
   RefPtr<ExpirationTrackerObserver> mObserver;
   nsTArray<T*>       mGenerations[K];
   nsCOMPtr<nsITimer> mTimer;
   uint32_t           mTimerPeriod;
   uint32_t           mNewestGeneration;
   bool               mInAgeOneGeneration;
   const char* const  mName;   // Used for timer firing profiling.
 
   /**
-   * Whenever "memory-pressure" is observed, it calls AgeAllGenerations()
+   * Whenever "memory-pressure" is observed, it calls AgeAllGenerationsLocked()
    * to minimize memory usage.
    */
   class ExpirationTrackerObserver final : public nsIObserver
   {
   public:
-    void Init(nsExpirationTracker<T, K>* aObj)
+    void Init(ExpirationTrackerImpl<T, K, Mutex, AutoLock>* aObj)
     {
       mOwner = aObj;
       nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
       if (obs) {
         obs->AddObserver(this, "memory-pressure", false);
       }
     }
     void Destroy()
@@ -332,87 +356,213 @@ private:
       nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
       if (obs) {
         obs->RemoveObserver(this, "memory-pressure");
       }
     }
     NS_DECL_ISUPPORTS
     NS_DECL_NSIOBSERVER
   private:
-    nsExpirationTracker<T, K>* mOwner;
+    ExpirationTrackerImpl<T, K, Mutex, AutoLock>* mOwner;
   };
 
+  void HandleLowMemory() {
+    AutoLock lock(GetMutex());
+    AgeAllGenerationsLocked(lock);
+  }
+
+  void HandleTimeout() {
+    AutoLock lock(GetMutex());
+    AgeOneGenerationLocked(lock);
+    // Cancel the timer if we have no objects to track
+    if (IsEmptyLocked(lock)) {
+      mTimer->Cancel();
+      mTimer = nullptr;
+    }
+  }
+
   static void TimerCallback(nsITimer* aTimer, void* aThis)
   {
-    nsExpirationTracker* tracker = static_cast<nsExpirationTracker*>(aThis);
-    tracker->AgeOneGeneration();
-    // Cancel the timer if we have no objects to track
-    if (tracker->IsEmpty()) {
-      tracker->mTimer->Cancel();
-      tracker->mTimer = nullptr;
-    }
+    ExpirationTrackerImpl* tracker = static_cast<ExpirationTrackerImpl*>(aThis);
+    tracker->HandleTimeout();
   }
 
-  nsresult CheckStartTimer()
+  nsresult CheckStartTimerLocked(const AutoLock& aAutoLock)
   {
     if (mTimer || !mTimerPeriod) {
       return NS_OK;
     }
     mTimer = do_CreateInstance("@mozilla.org/timer;1");
     if (!mTimer) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
+    if (!NS_IsMainThread()) {
+      // TimerCallback should always be run on the main thread to prevent races
+      // to the destruction of the tracker.
+      nsCOMPtr<nsIEventTarget> target = do_GetMainThread();
+      NS_ENSURE_STATE(target);
+      mTimer->SetTarget(target);
+    }
     mTimer->InitWithNamedFuncCallback(TimerCallback, this, mTimerPeriod,
                                       nsITimer::TYPE_REPEATING_SLACK, mName);
     return NS_OK;
   }
 };
 
-template<class T, uint32_t K>
+namespace detail {
+
+class PlaceholderLock {
+public:
+  void Lock() {}
+  void Unlock() {}
+};
+
+class PlaceholderAutoLock {
+public:
+  explicit PlaceholderAutoLock(PlaceholderLock&) { }
+  ~PlaceholderAutoLock() = default;
+
+};
+
+template<typename T, uint32_t K>
+using SingleThreadedExpirationTracker =
+  ExpirationTrackerImpl<T, K, PlaceholderLock, PlaceholderAutoLock>;
+
+} // namespace detail
+
+template<typename T, uint32_t K>
+class nsExpirationTracker : protected ::detail::SingleThreadedExpirationTracker<T, K>
+{
+  typedef ::detail::PlaceholderLock Lock;
+  typedef ::detail::PlaceholderAutoLock AutoLock;
+
+  Lock mLock;
+
+  AutoLock FakeLock() {
+    return AutoLock(mLock);
+  }
+
+  Lock& GetMutex() override
+  {
+    return mLock;
+  }
+
+  void NotifyExpiredLocked(T* aObject, const AutoLock&) override
+  {
+    NotifyExpired(aObject);
+  }
+
+protected:
+  virtual void NotifyExpired(T* aObj) = 0;
+
+public:
+  nsExpirationTracker(uint32_t aTimerPeriod, const char* aName)
+    : ::detail::SingleThreadedExpirationTracker<T, K>(aTimerPeriod, aName)
+  { }
+
+  virtual ~nsExpirationTracker()
+  { }
+
+  nsresult AddObject(T* aObj)
+  {
+    return this->AddObjectLocked(aObj, FakeLock());
+  }
+
+  void RemoveObject(T* aObj)
+  {
+    this->RemoveObjectLocked(aObj, FakeLock());
+  }
+
+  nsresult MarkUsed(T* aObj)
+  {
+    return this->MarkUsedLocked(aObj, FakeLock());
+  }
+
+  void AgeOneGeneration()
+  {
+    this->AgeOneGenerationLocked(FakeLock());
+  }
+
+  void AgeAllGenerations()
+  {
+    this->AgeAllGenerationsLocked(FakeLock());
+  }
+
+  class Iterator
+  {
+  private:
+    AutoLock mAutoLock;
+    typename ExpirationTrackerImpl<T, K, Lock, AutoLock>::Iterator mIterator;
+  public:
+    explicit Iterator(nsExpirationTracker<T, K>* aTracker)
+      : mAutoLock(aTracker->GetMutex())
+      , mIterator(aTracker, mAutoLock)
+    {
+    }
+
+    T* Next()
+    {
+      return mIterator.Next();
+    }
+  };
+
+  friend class Iterator;
+
+  bool IsEmpty()
+  {
+    return this->IsEmptyLocked(FakeLock());
+  }
+};
+
+template<typename T, uint32_t K, typename Mutex, typename AutoLock>
 NS_IMETHODIMP
-nsExpirationTracker<T, K>::ExpirationTrackerObserver::Observe(
+ExpirationTrackerImpl<T, K, Mutex, AutoLock>::
+ExpirationTrackerObserver::Observe(
     nsISupports* aSubject, const char* aTopic, const char16_t* aData)
 {
   if (!strcmp(aTopic, "memory-pressure") && mOwner) {
-    mOwner->AgeAllGenerations();
+    mOwner->HandleLowMemory();
   }
   return NS_OK;
 }
 
-template<class T, uint32_t K>
+template<class T, uint32_t K, typename Mutex, typename AutoLock>
 NS_IMETHODIMP_(MozExternalRefCountType)
-nsExpirationTracker<T, K>::ExpirationTrackerObserver::AddRef(void)
+ExpirationTrackerImpl<T, K, Mutex, AutoLock>::
+ExpirationTrackerObserver::AddRef(void)
 {
   MOZ_ASSERT(int32_t(mRefCnt) >= 0, "illegal refcnt");
   NS_ASSERT_OWNINGTHREAD(ExpirationTrackerObserver);
   ++mRefCnt;
   NS_LOG_ADDREF(this, mRefCnt, "ExpirationTrackerObserver", sizeof(*this));
   return mRefCnt;
 }
 
-template<class T, uint32_t K>
+template<class T, uint32_t K, typename Mutex, typename AutoLock>
 NS_IMETHODIMP_(MozExternalRefCountType)
-nsExpirationTracker<T, K>::ExpirationTrackerObserver::Release(void)
+ExpirationTrackerImpl<T, K, Mutex, AutoLock>::
+ExpirationTrackerObserver::Release(void)
 {
   MOZ_ASSERT(int32_t(mRefCnt) > 0, "dup release");
   NS_ASSERT_OWNINGTHREAD(ExpirationTrackerObserver);
   --mRefCnt;
   NS_LOG_RELEASE(this, mRefCnt, "ExpirationTrackerObserver");
   if (mRefCnt == 0) {
     NS_ASSERT_OWNINGTHREAD(ExpirationTrackerObserver);
     mRefCnt = 1; /* stabilize */
     delete (this);
     return 0;
   }
   return mRefCnt;
 }
 
-template<class T, uint32_t K>
+template<class T, uint32_t K, typename Mutex, typename AutoLock>
 NS_IMETHODIMP
-nsExpirationTracker<T, K>::ExpirationTrackerObserver::QueryInterface(
+ExpirationTrackerImpl<T, K, Mutex, AutoLock>::
+ExpirationTrackerObserver::QueryInterface(
     REFNSIID aIID, void** aInstancePtr)
 {
   NS_ASSERTION(aInstancePtr,
                "QueryInterface requires a non-NULL destination!");
   nsresult rv = NS_ERROR_FAILURE;
   NS_INTERFACE_TABLE(ExpirationTrackerObserver, nsIObserver)
   return rv;
 }
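
After this refactor, a thread-safe tracker is defined by deriving from ExpirationTrackerImpl with the desired mutex types and overriding NotifyExpiredLocked() and GetMutex(); callers invoke the *Locked entry points while holding that mutex (SurfaceTracker in SurfaceCache.cpp above is the in-tree example). The sketch below is illustrative only, assuming hypothetical TrackedThing, ThingTracker, and sTrackerMutex names and a 60-second expiration period.

#include "mozilla/StaticMutex.h"
#include "nsExpirationTracker.h"

static mozilla::StaticMutex sTrackerMutex;

// Hypothetical tracked type; the tracker only requires GetExpirationState().
struct TrackedThing
{
  nsExpirationState* GetExpirationState() { return &mExpirationState; }
  nsExpirationState mExpirationState;
};

// Two generations, 60-second timer period, guarded by sTrackerMutex.
class ThingTracker final
  : public ExpirationTrackerImpl<TrackedThing, 2,
                                 mozilla::StaticMutex,
                                 mozilla::StaticMutexAutoLock>
{
public:
  ThingTracker()
    : ExpirationTrackerImpl<TrackedThing, 2,
                            mozilla::StaticMutex,
                            mozilla::StaticMutexAutoLock>(60000, "ThingTracker")
  { }

protected:
  void NotifyExpiredLocked(TrackedThing* aThing,
                           const mozilla::StaticMutexAutoLock& aAutoLock) override
  {
    // The mutex is already held here, so only *Locked methods may be called.
    RemoveObjectLocked(aThing, aAutoLock);
  }

  mozilla::StaticMutex& GetMutex() override { return sTrackerMutex; }
};

// Callers hold the mutex and pass the lock token through.
void TrackSomething(ThingTracker& aTracker, TrackedThing* aThing)
{
  mozilla::StaticMutexAutoLock lock(sTrackerMutex);
  aTracker.AddObjectLocked(aThing, lock);
}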