Bug 1196847 - Part 1: Allow storage of a unique id for a cell independent of address; r=jonco
authorTerrence Cole <terrence@mozilla.com>
Thu, 20 Aug 2015 10:35:22 -0700
changeset 260540 c9e469c6b9159e42c83dacdc91b5084b9600a93e
parent 260539 ed6830f48c58fcd145c1967f3f879cc67553377d
child 260541 7f6585a46cd09020655d1861c8e42858830f950e
push id64523
push usertcole@mozilla.com
push dateWed, 02 Sep 2015 16:33:28 +0000
treeherdermozilla-inbound@d92d88957742 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjonco
bugs1196847
milestone43.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1196847 - Part 1: Allow storage of a unique id for a cell independent of address; r=jonco
js/public/HeapAPI.h
js/src/ds/LockGuard.h
js/src/ds/SpinLock.h
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/gc/Marking.cpp
js/src/gc/Marking.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/gc/Zone.h
js/src/jsapi-tests/moz.build
js/src/jsapi-tests/testGCUniqueId.cpp
js/src/jsgc.cpp
js/src/jsobjinlines.h
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -49,17 +49,18 @@ const size_t CellMask = CellSize - 1;
 #ifdef JS_GC_SMALL_CHUNK_SIZE
 const size_t ChunkMarkBitmapOffset = 258104;
 const size_t ChunkMarkBitmapBits = 31744;
 #else
 const size_t ChunkMarkBitmapOffset = 1032352;
 const size_t ChunkMarkBitmapBits = 129024;
 #endif
 const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
-const size_t ChunkLocationOffset = ChunkSize - 2 * sizeof(void*) - sizeof(uint64_t);
+const size_t ChunkTrailerSize = 2 * sizeof(uintptr_t) + sizeof(uint64_t);
+const size_t ChunkLocationOffset = ChunkSize - ChunkTrailerSize;
 const size_t ArenaZoneOffset = 0;
 
 /*
  * Live objects are marked black. How many other additional colors are available
  * depends on the size of the GCThing. Objects marked gray are eligible for
  * cycle collection.
  */
 static const uint32_t BLACK = 0;
new file mode 100644
--- /dev/null
+++ b/js/src/ds/LockGuard.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_LockGuard_h
+#define js_LockGuard_h
+
+#include "mozilla/GuardObjects.h"
+
+namespace js {
+
+// An implementation of C++11's std::lock_guard, enhanced with a guard object
+// to help with correct usage.
+template <typename LockType>
+class LockGuard
+{
+    LockType& lockRef_;
+    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
+
+  public:
+    explicit LockGuard(LockType& lock
+                       MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : lockRef_(lock)
+    {
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+        lockRef_.lock();
+    }
+
+    ~LockGuard() {
+        lockRef_.unlock();
+    }
+};
+
+} // namespace js
+
+#endif // js_LockGuard_h
new file mode 100644
--- /dev/null
+++ b/js/src/ds/SpinLock.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_SpinLock_h
+#define js_SpinLock_h
+
+#include "mozilla/Atomics.h"
+
+#include "ds/LockGuard.h"
+
+namespace js {
+
+// A trivial spin-lock implementation. Extremely fast when rarely-contended.
+class SpinLock
+{
+    mozilla::Atomic<bool, mozilla::ReleaseAcquire> locked_;
+
+  public:
+    SpinLock() : locked_(false) {}
+
+    void lock() {
+        do {
+            while (locked_)
+                ; // Spin until the lock seems free.
+        } while (!locked_.compareExchange(false, true)); // Atomically take the lock.
+    }
+
+    void unlock() {
+        locked_ = false;
+    }
+};
+
+using AutoSpinLock = LockGuard<SpinLock>;
+
+} // namespace js
+
+#endif // js_SpinLock_h
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -649,16 +649,21 @@ class GCRuntime
     void maybeVerifyPreBarriers(bool always);
     bool selectForMarking(JSObject* object);
     void clearSelectedForMarking();
     void setDeterministic(bool enable);
 #endif
 
     size_t maxMallocBytesAllocated() { return maxMallocBytes; }
 
+    uint64_t nextCellUniqueId() {
+        MOZ_ASSERT(nextCellUniqueId_ > 0);
+        return nextCellUniqueId_++;
+    }
+
   public:
     // Internal public interface
     js::gc::State state() const { return incrementalState; }
     bool isHeapCompacting() const { return state() == COMPACT; }
     bool isBackgroundSweeping() { return helperState.isBackgroundSweeping(); }
     void waitBackgroundSweepEnd() { helperState.waitBackgroundSweepEnd(); }
     void waitBackgroundSweepOrAllocEnd() {
         helperState.waitBackgroundSweepEnd();
@@ -1003,16 +1008,19 @@ class GCRuntime
     // When all arenas in a chunk are used, it is moved to the fullChunks pool
     // so as to reduce the cost of operations on the available lists.
     ChunkPool             fullChunks_;
 
     RootedValueMap rootsHash;
 
     size_t maxMallocBytes;
 
+    // An incrementing id used to assign unique ids to cells that require one.
+    uint64_t nextCellUniqueId_;
+
     /*
      * Number of the committed arenas in all GC chunks including empty chunks.
      */
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
     VerifyPreTracer* verifyPreData;
     bool chunkAllocationSinceLastGC;
     int64_t nextFullGCTime;
     int64_t lastGCTime;
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -288,16 +288,19 @@ class TenuredCell : public Cell
     static MOZ_ALWAYS_INLINE void writeBarrierPost(void* cellp, TenuredCell* prior,
                                                    TenuredCell* next);
 
 #ifdef DEBUG
     inline bool isAligned() const;
 #endif
 };
 
+/* Cells are aligned to CellShift, so the largest tagged null pointer is: */
+const uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;
+
 /*
  * The mark bitmap has one bit per each GC cell. For multi-cell GC things this
  * wastes space but allows to avoid expensive devisions by thing's size when
  * accessing the bitmap. In addition this allows to use some bits for colored
  * marking during the cycle GC.
  */
 const size_t ArenaCellCount = size_t(1) << (ArenaShift - CellShift);
 const size_t ArenaBitmapBits = ArenaCellCount;
@@ -799,28 +802,40 @@ ArenaHeader::getThingSize() const
 
 /*
  * The tail of the chunk info is shared between all chunks in the system, both
  * nursery and tenured. This structure is locatable from any GC pointer by
  * aligning to 1MiB.
  */
 struct ChunkTrailer
 {
+    /* Construct a Nursery ChunkTrailer. */
+    ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
+      : location(gc::ChunkLocationBitNursery), storeBuffer(sb), runtime(rt)
+    {}
+
+    /* Construct a Tenured heap ChunkTrailer. */
+    explicit ChunkTrailer(JSRuntime* rt)
+      : location(gc::ChunkLocationBitTenuredHeap), storeBuffer(nullptr), runtime(rt)
+    {}
+
+  public:
     /* The index the chunk in the nursery, or LocationTenuredHeap. */
     uint32_t        location;
     uint32_t        padding;
 
     /* The store buffer for writes to things in this chunk or nullptr. */
     StoreBuffer*    storeBuffer;
 
+    /* This provides quick access to the runtime from absolutely anywhere. */
     JSRuntime*      runtime;
 };
 
-static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t) + sizeof(uint64_t),
-              "ChunkTrailer size is incorrect.");
+static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
+              "ChunkTrailer size must match the API defined size.");
 
 /* The chunk header (located at the end of the chunk to preserve arena alignment). */
 struct ChunkInfo
 {
     void init() {
         next = prev = nullptr;
         age = 0;
     }
@@ -999,23 +1014,26 @@ struct Chunk
     PerArenaBitmap  decommittedArenas;
     ChunkInfo       info;
 
     static Chunk* fromAddress(uintptr_t addr) {
         addr &= ~ChunkMask;
         return reinterpret_cast<Chunk*>(addr);
     }
 
-    static bool withinArenasRange(uintptr_t addr) {
+    static bool withinValidRange(uintptr_t addr) {
         uintptr_t offset = addr & ChunkMask;
-        return offset < ArenasPerChunk * ArenaSize;
+        return Chunk::fromAddress(addr)->isNurseryChunk()
+               ? offset < ChunkSize - sizeof(ChunkTrailer)
+               : offset < ArenasPerChunk * ArenaSize;
     }
 
     static size_t arenaIndex(uintptr_t addr) {
-        MOZ_ASSERT(withinArenasRange(addr));
+        MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
+        MOZ_ASSERT(withinValidRange(addr));
         return (addr & ChunkMask) >> ArenaShift;
     }
 
     uintptr_t address() const {
         uintptr_t addr = reinterpret_cast<uintptr_t>(this);
         MOZ_ASSERT(!(addr & ChunkMask));
         return addr;
     }
@@ -1023,16 +1041,20 @@ struct Chunk
     bool unused() const {
         return info.numArenasFree == ArenasPerChunk;
     }
 
     bool hasAvailableArenas() const {
         return info.numArenasFree != 0;
     }
 
+    bool isNurseryChunk() const {
+        return info.trailer.storeBuffer;
+    }
+
     ArenaHeader* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind,
                                const AutoLockGC& lock);
 
     void releaseArena(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock);
     void recycleArena(ArenaHeader* aheader, SortedArenaList& dest, AllocKind thingKind,
                       size_t thingsPerArena);
 
     bool decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock);
@@ -1122,17 +1144,17 @@ class HeapUsage
 };
 
 inline uintptr_t
 ArenaHeader::address() const
 {
     uintptr_t addr = reinterpret_cast<uintptr_t>(this);
     MOZ_ASSERT(addr);
     MOZ_ASSERT(!(addr & ArenaMask));
-    MOZ_ASSERT(Chunk::withinArenasRange(addr));
+    MOZ_ASSERT(Chunk::withinValidRange(addr));
     return addr;
 }
 
 inline Chunk*
 ArenaHeader::chunk() const
 {
     return Chunk::fromAddress(address());
 }
@@ -1291,17 +1313,17 @@ Cell::shadowRuntimeFromAnyThread() const
     return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread());
 }
 
 inline uintptr_t
 Cell::address() const
 {
     uintptr_t addr = uintptr_t(this);
     MOZ_ASSERT(addr % CellSize == 0);
-    MOZ_ASSERT(Chunk::withinArenasRange(addr));
+    MOZ_ASSERT(Chunk::withinValidRange(addr));
     return addr;
 }
 
 Chunk*
 Cell::chunk() const
 {
     uintptr_t addr = uintptr_t(this);
     MOZ_ASSERT(addr % CellSize == 0);
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -2135,17 +2135,23 @@ js::TenuringTracer::moveObjectToTenured(
      *
      * For Arrays we're reducing tenuredSize to the smaller srcSize
      * because moveElementsToTenured() accounts for all Array elements,
      * even if they are inlined.
      */
     if (src->is<ArrayObject>())
         tenuredSize = srcSize = sizeof(NativeObject);
 
+    // Copy the Cell contents.
     js_memcpy(dst, src, srcSize);
+
+    // Move any hash code attached to the object.
+    src->zone()->transferUniqueId(dst, src);
+
+    // Move the slots and elements, if we need to.
     if (src->isNative()) {
         NativeObject* ndst = &dst->as<NativeObject>();
         NativeObject* nsrc = &src->as<NativeObject>();
         tenuredSize += moveSlotsToTenured(ndst, nsrc, dstKind);
         tenuredSize += moveElementsToTenured(ndst, nsrc, dstKind);
 
         // The shape's list head may point into the old object. This can only
         // happen for dictionaries, which are native objects.
--- a/js/src/gc/Marking.h
+++ b/js/src/gc/Marking.h
@@ -421,17 +421,17 @@ ToMarkable(Cell* cell)
     return cell;
 }
 
 // Return true if the pointer is nullptr, or if it is a tagged pointer to
 // nullptr.
 MOZ_ALWAYS_INLINE bool
 IsNullTaggedPointer(void* p)
 {
-    return uintptr_t(p) < 32;
+    return uintptr_t(p) <= LargestTaggedNullCellPointer;
 }
 
 // HashKeyRef represents a reference to a HashMap key. This should normally
 // be used through the HashTableWriteBarrierPost function.
 template <typename Map, typename Key>
 class HashKeyRef : public BufferableRef
 {
     Map* map;
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -59,16 +59,19 @@ js::Nursery::init(uint32_t maxNurseryByt
 
     /* If no chunks are specified then the nursery is permenantly disabled. */
     if (numNurseryChunks_ == 0)
         return true;
 
     if (!mallocedBuffers.init())
         return false;
 
+    if (!cellsWithUid_.init())
+        return false;
+
     void* heap = MapAlignedPages(nurserySize(), Alignment);
     if (!heap)
         return false;
 
     freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
     if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
         return false;
 
@@ -643,16 +646,26 @@ js::Nursery::waitBackgroundFreeEnd()
 {
     MOZ_ASSERT(freeMallocedBuffersTask);
     freeMallocedBuffersTask->join();
 }
 
 void
 js::Nursery::sweep()
 {
+    /* Sweep unique ids in all in-use chunks. */
+    for (CellsWithUniqueIdSet::Enum e(cellsWithUid_); !e.empty(); e.popFront()) {
+        JSObject* obj = static_cast<JSObject*>(e.front());
+        if (!IsForwarded(obj))
+            obj->zone()->removeUniqueId(obj);
+        else
+            MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
+    }
+    cellsWithUid_.clear();
+
 #ifdef JS_GC_ZEAL
     /* Poison the nursery contents so touching a freed object will crash. */
     JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize());
     for (int i = 0; i < numNurseryChunks_; ++i)
         initChunk(i);
 
     if (runtime()->gcZeal() == ZealGenerationalGCValue) {
         MOZ_ASSERT(numActiveChunks_ == numNurseryChunks_);
@@ -660,20 +673,18 @@ js::Nursery::sweep()
         /* Only reset the alloc point when we are close to the end. */
         if (currentChunk_ + 1 == numNurseryChunks_)
             setCurrentChunk(0);
     } else
 #endif
     {
 #ifdef JS_CRASH_DIAGNOSTICS
         JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
-        for (int i = 0; i < numActiveChunks_; ++i) {
-            chunk(i).trailer.location = gc::ChunkLocationBitNursery;
-            chunk(i).trailer.runtime = runtime();
-        }
+        for (int i = 0; i < numActiveChunks_; ++i)
+            initChunk(i);
 #endif
         setCurrentChunk(0);
     }
 
     /* Set current start position for isEmpty checks. */
     currentStart_ = position();
 }
 
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -177,16 +177,24 @@ class Nursery
 
     /* Mark a malloced buffer as no longer needing to be freed. */
     void removeMallocedBuffer(void* buffer) {
         mallocedBuffers.remove(buffer);
     }
 
     void waitBackgroundFreeEnd();
 
+    bool addedUniqueIdToCell(gc::Cell* cell) {
+        if (!IsInsideNursery(cell) || !isEnabled())
+            return true;
+        MOZ_ASSERT(cellsWithUid_.initialized());
+        MOZ_ASSERT(!cellsWithUid_.has(cell));
+        return cellsWithUid_.put(cell);
+    }
+
     size_t sizeOfHeapCommitted() const {
         return numActiveChunks_ * gc::ChunkSize;
     }
     size_t sizeOfHeapDecommitted() const {
         return (numNurseryChunks_ - numActiveChunks_) * gc::ChunkSize;
     }
     size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
         size_t total = 0;
@@ -260,16 +268,31 @@ class Nursery
      * new location with a forwarding pointer at the base. This does not work
      * for buffers whose length is less than pointer width, or when different
      * buffers might overlap each other. For these, an entry in the following
      * table is used.
      */
     typedef HashMap<void*, void*, PointerHasher<void*, 1>, SystemAllocPolicy> ForwardedBufferMap;
     ForwardedBufferMap forwardedBuffers;
 
+    /*
+     * When we assign a unique id to a cell in the nursery, that almost always
+     * means that the cell will be in a hash table, and thus, held live,
+     * automatically moving the uid from the nursery to its new home in
+     * tenured. It is possible, if rare, for an object that acquired a uid to
+     * be dead before the next collection, in which case we need to know to
+     * remove it when we sweep.
+     *
+     * Note: we store the pointers as Cell* here, resulting in an ugly cast in
+     *       sweep. This is because this structure is used to help implement
+     *       stable object hashing and we have to break the cycle somehow.
+     */
+    using CellsWithUniqueIdSet = HashSet<gc::Cell*, PointerHasher<gc::Cell*, 3>, SystemAllocPolicy>;
+    CellsWithUniqueIdSet cellsWithUid_;
+
     /* The maximum number of bytes allowed to reside in nursery buffers. */
     static const size_t MaxNurseryBufferSize = 1024;
 
     /* The amount of space in the mapped nursery available to allocations. */
     static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
 
     struct NurseryChunkLayout {
         char data[NurseryChunkUsableSize];
@@ -281,20 +304,18 @@ class Nursery
                   "Nursery chunk size must match gc::Chunk size.");
     NurseryChunkLayout& chunk(int index) const {
         MOZ_ASSERT(index < numNurseryChunks_);
         MOZ_ASSERT(start());
         return reinterpret_cast<NurseryChunkLayout*>(start())[index];
     }
 
     MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
-        NurseryChunkLayout& c = chunk(chunkno);
-        c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
-        c.trailer.location = gc::ChunkLocationBitNursery;
-        c.trailer.runtime = runtime();
+        gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
+        new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
     }
 
     MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {
         MOZ_ASSERT(chunkno < numNurseryChunks_);
         MOZ_ASSERT(chunkno < numActiveChunks_);
         currentChunk_ = chunkno;
         position_ = chunk(chunkno).start();
         currentEnd_ = chunk(chunkno).end();
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -8,19 +8,22 @@
 #define gc_Zone_h
 
 #include "mozilla/Atomics.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/MemoryReporting.h"
 
 #include "jscntxt.h"
 
+#include "ds/SpinLock.h"
+#include "ds/SplayTree.h"
 #include "gc/FindSCCs.h"
 #include "gc/GCRuntime.h"
 #include "js/TracingAPI.h"
+#include "vm/MallocProvider.h"
 #include "vm/TypeInference.h"
 
 namespace js {
 
 namespace jit {
 class JitZone;
 } // namespace jit
 
@@ -53,16 +56,78 @@ class ZoneHeapThreshold
     static double computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
                                                          const GCSchedulingTunables& tunables,
                                                          const GCSchedulingState& state);
     static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
                                           JSGCInvocationKind gckind,
                                           const GCSchedulingTunables& tunables);
 };
 
+// Maps a Cell* to a unique, 64bit id. This implementation uses a SplayTree
+// instead of a HashMap. While a SplayTree has worse worst-case performance,
+// the typical usage of storing stable hashmap keys tends to cluster with
+// extremely frequent lookups of the same key repeatedly. Thus, we typically
+// get very close to HashMap-like O(1) performance with much denser storage.
+class UniqueIdMap
+{
+    struct Pair {
+        uint64_t uniqueId;
+        Cell* key;
+
+      public:
+        Pair(Cell* cell, uint64_t uid) : uniqueId(uid), key(cell) {}
+        Pair(const Pair& other) : uniqueId(other.uniqueId), key(other.key) {}
+
+        static ptrdiff_t compare(const Pair& a, const Pair& b) {
+            return b.key - a.key;
+        }
+    };
+
+    // Use a relatively small chunk, as many users will not have many entries.
+    const size_t AllocChunkSize = mozilla::RoundUpPow2(16 * sizeof(Pair));
+
+    LifoAlloc alloc;
+    SplayTree<Pair, Pair> map;
+
+  public:
+    UniqueIdMap() : alloc(AllocChunkSize), map(&alloc) {}
+
+    // Returns true if the map is empty.
+    bool isEmpty() { return map.empty(); }
+
+    // Return true if the cell is present in the map.
+    bool has(Cell* cell) {
+        return map.maybeLookup(Pair(cell, 0));
+    }
+
+    // Returns whether the cell is present or not. If true, sets the uid.
+    bool lookup(Cell* cell, uint64_t* uidp) {
+        Pair tmp(nullptr, 0);
+        if (!map.contains(Pair(cell, 0), &tmp))
+            return false;
+        MOZ_ASSERT(tmp.key == cell);
+        MOZ_ASSERT(tmp.uniqueId > 0);
+        *uidp = tmp.uniqueId;
+        return true;
+    }
+
+    // Inserts a value; returns false on OOM.
+    bool put(Cell* cell, uint64_t uid) {
+        MOZ_ASSERT(uid > 0);
+        return map.insert(Pair(cell, uid));
+    }
+
+    // Remove the given key from the map.
+    void remove(Cell* cell) {
+        map.remove(Pair(cell, 0));
+    }
+};
+
+extern uint64_t NextCellUniqueId(JSRuntime* rt);
+
 } // namespace gc
 } // namespace js
 
 namespace JS {
 
 // A zone is a collection of compartments. Every compartment belongs to exactly
 // one zone. In Firefox, there is roughly one zone per tab along with a system
 // zone for everything else. Zones mainly serve as boundaries for garbage
@@ -245,16 +310,23 @@ struct Zone : public JS::shadow::Zone,
     void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);
 
     js::jit::JitZone* createJitZone(JSContext* cx);
 
     bool isQueuedForBackgroundSweep() {
         return isOnList();
     }
 
+    // Side map for storing unique ids for cells, independent of address.
+    js::gc::UniqueIdMap uniqueIds_;
+
+    // Guards the uniqueIds_ map as it is accessed directly from the background
+    // sweeping thread. This uses a spinlock, since it is normally uncontended.
+    js::SpinLock uniqueIdsLock_;
+
   public:
     bool hasDebuggers() const { return debuggers && debuggers->length(); }
     DebuggerVector* getDebuggers() const { return debuggers; }
     DebuggerVector* getOrCreateDebuggers(JSContext* cx);
 
     void enqueueForPromotionToTenuredLogging(JSObject& obj) {
         MOZ_ASSERT(hasDebuggers());
         MOZ_ASSERT(!IsInsideNursery(&obj));
@@ -313,16 +385,84 @@ struct Zone : public JS::shadow::Zone,
 
     bool usedByExclusiveThread;
 
     // True when there are active frames.
     bool active;
 
     mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
 
+    // Creates a HashNumber based on getUniqueId. Returns false on OOM.
+    bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
+        uint64_t uid;
+        if (!getUniqueId(cell, &uid))
+            return false;
+        *hashp = (uid >> 32) ^ (uid & 0xFFFFFFFF);
+        return true;
+    }
+
+    // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
+    // puts that into |uidp|. Returns false on OOM.
+    bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
+        MOZ_ASSERT(uidp);
+        js::AutoSpinLock lock(uniqueIdsLock_);
+
+        // Get an existing uid, if one has been set.
+        if (uniqueIds_.lookup(cell, uidp))
+            return true;
+
+        // Set a new uid on the cell.
+        *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
+        if (!uniqueIds_.put(cell, *uidp))
+            return false;
+
+        // If the cell was in the nursery, hopefully unlikely, then we need to
+        // tell the nursery about it so that it can sweep the uid if the thing
+        // does not get tenured.
+        if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell))
+            js::CrashAtUnhandlableOOM("failed to allocate tracking data for a nursery uid");
+        return true;
+    }
+
+    // Return true if this cell has a UID associated with it.
+    bool hasUniqueId(js::gc::Cell* cell) {
+        js::AutoSpinLock lock(uniqueIdsLock_);
+        uint64_t tmp;
+        return uniqueIds_.lookup(cell, &tmp);
+    }
+
+    // Transfer an id from another cell. This must only be called on behalf of a
+    // moving GC. This method is infallible.
+    void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
+        MOZ_ASSERT(src != tgt);
+        MOZ_ASSERT(!IsInsideNursery(tgt));
+        MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
+        js::AutoSpinLock lock(uniqueIdsLock_);
+
+        // Return early if we do not have a UID set on the source.
+        uint64_t uid = 0;
+        if (!uniqueIds_.lookup(src, &uid))
+            return;
+
+        // Remove from the source first to guarantee that at least one node
+        // will be available in the free pool. This allows us to avoid OOM
+        // in all cases when transferring uids.
+        uniqueIds_.remove(src);
+
+        MOZ_ASSERT(uid > 0);
+        mozilla::DebugOnly<bool> ok = uniqueIds_.put(tgt, uid);
+        MOZ_ASSERT(ok);
+    }
+
+    // Remove any unique id associated with this Cell.
+    void removeUniqueId(js::gc::Cell* cell) {
+        js::AutoSpinLock lock(uniqueIdsLock_);
+        uniqueIds_.remove(cell);
+    }
+
   private:
     js::jit::JitZone* jitZone_;
 
     GCState gcState_;
     bool gcScheduled_;
     bool gcPreserveCode_;
     bool jitUsingBarriers_;
 
--- a/js/src/jsapi-tests/moz.build
+++ b/js/src/jsapi-tests/moz.build
@@ -37,16 +37,17 @@ UNIFIED_SOURCES += [
     'testGCCellPtr.cpp',
     'testGCChunkPool.cpp',
     'testGCExactRooting.cpp',
     'testGCFinalizeCallback.cpp',
     'testGCHeapPostBarriers.cpp',
     'testGCMarking.cpp',
     'testGCOutOfMemory.cpp',
     'testGCStoreBufferRemoval.cpp',
+    'testGCUniqueId.cpp',
     'testGetPropertyDescriptor.cpp',
     'testHashTable.cpp',
     'testIndexToString.cpp',
     'testIntern.cpp',
     'testIntlAvailableLocales.cpp',
     'testIntString.cpp',
     'testIntTypesABI.cpp',
     'testIsInsideNursery.cpp',
new file mode 100644
--- /dev/null
+++ b/js/src/jsapi-tests/testGCUniqueId.cpp
@@ -0,0 +1,120 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+* vim: set ts=8 sts=4 et sw=4 tw=99:
+*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/GCInternals.h"
+#include "gc/Zone.h"
+
+static void
+MinimizeHeap(JSRuntime* rt)
+{
+    // The second collection is to force us to wait for the background
+    // sweeping that the first GC started to finish.
+    JS_GC(rt);
+    JS_GC(rt);
+    js::gc::AutoFinishGC finish(rt);
+}
+
+BEGIN_TEST(testGCUID)
+{
+#ifdef JS_GC_ZEAL
+    AutoLeaveZeal nozeal(cx);
+#endif /* JS_GC_ZEAL */
+
+    uint64_t uid = 0;
+    uint64_t tmp = 0;
+
+    // Ensure the heap is as minimal as it can get.
+    MinimizeHeap(rt);
+
+    JS::RootedObject obj(cx, JS_NewPlainObject(cx));
+    uintptr_t nurseryAddr = uintptr_t(obj.get());
+    CHECK(obj);
+    CHECK(js::gc::IsInsideNursery(obj));
+
+    // Do not start with an ID.
+    CHECK(!obj->zone()->hasUniqueId(obj));
+
+    // Ensure we can get a new UID.
+    CHECK(obj->zone()->getUniqueId(obj, &uid));
+    CHECK(uid > js::gc::LargestTaggedNullCellPointer);
+
+    // We should now have an id.
+    CHECK(obj->zone()->hasUniqueId(obj));
+
+    // Calling again should get us the same thing.
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid == tmp);
+
+    // Tenure the thing and check that the UID moved with it.
+    MinimizeHeap(rt);
+    uintptr_t tenuredAddr = uintptr_t(obj.get());
+    CHECK(tenuredAddr != nurseryAddr);
+    CHECK(!js::gc::IsInsideNursery(obj));
+    CHECK(obj->zone()->hasUniqueId(obj));
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid == tmp);
+
+    // Allocate a new nursery thing in the same location and check that we
+    // removed the prior uid that was attached to the location.
+    obj = JS_NewPlainObject(cx);
+    CHECK(obj);
+    CHECK(uintptr_t(obj.get()) == nurseryAddr);
+    CHECK(!obj->zone()->hasUniqueId(obj));
+
+    // Try to get another tenured object in the same location and check that
+    // the uid was removed correctly.
+    obj = nullptr;
+    MinimizeHeap(rt);
+    obj = JS_NewPlainObject(cx);
+    MinimizeHeap(rt);
+    CHECK(uintptr_t(obj.get()) == tenuredAddr);
+    CHECK(!obj->zone()->hasUniqueId(obj));
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid != tmp);
+    uid = tmp;
+
+    // Allocate a few arenas worth of objects to ensure we get some compaction.
+    const static size_t N = 2049;
+    using ObjectVector = js::TraceableVector<JSObject*>;
+    JS::Rooted<ObjectVector> vec(cx, ObjectVector(cx));
+    for (size_t i = 0; i < N; ++i) {
+        obj = JS_NewPlainObject(cx);
+        CHECK(obj);
+        CHECK(vec.append(obj));
+    }
+
+    // Transfer our vector to tenured if it isn't there already.
+    MinimizeHeap(rt);
+
+    // Tear holes in the heap by unrooting the even objects and collecting.
+    JS::Rooted<ObjectVector> vec2(cx, ObjectVector(cx));
+    for (size_t i = 0; i < N; ++i) {
+        if (i % 2 == 1)
+            CHECK(vec2.append(vec[i]));
+    }
+    vec.clear();
+    MinimizeHeap(rt);
+
+    // Grab the last object in the vector as our object of interest.
+    obj = vec2.back();
+    CHECK(obj);
+    tenuredAddr = uintptr_t(obj.get());
+    CHECK(obj->zone()->getUniqueId(obj, &uid));
+
+    // Force a compaction to move the object and check that the uid moved to
+    // the new tenured heap location.
+    JS::PrepareForFullGC(rt);
+    JS::GCForReason(rt, GC_SHRINK, JS::gcreason::API);
+    MinimizeHeap(rt);
+    CHECK(uintptr_t(obj.get()) != tenuredAddr);
+    CHECK(obj->zone()->hasUniqueId(obj));
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid == tmp);
+
+    return true;
+}
+END_TEST(testGCUID)
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -819,19 +819,17 @@ Chunk::init(JSRuntime* rt)
     /*
      * Decommit the arenas. We do this after poisoning so that if the OS does
      * not have to recycle the pages, we still get the benefit of poisoning.
      */
     decommitAllArenas(rt);
 
     /* Initialize the chunk info. */
     info.init();
-    info.trailer.storeBuffer = nullptr;
-    info.trailer.location = ChunkLocationBitTenuredHeap;
-    info.trailer.runtime = rt;
+    new (&info.trailer) ChunkTrailer(rt);
 
     /* The rest of info fields are initialized in pickChunk. */
 }
 
 /*
  * Search for and return the next decommitted Arena. Our goal is to keep
  * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
  * it to the most recently freed arena when we free, and forcing it to
@@ -1095,16 +1093,17 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
     rt(rt),
     systemZone(nullptr),
     nursery(rt),
     storeBuffer(rt, nursery),
     stats(rt),
     marker(rt),
     usage(nullptr),
     maxMallocBytes(0),
+    nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
     numArenasFreeCommitted(0),
     verifyPreData(nullptr),
     chunkAllocationSinceLastGC(false),
     nextFullGCTime(0),
     lastGCTime(PRMJ_Now()),
     mode(JSGC_MODE_INCREMENTAL),
     numActiveZoneIters(0),
     decommitThreshold(32 * 1024 * 1024),
@@ -2017,16 +2016,19 @@ RelocateCell(Zone* zone, TenuredCell* sr
         dstAlloc = GCRuntime::refillFreeListInGC(zone, thingKind);
     if (!dstAlloc)
         return false;
     TenuredCell* dst = TenuredCell::fromPointer(dstAlloc);
 
     // Copy source cell contents to destination.
     memcpy(dst, src, thingSize);
 
+    // Move any uid attached to the object.
+    src->zone()->transferUniqueId(dst, src);
+
     if (IsObjectAllocKind(thingKind)) {
         JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
         JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
 
         if (srcObj->isNative()) {
             NativeObject* srcNative = &srcObj->as<NativeObject>();
             NativeObject* dstNative = &dstObj->as<NativeObject>();
 
@@ -7310,16 +7312,22 @@ JS::AutoDisableGenerationalGC::~AutoDisa
 }
 
 JS_PUBLIC_API(bool)
 JS::IsGenerationalGCEnabled(JSRuntime* rt)
 {
     return rt->gc.isGenerationalGCEnabled();
 }
 
+uint64_t
+js::gc::NextCellUniqueId(JSRuntime* rt)
+{
+    return rt->gc.nextCellUniqueId();
+}
+
 namespace js {
 namespace gc {
 namespace MemInfo {
 
 static bool
 GCBytesGetter(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -70,16 +70,21 @@ JSObject::finalize(js::FreeOp* fop)
 
 #ifdef DEBUG
     MOZ_ASSERT(isTenured());
     if (!IsBackgroundFinalized(asTenured().getAllocKind())) {
         /* Assert we're on the main thread. */
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(fop->runtime()));
     }
 #endif
+
+    // Remove any UID attached to this object.
+    if (zoneFromAnyThread()->hasUniqueId(this))
+        zoneFromAnyThread()->removeUniqueId(this);
+
     const js::Class* clasp = getClass();
     if (clasp->finalize)
         clasp->finalize(fop, this);
 
     if (!clasp->isNative())
         return;
 
     js::NativeObject* nobj = &as<js::NativeObject>();