Bug 1196847 - Part 1: Allow storage of a unique id for a cell independent of address; r=jonco
author Terrence Cole <terrence@mozilla.com>
Thu, 20 Aug 2015 10:35:22 -0700
changeset 296724 cca86cd156cf57a2d7bbbc103a4cd0ec92b03f05
parent 296723 cc2beb2c6e3c4e142847ed81408c7019f6ac8ed9
child 296725 74608aa063b909315f4bf6c86c749ab1bb0c7bc5
push id 5880
push user armenzg@mozilla.com
push date Mon, 28 Sep 2015 13:39:44 +0000
reviewers jonco
bugs 1196847
milestone 44.0a1
Bug 1196847 - Part 1: Allow storage of a unique id for a cell independent of address; r=jonco * * * imported patch rewrite_uid_on_ht_with_zone_sweeping
js/public/HashTable.h
js/public/HeapAPI.h
js/src/ds/LockGuard.h
js/src/ds/SpinLock.h
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/gc/Marking.cpp
js/src/gc/Marking.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/jsapi-tests/moz.build
js/src/jsapi-tests/testGCUniqueId.cpp
js/src/jsgc.cpp
js/src/jsobjinlines.h
--- a/js/public/HashTable.h
+++ b/js/public/HashTable.h
@@ -648,36 +648,35 @@ class HashMapEntry
 {
     Key key_;
     Value value_;
 
     template <class, class, class> friend class detail::HashTable;
     template <class> friend class detail::HashTableEntry;
     template <class, class, class, class> friend class HashMap;
 
-    Key & mutableKey() { return key_; }
-
   public:
     template<typename KeyInput, typename ValueInput>
     HashMapEntry(KeyInput&& k, ValueInput&& v)
       : key_(mozilla::Forward<KeyInput>(k)),
         value_(mozilla::Forward<ValueInput>(v))
     {}
 
     HashMapEntry(HashMapEntry&& rhs)
       : key_(mozilla::Move(rhs.key_)),
         value_(mozilla::Move(rhs.value_))
     {}
 
     typedef Key KeyType;
     typedef Value ValueType;
 
-    const Key & key() const { return key_; }
-    const Value & value() const { return value_; }
-    Value & value() { return value_; }
+    const Key& key() const { return key_; }
+    Key& mutableKey() { return key_; }
+    const Value& value() const { return value_; }
+    Value& value() { return value_; }
 
   private:
     HashMapEntry(const HashMapEntry&) = delete;
     void operator=(const HashMapEntry&) = delete;
 };
 
 } // namespace js
 
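The newly public mutableKey() is what later parts of this patch rely on to sweep the uid table in place. A condensed, type-simplified sketch of that pattern (the real version in jsgc.cpp below dispatches on trace kind):

    // Sketch only: sweep a map keyed on GC pointers. The enumerator hands out
    // a mutable reference to the key so a barriered read can update it in place.
    for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront()) {
        Cell** keyp = &e.front().mutableKey();
        if (IsAboutToBeFinalizedUnbarriered(reinterpret_cast<JSObject**>(keyp)))
            e.removeFront();
    }
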
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -49,17 +49,18 @@ const size_t CellMask = CellSize - 1;
 #ifdef JS_GC_SMALL_CHUNK_SIZE
 const size_t ChunkMarkBitmapOffset = 258104;
 const size_t ChunkMarkBitmapBits = 31744;
 #else
 const size_t ChunkMarkBitmapOffset = 1032352;
 const size_t ChunkMarkBitmapBits = 129024;
 #endif
 const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
-const size_t ChunkLocationOffset = ChunkSize - 2 * sizeof(void*) - sizeof(uint64_t);
+const size_t ChunkTrailerSize = 2 * sizeof(uintptr_t) + sizeof(uint64_t);
+const size_t ChunkLocationOffset = ChunkSize - ChunkTrailerSize;
 const size_t ArenaZoneOffset = 0;
 
 /*
 * Live objects are marked black. How many additional colors are available
  * depends on the size of the GCThing. Objects marked gray are eligible for
  * cycle collection.
  */
 static const uint32_t BLACK = 0;
new file mode 100644
--- /dev/null
+++ b/js/src/ds/LockGuard.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_LockGuard_h
+#define js_LockGuard_h
+
+#include "mozilla/GuardObjects.h"
+
+namespace js {
+
+// An implementation of C++11's std::lock_guard, enhanced with a guard object
+// to help with correct usage.
+template <typename LockType>
+class LockGuard
+{
+    LockType& lockRef_;
+    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
+
+  public:
+    explicit LockGuard(LockType& lock
+                       MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : lockRef_(lock)
+    {
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+        lockRef_.lock();
+    }
+
+    ~LockGuard() {
+        lockRef_.unlock();
+    }
+};
+
+} // namespace js
+
+#endif // js_LockGuard_h
new file mode 100644
--- /dev/null
+++ b/js/src/ds/SpinLock.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_SpinLock_h
+#define js_SpinLock_h
+
+#include "mozilla/Atomics.h"
+
+#include "ds/LockGuard.h"
+
+namespace js {
+
+// A trivial spin-lock implementation. Extremely fast when rarely contended.
+class SpinLock
+{
+    mozilla::Atomic<bool, mozilla::ReleaseAcquire> locked_;
+
+  public:
+    SpinLock() : locked_(false) {}
+
+    void lock() {
+        do {
+            while (locked_)
+                ; // Spin until the lock seems free.
+        } while (!locked_.compareExchange(false, true)); // Atomically take the lock.
+    }
+
+    void unlock() {
+        locked_ = false;
+    }
+};
+
+using AutoSpinLock = LockGuard<SpinLock>;
+
+} // namespace js
+
+#endif // js_SpinLock_h
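A minimal usage sketch for the two new primitives (the Counter class here is hypothetical; Zone below guards its uid table with exactly this pattern):

    class Counter
    {
        js::SpinLock lock_;
        uint64_t count_;

      public:
        Counter() : count_(0) {}
        uint64_t next() {
            js::AutoSpinLock guard(lock_);  // lock() here, unlock() at scope exit
            return ++count_;
        }
    };
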
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -650,16 +650,21 @@ class GCRuntime
     void maybeVerifyPreBarriers(bool always);
     bool selectForMarking(JSObject* object);
     void clearSelectedForMarking();
     void setDeterministic(bool enable);
 #endif
 
     size_t maxMallocBytesAllocated() { return maxMallocBytes; }
 
+    uint64_t nextCellUniqueId() {
+        MOZ_ASSERT(nextCellUniqueId_ > 0);
+        return nextCellUniqueId_++;
+    }
+
   public:
     // Internal public interface
     js::gc::State state() const { return incrementalState; }
     bool isHeapCompacting() const { return state() == COMPACT; }
     bool isBackgroundSweeping() { return helperState.isBackgroundSweeping(); }
     void waitBackgroundSweepEnd() { helperState.waitBackgroundSweepEnd(); }
     void waitBackgroundSweepOrAllocEnd() {
         helperState.waitBackgroundSweepEnd();
@@ -1008,16 +1013,19 @@ class GCRuntime
     // When all arenas in a chunk are used, it is moved to the fullChunks pool
     // so as to reduce the cost of operations on the available lists.
     ChunkPool             fullChunks_;
 
     RootedValueMap rootsHash;
 
     size_t maxMallocBytes;
 
+    // An incrementing counter used to assign unique ids to cells that require one.
+    uint64_t nextCellUniqueId_;
+
     /*
      * Number of the committed arenas in all GC chunks including empty chunks.
      */
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
     VerifyPreTracer* verifyPreData;
     bool chunkAllocationSinceLastGC;
     int64_t nextFullGCTime;
     int64_t lastGCTime;
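A back-of-envelope check on why a plain 64-bit counter suffices (ids are never reused):

    // Even minting one uid per nanosecond,
    //   2^64 ids ~= 1.8e19 ns ~= 1.8e10 s ~= 585 years
    // before the counter could wrap. It starts at
    // LargestTaggedNullCellPointer + 1 (see jsgc.cpp below), so no uid can
    // collide with a tagged null pointer value.
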
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -289,16 +289,19 @@ class TenuredCell : public Cell
     static MOZ_ALWAYS_INLINE void writeBarrierPost(void* cellp, TenuredCell* prior,
                                                    TenuredCell* next);
 
 #ifdef DEBUG
     inline bool isAligned() const;
 #endif
 };
 
+/* Cells are aligned to CellSize (1 << CellShift), so the largest tagged null pointer is: */
+const uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;
+
 /*
 * The mark bitmap has one bit per GC cell. For multi-cell GC things this
 * wastes space but allows us to avoid expensive divisions by the thing's size
 * when accessing the bitmap. In addition this allows us to use some bits for
 * colored marking during the cycle GC.
  */
 const size_t ArenaCellCount = size_t(1) << (ArenaShift - CellShift);
 const size_t ArenaBitmapBits = ArenaCellCount;
@@ -801,28 +804,40 @@ ArenaHeader::getThingSize() const
 
 /*
  * The tail of the chunk info is shared between all chunks in the system, both
  * nursery and tenured. This structure is locatable from any GC pointer by
  * aligning to 1MiB.
  */
 struct ChunkTrailer
 {
+    /* Construct a Nursery ChunkTrailer. */
+    ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
+      : location(gc::ChunkLocationBitNursery), storeBuffer(sb), runtime(rt)
+    {}
+
+    /* Construct a Tenured heap ChunkTrailer. */
+    explicit ChunkTrailer(JSRuntime* rt)
+      : location(gc::ChunkLocationBitTenuredHeap), storeBuffer(nullptr), runtime(rt)
+    {}
+
+  public:
     /* The index of the chunk in the nursery, or LocationTenuredHeap. */
     uint32_t        location;
     uint32_t        padding;
 
     /* The store buffer for writes to things in this chunk or nullptr. */
     StoreBuffer*    storeBuffer;
 
+    /* This provides quick access to the runtime from absolutely anywhere. */
     JSRuntime*      runtime;
 };
 
-static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t) + sizeof(uint64_t),
-              "ChunkTrailer size is incorrect.");
+static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
+              "ChunkTrailer size must match the API defined size.");
 
 /* The chunk header (located at the end of the chunk to preserve arena alignment). */
 struct ChunkInfo
 {
     void init() {
         next = prev = nullptr;
         age = 0;
     }
@@ -1001,23 +1016,26 @@ struct Chunk
     PerArenaBitmap  decommittedArenas;
     ChunkInfo       info;
 
     static Chunk* fromAddress(uintptr_t addr) {
         addr &= ~ChunkMask;
         return reinterpret_cast<Chunk*>(addr);
     }
 
-    static bool withinArenasRange(uintptr_t addr) {
+    static bool withinValidRange(uintptr_t addr) {
         uintptr_t offset = addr & ChunkMask;
-        return offset < ArenasPerChunk * ArenaSize;
+        return Chunk::fromAddress(addr)->isNurseryChunk()
+               ? offset < ChunkSize - sizeof(ChunkTrailer)
+               : offset < ArenasPerChunk * ArenaSize;
     }
 
     static size_t arenaIndex(uintptr_t addr) {
-        MOZ_ASSERT(withinArenasRange(addr));
+        MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
+        MOZ_ASSERT(withinValidRange(addr));
         return (addr & ChunkMask) >> ArenaShift;
     }
 
     uintptr_t address() const {
         uintptr_t addr = reinterpret_cast<uintptr_t>(this);
         MOZ_ASSERT(!(addr & ChunkMask));
         return addr;
     }
@@ -1025,16 +1043,20 @@ struct Chunk
     bool unused() const {
         return info.numArenasFree == ArenasPerChunk;
     }
 
     bool hasAvailableArenas() const {
         return info.numArenasFree != 0;
     }
 
+    bool isNurseryChunk() const {
+        return info.trailer.storeBuffer;
+    }
+
     ArenaHeader* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind,
                                const AutoLockGC& lock);
 
     void releaseArena(JSRuntime* rt, ArenaHeader* aheader, const AutoLockGC& lock);
     void recycleArena(ArenaHeader* aheader, SortedArenaList& dest, AllocKind thingKind,
                       size_t thingsPerArena);
 
     bool decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock);
@@ -1124,17 +1146,17 @@ class HeapUsage
 };
 
 inline uintptr_t
 ArenaHeader::address() const
 {
     uintptr_t addr = reinterpret_cast<uintptr_t>(this);
     MOZ_ASSERT(addr);
     MOZ_ASSERT(!(addr & ArenaMask));
-    MOZ_ASSERT(Chunk::withinArenasRange(addr));
+    MOZ_ASSERT(Chunk::withinValidRange(addr));
     return addr;
 }
 
 inline Chunk*
 ArenaHeader::chunk() const
 {
     return Chunk::fromAddress(address());
 }
@@ -1293,17 +1315,17 @@ Cell::shadowRuntimeFromAnyThread() const
     return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread());
 }
 
 inline uintptr_t
 Cell::address() const
 {
     uintptr_t addr = uintptr_t(this);
     MOZ_ASSERT(addr % CellSize == 0);
-    MOZ_ASSERT(Chunk::withinArenasRange(addr));
+    MOZ_ASSERT(Chunk::withinValidRange(addr));
     return addr;
 }
 
 Chunk*
 Cell::chunk() const
 {
     uintptr_t addr = uintptr_t(this);
     MOZ_ASSERT(addr % CellSize == 0);
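The withinValidRange() change encodes the two chunk layouts, roughly:

    // Tenured chunk:  | arena 0 | ... | arena N-1 | bitmap ... ChunkTrailer |
    //                   cells valid at offsets < ArenasPerChunk * ArenaSize
    // Nursery chunk:  | allocation data .................. | ChunkTrailer |
    //                   cells valid at offsets < ChunkSize - sizeof(ChunkTrailer)
    // isNurseryChunk() distinguishes the two via the trailer's storeBuffer,
    // which is non-null only for nursery chunks.
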
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -2112,17 +2112,23 @@ js::TenuringTracer::moveObjectToTenured(
      *
      * For Arrays we're reducing tenuredSize to the smaller srcSize
      * because moveElementsToTenured() accounts for all Array elements,
      * even if they are inlined.
      */
     if (src->is<ArrayObject>())
         tenuredSize = srcSize = sizeof(NativeObject);
 
+    // Copy the Cell contents.
     js_memcpy(dst, src, srcSize);
+
+    // Move any hash code attached to the object.
+    src->zone()->transferUniqueId(dst, src);
+
+    // Move the slots and elements, if we need to.
     if (src->isNative()) {
         NativeObject* ndst = &dst->as<NativeObject>();
         NativeObject* nsrc = &src->as<NativeObject>();
         tenuredSize += moveSlotsToTenured(ndst, nsrc, dstKind);
         tenuredSize += moveElementsToTenured(ndst, nsrc, dstKind);
 
         // The shape's list head may point into the old object. This can only
         // happen for dictionaries, which are native objects.
--- a/js/src/gc/Marking.h
+++ b/js/src/gc/Marking.h
@@ -420,17 +420,17 @@ ToMarkable(Cell* cell)
     return cell;
 }
 
 // Return true if the pointer is nullptr, or if it is a tagged pointer to
 // nullptr.
 MOZ_ALWAYS_INLINE bool
 IsNullTaggedPointer(void* p)
 {
-    return uintptr_t(p) < 32;
+    return uintptr_t(p) <= LargestTaggedNullCellPointer;
 }
 
 // HashKeyRef represents a reference to a HashMap key. This should normally
 // be used through the HashTableWriteBarrierPost function.
 template <typename Map, typename Key>
 class HashKeyRef : public BufferableRef
 {
     Map* map;
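Concretely, assuming CellShift == 3 (8-byte cells, as on current builds), LargestTaggedNullCellPointer is 7:

    IsNullTaggedPointer(nullptr);                     // true: untagged null
    IsNullTaggedPointer(reinterpret_cast<void*>(7));  // true: null with all tag bits set
    IsNullTaggedPointer(reinterpret_cast<void*>(8));  // false: smallest non-null cell offset
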
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -62,16 +62,19 @@ js::Nursery::init(uint32_t maxNurseryByt
 
     /* If no chunks are specified then the nursery is permanently disabled. */
     if (numNurseryChunks_ == 0)
         return true;
 
     if (!mallocedBuffers.init())
         return false;
 
+    if (!cellsWithUid_.init())
+        return false;
+
     void* heap = MapAlignedPages(nurserySize(), Alignment);
     if (!heap)
         return false;
 
     freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
     if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
         return false;
 
@@ -648,16 +651,26 @@ js::Nursery::waitBackgroundFreeEnd()
 {
     MOZ_ASSERT(freeMallocedBuffersTask);
     freeMallocedBuffersTask->join();
 }
 
 void
 js::Nursery::sweep()
 {
+    /* Sweep unique ids in all in-use chunks. */
+    for (CellsWithUniqueIdSet::Enum e(cellsWithUid_); !e.empty(); e.popFront()) {
+        JSObject* obj = static_cast<JSObject*>(e.front());
+        if (!IsForwarded(obj))
+            obj->zone()->removeUniqueId(obj);
+        else
+            MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
+    }
+    cellsWithUid_.clear();
+
 #ifdef JS_GC_ZEAL
     /* Poison the nursery contents so touching a freed object will crash. */
     JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize());
     for (int i = 0; i < numNurseryChunks_; ++i)
         initChunk(i);
 
     if (runtime()->gcZeal() == ZealGenerationalGCValue) {
         MOZ_ASSERT(numActiveChunks_ == numNurseryChunks_);
@@ -665,20 +678,18 @@ js::Nursery::sweep()
         /* Only reset the alloc point when we are close to the end. */
         if (currentChunk_ + 1 == numNurseryChunks_)
             setCurrentChunk(0);
     } else
 #endif
     {
 #ifdef JS_CRASH_DIAGNOSTICS
         JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
-        for (int i = 0; i < numActiveChunks_; ++i) {
-            chunk(i).trailer.location = gc::ChunkLocationBitNursery;
-            chunk(i).trailer.runtime = runtime();
-        }
+        for (int i = 0; i < numActiveChunks_; ++i)
+            initChunk(i);
 #endif
         setCurrentChunk(0);
     }
 
     /* Set current start position for isEmpty checks. */
     currentStart_ = position();
     MemProfiler::SweepNursery(runtime());
 }
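The invariant the sweep above maintains, as a sketch:

    // For each cell c in cellsWithUid_ at the end of a minor GC:
    //   - c was tenured:  IsForwarded(c), and transferUniqueId() in
    //     moveObjectToTenured() already rekeyed the zone's table to
    //     Forwarded(c), so the entry is only asserted here;
    //   - c is dead:      removeUniqueId(c) drops its entry so a later
    //     allocation at the same address cannot inherit the uid.
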
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -178,16 +178,24 @@ class Nursery
 
     /* Mark a malloced buffer as no longer needing to be freed. */
     void removeMallocedBuffer(void* buffer) {
         mallocedBuffers.remove(buffer);
     }
 
     void waitBackgroundFreeEnd();
 
+    bool addedUniqueIdToCell(gc::Cell* cell) {
+        if (!IsInsideNursery(cell) || !isEnabled())
+            return true;
+        MOZ_ASSERT(cellsWithUid_.initialized());
+        MOZ_ASSERT(!cellsWithUid_.has(cell));
+        return cellsWithUid_.put(cell);
+    }
+
     size_t sizeOfHeapCommitted() const {
         return numActiveChunks_ * gc::ChunkSize;
     }
     size_t sizeOfHeapDecommitted() const {
         return (numNurseryChunks_ - numActiveChunks_) * gc::ChunkSize;
     }
     size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
         size_t total = 0;
@@ -261,16 +269,31 @@ class Nursery
      * new location with a forwarding pointer at the base. This does not work
      * for buffers whose length is less than pointer width, or when different
      * buffers might overlap each other. For these, an entry in the following
      * table is used.
      */
     typedef HashMap<void*, void*, PointerHasher<void*, 1>, SystemAllocPolicy> ForwardedBufferMap;
     ForwardedBufferMap forwardedBuffers;
 
+    /*
+     * When we assign a unique id to a cell in the nursery, that almost always
+     * means that the cell will be in a hash table and thus held live,
+     * automatically moving the uid from the nursery to its new home in the
+     * tenured heap. It is possible, if rare, for an object that acquired a
+     * uid to die before the next collection, in which case we need to know
+     * to remove its entry when we sweep.
+     *
+     * Note: we store the pointers as Cell* here, resulting in an ugly cast in
+     *       sweep. This is because this structure is used to help implement
+     *       stable object hashing and we have to break the cycle somehow.
+     */
+    using CellsWithUniqueIdSet = HashSet<gc::Cell*, PointerHasher<gc::Cell*, 3>, SystemAllocPolicy>;
+    CellsWithUniqueIdSet cellsWithUid_;
+
     /* The maximum number of bytes allowed to reside in nursery buffers. */
     static const size_t MaxNurseryBufferSize = 1024;
 
     /* The amount of space in the mapped nursery available to allocations. */
     static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
 
     struct NurseryChunkLayout {
         char data[NurseryChunkUsableSize];
@@ -282,20 +305,18 @@ class Nursery
                   "Nursery chunk size must match gc::Chunk size.");
     NurseryChunkLayout& chunk(int index) const {
         MOZ_ASSERT(index < numNurseryChunks_);
         MOZ_ASSERT(start());
         return reinterpret_cast<NurseryChunkLayout*>(start())[index];
     }
 
     MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
-        NurseryChunkLayout& c = chunk(chunkno);
-        c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
-        c.trailer.location = gc::ChunkLocationBitNursery;
-        c.trailer.runtime = runtime();
+        gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
+        new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
     }
 
     MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {
         MOZ_ASSERT(chunkno < numNurseryChunks_);
         MOZ_ASSERT(chunkno < numActiveChunks_);
         currentChunk_ = chunkno;
         position_ = chunk(chunkno).start();
         currentEnd_ = chunk(chunkno).end();
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -61,17 +61,17 @@ Zone::~Zone()
 
     js_delete(debuggers);
     js_delete(jitZone_);
 }
 
 bool Zone::init(bool isSystemArg)
 {
     isSystem = isSystemArg;
-    return gcZoneGroupEdges.init();
+    return uniqueIds_.init() && gcZoneGroupEdges.init();
 }
 
 void
 Zone::setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit)
 {
     if (updateJit == UpdateJit && needs != jitUsingBarriers_) {
         jit::ToggleBarriers(this, needs);
         jitUsingBarriers_ = needs;
@@ -151,17 +151,16 @@ Zone::logPromotionsToTenured()
             if ((*dbgp)->isDebuggee(range.front()->compartment()))
                 (*dbgp)->logTenurePromotion(rt, *range.front(), now);
         }
     }
 
     awaitingTenureLogging.clear();
 }
 
-
 void
 Zone::sweepBreakpoints(FreeOp* fop)
 {
     if (fop->runtime()->debuggerList.isEmpty())
         return;
 
     /*
      * Sweep all compartments in a zone at the same time, since there is no way
@@ -252,16 +251,25 @@ Zone::discardJitCode(FreeOp* fop)
              */
             script->resetWarmUpCounter();
         }
 
         jitZone()->optimizedStubSpace()->free();
     }
 }
 
+#ifdef JSGC_HASH_TABLE_CHECKS
+void
+JS::Zone::checkUniqueIdTableAfterMovingGC()
+{
+    for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront())
+        js::gc::CheckGCThingAfterMovingGC(e.front().key());
+}
+#endif
+
 uint64_t
 Zone::gcNumber()
 {
     // Zones in use by exclusive threads are not collected, and threads using
     // them cannot access the main runtime's gcNumber without racing.
     return usedByExclusiveThread ? 0 : runtimeFromMainThread()->gc.gcNumber();
 }
 
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -8,19 +8,22 @@
 #define gc_Zone_h
 
 #include "mozilla/Atomics.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/MemoryReporting.h"
 
 #include "jscntxt.h"
 
+#include "ds/SpinLock.h"
+#include "ds/SplayTree.h"
 #include "gc/FindSCCs.h"
 #include "gc/GCRuntime.h"
 #include "js/TracingAPI.h"
+#include "vm/MallocProvider.h"
 #include "vm/TypeInference.h"
 
 namespace js {
 
 namespace jit {
 class JitZone;
 } // namespace jit
 
@@ -53,16 +56,21 @@ class ZoneHeapThreshold
     static double computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
                                                          const GCSchedulingTunables& tunables,
                                                          const GCSchedulingState& state);
     static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
                                           JSGCInvocationKind gckind,
                                           const GCSchedulingTunables& tunables);
 };
 
+// Maps a Cell* to a unique, 64-bit id.
+using UniqueIdMap = HashMap<Cell*, uint64_t, PointerHasher<Cell*, 3>, SystemAllocPolicy>;
+
+extern uint64_t NextCellUniqueId(JSRuntime* rt);
+
 } // namespace gc
 } // namespace js
 
 namespace JS {
 
 // A zone is a collection of compartments. Every compartment belongs to exactly
 // one zone. In Firefox, there is roughly one zone per tab along with a system
 // zone for everything else. Zones mainly serve as boundaries for garbage
@@ -237,25 +245,34 @@ struct Zone : public JS::shadow::Zone,
 
   private:
     DebuggerVector* debuggers;
 
     using LogTenurePromotionQueue = js::Vector<JSObject*, 0, js::SystemAllocPolicy>;
     LogTenurePromotionQueue awaitingTenureLogging;
 
     void sweepBreakpoints(js::FreeOp* fop);
+    void sweepUniqueIds(js::FreeOp* fop);
     void sweepWeakMaps();
     void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);
 
     js::jit::JitZone* createJitZone(JSContext* cx);
 
     bool isQueuedForBackgroundSweep() {
         return isOnList();
     }
 
+    // Side map for storing unique ids for cells, independent of address.
+    js::gc::UniqueIdMap uniqueIds_;
+
+    // Guards the uniqueIds_ map, which is accessed concurrently from
+    // off-thread parsing and the main thread. This uses a spinlock, since it
+    // is normally uncontended.
+    js::SpinLock uniqueIdsLock_;
+
   public:
     bool hasDebuggers() const { return debuggers && debuggers->length(); }
     DebuggerVector* getDebuggers() const { return debuggers; }
     DebuggerVector* getOrCreateDebuggers(JSContext* cx);
 
     void enqueueForPromotionToTenuredLogging(JSObject& obj) {
         MOZ_ASSERT(hasDebuggers());
         MOZ_ASSERT(!IsInsideNursery(&obj));
@@ -318,16 +335,78 @@ struct Zone : public JS::shadow::Zone,
 
     bool usedByExclusiveThread;
 
     // True when there are active frames.
     bool active;
 
     mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
 
+    // Creates a HashNumber based on getUniqueId. Returns false on OOM.
+    bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
+        uint64_t uid;
+        if (!getUniqueId(cell, &uid))
+            return false;
+        *hashp = js::HashNumber(uid >> 32) ^ js::HashNumber(uid & 0xFFFFFFFF);
+        return true;
+    }
+
+    // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
+    // puts that into |uidp|. Returns false on OOM.
+    bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
+        MOZ_ASSERT(uidp);
+        js::AutoSpinLock lock(uniqueIdsLock_);
+
+        // Get an existing uid, if one has been set.
+        auto p = uniqueIds_.lookupForAdd(cell);
+        if (p) {
+            *uidp = p->value();
+            return true;
+        }
+
+        // Set a new uid on the cell.
+        *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
+        if (!uniqueIds_.add(p, cell, *uidp))
+            return false;
+
+        // If the cell is in the nursery, which is hopefully rare, we need to
+        // tell the nursery about it so that it can sweep the uid if the cell
+        // does not get tenured.
+        if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell))
+            js::CrashAtUnhandlableOOM("failed to allocate tracking data for a nursery uid");
+        return true;
+    }
+
+    // Return true if this cell has a UID associated with it.
+    bool hasUniqueId(js::gc::Cell* cell) {
+        js::AutoSpinLock lock(uniqueIdsLock_);
+        return uniqueIds_.has(cell);
+    }
+
+    // Transfer an id from another cell. This must only be called on behalf of a
+    // moving GC. This method is infallible.
+    void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
+        MOZ_ASSERT(src != tgt);
+        MOZ_ASSERT(!IsInsideNursery(tgt));
+        MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
+        js::AutoSpinLock lock(uniqueIdsLock_);
+        uniqueIds_.rekeyIfMoved(src, tgt);
+    }
+
+    // Remove any unique id associated with this Cell.
+    void removeUniqueId(js::gc::Cell* cell) {
+        js::AutoSpinLock lock(uniqueIdsLock_);
+        uniqueIds_.remove(cell);
+    }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+    // Assert that the UniqueId table has been redirected successfully.
+    void checkUniqueIdTableAfterMovingGC();
+#endif
+
   private:
     js::jit::JitZone* jitZone_;
 
     GCState gcState_;
     bool gcScheduled_;
     bool gcPreserveCode_;
     bool jitUsingBarriers_;
 
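Putting the new Zone API together, a usage sketch mirroring the test added below:

    uint64_t uid;
    js::HashNumber hash;
    JS::Zone* zone = obj->zone();

    if (!zone->getUniqueId(obj, &uid))   // creates and records a uid on first call
        return false;                    // OOM
    MOZ_ASSERT(zone->hasUniqueId(obj));
    if (!zone->getHashCode(obj, &hash))  // folds the 64-bit uid into 32 bits
        return false;
    // uid and hash now remain stable across moving GCs for the life of obj.
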
--- a/js/src/jsapi-tests/moz.build
+++ b/js/src/jsapi-tests/moz.build
@@ -38,16 +38,17 @@ UNIFIED_SOURCES += [
     'testGCCellPtr.cpp',
     'testGCChunkPool.cpp',
     'testGCExactRooting.cpp',
     'testGCFinalizeCallback.cpp',
     'testGCHeapPostBarriers.cpp',
     'testGCMarking.cpp',
     'testGCOutOfMemory.cpp',
     'testGCStoreBufferRemoval.cpp',
+    'testGCUniqueId.cpp',
     'testGetPropertyDescriptor.cpp',
     'testHashTable.cpp',
     'testIndexToString.cpp',
     'testIntern.cpp',
     'testIntlAvailableLocales.cpp',
     'testIntString.cpp',
     'testIntTypesABI.cpp',
     'testIsInsideNursery.cpp',
new file mode 100644
--- /dev/null
+++ b/js/src/jsapi-tests/testGCUniqueId.cpp
@@ -0,0 +1,120 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/GCInternals.h"
+#include "gc/Zone.h"
+
+static void
+MinimizeHeap(JSRuntime* rt)
+{
+    // The second collection forces us to wait for the background sweeping
+    // that the first GC started.
+    JS_GC(rt);
+    JS_GC(rt);
+    js::gc::AutoFinishGC finish(rt);
+}
+
+BEGIN_TEST(testGCUID)
+{
+#ifdef JS_GC_ZEAL
+    AutoLeaveZeal nozeal(cx);
+#endif /* JS_GC_ZEAL */
+
+    uint64_t uid = 0;
+    uint64_t tmp = 0;
+
+    // Ensure the heap is as minimal as it can get.
+    MinimizeHeap(rt);
+
+    JS::RootedObject obj(cx, JS_NewPlainObject(cx));
+    uintptr_t nurseryAddr = uintptr_t(obj.get());
+    CHECK(obj);
+    CHECK(js::gc::IsInsideNursery(obj));
+
+    // Do not start with an ID.
+    CHECK(!obj->zone()->hasUniqueId(obj));
+
+    // Ensure we can get a new UID.
+    CHECK(obj->zone()->getUniqueId(obj, &uid));
+    CHECK(uid > js::gc::LargestTaggedNullCellPointer);
+
+    // We should now have an id.
+    CHECK(obj->zone()->hasUniqueId(obj));
+
+    // Calling again should get us the same thing.
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid == tmp);
+
+    // Tenure the thing and check that the UID moved with it.
+    MinimizeHeap(rt);
+    uintptr_t tenuredAddr = uintptr_t(obj.get());
+    CHECK(tenuredAddr != nurseryAddr);
+    CHECK(!js::gc::IsInsideNursery(obj));
+    CHECK(obj->zone()->hasUniqueId(obj));
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid == tmp);
+
+    // Allocate a new nursery thing in the same location and check that we
+    // removed the prior uid that was attached to the location.
+    obj = JS_NewPlainObject(cx);
+    CHECK(obj);
+    CHECK(uintptr_t(obj.get()) == nurseryAddr);
+    CHECK(!obj->zone()->hasUniqueId(obj));
+
+    // Try to get another tenured object in the same location and check that
+    // the uid was removed correctly.
+    obj = nullptr;
+    MinimizeHeap(rt);
+    obj = JS_NewPlainObject(cx);
+    MinimizeHeap(rt);
+    CHECK(uintptr_t(obj.get()) == tenuredAddr);
+    CHECK(!obj->zone()->hasUniqueId(obj));
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid != tmp);
+    uid = tmp;
+
+    // Allocate a few arenas' worth of objects to ensure we get some compaction.
+    const static size_t N = 2049;
+    using ObjectVector = js::TraceableVector<JSObject*>;
+    JS::Rooted<ObjectVector> vec(cx, ObjectVector(cx));
+    for (size_t i = 0; i < N; ++i) {
+        obj = JS_NewPlainObject(cx);
+        CHECK(obj);
+        CHECK(vec.append(obj));
+    }
+
+    // Transfer our vector to tenured if it isn't there already.
+    MinimizeHeap(rt);
+
+    // Tear holes in the heap by unrooting the even-indexed objects and collecting.
+    JS::Rooted<ObjectVector> vec2(cx, ObjectVector(cx));
+    for (size_t i = 0; i < N; ++i) {
+        if (i % 2 == 1)
+            CHECK(vec2.append(vec[i]));
+    }
+    vec.clear();
+    MinimizeHeap(rt);
+
+    // Grab the last object in the vector as our object of interest.
+    obj = vec2.back();
+    CHECK(obj);
+    tenuredAddr = uintptr_t(obj.get());
+    CHECK(obj->zone()->getUniqueId(obj, &uid));
+
+    // Force a compaction to move the object and check that the uid moved to
+    // the new tenured heap location.
+    JS::PrepareForFullGC(rt);
+    JS::GCForReason(rt, GC_SHRINK, JS::gcreason::API);
+    MinimizeHeap(rt);
+    CHECK(uintptr_t(obj.get()) != tenuredAddr);
+    CHECK(obj->zone()->hasUniqueId(obj));
+    CHECK(obj->zone()->getUniqueId(obj, &tmp));
+    CHECK(uid == tmp);
+
+    return true;
+}
+END_TEST(testGCUID)
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -828,19 +828,17 @@ Chunk::init(JSRuntime* rt)
     /*
      * Decommit the arenas. We do this after poisoning so that if the OS does
      * not have to recycle the pages, we still get the benefit of poisoning.
      */
     decommitAllArenas(rt);
 
     /* Initialize the chunk info. */
     info.init();
-    info.trailer.storeBuffer = nullptr;
-    info.trailer.location = ChunkLocationBitTenuredHeap;
-    info.trailer.runtime = rt;
+    new (&info.trailer) ChunkTrailer(rt);
 
     /* The rest of info fields are initialized in pickChunk. */
 }
 
 /*
  * Search for and return the next decommitted Arena. Our goal is to keep
  * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
  * it to the most recently freed arena when we free, and forcing it to
@@ -1105,16 +1103,17 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
     systemZone(nullptr),
     nursery(rt),
     storeBuffer(rt, nursery),
     stats(rt),
     marker(rt),
     usage(nullptr),
     mMemProfiler(rt),
     maxMallocBytes(0),
+    nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
     numArenasFreeCommitted(0),
     verifyPreData(nullptr),
     chunkAllocationSinceLastGC(false),
     nextFullGCTime(0),
     lastGCTime(PRMJ_Now()),
     mode(JSGC_MODE_INCREMENTAL),
     numActiveZoneIters(0),
     decommitThreshold(32 * 1024 * 1024),
@@ -2037,16 +2036,19 @@ RelocateCell(Zone* zone, TenuredCell* sr
 
     // Allocate a new cell.
     MOZ_ASSERT(zone == src->zone());
     TenuredCell* dst = AllocRelocatedCell(zone, thingKind, thingSize);
 
     // Copy source cell contents to destination.
     memcpy(dst, src, thingSize);
 
+    // Move any uid attached to the object.
+    src->zone()->transferUniqueId(dst, src);
+
     if (IsObjectAllocKind(thingKind)) {
         JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
         JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
 
         if (srcObj->isNative()) {
             NativeObject* srcNative = &srcObj->as<NativeObject>();
             NativeObject* dstNative = &dstObj->as<NativeObject>();
 
@@ -2168,17 +2170,16 @@ ShouldRelocateZone(size_t arenaCount, si
 
     return (relocCount * 100.0) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
 }
 
 bool
 ArenaLists::relocateArenas(Zone* zone, ArenaHeader*& relocatedListOut, JS::gcreason::Reason reason,
                            SliceBudget& sliceBudget, gcstats::Statistics& stats)
 {
-
     // This is only called from the main thread while we are doing a GC, so
     // there is no need to lock.
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
     MOZ_ASSERT(runtime_->gc.isHeapCompacting());
     MOZ_ASSERT(!runtime_->gc.isBackgroundSweeping());
 
     // Flush all the freeLists back into the arena headers
     purge();
@@ -3583,16 +3584,39 @@ GCRuntime::shouldReleaseObservedTypes()
         releaseTypes = true;
 
     if (releaseTypes)
         jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
 
     return releaseTypes;
 }
 
+struct IsAboutToBeFinalizedFunctor {
+    template <typename T> bool operator()(Cell** t) {
+        mozilla::DebugOnly<const Cell*> prior = *t;
+        bool result = IsAboutToBeFinalizedUnbarriered(reinterpret_cast<T**>(t));
+        // Sweep should not have to deal with moved pointers, since moving GC
+        // handles updating the UID table manually.
+        MOZ_ASSERT(*t == prior);
+        return result;
+    }
+};
+
+void
+JS::Zone::sweepUniqueIds(js::FreeOp* fop)
+{
+    for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront()) {
+        if (DispatchTraceKindTyped(IsAboutToBeFinalizedFunctor(), e.front().key()->getTraceKind(),
+                                   &e.front().mutableKey()))
+        {
+            e.removeFront();
+        }
+    }
+}
+
 /*
  * It's simpler if we preserve the invariant that every zone has at least one
  * compartment. If we know we're deleting the entire zone, then
  * SweepCompartments is allowed to delete all compartments. In this case,
  * |keepAtleastOne| is false. If some objects remain in the zone so that it
  * cannot be deleted, then we set |keepAtleastOne| to true, which prohibits
  * SweepCompartments from deleting every compartment. Instead, it preserves an
  * arbitrary compartment in the zone.
@@ -5053,16 +5077,22 @@ GCRuntime::beginSweepingZoneGroup()
         }
 
         {
             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
                 zone->sweepBreakpoints(&fop);
             }
         }
+
+        {
+            gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
+            for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
+                zone->sweepUniqueIds(&fop);
+        }
     }
 
     if (sweepingAtoms) {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_SYMBOL_REGISTRY);
         rt->symbolRegistry().sweep();
     }
 
     // Rejoin our off-main-thread tasks.
@@ -7145,16 +7175,19 @@ JS::GCCellPtr::mayBeOwnedByOtherRuntime(
 #ifdef JSGC_HASH_TABLE_CHECKS
 void
 js::gc::CheckHashTablesAfterMovingGC(JSRuntime* rt)
 {
     /*
      * Check that internal hash tables no longer have any pointers to things
      * that have been moved.
      */
+    for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
+        zone->checkUniqueIdTableAfterMovingGC();
+    }
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
         c->objectGroups.checkTablesAfterMovingGC();
         c->checkInitialShapesTableAfterMovingGC();
         c->checkWrapperMapAfterMovingGC();
         c->checkBaseShapeTableAfterMovingGC();
         if (c->debugScopes)
             c->debugScopes->checkHashTablesAfterMovingGC(rt);
     }
@@ -7370,16 +7403,22 @@ JS::AutoDisableGenerationalGC::~AutoDisa
 }
 
 JS_PUBLIC_API(bool)
 JS::IsGenerationalGCEnabled(JSRuntime* rt)
 {
     return rt->gc.isGenerationalGCEnabled();
 }
 
+uint64_t
+js::gc::NextCellUniqueId(JSRuntime* rt)
+{
+    return rt->gc.nextCellUniqueId();
+}
+
 namespace js {
 namespace gc {
 namespace MemInfo {
 
 static bool
 GCBytesGetter(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -71,16 +71,17 @@ JSObject::finalize(js::FreeOp* fop)
 
 #ifdef DEBUG
     MOZ_ASSERT(isTenured());
     if (!IsBackgroundFinalized(asTenured().getAllocKind())) {
         /* Assert we're on the main thread. */
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(fop->runtime()));
     }
 #endif
+
     const js::Class* clasp = getClass();
     if (clasp->finalize)
         clasp->finalize(fop, this);
 
     if (!clasp->isNative())
         return;
 
     js::NativeObject* nobj = &as<js::NativeObject>();