Back out 682bf201edde, ef35ba222ac8, and 6a20cf61289d (bug 750907, bug 751003, bug 751377) because of build failure on a CLOSED TREE
author: Matt Brubeck <mbrubeck@mozilla.com>
Thu, 03 May 2012 14:55:52 -0700
changeset 93045 26738df8a4e08a5474342dcfa36e16b537fb5eeb
parent 93044 43b2b050af51f23240a204ebed7403c76d2066dc
child 93046 aeea5b83cf89dba073ca078b9528707c9b467505
push id: 8949
push user: mbrubeck@mozilla.com
push date: Thu, 03 May 2012 21:55:20 +0000
treeherder: mozilla-inbound@26738df8a4e0 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 750907, 751003, 751377
milestone: 15.0a1
backs out: 682bf201eddec8eb14495895bd1fde0fba04616f
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Back out 682bf201edde, ef35ba222ac8, and 6a20cf61289d (bug 750907, bug 751003, bug 751377) because of build failure on a CLOSED TREE
js/src/Makefile.in
js/src/builtin/MapObject.cpp
js/src/frontend/Parser.cpp
js/src/gc/Barrier-inl.h
js/src/gc/Barrier.h
js/src/gc/Heap.h
js/src/gc/Marking.cpp
js/src/gc/Marking.h
js/src/jsapi.cpp
js/src/jsarray.cpp
js/src/jsatom.cpp
js/src/jsatom.h
js/src/jscell.h
js/src/jscntxt.cpp
js/src/jscompartment.cpp
js/src/jsdbgapi.cpp
js/src/jsexn.cpp
js/src/jsfun.cpp
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcmark.cpp
js/src/jsgcmark.h
js/src/jsinfer.cpp
js/src/jsinfer.h
js/src/jsinferinlines.h
js/src/jsinterp.cpp
js/src/jsiter.cpp
js/src/jsobj.cpp
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/jsproxy.cpp
js/src/jsscope.cpp
js/src/jsscope.h
js/src/jsscopeinlines.h
js/src/jsscript.cpp
js/src/jsstr.h
js/src/jstypedarray.cpp
js/src/jswatchpoint.cpp
js/src/jsweakmap.cpp
js/src/jsweakmap.h
js/src/jswrapper.cpp
js/src/jsxml.cpp
js/src/jsxml.h
js/src/methodjit/MethodJIT.cpp
js/src/methodjit/StubCalls.cpp
js/src/vm/Debugger.cpp
js/src/vm/ObjectImpl-inl.h
js/src/vm/ObjectImpl.cpp
js/src/vm/RegExpStatics.h
js/src/vm/Stack.cpp
js/src/vm/String-inl.h
js/src/vm/String.cpp
js/src/vm/String.h
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -113,16 +113,17 @@ CPPSRCS		= \
 		jsdate.cpp \
 		jsdbgapi.cpp \
 		jsdhash.cpp \
 		jsdtoa.cpp \
 		jsexn.cpp \
 		jsfriendapi.cpp \
 		jsfun.cpp \
 		jsgc.cpp \
+		jsgcmark.cpp \
 		jscrashreport.cpp \
 		jshash.cpp \
 		jsinfer.cpp \
 		jsinterp.cpp \
 		jsiter.cpp \
 		jslog2.cpp \
 		jsmath.cpp \
 		jsnativestack.cpp \
@@ -166,17 +167,16 @@ CPPSRCS		= \
 		TokenStream.cpp \
 		TestingFunctions.cpp \
 		LifoAlloc.cpp \
 		MapObject.cpp \
 		MemoryMetrics.cpp \
 		RegExpObject.cpp \
 		RegExpStatics.cpp \
 		RegExp.cpp \
-		Marking.cpp \
 		Memory.cpp \
 		Statistics.cpp \
 		StringBuffer.cpp \
 		Unicode.cpp \
 		Xdr.cpp \
 		$(NULL)
 
 # Changes to internal header files, used externally, massively slow down
@@ -191,16 +191,17 @@ INSTALLED_HEADERS = \
 		jsatom.h \
 		jsatom.tbl \
 		jsclass.h \
 		jsclist.h \
 		jsdbgapi.h \
 		jsdhash.h \
 		jsfriendapi.h \
 		jsgc.h \
+		jscell.h \
 		jshash.h \
 		jslock.h \
 		json.h \
 		jsproxy.h \
 		jsprf.h \
 		jsproto.tbl \
 		jsprvtd.h \
 		jspubtd.h \
@@ -217,20 +218,19 @@ INSTALLED_HEADERS = \
 #
 EXPORTS_NAMESPACES += ds gc
 
 EXPORTS_ds = \
 		BitArray.h \
 		$(NULL)
 
 EXPORTS_gc = \
+		Statistics.h \
 		Barrier.h \
-		Heap.h \
 		Root.h \
-		Statistics.h \
 		$(NULL)
 
 ######################################################
 # BEGIN include exported headers from the JS engine
 #
 #       Ultimately, after cleansing INSTALLED_HEADERS,
 #       these will be the ONLY headers exported by
 #       the js engine
--- a/js/src/builtin/MapObject.cpp
+++ b/js/src/builtin/MapObject.cpp
@@ -38,20 +38,20 @@
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "mozilla/FloatingPoint.h"
 
 #include "builtin/MapObject.h"
 
 #include "jscntxt.h"
+#include "jsgcmark.h"
 #include "jsiter.h"
 #include "jsobj.h"
 
-#include "gc/Marking.h"
 #include "vm/GlobalObject.h"
 #include "vm/MethodGuard.h"
 #include "vm/Stack.h"
 
 #include "jsobjinlines.h"
 
 using namespace js;
 
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -60,31 +60,31 @@
 #include "jsutil.h"
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 
 #include "frontend/BytecodeEmitter.h"
 #include "frontend/FoldConstants.h"
 #include "frontend/ParseMaps.h"
 #include "frontend/TokenStream.h"
-#include "gc/Marking.h"
 
 #if JS_HAS_XML_SUPPORT
 #include "jsxml.h"
 #endif
 
 #include "jsatominlines.h"
 #include "jsscriptinlines.h"
 
--- a/js/src/gc/Barrier-inl.h
+++ b/js/src/gc/Barrier-inl.h
@@ -35,18 +35,19 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef jsgc_barrier_inl_h___
 #define jsgc_barrier_inl_h___
 
+#include "jsgcmark.h"
+
 #include "gc/Barrier.h"
-#include "gc/Marking.h"
 
 #include "vm/ObjectImpl-inl.h"
 #include "vm/String-inl.h"
 
 namespace js {
 
 inline void
 EncapsulatedValue::writeBarrierPre(const Value &value)
--- a/js/src/gc/Barrier.h
+++ b/js/src/gc/Barrier.h
@@ -36,18 +36,18 @@
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef jsgc_barrier_h___
 #define jsgc_barrier_h___
 
 #include "jsapi.h"
+#include "jscell.h"
 
-#include "gc/Heap.h"
 #include "js/HashTable.h"
 
 /*
  * A write barrier is a mechanism used by incremental or generation GCs to
  * ensure that every value that needs to be marked is marked. In general, the
  * write barrier should be invoked whenever a write can cause the set of things
  * traced through by the GC to change. This includes:
  *   - writes to object properties
@@ -242,16 +242,17 @@ BarrieredSetPair(JSCompartment *comp,
     v1.post();
     v2.post();
 }
 
 struct Shape;
 class BaseShape;
 namespace types { struct TypeObject; }
 
+typedef HeapPtr<JSAtom> HeapPtrAtom;
 typedef HeapPtr<JSObject> HeapPtrObject;
 typedef HeapPtr<JSFunction> HeapPtrFunction;
 typedef HeapPtr<JSString> HeapPtrString;
 typedef HeapPtr<JSScript> HeapPtrScript;
 typedef HeapPtr<Shape> HeapPtrShape;
 typedef HeapPtr<BaseShape> HeapPtrBaseShape;
 typedef HeapPtr<types::TypeObject> HeapPtrTypeObject;
 typedef HeapPtr<JSXML> HeapPtrXML;
deleted file mode 100644
--- a/js/src/gc/Heap.h
+++ /dev/null
@@ -1,984 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef gc_heap_h___
-#define gc_heap_h___
-
-#include "mozilla/Attributes.h"
-#include "mozilla/StandardInteger.h"
-
-#include <stddef.h>
-
-#include "jstypes.h"
-#include "jsutil.h"
-
-#include "ds/BitArray.h"
-
-struct JSCompartment;
-
-extern "C" {
-struct JSRuntime;
-}
-
-namespace js {
-
-class FreeOp;
-
-namespace gc {
-
-struct Arena;
-struct ArenaHeader;
-struct Chunk;
-
-/*
- * Live objects are marked black. How many other additional colors are available
- * depends on the size of the GCThing. Objects marked gray are eligible for
- * cycle collection.
- */
-static const uint32_t BLACK = 0;
-static const uint32_t GRAY = 1;
-
-/* The GC allocation kinds. */
-enum AllocKind {
-    FINALIZE_OBJECT0,
-    FINALIZE_OBJECT0_BACKGROUND,
-    FINALIZE_OBJECT2,
-    FINALIZE_OBJECT2_BACKGROUND,
-    FINALIZE_OBJECT4,
-    FINALIZE_OBJECT4_BACKGROUND,
-    FINALIZE_OBJECT8,
-    FINALIZE_OBJECT8_BACKGROUND,
-    FINALIZE_OBJECT12,
-    FINALIZE_OBJECT12_BACKGROUND,
-    FINALIZE_OBJECT16,
-    FINALIZE_OBJECT16_BACKGROUND,
-    FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16_BACKGROUND,
-    FINALIZE_SCRIPT,
-    FINALIZE_SHAPE,
-    FINALIZE_BASE_SHAPE,
-    FINALIZE_TYPE_OBJECT,
-#if JS_HAS_XML_SUPPORT
-    FINALIZE_XML,
-#endif
-    FINALIZE_SHORT_STRING,
-    FINALIZE_STRING,
-    FINALIZE_EXTERNAL_STRING,
-    FINALIZE_LAST = FINALIZE_EXTERNAL_STRING
-};
-
-static const unsigned FINALIZE_LIMIT = FINALIZE_LAST + 1;
-static const unsigned FINALIZE_OBJECT_LIMIT = FINALIZE_OBJECT_LAST + 1;
-
-/*
- * This must be an upper bound, but we do not need the least upper bound, so
- * we just exclude non-background objects.
- */
-static const size_t MAX_BACKGROUND_FINALIZE_KINDS = FINALIZE_LIMIT - FINALIZE_OBJECT_LIMIT / 2;
-
-/*
- * A GC cell is the base class for all GC things.
- */
-struct Cell
-{
-    static const size_t CellShift = 3;
-    static const size_t CellSize = size_t(1) << CellShift;
-    static const size_t CellMask = CellSize - 1;
-
-    inline uintptr_t address() const;
-    inline ArenaHeader *arenaHeader() const;
-    inline Chunk *chunk() const;
-    inline AllocKind getAllocKind() const;
-    MOZ_ALWAYS_INLINE bool isMarked(uint32_t color = BLACK) const;
-    MOZ_ALWAYS_INLINE bool markIfUnmarked(uint32_t color = BLACK) const;
-    MOZ_ALWAYS_INLINE void unmark(uint32_t color) const;
-
-    inline JSCompartment *compartment() const;
-
-#ifdef DEBUG
-    inline bool isAligned() const;
-#endif
-};
-
-/*
- * Page size is 4096 by default, except for SPARC, where it is 8192.
- * Note: Do not use JS_CPU_SPARC here, this header is used outside JS.
- * Bug 692267: Move page size definition to gc/Memory.h and include it
- *             directly once jsgc.h is no longer an installed header.
- */
-#if defined(SOLARIS) && (defined(__sparc) || defined(__sparcv9))
-const size_t PageShift = 13;
-#else
-const size_t PageShift = 12;
-#endif
-const size_t PageSize = size_t(1) << PageShift;
-
-const size_t ChunkShift = 20;
-const size_t ChunkSize = size_t(1) << ChunkShift;
-const size_t ChunkMask = ChunkSize - 1;
-
-const size_t ArenaShift = PageShift;
-const size_t ArenaSize = PageSize;
-const size_t ArenaMask = ArenaSize - 1;
-
-/*
- * This is the maximum number of arenas we allow in the FreeCommitted state
- * before we trigger a GC_SHRINK to release free arenas to the OS.
- */
-const static uint32_t FreeCommittedArenasThreshold = (32 << 20) / ArenaSize;
-
-/*
- * The mark bitmap has one bit per each GC cell. For multi-cell GC things this
- * wastes space but allows to avoid expensive devisions by thing's size when
- * accessing the bitmap. In addition this allows to use some bits for colored
- * marking during the cycle GC.
- */
-const size_t ArenaCellCount = size_t(1) << (ArenaShift - Cell::CellShift);
-const size_t ArenaBitmapBits = ArenaCellCount;
-const size_t ArenaBitmapBytes = ArenaBitmapBits / 8;
-const size_t ArenaBitmapWords = ArenaBitmapBits / JS_BITS_PER_WORD;
-
-/*
- * A FreeSpan represents a contiguous sequence of free cells in an Arena.
- * |first| is the address of the first free cell in the span. |last| is the
- * address of the last free cell in the span. This last cell holds a FreeSpan
- * data structure for the next span unless this is the last span on the list
- * of spans in the arena. For this last span |last| points to the last byte of
- * the last thing in the arena and no linkage is stored there, so
- * |last| == arenaStart + ArenaSize - 1. If the space at the arena end is
- * fully used this last span is empty and |first| == |last + 1|.
- *
- * Thus |first| < |last| implies that we have either the last span with at least
- * one element or that the span is not the last and contains at least 2
- * elements. In both cases to allocate a thing from this span we need simply
- * to increment |first| by the allocation size.
- *
- * |first| == |last| implies that we have a one element span that records the
- * next span. So to allocate from it we need to update the span list head
- * with a copy of the span stored at |last| address so the following
- * allocations will use that span.
- *
- * |first| > |last| implies that we have an empty last span and the arena is
- * fully used.
- *
- * Also only for the last span (|last| & 1)! = 0 as all allocation sizes are
- * multiples of Cell::CellSize.
- */
-struct FreeSpan
-{
-    uintptr_t   first;
-    uintptr_t   last;
-
-  public:
-    FreeSpan() {}
-
-    FreeSpan(uintptr_t first, uintptr_t last)
-      : first(first), last(last) {
-        checkSpan();
-    }
-
-    /*
-     * To minimize the size of the arena header the first span is encoded
-     * there as offsets from the arena start.
-     */
-    static size_t encodeOffsets(size_t firstOffset, size_t lastOffset) {
-        /* Check that we can pack the offsets into uint16. */
-        JS_STATIC_ASSERT(ArenaShift < 16);
-        JS_ASSERT(firstOffset <= ArenaSize);
-        JS_ASSERT(lastOffset < ArenaSize);
-        JS_ASSERT(firstOffset <= ((lastOffset + 1) & ~size_t(1)));
-        return firstOffset | (lastOffset << 16);
-    }
-
-    /*
-     * Encoded offsets for a full arena when its first span is the last one
-     * and empty.
-     */
-    static const size_t FullArenaOffsets = ArenaSize | ((ArenaSize - 1) << 16);
-
-    static FreeSpan decodeOffsets(uintptr_t arenaAddr, size_t offsets) {
-        JS_ASSERT(!(arenaAddr & ArenaMask));
-
-        size_t firstOffset = offsets & 0xFFFF;
-        size_t lastOffset = offsets >> 16;
-        JS_ASSERT(firstOffset <= ArenaSize);
-        JS_ASSERT(lastOffset < ArenaSize);
-
-        /*
-         * We must not use | when calculating first as firstOffset is
-         * ArenaMask + 1 for the empty span.
-         */
-        return FreeSpan(arenaAddr + firstOffset, arenaAddr | lastOffset);
-    }
-
-    void initAsEmpty(uintptr_t arenaAddr = 0) {
-        JS_ASSERT(!(arenaAddr & ArenaMask));
-        first = arenaAddr + ArenaSize;
-        last = arenaAddr | (ArenaSize  - 1);
-        JS_ASSERT(isEmpty());
-    }
-
-    bool isEmpty() const {
-        checkSpan();
-        return first > last;
-    }
-
-    bool hasNext() const {
-        checkSpan();
-        return !(last & uintptr_t(1));
-    }
-
-    const FreeSpan *nextSpan() const {
-        JS_ASSERT(hasNext());
-        return reinterpret_cast<FreeSpan *>(last);
-    }
-
-    FreeSpan *nextSpanUnchecked(size_t thingSize) const {
-#ifdef DEBUG
-        uintptr_t lastOffset = last & ArenaMask;
-        JS_ASSERT(!(lastOffset & 1));
-        JS_ASSERT((ArenaSize - lastOffset) % thingSize == 0);
-#endif
-        return reinterpret_cast<FreeSpan *>(last);
-    }
-
-    uintptr_t arenaAddressUnchecked() const {
-        return last & ~ArenaMask;
-    }
-
-    uintptr_t arenaAddress() const {
-        checkSpan();
-        return arenaAddressUnchecked();
-    }
-
-    ArenaHeader *arenaHeader() const {
-        return reinterpret_cast<ArenaHeader *>(arenaAddress());
-    }
-
-    bool isSameNonEmptySpan(const FreeSpan *another) const {
-        JS_ASSERT(!isEmpty());
-        JS_ASSERT(!another->isEmpty());
-        return first == another->first && last == another->last;
-    }
-
-    bool isWithinArena(uintptr_t arenaAddr) const {
-        JS_ASSERT(!(arenaAddr & ArenaMask));
-
-        /* Return true for the last empty span as well. */
-        return arenaAddress() == arenaAddr;
-    }
-
-    size_t encodeAsOffsets() const {
-        /*
-         * We must use first - arenaAddress(), not first & ArenaMask as
-         * first == ArenaMask + 1 for an empty span.
-         */
-        uintptr_t arenaAddr = arenaAddress();
-        return encodeOffsets(first - arenaAddr, last & ArenaMask);
-    }
-
-    /* See comments before FreeSpan for details. */
-    MOZ_ALWAYS_INLINE void *allocate(size_t thingSize) {
-        JS_ASSERT(thingSize % Cell::CellSize == 0);
-        checkSpan();
-        uintptr_t thing = first;
-        if (thing < last) {
-            /* Bump-allocate from the current span. */
-            first = thing + thingSize;
-        } else if (JS_LIKELY(thing == last)) {
-            /*
-             * Move to the next span. We use JS_LIKELY as without PGO
-             * compilers mis-predict == here as unlikely to succeed.
-             */
-            *this = *reinterpret_cast<FreeSpan *>(thing);
-        } else {
-            return NULL;
-        }
-        checkSpan();
-        return reinterpret_cast<void *>(thing);
-    }
-
-    /* A version of allocate when we know that the span is not empty. */
-    MOZ_ALWAYS_INLINE void *infallibleAllocate(size_t thingSize) {
-        JS_ASSERT(thingSize % Cell::CellSize == 0);
-        checkSpan();
-        uintptr_t thing = first;
-        if (thing < last) {
-            first = thing + thingSize;
-        } else {
-            JS_ASSERT(thing == last);
-            *this = *reinterpret_cast<FreeSpan *>(thing);
-        }
-        checkSpan();
-        return reinterpret_cast<void *>(thing);
-    }
-
-    /*
-     * Allocate from a newly allocated arena. We do not move the free list
-     * from the arena. Rather we set the arena up as fully used during the
-     * initialization so to allocate we simply return the first thing in the
-     * arena and set the free list to point to the second.
-     */
-    MOZ_ALWAYS_INLINE void *allocateFromNewArena(uintptr_t arenaAddr, size_t firstThingOffset,
-                                                size_t thingSize) {
-        JS_ASSERT(!(arenaAddr & ArenaMask));
-        uintptr_t thing = arenaAddr | firstThingOffset;
-        first = thing + thingSize;
-        last = arenaAddr | ArenaMask;
-        checkSpan();
-        return reinterpret_cast<void *>(thing);
-    }
-
-    void checkSpan() const {
-#ifdef DEBUG
-        /* We do not allow spans at the end of the address space. */
-        JS_ASSERT(last != uintptr_t(-1));
-        JS_ASSERT(first);
-        JS_ASSERT(last);
-        JS_ASSERT(first - 1 <= last);
-        uintptr_t arenaAddr = arenaAddressUnchecked();
-        if (last & 1) {
-            /* The span is the last. */
-            JS_ASSERT((last & ArenaMask) == ArenaMask);
-
-            if (first - 1 == last) {
-                /* The span is last and empty. The above start != 0 check
-                 * implies that we are not at the end of the address space.
-                 */
-                return;
-            }
-            size_t spanLength = last - first + 1;
-            JS_ASSERT(spanLength % Cell::CellSize == 0);
-
-            /* Start and end must belong to the same arena. */
-            JS_ASSERT((first & ~ArenaMask) == arenaAddr);
-            return;
-        }
-
-        /* The span is not the last and we have more spans to follow. */
-        JS_ASSERT(first <= last);
-        size_t spanLengthWithoutOneThing = last - first;
-        JS_ASSERT(spanLengthWithoutOneThing % Cell::CellSize == 0);
-
-        JS_ASSERT((first & ~ArenaMask) == arenaAddr);
-
-        /*
-         * If there is not enough space before the arena end to allocate one
-         * more thing, then the span must be marked as the last one to avoid
-         * storing useless empty span reference.
-         */
-        size_t beforeTail = ArenaSize - (last & ArenaMask);
-        JS_ASSERT(beforeTail >= sizeof(FreeSpan) + Cell::CellSize);
-
-        FreeSpan *next = reinterpret_cast<FreeSpan *>(last);
-
-        /*
-         * The GC things on the list of free spans come from one arena
-         * and the spans are linked in ascending address order with
-         * at least one non-free thing between spans.
-         */
-        JS_ASSERT(last < next->first);
-        JS_ASSERT(arenaAddr == next->arenaAddressUnchecked());
-
-        if (next->first > next->last) {
-            /*
-             * The next span is the empty span that terminates the list for
-             * arenas that do not have any free things at the end.
-             */
-            JS_ASSERT(next->first - 1 == next->last);
-            JS_ASSERT(arenaAddr + ArenaSize == next->first);
-        }
-#endif
-    }
-
-};
-
-/* Every arena has a header. */
-struct ArenaHeader
-{
-    friend struct FreeLists;
-
-    JSCompartment   *compartment;
-
-    /*
-     * ArenaHeader::next has two purposes: when unallocated, it points to the
-     * next available Arena's header. When allocated, it points to the next
-     * arena of the same size class and compartment.
-     */
-    ArenaHeader     *next;
-
-  private:
-    /*
-     * The first span of free things in the arena. We encode it as the start
-     * and end offsets within the arena, not as FreeSpan structure, to
-     * minimize the header size.
-     */
-    size_t          firstFreeSpanOffsets;
-
-    /*
-     * One of AllocKind constants or FINALIZE_LIMIT when the arena does not
-     * contain any GC things and is on the list of empty arenas in the GC
-     * chunk. The latter allows to quickly check if the arena is allocated
-     * during the conservative GC scanning without searching the arena in the
-     * list.
-     */
-    size_t       allocKind          : 8;
-
-    /*
-     * When recursive marking uses too much stack the marking is delayed and
-     * the corresponding arenas are put into a stack using the following field
-     * as a linkage. To distinguish the bottom of the stack from the arenas
-     * not present in the stack we use an extra flag to tag arenas on the
-     * stack.
-     *
-     * Delayed marking is also used for arenas that we allocate into during an
-     * incremental GC. In this case, we intend to mark all the objects in the
-     * arena, and it's faster to do this marking in bulk.
-     *
-     * To minimize the ArenaHeader size we record the next delayed marking
-     * linkage as arenaAddress() >> ArenaShift and pack it with the allocKind
-     * field and hasDelayedMarking flag. We use 8 bits for the allocKind, not
-     * ArenaShift - 1, so the compiler can use byte-level memory instructions
-     * to access it.
-     */
-  public:
-    size_t       hasDelayedMarking  : 1;
-    size_t       allocatedDuringIncremental : 1;
-    size_t       markOverflow : 1;
-    size_t       nextDelayedMarking : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
-
-    static void staticAsserts() {
-        /* We must be able to fit the allockind into uint8_t. */
-        JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
-
-        /*
-         * nextDelayedMarkingpacking assumes that ArenaShift has enough bits
-         * to cover allocKind and hasDelayedMarking.
-         */
-        JS_STATIC_ASSERT(ArenaShift >= 8 + 1 + 1 + 1);
-    }
-
-    inline uintptr_t address() const;
-    inline Chunk *chunk() const;
-
-    bool allocated() const {
-        JS_ASSERT(allocKind <= size_t(FINALIZE_LIMIT));
-        return allocKind < size_t(FINALIZE_LIMIT);
-    }
-
-    void init(JSCompartment *comp, AllocKind kind) {
-        JS_ASSERT(!allocated());
-        JS_ASSERT(!markOverflow);
-        JS_ASSERT(!allocatedDuringIncremental);
-        JS_ASSERT(!hasDelayedMarking);
-        compartment = comp;
-
-        JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
-        allocKind = size_t(kind);
-
-        /* See comments in FreeSpan::allocateFromNewArena. */
-        firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
-    }
-
-    void setAsNotAllocated() {
-        allocKind = size_t(FINALIZE_LIMIT);
-        markOverflow = 0;
-        allocatedDuringIncremental = 0;
-        hasDelayedMarking = 0;
-        nextDelayedMarking = 0;
-    }
-
-    inline uintptr_t arenaAddress() const;
-    inline Arena *getArena();
-
-    AllocKind getAllocKind() const {
-        JS_ASSERT(allocated());
-        return AllocKind(allocKind);
-    }
-
-    inline size_t getThingSize() const;
-
-    bool hasFreeThings() const {
-        return firstFreeSpanOffsets != FreeSpan::FullArenaOffsets;
-    }
-
-    inline bool isEmpty() const;
-
-    void setAsFullyUsed() {
-        firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
-    }
-
-    inline FreeSpan getFirstFreeSpan() const;
-    inline void setFirstFreeSpan(const FreeSpan *span);
-
-#ifdef DEBUG
-    void checkSynchronizedWithFreeList() const;
-#endif
-
-    inline ArenaHeader *getNextDelayedMarking() const;
-    inline void setNextDelayedMarking(ArenaHeader *aheader);
-};
-
-struct Arena
-{
-    /*
-     * Layout of an arena:
-     * An arena is 4K in size and 4K-aligned. It starts with the ArenaHeader
-     * descriptor followed by some pad bytes. The remainder of the arena is
-     * filled with the array of T things. The pad bytes ensure that the thing
-     * array ends exactly at the end of the arena.
-     *
-     * +-------------+-----+----+----+-----+----+
-     * | ArenaHeader | pad | T0 | T1 | ... | Tn |
-     * +-------------+-----+----+----+-----+----+
-     *
-     * <----------------------------------------> = ArenaSize bytes
-     * <-------------------> = first thing offset
-     */
-    ArenaHeader aheader;
-    uint8_t     data[ArenaSize - sizeof(ArenaHeader)];
-
-  private:
-    static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
-    static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];
-
-  public:
-    static void staticAsserts();
-
-    static size_t thingSize(AllocKind kind) {
-        return ThingSizes[kind];
-    }
-
-    static size_t firstThingOffset(AllocKind kind) {
-        return FirstThingOffsets[kind];
-    }
-
-    static size_t thingsPerArena(size_t thingSize) {
-        JS_ASSERT(thingSize % Cell::CellSize == 0);
-
-        /* We should be able to fit FreeSpan in any GC thing. */
-        JS_ASSERT(thingSize >= sizeof(FreeSpan));
-
-        return (ArenaSize - sizeof(ArenaHeader)) / thingSize;
-    }
-
-    static size_t thingsSpan(size_t thingSize) {
-        return thingsPerArena(thingSize) * thingSize;
-    }
-
-    static bool isAligned(uintptr_t thing, size_t thingSize) {
-        /* Things ends at the arena end. */
-        uintptr_t tailOffset = (ArenaSize - thing) & ArenaMask;
-        return tailOffset % thingSize == 0;
-    }
-
-    uintptr_t address() const {
-        return aheader.address();
-    }
-
-    uintptr_t thingsStart(AllocKind thingKind) {
-        return address() | firstThingOffset(thingKind);
-    }
-
-    uintptr_t thingsEnd() {
-        return address() + ArenaSize;
-    }
-
-    template <typename T>
-    bool finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize);
-};
-
-inline size_t
-ArenaHeader::getThingSize() const
-{
-    JS_ASSERT(allocated());
-    return Arena::thingSize(getAllocKind());
-}
-
-/* The chunk header (located at the end of the chunk to preserve arena alignment). */
-struct ChunkInfo
-{
-    Chunk           *next;
-    Chunk           **prevp;
-
-    /* Free arenas are linked together with aheader.next. */
-    ArenaHeader     *freeArenasHead;
-
-    /*
-     * Decommitted arenas are tracked by a bitmap in the chunk header. We use
-     * this offset to start our search iteration close to a decommitted arena
-     * that we can allocate.
-     */
-    uint32_t        lastDecommittedArenaOffset;
-
-    /* Number of free arenas, either committed or decommitted. */
-    uint32_t        numArenasFree;
-
-    /* Number of free, committed arenas. */
-    uint32_t        numArenasFreeCommitted;
-
-    /* Number of GC cycles this chunk has survived. */
-    uint32_t        age;
-};
-
-/*
- * Calculating ArenasPerChunk:
- *
- * In order to figure out how many Arenas will fit in a chunk, we need to know
- * how much extra space is available after we allocate the header data. This
- * is a problem because the header size depends on the number of arenas in the
- * chunk. The two dependent fields are bitmap and decommittedArenas.
- *
- * For the mark bitmap, we know that each arena will use a fixed number of full
- * bytes: ArenaBitmapBytes. The full size of the header data is this number
- * multiplied by the eventual number of arenas we have in the header. We,
- * conceptually, distribute this header data among the individual arenas and do
- * not include it in the header. This way we do not have to worry about its
- * variable size: it gets attached to the variable number we are computing.
- *
- * For the decommitted arena bitmap, we only have 1 bit per arena, so this
- * technique will not work. Instead, we observe that we do not have enough
- * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
- * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
- * This will not become 63 bytes unless we double the data required in the
- * header. Therefore, we just compute the number of bytes required to track
- * every possible arena and do not worry about slop bits, since there are too
- * few to usefully allocate.
- *
- * To actually compute the number of arenas we can allocate in a chunk, we
- * divide the amount of available space less the header info (not including
- * the mark bitmap which is distributed into the arena size) by the size of
- * the arena (with the mark bitmap bytes it uses).
- */
-const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
-const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
-const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
-const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
-
-/* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
-struct ChunkBitmap
-{
-    uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];
-
-    MOZ_ALWAYS_INLINE void getMarkWordAndMask(const Cell *cell, uint32_t color,
-                                             uintptr_t **wordp, uintptr_t *maskp);
-
-    MOZ_ALWAYS_INLINE bool isMarked(const Cell *cell, uint32_t color) {
-        uintptr_t *word, mask;
-        getMarkWordAndMask(cell, color, &word, &mask);
-        return *word & mask;
-    }
-
-    MOZ_ALWAYS_INLINE bool markIfUnmarked(const Cell *cell, uint32_t color) {
-        uintptr_t *word, mask;
-        getMarkWordAndMask(cell, BLACK, &word, &mask);
-        if (*word & mask)
-            return false;
-        *word |= mask;
-        if (color != BLACK) {
-            /*
-             * We use getMarkWordAndMask to recalculate both mask and word as
-             * doing just mask << color may overflow the mask.
-             */
-            getMarkWordAndMask(cell, color, &word, &mask);
-            if (*word & mask)
-                return false;
-            *word |= mask;
-        }
-        return true;
-    }
-
-    MOZ_ALWAYS_INLINE void unmark(const Cell *cell, uint32_t color) {
-        uintptr_t *word, mask;
-        getMarkWordAndMask(cell, color, &word, &mask);
-        *word &= ~mask;
-    }
-
-    void clear() {
-        PodArrayZero(bitmap);
-    }
-
-#ifdef DEBUG
-    bool noBitsSet(ArenaHeader *aheader) {
-        /*
-         * We assume that the part of the bitmap corresponding to the arena
-         * has the exact number of words so we do not need to deal with a word
-         * that covers bits from two arenas.
-         */
-        JS_STATIC_ASSERT(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD);
-
-        uintptr_t *word, unused;
-        getMarkWordAndMask(reinterpret_cast<Cell *>(aheader->address()), BLACK, &word, &unused);
-        for (size_t i = 0; i != ArenaBitmapWords; i++) {
-            if (word[i])
-                return false;
-        }
-        return true;
-    }
-#endif
-};
-
-JS_STATIC_ASSERT(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap));
-
-typedef BitArray<ArenasPerChunk> PerArenaBitmap;
-
-const size_t ChunkPadSize = ChunkSize
-                            - (sizeof(Arena) * ArenasPerChunk)
-                            - sizeof(ChunkBitmap)
-                            - sizeof(PerArenaBitmap)
-                            - sizeof(ChunkInfo);
-JS_STATIC_ASSERT(ChunkPadSize < BytesPerArenaWithHeader);
-
-/*
- * Chunks contain arenas and associated data structures (mark bitmap, delayed
- * marking state).
- */
-struct Chunk
-{
-    Arena           arenas[ArenasPerChunk];
-
-    /* Pad to full size to ensure cache alignment of ChunkInfo. */
-    uint8_t         padding[ChunkPadSize];
-
-    ChunkBitmap     bitmap;
-    PerArenaBitmap  decommittedArenas;
-    ChunkInfo       info;
-
-    static Chunk *fromAddress(uintptr_t addr) {
-        addr &= ~ChunkMask;
-        return reinterpret_cast<Chunk *>(addr);
-    }
-
-    static bool withinArenasRange(uintptr_t addr) {
-        uintptr_t offset = addr & ChunkMask;
-        return offset < ArenasPerChunk * ArenaSize;
-    }
-
-    static size_t arenaIndex(uintptr_t addr) {
-        JS_ASSERT(withinArenasRange(addr));
-        return (addr & ChunkMask) >> ArenaShift;
-    }
-
-    uintptr_t address() const {
-        uintptr_t addr = reinterpret_cast<uintptr_t>(this);
-        JS_ASSERT(!(addr & ChunkMask));
-        return addr;
-    }
-
-    bool unused() const {
-        return info.numArenasFree == ArenasPerChunk;
-    }
-
-    bool hasAvailableArenas() const {
-        return info.numArenasFree != 0;
-    }
-
-    inline void addToAvailableList(JSCompartment *compartment);
-    inline void insertToAvailableList(Chunk **insertPoint);
-    inline void removeFromAvailableList();
-
-    ArenaHeader *allocateArena(JSCompartment *comp, AllocKind kind);
-
-    void releaseArena(ArenaHeader *aheader);
-
-    static Chunk *allocate(JSRuntime *rt);
-
-    /* Must be called with the GC lock taken. */
-    static inline void release(JSRuntime *rt, Chunk *chunk);
-    static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
-
-    /* Must be called with the GC lock taken. */
-    inline void prepareToBeFreed(JSRuntime *rt);
-
-    /*
-     * Assuming that the info.prevp points to the next field of the previous
-     * chunk in a doubly-linked list, get that chunk.
-     */
-    Chunk *getPrevious() {
-        JS_ASSERT(info.prevp);
-        return fromPointerToNext(info.prevp);
-    }
-
-    /* Get the chunk from a pointer to its info.next field. */
-    static Chunk *fromPointerToNext(Chunk **nextFieldPtr) {
-        uintptr_t addr = reinterpret_cast<uintptr_t>(nextFieldPtr);
-        JS_ASSERT((addr & ChunkMask) == offsetof(Chunk, info.next));
-        return reinterpret_cast<Chunk *>(addr - offsetof(Chunk, info.next));
-    }
-
-  private:
-    inline void init();
-
-    /* Search for a decommitted arena to allocate. */
-    unsigned findDecommittedArenaOffset();
-    ArenaHeader* fetchNextDecommittedArena();
-
-  public:
-    /* Unlink and return the freeArenasHead. */
-    inline ArenaHeader* fetchNextFreeArena(JSRuntime *rt);
-
-    inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
-};
-
-JS_STATIC_ASSERT(sizeof(Chunk) == ChunkSize);
-
-inline uintptr_t
-Cell::address() const
-{
-    uintptr_t addr = uintptr_t(this);
-    JS_ASSERT(addr % Cell::CellSize == 0);
-    JS_ASSERT(Chunk::withinArenasRange(addr));
-    return addr;
-}
-
-inline uintptr_t
-ArenaHeader::address() const
-{
-    uintptr_t addr = reinterpret_cast<uintptr_t>(this);
-    JS_ASSERT(!(addr & ArenaMask));
-    JS_ASSERT(Chunk::withinArenasRange(addr));
-    return addr;
-}
-
-inline Chunk *
-ArenaHeader::chunk() const
-{
-    return Chunk::fromAddress(address());
-}
-
-inline uintptr_t
-ArenaHeader::arenaAddress() const
-{
-    return address();
-}
-
-inline Arena *
-ArenaHeader::getArena()
-{
-    return reinterpret_cast<Arena *>(arenaAddress());
-}
-
-inline bool
-ArenaHeader::isEmpty() const
-{
-    /* Arena is empty if its first span covers the whole arena. */
-    JS_ASSERT(allocated());
-    size_t firstThingOffset = Arena::firstThingOffset(getAllocKind());
-    return firstFreeSpanOffsets == FreeSpan::encodeOffsets(firstThingOffset, ArenaMask);
-}
-
-FreeSpan
-ArenaHeader::getFirstFreeSpan() const
-{
-#ifdef DEBUG
-    checkSynchronizedWithFreeList();
-#endif
-    return FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
-}
-
-void
-ArenaHeader::setFirstFreeSpan(const FreeSpan *span)
-{
-    JS_ASSERT(span->isWithinArena(arenaAddress()));
-    firstFreeSpanOffsets = span->encodeAsOffsets();
-}
-
-inline ArenaHeader *
-ArenaHeader::getNextDelayedMarking() const
-{
-    return &reinterpret_cast<Arena *>(nextDelayedMarking << ArenaShift)->aheader;
-}
-
-inline void
-ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
-{
-    JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
-    hasDelayedMarking = 1;
-    nextDelayedMarking = aheader->arenaAddress() >> ArenaShift;
-}
-
-JS_ALWAYS_INLINE void
-ChunkBitmap::getMarkWordAndMask(const Cell *cell, uint32_t color,
-                                uintptr_t **wordp, uintptr_t *maskp)
-{
-    size_t bit = (cell->address() & ChunkMask) / Cell::CellSize + color;
-    JS_ASSERT(bit < ArenaBitmapBits * ArenasPerChunk);
-    *maskp = uintptr_t(1) << (bit % JS_BITS_PER_WORD);
-    *wordp = &bitmap[bit / JS_BITS_PER_WORD];
-}
-
-static void
-AssertValidColor(const void *thing, uint32_t color)
-{
-#ifdef DEBUG
-    ArenaHeader *aheader = reinterpret_cast<const Cell *>(thing)->arenaHeader();
-    JS_ASSERT_IF(color, color < aheader->getThingSize() / Cell::CellSize);
-#endif
-}
-
-inline ArenaHeader *
-Cell::arenaHeader() const
-{
-    uintptr_t addr = address();
-    addr &= ~ArenaMask;
-    return reinterpret_cast<ArenaHeader *>(addr);
-}
-
-Chunk *
-Cell::chunk() const
-{
-    uintptr_t addr = uintptr_t(this);
-    JS_ASSERT(addr % Cell::CellSize == 0);
-    addr &= ~(ChunkSize - 1);
-    return reinterpret_cast<Chunk *>(addr);
-}
-
-AllocKind
-Cell::getAllocKind() const
-{
-    return arenaHeader()->getAllocKind();
-}
-
-bool
-Cell::isMarked(uint32_t color /* = BLACK */) const
-{
-    AssertValidColor(this, color);
-    return chunk()->bitmap.isMarked(this, color);
-}
-
-bool
-Cell::markIfUnmarked(uint32_t color /* = BLACK */) const
-{
-    AssertValidColor(this, color);
-    return chunk()->bitmap.markIfUnmarked(this, color);
-}
-
-void
-Cell::unmark(uint32_t color) const
-{
-    JS_ASSERT(color != BLACK);
-    AssertValidColor(this, color);
-    chunk()->bitmap.unmark(this, color);
-}
-
-JSCompartment *
-Cell::compartment() const
-{
-    return arenaHeader()->compartment;
-}
-
-#ifdef DEBUG
-bool
-Cell::isAligned() const
-{
-    return Arena::isAligned(address(), arenaHeader()->getThingSize());
-}
-#endif
-
-} /* namespace gc */
-
-} /* namespace js */
-
-#endif /* gc_heap_h___ */
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -60,16 +60,17 @@
 #include "jsclone.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsdate.h"
 #include "jsdtoa.h"
 #include "jsexn.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsmath.h"
 #include "jsnativestack.h"
 #include "jsnum.h"
 #include "json.h"
 #include "jsobj.h"
@@ -85,17 +86,16 @@
 #include "jstypedarray.h"
 #include "jsxml.h"
 
 #include "ds/LifoAlloc.h"
 #include "builtin/MapObject.h"
 #include "builtin/RegExp.h"
 #include "frontend/BytecodeCompiler.h"
 #include "frontend/BytecodeEmitter.h"
-#include "gc/Marking.h"
 #include "gc/Memory.h"
 #include "js/MemoryMetrics.h"
 #include "yarr/BumpPointerAllocator.h"
 #include "vm/MethodGuard.h"
 #include "vm/NumericConversions.h"
 #include "vm/StringBuffer.h"
 #include "vm/Xdr.h"
 
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -110,28 +110,28 @@
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsscope.h"
 #include "jswrapper.h"
 #include "methodjit/MethodJIT.h"
 #include "methodjit/StubCalls.h"
 #include "methodjit/StubCalls-inl.h"
 
-#include "gc/Marking.h"
 #include "vm/ArgumentsObject.h"
 #include "vm/MethodGuard.h"
 #include "vm/NumericConversions.h"
 #include "vm/StringBuffer.h"
 
 #include "ds/Sort.h"
 
 #include "jsarrayinlines.h"
--- a/js/src/jsatom.cpp
+++ b/js/src/jsatom.cpp
@@ -49,24 +49,24 @@
 #include "jstypes.h"
 #include "jsutil.h"
 #include "jshash.h"
 #include "jsprf.h"
 #include "jsapi.h"
 #include "jsatom.h"
 #include "jscntxt.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsstr.h"
 #include "jsversion.h"
 #include "jsxml.h"
 
 #include "frontend/Parser.h"
-#include "gc/Marking.h"
 
 #include "jsstrinlines.h"
 #include "jsatominlines.h"
 #include "jsobjinlines.h"
 
 #include "vm/String-inl.h"
 #include "vm/Xdr.h"
 
--- a/js/src/jsatom.h
+++ b/js/src/jsatom.h
@@ -46,17 +46,16 @@
 #include "jsapi.h"
 #include "jsprvtd.h"
 #include "jshash.h"
 #include "jspubtd.h"
 #include "jslock.h"
 
 #include "gc/Barrier.h"
 #include "js/HashTable.h"
-#include "vm/String.h"
 
 struct JSIdArray {
     int length;
     js::HeapId vector[1];    /* actually, length jsid words */
 };
 
 /* Engine-internal extensions of jsid */
 
new file mode 100644
--- /dev/null
+++ b/js/src/jscell.h
@@ -0,0 +1,119 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey code.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2010
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Gregor Wagner <anygregor@gmail.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jscell_h___
+#define jscell_h___
+
+#include "jspubtd.h"
+
+struct JSCompartment;
+
+namespace js {
+namespace gc {
+
+struct ArenaHeader;
+struct Chunk;
+
+/* The GC allocation kinds. */
+enum AllocKind {
+    FINALIZE_OBJECT0,
+    FINALIZE_OBJECT0_BACKGROUND,
+    FINALIZE_OBJECT2,
+    FINALIZE_OBJECT2_BACKGROUND,
+    FINALIZE_OBJECT4,
+    FINALIZE_OBJECT4_BACKGROUND,
+    FINALIZE_OBJECT8,
+    FINALIZE_OBJECT8_BACKGROUND,
+    FINALIZE_OBJECT12,
+    FINALIZE_OBJECT12_BACKGROUND,
+    FINALIZE_OBJECT16,
+    FINALIZE_OBJECT16_BACKGROUND,
+    FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16_BACKGROUND,
+    FINALIZE_SCRIPT,
+    FINALIZE_SHAPE,
+    FINALIZE_BASE_SHAPE,
+    FINALIZE_TYPE_OBJECT,
+#if JS_HAS_XML_SUPPORT
+    FINALIZE_XML,
+#endif
+    FINALIZE_SHORT_STRING,
+    FINALIZE_STRING,
+    FINALIZE_EXTERNAL_STRING,
+    FINALIZE_LAST = FINALIZE_EXTERNAL_STRING
+};
+
+static const unsigned FINALIZE_LIMIT = FINALIZE_LAST + 1;
+static const unsigned FINALIZE_OBJECT_LIMIT = FINALIZE_OBJECT_LAST + 1;
+
+/*
+ * Live objects are marked black. How many other additional colors are available
+ * depends on the size of the GCThing. Objects marked gray are eligible for
+ * cycle collection.
+ */
+static const uint32_t BLACK = 0;
+static const uint32_t GRAY = 1;
+
+/*
+ * A GC cell is the base class for all GC things.
+ */
+struct Cell {
+    static const size_t CellShift = 3;
+    static const size_t CellSize = size_t(1) << CellShift;
+    static const size_t CellMask = CellSize - 1;
+
+    inline uintptr_t address() const;
+    inline ArenaHeader *arenaHeader() const;
+    inline Chunk *chunk() const;
+    inline AllocKind getAllocKind() const;
+
+    JS_ALWAYS_INLINE bool isMarked(uint32_t color = BLACK) const;
+    JS_ALWAYS_INLINE bool markIfUnmarked(uint32_t color = BLACK) const;
+    JS_ALWAYS_INLINE void unmark(uint32_t color) const;
+
+    inline JSCompartment *compartment() const;
+
+#ifdef DEBUG
+    inline bool isAligned() const;
+#endif
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* jscell_h___ */
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -60,32 +60,32 @@
 #include "jsprf.h"
 #include "jsatom.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsdbgapi.h"
 #include "jsexn.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsmath.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jspubtd.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 
 #ifdef JS_METHODJIT
 # include "assembler/assembler/MacroAssembler.h"
 # include "methodjit/MethodJIT.h"
 #endif
-#include "gc/Marking.h"
 #include "frontend/TokenStream.h"
 #include "frontend/ParseMaps.h"
 #include "yarr/BumpPointerAllocator.h"
 
 #include "jsatominlines.h"
 #include "jscntxtinlines.h"
 #include "jscompartment.h"
 #include "jsobjinlines.h"
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -36,25 +36,25 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "jscntxt.h"
 #include "jscompartment.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsiter.h"
 #include "jsmath.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 #include "jswatchpoint.h"
 #include "jswrapper.h"
 
 #include "assembler/wtf/Platform.h"
-#include "gc/Marking.h"
 #include "js/MemoryMetrics.h"
 #include "methodjit/MethodJIT.h"
 #include "methodjit/PolyIC.h"
 #include "methodjit/MonoIC.h"
 #include "vm/Debugger.h"
 #include "yarr/BumpPointerAllocator.h"
 
 #include "jsgcinlines.h"
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -49,27 +49,27 @@
 #include "jsutil.h"
 #include "jsclist.h"
 #include "jsapi.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsdbgapi.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jslock.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 #include "jswatchpoint.h"
 #include "jswrapper.h"
 
-#include "gc/Marking.h"
 #include "frontend/BytecodeEmitter.h"
 #include "frontend/Parser.h"
 #include "vm/Debugger.h"
 
 #include "jsatominlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 #include "jsinterpinlines.h"
--- a/js/src/jsexn.cpp
+++ b/js/src/jsexn.cpp
@@ -50,25 +50,25 @@
 #include "jsutil.h"
 #include "jsprf.h"
 #include "jsapi.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsexn.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jswrapper.h"
 
-#include "gc/Marking.h"
 #include "vm/GlobalObject.h"
 #include "vm/StringBuffer.h"
 
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 
 #include "vm/Stack-inl.h"
 #include "vm/String-inl.h"
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -50,31 +50,31 @@
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsexn.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jspropertytree.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 
 #include "frontend/BytecodeCompiler.h"
 #include "frontend/BytecodeEmitter.h"
 #include "frontend/TokenStream.h"
-#include "gc/Marking.h"
 #include "vm/Debugger.h"
 #include "vm/MethodGuard.h"
 #include "vm/ScopeObject.h"
 #include "vm/Xdr.h"
 
 #if JS_HAS_GENERATORS
 # include "jsiter.h"
 #endif
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -86,33 +86,33 @@
 #include "jscrashreport.h"
 #include "jscrashformat.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsdbgapi.h"
 #include "jsexn.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsprobes.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jswatchpoint.h"
 #include "jsweakmap.h"
 #if JS_HAS_XML_SUPPORT
 #include "jsxml.h"
 #endif
 
 #include "frontend/Parser.h"
-#include "gc/Marking.h"
 #include "gc/Memory.h"
 #include "methodjit/MethodJIT.h"
 #include "vm/Debugger.h"
 #include "vm/String.h"
 
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
 
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -49,19 +49,19 @@
 
 #include "jsalloc.h"
 #include "jstypes.h"
 #include "jsprvtd.h"
 #include "jspubtd.h"
 #include "jslock.h"
 #include "jsutil.h"
 #include "jsversion.h"
+#include "jscell.h"
 
 #include "ds/BitArray.h"
-#include "gc/Heap.h"
 #include "gc/Statistics.h"
 #include "js/HashTable.h"
 #include "js/Vector.h"
 #include "js/TemplateLib.h"
 
 struct JSCompartment;
 
 extern void
@@ -82,16 +82,747 @@ namespace gc {
 
 enum State {
     NO_INCREMENTAL,
     MARK_ROOTS,
     MARK,
     INVALID
 };
 
+struct Arena;
+
+/*
+ * This must be an upper bound, but we do not need the least upper bound, so
+ * we just exclude non-background objects.
+ */
+const size_t MAX_BACKGROUND_FINALIZE_KINDS = FINALIZE_LIMIT - FINALIZE_OBJECT_LIMIT / 2;
+
+/*
+ * Page size is 4096 by default, except for SPARC, where it is 8192.
+ * Note: Do not use JS_CPU_SPARC here, this header is used outside JS.
+ * Bug 692267: Move page size definition to gc/Memory.h and include it
+ *             directly once jsgc.h is no longer an installed header.
+ */
+#if defined(SOLARIS) && (defined(__sparc) || defined(__sparcv9))
+const size_t PageShift = 13;
+#else
+const size_t PageShift = 12;
+#endif
+const size_t PageSize = size_t(1) << PageShift;
+
+const size_t ChunkShift = 20;
+const size_t ChunkSize = size_t(1) << ChunkShift;
+const size_t ChunkMask = ChunkSize - 1;
+
+const size_t ArenaShift = PageShift;
+const size_t ArenaSize = PageSize;
+const size_t ArenaMask = ArenaSize - 1;
+
+/*
+ * This is the maximum number of arenas we allow in the FreeCommitted state
+ * before we trigger a GC_SHRINK to release free arenas to the OS.
+ */
+const static uint32_t FreeCommittedArenasThreshold = (32 << 20) / ArenaSize;
+
+/*
+ * The mark bitmap has one bit per each GC cell. For multi-cell GC things this
+ * wastes space but allows us to avoid expensive divisions by the thing's size
+ * when accessing the bitmap. In addition this allows us to use some bits for
+ * colored marking during the cycle GC.
+ */
+const size_t ArenaCellCount = size_t(1) << (ArenaShift - Cell::CellShift);
+const size_t ArenaBitmapBits = ArenaCellCount;
+const size_t ArenaBitmapBytes = ArenaBitmapBits / 8;
+const size_t ArenaBitmapWords = ArenaBitmapBits / JS_BITS_PER_WORD;
+
+/*
+ * A FreeSpan represents a contiguous sequence of free cells in an Arena.
+ * |first| is the address of the first free cell in the span. |last| is the
+ * address of the last free cell in the span. This last cell holds a FreeSpan
+ * data structure for the next span unless this is the last span on the list
+ * of spans in the arena. For this last span |last| points to the last byte of
+ * the last thing in the arena and no linkage is stored there, so
+ * |last| == arenaStart + ArenaSize - 1. If the space at the arena end is
+ * fully used this last span is empty and |first| == |last + 1|.
+ *
+ * Thus |first| < |last| implies that we have either the last span with at least
+ * one element or that the span is not the last and contains at least 2
+ * elements. In both cases to allocate a thing from this span we need simply
+ * to increment |first| by the allocation size.
+ *
+ * |first| == |last| implies that we have a one element span that records the
+ * next span. So to allocate from it we need to update the span list head
+ * with a copy of the span stored at |last| address so the following
+ * allocations will use that span.
+ *
+ * |first| > |last| implies that we have an empty last span and the arena is
+ * fully used.
+ *
+ * Also, only for the last span is (|last| & 1) != 0, as all allocation sizes
+ * multiples of Cell::CellSize.
+ */
+struct FreeSpan {
+    uintptr_t   first;
+    uintptr_t   last;
+
+  public:
+    FreeSpan() {}
+
+    FreeSpan(uintptr_t first, uintptr_t last)
+      : first(first), last(last) {
+        checkSpan();
+    }
+
+    /*
+     * To minimize the size of the arena header the first span is encoded
+     * there as offsets from the arena start.
+     */
+    static size_t encodeOffsets(size_t firstOffset, size_t lastOffset) {
+        /* Check that we can pack the offsets into uint16. */
+        JS_STATIC_ASSERT(ArenaShift < 16);
+        JS_ASSERT(firstOffset <= ArenaSize);
+        JS_ASSERT(lastOffset < ArenaSize);
+        JS_ASSERT(firstOffset <= ((lastOffset + 1) & ~size_t(1)));
+        return firstOffset | (lastOffset << 16);
+    }
+
+    /*
+     * Encoded offsets for a full arena when its first span is the last one
+     * and empty.
+     */
+    static const size_t FullArenaOffsets = ArenaSize | ((ArenaSize - 1) << 16);
+
+    static FreeSpan decodeOffsets(uintptr_t arenaAddr, size_t offsets) {
+        JS_ASSERT(!(arenaAddr & ArenaMask));
+
+        size_t firstOffset = offsets & 0xFFFF;
+        size_t lastOffset = offsets >> 16;
+        JS_ASSERT(firstOffset <= ArenaSize);
+        JS_ASSERT(lastOffset < ArenaSize);
+
+        /*
+         * We must not use | when calculating first as firstOffset is
+         * ArenaMask + 1 for the empty span.
+         */
+        return FreeSpan(arenaAddr + firstOffset, arenaAddr | lastOffset);
+    }
+
+    void initAsEmpty(uintptr_t arenaAddr = 0) {
+        JS_ASSERT(!(arenaAddr & ArenaMask));
+        first = arenaAddr + ArenaSize;
+        last = arenaAddr | (ArenaSize  - 1);
+        JS_ASSERT(isEmpty());
+    }
+
+    bool isEmpty() const {
+        checkSpan();
+        return first > last;
+    }
+
+    bool hasNext() const {
+        checkSpan();
+        return !(last & uintptr_t(1));
+    }
+
+    const FreeSpan *nextSpan() const {
+        JS_ASSERT(hasNext());
+        return reinterpret_cast<FreeSpan *>(last);
+    }
+
+    FreeSpan *nextSpanUnchecked(size_t thingSize) const {
+#ifdef DEBUG
+        uintptr_t lastOffset = last & ArenaMask;
+        JS_ASSERT(!(lastOffset & 1));
+        JS_ASSERT((ArenaSize - lastOffset) % thingSize == 0);
+#endif
+        return reinterpret_cast<FreeSpan *>(last);
+    }
+
+    uintptr_t arenaAddressUnchecked() const {
+        return last & ~ArenaMask;
+    }
+
+    uintptr_t arenaAddress() const {
+        checkSpan();
+        return arenaAddressUnchecked();
+    }
+
+    ArenaHeader *arenaHeader() const {
+        return reinterpret_cast<ArenaHeader *>(arenaAddress());
+    }
+
+    bool isSameNonEmptySpan(const FreeSpan *another) const {
+        JS_ASSERT(!isEmpty());
+        JS_ASSERT(!another->isEmpty());
+        return first == another->first && last == another->last;
+    }
+
+    bool isWithinArena(uintptr_t arenaAddr) const {
+        JS_ASSERT(!(arenaAddr & ArenaMask));
+
+        /* Return true for the last empty span as well. */
+        return arenaAddress() == arenaAddr;
+    }
+
+    size_t encodeAsOffsets() const {
+        /*
+         * We must use first - arenaAddress(), not first & ArenaMask as
+         * first == ArenaMask + 1 for an empty span.
+         */
+        uintptr_t arenaAddr = arenaAddress();
+        return encodeOffsets(first - arenaAddr, last & ArenaMask);
+    }
+
+    /* See comments before FreeSpan for details. */
+    JS_ALWAYS_INLINE void *allocate(size_t thingSize) {
+        JS_ASSERT(thingSize % Cell::CellSize == 0);
+        checkSpan();
+        uintptr_t thing = first;
+        if (thing < last) {
+            /* Bump-allocate from the current span. */
+            first = thing + thingSize;
+        } else if (JS_LIKELY(thing == last)) {
+            /*
+             * Move to the next span. We use JS_LIKELY as without PGO
+             * compilers mis-predict == here as unlikely to succeed.
+             */
+            *this = *reinterpret_cast<FreeSpan *>(thing);
+        } else {
+            return NULL;
+        }
+        checkSpan();
+        return reinterpret_cast<void *>(thing);
+    }
+
+    /* A version of allocate when we know that the span is not empty. */
+    JS_ALWAYS_INLINE void *infallibleAllocate(size_t thingSize) {
+        JS_ASSERT(thingSize % Cell::CellSize == 0);
+        checkSpan();
+        uintptr_t thing = first;
+        if (thing < last) {
+            first = thing + thingSize;
+        } else {
+            JS_ASSERT(thing == last);
+            *this = *reinterpret_cast<FreeSpan *>(thing);
+        }
+        checkSpan();
+        return reinterpret_cast<void *>(thing);
+    }
+
+    /*
+     * Allocate from a newly allocated arena. We do not move the free list
+     * from the arena. Rather we set the arena up as fully used during the
+     * initialization so to allocate we simply return the first thing in the
+     * arena and set the free list to point to the second.
+     */
+    JS_ALWAYS_INLINE void *allocateFromNewArena(uintptr_t arenaAddr, size_t firstThingOffset,
+                                                size_t thingSize) {
+        JS_ASSERT(!(arenaAddr & ArenaMask));
+        uintptr_t thing = arenaAddr | firstThingOffset;
+        first = thing + thingSize;
+        last = arenaAddr | ArenaMask;
+        checkSpan();
+        return reinterpret_cast<void *>(thing);
+    }
+
+    void checkSpan() const {
+#ifdef DEBUG
+        /* We do not allow spans at the end of the address space. */
+        JS_ASSERT(last != uintptr_t(-1));
+        JS_ASSERT(first);
+        JS_ASSERT(last);
+        JS_ASSERT(first - 1 <= last);
+        uintptr_t arenaAddr = arenaAddressUnchecked();
+        if (last & 1) {
+            /* The span is the last. */
+            JS_ASSERT((last & ArenaMask) == ArenaMask);
+
+            if (first - 1 == last) {
+                /* The span is last and empty. The above start != 0 check
+                 * implies that we are not at the end of the address space.
+                 */
+                return;
+            }
+            size_t spanLength = last - first + 1;
+            JS_ASSERT(spanLength % Cell::CellSize == 0);
+
+            /* Start and end must belong to the same arena. */
+            JS_ASSERT((first & ~ArenaMask) == arenaAddr);
+            return;
+        }
+
+        /* The span is not the last and we have more spans to follow. */
+        JS_ASSERT(first <= last);
+        size_t spanLengthWithoutOneThing = last - first;
+        JS_ASSERT(spanLengthWithoutOneThing % Cell::CellSize == 0);
+
+        JS_ASSERT((first & ~ArenaMask) == arenaAddr);
+
+        /*
+         * If there is not enough space before the arena end to allocate one
+         * more thing, then the span must be marked as the last one to avoid
+         * storing useless empty span reference.
+         */
+        size_t beforeTail = ArenaSize - (last & ArenaMask);
+        JS_ASSERT(beforeTail >= sizeof(FreeSpan) + Cell::CellSize);
+
+        FreeSpan *next = reinterpret_cast<FreeSpan *>(last);
+
+        /*
+         * The GC things on the list of free spans come from one arena
+         * and the spans are linked in ascending address order with
+         * at least one non-free thing between spans.
+         */
+        JS_ASSERT(last < next->first);
+        JS_ASSERT(arenaAddr == next->arenaAddressUnchecked());
+
+        if (next->first > next->last) {
+            /*
+             * The next span is the empty span that terminates the list for
+             * arenas that do not have any free things at the end.
+             */
+            JS_ASSERT(next->first - 1 == next->last);
+            JS_ASSERT(arenaAddr + ArenaSize == next->first);
+        }
+#endif
+    }
+
+};
+
+/* Every arena has a header. */
+struct ArenaHeader {
+    friend struct FreeLists;
+
+    JSCompartment   *compartment;
+
+    /*
+     * ArenaHeader::next has two purposes: when unallocated, it points to the
+     * next available Arena's header. When allocated, it points to the next
+     * arena of the same size class and compartment.
+     */
+    ArenaHeader     *next;
+
+  private:
+    /*
+     * The first span of free things in the arena. We encode it as the start
+     * and end offsets within the arena, not as FreeSpan structure, to
+     * minimize the header size.
+     */
+    size_t          firstFreeSpanOffsets;
+
+    /*
+     * One of AllocKind constants or FINALIZE_LIMIT when the arena does not
+     * contain any GC things and is on the list of empty arenas in the GC
+     * chunk. The latter allows to quickly check if the arena is allocated
+     * during the conservative GC scanning without searching the arena in the
+     * list.
+     */
+    size_t       allocKind          : 8;
+
+    /*
+     * When recursive marking uses too much stack the marking is delayed and
+     * the corresponding arenas are put into a stack using the following field
+     * as a linkage. To distinguish the bottom of the stack from the arenas
+     * not present in the stack we use an extra flag to tag arenas on the
+     * stack.
+     *
+     * Delayed marking is also used for arenas that we allocate into during an
+     * incremental GC. In this case, we intend to mark all the objects in the
+     * arena, and it's faster to do this marking in bulk.
+     *
+     * To minimize the ArenaHeader size we record the next delayed marking
+     * linkage as arenaAddress() >> ArenaShift and pack it with the allocKind
+     * field and hasDelayedMarking flag. We use 8 bits for the allocKind, not
+     * ArenaShift - 1, so the compiler can use byte-level memory instructions
+     * to access it.
+     */
+  public:
+    size_t       hasDelayedMarking  : 1;
+    size_t       allocatedDuringIncremental : 1;
+    size_t       markOverflow : 1;
+    size_t       nextDelayedMarking : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
+
+    static void staticAsserts() {
+        /* We must be able to fit the allockind into uint8_t. */
+        JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
+
+        /*
+         * nextDelayedMarking packing assumes that ArenaShift has enough bits
+         * to cover allocKind and hasDelayedMarking.
+         */
+        JS_STATIC_ASSERT(ArenaShift >= 8 + 1 + 1 + 1);
+    }
+
+    inline uintptr_t address() const;
+    inline Chunk *chunk() const;
+
+    bool allocated() const {
+        JS_ASSERT(allocKind <= size_t(FINALIZE_LIMIT));
+        return allocKind < size_t(FINALIZE_LIMIT);
+    }
+
+    void init(JSCompartment *comp, AllocKind kind) {
+        JS_ASSERT(!allocated());
+        JS_ASSERT(!markOverflow);
+        JS_ASSERT(!allocatedDuringIncremental);
+        JS_ASSERT(!hasDelayedMarking);
+        compartment = comp;
+
+        JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
+        allocKind = size_t(kind);
+
+        /* See comments in FreeSpan::allocateFromNewArena. */
+        firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
+    }
+
+    void setAsNotAllocated() {
+        allocKind = size_t(FINALIZE_LIMIT);
+        markOverflow = 0;
+        allocatedDuringIncremental = 0;
+        hasDelayedMarking = 0;
+        nextDelayedMarking = 0;
+    }
+
+    uintptr_t arenaAddress() const {
+        return address();
+    }
+
+    Arena *getArena() {
+        return reinterpret_cast<Arena *>(arenaAddress());
+    }
+
+    AllocKind getAllocKind() const {
+        JS_ASSERT(allocated());
+        return AllocKind(allocKind);
+    }
+
+    inline size_t getThingSize() const;
+
+    bool hasFreeThings() const {
+        return firstFreeSpanOffsets != FreeSpan::FullArenaOffsets;
+    }
+
+    inline bool isEmpty() const;
+
+    void setAsFullyUsed() {
+        firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
+    }
+
+    FreeSpan getFirstFreeSpan() const {
+#ifdef DEBUG
+        checkSynchronizedWithFreeList();
+#endif
+        return FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
+    }
+
+    void setFirstFreeSpan(const FreeSpan *span) {
+        JS_ASSERT(span->isWithinArena(arenaAddress()));
+        firstFreeSpanOffsets = span->encodeAsOffsets();
+    }
+
+#ifdef DEBUG
+    void checkSynchronizedWithFreeList() const;
+#endif
+
+    inline ArenaHeader *getNextDelayedMarking() const;
+    inline void setNextDelayedMarking(ArenaHeader *aheader);
+};
+
+struct Arena {
+    /*
+     * Layout of an arena:
+     * An arena is 4K in size and 4K-aligned. It starts with the ArenaHeader
+     * descriptor followed by some pad bytes. The remainder of the arena is
+     * filled with the array of T things. The pad bytes ensure that the thing
+     * array ends exactly at the end of the arena.
+     *
+     * +-------------+-----+----+----+-----+----+
+     * | ArenaHeader | pad | T0 | T1 | ... | Tn |
+     * +-------------+-----+----+----+-----+----+
+     *
+     * <----------------------------------------> = ArenaSize bytes
+     * <-------------------> = first thing offset
+     */
+    ArenaHeader aheader;
+    uint8_t     data[ArenaSize - sizeof(ArenaHeader)];
+
+  private:
+    static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
+    static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];
+
+  public:
+    static void staticAsserts();
+
+    static size_t thingSize(AllocKind kind) {
+        return ThingSizes[kind];
+    }
+
+    static size_t firstThingOffset(AllocKind kind) {
+        return FirstThingOffsets[kind];
+    }
+
+    static size_t thingsPerArena(size_t thingSize) {
+        JS_ASSERT(thingSize % Cell::CellSize == 0);
+
+        /* We should be able to fit FreeSpan in any GC thing. */
+        JS_ASSERT(thingSize >= sizeof(FreeSpan));
+
+        return (ArenaSize - sizeof(ArenaHeader)) / thingSize;
+    }
+
+    static size_t thingsSpan(size_t thingSize) {
+        return thingsPerArena(thingSize) * thingSize;
+    }
+
+    static bool isAligned(uintptr_t thing, size_t thingSize) {
+        /* Things ends at the arena end. */
+        uintptr_t tailOffset = (ArenaSize - thing) & ArenaMask;
+        return tailOffset % thingSize == 0;
+    }
+
+    uintptr_t address() const {
+        return aheader.address();
+    }
+
+    uintptr_t thingsStart(AllocKind thingKind) {
+        return address() | firstThingOffset(thingKind);
+    }
+
+    uintptr_t thingsEnd() {
+        return address() + ArenaSize;
+    }
+
+    template <typename T>
+    bool finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize);
+};
+
+/* The chunk header (located at the end of the chunk to preserve arena alignment). */
+struct ChunkInfo {
+    Chunk           *next;
+    Chunk           **prevp;
+
+    /* Free arenas are linked together with aheader.next. */
+    ArenaHeader     *freeArenasHead;
+
+    /*
+     * Decommitted arenas are tracked by a bitmap in the chunk header. We use
+     * this offset to start our search iteration close to a decommitted arena
+     * that we can allocate.
+     */
+    uint32_t        lastDecommittedArenaOffset;
+
+    /* Number of free arenas, either committed or decommitted. */
+    uint32_t        numArenasFree;
+
+    /* Number of free, committed arenas. */
+    uint32_t        numArenasFreeCommitted;
+
+    /* Number of GC cycles this chunk has survived. */
+    uint32_t        age;
+};
+
+/*
+ * Calculating ArenasPerChunk:
+ *
+ * In order to figure out how many Arenas will fit in a chunk, we need to know
+ * how much extra space is available after we allocate the header data. This
+ * is a problem because the header size depends on the number of arenas in the
+ * chunk. The two dependent fields are bitmap and decommittedArenas.
+ *
+ * For the mark bitmap, we know that each arena will use a fixed number of full
+ * bytes: ArenaBitmapBytes. The full size of the header data is this number
+ * multiplied by the eventual number of arenas we have in the header. We,
+ * conceptually, distribute this header data among the individual arenas and do
+ * not include it in the header. This way we do not have to worry about its
+ * variable size: it gets attached to the variable number we are computing.
+ *
+ * For the decommitted arena bitmap, we only have 1 bit per arena, so this
+ * technique will not work. Instead, we observe that we do not have enough
+ * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
+ * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
+ * This will not become 63 bytes unless we double the data required in the
+ * header. Therefore, we just compute the number of bytes required to track
+ * every possible arena and do not worry about slop bits, since there are too
+ * few to usefully allocate.
+ *
+ * To actually compute the number of arenas we can allocate in a chunk, we
+ * divide the amount of available space less the header info (not including
+ * the mark bitmap which is distributed into the arena size) by the size of
+ * the arena (with the mark bitmap bytes it uses).
+ */
+const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
+const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
+const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
+const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
+
+/* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
+struct ChunkBitmap {
+    uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];
+
+    JS_ALWAYS_INLINE void getMarkWordAndMask(const Cell *cell, uint32_t color,
+                                             uintptr_t **wordp, uintptr_t *maskp);
+
+    JS_ALWAYS_INLINE bool isMarked(const Cell *cell, uint32_t color) {
+        uintptr_t *word, mask;
+        getMarkWordAndMask(cell, color, &word, &mask);
+        return *word & mask;
+    }
+
+    JS_ALWAYS_INLINE bool markIfUnmarked(const Cell *cell, uint32_t color) {
+        uintptr_t *word, mask;
+        getMarkWordAndMask(cell, BLACK, &word, &mask);
+        if (*word & mask)
+            return false;
+        *word |= mask;
+        if (color != BLACK) {
+            /*
+             * We use getMarkWordAndMask to recalculate both mask and word as
+             * doing just mask << color may overflow the mask.
+             */
+            getMarkWordAndMask(cell, color, &word, &mask);
+            if (*word & mask)
+                return false;
+            *word |= mask;
+        }
+        return true;
+    }
+
+    JS_ALWAYS_INLINE void unmark(const Cell *cell, uint32_t color) {
+        uintptr_t *word, mask;
+        getMarkWordAndMask(cell, color, &word, &mask);
+        *word &= ~mask;
+    }
+
+    void clear() {
+        PodArrayZero(bitmap);
+    }
+
+#ifdef DEBUG
+    bool noBitsSet(ArenaHeader *aheader) {
+        /*
+         * We assume that the part of the bitmap corresponding to the arena
+         * has the exact number of words so we do not need to deal with a word
+         * that covers bits from two arenas.
+         */
+        JS_STATIC_ASSERT(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD);
+
+        uintptr_t *word, unused;
+        getMarkWordAndMask(reinterpret_cast<Cell *>(aheader->address()), BLACK, &word, &unused);
+        for (size_t i = 0; i != ArenaBitmapWords; i++) {
+            if (word[i])
+                return false;
+        }
+        return true;
+    }
+#endif
+};
+
+JS_STATIC_ASSERT(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap));
+
+typedef BitArray<ArenasPerChunk> PerArenaBitmap;
+
+const size_t ChunkPadSize = ChunkSize
+                            - (sizeof(Arena) * ArenasPerChunk)
+                            - sizeof(ChunkBitmap)
+                            - sizeof(PerArenaBitmap)
+                            - sizeof(ChunkInfo);
+JS_STATIC_ASSERT(ChunkPadSize < BytesPerArenaWithHeader);
+
+/*
+ * Chunks contain arenas and associated data structures (mark bitmap, delayed
+ * marking state).
+ */
+struct Chunk {
+    Arena           arenas[ArenasPerChunk];
+
+    /* Pad to full size to ensure cache alignment of ChunkInfo. */
+    uint8_t         padding[ChunkPadSize];
+
+    ChunkBitmap     bitmap;
+    PerArenaBitmap  decommittedArenas;
+    ChunkInfo       info;
+
+    static Chunk *fromAddress(uintptr_t addr) {
+        addr &= ~ChunkMask;
+        return reinterpret_cast<Chunk *>(addr);
+    }
+
+    static bool withinArenasRange(uintptr_t addr) {
+        uintptr_t offset = addr & ChunkMask;
+        return offset < ArenasPerChunk * ArenaSize;
+    }
+
+    static size_t arenaIndex(uintptr_t addr) {
+        JS_ASSERT(withinArenasRange(addr));
+        return (addr & ChunkMask) >> ArenaShift;
+    }
+
+    uintptr_t address() const {
+        uintptr_t addr = reinterpret_cast<uintptr_t>(this);
+        JS_ASSERT(!(addr & ChunkMask));
+        return addr;
+    }
+
+    bool unused() const {
+        return info.numArenasFree == ArenasPerChunk;
+    }
+
+    bool hasAvailableArenas() const {
+        return info.numArenasFree != 0;
+    }
+
+    inline void addToAvailableList(JSCompartment *compartment);
+    inline void insertToAvailableList(Chunk **insertPoint);
+    inline void removeFromAvailableList();
+
+    ArenaHeader *allocateArena(JSCompartment *comp, AllocKind kind);
+
+    void releaseArena(ArenaHeader *aheader);
+
+    static Chunk *allocate(JSRuntime *rt);
+
+    /* Must be called with the GC lock taken. */
+    static inline void release(JSRuntime *rt, Chunk *chunk);
+    static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
+
+    /* Must be called with the GC lock taken. */
+    inline void prepareToBeFreed(JSRuntime *rt);
+
+    /*
+     * Assuming that the info.prevp points to the next field of the previous
+     * chunk in a doubly-linked list, get that chunk.
+     */
+    Chunk *getPrevious() {
+        JS_ASSERT(info.prevp);
+        return fromPointerToNext(info.prevp);
+    }
+
+    /* Get the chunk from a pointer to its info.next field. */
+    static Chunk *fromPointerToNext(Chunk **nextFieldPtr) {
+        uintptr_t addr = reinterpret_cast<uintptr_t>(nextFieldPtr);
+        JS_ASSERT((addr & ChunkMask) == offsetof(Chunk, info.next));
+        return reinterpret_cast<Chunk *>(addr - offsetof(Chunk, info.next));
+    }
+
+  private:
+    inline void init();
+
+    /* Search for a decommitted arena to allocate. */
+    unsigned findDecommittedArenaOffset();
+    ArenaHeader* fetchNextDecommittedArena();
+
+  public:
+    /* Unlink and return the freeArenasHead. */
+    inline ArenaHeader* fetchNextFreeArena(JSRuntime *rt);
+
+    inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
+};
+
+JS_STATIC_ASSERT(sizeof(Chunk) == ChunkSize);
+
 class ChunkPool {
     Chunk   *emptyChunkListHead;
     size_t  emptyCount;
 
   public:
     ChunkPool()
       : emptyChunkListHead(NULL),
         emptyCount(0) { }
@@ -116,16 +847,148 @@ class ChunkPool {
 
     /* Must be called with the GC lock taken. */
     void expireAndFree(JSRuntime *rt, bool releaseAll);
 
     /* Must be called either during the GC or with the GC lock taken. */
     JS_FRIEND_API(int64_t) countCleanDecommittedArenas(JSRuntime *rt);
 };
 
+inline uintptr_t
+Cell::address() const
+{
+    uintptr_t addr = uintptr_t(this);
+    JS_ASSERT(addr % Cell::CellSize == 0);
+    JS_ASSERT(Chunk::withinArenasRange(addr));
+    return addr;
+}
+
+inline ArenaHeader *
+Cell::arenaHeader() const
+{
+    uintptr_t addr = address();
+    addr &= ~ArenaMask;
+    return reinterpret_cast<ArenaHeader *>(addr);
+}
+
+Chunk *
+Cell::chunk() const
+{
+    uintptr_t addr = uintptr_t(this);
+    JS_ASSERT(addr % Cell::CellSize == 0);
+    addr &= ~(ChunkSize - 1);
+    return reinterpret_cast<Chunk *>(addr);
+}
+
+AllocKind
+Cell::getAllocKind() const
+{
+    return arenaHeader()->getAllocKind();
+}
+
+#ifdef DEBUG
+inline bool
+Cell::isAligned() const
+{
+    return Arena::isAligned(address(), arenaHeader()->getThingSize());
+}
+#endif
+
+inline uintptr_t
+ArenaHeader::address() const
+{
+    uintptr_t addr = reinterpret_cast<uintptr_t>(this);
+    JS_ASSERT(!(addr & ArenaMask));
+    JS_ASSERT(Chunk::withinArenasRange(addr));
+    return addr;
+}
+
+inline Chunk *
+ArenaHeader::chunk() const
+{
+    return Chunk::fromAddress(address());
+}
+
+inline bool
+ArenaHeader::isEmpty() const
+{
+    /* Arena is empty if its first span covers the whole arena. */
+    JS_ASSERT(allocated());
+    size_t firstThingOffset = Arena::firstThingOffset(getAllocKind());
+    return firstFreeSpanOffsets == FreeSpan::encodeOffsets(firstThingOffset, ArenaMask);
+}
+
+inline size_t
+ArenaHeader::getThingSize() const
+{
+    JS_ASSERT(allocated());
+    return Arena::thingSize(getAllocKind());
+}
+
+inline ArenaHeader *
+ArenaHeader::getNextDelayedMarking() const
+{
+    return &reinterpret_cast<Arena *>(nextDelayedMarking << ArenaShift)->aheader;
+}
+
+inline void
+ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
+{
+    JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
+    hasDelayedMarking = 1;
+    nextDelayedMarking = aheader->arenaAddress() >> ArenaShift;
+}
+
+JS_ALWAYS_INLINE void
+ChunkBitmap::getMarkWordAndMask(const Cell *cell, uint32_t color,
+                                uintptr_t **wordp, uintptr_t *maskp)
+{
+    size_t bit = (cell->address() & ChunkMask) / Cell::CellSize + color;
+    JS_ASSERT(bit < ArenaBitmapBits * ArenasPerChunk);
+    *maskp = uintptr_t(1) << (bit % JS_BITS_PER_WORD);
+    *wordp = &bitmap[bit / JS_BITS_PER_WORD];
+}
+
+static void
+AssertValidColor(const void *thing, uint32_t color)
+{
+#ifdef DEBUG
+    ArenaHeader *aheader = reinterpret_cast<const js::gc::Cell *>(thing)->arenaHeader();
+    JS_ASSERT_IF(color, color < aheader->getThingSize() / Cell::CellSize);
+#endif
+}
+
+inline bool
+Cell::isMarked(uint32_t color) const
+{
+    AssertValidColor(this, color);
+    return chunk()->bitmap.isMarked(this, color);
+}
+
+bool
+Cell::markIfUnmarked(uint32_t color) const
+{
+    AssertValidColor(this, color);
+    return chunk()->bitmap.markIfUnmarked(this, color);
+}
+
+void
+Cell::unmark(uint32_t color) const
+{
+    JS_ASSERT(color != BLACK);
+    AssertValidColor(this, color);
+    chunk()->bitmap.unmark(this, color);
+}
+
+JSCompartment *
+Cell::compartment() const
+{
+    return arenaHeader()->compartment;
+}
+
 static inline JSGCTraceKind
 MapAllocToTraceKind(AllocKind thingKind)
 {
     static const JSGCTraceKind map[FINALIZE_LIMIT] = {
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT0 */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT0_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT2 */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT2_BACKGROUND */
rename from js/src/gc/Marking.cpp
rename to js/src/jsgcmark.cpp
--- a/js/src/gc/Marking.cpp
+++ b/js/src/jsgcmark.cpp
@@ -1,42 +1,41 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+#include "jsgcmark.h"
 #include "jsprf.h"
 #include "jsscope.h"
 #include "jsstr.h"
 
-#include "gc/Marking.h"
-#include "methodjit/MethodJIT.h"
-
 #include "jsobjinlines.h"
 #include "jsscopeinlines.h"
 
 #include "vm/String-inl.h"
+#include "methodjit/MethodJIT.h"
 
 /*
  * There are two mostly separate mark paths. The first is a fast path used
  * internally in the GC. The second is a slow path used for root marking and
  * for API consumers like the cycle collector or Class::trace implementations.
  *
  * The fast path uses explicit stacks. The basic marking process during a GC is
  * that all roots are pushed on to a mark stack, and then each item on the
  * stack is scanned (possibly pushing more stuff) until the stack is empty.
  *
  * PushMarkStack pushes a GC thing onto the mark stack. In some cases (shapes
  * or strings) it eagerly marks the object rather than pushing it. Popping and
  * scanning is done by the processMarkStackTop method. For efficiency reasons
  * like tail recursion elimination that method also implements the scanning of
  * objects. For other GC things it uses helper methods.
  *
- * Most of the marking code outside Marking.cpp uses functions like MarkObject,
+ * Most of the marking code outside jsgcmark uses functions like MarkObject,
  * MarkString, etc. These functions check if an object is in the compartment
  * currently being GCed. If it is, they call PushMarkStack. Roots are pushed
  * this way as well as pointers traversed inside trace hooks (for things like
  * IteratorClass). It it always valid to call a MarkX function instead of
  * PushMarkStack, although it may be slower.
  *
  * The MarkX functions also handle non-GC object traversal. In this case, they
  * call a callback for each object visited. This is a recursive process; the
rename from js/src/gc/Marking.h
rename to js/src/jsgcmark.h
--- a/js/src/gc/Marking.h
+++ b/js/src/jsgcmark.h
@@ -1,44 +1,26 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef gc_marking_h___
-#define gc_marking_h___
+#ifndef jsgcmark_h___
+#define jsgcmark_h___
 
 #include "jsgc.h"
 #include "jscntxt.h"
+#include "jscompartment.h"
 #include "jslock.h"
 
 #include "gc/Barrier.h"
 #include "js/TemplateLib.h"
 
-extern "C" {
-struct JSContext;
-struct JSFunction;
-struct JSObject;
-struct JSScript;
-}
-
-class JSAtom;
-class JSLinearString;
-
 namespace js {
-
-class ArgumentsObject;
-class BaseShape;
-class GlobalObject;
-class UnownedBaseShape;
-struct Shape;
-
-template<class, typename> class HeapPtr;
-
 namespace gc {
 
 /*** Object Marking ***/
 
 /*
  * These functions expose marking functionality for all of the different GC
  * thing kinds. For each GC thing, there are several variants. As an example,
  * these are the variants generated for JSObject. They are listed from most to
@@ -81,18 +63,16 @@ DeclMarker(String, JSAtom)
 DeclMarker(String, JSString)
 DeclMarker(String, JSFlatString)
 DeclMarker(String, JSLinearString)
 DeclMarker(TypeObject, types::TypeObject)
 #if JS_HAS_XML_SUPPORT
 DeclMarker(XML, JSXML)
 #endif
 
-#undef DeclMarker
-
 /*** Externally Typed Marking ***/
 
 /*
  * Note: this must only be called by the GC and only when we are tracing through
  * MarkRoots. It is explicitly for ConservativeStackMarking and should go away
  * after we transition to exact rooting.
  */
 void
@@ -276,9 +256,9 @@ TraceKind(JSScript *script)
 void
 TraceChildren(JSTracer *trc, void *thing, JSGCTraceKind kind);
 
 void
 CallTracer(JSTracer *trc, void *thing, JSGCTraceKind kind);
 
 } /* namespace js */
 
-#endif /* gc_marking_h___ */
+#endif
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -39,28 +39,28 @@
 
 #include "jsapi.h"
 #include "jsautooplen.h"
 #include "jsbool.h"
 #include "jsdate.h"
 #include "jsexn.h"
 #include "jsfriendapi.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinfer.h"
 #include "jsmath.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsscript.h"
 #include "jscntxt.h"
 #include "jsscope.h"
 #include "jsstr.h"
 #include "jsiter.h"
 
 #include "frontend/TokenStream.h"
-#include "gc/Marking.h"
 #include "js/MemoryMetrics.h"
 #include "methodjit/MethodJIT.h"
 #include "methodjit/Retcon.h"
 #ifdef JS_METHODJIT
 # include "assembler/assembler/MacroAssembler.h"
 #endif
 
 #include "jsatominlines.h"
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -38,22 +38,22 @@
  * ***** END LICENSE BLOCK ***** */
 
 /* Definitions related to javascript type inference. */
 
 #ifndef jsinfer_h___
 #define jsinfer_h___
 
 #include "jsalloc.h"
+#include "jscell.h"
 #include "jsfriendapi.h"
 #include "jsprvtd.h"
 
 #include "ds/LifoAlloc.h"
 #include "gc/Barrier.h"
-#include "gc/Heap.h"
 #include "js/HashTable.h"
 
 namespace JS {
 struct TypeInferenceSizes;
 }
 
 namespace js {
 namespace types {
--- a/js/src/jsinferinlines.h
+++ b/js/src/jsinferinlines.h
@@ -37,20 +37,19 @@
  *
  * ***** END LICENSE BLOCK ***** */
 
 /* Inline members for javascript type inference. */
 
 #include "jsarray.h"
 #include "jsanalyze.h"
 #include "jscompartment.h"
+#include "jsgcmark.h"
 #include "jsinfer.h"
 #include "jsprf.h"
-
-#include "gc/Marking.h"
 #include "vm/GlobalObject.h"
 
 #include "vm/Stack-inl.h"
 
 #ifndef jsinferinlines_h___
 #define jsinferinlines_h___
 
 /////////////////////////////////////////////////////////////////////
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -55,29 +55,29 @@
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsdate.h"
 #include "jsversion.h"
 #include "jsdbgapi.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jspropertycache.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 #include "jslibmath.h"
 
-#include "gc/Marking.h"
 #include "frontend/BytecodeEmitter.h"
 #ifdef JS_METHODJIT
 #include "methodjit/MethodJIT.h"
 #include "methodjit/Logging.h"
 #endif
 #include "vm/Debugger.h"
 
 #include "jsatominlines.h"
--- a/js/src/jsiter.cpp
+++ b/js/src/jsiter.cpp
@@ -49,33 +49,33 @@
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsexn.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 #include "jsscript.h"
 
 #if JS_HAS_XML_SUPPORT
 #include "jsxml.h"
 #endif
 
 #include "ds/Sort.h"
 #include "frontend/TokenStream.h"
-#include "gc/Marking.h"
 #include "vm/GlobalObject.h"
 
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 
 #include "vm/MethodGuard-inl.h"
 #include "vm/Stack-inl.h"
 #include "vm/String-inl.h"
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -53,16 +53,17 @@
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsonparser.h"
 #include "jsopcode.h"
 #include "jsprobes.h"
@@ -75,17 +76,16 @@
 #include "jswatchpoint.h"
 #include "jswrapper.h"
 #include "jsxml.h"
 
 #include "builtin/MapObject.h"
 #include "frontend/BytecodeCompiler.h"
 #include "frontend/BytecodeEmitter.h"
 #include "frontend/Parser.h"
-#include "gc/Marking.h"
 #include "js/MemoryMetrics.h"
 #include "vm/StringBuffer.h"
 #include "vm/Xdr.h"
 
 #include "jsarrayinlines.h"
 #include "jsatominlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
@@ -4320,20 +4320,20 @@ js_FindClassObject(JSContext *cx, JSObje
 bool
 JSObject::allocSlot(JSContext *cx, uint32_t *slotp)
 {
     uint32_t slot = slotSpan();
     JS_ASSERT(slot >= JSSLOT_FREE(getClass()));
 
     /*
      * If this object is in dictionary mode, try to pull a free slot from the
-     * shape table's slot-number freelist.
+     * property table's slot-number freelist.
      */
     if (inDictionaryMode()) {
-        ShapeTable &table = lastProperty()->table();
+        PropertyTable &table = lastProperty()->table();
         uint32_t last = table.freelist;
         if (last != SHAPE_INVALID_SLOT) {
 #ifdef DEBUG
             JS_ASSERT(last < slot);
             uint32_t next = getSlot(last).toPrivateUint32();
             JS_ASSERT_IF(next != SHAPE_INVALID_SLOT, next < slot);
 #endif
 
@@ -6173,18 +6173,18 @@ JSObject::dump()
     if (obj->isDelegate()) fprintf(stderr, " delegate");
     if (obj->isSystem()) fprintf(stderr, " system");
     if (!obj->isExtensible()) fprintf(stderr, " not_extensible");
     if (obj->isIndexed()) fprintf(stderr, " indexed");
 
     if (obj->isNative()) {
         if (obj->inDictionaryMode())
             fprintf(stderr, " inDictionaryMode");
-        if (obj->hasShapeTable())
-            fprintf(stderr, " hasShapeTable");
+        if (obj->hasPropertyTable())
+            fprintf(stderr, " hasPropertyTable");
     }
     fprintf(stderr, "\n");
 
     if (obj->isDenseArray()) {
         unsigned slots = obj->getDenseArrayInitializedLength();
         fprintf(stderr, "elements\n");
         for (unsigned i = 0; i < slots; i++) {
             fprintf(stderr, " %3d: ", i);
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -53,19 +53,19 @@
 #include "jsatom.h"
 #include "jsclass.h"
 #include "jsfriendapi.h"
 #include "jsinfer.h"
 #include "jshash.h"
 #include "jspubtd.h"
 #include "jsprvtd.h"
 #include "jslock.h"
+#include "jscell.h"
 
 #include "gc/Barrier.h"
-#include "gc/Heap.h"
 
 #include "vm/ObjectImpl.h"
 #include "vm/String.h"
 
 namespace js {
 
 class AutoPropDescArrayRooter;
 class ProxyHandler;
@@ -391,17 +391,17 @@ struct JSObject : public js::ObjectImpl
 
     bool shadowingShapeChange(JSContext *cx, const js::Shape &shape);
 
     /* Whether there may be indexed properties on this object. */
     inline bool isIndexed() const;
 
     inline uint32_t propertyCount() const;
 
-    inline bool hasShapeTable() const;
+    inline bool hasPropertyTable() const;
 
     inline size_t computedSizeOfThisSlotsElements() const;
 
     inline void sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf,
                                     size_t *slotsSize, size_t *elementsSize,
                                     size_t *miscSize) const;
 
     static const uint32_t MAX_FIXED_SLOTS = 16;
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -43,32 +43,33 @@
 
 #include <new>
 
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsfun.h"
+#include "jsgcmark.h"
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsprobes.h"
 #include "jspropertytree.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 #include "jsstr.h"
 #include "jstypedarray.h"
 #include "jsxml.h"
 #include "jswrapper.h"
 
 #include "gc/Barrier.h"
-#include "gc/Marking.h"
 #include "js/TemplateLib.h"
+
 #include "vm/BooleanObject.h"
 #include "vm/GlobalObject.h"
 #include "vm/NumberObject.h"
 #include "vm/RegExpStatics.h"
 #include "vm/StringObject.h"
 
 #include "jsatominlines.h"
 #include "jsfuninlines.h"
@@ -970,17 +971,17 @@ JSObject::nativeEmpty() const
 
 inline uint32_t
 JSObject::propertyCount() const
 {
     return lastProperty()->entryCount();
 }
 
 inline bool
-JSObject::hasShapeTable() const
+JSObject::hasPropertyTable() const
 {
     return lastProperty()->hasTable();
 }
 
 inline size_t
 JSObject::computedSizeOfThisSlotsElements() const
 {
     size_t n = sizeOfThis();
--- a/js/src/jsproxy.cpp
+++ b/js/src/jsproxy.cpp
@@ -38,23 +38,23 @@
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include <string.h>
 #include "jsapi.h"
 #include "jscntxt.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsprvtd.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 
-#include "gc/Marking.h"
 #include "vm/MethodGuard.h"
 
 #include "jsatominlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 
 using namespace js;
 using namespace js::gc;
--- a/js/src/jsscope.cpp
+++ b/js/src/jsscope.cpp
@@ -62,32 +62,32 @@
 #include "jsatominlines.h"
 #include "jsobjinlines.h"
 #include "jsscopeinlines.h"
 
 using namespace js;
 using namespace js::gc;
 
 bool
-ShapeTable::init(JSRuntime *rt, Shape *lastProp)
+PropertyTable::init(JSRuntime *rt, Shape *lastProp)
 {
     /*
      * Either we're creating a table for a large scope that was populated
      * via property cache hit logic under JSOP_INITPROP, JSOP_SETNAME, or
      * JSOP_SETPROP; or else calloc failed at least once already. In any
      * event, let's try to grow, overallocating to hold at least twice the
      * current population.
      */
     uint32_t sizeLog2 = JS_CEILING_LOG2W(2 * entryCount);
     if (sizeLog2 < MIN_SIZE_LOG2)
         sizeLog2 = MIN_SIZE_LOG2;
 
     /*
      * Use rt->calloc_ for memory accounting and overpressure handling
-     * without OOM reporting. See ShapeTable::change.
+     * without OOM reporting. See PropertyTable::change.
      */
     entries = (Shape **) rt->calloc_(sizeOfEntries(JS_BIT(sizeLog2)));
     if (!entries)
         return false;
 
     hashShift = HASH_BITS - sizeLog2;
     for (Shape::Range r = lastProp->all(); !r.empty(); r.popFront()) {
         const Shape &shape = r.front();
@@ -149,17 +149,17 @@ Shape::hashify(JSContext *cx)
     JS_ASSERT(!hasTable());
 
     RootedVarShape self(cx, this);
 
     if (!ensureOwnBaseShape(cx))
         return false;
 
     JSRuntime *rt = cx->runtime;
-    ShapeTable *table = rt->new_<ShapeTable>(self->entryCount());
+    PropertyTable *table = rt->new_<PropertyTable>(self->entryCount());
     if (!table)
         return false;
 
     if (!table->init(rt, self)) {
         rt->free_(table);
         return false;
     }
 
@@ -170,17 +170,17 @@ Shape::hashify(JSContext *cx)
 /*
  * Double hashing needs the second hash code to be relatively prime to table
  * size, so we simply make hash2 odd.
  */
 #define HASH1(hash0,shift)      ((hash0) >> (shift))
 #define HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)
 
 Shape **
-ShapeTable::search(jsid id, bool adding)
+PropertyTable::search(jsid id, bool adding)
 {
     JSHashNumber hash0, hash1, hash2;
     int sizeLog2;
     Shape *stored, *shape, **spp, **firstRemoved;
     uint32_t sizeMask;
 
     JS_ASSERT(entries);
     JS_ASSERT(!JSID_IS_EMPTY(id));
@@ -248,17 +248,17 @@ ShapeTable::search(jsid id, bool adding)
         }
     }
 
     /* NOTREACHED */
     return NULL;
 }
 
 bool
-ShapeTable::change(int log2Delta, JSContext *cx)
+PropertyTable::change(int log2Delta, JSContext *cx)
 {
     JS_ASSERT(entries);
 
     /*
      * Grow, shrink, or compress by changing this->entries.
      */
     int oldlog2 = HASH_BITS - hashShift;
     int newlog2 = oldlog2 + log2Delta;
@@ -286,17 +286,17 @@ ShapeTable::change(int log2Delta, JSCont
     }
 
     /* Finally, free the old entries storage. */
     cx->free_(oldTable);
     return true;
 }
 
 bool
-ShapeTable::grow(JSContext *cx)
+PropertyTable::grow(JSContext *cx)
 {
     JS_ASSERT(needsToGrow());
 
     uint32_t size = capacity();
     int delta = removedCount < size >> 2;
 
     if (!change(delta, cx) && entryCount + removedCount == size - 1) {
         JS_ReportOutOfMemory(cx);
@@ -539,17 +539,17 @@ JSObject::addPropertyInternal(JSContext 
 {
     JS_ASSERT_IF(!allowDictionary, !inDictionaryMode());
 
     RootId idRoot(cx, &id);
     RootedVarObject self(cx, this);
 
     RootGetterSetter gsRoot(cx, attrs, &getter, &setter);
 
-    ShapeTable *table = NULL;
+    PropertyTable *table = NULL;
     if (!inDictionaryMode()) {
         bool stableSlot =
             (slot == SHAPE_INVALID_SLOT) ||
             lastProperty()->hasMissingSlot() ||
             (slot == lastProperty()->maybeSlot() + 1);
         JS_ASSERT_IF(!allowDictionary, stableSlot);
         if (allowDictionary &&
             (!stableSlot || lastProperty()->entryCount() >= PropertyTree::MAX_HEIGHT)) {
@@ -894,17 +894,17 @@ JSObject::removeProperty(JSContext *cx, 
     }
 
     /*
      * A dictionary-mode object owns mutable, unique shapes on a non-circular
      * doubly linked list, hashed by lastProperty()->table. So we can edit the
      * list and hash in place.
      */
     if (self->inDictionaryMode()) {
-        ShapeTable &table = self->lastProperty()->table();
+        PropertyTable &table = self->lastProperty()->table();
 
         if (SHAPE_HAD_COLLISION(*spp)) {
             *spp = SHAPE_REMOVED;
             ++table.removedCount;
             --table.entryCount;
         } else {
             *spp = NULL;
             --table.entryCount;
@@ -928,21 +928,21 @@ JSObject::removeProperty(JSContext *cx, 
         /* Hand off table from the old to new last property. */
         oldLastProp->handoffTableTo(self->lastProperty());
 
         /* Generate a new shape for the object, infallibly. */
         JS_ALWAYS_TRUE(self->generateOwnShape(cx, spare));
 
         /* Consider shrinking table if its load factor is <= .25. */
         uint32_t size = table.capacity();
-        if (size > ShapeTable::MIN_SIZE && table.entryCount <= size >> 2)
+        if (size > PropertyTable::MIN_SIZE && table.entryCount <= size >> 2)
             (void) table.change(-1, cx);
     } else {
         /*
-         * Non-dictionary-mode shape tables are shared immutables, so all we
+         * Non-dictionary-mode property tables are shared immutables, so all we
          * need do is retract the last property and we'll either get or else
          * lazily make via a later hashify the exact table for the new property
          * lineage.
          */
         JS_ASSERT(shape == self->lastProperty());
         self->removeLastProperty(cx);
     }
 
@@ -1007,17 +1007,17 @@ JSObject::replaceWithNewEquivalentShape(
         RootObject selfRoot(cx, &self);
         RootShape oldRoot(cx, &oldShape);
         newShape = js_NewGCShape(cx);
         if (!newShape)
             return NULL;
         new (newShape) Shape(oldShape->base()->unowned(), 0);
     }
 
-    ShapeTable &table = self->lastProperty()->table();
+    PropertyTable &table = self->lastProperty()->table();
     Shape **spp = oldShape->isEmptyShape()
                   ? NULL
                   : table.search(oldShape->propidRef(), false);
 
     /*
      * Splice the new shape into the same position as the old shape, preserving
      * enumeration order (see bug 601399).
      */
--- a/js/src/jsscope.h
+++ b/js/src/jsscope.h
@@ -108,20 +108,20 @@
  * 
  * 3. A property represented by a non-last Shape in a shape lineage has its
  *    attributes modified.
  * 
  * To find the Shape for a particular property of an object initially requires
  * a linear search. But if the number of searches starting at any particular
  * Shape in the property tree exceeds MAX_LINEAR_SEARCHES and the Shape's
  * lineage has (excluding the EmptyShape) at least MIN_ENTRIES, we create an
- * auxiliary hash table -- the ShapeTable -- that allows faster lookup.
- * Furthermore, a ShapeTable is always created for dictionary mode lists,
- * and it is attached to the last Shape in the lineage. Shape tables for
- * property tree Shapes never change, but shape tables for dictionary mode
+ * auxiliary hash table -- the PropertyTable -- that allows faster lookup.
+ * Furthermore, a PropertyTable is always created for dictionary mode lists,
+ * and it is attached to the last Shape in the lineage. Property tables for
+ * property tree Shapes never change, but property tables for dictionary mode
  * Shapes can grow and shrink.
  *
  * There used to be a long, math-heavy comment here explaining why property
  * trees are more space-efficient than alternatives.  This was removed in bug
  * 631138; see that bug for the full details.
  *
  * Because many Shapes have similar data, there is actually a secondary type
  * called a BaseShape that holds some of a Shape's data.  Many shapes can share
@@ -133,52 +133,52 @@ namespace js {
 /* Limit on the number of slotful properties in an object. */
 static const uint32_t SHAPE_INVALID_SLOT = JS_BIT(24) - 1;
 static const uint32_t SHAPE_MAXIMUM_SLOT = JS_BIT(24) - 2;
 
 /*
  * Shapes use multiplicative hashing, but specialized to
  * minimize footprint.
  */
-struct ShapeTable {
+struct PropertyTable {
     static const uint32_t HASH_BITS     = tl::BitSize<HashNumber>::result;
     static const uint32_t MIN_ENTRIES   = 7;
     static const uint32_t MIN_SIZE_LOG2 = 4;
     static const uint32_t MIN_SIZE      = JS_BIT(MIN_SIZE_LOG2);
 
     int             hashShift;          /* multiplicative hash shift */
 
     uint32_t        entryCount;         /* number of entries in table */
     uint32_t        removedCount;       /* removed entry sentinels in table */
     uint32_t        freelist;           /* SHAPE_INVALID_SLOT or head of slot
                                            freelist in owning dictionary-mode
                                            object */
     js::Shape       **entries;          /* table of ptrs to shared tree nodes */
 
-    ShapeTable(uint32_t nentries)
+    PropertyTable(uint32_t nentries)
       : hashShift(HASH_BITS - MIN_SIZE_LOG2),
         entryCount(nentries),
         removedCount(0),
         freelist(SHAPE_INVALID_SLOT)
     {
         /* NB: entries is set by init, which must be called. */
     }
 
-    ~ShapeTable() {
+    ~PropertyTable() {
         js::UnwantedForeground::free_(entries);
     }
 
     /* By definition, hashShift = HASH_BITS - log2(capacity). */
     uint32_t capacity() const { return JS_BIT(HASH_BITS - hashShift); }
 
     /* Computes the size of the entries array for a given capacity. */
     static size_t sizeOfEntries(size_t cap) { return cap * sizeof(Shape *); }
 
     /*
-     * This counts the ShapeTable object itself (which must be
+     * This counts the PropertyTable object itself (which must be
      * heap-allocated) and its |entries| array.
      */
     size_t sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const {
         return mallocSizeOf(this) + mallocSizeOf(entries);
     }
 
     /* Whether we need to grow.  We want to do this if the load factor is >= 0.75 */
     bool needsToGrow() const {
@@ -221,17 +221,17 @@ class PropertyTree;
  * property. This information is split across the Shape and the BaseShape
  * at shape->base(). Both Shape and BaseShape can be either owned or unowned
  * by, respectively, the Object or Shape referring to them.
  *
  * Owned Shapes are used in dictionary objects, and form a doubly linked list
  * whose entries are all owned by that dictionary. Unowned Shapes are all in
  * the property tree.
  *
- * Owned BaseShapes are used for shapes which have shape tables, including
+ * Owned BaseShapes are used for shapes which have property tables, including
  * the last properties in all dictionaries. Unowned BaseShapes compactly store
  * information common to many shapes. In a given compartment there is a single
  * BaseShape for each combination of BaseShape information. This information
  * is cloned in owned BaseShapes so that information can be quickly looked up
  * for a given object or shape without regard to whether the base shape is
  * owned or not.
  *
  * All combinations of owned/unowned Shapes/BaseShapes are possible:
@@ -242,21 +242,21 @@ class PropertyTree;
  *     property to property as the object's last property changes.
  *
  * Owned Shape, Unowned BaseShape:
  *
  *     Property in a dictionary object other than the last one.
  *
  * Unowned Shape, Owned BaseShape:
  *
- *     Property in the property tree which has a shape table.
+ *     Property in the property tree which has a property table.
  *
  * Unowned Shape, Unowned BaseShape:
  *
- *     Property in the property tree which does not have a shape table.
+ *     Property in the property tree which does not have a property table.
  *
  * BaseShapes additionally encode some information about the referring object
  * itself. This includes the object's class, parent and various flags that may
  * be set for the object. Except for the class, this information is mutable and
  * may change when the object has an established property lineage. On such
  * changes the entire property lineage is not updated, but rather only the
  * last property (and its base shape). This works because only the object's
  * last property is used to query information about the object. Care must be
@@ -319,18 +319,18 @@ class BaseShape : public js::gc::Cell
         js::StrictPropertyOp rawSetter; /* setter hook for shape */
         JSObject        *setterObj;     /* user-defined callable "set" object or
                                            null if shape->hasSetterValue() */
     };
 
     /* For owned BaseShapes, the canonical unowned BaseShape. */
     HeapPtr<UnownedBaseShape> unowned_;
 
-    /* For owned BaseShapes, the shape's shape table. */
-    ShapeTable       *table_;
+    /* For owned BaseShapes, the shape's property table. */
+    PropertyTable       *table_;
 
   public:
     void finalize(FreeOp *fop);
 
     inline BaseShape(Class *clasp, JSObject *parent, uint32_t objectFlags);
     inline BaseShape(Class *clasp, JSObject *parent, uint32_t objectFlags,
                      uint8_t attrs, PropertyOp rawGetter, StrictPropertyOp rawSetter);
     inline BaseShape(const StackBaseShape &base);
@@ -353,18 +353,18 @@ class BaseShape : public js::gc::Cell
 
     bool hasGetterObject() const { return !!(flags & HAS_GETTER_OBJECT); }
     JSObject *getterObject() const { JS_ASSERT(hasGetterObject()); return getterObj; }
 
     bool hasSetterObject() const { return !!(flags & HAS_SETTER_OBJECT); }
     JSObject *setterObject() const { JS_ASSERT(hasSetterObject()); return setterObj; }
 
     bool hasTable() const { JS_ASSERT_IF(table_, isOwned()); return table_ != NULL; }
-    ShapeTable &table() const { JS_ASSERT(table_ && isOwned()); return *table_; }
-    void setTable(ShapeTable *table) { JS_ASSERT(isOwned()); table_ = table; }
+    PropertyTable &table() const { JS_ASSERT(table_ && isOwned()); return *table_; }
+    void setTable(PropertyTable *table) { JS_ASSERT(isOwned()); table_ = table; }
 
     uint32_t slotSpan() const { JS_ASSERT(isOwned()); return slotSpan_; }
     void setSlotSpan(uint32_t slotSpan) { JS_ASSERT(isOwned()); slotSpan_ = slotSpan; }
 
     /* Lookup base shapes from the compartment's baseShapes table. */
     static UnownedBaseShape *getUnowned(JSContext *cx, const StackBaseShape &base);
 
     /* Get the canonical base shape. */
@@ -545,17 +545,17 @@ struct Shape : public js::gc::Cell
             return true;
         return makeOwnBaseShape(cx);
     }
 
     bool makeOwnBaseShape(JSContext *cx);
 
   public:
     bool hasTable() const { return base()->hasTable(); }
-    js::ShapeTable &table() const { return base()->table(); }
+    js::PropertyTable &table() const { return base()->table(); }
 
     void sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf,
                              size_t *propTableSize, size_t *kidsSize) const {
         *propTableSize = hasTable() ? table().sizeOfIncludingThis(mallocSizeOf) : 0;
         *kidsSize = !inDictionary() && kids.isHash()
                   ? kids.toHash()->sizeOfIncludingThis(mallocSizeOf)
                   : 0;
     }
@@ -847,23 +847,23 @@ struct Shape : public js::gc::Cell
 
         const js::Shape *shape = this;
         uint32_t count = 0;
         for (js::Shape::Range r = shape->all(); !r.empty(); r.popFront())
             ++count;
         return count;
     }
 
-    bool isBigEnoughForAShapeTable() const {
+    bool isBigEnoughForAPropertyTable() const {
         JS_ASSERT(!hasTable());
         const js::Shape *shape = this;
         uint32_t count = 0;
         for (js::Shape::Range r = shape->all(); !r.empty(); r.popFront()) {
             ++count;
-            if (count >= ShapeTable::MIN_ENTRIES)
+            if (count >= PropertyTable::MIN_ENTRIES)
                 return true;
         }
         return false;
     }
 
 #ifdef DEBUG
     void dump(JSContext *cx, FILE *fp) const;
     void dumpSubtree(JSContext *cx, int level, FILE *fp) const;
@@ -1068,17 +1068,17 @@ Shape::search(JSContext *cx, Shape *star
     *pspp = NULL;
 
     if (start->hasTable()) {
         Shape **spp = start->table().search(id, adding);
         return SHAPE_FETCH(spp);
     }
 
     if (start->numLinearSearches() == LINEAR_SEARCHES_MAX) {
-        if (start->isBigEnoughForAShapeTable()) {
+        if (start->isBigEnoughForAPropertyTable()) {
             RootShape startRoot(cx, &start);
             RootId idRoot(cx, &id);
             if (start->hashify(cx)) {
                 Shape **spp = start->table().search(id, adding);
                 return SHAPE_FETCH(spp);
             }
         }
         /*
--- a/js/src/jsscopeinlines.h
+++ b/js/src/jsscopeinlines.h
@@ -42,21 +42,21 @@
 
 #include <new>
 
 #include "jsarray.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsdbgapi.h"
 #include "jsfun.h"
-#include "jsgc.h"
 #include "jsobj.h"
 #include "jsscope.h"
+#include "jsgc.h"
+#include "jsgcmark.h"
 
-#include "gc/Marking.h"
 #include "vm/ArgumentsObject.h"
 #include "vm/ScopeObject.h"
 #include "vm/StringObject.h"
 
 #include "jscntxtinlines.h"
 #include "jsgcinlines.h"
 #include "jsobjinlines.h"
 
@@ -171,17 +171,17 @@ BaseShape::adoptUnowned(UnownedBaseShape
      * This is a base shape owned by a dictionary object, update it to reflect the
      * unowned base shape of a new last property.
      */
     JS_ASSERT(isOwned());
     DebugOnly<uint32_t> flags = getObjectFlags();
     JS_ASSERT((flags & other->getObjectFlags()) == flags);
 
     uint32_t span = slotSpan();
-    ShapeTable *table = &this->table();
+    PropertyTable *table = &this->table();
 
     *this = *other;
     setOwned(other);
     setTable(table);
     setSlotSpan(span);
 
     assertConsistency();
 }
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -50,24 +50,24 @@
 #include "jsprf.h"
 #include "jsapi.h"
 #include "jsatom.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsdbgapi.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsopcode.h"
 #include "jsscope.h"
 #include "jsscript.h"
 
-#include "gc/Marking.h"
 #include "frontend/BytecodeEmitter.h"
 #include "frontend/Parser.h"
 #include "js/MemoryMetrics.h"
 #include "methodjit/MethodJIT.h"
 #include "methodjit/Retcon.h"
 #include "vm/Debugger.h"
 #include "vm/Xdr.h"
 
--- a/js/src/jsstr.h
+++ b/js/src/jsstr.h
@@ -40,16 +40,17 @@
 #ifndef jsstr_h___
 #define jsstr_h___
 
 #include <ctype.h>
 #include "jsapi.h"
 #include "jsatom.h"
 #include "jsprvtd.h"
 #include "jslock.h"
+#include "jscell.h"
 #include "jsutil.h"
 
 #include "js/HashTable.h"
 #include "vm/Unicode.h"
 
 namespace js {
 
 /* Implemented in jsstrinlines.h */
--- a/js/src/jstypedarray.cpp
+++ b/js/src/jstypedarray.cpp
@@ -48,23 +48,23 @@
 #include "jsprf.h"
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jstypedarray.h"
 
-#include "gc/Marking.h"
 #include "vm/GlobalObject.h"
 #include "vm/NumericConversions.h"
 
 #include "jsatominlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 #include "jstypedarrayinlines.h"
 
--- a/js/src/jswatchpoint.cpp
+++ b/js/src/jswatchpoint.cpp
@@ -32,21 +32,19 @@
  * use your version of this file under the terms of the MPL, indicate your
  * decision by deleting the provisions above and replace them with the notice
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
+#include "jswatchpoint.h"
 #include "jsatom.h"
-#include "jswatchpoint.h"
-
-#include "gc/Marking.h"
-
+#include "jsgcmark.h"
 #include "jsobjinlines.h"
 
 using namespace js;
 using namespace js::gc;
 
 inline HashNumber
 DefaultHasher<WatchKey>::hash(const Lookup &key)
 {
--- a/js/src/jsweakmap.cpp
+++ b/js/src/jsweakmap.cpp
@@ -40,19 +40,19 @@
  * ***** END LICENSE BLOCK ***** */
 
 #include <string.h>
 #include "jsapi.h"
 #include "jscntxt.h"
 #include "jsfriendapi.h"
 #include "jsgc.h"
 #include "jsobj.h"
+#include "jsgcmark.h"
 #include "jsweakmap.h"
 
-#include "gc/Marking.h"
 #include "vm/GlobalObject.h"
 
 #include "jsgcinlines.h"
 #include "jsobjinlines.h"
 
 #include "vm/MethodGuard-inl.h"
 
 using namespace js;
--- a/js/src/jsweakmap.h
+++ b/js/src/jsweakmap.h
@@ -41,18 +41,18 @@
 
 #ifndef jsweakmap_h___
 #define jsweakmap_h___
 
 #include "jsapi.h"
 #include "jsfriendapi.h"
 #include "jscntxt.h"
 #include "jsobj.h"
+#include "jsgcmark.h"
 
-#include "gc/Marking.h"
 #include "js/HashTable.h"
 
 namespace js {
 
 // A subclass template of js::HashMap whose keys and values may be garbage-collected. When
 // a key is collected, the table entry disappears, dropping its reference to the value.
 //
 // More precisely:
--- a/js/src/jswrapper.cpp
+++ b/js/src/jswrapper.cpp
@@ -36,29 +36,28 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "jsapi.h"
 #include "jscntxt.h"
-#include "jscompartment.h"
 #include "jsexn.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsiter.h"
 #include "jsnum.h"
 #include "jswrapper.h"
-
+#include "methodjit/PolyIC.h"
+#include "methodjit/MonoIC.h"
 #ifdef JS_METHODJIT
 # include "assembler/jit/ExecutableAllocator.h"
 #endif
-#include "gc/Marking.h"
-#include "methodjit/PolyIC.h"
-#include "methodjit/MonoIC.h"
+#include "jscompartment.h"
 
 #include "jsobjinlines.h"
 
 #include "vm/RegExpObject-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
--- a/js/src/jsxml.cpp
+++ b/js/src/jsxml.cpp
@@ -52,28 +52,28 @@
 #include "jsutil.h"
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsfun.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 #include "jsxml.h"
 
 #include "frontend/Parser.h"
 #include "frontend/TokenStream.h"
-#include "gc/Marking.h"
 #include "vm/GlobalObject.h"
 #include "vm/MethodGuard.h"
 #include "vm/StringBuffer.h"
 
 #include "jsatominlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 
--- a/js/src/jsxml.h
+++ b/js/src/jsxml.h
@@ -36,19 +36,19 @@
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef jsxml_h___
 #define jsxml_h___
 
 #include "jspubtd.h"
 #include "jsobj.h"
+#include "jscell.h"
 
 #include "gc/Barrier.h"
-#include "gc/Heap.h"
 
 extern const char js_AnyName_str[];
 extern const char js_AttributeName_str[];
 extern const char js_isXMLName_str[];
 extern const char js_XMLList_str[];
 
 extern const char js_amp_entity_str[];
 extern const char js_gt_entity_str[];
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -35,18 +35,18 @@
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "MethodJIT.h"
 #include "Logging.h"
 #include "assembler/jit/ExecutableAllocator.h"
 #include "assembler/assembler/RepatchBuffer.h"
-#include "gc/Marking.h"
 #include "js/MemoryMetrics.h"
+#include "jsgcmark.h"
 #include "BaseAssembler.h"
 #include "Compiler.h"
 #include "MonoIC.h"
 #include "PolyIC.h"
 #include "TrampolineCompiler.h"
 #include "jscntxtinlines.h"
 #include "jscompartment.h"
 #include "jsscope.h"
--- a/js/src/methodjit/StubCalls.cpp
+++ b/js/src/methodjit/StubCalls.cpp
@@ -40,23 +40,23 @@
 
 #include "mozilla/FloatingPoint.h"
 
 #include "jscntxt.h"
 #include "jsscope.h"
 #include "jsobj.h"
 #include "jslibmath.h"
 #include "jsiter.h"
+#include "jsgcmark.h"
 #include "jsnum.h"
 #include "jsxml.h"
 #include "jsbool.h"
 #include "assembler/assembler/MacroAssemblerCodeRef.h"
 #include "jstypes.h"
 
-#include "gc/Marking.h"
 #include "vm/Debugger.h"
 #include "vm/NumericConversions.h"
 #include "vm/String.h"
 #include "methodjit/Compiler.h"
 #include "methodjit/StubCalls.h"
 #include "methodjit/Retcon.h"
 
 #include "jsinterpinlines.h"
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -37,28 +37,28 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "vm/Debugger.h"
 #include "jsapi.h"
 #include "jscntxt.h"
+#include "jsgcmark.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jswrapper.h"
 #include "jsarrayinlines.h"
 #include "jsgcinlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
 #include "jsopcodeinlines.h"
 
 #include "frontend/BytecodeCompiler.h"
 #include "frontend/BytecodeEmitter.h"
-#include "gc/Marking.h"
 #include "methodjit/Retcon.h"
 #include "js/Vector.h"
 
 #include "vm/Stack-inl.h"
 
 using namespace js;
 
 
--- a/js/src/vm/ObjectImpl-inl.h
+++ b/js/src/vm/ObjectImpl-inl.h
@@ -5,22 +5,22 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef ObjectImpl_inl_h___
 #define ObjectImpl_inl_h___
 
 #include "mozilla/Assertions.h"
 
+#include "jscell.h"
 #include "jscompartment.h"
 #include "jsgc.h"
+#include "jsgcmark.h"
 #include "jsinterp.h"
 
-#include "gc/Heap.h"
-#include "gc/Marking.h"
 #include "js/TemplateLib.h"
 
 #include "ObjectImpl.h"
 
 namespace js {
 
 static MOZ_ALWAYS_INLINE void
 Debug_SetSlotRangeToCrashOnTouch(HeapSlot *vec, uint32_t len)
--- a/js/src/vm/ObjectImpl.cpp
+++ b/js/src/vm/ObjectImpl.cpp
@@ -165,17 +165,17 @@ js::ObjectImpl::checkShapeConsistency()
     MOZ_ASSERT(isNative());
 
     Shape *shape = lastProperty();
     Shape *prev = NULL;
 
     if (inDictionaryMode()) {
         MOZ_ASSERT(shape->hasTable());
 
-        ShapeTable &table = shape->table();
+        PropertyTable &table = shape->table();
         for (uint32_t fslot = table.freelist; fslot != SHAPE_INVALID_SLOT;
              fslot = getSlot(fslot).toPrivateUint32()) {
             MOZ_ASSERT(fslot < slotSpan());
         }
 
         for (int n = throttle; --n >= 0 && shape->parent; shape = shape->parent) {
             MOZ_ASSERT_IF(shape != lastProperty(), !shape->hasTable());
 
@@ -192,17 +192,17 @@ js::ObjectImpl::checkShapeConsistency()
             } else {
                 MOZ_ASSERT(shape->listp == &prev->parent);
             }
             prev = shape;
         }
     } else {
         for (int n = throttle; --n >= 0 && shape->parent; shape = shape->parent) {
             if (shape->hasTable()) {
-                ShapeTable &table = shape->table();
+                PropertyTable &table = shape->table();
                 MOZ_ASSERT(shape->parent);
                 for (Shape::Range r(shape); !r.empty(); r.popFront()) {
                     Shape **spp = table.search(r.front().propid(), false);
                     MOZ_ASSERT(SHAPE_FETCH(spp) == &r.front());
                 }
             }
             if (prev) {
                 MOZ_ASSERT(prev->maybeSlot() >= shape->maybeSlot());
--- a/js/src/vm/RegExpStatics.h
+++ b/js/src/vm/RegExpStatics.h
@@ -37,19 +37,19 @@
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef RegExpStatics_h__
 #define RegExpStatics_h__
 
 #include "jscntxt.h"
+#include "jsgcmark.h"
 
 #include "gc/Barrier.h"
-#include "gc/Marking.h"
 #include "js/Vector.h"
 
 #include "vm/MatchPairs.h"
 
 namespace js {
 
 class RegExpStatics
 {
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -34,17 +34,17 @@
  * decision by deleting the provisions above and replace them with the notice
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "jscntxt.h"
-#include "gc/Marking.h"
+#include "jsgcmark.h"
 #include "methodjit/MethodJIT.h"
 #include "Stack.h"
 
 #include "jsgcinlines.h"
 #include "jsobjinlines.h"
 
 #include "Stack-inl.h"
 
--- a/js/src/vm/String-inl.h
+++ b/js/src/vm/String-inl.h
@@ -37,19 +37,19 @@
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef String_inl_h__
 #define String_inl_h__
 
 #include "jscntxt.h"
+#include "jsgcmark.h"
 #include "jsprobes.h"
 
-#include "gc/Marking.h"
 #include "String.h"
 
 #include "jsgcinlines.h"
 #include "jsobjinlines.h"
 #include "gc/Barrier-inl.h"
 
 inline void
 JSString::writeBarrierPre(JSString *str)
--- a/js/src/vm/String.cpp
+++ b/js/src/vm/String.cpp
@@ -35,17 +35,17 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "mozilla/RangedPtr.h"
 
-#include "gc/Marking.h"
+#include "jsgcmark.h"
 
 #include "String.h"
 #include "String-inl.h"
 
 #include "jsobjinlines.h"
 
 using namespace mozilla;
 using namespace js;
--- a/js/src/vm/String.h
+++ b/js/src/vm/String.h
@@ -39,21 +39,19 @@
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef String_h_
 #define String_h_
 
 #include "mozilla/Attributes.h"
 
 #include "jsapi.h"
+#include "jscell.h"
 #include "jsfriendapi.h"
 
-#include "gc/Barrier.h"
-#include "gc/Heap.h"
-
 class JSString;
 class JSDependentString;
 class JSExtensibleString;
 class JSExternalString;
 class JSLinearString;
 class JSFixedString;
 class JSRope;
 class JSAtom;
@@ -670,20 +668,16 @@ class JSAtom : public JSFixedString
 
 #ifdef DEBUG
     void dump();
 #endif
 };
 
 JS_STATIC_ASSERT(sizeof(JSAtom) == sizeof(JSString));
 
-namespace js {
-typedef HeapPtr<JSAtom> HeapPtrAtom;
-}
-
 class JSInlineAtom : public JSInlineString /*, JSAtom */
 {
     /*
      * JSInlineAtom is not explicitly used and is only present for consistency.
      * See Atomize() for how JSInlineStrings get morphed into JSInlineAtoms.
      */
 };