Bug 1125930 - Rename TypeObject to ObjectGroup, r=jandem.
author Brian Hackett <bhackett1024@gmail.com>
Mon, 02 Feb 2015 11:20:38 -0700
changeset 240506 e3ff19a4994504fbedc7a02830b1063efb194183
parent 240505 64db189b540a5999b2a766d0244ba145ee41aa85
child 240507 3f097e9f306919638aab9552a093e61076da438b
push id 544
push user ahalberstadt@mozilla.com
push date Tue, 03 Feb 2015 16:54:17 +0000
reviewers jandem
bugs 1125930
milestone38.0a1
Bug 1125930 - Rename TypeObject to ObjectGroup, r=jandem.
js/public/MemoryMetrics.h
js/public/TracingAPI.h
js/src/builtin/Object.cpp
js/src/builtin/Object.h
js/src/builtin/TypedObject.cpp
js/src/builtin/TypedObject.h
js/src/frontend/BytecodeEmitter.cpp
js/src/gc/Barrier.h
js/src/gc/GCRuntime.h
js/src/gc/GCTrace.cpp
js/src/gc/GCTrace.h
js/src/gc/Heap.h
js/src/gc/Marking.cpp
js/src/gc/Marking.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/gc/RootMarking.cpp
js/src/gc/Rooting.h
js/src/gc/Tracer.cpp
js/src/gc/Tracer.h
js/src/jit-test/tests/heap-analysis/findPath.js
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineIC.cpp
js/src/jit/BaselineIC.h
js/src/jit/BaselineInspector.cpp
js/src/jit/BaselineInspector.h
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/Ion.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonAnalysis.h
js/src/jit/IonBuilder.cpp
js/src/jit/IonBuilder.h
js/src/jit/IonCaches.cpp
js/src/jit/IonCaches.h
js/src/jit/IonTypes.h
js/src/jit/JitCompartment.h
js/src/jit/LIR-Common.h
js/src/jit/LOpcodes.h
js/src/jit/Lowering.cpp
js/src/jit/Lowering.h
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGenerator.h
js/src/jit/MIRGraph.cpp
js/src/jit/MOpcodes.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/OptimizationTracking.cpp
js/src/jit/Recover.cpp
js/src/jit/VMFunctions.cpp
js/src/jit/VMFunctions.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/LIR-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/Lowering-arm.h
js/src/jit/mips/CodeGenerator-mips.cpp
js/src/jit/mips/CodeGenerator-mips.h
js/src/jit/mips/LIR-mips.h
js/src/jit/mips/Lowering-mips.cpp
js/src/jit/mips/Lowering-mips.h
js/src/jit/none/LIR-none.h
js/src/jit/none/Lowering-none.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/CodeGenerator-x86-shared.h
js/src/jit/shared/LIR-x86-shared.h
js/src/jit/shared/Lowering-x86-shared.cpp
js/src/jit/shared/Lowering-x86-shared.h
js/src/jsarray.cpp
js/src/jsarray.h
js/src/jscntxt.cpp
js/src/jscntxt.h
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/jsfriendapi.cpp
js/src/jsfriendapi.h
js/src/jsfun.cpp
js/src/jsfuninlines.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/jsinfer.cpp
js/src/jsinfer.h
js/src/jsinferinlines.h
js/src/jsiter.cpp
js/src/jsobj.cpp
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/jspubtd.h
js/src/jsstr.cpp
js/src/jsstr.h
js/src/vm/ArgumentsObject.cpp
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ArrayObject-inl.h
js/src/vm/ArrayObject.h
js/src/vm/HelperThreads.cpp
js/src/vm/Interpreter-inl.h
js/src/vm/Interpreter.cpp
js/src/vm/Interpreter.h
js/src/vm/JSONParser.cpp
js/src/vm/MemoryMetrics.cpp
js/src/vm/NativeObject-inl.h
js/src/vm/NativeObject.cpp
js/src/vm/NativeObject.h
js/src/vm/ProxyObject.cpp
js/src/vm/RegExpObject.cpp
js/src/vm/RegExpObject.h
js/src/vm/RegExpStatics.cpp
js/src/vm/Runtime-inl.h
js/src/vm/Runtime.h
js/src/vm/ScopeObject-inl.h
js/src/vm/ScopeObject.cpp
js/src/vm/ScopeObject.h
js/src/vm/SelfHosting.cpp
js/src/vm/Shape.cpp
js/src/vm/Shape.h
js/src/vm/SharedTypedArrayObject.cpp
js/src/vm/Stack-inl.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
js/src/vm/TypedArrayObject.cpp
js/src/vm/TypedArrayObject.h
js/src/vm/UbiNode.cpp
js/src/vm/UnboxedObject.cpp
js/src/vm/UnboxedObject.h
js/xpconnect/src/XPCJSRuntime.cpp
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -439,18 +439,18 @@ struct ZoneStats
 {
 #define FOR_EACH_SIZE(macro) \
     macro(Other,   IsLiveGCThing,  symbolsGCHeap) \
     macro(Other,   NotLiveGCThing, gcHeapArenaAdmin) \
     macro(Other,   NotLiveGCThing, unusedGCThings) \
     macro(Other,   IsLiveGCThing,  lazyScriptsGCHeap) \
     macro(Other,   NotLiveGCThing, lazyScriptsMallocHeap) \
     macro(Other,   IsLiveGCThing,  jitCodesGCHeap) \
-    macro(Other,   IsLiveGCThing,  typeObjectsGCHeap) \
-    macro(Other,   NotLiveGCThing, typeObjectsMallocHeap) \
+    macro(Other,   IsLiveGCThing,  objectGroupsGCHeap) \
+    macro(Other,   NotLiveGCThing, objectGroupsMallocHeap) \
     macro(Other,   NotLiveGCThing, typePool) \
     macro(Other,   NotLiveGCThing, baselineStubsOptimized) \
 
     ZoneStats()
       : FOR_EACH_SIZE(ZERO_SIZE)
         stringInfo(),
         extra(),
         allStrings(nullptr),
--- a/js/public/TracingAPI.h
+++ b/js/public/TracingAPI.h
@@ -46,19 +46,19 @@ enum JSGCTraceKind
 
     // A kind that indicates the real kind should be looked up in the arena.
     JSTRACE_OUTOFLINE = 0x07,
 
     // The following kinds do not have an exposed C++ idiom.
     JSTRACE_BASE_SHAPE = 0x0F,
     JSTRACE_JITCODE = 0x1F,
     JSTRACE_LAZY_SCRIPT = 0x2F,
-    JSTRACE_TYPE_OBJECT = 0x3F,
+    JSTRACE_OBJECT_GROUP = 0x3F,
 
-    JSTRACE_LAST = JSTRACE_TYPE_OBJECT
+    JSTRACE_LAST = JSTRACE_OBJECT_GROUP
 };
 
 namespace JS {
 // Returns a static string equivalent of |kind|.
 JS_FRIEND_API(const char *)
 GCTraceKindToAscii(JSGCTraceKind kind);
 }
 
--- a/js/src/builtin/Object.cpp
+++ b/js/src/builtin/Object.cpp
@@ -618,49 +618,50 @@ obj_isPrototypeOf(JSContext *cx, unsigne
     bool isDelegate;
     if (!IsDelegate(cx, obj, args[0], &isDelegate))
         return false;
     args.rval().setBoolean(isDelegate);
     return true;
 }
 
 PlainObject *
-js::ObjectCreateImpl(JSContext *cx, HandleObject proto, NewObjectKind newKind, HandleTypeObject type)
+js::ObjectCreateImpl(JSContext *cx, HandleObject proto, NewObjectKind newKind,
+                     HandleObjectGroup group)
 {
     // Give the new object a small number of fixed slots, like we do for empty
     // object literals ({}).
     gc::AllocKind allocKind = GuessObjectGCKind(0);
 
     if (!proto) {
         // Object.create(null) is common, optimize it by using an allocation
-        // site specific TypeObject. Because GetTypeCallerInitObject is pretty
-        // slow, the caller can pass in the type if it's known and we use that
+        // site specific ObjectGroup. Because GetCallerInitGroup is pretty
+        // slow, the caller can pass in the group if it's known and we use that
         // instead.
-        RootedTypeObject ntype(cx, type);
-        if (!ntype) {
-            ntype = GetTypeCallerInitObject(cx, JSProto_Null);
-            if (!ntype)
+        RootedObjectGroup ngroup(cx, group);
+        if (!ngroup) {
+            ngroup = GetCallerInitGroup(cx, JSProto_Null);
+            if (!ngroup)
                 return nullptr;
         }
 
-        MOZ_ASSERT(!ntype->proto().toObjectOrNull());
+        MOZ_ASSERT(!ngroup->proto().toObjectOrNull());
 
-        return NewObjectWithType<PlainObject>(cx, ntype, cx->global(), allocKind,
-                                              newKind);
+        return NewObjectWithGroup<PlainObject>(cx, ngroup, cx->global(), allocKind,
+                                               newKind);
     }
 
     return NewObjectWithGivenProto<PlainObject>(cx, proto, cx->global(), allocKind, newKind);
 }
 
 PlainObject *
 js::ObjectCreateWithTemplate(JSContext *cx, HandlePlainObject templateObj)
 {
     RootedObject proto(cx, templateObj->getProto());
-    RootedTypeObject type(cx, templateObj->type());
-    return ObjectCreateImpl(cx, proto, GenericObject, type);
+    RootedObjectGroup group(cx, templateObj->group());
+    return ObjectCreateImpl(cx, proto, GenericObject, group);
 }
 
 /* ES5 15.2.3.5: Object.create(O [, Properties]) */
 bool
 js::obj_create(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     if (args.length() == 0) {
@@ -1144,17 +1145,17 @@ CreateObjectPrototype(JSContext *cx, JSP
     if (!objectProto)
         return nullptr;
 
     /*
      * The default 'new' type of Object.prototype is required by type inference
      * to have unknown properties, to simplify handling of e.g. heterogenous
      * objects in JSON and script literals.
      */
-    if (!JSObject::setNewTypeUnknown(cx, &PlainObject::class_, objectProto))
+    if (!JSObject::setNewGroupUnknown(cx, &PlainObject::class_, objectProto))
         return nullptr;
 
     return objectProto;
 }
 
 static bool
 FinishObjectClassInit(JSContext *cx, JS::HandleObject ctor, JS::HandleObject proto)
 {
--- a/js/src/builtin/Object.h
+++ b/js/src/builtin/Object.h
@@ -20,17 +20,17 @@ namespace js {
 bool
 obj_construct(JSContext *cx, unsigned argc, JS::Value *vp);
 
 bool
 obj_valueOf(JSContext *cx, unsigned argc, JS::Value *vp);
 
 PlainObject *
 ObjectCreateImpl(JSContext *cx, HandleObject proto, NewObjectKind newKind = GenericObject,
-                 HandleTypeObject type = js::NullPtr());
+                 HandleObjectGroup group = js::NullPtr());
 
 PlainObject *
 ObjectCreateWithTemplate(JSContext *cx, HandlePlainObject templateObj);
 
 // Object methods exposed so they can be installed in the self-hosting global.
 bool
 obj_create(JSContext *cx, unsigned argc, JS::Value *vp);
 
--- a/js/src/builtin/TypedObject.cpp
+++ b/js/src/builtin/TypedObject.cpp
@@ -1490,23 +1490,23 @@ OutlineTypedObject::createUnattachedWith
                                               const Class *clasp,
                                               HandleTypeDescr descr,
                                               int32_t length,
                                               gc::InitialHeap heap)
 {
     MOZ_ASSERT(clasp == &OutlineTransparentTypedObject::class_ ||
                clasp == &OutlineOpaqueTypedObject::class_);
 
-    RootedTypeObject type(cx, cx->getNewType(clasp, TaggedProto(&descr->typedProto()), descr));
-    if (!type)
+    RootedObjectGroup group(cx, cx->getNewGroup(clasp, TaggedProto(&descr->typedProto()), descr));
+    if (!group)
         return nullptr;
 
     NewObjectKind newKind = (heap == gc::TenuredHeap) ? MaybeSingletonObject : GenericObject;
-    OutlineTypedObject *obj = NewObjectWithType<OutlineTypedObject>(cx, type, cx->global(),
-                                                                    gc::FINALIZE_OBJECT0, newKind);
+    OutlineTypedObject *obj = NewObjectWithGroup<OutlineTypedObject>(cx, group, cx->global(),
+                                                                     gc::FINALIZE_OBJECT0, newKind);
     if (!obj)
         return nullptr;
 
     obj->setOwnerAndData(nullptr, nullptr);
     return obj;
 }
 
 void
@@ -2174,22 +2174,22 @@ OutlineTypedObject::neuter(void *newData
 InlineTypedObject::create(JSContext *cx, HandleTypeDescr descr, gc::InitialHeap heap)
 {
     gc::AllocKind allocKind = allocKindForTypeDescriptor(descr);
 
     const Class *clasp = descr->opaque()
                          ? &InlineOpaqueTypedObject::class_
                          : &InlineTransparentTypedObject::class_;
 
-    RootedTypeObject type(cx, cx->getNewType(clasp, TaggedProto(&descr->typedProto()), descr));
-    if (!type)
+    RootedObjectGroup group(cx, cx->getNewGroup(clasp, TaggedProto(&descr->typedProto()), descr));
+    if (!group)
         return nullptr;
 
     NewObjectKind newKind = (heap == gc::TenuredHeap) ? MaybeSingletonObject : GenericObject;
-    return NewObjectWithType<InlineTypedObject>(cx, type, cx->global(), allocKind, newKind);
+    return NewObjectWithGroup<InlineTypedObject>(cx, group, cx->global(), allocKind, newKind);
 }
 
 /* static */ InlineTypedObject *
 InlineTypedObject::createCopy(JSContext *cx, Handle<InlineTypedObject *> templateObject,
                               gc::InitialHeap heap)
 {
     Rooted<TypeDescr *> descr(cx, &templateObject->typeDescr());
     InlineTypedObject *res = create(cx, descr, heap);
--- a/js/src/builtin/TypedObject.h
+++ b/js/src/builtin/TypedObject.h
@@ -579,17 +579,17 @@ class TypedObject : public JSObject
         return getProto()->as<TypedProto>();
     }
 
     TypedProto &maybeForwardedTypedProto() const {
         return MaybeForwarded(getProto())->as<TypedProto>();
     }
 
     TypeDescr &typeDescr() const {
-        return type()->typeDescr();
+        return group()->typeDescr();
     }
 
     TypeDescr &maybeForwardedTypeDescr() const {
         return MaybeForwarded(&typeDescr())->as<TypeDescr>();
     }
 
     int32_t offset() const;
     int32_t length() const;
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -4290,17 +4290,17 @@ ParseNode::getConstantValue(ExclusiveCon
                 return true;
             }
             id = INT_TO_JSID(idx);
             if (!DefineProperty(cx, obj, id, value, nullptr, nullptr, JSPROP_ENUMERATE))
                 return false;
         }
         MOZ_ASSERT(idx == count);
 
-        types::FixArrayType(cx, obj);
+        types::FixArrayGroup(cx, obj);
         vp.setObject(*obj);
         return true;
       }
       case PNK_OBJECT: {
         MOZ_ASSERT(isOp(JSOP_NEWINIT));
         MOZ_ASSERT(!(pn_xflags & PNX_NONCONST));
 
         if (allowObjects == DontAllowObjects) {
@@ -4353,17 +4353,17 @@ ParseNode::getConstantValue(ExclusiveCon
                 if (!DefineProperty(cx, obj, name->asPropertyName(), value,
                                     nullptr, nullptr, JSPROP_ENUMERATE))
                 {
                     return false;
                 }
             }
         }
 
-        types::FixObjectType(cx, obj);
+        types::FixObjectGroup(cx, obj);
         vp.setObject(*obj);
         return true;
       }
       default:
         MOZ_CRASH("Unexpected node");
     }
     return false;
 }
@@ -4371,17 +4371,17 @@ ParseNode::getConstantValue(ExclusiveCon
 static bool
 EmitSingletonInitialiser(ExclusiveContext *cx, BytecodeEmitter *bce, ParseNode *pn)
 {
     RootedValue value(cx);
     if (!pn->getConstantValue(cx, ParseNode::AllowObjects, &value))
         return false;
 
     RootedNativeObject obj(cx, &value.toObject().as<NativeObject>());
-    if (!obj->is<ArrayObject>() && !JSObject::setSingletonType(cx, obj))
+    if (!obj->is<ArrayObject>() && !JSObject::setSingleton(cx, obj))
         return false;
 
     ObjectBox *objbox = bce->parser->newObjectBox(obj);
     if (!objbox)
         return false;
 
     return EmitObjectOp(cx, objbox, JSOP_OBJECT, bce);
 }
--- a/js/src/gc/Barrier.h
+++ b/js/src/gc/Barrier.h
@@ -178,17 +178,17 @@ class PlainObject;
 class PropertyName;
 class SavedFrame;
 class ScopeObject;
 class ScriptSourceObject;
 class Shape;
 class UnownedBaseShape;
 
 namespace types {
-struct TypeObject;
+struct ObjectGroup;
 }
 
 namespace jit {
 class JitCode;
 }
 
 #ifdef DEBUG
 // Barriers can't be triggered during backend Ion compilation, which may run on
@@ -227,17 +227,17 @@ template <> struct MapTypeToTraceKind<Pr
 template <> struct MapTypeToTraceKind<SavedFrame>       { static const JSGCTraceKind kind = JSTRACE_OBJECT; };
 template <> struct MapTypeToTraceKind<ScopeObject>      { static const JSGCTraceKind kind = JSTRACE_OBJECT; };
 template <> struct MapTypeToTraceKind<Shape>            { static const JSGCTraceKind kind = JSTRACE_SHAPE; };
 template <> struct MapTypeToTraceKind<AccessorShape>    { static const JSGCTraceKind kind = JSTRACE_SHAPE; };
 template <> struct MapTypeToTraceKind<SharedArrayBufferObject>{ static const JSGCTraceKind kind = JSTRACE_OBJECT; };
 template <> struct MapTypeToTraceKind<SharedTypedArrayObject>{ static const JSGCTraceKind kind = JSTRACE_OBJECT; };
 template <> struct MapTypeToTraceKind<UnownedBaseShape> { static const JSGCTraceKind kind = JSTRACE_BASE_SHAPE; };
 template <> struct MapTypeToTraceKind<jit::JitCode>     { static const JSGCTraceKind kind = JSTRACE_JITCODE; };
-template <> struct MapTypeToTraceKind<types::TypeObject>{ static const JSGCTraceKind kind = JSTRACE_TYPE_OBJECT; };
+template <> struct MapTypeToTraceKind<types::ObjectGroup> { static const JSGCTraceKind kind = JSTRACE_OBJECT_GROUP; };
 
 // Direct value access used by the write barriers and the jits.
 void
 MarkValueUnbarriered(JSTracer *trc, Value *v, const char *name);
 
 // These three declarations are also present in gc/Marking.h, via the DeclMarker
 // macro.  Not great, but hard to avoid.
 void
@@ -801,17 +801,17 @@ typedef HeapPtr<JSLinearString*> HeapPtr
 typedef HeapPtr<JSObject*> HeapPtrObject;
 typedef HeapPtr<JSScript*> HeapPtrScript;
 typedef HeapPtr<JSString*> HeapPtrString;
 typedef HeapPtr<PlainObject*> HeapPtrPlainObject;
 typedef HeapPtr<PropertyName*> HeapPtrPropertyName;
 typedef HeapPtr<Shape*> HeapPtrShape;
 typedef HeapPtr<UnownedBaseShape*> HeapPtrUnownedBaseShape;
 typedef HeapPtr<jit::JitCode*> HeapPtrJitCode;
-typedef HeapPtr<types::TypeObject*> HeapPtrTypeObject;
+typedef HeapPtr<types::ObjectGroup*> HeapPtrObjectGroup;
 
 typedef PreBarriered<Value> PreBarrieredValue;
 typedef RelocatablePtr<Value> RelocatableValue;
 typedef HeapPtr<Value> HeapValue;
 
 typedef PreBarriered<jsid> PreBarrieredId;
 typedef RelocatablePtr<jsid> RelocatableId;
 typedef HeapPtr<jsid> HeapId;
@@ -822,17 +822,17 @@ typedef ImmutableTenuredPtr<JS::Symbol*>
 typedef ReadBarriered<DebugScopeObject*> ReadBarrieredDebugScopeObject;
 typedef ReadBarriered<GlobalObject*> ReadBarrieredGlobalObject;
 typedef ReadBarriered<JSFunction*> ReadBarrieredFunction;
 typedef ReadBarriered<JSObject*> ReadBarrieredObject;
 typedef ReadBarriered<ScriptSourceObject*> ReadBarrieredScriptSourceObject;
 typedef ReadBarriered<Shape*> ReadBarrieredShape;
 typedef ReadBarriered<UnownedBaseShape*> ReadBarrieredUnownedBaseShape;
 typedef ReadBarriered<jit::JitCode*> ReadBarrieredJitCode;
-typedef ReadBarriered<types::TypeObject*> ReadBarrieredTypeObject;
+typedef ReadBarriered<types::ObjectGroup*> ReadBarrieredObjectGroup;
 typedef ReadBarriered<JSAtom*> ReadBarrieredAtom;
 typedef ReadBarriered<JS::Symbol*> ReadBarrieredSymbol;
 
 typedef ReadBarriered<Value> ReadBarrieredValue;
 
 // A pre- and post-barriered Value that is specialized to be aware that it
 // resides in a slots or elements vector. This allows it to be relocated in
 // memory, but with substantially less overhead than a RelocatablePtr.
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -821,17 +821,17 @@ class GCRuntime
 
   private:
     enum IncrementalProgress
     {
         NotFinished = 0,
         Finished
     };
 
-    void minorGCImpl(JS::gcreason::Reason reason, Nursery::TypeObjectList *pretenureTypes);
+    void minorGCImpl(JS::gcreason::Reason reason, Nursery::ObjectGroupList *pretenureGroups);
 
     // For ArenaLists::allocateFromArena()
     friend class ArenaLists;
     Chunk *pickChunk(const AutoLockGC &lock,
                      AutoMaybeStartBackgroundAllocation &maybeStartBGAlloc);
     ArenaHeader *allocateArena(Chunk *chunk, Zone *zone, AllocKind kind, const AutoLockGC &lock);
     inline void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
 
--- a/js/src/gc/GCTrace.cpp
+++ b/js/src/gc/GCTrace.cpp
@@ -20,17 +20,17 @@ using namespace js::gc;
 using namespace js::types;
 
 JS_STATIC_ASSERT(AllocKinds == FINALIZE_LIMIT);
 JS_STATIC_ASSERT(LastObjectAllocKind == FINALIZE_OBJECT_LAST);
 
 static FILE *gcTraceFile = nullptr;
 
 static HashSet<const Class *, DefaultHasher<const Class *>, SystemAllocPolicy> tracedClasses;
-static HashSet<const TypeObject *, DefaultHasher<const TypeObject *>, SystemAllocPolicy> tracedTypes;
+static HashSet<const ObjectGroup *, DefaultHasher<const ObjectGroup *>, SystemAllocPolicy> tracedGroups;
 
 static inline void
 WriteWord(uint64_t data)
 {
     if (gcTraceFile)
         fwrite(&data, sizeof(data), 1, gcTraceFile);
 }
 
@@ -151,58 +151,58 @@ MaybeTraceClass(const Class *clasp)
     TraceString(clasp->name);
     TraceInt(clasp->flags);
     TraceInt(clasp->finalize != nullptr);
 
     MOZ_ALWAYS_TRUE(tracedClasses.put(clasp));
 }
 
 static void
-MaybeTraceType(TypeObject *type)
+MaybeTraceGroup(ObjectGroup *group)
 {
-    if (tracedTypes.has(type))
+    if (tracedGroups.has(group))
         return;
 
-    MaybeTraceClass(type->clasp());
-    TraceEvent(TraceEventTypeInfo, uint64_t(type));
-    TraceAddress(type->clasp());
-    TraceInt(type->flags());
+    MaybeTraceClass(group->clasp());
+    TraceEvent(TraceEventGroupInfo, uint64_t(group));
+    TraceAddress(group->clasp());
+    TraceInt(group->flags());
 
-    MOZ_ALWAYS_TRUE(tracedTypes.put(type));
+    MOZ_ALWAYS_TRUE(tracedGroups.put(group));
 }
 
 void
-js::gc::TraceTypeNewScript(TypeObject *type)
+js::gc::TraceTypeNewScript(ObjectGroup *group)
 {
     const size_t bufLength = 128;
     static char buffer[bufLength];
-    MOZ_ASSERT(type->hasNewScript());
-    JSAtom *funName = type->newScript()->fun->displayAtom();
+    MOZ_ASSERT(group->hasNewScript());
+    JSAtom *funName = group->newScript()->fun->displayAtom();
     if (!funName)
         return;
 
     size_t length = funName->length();
     MOZ_ALWAYS_TRUE(length < bufLength);
     CopyChars(reinterpret_cast<Latin1Char *>(buffer), *funName);
     buffer[length] = 0;
 
-    TraceEvent(TraceEventTypeNewScript, uint64_t(type));
+    TraceEvent(TraceEventTypeNewScript, uint64_t(group));
     TraceString(buffer);
 }
 
 void
 js::gc::TraceCreateObject(JSObject* object)
 {
     if (!gcTraceFile)
         return;
 
-    TypeObject *type = object->type();
-    MaybeTraceType(type);
+    ObjectGroup *group = object->group();
+    MaybeTraceGroup(group);
     TraceEvent(TraceEventCreateObject, uint64_t(object));
-    TraceAddress(type);
+    TraceAddress(group);
 }
 
 void
 js::gc::TraceMinorGCStart()
 {
     TraceEvent(TraceEventMinorGCStart);
 }
 
@@ -225,18 +225,18 @@ js::gc::TraceMajorGCStart()
     TraceEvent(TraceEventMajorGCStart);
 }
 
 void
 js::gc::TraceTenuredFinalize(Cell *thing)
 {
     if (!gcTraceFile)
         return;
-    if (thing->tenuredGetAllocKind() == FINALIZE_TYPE_OBJECT)
-        tracedTypes.remove(static_cast<const TypeObject *>(thing));
+    if (thing->tenuredGetAllocKind() == FINALIZE_OBJECT_GROUP)
+        tracedGroups.remove(static_cast<const ObjectGroup *>(thing));
     TraceEvent(TraceEventTenuredFinalize, uint64_t(thing));
 }
 
 void
 js::gc::TraceMajorGCEnd()
 {
     TraceEvent(TraceEventMajorGCEnd);
 }
--- a/js/src/gc/GCTrace.h
+++ b/js/src/gc/GCTrace.h
@@ -6,17 +6,17 @@
 
 #ifndef gc_GCTrace_h
 #define gc_GCTrace_h
 
 #include "gc/Heap.h"
 
 namespace js {
 
-namespace types { struct TypeObject; }
+namespace types { struct ObjectGroup; }
 
 namespace gc {
 
 #ifdef JS_GC_TRACE
 
 extern bool InitTrace(GCRuntime &gc);
 extern void FinishTrace();
 extern bool TraceEnabled();
@@ -24,32 +24,32 @@ extern void TraceNurseryAlloc(Cell *thin
 extern void TraceTenuredAlloc(Cell *thing, AllocKind kind);
 extern void TraceCreateObject(JSObject* object);
 extern void TraceMinorGCStart();
 extern void TracePromoteToTenured(Cell *src, Cell *dst);
 extern void TraceMinorGCEnd();
 extern void TraceMajorGCStart();
 extern void TraceTenuredFinalize(Cell *thing);
 extern void TraceMajorGCEnd();
-extern void TraceTypeNewScript(js::types::TypeObject *type);
+extern void TraceTypeNewScript(js::types::ObjectGroup *group);
 
 #else
 
 inline bool InitTrace(GCRuntime &gc) { return true; }
 inline void FinishTrace() {}
 inline bool TraceEnabled() { return false; }
 inline void TraceNurseryAlloc(Cell *thing, size_t size) {}
 inline void TraceTenuredAlloc(Cell *thing, AllocKind kind) {}
 inline void TraceCreateObject(JSObject* object) {}
 inline void TraceMinorGCStart() {}
 inline void TracePromoteToTenured(Cell *src, Cell *dst) {}
 inline void TraceMinorGCEnd() {}
 inline void TraceMajorGCStart() {}
 inline void TraceTenuredFinalize(Cell *thing) {}
 inline void TraceMajorGCEnd() {}
-inline void TraceTypeNewScript(js::types::TypeObject *type) {}
+inline void TraceTypeNewScript(js::types::ObjectGroup *group) {}
 
 #endif
 
 } /* namespace gc */
 } /* namespace js */
 
 #endif
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -89,55 +89,55 @@ enum AllocKind {
     FINALIZE_OBJECT16,
     FINALIZE_OBJECT16_BACKGROUND,
     FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16_BACKGROUND,
     FINALIZE_SCRIPT,
     FINALIZE_LAZY_SCRIPT,
     FINALIZE_SHAPE,
     FINALIZE_ACCESSOR_SHAPE,
     FINALIZE_BASE_SHAPE,
-    FINALIZE_TYPE_OBJECT,
+    FINALIZE_OBJECT_GROUP,
     FINALIZE_FAT_INLINE_STRING,
     FINALIZE_STRING,
     FINALIZE_EXTERNAL_STRING,
     FINALIZE_SYMBOL,
     FINALIZE_JITCODE,
     FINALIZE_LAST = FINALIZE_JITCODE
 };
 
 static const unsigned FINALIZE_LIMIT = FINALIZE_LAST + 1;
 static const unsigned FINALIZE_OBJECT_LIMIT = FINALIZE_OBJECT_LAST + 1;
 
 static inline JSGCTraceKind
 MapAllocToTraceKind(AllocKind kind)
 {
     static const JSGCTraceKind map[] = {
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT0 */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT0_BACKGROUND */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT2 */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT2_BACKGROUND */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT4 */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT4_BACKGROUND */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT8 */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT8_BACKGROUND */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT12 */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT12_BACKGROUND */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT16 */
-        JSTRACE_OBJECT,     /* FINALIZE_OBJECT16_BACKGROUND */
-        JSTRACE_SCRIPT,     /* FINALIZE_SCRIPT */
-        JSTRACE_LAZY_SCRIPT,/* FINALIZE_LAZY_SCRIPT */
-        JSTRACE_SHAPE,      /* FINALIZE_SHAPE */
-        JSTRACE_SHAPE,      /* FINALIZE_ACCESSOR_SHAPE */
-        JSTRACE_BASE_SHAPE, /* FINALIZE_BASE_SHAPE */
-        JSTRACE_TYPE_OBJECT,/* FINALIZE_TYPE_OBJECT */
-        JSTRACE_STRING,     /* FINALIZE_FAT_INLINE_STRING */
-        JSTRACE_STRING,     /* FINALIZE_STRING */
-        JSTRACE_STRING,     /* FINALIZE_EXTERNAL_STRING */
-        JSTRACE_SYMBOL,     /* FINALIZE_SYMBOL */
-        JSTRACE_JITCODE,    /* FINALIZE_JITCODE */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT0 */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT0_BACKGROUND */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT2 */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT2_BACKGROUND */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT4 */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT4_BACKGROUND */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT8 */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT8_BACKGROUND */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT12 */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT12_BACKGROUND */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT16 */
+        JSTRACE_OBJECT,       /* FINALIZE_OBJECT16_BACKGROUND */
+        JSTRACE_SCRIPT,       /* FINALIZE_SCRIPT */
+        JSTRACE_LAZY_SCRIPT,  /* FINALIZE_LAZY_SCRIPT */
+        JSTRACE_SHAPE,        /* FINALIZE_SHAPE */
+        JSTRACE_SHAPE,        /* FINALIZE_ACCESSOR_SHAPE */
+        JSTRACE_BASE_SHAPE,   /* FINALIZE_BASE_SHAPE */
+        JSTRACE_OBJECT_GROUP, /* FINALIZE_OBJECT_GROUP */
+        JSTRACE_STRING,       /* FINALIZE_FAT_INLINE_STRING */
+        JSTRACE_STRING,       /* FINALIZE_STRING */
+        JSTRACE_STRING,       /* FINALIZE_EXTERNAL_STRING */
+        JSTRACE_SYMBOL,       /* FINALIZE_SYMBOL */
+        JSTRACE_JITCODE,      /* FINALIZE_JITCODE */
     };
 
     static_assert(MOZ_ARRAY_LENGTH(map) == FINALIZE_LIMIT,
                   "AllocKind-to-TraceKind mapping must be in sync");
     return map[kind];
 }
 
 /*
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -82,28 +82,28 @@ PushMarkStack(GCMarker *gcmarker, Shape 
 
 static inline void
 PushMarkStack(GCMarker *gcmarker, JSString *str);
 
 static inline void
 PushMarkStack(GCMarker *gcmarker, JS::Symbol *sym);
 
 static inline void
-PushMarkStack(GCMarker *gcmarker, types::TypeObject *thing);
+PushMarkStack(GCMarker *gcmarker, types::ObjectGroup *thing);
 
 namespace js {
 namespace gc {
 
 static void MarkChildren(JSTracer *trc, JSString *str);
 static void MarkChildren(JSTracer *trc, JS::Symbol *sym);
 static void MarkChildren(JSTracer *trc, JSScript *script);
 static void MarkChildren(JSTracer *trc, LazyScript *lazy);
 static void MarkChildren(JSTracer *trc, Shape *shape);
 static void MarkChildren(JSTracer *trc, BaseShape *base);
-static void MarkChildren(JSTracer *trc, types::TypeObject *type);
+static void MarkChildren(JSTracer *trc, types::ObjectGroup *group);
 static void MarkChildren(JSTracer *trc, jit::JitCode *code);
 
 } /* namespace gc */
 } /* namespace js */
 
 /*** Object Marking ***/
 
 #if defined(DEBUG)
@@ -609,17 +609,17 @@ DeclMarkerImpl(Script, JSScript)
 DeclMarkerImpl(LazyScript, LazyScript)
 DeclMarkerImpl(Shape, Shape)
 DeclMarkerImpl(String, JSAtom)
 DeclMarkerImpl(String, JSString)
 DeclMarkerImpl(String, JSFlatString)
 DeclMarkerImpl(String, JSLinearString)
 DeclMarkerImpl(String, PropertyName)
 DeclMarkerImpl(Symbol, JS::Symbol)
-DeclMarkerImpl(TypeObject, js::types::TypeObject)
+DeclMarkerImpl(ObjectGroup, js::types::ObjectGroup)
 
 } /* namespace gc */
 } /* namespace js */
 
 /*** Externally Typed Marking ***/
 
 void
 gc::MarkKind(JSTracer *trc, void **thingp, JSGCTraceKind kind)
@@ -649,18 +649,18 @@ gc::MarkKind(JSTracer *trc, void **thing
         MarkInternal(trc, reinterpret_cast<jit::JitCode **>(thingp));
         break;
       case JSTRACE_LAZY_SCRIPT:
         MarkInternal(trc, reinterpret_cast<LazyScript **>(thingp));
         break;
       case JSTRACE_SHAPE:
         MarkInternal(trc, reinterpret_cast<Shape **>(thingp));
         break;
-      case JSTRACE_TYPE_OBJECT:
-        MarkInternal(trc, reinterpret_cast<types::TypeObject **>(thingp));
+      case JSTRACE_OBJECT_GROUP:
+        MarkInternal(trc, reinterpret_cast<types::ObjectGroup **>(thingp));
         break;
       default:
         MOZ_CRASH("Invalid trace kind in MarkKind.");
     }
 }
 
 static void
 MarkGCThingInternal(JSTracer *trc, void **thingp, const char *name)
@@ -786,24 +786,24 @@ gc::MarkValueRoot(JSTracer *trc, Value *
     MarkValueInternal(trc, v);
 }
 
 void
 gc::MarkTypeRoot(JSTracer *trc, types::Type *v, const char *name)
 {
     JS_ROOT_MARKING_ASSERT(trc);
     trc->setTracingName(name);
-    if (v->isSingleObject()) {
-        JSObject *obj = v->singleObject();
+    if (v->isSingleton()) {
+        JSObject *obj = v->singleton();
         MarkInternal(trc, &obj);
         *v = types::Type::ObjectType(obj);
-    } else if (v->isTypeObject()) {
-        types::TypeObject *typeObj = v->typeObject();
-        MarkInternal(trc, &typeObj);
-        *v = types::Type::ObjectType(typeObj);
+    } else if (v->isGroup()) {
+        types::ObjectGroup *group = v->group();
+        MarkInternal(trc, &group);
+        *v = types::Type::ObjectType(group);
     }
 }
 
 void
 gc::MarkValueRange(JSTracer *trc, size_t len, BarrieredBase<Value> *vec, const char *name)
 {
     for (size_t i = 0; i < len; ++i) {
         trc->setTracingIndex(name, i);
@@ -1061,17 +1061,17 @@ PushMarkStack(GCMarker *gcmarker, JSFunc
     JS_COMPARTMENT_ASSERT(gcmarker->runtime(), thing);
     MOZ_ASSERT(!IsInsideNursery(thing));
 
     if (thing->asTenured().markIfUnmarked(gcmarker->getMarkColor()))
         gcmarker->pushObject(thing);
 }
 
 static void
-PushMarkStack(GCMarker *gcmarker, types::TypeObject *thing)
+PushMarkStack(GCMarker *gcmarker, types::ObjectGroup *thing)
 {
     JS_COMPARTMENT_ASSERT(gcmarker->runtime(), thing);
     MOZ_ASSERT(!IsInsideNursery(thing));
 
     if (thing->markIfUnmarked(gcmarker->getMarkColor()))
         gcmarker->pushType(thing);
 }
 
@@ -1417,73 +1417,73 @@ gc::MarkCycleCollectorChildren(JSTracer 
             MOZ_ASSERT(tmp == shape->setterObject());
         }
 
         shape = shape->previous();
     } while (shape);
 }
 
 static void
-ScanTypeObject(GCMarker *gcmarker, types::TypeObject *type)
+ScanObjectGroup(GCMarker *gcmarker, types::ObjectGroup *group)
 {
-    unsigned count = type->getPropertyCount();
+    unsigned count = group->getPropertyCount();
     for (unsigned i = 0; i < count; i++) {
-        if (types::Property *prop = type->getProperty(i))
-            MarkId(gcmarker, &prop->id, "TypeObject property id");
+        if (types::Property *prop = group->getProperty(i))
+            MarkId(gcmarker, &prop->id, "ObjectGroup property id");
     }
 
-    if (type->proto().isObject())
-        PushMarkStack(gcmarker, type->proto().toObject());
+    if (group->proto().isObject())
+        PushMarkStack(gcmarker, group->proto().toObject());
 
-    if (type->singleton() && !type->lazy())
-        PushMarkStack(gcmarker, type->singleton());
+    if (group->singleton() && !group->lazy())
+        PushMarkStack(gcmarker, group->singleton());
 
-    if (type->newScript())
-        type->newScript()->trace(gcmarker);
+    if (group->newScript())
+        group->newScript()->trace(gcmarker);
 
-    if (type->maybeUnboxedLayout())
-        type->unboxedLayout().trace(gcmarker);
+    if (group->maybeUnboxedLayout())
+        group->unboxedLayout().trace(gcmarker);
 
-    if (TypeDescr *descr = type->maybeTypeDescr())
+    if (TypeDescr *descr = group->maybeTypeDescr())
         PushMarkStack(gcmarker, descr);
 
-    if (JSFunction *fun = type->maybeInterpretedFunction())
+    if (JSFunction *fun = group->maybeInterpretedFunction())
         PushMarkStack(gcmarker, fun);
 }
 
 static void
-gc::MarkChildren(JSTracer *trc, types::TypeObject *type)
+gc::MarkChildren(JSTracer *trc, types::ObjectGroup *group)
 {
-    unsigned count = type->getPropertyCount();
+    unsigned count = group->getPropertyCount();
     for (unsigned i = 0; i < count; i++) {
-        types::Property *prop = type->getProperty(i);
+        types::Property *prop = group->getProperty(i);
         if (prop)
-            MarkId(trc, &prop->id, "type_prop");
+            MarkId(trc, &prop->id, "group_property");
     }
 
-    if (type->proto().isObject())
-        MarkObject(trc, &type->protoRaw(), "type_proto");
+    if (group->proto().isObject())
+        MarkObject(trc, &group->protoRaw(), "group_proto");
 
-    if (type->singleton() && !type->lazy())
-        MarkObject(trc, &type->singletonRaw(), "type_singleton");
+    if (group->singleton() && !group->lazy())
+        MarkObject(trc, &group->singletonRaw(), "group_singleton");
 
-    if (type->newScript())
-        type->newScript()->trace(trc);
+    if (group->newScript())
+        group->newScript()->trace(trc);
 
-    if (type->maybeUnboxedLayout())
-        type->unboxedLayout().trace(trc);
+    if (group->maybeUnboxedLayout())
+        group->unboxedLayout().trace(trc);
 
-    if (JSObject *descr = type->maybeTypeDescr()) {
-        MarkObjectUnbarriered(trc, &descr, "type_descr");
-        type->setTypeDescr(&descr->as<TypeDescr>());
+    if (JSObject *descr = group->maybeTypeDescr()) {
+        MarkObjectUnbarriered(trc, &descr, "group_type_descr");
+        group->setTypeDescr(&descr->as<TypeDescr>());
     }
 
-    if (JSObject *fun = type->maybeInterpretedFunction()) {
-        MarkObjectUnbarriered(trc, &fun, "type_function");
-        type->setInterpretedFunction(&fun->as<JSFunction>());
+    if (JSObject *fun = group->maybeInterpretedFunction()) {
+        MarkObjectUnbarriered(trc, &fun, "group_function");
+        group->setInterpretedFunction(&fun->as<JSFunction>());
     }
 }
 
 static void
 gc::MarkChildren(JSTracer *trc, jit::JitCode *code)
 {
     code->trace(trc);
 }
@@ -1527,18 +1527,18 @@ gc::PushArena(GCMarker *gcmarker, ArenaH
       case JSTRACE_LAZY_SCRIPT:
         PushArenaTyped<LazyScript>(gcmarker, aheader);
         break;
 
       case JSTRACE_SHAPE:
         PushArenaTyped<js::Shape>(gcmarker, aheader);
         break;
 
-      case JSTRACE_TYPE_OBJECT:
-        PushArenaTyped<js::types::TypeObject>(gcmarker, aheader);
+      case JSTRACE_OBJECT_GROUP:
+        PushArenaTyped<js::types::ObjectGroup>(gcmarker, aheader);
         break;
 
       default:
         MOZ_CRASH("Invalid trace kind in PushArena.");
     }
 }
 
 struct SlotArrayLayout
@@ -1646,18 +1646,18 @@ GCMarker::restoreValueArray(NativeObject
 
     MOZ_ASSERT(*vpp <= *endp);
     return true;
 }
 
 void
 GCMarker::processMarkStackOther(uintptr_t tag, uintptr_t addr)
 {
-    if (tag == TypeTag) {
-        ScanTypeObject(this, reinterpret_cast<types::TypeObject *>(addr));
+    if (tag == GroupTag) {
+        ScanObjectGroup(this, reinterpret_cast<types::ObjectGroup *>(addr));
     } else if (tag == SavedValueArrayTag) {
         MOZ_ASSERT(!(addr & CellMask));
         NativeObject *obj = reinterpret_cast<NativeObject *>(addr);
         HeapValue *vp, *end;
         if (restoreValueArray(obj, (void **)&vp, (void **)&end))
             pushValueArray(obj, vp, end);
         else
             pushObject(obj);
@@ -1791,24 +1791,24 @@ GCMarker::processMarkStackTop(SliceBudge
         JS_COMPARTMENT_ASSERT(runtime(), obj);
 
         budget.step();
         if (budget.isOverBudget()) {
             pushObject(obj);
             return;
         }
 
-        types::TypeObject *type = obj->typeFromGC();
-        PushMarkStack(this, type);
+        types::ObjectGroup *group = obj->groupFromGC();
+        PushMarkStack(this, group);
 
         Shape *shape = obj->lastProperty();
         PushMarkStack(this, shape);
 
         /* Call the trace hook if necessary. */
-        const Class *clasp = type->clasp();
+        const Class *clasp = group->clasp();
         if (clasp->trace) {
             // Global objects all have the same trace hook. That hook is safe without barriers
             // if the global has no custom trace hook of its own, or has been moved to a different
             // compartment, and so can't have one.
             MOZ_ASSERT_IF(!(clasp->trace == JS_GlobalObjectTraceHook &&
                             (!obj->compartment()->options().getTrace() || !obj->isOwnGlobal())),
                           clasp->flags & JSCLASS_IMPLEMENTS_BARRIERS);
             if (clasp->trace == InlineTypedObject::obj_trace) {
@@ -1945,18 +1945,18 @@ js::TraceChildren(JSTracer *trc, void *t
       case JSTRACE_LAZY_SCRIPT:
         MarkChildren(trc, static_cast<LazyScript *>(thing));
         break;
 
       case JSTRACE_SHAPE:
         MarkChildren(trc, static_cast<Shape *>(thing));
         break;
 
-      case JSTRACE_TYPE_OBJECT:
-        MarkChildren(trc, (types::TypeObject *)thing);
+      case JSTRACE_OBJECT_GROUP:
+        MarkChildren(trc, (types::ObjectGroup *)thing);
         break;
 
       default:
         MOZ_CRASH("Invalid trace kind in TraceChildren.");
     }
 }
 
 #ifdef DEBUG
--- a/js/src/gc/Marking.h
+++ b/js/src/gc/Marking.h
@@ -127,17 +127,17 @@ DeclMarker(Script, JSScript)
 DeclMarker(LazyScript, LazyScript)
 DeclMarker(Shape, Shape)
 DeclMarker(String, JSAtom)
 DeclMarker(String, JSString)
 DeclMarker(String, JSFlatString)
 DeclMarker(String, JSLinearString)
 DeclMarker(String, PropertyName)
 DeclMarker(Symbol, JS::Symbol)
-DeclMarker(TypeObject, types::TypeObject)
+DeclMarker(ObjectGroup, types::ObjectGroup)
 
 #undef DeclMarker
 
 void
 MarkPermanentAtom(JSTracer *trc, JSAtom *atom, const char *name);
 
 void
 MarkWellKnownSymbol(JSTracer *trc, JS::Symbol *sym);
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -159,20 +159,20 @@ js::Nursery::verifyFinalizerList()
 {
 #ifdef DEBUG
     for (ListItem *current = finalizers_; current; current = current->next()) {
         JSObject *obj = current->get();
         RelocationOverlay *overlay = RelocationOverlay::fromCell(obj);
         if (overlay->isForwarded())
             obj = static_cast<JSObject *>(overlay->forwardingAddress());
         MOZ_ASSERT(obj);
-        MOZ_ASSERT(obj->type());
-        MOZ_ASSERT(obj->type()->clasp());
-        MOZ_ASSERT(obj->type()->clasp()->finalize);
-        MOZ_ASSERT(obj->type()->clasp()->flags & JSCLASS_FINALIZE_FROM_NURSERY);
+        MOZ_ASSERT(obj->group());
+        MOZ_ASSERT(obj->group()->clasp());
+        MOZ_ASSERT(obj->group()->clasp()->finalize);
+        MOZ_ASSERT(obj->group()->clasp()->flags & JSCLASS_FINALIZE_FROM_NURSERY);
     }
 #endif // DEBUG
 }
 
 JSObject *
 js::Nursery::allocateObject(JSContext *cx, size_t size, size_t numDynamic, const js::Class *clasp)
 {
     /* Ensure there's enough space to replace the contents with a RelocationOverlay. */
@@ -528,50 +528,50 @@ js::Nursery::forwardBufferPointer(HeapSl
 
         *pSlotsElems = *reinterpret_cast<HeapSlot **>(old);
     } while (false);
 
     MOZ_ASSERT(!isInside(*pSlotsElems));
     MOZ_ASSERT(IsWriteableAddress(*pSlotsElems));
 }
 
-// Structure for counting how many times objects of a particular type have been
-// tenured during a minor collection.
+// Structure for counting how many times objects in a particular group have
+// been tenured during a minor collection.
 struct TenureCount
 {
-    types::TypeObject *type;
+    types::ObjectGroup *group;
     int count;
 };
 
-// Keep rough track of how many times we tenure objects of particular types
+// Keep rough track of how many times we tenure objects in particular groups
 // during minor collections, using a fixed size hash for efficiency at the cost
 // of potential collisions.
 struct Nursery::TenureCountCache
 {
     TenureCount entries[16];
 
     TenureCountCache() { PodZero(this); }
 
-    TenureCount &findEntry(types::TypeObject *type) {
-        return entries[PointerHasher<types::TypeObject *, 3>::hash(type) % ArrayLength(entries)];
+    TenureCount &findEntry(types::ObjectGroup *group) {
+        return entries[PointerHasher<types::ObjectGroup *, 3>::hash(group) % ArrayLength(entries)];
     }
 };
 
 void
 js::Nursery::collectToFixedPoint(MinorCollectionTracer *trc, TenureCountCache &tenureCounts)
 {
     for (RelocationOverlay *p = trc->head; p; p = p->next()) {
         JSObject *obj = static_cast<JSObject*>(p->forwardingAddress());
         traceObject(trc, obj);
 
-        TenureCount &entry = tenureCounts.findEntry(obj->type());
-        if (entry.type == obj->type()) {
+        TenureCount &entry = tenureCounts.findEntry(obj->group());
+        if (entry.group == obj->group()) {
             entry.count++;
-        } else if (!entry.type) {
-            entry.type = obj->type();
+        } else if (!entry.group) {
+            entry.group = obj->group();
             entry.count = 1;
         }
     }
 }
 
 MOZ_ALWAYS_INLINE void
 js::Nursery::traceObject(MinorCollectionTracer *trc, JSObject *obj)
 {
@@ -760,17 +760,17 @@ js::Nursery::MinorGCCallback(JSTracer *j
         *thingp = trc->nursery->moveToTenured(trc, static_cast<JSObject *>(*thingp));
 }
 
 #define TIME_START(name) int64_t timstampStart_##name = enableProfiling_ ? PRMJ_Now() : 0
 #define TIME_END(name) int64_t timstampEnd_##name = enableProfiling_ ? PRMJ_Now() : 0
 #define TIME_TOTAL(name) (timstampEnd_##name - timstampStart_##name)
 
 void
-js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList *pretenureTypes)
+js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, ObjectGroupList *pretenureGroups)
 {
     if (rt->mainThread.suppressGC)
         return;
 
     JS_AbortIfWrongThread(rt);
 
     StoreBuffer &sb = rt->gc.storeBuffer;
     if (!isEnabled() || isEmpty()) {
@@ -892,24 +892,24 @@ js::Nursery::collect(JSRuntime *rt, JS::
     if (promotionRate > 0.05)
         growAllocableSpace();
     else if (promotionRate < 0.01)
         shrinkAllocableSpace();
     TIME_END(resize);
 
     // If we are promoting the nursery, or exhausted the store buffer with
     // pointers to nursery things, which will force a collection well before
-    // the nursery is full, look for object types that are getting promoted
+    // the nursery is full, look for object groups that are getting promoted
     // excessively and try to pretenure them.
     TIME_START(pretenure);
-    if (pretenureTypes && (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER)) {
+    if (pretenureGroups && (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER)) {
         for (size_t i = 0; i < ArrayLength(tenureCounts.entries); i++) {
             const TenureCount &entry = tenureCounts.entries[i];
             if (entry.count >= 3000)
-                pretenureTypes->append(entry.type); // ignore alloc failure
+                pretenureGroups->append(entry.group); // ignore alloc failure
         }
     }
     TIME_END(pretenure);
 
     // We ignore gcMaxBytes when allocating for minor collection. However, if we
     // overflowed, we disable the nursery. The next time we allocate, we'll fail
     // because gcBytes >= gcMaxBytes.
     if (rt->gc.usage.gcBytes() >= rt->gc.tunables.gcMaxBytes())
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -35,17 +35,17 @@ void SetGCZeal(JSRuntime *, uint8_t, uin
 
 namespace gc {
 struct Cell;
 class Collector;
 class MinorCollectionTracer;
 } /* namespace gc */
 
 namespace types {
-struct TypeObject;
+struct ObjectGroup;
 }
 
 namespace jit {
 class CodeGenerator;
 class MacroAssembler;
 class ICStubCompiler;
 class BaselineCompiler;
 }
@@ -112,23 +112,23 @@ class Nursery
 
     /* Resize an existing elements vector. */
     ObjectElements *reallocateElements(JSObject *obj, ObjectElements *oldHeader,
                                        uint32_t oldCount, uint32_t newCount);
 
     /* Free a slots array. */
     void freeSlots(HeapSlot *slots);
 
-    typedef Vector<types::TypeObject *, 0, SystemAllocPolicy> TypeObjectList;
+    typedef Vector<types::ObjectGroup *, 0, SystemAllocPolicy> ObjectGroupList;
 
     /*
-     * Do a minor collection, optionally specifying a list to store types which
+     * Do a minor collection, optionally specifying a list to store groups which
      * should be pretenured afterwards.
      */
-    void collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList *pretenureTypes);
+    void collect(JSRuntime *rt, JS::gcreason::Reason reason, ObjectGroupList *pretenureGroups);
 
     /*
      * Check if the thing at |*ref| in the Nursery has been forwarded. If so,
      * sets |*ref| to the new location of the object and returns true. Otherwise
      * returns false and leaves |*ref| unset.
      */
     template <typename T>
     MOZ_ALWAYS_INLINE bool getForwardedPointer(T **ref);
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -98,18 +98,18 @@ MarkExactStackRootList(JSTracer *trc, So
 
 template<class T>
 static void
 MarkExactStackRootsAcrossTypes(T context, JSTracer *trc)
 {
     MarkExactStackRootList<JSObject *, MarkObjectRoot>(trc, context, "exact-object");
     MarkExactStackRootList<Shape *, MarkShapeRoot>(trc, context, "exact-shape");
     MarkExactStackRootList<BaseShape *, MarkBaseShapeRoot>(trc, context, "exact-baseshape");
-    MarkExactStackRootList<types::TypeObject *, MarkTypeObjectRoot>(
-        trc, context, "exact-typeobject");
+    MarkExactStackRootList<types::ObjectGroup *, MarkObjectGroupRoot>(
+        trc, context, "exact-objectgroup");
     MarkExactStackRootList<JSString *, MarkStringRoot>(trc, context, "exact-string");
     MarkExactStackRootList<JS::Symbol *, MarkSymbolRoot>(trc, context, "exact-symbol");
     MarkExactStackRootList<jit::JitCode *, MarkJitCodeRoot>(trc, context, "exact-jitcode");
     MarkExactStackRootList<JSScript *, MarkScriptRoot>(trc, context, "exact-script");
     MarkExactStackRootList<LazyScript *, MarkLazyScriptRoot>(trc, context, "exact-lazy-script");
     MarkExactStackRootList<jsid, MarkIdRoot>(trc, context, "exact-id");
     MarkExactStackRootList<Value, MarkValueRoot>(trc, context, "exact-value");
     MarkExactStackRootList<types::Type, MarkTypeRoot>(trc, context, "types::Type");
--- a/js/src/gc/Rooting.h
+++ b/js/src/gc/Rooting.h
@@ -16,37 +16,37 @@ namespace js {
 
 class PropertyName;
 class NativeObject;
 class ArrayObject;
 class PlainObject;
 class ScriptSourceObject;
 class Shape;
 
-namespace types { struct TypeObject; }
+namespace types { struct ObjectGroup; }
 
 // These are internal counterparts to the public types such as HandleObject.
 
 typedef JS::Handle<NativeObject*>      HandleNativeObject;
 typedef JS::Handle<Shape*>             HandleShape;
-typedef JS::Handle<types::TypeObject*> HandleTypeObject;
+typedef JS::Handle<types::ObjectGroup*> HandleObjectGroup;
 typedef JS::Handle<JSAtom*>            HandleAtom;
 typedef JS::Handle<JSLinearString*>    HandleLinearString;
 typedef JS::Handle<PropertyName*>      HandlePropertyName;
 typedef JS::Handle<ArrayObject*>       HandleArrayObject;
 typedef JS::Handle<PlainObject*>       HandlePlainObject;
 typedef JS::Handle<ScriptSourceObject*> HandleScriptSource;
 
 typedef JS::MutableHandle<Shape*>      MutableHandleShape;
 typedef JS::MutableHandle<JSAtom*>     MutableHandleAtom;
 typedef JS::MutableHandle<NativeObject*> MutableHandleNativeObject;
 
 typedef JS::Rooted<NativeObject*>      RootedNativeObject;
 typedef JS::Rooted<Shape*>             RootedShape;
-typedef JS::Rooted<types::TypeObject*> RootedTypeObject;
+typedef JS::Rooted<types::ObjectGroup*> RootedObjectGroup;
 typedef JS::Rooted<JSAtom*>            RootedAtom;
 typedef JS::Rooted<JSLinearString*>    RootedLinearString;
 typedef JS::Rooted<PropertyName*>      RootedPropertyName;
 typedef JS::Rooted<ArrayObject*>       RootedArrayObject;
 typedef JS::Rooted<PlainObject*>       RootedPlainObject;
 typedef JS::Rooted<ScriptSourceObject*> RootedScriptSource;
 
 } /* namespace js */
--- a/js/src/gc/Tracer.cpp
+++ b/js/src/gc/Tracer.cpp
@@ -228,18 +228,18 @@ JS_GetTraceThingInfo(char *buf, size_t b
       case JSTRACE_LAZY_SCRIPT:
         name = "lazyscript";
         break;
 
       case JSTRACE_SHAPE:
         name = "shape";
         break;
 
-      case JSTRACE_TYPE_OBJECT:
-        name = "type_object";
+      case JSTRACE_OBJECT_GROUP:
+        name = "object_group";
         break;
 
       default:
         name = "INVALID";
         break;
     }
 
     n = strlen(name);
--- a/js/src/gc/Tracer.h
+++ b/js/src/gc/Tracer.h
@@ -18,17 +18,17 @@ class NativeObject;
 class GCMarker;
 namespace gc {
 struct ArenaHeader;
 }
 namespace jit {
 class JitCode;
 }
 namespace types {
-struct TypeObject;
+struct ObjectGroup;
 }
 
 static const size_t NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY = 4096;
 static const size_t INCREMENTAL_MARK_STACK_BASE_CAPACITY = 32768;
 
 /*
  * When the native stack is low, the GC does not call JS_TraceChildren to mark
  * the reachable "children" of the thing. Rather the thing is put aside and
@@ -138,18 +138,18 @@ class GCMarker : public JSTracer
     void start();
     void stop();
     void reset();
 
     void pushObject(JSObject *obj) {
         pushTaggedPtr(ObjectTag, obj);
     }
 
-    void pushType(types::TypeObject *type) {
-        pushTaggedPtr(TypeTag, type);
+    void pushType(types::ObjectGroup *group) {
+        pushTaggedPtr(GroupTag, group);
     }
 
     void pushJitCode(jit::JitCode *code) {
         pushTaggedPtr(JitCodeTag, code);
     }
 
     uint32_t getMarkColor() const {
         return color;
@@ -226,17 +226,17 @@ class GCMarker : public JSTracer
     /*
      * We use a common mark stack to mark GC things of different types and use
      * the explicit tags to distinguish them when it cannot be deduced from
      * the context of push or pop operation.
      */
     enum StackTag {
         ValueArrayTag,
         ObjectTag,
-        TypeTag,
+        GroupTag,
         XmlTag,
         SavedValueArrayTag,
         JitCodeTag,
         LastTag = JitCodeTag
     };
 
     static const uintptr_t StackTagMask = 7;
     static_assert(StackTagMask >= uintptr_t(LastTag), "The tag mask must subsume the tags.");
--- a/js/src/jit-test/tests/heap-analysis/findPath.js
+++ b/js/src/jit-test/tests/heap-analysis/findPath.js
@@ -16,18 +16,18 @@ print(uneval(findPath(o, o.w.x.y.z)));
 var a = [ , o ];
 Match.Pattern([{node: {}, edge: "objectElements[1]"}])
   .assert(findPath(a, o));
 print(uneval(findPath(a, o)));
 
 function C() {}
 C.prototype.obj = {};
 var c = new C;
-Match.Pattern([{node: {}, edge: "type"},
-               {node: Match.Pattern.ANY, edge: "type_proto"},
+Match.Pattern([{node: {}, edge: "group"},
+               {node: Match.Pattern.ANY, edge: "group_proto"},
                {node: { constructor: Match.Pattern.ANY }, edge: "obj"}])
   .assert(findPath(c, c.obj));
 print(uneval(findPath(c, c.obj)));
 
 function f(x) { return function g(y) { return x+y; }; }
 var o = {}
 var gc = f(o);
 Match.Pattern([{node: gc, edge: "fun_environment"},
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -1716,31 +1716,31 @@ BaselineCompiler::emit_JSOP_LINENO()
 }
 
 bool
 BaselineCompiler::emit_JSOP_NEWARRAY()
 {
     frame.syncStack(0);
 
     uint32_t length = GET_UINT24(pc);
-    RootedTypeObject type(cx);
-    if (!types::UseNewTypeForInitializer(script, pc, JSProto_Array)) {
-        type = types::TypeScript::InitObject(cx, script, pc, JSProto_Array);
-        if (!type)
+    RootedObjectGroup group(cx);
+    if (!types::UseSingletonForInitializer(script, pc, JSProto_Array)) {
+        group = types::TypeScript::InitGroup(cx, script, pc, JSProto_Array);
+        if (!group)
             return false;
     }
 
-    // Pass length in R0, type in R1.
+    // Pass length in R0, group in R1.
     masm.move32(Imm32(length), R0.scratchReg());
-    masm.movePtr(ImmGCPtr(type), R1.scratchReg());
+    masm.movePtr(ImmGCPtr(group), R1.scratchReg());
 
     ArrayObject *templateObject = NewDenseUnallocatedArray(cx, length, nullptr, TenuredObject);
     if (!templateObject)
         return false;
-    templateObject->setType(type);
+    templateObject->setGroup(group);
 
     ICNewArray_Fallback::Compiler stubCompiler(cx, templateObject);
     if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
         return false;
 
     frame.push(R0);
     return true;
 }
@@ -1791,43 +1791,43 @@ BaselineCompiler::emit_JSOP_INITELEM_ARR
     return true;
 }
 
 bool
 BaselineCompiler::emit_JSOP_NEWOBJECT()
 {
     frame.syncStack(0);
 
-    RootedTypeObject type(cx);
-    if (!types::UseNewTypeForInitializer(script, pc, JSProto_Object)) {
-        type = types::TypeScript::InitObject(cx, script, pc, JSProto_Object);
-        if (!type)
+    RootedObjectGroup group(cx);
+    if (!types::UseSingletonForInitializer(script, pc, JSProto_Object)) {
+        group = types::TypeScript::InitGroup(cx, script, pc, JSProto_Object);
+        if (!group)
             return false;
     }
 
     RootedPlainObject baseObject(cx, &script->getObject(pc)->as<PlainObject>());
     RootedPlainObject templateObject(cx, CopyInitializerObject(cx, baseObject, TenuredObject));
     if (!templateObject)
         return false;
 
-    if (type) {
-        templateObject->setType(type);
+    if (group) {
+        templateObject->setGroup(group);
     } else {
-        if (!JSObject::setSingletonType(cx, templateObject))
+        if (!JSObject::setSingleton(cx, templateObject))
             return false;
     }
 
     // Try to do the allocation inline.
     Label done;
-    if (type && !type->shouldPreTenure() && !templateObject->hasDynamicSlots()) {
+    if (group && !group->shouldPreTenure() && !templateObject->hasDynamicSlots()) {
         Label slowPath;
         Register objReg = R0.scratchReg();
         Register tempReg = R1.scratchReg();
-        masm.movePtr(ImmGCPtr(type), tempReg);
-        masm.branchTest32(Assembler::NonZero, Address(tempReg, types::TypeObject::offsetOfFlags()),
+        masm.movePtr(ImmGCPtr(group), tempReg);
+        masm.branchTest32(Assembler::NonZero, Address(tempReg, types::ObjectGroup::offsetOfFlags()),
                           Imm32(types::OBJECT_FLAG_PRE_TENURE), &slowPath);
         masm.branchPtr(Assembler::NotEqual, AbsoluteAddress(cx->compartment()->addressOfMetadataCallback()),
                       ImmWord(0), &slowPath);
         masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap, &slowPath);
         masm.tagValue(JSVAL_TYPE_OBJECT, objReg, R0);
         masm.jump(&done);
         masm.bind(&slowPath);
     }
@@ -1842,48 +1842,48 @@ BaselineCompiler::emit_JSOP_NEWOBJECT()
 }
 
 bool
 BaselineCompiler::emit_JSOP_NEWINIT()
 {
     frame.syncStack(0);
     JSProtoKey key = JSProtoKey(GET_UINT8(pc));
 
-    RootedTypeObject type(cx);
-    if (!types::UseNewTypeForInitializer(script, pc, key)) {
-        type = types::TypeScript::InitObject(cx, script, pc, key);
-        if (!type)
+    RootedObjectGroup group(cx);
+    if (!types::UseSingletonForInitializer(script, pc, key)) {
+        group = types::TypeScript::InitGroup(cx, script, pc, key);
+        if (!group)
             return false;
     }
 
     if (key == JSProto_Array) {
-        // Pass length in R0, type in R1.
+        // Pass length in R0, group in R1.
         masm.move32(Imm32(0), R0.scratchReg());
-        masm.movePtr(ImmGCPtr(type), R1.scratchReg());
+        masm.movePtr(ImmGCPtr(group), R1.scratchReg());
 
         ArrayObject *templateObject = NewDenseUnallocatedArray(cx, 0, nullptr, TenuredObject);
         if (!templateObject)
             return false;
-        templateObject->setType(type);
+        templateObject->setGroup(group);
 
         ICNewArray_Fallback::Compiler stubCompiler(cx, templateObject);
         if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
             return false;
     } else {
         MOZ_ASSERT(key == JSProto_Object);
 
         RootedPlainObject templateObject(cx,
             NewBuiltinClassInstance<PlainObject>(cx, TenuredObject));
         if (!templateObject)
             return false;
 
-        if (type) {
-            templateObject->setType(type);
+        if (group) {
+            templateObject->setGroup(group);
         } else {
-            if (!JSObject::setSingletonType(cx, templateObject))
+            if (!JSObject::setSingleton(cx, templateObject))
                 return false;
         }
 
         ICNewObject_Fallback::Compiler stubCompiler(cx, templateObject);
         if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
             return false;
     }
 
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -249,22 +249,22 @@ ICStub::trace(JSTracer *trc)
       case ICStub::GetElem_TypedArray: {
         ICGetElem_TypedArray *getElemStub = toGetElem_TypedArray();
         MarkShape(trc, &getElemStub->shape(), "baseline-getelem-typedarray-shape");
         break;
       }
       case ICStub::SetElem_Dense: {
         ICSetElem_Dense *setElemStub = toSetElem_Dense();
         MarkShape(trc, &setElemStub->shape(), "baseline-getelem-dense-shape");
-        MarkTypeObject(trc, &setElemStub->type(), "baseline-setelem-dense-type");
+        MarkObjectGroup(trc, &setElemStub->group(), "baseline-setelem-dense-group");
         break;
       }
       case ICStub::SetElem_DenseAdd: {
         ICSetElem_DenseAdd *setElemStub = toSetElem_DenseAdd();
-        MarkTypeObject(trc, &setElemStub->type(), "baseline-setelem-denseadd-type");
+        MarkObjectGroup(trc, &setElemStub->group(), "baseline-setelem-denseadd-group");
 
         JS_STATIC_ASSERT(ICSetElem_DenseAdd::MAX_PROTO_CHAIN_DEPTH == 4);
 
         switch (setElemStub->protoChainDepth()) {
           case 0: setElemStub->toImpl<0>()->traceShapes(trc); break;
           case 1: setElemStub->toImpl<1>()->traceShapes(trc); break;
           case 2: setElemStub->toImpl<2>()->traceShapes(trc); break;
           case 3: setElemStub->toImpl<3>()->traceShapes(trc); break;
@@ -275,32 +275,32 @@ ICStub::trace(JSTracer *trc)
       }
       case ICStub::SetElem_TypedArray: {
         ICSetElem_TypedArray *setElemStub = toSetElem_TypedArray();
         MarkShape(trc, &setElemStub->shape(), "baseline-setelem-typedarray-shape");
         break;
       }
       case ICStub::TypeMonitor_SingleObject: {
         ICTypeMonitor_SingleObject *monitorStub = toTypeMonitor_SingleObject();
-        MarkObject(trc, &monitorStub->object(), "baseline-monitor-singleobject");
+        MarkObject(trc, &monitorStub->object(), "baseline-monitor-singleton");
         break;
       }
-      case ICStub::TypeMonitor_TypeObject: {
-        ICTypeMonitor_TypeObject *monitorStub = toTypeMonitor_TypeObject();
-        MarkTypeObject(trc, &monitorStub->type(), "baseline-monitor-typeobject");
+      case ICStub::TypeMonitor_ObjectGroup: {
+        ICTypeMonitor_ObjectGroup *monitorStub = toTypeMonitor_ObjectGroup();
+        MarkObjectGroup(trc, &monitorStub->group(), "baseline-monitor-group");
         break;
       }
       case ICStub::TypeUpdate_SingleObject: {
         ICTypeUpdate_SingleObject *updateStub = toTypeUpdate_SingleObject();
-        MarkObject(trc, &updateStub->object(), "baseline-update-singleobject");
+        MarkObject(trc, &updateStub->object(), "baseline-update-singleton");
         break;
       }
-      case ICStub::TypeUpdate_TypeObject: {
-        ICTypeUpdate_TypeObject *updateStub = toTypeUpdate_TypeObject();
-        MarkTypeObject(trc, &updateStub->type(), "baseline-update-typeobject");
+      case ICStub::TypeUpdate_ObjectGroup: {
+        ICTypeUpdate_ObjectGroup *updateStub = toTypeUpdate_ObjectGroup();
+        MarkObjectGroup(trc, &updateStub->group(), "baseline-update-group");
         break;
       }
       case ICStub::GetName_Global: {
         ICGetName_Global *globalStub = toGetName_Global();
         MarkShape(trc, &globalStub->shape(), "baseline-global-stub-shape");
         break;
       }
       case ICStub::GetName_Scope0:
@@ -360,17 +360,17 @@ ICStub::trace(JSTracer *trc)
           case 7: propStub->toImpl<7>()->traceShapes(trc); break;
           case 8: propStub->toImpl<8>()->traceShapes(trc); break;
           default: MOZ_CRASH("Invalid proto stub.");
         }
         break;
       }
       case ICStub::GetProp_Unboxed: {
         ICGetProp_Unboxed *propStub = toGetProp_Unboxed();
-        MarkTypeObject(trc, &propStub->type(), "baseline-getprop-unboxed-stub-type");
+        MarkObjectGroup(trc, &propStub->group(), "baseline-getprop-unboxed-stub-group");
         break;
       }
       case ICStub::GetProp_TypedObject: {
         ICGetProp_TypedObject *propStub = toGetProp_TypedObject();
         MarkShape(trc, &propStub->shape(), "baseline-getprop-typedobject-stub-shape");
         break;
       }
       case ICStub::GetProp_CallDOMProxyNative:
@@ -417,45 +417,45 @@ ICStub::trace(JSTracer *trc)
         MarkObject(trc, &callStub->holder(), "baseline-getpropcallnativeproto-stub-holder");
         MarkShape(trc, &callStub->holderShape(), "baseline-getpropcallnativeproto-stub-holdershape");
         MarkObject(trc, &callStub->getter(), "baseline-getpropcallnativeproto-stub-getter");
         break;
       }
       case ICStub::SetProp_Native: {
         ICSetProp_Native *propStub = toSetProp_Native();
         MarkShape(trc, &propStub->shape(), "baseline-setpropnative-stub-shape");
-        MarkTypeObject(trc, &propStub->type(), "baseline-setpropnative-stub-type");
+        MarkObjectGroup(trc, &propStub->group(), "baseline-setpropnative-stub-group");
         break;
       }
       case ICStub::SetProp_NativeAdd: {
         ICSetProp_NativeAdd *propStub = toSetProp_NativeAdd();
-        MarkTypeObject(trc, &propStub->type(), "baseline-setpropnativeadd-stub-type");
+        MarkObjectGroup(trc, &propStub->group(), "baseline-setpropnativeadd-stub-group");
         MarkShape(trc, &propStub->newShape(), "baseline-setpropnativeadd-stub-newshape");
-        if (propStub->newType())
-            MarkTypeObject(trc, &propStub->newType(), "baseline-setpropnativeadd-stub-new-type");
+        if (propStub->newGroup())
+            MarkObjectGroup(trc, &propStub->newGroup(), "baseline-setpropnativeadd-stub-new-group");
         JS_STATIC_ASSERT(ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH == 4);
         switch (propStub->protoChainDepth()) {
           case 0: propStub->toImpl<0>()->traceShapes(trc); break;
           case 1: propStub->toImpl<1>()->traceShapes(trc); break;
           case 2: propStub->toImpl<2>()->traceShapes(trc); break;
           case 3: propStub->toImpl<3>()->traceShapes(trc); break;
           case 4: propStub->toImpl<4>()->traceShapes(trc); break;
           default: MOZ_CRASH("Invalid proto stub.");
         }
         break;
       }
       case ICStub::SetProp_Unboxed: {
         ICSetProp_Unboxed *propStub = toSetProp_Unboxed();
-        MarkTypeObject(trc, &propStub->type(), "baseline-setprop-unboxed-stub-type");
+        MarkObjectGroup(trc, &propStub->group(), "baseline-setprop-unboxed-stub-group");
         break;
       }
       case ICStub::SetProp_TypedObject: {
         ICSetProp_TypedObject *propStub = toSetProp_TypedObject();
         MarkShape(trc, &propStub->shape(), "baseline-setprop-typedobject-stub-shape");
-        MarkTypeObject(trc, &propStub->type(), "baseline-setprop-typedobject-stub-type");
+        MarkObjectGroup(trc, &propStub->group(), "baseline-setprop-typedobject-stub-group");
         break;
       }
       case ICStub::SetProp_CallScripted: {
         ICSetProp_CallScripted *callStub = toSetProp_CallScripted();
         MarkShape(trc, &callStub->shape(), "baseline-setpropcallscripted-stub-shape");
         MarkObject(trc, &callStub->holder(), "baseline-setpropcallscripted-stub-holder");
         MarkShape(trc, &callStub->holderShape(), "baseline-setpropcallscripted-stub-holdershape");
         MarkObject(trc, &callStub->setter(), "baseline-setpropcallscripted-stub-setter");
@@ -1093,17 +1093,17 @@ ICTypeMonitor_Fallback::addMonitorStubFo
         JitSpew(JitSpew_BaselineIC, "  %s TypeMonitor stub %p for primitive type %d",
                 existingStub ? "Modified existing" : "Created new", stub, type);
 
         if (!existingStub) {
             MOZ_ASSERT(!hasStub(TypeMonitor_PrimitiveSet));
             addOptimizedMonitorStub(stub);
         }
 
-    } else if (val.toObject().hasSingletonType()) {
+    } else if (val.toObject().isSingleton()) {
         RootedObject obj(cx, &val.toObject());
 
         // Check for existing TypeMonitor stub.
         for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) {
             if (iter->isTypeMonitor_SingleObject() &&
                 iter->toTypeMonitor_SingleObject()->object() == obj)
             {
                 return true;
@@ -1118,36 +1118,36 @@ ICTypeMonitor_Fallback::addMonitorStubFo
         }
 
         JitSpew(JitSpew_BaselineIC, "  Added TypeMonitor stub %p for singleton %p",
                 stub, obj.get());
 
         addOptimizedMonitorStub(stub);
 
     } else {
-        RootedTypeObject type(cx, val.toObject().type());
+        RootedObjectGroup group(cx, val.toObject().group());
 
         // Check for existing TypeMonitor stub.
         for (ICStubConstIterator iter(firstMonitorStub()); !iter.atEnd(); iter++) {
-            if (iter->isTypeMonitor_TypeObject() &&
-                iter->toTypeMonitor_TypeObject()->type() == type)
+            if (iter->isTypeMonitor_ObjectGroup() &&
+                iter->toTypeMonitor_ObjectGroup()->group() == group)
             {
                 return true;
             }
         }
 
-        ICTypeMonitor_TypeObject::Compiler compiler(cx, type);
+        ICTypeMonitor_ObjectGroup::Compiler compiler(cx, group);
         ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
         if (!stub) {
             js_ReportOutOfMemory(cx);
             return false;
         }
 
-        JitSpew(JitSpew_BaselineIC, "  Added TypeMonitor stub %p for TypeObject %p",
-                stub, type.get());
+        JitSpew(JitSpew_BaselineIC, "  Added TypeMonitor stub %p for ObjectGroup %p",
+                stub, group.get());
 
         addOptimizedMonitorStub(stub);
     }
 
     bool firstMonitorStubAdded = wasDetachedMonitorChain && (numOptimizedMonitorStubs_ > 0);
 
     if (firstMonitorStubAdded) {
         // Was an empty monitor chain before, but a new stub was added.  This is the
@@ -1283,27 +1283,27 @@ ICTypeMonitor_SingleObject::Compiler::ge
     EmitReturnFromIC(masm);
 
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
 }
 
 bool
-ICTypeMonitor_TypeObject::Compiler::generateStubCode(MacroAssembler &masm)
+ICTypeMonitor_ObjectGroup::Compiler::generateStubCode(MacroAssembler &masm)
 {
     Label failure;
     masm.branchTestObject(Assembler::NotEqual, R0, &failure);
 
-    // Guard on the object's TypeObject.
+    // Guard on the object's ObjectGroup.
     Register obj = masm.extractObject(R0, ExtractTemp0);
-    masm.loadPtr(Address(obj, JSObject::offsetOfType()), R1.scratchReg());
-
-    Address expectedType(BaselineStubReg, ICTypeMonitor_TypeObject::offsetOfType());
-    masm.branchPtr(Assembler::NotEqual, expectedType, R1.scratchReg(), &failure);
+    masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), R1.scratchReg());
+
+    Address expectedGroup(BaselineStubReg, ICTypeMonitor_ObjectGroup::offsetOfGroup());
+    masm.branchPtr(Assembler::NotEqual, expectedGroup, R1.scratchReg(), &failure);
 
     EmitReturnFromIC(masm);
 
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
 }
 
@@ -1345,17 +1345,17 @@ ICUpdatedStub::addUpdateStubForValue(JSC
         if (!existingStub) {
             MOZ_ASSERT(!hasTypeUpdateStub(TypeUpdate_PrimitiveSet));
             addOptimizedUpdateStub(stub);
         }
 
         JitSpew(JitSpew_BaselineIC, "  %s TypeUpdate stub %p for primitive type %d",
                 existingStub ? "Modified existing" : "Created new", stub, type);
 
-    } else if (val.toObject().hasSingletonType()) {
+    } else if (val.toObject().isSingleton()) {
         RootedObject obj(cx, &val.toObject());
 
         // Check for existing TypeUpdate stub.
         for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
             if (iter->isTypeUpdate_SingleObject() &&
                 iter->toTypeUpdate_SingleObject()->object() == obj)
             {
                 return true;
@@ -1367,34 +1367,34 @@ ICUpdatedStub::addUpdateStubForValue(JSC
         if (!stub)
             return false;
 
         JitSpew(JitSpew_BaselineIC, "  Added TypeUpdate stub %p for singleton %p", stub, obj.get());
 
         addOptimizedUpdateStub(stub);
 
     } else {
-        RootedTypeObject type(cx, val.toObject().type());
+        RootedObjectGroup group(cx, val.toObject().group());
 
         // Check for existing TypeUpdate stub.
         for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
-            if (iter->isTypeUpdate_TypeObject() &&
-                iter->toTypeUpdate_TypeObject()->type() == type)
+            if (iter->isTypeUpdate_ObjectGroup() &&
+                iter->toTypeUpdate_ObjectGroup()->group() == group)
             {
                 return true;
             }
         }
 
-        ICTypeUpdate_TypeObject::Compiler compiler(cx, type);
+        ICTypeUpdate_ObjectGroup::Compiler compiler(cx, group);
         ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
         if (!stub)
             return false;
 
-        JitSpew(JitSpew_BaselineIC, "  Added TypeUpdate stub %p for TypeObject %p",
-                stub, type.get());
+        JitSpew(JitSpew_BaselineIC, "  Added TypeUpdate stub %p for ObjectGroup %p",
+                stub, group.get());
 
         addOptimizedUpdateStub(stub);
     }
 
     return true;
 }
 
 //
@@ -1533,29 +1533,29 @@ ICTypeUpdate_SingleObject::Compiler::gen
     EmitReturnFromIC(masm);
 
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
 }
 
 bool
-ICTypeUpdate_TypeObject::Compiler::generateStubCode(MacroAssembler &masm)
+ICTypeUpdate_ObjectGroup::Compiler::generateStubCode(MacroAssembler &masm)
 {
     Label failure;
     masm.branchTestObject(Assembler::NotEqual, R0, &failure);
 
-    // Guard on the object's TypeObject.
+    // Guard on the object's ObjectGroup.
     Register obj = masm.extractObject(R0, R1.scratchReg());
-    masm.loadPtr(Address(obj, JSObject::offsetOfType()), R1.scratchReg());
-
-    Address expectedType(BaselineStubReg, ICTypeUpdate_TypeObject::offsetOfType());
-    masm.branchPtr(Assembler::NotEqual, expectedType, R1.scratchReg(), &failure);
-
-    // Type matches, load true into R1.scratchReg() and return.
+    masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), R1.scratchReg());
+
+    Address expectedGroup(BaselineStubReg, ICTypeUpdate_ObjectGroup::offsetOfGroup());
+    masm.branchPtr(Assembler::NotEqual, expectedGroup, R1.scratchReg(), &failure);
+
+    // Group matches, load true into R1.scratchReg() and return.
     masm.mov(ImmWord(1), R1.scratchReg());
     EmitReturnFromIC(masm);
 
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
 }
 
@@ -1620,29 +1620,29 @@ ICThis_Fallback::Compiler::generateStubC
 }
 
 //
 // NewArray_Fallback
 //
 
 static bool
 DoNewArray(JSContext *cx, ICNewArray_Fallback *stub, uint32_t length,
-           HandleTypeObject type, MutableHandleValue res)
+           HandleObjectGroup group, MutableHandleValue res)
 {
     FallbackICSpew(cx, stub, "NewArray");
 
-    JSObject *obj = NewDenseArray(cx, length, type, NewArray_FullyAllocating);
+    JSObject *obj = NewDenseArray(cx, length, group, NewArray_FullyAllocating);
     if (!obj)
         return false;
 
     res.setObject(*obj);
     return true;
 }
 
-typedef bool(*DoNewArrayFn)(JSContext *, ICNewArray_Fallback *, uint32_t, HandleTypeObject,
+typedef bool(*DoNewArrayFn)(JSContext *, ICNewArray_Fallback *, uint32_t, HandleObjectGroup,
                             MutableHandleValue);
 static const VMFunction DoNewArrayInfo = FunctionInfo<DoNewArrayFn>(DoNewArray, TailCall);
 
 bool
 ICNewArray_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
 {
     EmitRestoreTailCallReg(masm);
 
@@ -2056,18 +2056,18 @@ ICCompare_ObjectWithUndefined::Compiler:
     if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE) {
         // obj !== undefined for all objects.
         masm.moveValue(BooleanValue(op == JSOP_STRICTNE), R0);
         EmitReturnFromIC(masm);
     } else {
         // obj != undefined only where !obj->getClass()->emulatesUndefined()
         Label emulatesUndefined;
         Register obj = masm.extractObject(objectOperand, ExtractTemp0);
-        masm.loadPtr(Address(obj, JSObject::offsetOfType()), obj);
-        masm.loadPtr(Address(obj, types::TypeObject::offsetOfClasp()), obj);
+        masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), obj);
+        masm.loadPtr(Address(obj, types::ObjectGroup::offsetOfClasp()), obj);
         masm.branchTest32(Assembler::NonZero,
                           Address(obj, Class::offsetOfFlags()),
                           Imm32(JSCLASS_EMULATES_UNDEFINED),
                           &emulatesUndefined);
         masm.moveValue(BooleanValue(op == JSOP_NE), R0);
         EmitReturnFromIC(masm);
         masm.bind(&emulatesUndefined);
         masm.moveValue(BooleanValue(op == JSOP_EQ), R0);
@@ -4515,18 +4515,18 @@ static void
 LoadTypedThingLength(MacroAssembler &masm, TypedThingLayout layout, Register obj, Register result)
 {
     switch (layout) {
       case Layout_TypedArray:
         masm.unboxInt32(Address(obj, TypedArrayLayout::lengthOffset()), result);
         break;
       case Layout_OutlineTypedObject:
       case Layout_InlineTypedObject:
-        masm.loadPtr(Address(obj, JSObject::offsetOfType()), result);
-        masm.loadPtr(Address(result, types::TypeObject::offsetOfAddendum()), result);
+        masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), result);
+        masm.loadPtr(Address(result, types::ObjectGroup::offsetOfAddendum()), result);
         masm.unboxInt32(Address(result, ArrayTypeDescr::offsetOfLength()), result);
         break;
       default:
         MOZ_CRASH();
     }
 }
 
 static void
@@ -4826,23 +4826,23 @@ SetElemDenseAddHasSameShapes(ICSetElem_D
 static bool
 DenseSetElemStubExists(JSContext *cx, ICStub::Kind kind, ICSetElem_Fallback *stub, HandleObject obj)
 {
     MOZ_ASSERT(kind == ICStub::SetElem_Dense || kind == ICStub::SetElem_DenseAdd);
 
     for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
         if (kind == ICStub::SetElem_Dense && iter->isSetElem_Dense()) {
             ICSetElem_Dense *dense = iter->toSetElem_Dense();
-            if (obj->lastProperty() == dense->shape() && obj->getType(cx) == dense->type())
+            if (obj->lastProperty() == dense->shape() && obj->getGroup(cx) == dense->group())
                 return true;
         }
 
         if (kind == ICStub::SetElem_DenseAdd && iter->isSetElem_DenseAdd()) {
             ICSetElem_DenseAdd *dense = iter->toSetElem_DenseAdd();
-            if (obj->getType(cx) == dense->type() && SetElemDenseAddHasSameShapes(dense, obj))
+            if (obj->getGroup(cx) == dense->group() && SetElemDenseAddHasSameShapes(dense, obj))
                 return true;
         }
     }
     return false;
 }
 
 static bool
 TypedArraySetElemStubExists(ICSetElem_Fallback *stub, HandleObject obj, bool expectOOB)
@@ -5018,40 +5018,40 @@ DoSetElemFallback(JSContext *cx, Baselin
         bool addingCase;
         size_t protoDepth;
 
         if (CanOptimizeDenseSetElem(&obj->as<NativeObject>(), index.toInt32(),
                                     oldShape, oldCapacity, oldInitLength,
                                     &addingCase, &protoDepth))
         {
             RootedShape shape(cx, obj->lastProperty());
-            RootedTypeObject type(cx, obj->getType(cx));
-            if (!type)
+            RootedObjectGroup group(cx, obj->getGroup(cx));
+            if (!group)
                 return false;
 
             if (addingCase && !DenseSetElemStubExists(cx, ICStub::SetElem_DenseAdd, stub, obj)) {
                 JitSpew(JitSpew_BaselineIC,
                         "  Generating SetElem_DenseAdd stub "
-                        "(shape=%p, type=%p, protoDepth=%u)",
-                        obj->lastProperty(), type.get(), protoDepth);
+                        "(shape=%p, group=%p, protoDepth=%u)",
+                        obj->lastProperty(), group.get(), protoDepth);
                 ICSetElemDenseAddCompiler compiler(cx, obj, protoDepth);
                 ICUpdatedStub *denseStub = compiler.getStub(compiler.getStubSpace(script));
                 if (!denseStub)
                     return false;
                 if (!denseStub->addUpdateStubForValue(cx, script, obj, JSID_VOIDHANDLE, rhs))
                     return false;
 
                 stub->addNewStub(denseStub);
             } else if (!addingCase &&
                        !DenseSetElemStubExists(cx, ICStub::SetElem_Dense, stub, obj))
             {
                 JitSpew(JitSpew_BaselineIC,
-                        "  Generating SetElem_Dense stub (shape=%p, type=%p)",
-                        obj->lastProperty(), type.get());
-                ICSetElem_Dense::Compiler compiler(cx, shape, type);
+                        "  Generating SetElem_Dense stub (shape=%p, group=%p)",
+                        obj->lastProperty(), group.get());
+                ICSetElem_Dense::Compiler compiler(cx, shape, group);
                 ICUpdatedStub *denseStub = compiler.getStub(compiler.getStubSpace(script));
                 if (!denseStub)
                     return false;
                 if (!denseStub->addUpdateStubForValue(cx, script, obj, JSID_VOIDHANDLE, rhs))
                     return false;
 
                 stub->addNewStub(denseStub);
             }
@@ -5188,20 +5188,20 @@ ICSetElem_Dense::Compiler::generateStubC
     // Stow both R0 and R1 (object and key)
     // But R0 and R1 still hold their values.
     EmitStowICValues(masm, 2);
 
     // We may need to free up some registers.
     regs = availableGeneralRegs(0);
     regs.take(R0);
 
-    // Guard that the type object matches.
+    // Guard that the object group matches.
     Register typeReg = regs.takeAny();
-    masm.loadPtr(Address(BaselineStubReg, ICSetElem_Dense::offsetOfType()), typeReg);
-    masm.branchPtr(Assembler::NotEqual, Address(obj, JSObject::offsetOfType()), typeReg,
+    masm.loadPtr(Address(BaselineStubReg, ICSetElem_Dense::offsetOfGroup()), typeReg);
+    masm.branchPtr(Assembler::NotEqual, Address(obj, JSObject::offsetOfGroup()), typeReg,
                    &failureUnstow);
     regs.add(typeReg);
 
     // Stack is now: { ..., rhs-value, object-value, key-value, maybe?-RET-ADDR }
     // Load rhs-value in to R0
     masm.loadValue(Address(BaselineStackReg, 2 * sizeof(Value) + ICStackValueOffset), R0);
 
     // Call the type-update stub.
@@ -5356,20 +5356,20 @@ ICSetElemDenseAddCompiler::generateStubC
     // Stow both R0 and R1 (object and key)
     // But R0 and R1 still hold their values.
     EmitStowICValues(masm, 2);
 
     // We may need to free up some registers.
     regs = availableGeneralRegs(0);
     regs.take(R0);
 
-    // Guard that the type object matches.
+    // Guard that the object group matches.
     Register typeReg = regs.takeAny();
-    masm.loadPtr(Address(BaselineStubReg, ICSetElem_DenseAdd::offsetOfType()), typeReg);
-    masm.branchPtr(Assembler::NotEqual, Address(obj, JSObject::offsetOfType()), typeReg,
+    masm.loadPtr(Address(BaselineStubReg, ICSetElem_DenseAdd::offsetOfGroup()), typeReg);
+    masm.branchPtr(Assembler::NotEqual, Address(obj, JSObject::offsetOfGroup()), typeReg,
                    &failureUnstow);
     regs.add(typeReg);
 
     // Shape guard objects on the proto chain.
     scratchReg = regs.takeAny();
     Register protoReg = regs.takeAny();
     for (size_t i = 0; i < protoChainDepth_; i++) {
         masm.loadObjProto(i == 0 ? obj : protoReg, protoReg);
@@ -6354,20 +6354,20 @@ UpdateExistingGenerationalDOMProxyStub(I
         }
     }
     return false;
 }
 
 static bool
 HasUnanalyzedNewScript(JSObject *obj)
 {
-    if (obj->hasSingletonType())
-        return false;
-
-    types::TypeNewScript *newScript = obj->type()->newScript();
+    if (obj->isSingleton())
+        return false;
+
+    types::TypeNewScript *newScript = obj->group()->newScript();
     if (newScript && !newScript->analyzed())
         return true;
 
     return false;
 }
 
 static void
 StripPreliminaryObjectStubs(JSContext *cx, ICFallbackStub *stub)
@@ -6624,17 +6624,17 @@ TryAttachUnboxedGetPropStub(JSContext *c
     Rooted<UnboxedPlainObject *> obj(cx, &val.toObject().as<UnboxedPlainObject>());
 
     const UnboxedLayout::Property *property = obj->layout().lookup(name);
     if (!property)
         return true;
 
     ICStub *monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
 
-    ICGetProp_Unboxed::Compiler compiler(cx, monitorStub, obj->type(),
+    ICGetProp_Unboxed::Compiler compiler(cx, monitorStub, obj->group(),
                                          property->offset + UnboxedPlainObject::offsetOfData(),
                                          property->type);
     ICStub *newStub = compiler.getStub(compiler.getStubSpace(script));
     if (!newStub)
         return false;
     stub->addNewStub(newStub);
 
     StripPreliminaryObjectStubs(cx, stub);
@@ -7809,21 +7809,21 @@ bool
 ICGetProp_Unboxed::Compiler::generateStubCode(MacroAssembler &masm)
 {
     Label failure;
 
     GeneralRegisterSet regs(availableGeneralRegs(1));
 
     Register scratch = regs.takeAnyExcluding(BaselineTailCallReg);
 
-    // Object and type guard.
+    // Object and group guard.
     masm.branchTestObject(Assembler::NotEqual, R0, &failure);
     Register object = masm.extractObject(R0, ExtractTemp0);
-    masm.loadPtr(Address(BaselineStubReg, ICGetProp_Unboxed::offsetOfType()), scratch);
-    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfType()), scratch,
+    masm.loadPtr(Address(BaselineStubReg, ICGetProp_Unboxed::offsetOfGroup()), scratch);
+    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfGroup()), scratch,
                    &failure);
 
     // Get the address being read from.
     masm.load32(Address(BaselineStubReg, ICGetProp_Unboxed::offsetOfFieldOffset()), scratch);
 
     masm.loadUnboxedProperty(BaseIndex(object, scratch, TimesOne), fieldType_, TypedOrValueRegister(R0));
 
     // Only monitor the result if its type might change.
@@ -7927,17 +7927,17 @@ BaselineScript::noteAccessedGetter(uint3
 //
 // SetProp_Fallback
 //
 
 // Attach an optimized property set stub for a SETPROP/SETGNAME/SETNAME op on a
 // value property.
 static bool
 TryAttachSetValuePropStub(JSContext *cx, HandleScript script, jsbytecode *pc, ICSetProp_Fallback *stub,
-                          HandleObject obj, HandleShape oldShape, HandleTypeObject oldType, uint32_t oldSlots,
+                          HandleObject obj, HandleShape oldShape, HandleObjectGroup oldGroup, uint32_t oldSlots,
                           HandlePropertyName name, HandleId id, HandleValue rhs, bool *attached)
 {
     MOZ_ASSERT(!*attached);
 
     if (!obj->isNative() || obj->watched())
         return true;
 
     RootedShape shape(cx);
@@ -7950,27 +7950,27 @@ TryAttachSetValuePropStub(JSContext *cx,
         // Don't attach if proto chain depth is too high.
         if (chainDepth > ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH)
             return true;
 
         // Don't attach if we are adding a property to an object which the new
         // script properties analysis hasn't been performed for yet, as there
         // may be a shape change required here afterwards. Pretend we attached
         // a stub, though, so the access is not marked as unoptimizable.
-        if (oldType->newScript() && !oldType->newScript()->analyzed()) {
+        if (oldGroup->newScript() && !oldGroup->newScript()->analyzed()) {
             *attached = true;
             return true;
         }
 
         bool isFixedSlot;
         uint32_t offset;
         GetFixedOrDynamicSlotOffset(&obj->as<NativeObject>(), shape->slot(), &isFixedSlot, &offset);
 
         JitSpew(JitSpew_BaselineIC, "  Generating SetProp(NativeObject.ADD) stub");
-        ICSetPropNativeAddCompiler compiler(cx, obj, oldShape, oldType,
+        ICSetPropNativeAddCompiler compiler(cx, obj, oldShape, oldGroup,
                                             chainDepth, isFixedSlot, offset);
         ICUpdatedStub *newStub = compiler.getStub(compiler.getStubSpace(script));
         if (!newStub)
             return false;
         if (!newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
             return false;
 
         stub->addNewStub(newStub);
@@ -8104,17 +8104,17 @@ TryAttachUnboxedSetPropStub(JSContext *c
 
     if (!obj->is<UnboxedPlainObject>())
         return true;
 
     const UnboxedLayout::Property *property = obj->as<UnboxedPlainObject>().layout().lookup(id);
     if (!property)
         return true;
 
-    ICSetProp_Unboxed::Compiler compiler(cx, obj->type(),
+    ICSetProp_Unboxed::Compiler compiler(cx, obj->group(),
                                          property->offset + UnboxedPlainObject::offsetOfData(),
                                          property->type);
     ICUpdatedStub *newStub = compiler.getStub(compiler.getStubSpace(script));
     if (!newStub)
         return false;
     if (compiler.needsUpdateStubs() && !newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
         return false;
 
@@ -8149,17 +8149,17 @@ TryAttachTypedObjectSetPropStub(JSContex
         return true;
 
     Rooted<TypeDescr *> fieldDescr(cx, &structDescr->fieldDescr(fieldIndex));
     if (!fieldDescr->is<SimpleTypeDescr>())
         return true;
 
     uint32_t fieldOffset = structDescr->fieldOffset(fieldIndex);
 
-    ICSetProp_TypedObject::Compiler compiler(cx, obj->lastProperty(), obj->type(), fieldOffset,
+    ICSetProp_TypedObject::Compiler compiler(cx, obj->lastProperty(), obj->group(), fieldOffset,
                                              &fieldDescr->as<SimpleTypeDescr>());
     ICUpdatedStub *newStub = compiler.getStub(compiler.getStubSpace(script));
     if (!newStub)
         return false;
     if (compiler.needsUpdateStubs() && !newStub->addUpdateStubForValue(cx, script, obj, id, rhs))
         return false;
 
     stub->addNewStub(newStub);
@@ -8196,18 +8196,18 @@ DoSetPropFallback(JSContext *cx, Baselin
     else
         name = script->getName(pc);
     RootedId id(cx, NameToId(name));
 
     RootedObject obj(cx, ToObjectFromStack(cx, lhs));
     if (!obj)
         return false;
     RootedShape oldShape(cx, obj->lastProperty());
-    RootedTypeObject oldType(cx, obj->getType(cx));
-    if (!oldType)
+    RootedObjectGroup oldGroup(cx, obj->getGroup(cx));
+    if (!oldGroup)
         return false;
     uint32_t oldSlots = obj->isNative() ? obj->as<NativeObject>().numDynamicSlots() : 0;
 
     bool attached = false;
     // There are some reasons we can fail to attach a stub that are temporary.
     // We want to avoid calling noteUnoptimizableAccess() if the reason we
     // failed to attach a stub is one of those temporary reasons, since we might
     // end up attaching a stub for the exact same access later.
@@ -8257,17 +8257,17 @@ DoSetPropFallback(JSContext *cx, Baselin
     if (stub->numOptimizedStubs() >= ICSetProp_Fallback::MAX_OPTIMIZED_STUBS) {
         // TODO: Discard all stubs in this IC and replace with generic setprop stub.
         return true;
     }
 
     if (!attached &&
         lhs.isObject() &&
         !TryAttachSetValuePropStub(cx, script, pc, stub, obj, oldShape,
-                                   oldType, oldSlots, name, id, rhs, &attached))
+                                   oldGroup, oldSlots, name, id, rhs, &attached))
     {
         return false;
     }
     if (attached)
         return true;
 
     if (!attached &&
         lhs.isObject() &&
@@ -8358,19 +8358,19 @@ ICSetProp_Native::Compiler::generateStub
     GeneralRegisterSet regs(availableGeneralRegs(2));
     Register scratch = regs.takeAny();
 
     // Unbox and shape guard.
     Register objReg = masm.extractObject(R0, ExtractTemp0);
     masm.loadPtr(Address(BaselineStubReg, ICSetProp_Native::offsetOfShape()), scratch);
     masm.branchTestObjShape(Assembler::NotEqual, objReg, scratch, &failure);
 
-    // Guard that the type object matches.
-    masm.loadPtr(Address(BaselineStubReg, ICSetProp_Native::offsetOfType()), scratch);
-    masm.branchPtr(Assembler::NotEqual, Address(objReg, JSObject::offsetOfType()), scratch,
+    // Guard that the object group matches.
+    masm.loadPtr(Address(BaselineStubReg, ICSetProp_Native::offsetOfGroup()), scratch);
+    masm.branchPtr(Assembler::NotEqual, Address(objReg, JSObject::offsetOfGroup()), scratch,
                    &failure);
 
     // Stow both R0 and R1 (object and value).
     EmitStowICValues(masm, 2);
 
     // Type update stub expects the value to check in R0.
     masm.moveValue(R1, R0);
 
@@ -8454,19 +8454,19 @@ ICSetPropNativeAddCompiler::generateStub
     GeneralRegisterSet regs(availableGeneralRegs(2));
     Register scratch = regs.takeAny();
 
     // Unbox and guard against old shape.
     Register objReg = masm.extractObject(R0, ExtractTemp0);
     masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAddImpl<0>::offsetOfShape(0)), scratch);
     masm.branchTestObjShape(Assembler::NotEqual, objReg, scratch, &failure);
 
-    // Guard that the type object matches.
-    masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAdd::offsetOfType()), scratch);
-    masm.branchPtr(Assembler::NotEqual, Address(objReg, JSObject::offsetOfType()), scratch,
+    // Guard that the object group matches.
+    masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAdd::offsetOfGroup()), scratch);
+    masm.branchPtr(Assembler::NotEqual, Address(objReg, JSObject::offsetOfGroup()), scratch,
                    &failure);
 
     // Stow both R0 and R1 (object and value).
     EmitStowICValues(masm, 2);
 
     regs = availableGeneralRegs(1);
     scratch = regs.takeAny();
     Register protoReg = regs.takeAny();
@@ -8495,39 +8495,39 @@ ICSetPropNativeAddCompiler::generateStub
     scratch = regs.takeAny();
 
     // Changing object shape.  Write the object's new shape.
     Address shapeAddr(objReg, JSObject::offsetOfShape());
     EmitPreBarrier(masm, shapeAddr, MIRType_Shape);
     masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAdd::offsetOfNewShape()), scratch);
     masm.storePtr(scratch, shapeAddr);
 
-    // Try to change the object's type.
-    Label noTypeChange;
-
-    // Check if the cache has a new type to change to.
-    masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAdd::offsetOfNewType()), scratch);
-    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &noTypeChange);
-
-    // Check if the old type still has a newScript.
-    masm.loadPtr(Address(objReg, JSObject::offsetOfType()), scratch);
+    // Try to change the object's group.
+    Label noGroupChange;
+
+    // Check if the cache has a new group to change to.
+    masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAdd::offsetOfNewGroup()), scratch);
+    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &noGroupChange);
+
+    // Check if the old group still has a newScript.
+    masm.loadPtr(Address(objReg, JSObject::offsetOfGroup()), scratch);
     masm.branchPtr(Assembler::Equal,
-                   Address(scratch, types::TypeObject::offsetOfAddendum()),
+                   Address(scratch, types::ObjectGroup::offsetOfAddendum()),
                    ImmWord(0),
-                   &noTypeChange);
-
-    // Reload the new type from the cache.
-    masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAdd::offsetOfNewType()), scratch);
-
-    // Change the object's type.
-    Address typeAddr(objReg, JSObject::offsetOfType());
-    EmitPreBarrier(masm, typeAddr, MIRType_TypeObject);
-    masm.storePtr(scratch, typeAddr);
-
-    masm.bind(&noTypeChange);
+                   &noGroupChange);
+
+    // Reload the new group from the cache.
+    masm.loadPtr(Address(BaselineStubReg, ICSetProp_NativeAdd::offsetOfNewGroup()), scratch);
+
+    // Change the object's group.
+    Address groupAddr(objReg, JSObject::offsetOfGroup());
+    EmitPreBarrier(masm, groupAddr, MIRType_ObjectGroup);
+    masm.storePtr(scratch, groupAddr);
+
+    masm.bind(&noGroupChange);
 
     Register holderReg;
     regs.add(R0);
     regs.takeUnchecked(objReg);
     if (isFixedSlot_) {
         holderReg = objReg;
     } else {
         holderReg = regs.takeAny();
@@ -8568,20 +8568,20 @@ ICSetProp_Unboxed::Compiler::generateStu
     Label failure;
 
     // Guard input is an object.
     masm.branchTestObject(Assembler::NotEqual, R0, &failure);
 
     GeneralRegisterSet regs(availableGeneralRegs(2));
     Register scratch = regs.takeAny();
 
-    // Unbox and type guard.
+    // Unbox and group guard.
     Register object = masm.extractObject(R0, ExtractTemp0);
-    masm.loadPtr(Address(BaselineStubReg, ICSetProp_Unboxed::offsetOfType()), scratch);
-    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfType()), scratch,
+    masm.loadPtr(Address(BaselineStubReg, ICSetProp_Unboxed::offsetOfGroup()), scratch);
+    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfGroup()), scratch,
                    &failure);
 
     if (needsUpdateStubs()) {
         // Stow both R0 and R1 (object and value).
         masm.push(object);
         masm.push(BaselineStubReg);
         EmitStowICValues(masm, 2);
 
@@ -8644,19 +8644,19 @@ ICSetProp_TypedObject::Compiler::generat
     GeneralRegisterSet regs(availableGeneralRegs(2));
     Register scratch = regs.takeAny();
 
     // Unbox and shape guard.
     Register object = masm.extractObject(R0, ExtractTemp0);
     masm.loadPtr(Address(BaselineStubReg, ICSetProp_TypedObject::offsetOfShape()), scratch);
     masm.branchTestObjShape(Assembler::NotEqual, object, scratch, &failure);
 
-    // Guard that the type object matches.
-    masm.loadPtr(Address(BaselineStubReg, ICSetProp_TypedObject::offsetOfType()), scratch);
-    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfType()), scratch,
+    // Guard that the object group matches.
+    masm.loadPtr(Address(BaselineStubReg, ICSetProp_TypedObject::offsetOfGroup()), scratch);
+    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfGroup()), scratch,
                    &failure);
 
     if (needsUpdateStubs()) {
         // Stow both R0 and R1 (object and value).
         masm.push(object);
         masm.push(BaselineStubReg);
         EmitStowICValues(masm, 2);
 
@@ -9034,56 +9034,56 @@ GetTemplateObjectForNative(JSContext *cx
         if (args.length() != 1)
             count = args.length();
         else if (args.length() == 1 && args[0].isInt32() && args[0].toInt32() >= 0)
             count = args[0].toInt32();
         res.set(NewDenseUnallocatedArray(cx, count, nullptr, TenuredObject));
         if (!res)
             return false;
 
-        types::TypeObject *type = types::TypeScript::InitObject(cx, script, pc, JSProto_Array);
-        if (!type)
-            return false;
-        res->setType(type);
+        types::ObjectGroup *group = types::TypeScript::InitGroup(cx, script, pc, JSProto_Array);
+        if (!group)
+            return false;
+        res->setGroup(group);
         return true;
     }
 
     if (native == intrinsic_NewDenseArray) {
         res.set(NewDenseUnallocatedArray(cx, 0, nullptr, TenuredObject));
         if (!res)
             return false;
 
-        types::TypeObject *type = types::TypeScript::InitObject(cx, script, pc, JSProto_Array);
-        if (!type)
-            return false;
-        res->setType(type);
+        types::ObjectGroup *group = types::TypeScript::InitGroup(cx, script, pc, JSProto_Array);
+        if (!group)
+            return false;
+        res->setGroup(group);
         return true;
     }
 
     if (native == js::array_concat) {
         if (args.thisv().isObject() && args.thisv().toObject().is<ArrayObject>() &&
-            !args.thisv().toObject().hasSingletonType())
+            !args.thisv().toObject().isSingleton())
         {
             res.set(NewDenseEmptyArray(cx, args.thisv().toObject().getProto(), TenuredObject));
             if (!res)
                 return false;
-            res->setType(args.thisv().toObject().type());
+            res->setGroup(args.thisv().toObject().group());
             return true;
         }
     }
 
     if (native == js::str_split && args.length() == 1 && args[0].isString()) {
         res.set(NewDenseUnallocatedArray(cx, 0, nullptr, TenuredObject));
         if (!res)
             return false;
 
-        types::TypeObject *type = types::TypeScript::InitObject(cx, script, pc, JSProto_Array);
-        if (!type)
-            return false;
-        res->setType(type);
+        types::ObjectGroup *group = types::TypeScript::InitGroup(cx, script, pc, JSProto_Array);
+        if (!group)
+            return false;
+        res->setGroup(group);
         return true;
     }
 
     if (native == js_String) {
         RootedString emptyString(cx, cx->runtime()->emptyString);
         res.set(StringObject::create(cx, emptyString, TenuredObject));
         if (!res)
             return false;
@@ -9151,19 +9151,19 @@ IsOptimizableCallStringSplit(Value calle
         return false;
 
     return true;
 }
 
 static bool
 TryAttachCallStub(JSContext *cx, ICCall_Fallback *stub, HandleScript script, jsbytecode *pc,
                   JSOp op, uint32_t argc, Value *vp, bool constructing, bool isSpread,
-                  bool useNewType)
-{
-    if (useNewType || op == JSOP_EVAL || op == JSOP_STRICTEVAL)
+                  bool createSingleton)
+{
+    if (createSingleton || op == JSOP_EVAL || op == JSOP_STRICTEVAL)
         return true;
 
     if (stub->numOptimizedStubs() >= ICCall_Fallback::MAX_OPTIMIZED_STUBS) {
         // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
         // But for now we just bail.
         return true;
     }
 
@@ -9184,17 +9184,17 @@ TryAttachCallStub(JSContext *cx, ICCall_
 
     RootedObject obj(cx, &callee.toObject());
     if (!obj->is<JSFunction>()) {
         // Try to attach a stub for a call/construct hook on the object.
         // Ignore proxies, which are special cased by callHook/constructHook.
         if (obj->is<ProxyObject>())
             return true;
         if (JSNative hook = constructing ? obj->constructHook() : obj->callHook()) {
-            if (op != JSOP_FUNAPPLY && !isSpread && !useNewType) {
+            if (op != JSOP_FUNAPPLY && !isSpread && !createSingleton) {
                 RootedObject templateObject(cx);
                 CallArgs args = CallArgsFromVp(argc, vp);
                 if (!GetTemplateObjectForClassHook(cx, hook, args, &templateObject))
                     return false;
 
                 JitSpew(JitSpew_BaselineIC, "  Generating Call_ClassHook stub");
                 ICCall_ClassHook::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
                                                     obj->getClass(), hook, templateObject,
@@ -9270,17 +9270,17 @@ TryAttachCallStub(JSContext *cx, ICCall_
             if (thisObject->is<PlainObject>() || thisObject->is<UnboxedPlainObject>()) {
                 templateObject = thisObject;
 
                 // If we are calling a constructor for which the new script
                 // properties analysis has not been performed yet, don't attach a
                 // stub. After the analysis is performed, CreateThisForFunction may
                 // start returning objects with a different type, and the Ion
                 // compiler might get confused.
-                types::TypeNewScript *newScript = templateObject->type()->newScript();
+                types::TypeNewScript *newScript = templateObject->group()->newScript();
                 if (newScript && !newScript->analyzed()) {
                     // Clear the object just created from the preliminary objects
                     // on the TypeNewScript, as it will not be used or filled in by
                     // running code.
                     newScript->unregisterNewObject(&templateObject->as<PlainObject>());
                     return true;
                 }
             }
@@ -9368,25 +9368,25 @@ TryAttachCallStub(JSContext *cx, ICCall_
 
 static bool
 CopyArray(JSContext *cx, HandleArrayObject obj, MutableHandleValue result)
 {
     MOZ_ASSERT(obj->is<ArrayObject>());
     uint32_t length = obj->as<ArrayObject>().length();
     MOZ_ASSERT(obj->getDenseInitializedLength() == length);
 
-    RootedTypeObject type(cx, obj->getType(cx));
-    if (!type)
+    RootedObjectGroup group(cx, obj->getGroup(cx));
+    if (!group)
         return false;
 
     RootedArrayObject newObj(cx, NewDenseFullyAllocatedArray(cx, length, nullptr, TenuredObject));
     if (!newObj)
         return false;
 
-    newObj->setType(type);
+    newObj->setGroup(group);
     newObj->setDenseInitializedLength(length);
     newObj->initDenseElements(0, obj->getDenseElements(), length);
     result.setObject(*newObj);
     return true;
 }
 
 static bool
 TryAttachStringSplit(JSContext *cx, ICCall_Fallback *stub, HandleScript script,
@@ -9468,22 +9468,22 @@ DoCallFallback(JSContext *cx, BaselineFr
 
     // Handle funapply with JSOP_ARGUMENTS
     if (op == JSOP_FUNAPPLY && argc == 2 && args[1].isMagic(JS_OPTIMIZED_ARGUMENTS)) {
         CallArgs callArgs = CallArgsFromVp(argc, vp);
         if (!GuardFunApplyArgumentsOptimization(cx, frame, callArgs))
             return false;
     }
 
-    // Compute construcing and useNewType flags.
+    // Compute the constructing and createSingleton flags.
     bool constructing = (op == JSOP_NEW);
-    bool newType = types::UseNewType(cx, script, pc);
+    bool createSingleton = types::UseSingletonForNewObject(cx, script, pc);
 
     // Try attaching a call stub.
-    if (!TryAttachCallStub(cx, stub, script, pc, op, argc, vp, constructing, false, newType))
+    if (!TryAttachCallStub(cx, stub, script, pc, op, argc, vp, constructing, false, createSingleton))
         return false;
 
     if (!MaybeCloneFunctionAtCallsite(cx, &callee, script, pc))
         return false;
 
     if (op == JSOP_NEW) {
         if (!InvokeConstructor(cx, callee, argc, args, res))
             return false;
@@ -11465,29 +11465,29 @@ ICRetSub_Resume::Compiler::generateStubC
     return true;
 }
 
 ICTypeMonitor_SingleObject::ICTypeMonitor_SingleObject(JitCode *stubCode, HandleObject obj)
   : ICStub(TypeMonitor_SingleObject, stubCode),
     obj_(obj)
 { }
 
-ICTypeMonitor_TypeObject::ICTypeMonitor_TypeObject(JitCode *stubCode, HandleTypeObject type)
-  : ICStub(TypeMonitor_TypeObject, stubCode),
-    type_(type)
+ICTypeMonitor_ObjectGroup::ICTypeMonitor_ObjectGroup(JitCode *stubCode, HandleObjectGroup group)
+  : ICStub(TypeMonitor_ObjectGroup, stubCode),
+    group_(group)
 { }
 
 ICTypeUpdate_SingleObject::ICTypeUpdate_SingleObject(JitCode *stubCode, HandleObject obj)
   : ICStub(TypeUpdate_SingleObject, stubCode),
     obj_(obj)
 { }
 
-ICTypeUpdate_TypeObject::ICTypeUpdate_TypeObject(JitCode *stubCode, HandleTypeObject type)
-  : ICStub(TypeUpdate_TypeObject, stubCode),
-    type_(type)
+ICTypeUpdate_ObjectGroup::ICTypeUpdate_ObjectGroup(JitCode *stubCode, HandleObjectGroup group)
+  : ICStub(TypeUpdate_ObjectGroup, stubCode),
+    group_(group)
 { }
 
 ICGetElemNativeStub::ICGetElemNativeStub(ICStub::Kind kind, JitCode *stubCode,
                                          ICStub *firstMonitorStub,
                                          HandleShape shape, HandlePropertyName name,
                                          AccessType acctype, bool needsAtomize)
   : ICMonitoredStub(kind, stubCode, firstMonitorStub),
     shape_(shape),
@@ -11586,40 +11586,40 @@ ICGetElem_TypedArray::ICGetElem_TypedArr
 
 /* static */ ICGetElem_Arguments *
 ICGetElem_Arguments::Clone(JSContext *, ICStubSpace *space, ICStub *firstMonitorStub,
                            ICGetElem_Arguments &other)
 {
     return New(space, other.jitCode(), firstMonitorStub, other.which());
 }
 
-ICSetElem_Dense::ICSetElem_Dense(JitCode *stubCode, HandleShape shape, HandleTypeObject type)
+ICSetElem_Dense::ICSetElem_Dense(JitCode *stubCode, HandleShape shape, HandleObjectGroup group)
   : ICUpdatedStub(SetElem_Dense, stubCode),
     shape_(shape),
-    type_(type)
+    group_(group)
 { }
 
-ICSetElem_DenseAdd::ICSetElem_DenseAdd(JitCode *stubCode, types::TypeObject *type,
+ICSetElem_DenseAdd::ICSetElem_DenseAdd(JitCode *stubCode, types::ObjectGroup *group,
                                        size_t protoChainDepth)
   : ICUpdatedStub(SetElem_DenseAdd, stubCode),
-    type_(type)
+    group_(group)
 {
     MOZ_ASSERT(protoChainDepth <= MAX_PROTO_CHAIN_DEPTH);
     extra_ = protoChainDepth;
 }
 
 template <size_t ProtoChainDepth>
 ICUpdatedStub *
 ICSetElemDenseAddCompiler::getStubSpecific(ICStubSpace *space, const AutoShapeVector *shapes)
 {
-    RootedTypeObject objType(cx, obj_->getType(cx));
-    if (!objType)
+    RootedObjectGroup group(cx, obj_->getGroup(cx));
+    if (!group)
         return nullptr;
     Rooted<JitCode *> stubCode(cx, getStubCode());
-    return ICSetElem_DenseAddImpl<ProtoChainDepth>::New(space, stubCode, objType, shapes);
+    return ICSetElem_DenseAddImpl<ProtoChainDepth>::New(space, stubCode, group, shapes);
 }
 
 ICSetElem_TypedArray::ICSetElem_TypedArray(JitCode *stubCode, HandleShape shape, Scalar::Type type,
                                            bool expectOutOfBounds)
   : ICStub(SetElem_TypedArray, stubCode),
     shape_(shape)
 {
     extra_ = uint8_t(type);
@@ -11798,77 +11798,77 @@ ICGetProp_CallNativePrototype::Clone(JSC
     RootedShape receiverShape(cx, other.receiverShape_);
     RootedObject holder(cx, other.holder_);
     RootedShape holderShape(cx, other.holderShape_);
     RootedFunction getter(cx, other.getter_);
     return New(space, other.jitCode(), firstMonitorStub, receiverShape, holder,
                holderShape, getter, other.pcOffset_);
 }
 
-ICSetProp_Native::ICSetProp_Native(JitCode *stubCode, HandleTypeObject type, HandleShape shape,
+ICSetProp_Native::ICSetProp_Native(JitCode *stubCode, HandleObjectGroup group, HandleShape shape,
                                    uint32_t offset)
   : ICUpdatedStub(SetProp_Native, stubCode),
-    type_(type),
+    group_(group),
     shape_(shape),
     offset_(offset)
 { }
 
 ICSetProp_Native *
 ICSetProp_Native::Compiler::getStub(ICStubSpace *space)
 {
-    RootedTypeObject type(cx, obj_->getType(cx));
-    if (!type)
+    RootedObjectGroup group(cx, obj_->getGroup(cx));
+    if (!group)
         return nullptr;
 
     RootedShape shape(cx, obj_->lastProperty());
-    ICSetProp_Native *stub = ICSetProp_Native::New(space, getStubCode(), type, shape, offset_);
+    ICSetProp_Native *stub = ICSetProp_Native::New(space, getStubCode(), group, shape, offset_);
     if (!stub || !stub->initUpdatingChain(cx, space))
         return nullptr;
     return stub;
 }
 
-ICSetProp_NativeAdd::ICSetProp_NativeAdd(JitCode *stubCode, HandleTypeObject type,
+ICSetProp_NativeAdd::ICSetProp_NativeAdd(JitCode *stubCode, HandleObjectGroup group,
                                          size_t protoChainDepth,
                                          HandleShape newShape,
-                                         HandleTypeObject newType,
+                                         HandleObjectGroup newGroup,
                                          uint32_t offset)
   : ICUpdatedStub(SetProp_NativeAdd, stubCode),
-    type_(type),
+    group_(group),
     newShape_(newShape),
-    newType_(newType),
+    newGroup_(newGroup),
     offset_(offset)
 {
     MOZ_ASSERT(protoChainDepth <= MAX_PROTO_CHAIN_DEPTH);
     extra_ = protoChainDepth;
 }
 
 template <size_t ProtoChainDepth>
 ICSetProp_NativeAddImpl<ProtoChainDepth>::ICSetProp_NativeAddImpl(JitCode *stubCode,
-                                                                  HandleTypeObject type,
+                                                                  HandleObjectGroup group,
                                                                   const AutoShapeVector *shapes,
                                                                   HandleShape newShape,
-                                                                  HandleTypeObject newType,
+                                                                  HandleObjectGroup newGroup,
                                                                   uint32_t offset)
-  : ICSetProp_NativeAdd(stubCode, type, ProtoChainDepth, newShape, newType, offset)
+  : ICSetProp_NativeAdd(stubCode, group, ProtoChainDepth, newShape, newGroup, offset)
 {
     MOZ_ASSERT(shapes->length() == NumShapes);
     for (size_t i = 0; i < NumShapes; i++)
         shapes_[i].init((*shapes)[i]);
 }
 
 ICSetPropNativeAddCompiler::ICSetPropNativeAddCompiler(JSContext *cx, HandleObject obj,
                                                        HandleShape oldShape,
-                                                       HandleTypeObject oldType,
+                                                       HandleObjectGroup oldGroup,
                                                        size_t protoChainDepth,
                                                        bool isFixedSlot,
                                                        uint32_t offset)
   : ICStubCompiler(cx, ICStub::SetProp_NativeAdd),
     obj_(cx, obj),
     oldShape_(cx, oldShape),
-    oldType_(cx, oldType),
+    oldGroup_(cx, oldGroup),
     protoChainDepth_(protoChainDepth),
     isFixedSlot_(isFixedSlot),
     offset_(offset)
 {
     MOZ_ASSERT(protoChainDepth_ <= ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH);
 }
 
 ICSetPropCallSetter::ICSetPropCallSetter(Kind kind, JitCode *stubCode, HandleShape shape,
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -137,18 +137,18 @@ namespace jit {
 //
 // TypeUpdate ICs
 // --------------
 // Update ICs update heap typesets and monitor the input types of setter operations
 // (setelem, setprop inputs, etc.).  Unlike monitor ICs, they are not shared
 // between stubs on an IC, but instead are kept track of on a per-stub basis.
 //
 // This is because the main stubs for the operation will each identify a potentially
-// different TypeObject to update.  New input types must be tracked on a typeobject-to-
-// typeobject basis.
+// different ObjectGroup to update.  New input types must be tracked on a group-to-
+// group basis.
 //
 // Type-update ICs cannot be called in tail position (they must return to the
 // the stub that called them so that the stub may continue to perform its original
 // purpose).  This means that any VMCall to perform a manual type update from C++ must be
 // done from within the main IC stub.  This necessitates that the stub enter a
 // "BaselineStub" frame before making the call.
 //
 // If the type-update IC chain could itself make the VMCall, then the BaselineStub frame
@@ -331,22 +331,22 @@ class ICEntry
 };
 
 // List of baseline IC stub kinds.
 #define IC_STUB_KIND_LIST(_)    \
     _(WarmUpCounter_Fallback)   \
                                 \
     _(TypeMonitor_Fallback)     \
     _(TypeMonitor_SingleObject) \
-    _(TypeMonitor_TypeObject)   \
+    _(TypeMonitor_ObjectGroup)  \
     _(TypeMonitor_PrimitiveSet) \
                                 \
     _(TypeUpdate_Fallback)      \
     _(TypeUpdate_SingleObject)  \
-    _(TypeUpdate_TypeObject)    \
+    _(TypeUpdate_ObjectGroup)   \
     _(TypeUpdate_PrimitiveSet)  \
                                 \
     _(This_Fallback)            \
                                 \
     _(NewArray_Fallback)        \
     _(NewObject_Fallback)       \
                                 \
     _(Compare_Fallback)         \
@@ -1557,54 +1557,54 @@ class ICTypeMonitor_SingleObject : publi
         { }
 
         ICTypeMonitor_SingleObject *getStub(ICStubSpace *space) {
             return ICTypeMonitor_SingleObject::New(space, getStubCode(), obj_);
         }
     };
 };
 
-class ICTypeMonitor_TypeObject : public ICStub
+class ICTypeMonitor_ObjectGroup : public ICStub
 {
     friend class ICStubSpace;
 
-    HeapPtrTypeObject type_;
-
-    ICTypeMonitor_TypeObject(JitCode *stubCode, HandleTypeObject type);
-
-  public:
-    static inline ICTypeMonitor_TypeObject *New(
-            ICStubSpace *space, JitCode *code, HandleTypeObject type)
+    HeapPtrObjectGroup group_;
+
+    ICTypeMonitor_ObjectGroup(JitCode *stubCode, HandleObjectGroup group);
+
+  public:
+    static inline ICTypeMonitor_ObjectGroup *New(
+            ICStubSpace *space, JitCode *code, HandleObjectGroup group)
     {
         if (!code)
             return nullptr;
-        return space->allocate<ICTypeMonitor_TypeObject>(code, type);
-    }
-
-    HeapPtrTypeObject &type() {
-        return type_;
-    }
-
-    static size_t offsetOfType() {
-        return offsetof(ICTypeMonitor_TypeObject, type_);
+        return space->allocate<ICTypeMonitor_ObjectGroup>(code, group);
+    }
+
+    HeapPtrObjectGroup &group() {
+        return group_;
+    }
+
+    static size_t offsetOfGroup() {
+        return offsetof(ICTypeMonitor_ObjectGroup, group_);
     }
 
     class Compiler : public ICStubCompiler {
       protected:
-        HandleTypeObject type_;
+        HandleObjectGroup group_;
         bool generateStubCode(MacroAssembler &masm);
 
       public:
-        Compiler(JSContext *cx, HandleTypeObject type)
-          : ICStubCompiler(cx, TypeMonitor_TypeObject),
-            type_(type)
+        Compiler(JSContext *cx, HandleObjectGroup group)
+          : ICStubCompiler(cx, TypeMonitor_ObjectGroup),
+            group_(group)
         { }
 
-        ICTypeMonitor_TypeObject *getStub(ICStubSpace *space) {
-            return ICTypeMonitor_TypeObject::New(space, getStubCode(), type_);
+        ICTypeMonitor_ObjectGroup *getStub(ICStubSpace *space) {
+            return ICTypeMonitor_ObjectGroup::New(space, getStubCode(), group_);
         }
     };
 };
 
 // TypeUpdate
 
 extern const VMFunction DoTypeUpdateFallbackInfo;
 
@@ -1720,55 +1720,55 @@ class ICTypeUpdate_SingleObject : public
         { }
 
         ICTypeUpdate_SingleObject *getStub(ICStubSpace *space) {
             return ICTypeUpdate_SingleObject::New(space, getStubCode(), obj_);
         }
     };
 };
 
-// Type update stub to handle a single TypeObject.
-class ICTypeUpdate_TypeObject : public ICStub
+// Type update stub to handle a single ObjectGroup.
+class ICTypeUpdate_ObjectGroup : public ICStub
 {
     friend class ICStubSpace;
 
-    HeapPtrTypeObject type_;
-
-    ICTypeUpdate_TypeObject(JitCode *stubCode, HandleTypeObject type);
-
-  public:
-    static inline ICTypeUpdate_TypeObject *New(ICStubSpace *space, JitCode *code,
-                                               HandleTypeObject type)
+    HeapPtrObjectGroup group_;
+
+    ICTypeUpdate_ObjectGroup(JitCode *stubCode, HandleObjectGroup group);
+
+  public:
+    static inline ICTypeUpdate_ObjectGroup *New(ICStubSpace *space, JitCode *code,
+                                                HandleObjectGroup group)
     {
         if (!code)
             return nullptr;
-        return space->allocate<ICTypeUpdate_TypeObject>(code, type);
-    }
-
-    HeapPtrTypeObject &type() {
-        return type_;
-    }
-
-    static size_t offsetOfType() {
-        return offsetof(ICTypeUpdate_TypeObject, type_);
+        return space->allocate<ICTypeUpdate_ObjectGroup>(code, group);
+    }
+
+    HeapPtrObjectGroup &group() {
+        return group_;
+    }
+
+    static size_t offsetOfGroup() {
+        return offsetof(ICTypeUpdate_ObjectGroup, group_);
     }
 
     class Compiler : public ICStubCompiler {
       protected:
-        HandleTypeObject type_;
+        HandleObjectGroup group_;
         bool generateStubCode(MacroAssembler &masm);
 
       public:
-        Compiler(JSContext *cx, HandleTypeObject type)
-          : ICStubCompiler(cx, TypeUpdate_TypeObject),
-            type_(type)
+        Compiler(JSContext *cx, HandleObjectGroup group)
+          : ICStubCompiler(cx, TypeUpdate_ObjectGroup),
+            group_(group)
         { }
 
-        ICTypeUpdate_TypeObject *getStub(ICStubSpace *space) {
-            return ICTypeUpdate_TypeObject::New(space, getStubCode(), type_);
+        ICTypeUpdate_ObjectGroup *getStub(ICStubSpace *space) {
+            return ICTypeUpdate_ObjectGroup::New(space, getStubCode(), group_);
         }
     };
 };
 
 // This
 //      JSOP_THIS
 
 class ICThis_Fallback : public ICFallbackStub
@@ -3507,61 +3507,61 @@ class ICSetElem_Fallback : public ICFall
     };
 };
 
 class ICSetElem_Dense : public ICUpdatedStub
 {
     friend class ICStubSpace;
 
     HeapPtrShape shape_;
-    HeapPtrTypeObject type_;
-
-    ICSetElem_Dense(JitCode *stubCode, HandleShape shape, HandleTypeObject type);
+    HeapPtrObjectGroup group_;
+
+    ICSetElem_Dense(JitCode *stubCode, HandleShape shape, HandleObjectGroup group);
 
   public:
     static inline ICSetElem_Dense *New(ICStubSpace *space, JitCode *code, HandleShape shape,
-                                       HandleTypeObject type) {
+                                       HandleObjectGroup group) {
         if (!code)
             return nullptr;
-        return space->allocate<ICSetElem_Dense>(code, shape, type);
+        return space->allocate<ICSetElem_Dense>(code, shape, group);
     }
 
     static size_t offsetOfShape() {
         return offsetof(ICSetElem_Dense, shape_);
     }
-    static size_t offsetOfType() {
-        return offsetof(ICSetElem_Dense, type_);
+    static size_t offsetOfGroup() {
+        return offsetof(ICSetElem_Dense, group_);
     }
 
     HeapPtrShape &shape() {
         return shape_;
     }
-    HeapPtrTypeObject &type() {
-        return type_;
+    HeapPtrObjectGroup &group() {
+        return group_;
     }
 
     class Compiler : public ICStubCompiler {
         RootedShape shape_;
 
         // Compiler is only live on stack during compilation, it should
-        // outlive any RootedTypeObject it's passed.  So it can just
+        // outlive any RootedObjectGroup it's passed.  So it can just
         // use the handle.
-        HandleTypeObject type_;
+        HandleObjectGroup group_;
 
         bool generateStubCode(MacroAssembler &masm);
 
       public:
-        Compiler(JSContext *cx, Shape *shape, HandleTypeObject type)
+        Compiler(JSContext *cx, Shape *shape, HandleObjectGroup group)
           : ICStubCompiler(cx, ICStub::SetElem_Dense),
             shape_(cx, shape),
-            type_(type)
+            group_(group)
         {}
 
         ICUpdatedStub *getStub(ICStubSpace *space) {
-            ICSetElem_Dense *stub = ICSetElem_Dense::New(space, getStubCode(), shape_, type_);
+            ICSetElem_Dense *stub = ICSetElem_Dense::New(space, getStubCode(), shape_, group_);
             if (!stub || !stub->initUpdatingChain(cx, space))
                 return nullptr;
             return stub;
         }
     };
 };
 
 template <size_t ProtoChainDepth> class ICSetElem_DenseAddImpl;
@@ -3569,27 +3569,27 @@ template <size_t ProtoChainDepth> class 
 class ICSetElem_DenseAdd : public ICUpdatedStub
 {
     friend class ICStubSpace;
 
   public:
     static const size_t MAX_PROTO_CHAIN_DEPTH = 4;
 
   protected:
-    HeapPtrTypeObject type_;
-
-    ICSetElem_DenseAdd(JitCode *stubCode, types::TypeObject *type, size_t protoChainDepth);
-
-  public:
-    static size_t offsetOfType() {
-        return offsetof(ICSetElem_DenseAdd, type_);
-    }
-
-    HeapPtrTypeObject &type() {
-        return type_;
+    HeapPtrObjectGroup group_;
+
+    ICSetElem_DenseAdd(JitCode *stubCode, types::ObjectGroup *group, size_t protoChainDepth);
+
+  public:
+    static size_t offsetOfGroup() {
+        return offsetof(ICSetElem_DenseAdd, group_);
+    }
+
+    HeapPtrObjectGroup &group() {
+        return group_;
     }
     size_t protoChainDepth() const {
         MOZ_ASSERT(extra_ <= MAX_PROTO_CHAIN_DEPTH);
         return extra_;
     }
 
     template <size_t ProtoChainDepth>
     ICSetElem_DenseAddImpl<ProtoChainDepth> *toImplUnchecked() {
@@ -3606,33 +3606,33 @@ class ICSetElem_DenseAdd : public ICUpda
 template <size_t ProtoChainDepth>
 class ICSetElem_DenseAddImpl : public ICSetElem_DenseAdd
 {
     friend class ICStubSpace;
 
     static const size_t NumShapes = ProtoChainDepth + 1;
     mozilla::Array<HeapPtrShape, NumShapes> shapes_;
 
-    ICSetElem_DenseAddImpl(JitCode *stubCode, types::TypeObject *type,
+    ICSetElem_DenseAddImpl(JitCode *stubCode, types::ObjectGroup *group,
                            const AutoShapeVector *shapes)
-      : ICSetElem_DenseAdd(stubCode, type, ProtoChainDepth)
+      : ICSetElem_DenseAdd(stubCode, group, ProtoChainDepth)
     {
         MOZ_ASSERT(shapes->length() == NumShapes);
         for (size_t i = 0; i < NumShapes; i++)
             shapes_[i].init((*shapes)[i]);
     }
 
   public:
     static inline ICSetElem_DenseAddImpl *New(ICStubSpace *space, JitCode *code,
-                                              types::TypeObject *type,
+                                              types::ObjectGroup *group,
                                               const AutoShapeVector *shapes)
     {
         if (!code)
             return nullptr;
-        return space->allocate<ICSetElem_DenseAddImpl<ProtoChainDepth> >(code, type, shapes);
+        return space->allocate<ICSetElem_DenseAddImpl<ProtoChainDepth> >(code, group, shapes);
     }
 
     void traceShapes(JSTracer *trc) {
         for (size_t i = 0; i < NumShapes; i++)
             MarkShape(trc, &shapes_[i], "baseline-setelem-denseadd-stub-shape");
     }
     Shape *shape(size_t i) const {
         MOZ_ASSERT(i < NumShapes);
@@ -4524,74 +4524,74 @@ class ICGetPropNativeDoesNotExistCompile
 
     ICStub *getStub(ICStubSpace *space);
 };
 
 class ICGetProp_Unboxed : public ICMonitoredStub
 {
     friend class ICStubSpace;
 
-    HeapPtrTypeObject type_;
+    HeapPtrObjectGroup group_;
     uint32_t fieldOffset_;
 
-    ICGetProp_Unboxed(JitCode *stubCode, ICStub *firstMonitorStub, HandleTypeObject type,
+    ICGetProp_Unboxed(JitCode *stubCode, ICStub *firstMonitorStub, HandleObjectGroup group,
                       uint32_t fieldOffset)
       : ICMonitoredStub(ICStub::GetProp_Unboxed, stubCode, firstMonitorStub),
-        type_(type), fieldOffset_(fieldOffset)
+        group_(group), fieldOffset_(fieldOffset)
     {
         (void) fieldOffset_; // Silence clang warning
     }
 
   public:
     static inline ICGetProp_Unboxed *New(ICStubSpace *space, JitCode *code,
-                                         ICStub *firstMonitorStub, HandleTypeObject shape,
+                                         ICStub *firstMonitorStub, HandleObjectGroup group,
                                          uint32_t fieldOffset)
     {
         if (!code)
             return nullptr;
-        return space->allocate<ICGetProp_Unboxed>(code, firstMonitorStub, shape, fieldOffset);
-    }
-
-    HeapPtrTypeObject &type() {
-        return type_;
-    }
-
-    static size_t offsetOfType() {
-        return offsetof(ICGetProp_Unboxed, type_);
+        return space->allocate<ICGetProp_Unboxed>(code, firstMonitorStub, group, fieldOffset);
+    }
+
+    HeapPtrObjectGroup &group() {
+        return group_;
+    }
+
+    static size_t offsetOfGroup() {
+        return offsetof(ICGetProp_Unboxed, group_);
     }
     static size_t offsetOfFieldOffset() {
         return offsetof(ICGetProp_Unboxed, fieldOffset_);
     }
 
     class Compiler : public ICStubCompiler {
       protected:
         ICStub *firstMonitorStub_;
-        RootedTypeObject type_;
+        RootedObjectGroup group_;
         uint32_t fieldOffset_;
         JSValueType fieldType_;
 
         bool generateStubCode(MacroAssembler &masm);
 
         virtual int32_t getKey() const {
             return static_cast<int32_t>(kind) | (static_cast<int32_t>(fieldType_)) << 16;
         }
 
       public:
         Compiler(JSContext *cx, ICStub *firstMonitorStub,
-                 types::TypeObject *type, uint32_t fieldOffset, JSValueType fieldType)
+                 types::ObjectGroup *group, uint32_t fieldOffset, JSValueType fieldType)
           : ICStubCompiler(cx, ICStub::GetProp_Unboxed),
             firstMonitorStub_(firstMonitorStub),
-            type_(cx, type),
+            group_(cx, group),
             fieldOffset_(fieldOffset),
             fieldType_(fieldType)
         {}
 
         ICStub *getStub(ICStubSpace *space) {
             return ICGetProp_Unboxed::New(space, getStubCode(), firstMonitorStub_,
-                                          type_, fieldOffset_);
+                                          group_, fieldOffset_);
         }
     };
 };
 
 static uint32_t
 SimpleTypeDescrKey(SimpleTypeDescr *descr)
 {
     if (descr->is<ScalarTypeDescr>())
@@ -5293,44 +5293,44 @@ class ICSetProp_Fallback : public ICFall
 };
 
 // Optimized SETPROP/SETGNAME/SETNAME stub.
 class ICSetProp_Native : public ICUpdatedStub
 {
     friend class ICStubSpace;
 
   protected: // Protected to silence Clang warning.
-    HeapPtrTypeObject type_;
+    HeapPtrObjectGroup group_;
     HeapPtrShape shape_;
     uint32_t offset_;
 
-    ICSetProp_Native(JitCode *stubCode, HandleTypeObject type, HandleShape shape, uint32_t offset);
-
-  public:
-    static inline ICSetProp_Native *New(ICStubSpace *space, JitCode *code, HandleTypeObject type,
+    ICSetProp_Native(JitCode *stubCode, HandleObjectGroup group, HandleShape shape, uint32_t offset);
+
+  public:
+    static inline ICSetProp_Native *New(ICStubSpace *space, JitCode *code, HandleObjectGroup group,
                                         HandleShape shape, uint32_t offset)
     {
         if (!code)
             return nullptr;
-        return space->allocate<ICSetProp_Native>(code, type, shape, offset);
-    }
-    HeapPtrTypeObject &type() {
-        return type_;
+        return space->allocate<ICSetProp_Native>(code, group, shape, offset);
+    }
+    HeapPtrObjectGroup &group() {
+        return group_;
     }
     HeapPtrShape &shape() {
         return shape_;
     }
     void notePreliminaryObject() {
         extra_ = 1;
     }
     bool hasPreliminaryObject() const {
         return extra_;
     }
-    static size_t offsetOfType() {
-        return offsetof(ICSetProp_Native, type_);
+    static size_t offsetOfGroup() {
+        return offsetof(ICSetProp_Native, group_);
     }
     static size_t offsetOfShape() {
         return offsetof(ICSetProp_Native, shape_);
     }
     static size_t offsetOfOffset() {
         return offsetof(ICSetProp_Native, offset_);
     }
 
@@ -5362,290 +5362,290 @@ class ICSetProp_Native : public ICUpdate
 template <size_t ProtoChainDepth> class ICSetProp_NativeAddImpl;
 
 class ICSetProp_NativeAdd : public ICUpdatedStub
 {
   public:
     static const size_t MAX_PROTO_CHAIN_DEPTH = 4;
 
   protected: // Protected to silence Clang warning.
-    HeapPtrTypeObject type_;
+    HeapPtrObjectGroup group_;
     HeapPtrShape newShape_;
-    HeapPtrTypeObject newType_;
+    HeapPtrObjectGroup newGroup_;
     uint32_t offset_;
 
-    ICSetProp_NativeAdd(JitCode *stubCode, HandleTypeObject type, size_t protoChainDepth,
-                        HandleShape newShape, HandleTypeObject newType, uint32_t offset);
+    ICSetProp_NativeAdd(JitCode *stubCode, HandleObjectGroup group, size_t protoChainDepth,
+                        HandleShape newShape, HandleObjectGroup newGroup, uint32_t offset);
 
   public:
     size_t protoChainDepth() const {
         return extra_;
     }
-    HeapPtrTypeObject &type() {
-        return type_;
+    HeapPtrObjectGroup &group() {
+        return group_;
     }
     HeapPtrShape &newShape() {
         return newShape_;
     }
-    HeapPtrTypeObject &newType() {
-        return newType_;
+    HeapPtrObjectGroup &newGroup() {
+        return newGroup_;
     }
 
     template <size_t ProtoChainDepth>
     ICSetProp_NativeAddImpl<ProtoChainDepth> *toImpl() {
         MOZ_ASSERT(ProtoChainDepth == protoChainDepth());
         return static_cast<ICSetProp_NativeAddImpl<ProtoChainDepth> *>(this);
     }
 
-    static size_t offsetOfType() {
-        return offsetof(ICSetProp_NativeAdd, type_);
+    static size_t offsetOfGroup() {
+        return offsetof(ICSetProp_NativeAdd, group_);
     }
     static size_t offsetOfNewShape() {
         return offsetof(ICSetProp_NativeAdd, newShape_);
     }
-    static size_t offsetOfNewType() {
-        return offsetof(ICSetProp_NativeAdd, newType_);
+    static size_t offsetOfNewGroup() {
+        return offsetof(ICSetProp_NativeAdd, newGroup_);
     }
     static size_t offsetOfOffset() {
         return offsetof(ICSetProp_NativeAdd, offset_);
     }
 };
 
 template <size_t ProtoChainDepth>
 class ICSetProp_NativeAddImpl : public ICSetProp_NativeAdd
 {
     friend class ICStubSpace;
 
     static const size_t NumShapes = ProtoChainDepth + 1;
     mozilla::Array<HeapPtrShape, NumShapes> shapes_;
 
-    ICSetProp_NativeAddImpl(JitCode *stubCode, HandleTypeObject type,
+    ICSetProp_NativeAddImpl(JitCode *stubCode, HandleObjectGroup group,
                             const AutoShapeVector *shapes,
-                            HandleShape newShape, HandleTypeObject newType, uint32_t offset);
+                            HandleShape newShape, HandleObjectGroup newGroup, uint32_t offset);
 
   public:
     static inline ICSetProp_NativeAddImpl *New(
-            ICStubSpace *space, JitCode *code, HandleTypeObject type,
+            ICStubSpace *space, JitCode *code, HandleObjectGroup group,
             const AutoShapeVector *shapes, HandleShape newShape,
-            HandleTypeObject newType, uint32_t offset)
+            HandleObjectGroup newGroup, uint32_t offset)
     {
         if (!code)
             return nullptr;
         return space->allocate<ICSetProp_NativeAddImpl<ProtoChainDepth> >(
-                            code, type, shapes, newShape, newType, offset);
+                            code, group, shapes, newShape, newGroup, offset);
     }
 
     void traceShapes(JSTracer *trc) {
         for (size_t i = 0; i < NumShapes; i++)
             MarkShape(trc, &shapes_[i], "baseline-setpropnativeadd-stub-shape");
     }
 
     static size_t offsetOfShape(size_t idx) {
         return offsetof(ICSetProp_NativeAddImpl, shapes_) + (idx * sizeof(HeapPtrShape));
     }
 };
 
 class ICSetPropNativeAddCompiler : public ICStubCompiler
 {
     RootedObject obj_;
     RootedShape oldShape_;
-    RootedTypeObject oldType_;
+    RootedObjectGroup oldGroup_;
     size_t protoChainDepth_;
     bool isFixedSlot_;
     uint32_t offset_;
 
   protected:
     virtual int32_t getKey() const {
         return static_cast<int32_t>(kind) | (static_cast<int32_t>(isFixedSlot_) << 16) |
                (static_cast<int32_t>(protoChainDepth_) << 20);
     }
 
     bool generateStubCode(MacroAssembler &masm);
 
   public:
     ICSetPropNativeAddCompiler(JSContext *cx, HandleObject obj,
-                               HandleShape oldShape, HandleTypeObject oldType,
+                               HandleShape oldShape, HandleObjectGroup oldGroup,
                                size_t protoChainDepth, bool isFixedSlot, uint32_t offset);
 
     template <size_t ProtoChainDepth>
     ICUpdatedStub *getStubSpecific(ICStubSpace *space, const AutoShapeVector *shapes)
     {
-        RootedTypeObject newType(cx, obj_->getType(cx));
-        if (!newType)
+        RootedObjectGroup newGroup(cx, obj_->getGroup(cx));
+        if (!newGroup)
             return nullptr;
 
-        // Only specify newType when the object's type changes due to the
+        // Only specify newGroup when the object's group changes due to the
         // object becoming fully initialized per the acquired properties
         // analysis.
-        if (newType == oldType_)
-            newType = nullptr;
+        if (newGroup == oldGroup_)
+            newGroup = nullptr;
 
         RootedShape newShape(cx, obj_->lastProperty());
 
         return ICSetProp_NativeAddImpl<ProtoChainDepth>::New(
-                    space, getStubCode(), oldType_, shapes, newShape, newType, offset_);
+                    space, getStubCode(), oldGroup_, shapes, newShape, newGroup, offset_);
     }
 
     ICUpdatedStub *getStub(ICStubSpace *space);
 };
 
 class ICSetProp_Unboxed : public ICUpdatedStub
 {
     friend class ICStubSpace;
 
-    HeapPtrTypeObject type_;
+    HeapPtrObjectGroup group_;
     uint32_t fieldOffset_;
 
-    ICSetProp_Unboxed(JitCode *stubCode, HandleTypeObject type, uint32_t fieldOffset)
+    ICSetProp_Unboxed(JitCode *stubCode, HandleObjectGroup group, uint32_t fieldOffset)
       : ICUpdatedStub(ICStub::SetProp_Unboxed, stubCode),
-        type_(type),
+        group_(group),
         fieldOffset_(fieldOffset)
     {
         (void) fieldOffset_; // Silence clang warning
     }
 
   public:
     static inline ICSetProp_Unboxed *New(ICStubSpace *space, JitCode *code,
-                                         HandleTypeObject type, uint32_t fieldOffset)
+                                         HandleObjectGroup group, uint32_t fieldOffset)
     {
         if (!code)
             return nullptr;
-        return space->allocate<ICSetProp_Unboxed>(code, type, fieldOffset);
-    }
-
-    HeapPtrTypeObject &type() {
-        return type_;
-    }
-
-    static size_t offsetOfType() {
-        return offsetof(ICSetProp_Unboxed, type_);
+        return space->allocate<ICSetProp_Unboxed>(code, group, fieldOffset);
+    }
+
+    HeapPtrObjectGroup &group() {
+        return group_;
+    }
+
+    static size_t offsetOfGroup() {
+        return offsetof(ICSetProp_Unboxed, group_);
     }
     static size_t offsetOfFieldOffset() {
         return offsetof(ICSetProp_Unboxed, fieldOffset_);
     }
 
     class Compiler : public ICStubCompiler {
       protected:
-        RootedTypeObject type_;
+        RootedObjectGroup group_;
         uint32_t fieldOffset_;
         JSValueType fieldType_;
 
         bool generateStubCode(MacroAssembler &masm);
 
         virtual int32_t getKey() const {
             return static_cast<int32_t>(kind) |
                    (static_cast<int32_t>(fieldType_) << 16);
         }
 
       public:
-        Compiler(JSContext *cx, types::TypeObject *type, uint32_t fieldOffset,
+        Compiler(JSContext *cx, types::ObjectGroup *group, uint32_t fieldOffset,
                  JSValueType fieldType)
           : ICStubCompiler(cx, ICStub::SetProp_Unboxed),
-            type_(cx, type),
+            group_(cx, group),
             fieldOffset_(fieldOffset),
             fieldType_(fieldType)
         {}
 
         ICUpdatedStub *getStub(ICStubSpace *space) {
             ICUpdatedStub *stub = ICSetProp_Unboxed::New(space, getStubCode(),
-                                                         type_, fieldOffset_);
+                                                         group_, fieldOffset_);
             if (!stub || !stub->initUpdatingChain(cx, space))
                 return nullptr;
             return stub;
         }
 
         bool needsUpdateStubs() {
             return fieldType_ == JSVAL_TYPE_OBJECT;
         }
     };
 };
 
 class ICSetProp_TypedObject : public ICUpdatedStub
 {
     friend class ICStubSpace;
 
     HeapPtrShape shape_;
-    HeapPtrTypeObject type_;
+    HeapPtrObjectGroup group_;
     uint32_t fieldOffset_;
     bool isObjectReference_;
 
-    ICSetProp_TypedObject(JitCode *stubCode, HandleShape shape, HandleTypeObject type,
+    ICSetProp_TypedObject(JitCode *stubCode, HandleShape shape, HandleObjectGroup group,
                           uint32_t fieldOffset, bool isObjectReference)
       : ICUpdatedStub(ICStub::SetProp_TypedObject, stubCode),
         shape_(shape),
-        type_(type),
+        group_(group),
         fieldOffset_(fieldOffset),
         isObjectReference_(isObjectReference)
     {
         (void) fieldOffset_; // Silence clang warning
     }
 
   public:
     static inline ICSetProp_TypedObject *New(ICStubSpace *space, JitCode *code,
-                                             HandleShape shape, HandleTypeObject type,
+                                             HandleShape shape, HandleObjectGroup group,
                                              uint32_t fieldOffset, bool isObjectReference)
     {
         if (!code)
             return nullptr;
-        return space->allocate<ICSetProp_TypedObject>(code, shape, type,
+        return space->allocate<ICSetProp_TypedObject>(code, shape, group,
                                                       fieldOffset, isObjectReference);
     }
 
     HeapPtrShape &shape() {
         return shape_;
     }
-    HeapPtrTypeObject &type() {
-        return type_;
+    HeapPtrObjectGroup &group() {
+        return group_;
     }
     bool isObjectReference() {
         return isObjectReference_;
     }
 
     static size_t offsetOfShape() {
         return offsetof(ICSetProp_TypedObject, shape_);
     }
-    static size_t offsetOfType() {
-        return offsetof(ICSetProp_TypedObject, type_);
+    static size_t offsetOfGroup() {
+        return offsetof(ICSetProp_TypedObject, group_);
     }
     static size_t offsetOfFieldOffset() {
         return offsetof(ICSetProp_TypedObject, fieldOffset_);
     }
 
     class Compiler : public ICStubCompiler {
       protected:
         RootedShape shape_;
-        RootedTypeObject type_;
+        RootedObjectGroup group_;
         uint32_t fieldOffset_;
         TypedThingLayout layout_;
         Rooted<SimpleTypeDescr *> fieldDescr_;
 
         bool generateStubCode(MacroAssembler &masm);
 
         virtual int32_t getKey() const {
             return static_cast<int32_t>(kind) |
                    (static_cast<int32_t>(SimpleTypeDescrKey(fieldDescr_)) << 16) |
                    (static_cast<int32_t>(layout_) << 24);
         }
 
       public:
-        Compiler(JSContext *cx, Shape *shape, types::TypeObject *type, uint32_t fieldOffset,
+        Compiler(JSContext *cx, Shape *shape, types::ObjectGroup *group, uint32_t fieldOffset,
                  SimpleTypeDescr *fieldDescr)
           : ICStubCompiler(cx, ICStub::SetProp_TypedObject),
             shape_(cx, shape),
-            type_(cx, type),
+            group_(cx, group),
             fieldOffset_(fieldOffset),
             layout_(GetTypedThingLayout(shape->getObjectClass())),
             fieldDescr_(cx, fieldDescr)
         {}
 
         ICUpdatedStub *getStub(ICStubSpace *space) {
             bool isObjectReference =
                 fieldDescr_->is<ReferenceTypeDescr>() &&
                 fieldDescr_->as<ReferenceTypeDescr>().type() == ReferenceTypeDescr::TYPE_OBJECT;
-            ICUpdatedStub *stub = ICSetProp_TypedObject::New(space, getStubCode(), shape_, type_,
+            ICUpdatedStub *stub = ICSetProp_TypedObject::New(space, getStubCode(), shape_, group_,
                                                              fieldOffset_, isObjectReference);
             if (!stub || !stub->initUpdatingChain(cx, space))
                 return nullptr;
             return stub;
         }
 
         bool needsUpdateStubs() {
             return fieldDescr_->is<ReferenceTypeDescr>() &&
--- a/js/src/jit/BaselineInspector.cpp
+++ b/js/src/jit/BaselineInspector.cpp
@@ -77,91 +77,91 @@ SetElemICInspector::sawTypedArrayWrite()
             return true;
     }
     return false;
 }
 
 bool
 BaselineInspector::maybeInfoForPropertyOp(jsbytecode *pc,
                                           ShapeVector &nativeShapes,
-                                          TypeObjectVector &unboxedTypes)
+                                          ObjectGroupVector &unboxedGroups)
 {
     // Return lists of native shapes and unboxed objects seen by the baseline
     // IC for the current op. Empty lists indicate no shapes/types are known,
     // or there was an uncacheable access.
     MOZ_ASSERT(nativeShapes.empty());
-    MOZ_ASSERT(unboxedTypes.empty());
+    MOZ_ASSERT(unboxedGroups.empty());
 
     if (!hasBaselineScript())
         return true;
 
     MOZ_ASSERT(isValidPC(pc));
     const ICEntry &entry = icEntryFromPC(pc);
 
     ICStub *stub = entry.firstStub();
     while (stub->next()) {
         Shape *shape = nullptr;
-        types::TypeObject *type = nullptr;
+        types::ObjectGroup *group = nullptr;
         if (stub->isGetProp_Native()) {
             shape = stub->toGetProp_Native()->shape();
         } else if (stub->isSetProp_Native()) {
             shape = stub->toSetProp_Native()->shape();
         } else if (stub->isGetProp_Unboxed()) {
-            type = stub->toGetProp_Unboxed()->type();
+            group = stub->toGetProp_Unboxed()->group();
         } else if (stub->isSetProp_Unboxed()) {
-            type = stub->toSetProp_Unboxed()->type();
+            group = stub->toSetProp_Unboxed()->group();
         } else {
             nativeShapes.clear();
-            unboxedTypes.clear();
+            unboxedGroups.clear();
             return true;
         }
 
-        // Don't add the same shape/type twice (this can happen if there are
-        // multiple SetProp_Native stubs with different TypeObject's).
+        // Don't add the same shape/group twice (this can happen if there are
+        // multiple SetProp_Native stubs with different ObjectGroups).
         if (shape) {
             bool found = false;
             for (size_t i = 0; i < nativeShapes.length(); i++) {
                 if (nativeShapes[i] == shape) {
                     found = true;
                     break;
                 }
             }
             if (!found && !nativeShapes.append(shape))
                 return false;
         } else {
             bool found = false;
-            for (size_t i = 0; i < unboxedTypes.length(); i++) {
-                if (unboxedTypes[i] == type) {
+            for (size_t i = 0; i < unboxedGroups.length(); i++) {
+                if (unboxedGroups[i] == group) {
                     found = true;
                     break;
                 }
             }
-            if (!found && !unboxedTypes.append(type))
+            if (!found && !unboxedGroups.append(group))
                 return false;
         }
 
         stub = stub->next();
     }
 
     if (stub->isGetProp_Fallback()) {
         if (stub->toGetProp_Fallback()->hadUnoptimizableAccess()) {
             nativeShapes.clear();
-            unboxedTypes.clear();
+            unboxedGroups.clear();
         }
     } else {
         if (stub->toSetProp_Fallback()->hadUnoptimizableAccess()) {
             nativeShapes.clear();
-            unboxedTypes.clear();
+            unboxedGroups.clear();
         }
     }
 
-    // Don't inline if there are more than 5 shapes/types.
-    if (nativeShapes.length() + unboxedTypes.length() > 5) {
+    // Don't inline if there are more than 5 shapes/groups.
+    if (nativeShapes.length() + unboxedGroups.length() > 5) {
         nativeShapes.clear();
-        unboxedTypes.clear();
+        unboxedGroups.clear();
     }
 
     return true;
 }
 
 ICStub *
 BaselineInspector::monomorphicStub(jsbytecode *pc)
 {
--- a/js/src/jit/BaselineInspector.h
+++ b/js/src/jit/BaselineInspector.h
@@ -88,18 +88,20 @@ class BaselineInspector
         return ICInspectorType(this, pc, ent);
     }
 
     ICStub *monomorphicStub(jsbytecode *pc);
     bool dimorphicStub(jsbytecode *pc, ICStub **pfirst, ICStub **psecond);
 
   public:
     typedef Vector<Shape *, 4, JitAllocPolicy> ShapeVector;
-    typedef Vector<types::TypeObject *, 4, JitAllocPolicy> TypeObjectVector;
-    bool maybeInfoForPropertyOp(jsbytecode *pc, ShapeVector &nativeShapes, TypeObjectVector &unboxedTypes);
+    typedef Vector<types::ObjectGroup *, 4, JitAllocPolicy> ObjectGroupVector;
+    bool maybeInfoForPropertyOp(jsbytecode *pc,
+                                ShapeVector &nativeShapes,
+                                ObjectGroupVector &unboxedGroups);
 
     SetElemICInspector setElemICInspector(jsbytecode *pc) {
         return makeICInspector<SetElemICInspector>(pc, ICStub::SetElem_Fallback);
     }
 
     MIRType expectedResultType(jsbytecode *pc);
     MCompare::CompareType expectedCompareType(jsbytecode *pc);
     MIRType expectedBinaryArithSpecialization(jsbytecode *pc);
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -718,59 +718,59 @@ CodeGenerator::visitFunctionDispatch(LFu
         casesWithFallback = mir->numCases() + 1;
         lastLabel = skipTrivialBlocks(mir->getFallback())->lir()->label();
     }
 
     // Compare function pointers, except for the last case.
     for (size_t i = 0; i < casesWithFallback - 1; i++) {
         MOZ_ASSERT(i < mir->numCases());
         LBlock *target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
-        if (types::TypeObject *funcType = mir->getCaseTypeObject(i)) {
-            masm.branchPtr(Assembler::Equal, Address(input, JSObject::offsetOfType()),
-                           ImmGCPtr(funcType), target->label());
+        if (types::ObjectGroup *funcGroup = mir->getCaseObjectGroup(i)) {
+            masm.branchPtr(Assembler::Equal, Address(input, JSObject::offsetOfGroup()),
+                           ImmGCPtr(funcGroup), target->label());
         } else {
             JSFunction *func = mir->getCase(i);
             masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
         }
     }
 
     // Jump to the last case.
     masm.jump(lastLabel);
 }
 
 void
-CodeGenerator::visitTypeObjectDispatch(LTypeObjectDispatch *lir)
-{
-    MTypeObjectDispatch *mir = lir->mir();
+CodeGenerator::visitObjectGroupDispatch(LObjectGroupDispatch *lir)
+{
+    MObjectGroupDispatch *mir = lir->mir();
     Register input = ToRegister(lir->input());
     Register temp = ToRegister(lir->temp());
 
-    // Hold the incoming TypeObject.
-
-    masm.loadPtr(Address(input, JSObject::offsetOfType()), temp);
-
-    // Compare TypeObjects.
+    // Hold the incoming ObjectGroup.
+
+    masm.loadPtr(Address(input, JSObject::offsetOfGroup()), temp);
+
+    // Compare ObjectGroups.
 
     MacroAssembler::BranchGCPtr lastBranch;
     LBlock *lastBlock = nullptr;
     InlinePropertyTable *propTable = mir->propTable();
     for (size_t i = 0; i < mir->numCases(); i++) {
         JSFunction *func = mir->getCase(i);
         LBlock *target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
 
         DebugOnly<bool> found = false;
         for (size_t j = 0; j < propTable->numEntries(); j++) {
             if (propTable->getFunction(j) != func)
                 continue;
 
             if (lastBranch.isInitialized())
                 lastBranch.emit(masm);
 
-            types::TypeObject *typeObj = propTable->getTypeObject(j);
-            lastBranch = MacroAssembler::BranchGCPtr(Assembler::Equal, temp, ImmGCPtr(typeObj),
+            types::ObjectGroup *group = propTable->getObjectGroup(j);
+            lastBranch = MacroAssembler::BranchGCPtr(Assembler::Equal, temp, ImmGCPtr(group),
                                                      target->label());
             lastBlock = target;
             found = true;
         }
         MOZ_ASSERT(found);
     }
 
     // Unknown function: jump to fallback block.
@@ -1699,17 +1699,17 @@ CodeGenerator::visitLambdaArrow(LLambdaA
     Register output = ToRegister(lir->output());
     Register tempReg = ToRegister(lir->temp());
     const LambdaFunctionInfo &info = lir->mir()->info();
 
     OutOfLineCode *ool = oolCallVM(LambdaArrowInfo, lir,
                                    (ArgList(), ImmGCPtr(info.fun), scopeChain, thisv),
                                    StoreRegisterTo(output));
 
-    MOZ_ASSERT(!info.useNewTypeForClone);
+    MOZ_ASSERT(!info.useSingletonForClone);
 
     if (info.singletonType) {
         // If the function has a singleton type, this instruction will only be
         // executed once so we don't bother inlining it.
         masm.jump(ool->entry());
         masm.bind(ool->rejoin());
         return;
     }
@@ -2229,58 +2229,58 @@ CodeGenerator::visitStoreSlotV(LStoreSlo
 }
 
 void
 CodeGenerator::emitGetPropertyPolymorphic(LInstruction *ins, Register obj, Register scratch,
                                           const TypedOrValueRegister &output)
 {
     MGetPropertyPolymorphic *mir = ins->mirRaw()->toGetPropertyPolymorphic();
 
-    size_t total = mir->numUnboxedTypes() + mir->numShapes();
+    size_t total = mir->numUnboxedGroups() + mir->numShapes();
     MOZ_ASSERT(total > 1);
 
-    bool typeInScratch = mir->numUnboxedTypes() > 1;
+    bool groupInScratch = mir->numUnboxedGroups() > 1;
     bool shapeInScratch = mir->numShapes() > 1;
 
     Label done;
 
     for (size_t i = 0; i < total; i++) {
-        bool unboxedType = i < mir->numUnboxedTypes();
-
-        ImmGCPtr comparePtr = unboxedType
-                              ? ImmGCPtr(mir->unboxedType(i))
-                              : ImmGCPtr(mir->objShape(i - mir->numUnboxedTypes()));
-        Address addr(obj, unboxedType ? JSObject::offsetOfType() : JSObject::offsetOfShape());
-
-        if ((i == 0 && typeInScratch) || (i == mir->numUnboxedTypes() && shapeInScratch))
+        bool unboxedGroup = i < mir->numUnboxedGroups();
+
+        ImmGCPtr comparePtr = unboxedGroup
+                              ? ImmGCPtr(mir->unboxedGroup(i))
+                              : ImmGCPtr(mir->objShape(i - mir->numUnboxedGroups()));
+        Address addr(obj, unboxedGroup ? JSObject::offsetOfGroup() : JSObject::offsetOfShape());
+
+        if ((i == 0 && groupInScratch) || (i == mir->numUnboxedGroups() && shapeInScratch))
             masm.loadPtr(addr, scratch);
 
-        bool inScratch = unboxedType ? typeInScratch : shapeInScratch;
+        bool inScratch = unboxedGroup ? groupInScratch : shapeInScratch;
 
         Label next;
         if (i == total - 1) {
             if (inScratch)
                 bailoutCmpPtr(Assembler::NotEqual, scratch, comparePtr, ins->snapshot());
             else
                 bailoutCmpPtr(Assembler::NotEqual, addr, comparePtr, ins->snapshot());
         } else {
             if (inScratch)
                 masm.branchPtr(Assembler::NotEqual, scratch, comparePtr, &next);
             else
                 masm.branchPtr(Assembler::NotEqual, addr, comparePtr, &next);
         }
 
-        if (unboxedType) {
+        if (unboxedGroup) {
             const UnboxedLayout::Property *property =
-                mir->unboxedType(i)->unboxedLayout().lookup(mir->name());
+                mir->unboxedGroup(i)->unboxedLayout().lookup(mir->name());
             Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset);
 
             masm.loadUnboxedProperty(propertyAddr, property->type, output);
         } else {
-            Shape *shape = mir->shape(i - mir->numUnboxedTypes());
+            Shape *shape = mir->shape(i - mir->numUnboxedGroups());
             if (shape->slot() < shape->numFixedSlots()) {
                 // Fixed slot.
                 masm.loadTypedOrValue(Address(obj, NativeObject::getFixedSlotOffset(shape->slot())),
                                       output);
             } else {
                 // Dynamic slot.
                 uint32_t offset = (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value);
                 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
@@ -2316,64 +2316,64 @@ CodeGenerator::visitGetPropertyPolymorph
 }
 
 void
 CodeGenerator::emitSetPropertyPolymorphic(LInstruction *ins, Register obj, Register scratch,
                                           const ConstantOrRegister &value)
 {
     MSetPropertyPolymorphic *mir = ins->mirRaw()->toSetPropertyPolymorphic();
 
-    size_t total = mir->numUnboxedTypes() + mir->numShapes();
+    size_t total = mir->numUnboxedGroups() + mir->numShapes();
     MOZ_ASSERT(total > 1);
 
-    bool typeInScratch = mir->numUnboxedTypes() > 1;
+    bool groupInScratch = mir->numUnboxedGroups() > 1;
     bool shapeInScratch = mir->numShapes() > 1;
 
     Label done;
     for (size_t i = 0; i < total; i++) {
-        bool unboxedType = i < mir->numUnboxedTypes();
-
-        ImmGCPtr comparePtr = unboxedType
-                              ? ImmGCPtr(mir->unboxedType(i))
-                              : ImmGCPtr(mir->objShape(i - mir->numUnboxedTypes()));
-        Address addr(obj, unboxedType ? JSObject::offsetOfType() : JSObject::offsetOfShape());
-
-        if ((i == 0 && typeInScratch) || (i == mir->numUnboxedTypes() && shapeInScratch))
+        bool unboxedGroup = i < mir->numUnboxedGroups();
+
+        ImmGCPtr comparePtr = unboxedGroup
+                              ? ImmGCPtr(mir->unboxedGroup(i))
+                              : ImmGCPtr(mir->objShape(i - mir->numUnboxedGroups()));
+        Address addr(obj, unboxedGroup ? JSObject::offsetOfGroup() : JSObject::offsetOfShape());
+
+        if ((i == 0 && groupInScratch) || (i == mir->numUnboxedGroups() && shapeInScratch))
             masm.loadPtr(addr, scratch);
 
-        bool inScratch = unboxedType ? typeInScratch : shapeInScratch;
+        bool inScratch = unboxedGroup ? groupInScratch : shapeInScratch;
 
         Label next;
         if (i == total - 1) {
             if (inScratch)
                 bailoutCmpPtr(Assembler::NotEqual, scratch, comparePtr, ins->snapshot());
             else
                 bailoutCmpPtr(Assembler::NotEqual, addr, comparePtr, ins->snapshot());
         } else {
             if (inScratch)
                 masm.branchPtr(Assembler::NotEqual, scratch, comparePtr, &next);
             else
                 masm.branchPtr(Assembler::NotEqual, addr, comparePtr, &next);
         }
 
-        if (unboxedType) {
+        if (unboxedGroup) {
             const UnboxedLayout::Property *property =
-                mir->unboxedType(i)->unboxedLayout().lookup(mir->name());
+                mir->unboxedGroup(i)->unboxedLayout().lookup(mir->name());
             Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset);
 
             if (property->type == JSVAL_TYPE_OBJECT)
                 masm.patchableCallPreBarrier(propertyAddr, MIRType_Object);
             else if (property->type == JSVAL_TYPE_STRING)
                 masm.patchableCallPreBarrier(propertyAddr, MIRType_String);
             else
                 MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(property->type));
 
             masm.storeUnboxedProperty(propertyAddr, property->type, value, nullptr);
         } else {
-            Shape *shape = mir->shape(i - mir->numUnboxedTypes());
+            Shape *shape = mir->shape(i - mir->numUnboxedGroups());
             if (shape->slot() < shape->numFixedSlots()) {
                 // Fixed slot.
                 Address addr(obj, NativeObject::getFixedSlotOffset(shape->slot()));
                 if (mir->needsBarrier())
                     emitPreBarrier(addr);
                 masm.storeConstantOrRegister(value, addr);
             } else {
                 // Dynamic slot.
@@ -3671,21 +3671,21 @@ CodeGenerator::emitObjectOrStringResultC
         if (mir->resultTypeSet()->getObjectCount() > 0)
             masm.guardObjectType(output, mir->resultTypeSet(), temp, &miss);
         else
             masm.jump(&miss);
         masm.jump(&ok);
 
         masm.bind(&miss);
 
-        // Type set guards might miss when an object's type changes and its
+        // Type set guards might miss when an object's group changes and its
         // properties become unknown, so check for this case.
-        masm.loadPtr(Address(output, JSObject::offsetOfType()), temp);
+        masm.loadPtr(Address(output, JSObject::offsetOfGroup()), temp);
         masm.branchTestPtr(Assembler::NonZero,
-                           Address(temp, types::TypeObject::offsetOfFlags()),
+                           Address(temp, types::ObjectGroup::offsetOfFlags()),
                            Imm32(types::OBJECT_FLAG_UNKNOWN_PROPERTIES), &ok);
 
         masm.assumeUnreachable("MIR instruction returned object with unexpected type");
 
         masm.bind(&ok);
     }
 
     // Check that we have a valid GC pointer.
@@ -3748,24 +3748,24 @@ CodeGenerator::emitValueResultChecks(LIn
     if (mir->resultTypeSet() && !mir->resultTypeSet()->unknown()) {
         // We have a result TypeSet, assert this value is in it.
         Label miss, ok;
         masm.guardTypeSet(output, mir->resultTypeSet(), BarrierKind::TypeSet, temp1, &miss);
         masm.jump(&ok);
 
         masm.bind(&miss);
 
-        // Type set guards might miss when an object's type changes and its
+        // Type set guards might miss when an object's group changes and its
         // properties become unknown, so check for this case.
         Label realMiss;
         masm.branchTestObject(Assembler::NotEqual, output, &realMiss);
         Register payload = masm.extractObject(output, temp1);
-        masm.loadPtr(Address(payload, JSObject::offsetOfType()), temp1);
+        masm.loadPtr(Address(payload, JSObject::offsetOfGroup()), temp1);
         masm.branchTestPtr(Assembler::NonZero,
-                           Address(temp1, types::TypeObject::offsetOfFlags()),
+                           Address(temp1, types::ObjectGroup::offsetOfFlags()),
                            Imm32(types::OBJECT_FLAG_UNKNOWN_PROPERTIES), &ok);
         masm.bind(&realMiss);
 
         masm.assumeUnreachable("MIR instruction returned value with unexpected type");
 
         masm.bind(&ok);
     }
 
@@ -3929,34 +3929,34 @@ class OutOfLineNewArray : public OutOfLi
         codegen->visitOutOfLineNewArray(this);
     }
 
     LNewArray *lir() const {
         return lir_;
     }
 };
 
-typedef ArrayObject *(*NewDenseArrayFn)(ExclusiveContext *, uint32_t, HandleTypeObject,
+typedef ArrayObject *(*NewDenseArrayFn)(ExclusiveContext *, uint32_t, HandleObjectGroup,
                                         AllocatingBehaviour);
 static const VMFunction NewDenseArrayInfo = FunctionInfo<NewDenseArrayFn>(NewDenseArray);
 
 void
 CodeGenerator::visitNewArrayCallVM(LNewArray *lir)
 {
     Register objReg = ToRegister(lir->output());
 
     MOZ_ASSERT(!lir->isCall());
     saveLive(lir);
 
     JSObject *templateObject = lir->mir()->templateObject();
-    types::TypeObject *type =
-        templateObject->hasSingletonType() ? nullptr : templateObject->type();
+    types::ObjectGroup *group =
+        templateObject->isSingleton() ? nullptr : templateObject->group();
 
     pushArg(Imm32(lir->mir()->allocatingBehaviour()));
-    pushArg(ImmGCPtr(type));
+    pushArg(ImmGCPtr(group));
     pushArg(Imm32(lir->mir()->count()));
 
     callVM(NewDenseArrayInfo, lir);
 
     if (ReturnReg != objReg)
         masm.movePtr(ReturnReg, objReg);
 
     restoreLive(lir);
@@ -4062,45 +4062,45 @@ CodeGenerator::visitNewArrayCopyOnWrite(
                                    (ArgList(), ImmGCPtr(templateObject), Imm32(initialHeap)),
                                    StoreRegisterTo(objReg));
 
     masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry());
 
     masm.bind(ool->rejoin());
 }
 
-typedef ArrayObject *(*ArrayConstructorOneArgFn)(JSContext *, HandleTypeObject, int32_t length);
+typedef ArrayObject *(*ArrayConstructorOneArgFn)(JSContext *, HandleObjectGroup, int32_t length);
 static const VMFunction ArrayConstructorOneArgInfo =
     FunctionInfo<ArrayConstructorOneArgFn>(ArrayConstructorOneArg);
 
 void
 CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength *lir)
 {
     Register lengthReg = ToRegister(lir->length());
     Register objReg = ToRegister(lir->output());
     Register tempReg = ToRegister(lir->temp());
 
     ArrayObject *templateObject = lir->mir()->templateObject();
     gc::InitialHeap initialHeap = lir->mir()->initialHeap();
 
     OutOfLineCode *ool = oolCallVM(ArrayConstructorOneArgInfo, lir,
-                                   (ArgList(), ImmGCPtr(templateObject->type()), lengthReg),
+                                   (ArgList(), ImmGCPtr(templateObject->group()), lengthReg),
                                    StoreRegisterTo(objReg));
 
     size_t numSlots = gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
     size_t inlineLength = numSlots >= ObjectElements::VALUES_PER_HEADER
                         ? numSlots - ObjectElements::VALUES_PER_HEADER
                         : 0;
 
     // Try to do the allocation inline if the template object is big enough
     // for the length in lengthReg. If the length is bigger we could still
     // use the template object and not allocate the elements, but it's more
     // efficient to do a single big allocation than (repeatedly) reallocating
     // the array later on when filling it.
-    if (!templateObject->hasSingletonType() && templateObject->length() <= inlineLength)
+    if (!templateObject->isSingleton() && templateObject->length() <= inlineLength)
         masm.branch32(Assembler::Above, lengthReg, Imm32(templateObject->length()), ool->entry());
     else
         masm.jump(ool->entry());
 
     masm.createGCObject(objReg, tempReg, templateObject, initialHeap, ool->entry());
 
     size_t lengthOffset = NativeObject::offsetOfFixedElements() + ObjectElements::offsetOfLength();
     masm.store32(lengthReg, Address(objReg, lengthOffset));
@@ -4328,30 +4328,30 @@ CodeGenerator::visitSimdBox(LSimdBox *li
 void
 CodeGenerator::visitSimdUnbox(LSimdUnbox *lir)
 {
     Register object = ToRegister(lir->input());
     FloatRegister simd = ToFloatRegister(lir->output());
     Register temp = ToRegister(lir->temp());
     Label bail;
 
-    // obj->type()
-    masm.loadPtr(Address(object, JSObject::offsetOfType()), temp);
+    // obj->group()
+    masm.loadPtr(Address(object, JSObject::offsetOfGroup()), temp);
 
     // Guard that the object has the same representation as the one produced for
     // SIMD value-type.
-    Address clasp(temp, types::TypeObject::offsetOfClasp());
+    Address clasp(temp, types::ObjectGroup::offsetOfClasp());
     static_assert(!SimdTypeDescr::Opaque, "SIMD objects are transparent");
     masm.branchPtr(Assembler::NotEqual, clasp, ImmPtr(&InlineTransparentTypedObject::class_),
                    &bail);
 
     // obj->type()->typeDescr()
     // The previous class pointer comparison implies that the addendumKind is
     // Addendum_TypeDescr.
-    masm.loadPtr(Address(temp, types::TypeObject::offsetOfAddendum()), temp);
+    masm.loadPtr(Address(temp, types::ObjectGroup::offsetOfAddendum()), temp);
 
     // Check for the /Kind/ reserved slot of the TypeDescr.  This is an Int32
     // Value which is equivalent to the object class check.
     static_assert(JS_DESCR_SLOT_KIND < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
     Address typeDescrKind(temp, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_KIND));
     masm.assertTestInt32(Assembler::Equal, typeDescrKind,
       "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_KIND).isInt32())");
     masm.branch32(Assembler::NotEqual, masm.ToPayload(typeDescrKind), Imm32(js::type::Simd), &bail);
@@ -4413,33 +4413,33 @@ CodeGenerator::visitNewDeclEnvObject(LNe
 
     bool initFixedSlots = ShouldInitFixedSlots(lir, templateObj);
     masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry(),
                         initFixedSlots);
 
     masm.bind(ool->rejoin());
 }
 
-typedef JSObject *(*NewCallObjectFn)(JSContext *, HandleShape, HandleTypeObject, uint32_t);
+typedef JSObject *(*NewCallObjectFn)(JSContext *, HandleShape, HandleObjectGroup, uint32_t);
 static const VMFunction NewCallObjectInfo =
     FunctionInfo<NewCallObjectFn>(NewCallObject);
 
 void
 CodeGenerator::visitNewCallObject(LNewCallObject *lir)
 {
     Register objReg = ToRegister(lir->output());
     Register tempReg = ToRegister(lir->temp());
 
     CallObject *templateObj = lir->mir()->templateObject();
 
     JSScript *script = lir->mir()->block()->info().script();
     uint32_t lexicalBegin = script->bindings.aliasedBodyLevelLexicalBegin();
     OutOfLineCode *ool = oolCallVM(NewCallObjectInfo, lir,
                                    (ArgList(), ImmGCPtr(templateObj->lastProperty()),
-                                               ImmGCPtr(templateObj->type()),
+                                               ImmGCPtr(templateObj->group()),
                                                Imm32(lexicalBegin)),
                                    StoreRegisterTo(objReg));
 
     // Inline call object creation, using the OOL path only for tricky cases.
     bool initFixedSlots = ShouldInitFixedSlots(lir, templateObj);
     masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry(),
                         initFixedSlots);
 
@@ -4635,17 +4635,17 @@ static const VMFunction NewGCObjectInfo 
     FunctionInfo<NewGCObjectFn>(js::jit::NewGCObject);
 
 void
 CodeGenerator::visitCreateThisWithTemplate(LCreateThisWithTemplate *lir)
 {
     JSObject *templateObject = lir->mir()->templateObject();
     gc::AllocKind allocKind = templateObject->asTenured().getAllocKind();
     gc::InitialHeap initialHeap = lir->mir()->initialHeap();
-    const js::Class *clasp = templateObject->type()->clasp();
+    const js::Class *clasp = templateObject->getClass();
     Register objReg = ToRegister(lir->output());
     Register tempReg = ToRegister(lir->temp());
 
     OutOfLineCode *ool = oolCallVM(NewGCObjectInfo, lir,
                                    (ArgList(), Imm32(allocKind), Imm32(initialHeap),
                                     ImmPtr(clasp)),
                                    StoreRegisterTo(objReg));
 
@@ -4804,18 +4804,18 @@ CodeGenerator::visitTypedArrayElements(L
 }
 
 void
 CodeGenerator::visitTypedObjectDescr(LTypedObjectDescr *lir)
 {
     Register obj = ToRegister(lir->object());
     Register out = ToRegister(lir->output());
 
-    masm.loadPtr(Address(obj, JSObject::offsetOfType()), out);
-    masm.loadPtr(Address(out, types::TypeObject::offsetOfAddendum()), out);
+    masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), out);
+    masm.loadPtr(Address(out, types::ObjectGroup::offsetOfAddendum()), out);
 }
 
 void
 CodeGenerator::visitTypedObjectElements(LTypedObjectElements *lir)
 {
     Register obj = ToRegister(lir->object());
     Register out = ToRegister(lir->output());
 
@@ -6028,25 +6028,25 @@ CodeGenerator::visitFromCharCode(LFromCh
                   ool->entry());
 
     masm.movePtr(ImmPtr(&GetJitContext()->runtime->staticStrings().unitStaticTable), output);
     masm.loadPtr(BaseIndex(output, code, ScalePointer), output);
 
     masm.bind(ool->rejoin());
 }
 
-typedef JSObject *(*StringSplitFn)(JSContext *, HandleTypeObject, HandleString, HandleString);
+typedef JSObject *(*StringSplitFn)(JSContext *, HandleObjectGroup, HandleString, HandleString);
 static const VMFunction StringSplitInfo = FunctionInfo<StringSplitFn>(js::str_split_string);
 
 void
 CodeGenerator::visitStringSplit(LStringSplit *lir)
 {
     pushArg(ToRegister(lir->separator()));
     pushArg(ToRegister(lir->string()));
-    pushArg(ImmGCPtr(lir->mir()->typeObject()));
+    pushArg(ImmGCPtr(lir->mir()->group()));
 
     callVM(StringSplitInfo, lir);
 }
 
 void
 CodeGenerator::visitInitializedLength(LInitializedLength *lir)
 {
     Address initLength(ToRegister(lir->elements()), ObjectElements::offsetOfInitializedLength());
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -87,17 +87,17 @@ class CodeGenerator : public CodeGenerat
     void visitFloat32ToDouble(LFloat32ToDouble *lir);
     void visitDoubleToFloat32(LDoubleToFloat32 *lir);
     void visitInt32ToFloat32(LInt32ToFloat32 *lir);
     void visitInt32ToDouble(LInt32ToDouble *lir);
     void emitOOLTestObject(Register objreg, Label *ifTruthy, Label *ifFalsy, Register scratch);
     void visitTestOAndBranch(LTestOAndBranch *lir);
     void visitTestVAndBranch(LTestVAndBranch *lir);
     void visitFunctionDispatch(LFunctionDispatch *lir);
-    void visitTypeObjectDispatch(LTypeObjectDispatch *lir);
+    void visitObjectGroupDispatch(LObjectGroupDispatch *lir);
     void visitBooleanToString(LBooleanToString *lir);
     void emitIntToString(Register input, Register output, Label *ool);
     void visitIntToString(LIntToString *lir);
     void visitDoubleToString(LDoubleToString *lir);
     void visitValueToString(LValueToString *lir);
     void visitValueToObjectOrNull(LValueToObjectOrNull *lir);
     void visitInteger(LInteger *lir);
     void visitRegExp(LRegExp *lir);
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -278,19 +278,19 @@ JitRuntime::initialize(JSContext *cx)
     if (!objectPreBarrier_)
         return false;
 
     JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for Shape");
     shapePreBarrier_ = generatePreBarrier(cx, MIRType_Shape);
     if (!shapePreBarrier_)
         return false;
 
-    JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for TypeObject");
-    typeObjectPreBarrier_ = generatePreBarrier(cx, MIRType_TypeObject);
-    if (!typeObjectPreBarrier_)
+    JitSpew(JitSpew_Codegen, "# Emitting Pre Barrier for ObjectGroup");
+    objectGroupPreBarrier_ = generatePreBarrier(cx, MIRType_ObjectGroup);
+    if (!objectGroupPreBarrier_)
         return false;
 
     JitSpew(JitSpew_Codegen, "# Emitting malloc stub");
     mallocStub_ = generateMallocStub(cx);
     if (!mallocStub_)
         return false;
 
     JitSpew(JitSpew_Codegen, "# Emitting free stub");
@@ -1750,17 +1750,17 @@ OffThreadCompilationAvailable(JSContext 
     return cx->runtime()->canUseOffthreadIonCompilation()
         && HelperThreadState().cpuCount > 1
         && CanUseExtraThreads();
 }
 
 static void
 TrackAllProperties(JSContext *cx, JSObject *obj)
 {
-    MOZ_ASSERT(obj->hasSingletonType());
+    MOZ_ASSERT(obj->isSingleton());
 
     for (Shape::Range<NoGC> range(obj->lastProperty()); !range.empty(); range.popFront())
         types::EnsureTrackPropertyTypes(cx, obj, range.front().propid());
 }
 
 static void
 TrackPropertiesForSingletonScopes(JSContext *cx, JSScript *script, BaselineFrame *baselineFrame)
 {
@@ -1768,24 +1768,24 @@ TrackPropertiesForSingletonScopes(JSCont
     // could access are tracked. These are generally accessed through
     // ALIASEDVAR operations in baseline and will not be tracked even if they
     // have been accessed in baseline code.
     JSObject *environment = script->functionNonDelazifying()
                             ? script->functionNonDelazifying()->environment()
                             : nullptr;
 
     while (environment && !environment->is<GlobalObject>()) {
-        if (environment->is<CallObject>() && environment->hasSingletonType())
+        if (environment->is<CallObject>() && environment->isSingleton())
             TrackAllProperties(cx, environment);
         environment = environment->enclosingScope();
     }
 
     if (baselineFrame) {
         JSObject *scope = baselineFrame->scopeChain();
-        if (scope->is<CallObject>() && scope->hasSingletonType())
+        if (scope->is<CallObject>() && scope->isSingleton())
             TrackAllProperties(cx, scope);
     }
 }
 
 static void
 TrackIonAbort(JSContext *cx, JSScript *script, jsbytecode *pc, const char *message)
 {
     if (!cx->runtime()->jitRuntime()->isOptimizationTrackingEnabled(cx->runtime()))
@@ -1903,19 +1903,19 @@ IonCompile(JSContext *cx, JSScript *scri
     builder->clearForBackEnd();
 
     if (!succeeded) {
         AbortReason reason = builder->abortReason();
         if (reason == AbortReason_NewScriptProperties) {
             // Some type was accessed which needs the new script properties
             // analysis to be performed. Do this now and we will try to build
             // again shortly.
-            const MIRGenerator::TypeObjectVector &types = builder->abortedNewScriptPropertiesTypes();
-            for (size_t i = 0; i < types.length(); i++) {
-                if (!types[i]->newScript()->maybeAnalyze(cx, types[i], nullptr, /* force = */ true))
+            const MIRGenerator::ObjectGroupVector &groups = builder->abortedNewScriptPropertiesGroups();
+            for (size_t i = 0; i < groups.length(); i++) {
+                if (!groups[i]->newScript()->maybeAnalyze(cx, groups[i], nullptr, /* force = */ true))
                     return AbortReason_Alloc;
             }
         }
 
         if (builder->hadActionableAbort()) {
             JSScript *abortScript;
             jsbytecode *abortPc;
             const char *abortMessage;
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -2059,17 +2059,17 @@ IsResumableMIRType(MIRType type)
       case MIRType_MagicHole:
       case MIRType_MagicIsConstructing:
       case MIRType_ObjectOrNull:
       case MIRType_None:
       case MIRType_Slots:
       case MIRType_Elements:
       case MIRType_Pointer:
       case MIRType_Shape:
-      case MIRType_TypeObject:
+      case MIRType_ObjectGroup:
       case MIRType_Float32x4:
       case MIRType_Int32x4:
       case MIRType_Doublex2:
         return false;
     }
     MOZ_CRASH("Unknown MIRType.");
 }
 
@@ -2875,17 +2875,17 @@ jit::ConvertLinearInequality(TempAllocat
     MCompare *compare = MCompare::New(alloc, lhsDef, rhsDef, op);
     block->insertAtEnd(compare);
     compare->setCompareType(MCompare::Compare_Int32);
 
     return compare;
 }
 
 static bool
-AnalyzePoppedThis(JSContext *cx, types::TypeObject *type,
+AnalyzePoppedThis(JSContext *cx, types::ObjectGroup *group,
                   MDefinition *thisValue, MInstruction *ins, bool definitelyExecuted,
                   HandlePlainObject baseobj,
                   Vector<types::TypeNewScript::Initializer> *initializerList,
                   Vector<PropertyName *> *accessedProperties,
                   bool *phandled)
 {
     // Determine the effect that a use of the |this| value when calling |new|
     // on a script has on the properties definitely held by the new object.
@@ -2924,17 +2924,17 @@ AnalyzePoppedThis(JSContext *cx, types::
         if (GetGCKindSlots(gc::GetGCObjectKind(baseobj->slotSpan() + 1)) <= baseobj->slotSpan())
             return true;
 
         // Assignments to new properties must always execute.
         if (!definitelyExecuted)
             return true;
 
         RootedId id(cx, NameToId(setprop->name()));
-        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
+        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, group, id)) {
             // The prototype chain already contains a getter/setter for this
             // property, or type information is too imprecise.
             return true;
         }
 
         // Add the property to the object, being careful not to update type information.
         DebugOnly<unsigned> slotSpan = baseobj->slotSpan();
         MOZ_ASSERT(!baseobj->containsPure(id));
@@ -2984,17 +2984,17 @@ AnalyzePoppedThis(JSContext *cx, types::
          *   is not later added as one. Since the definite properties are
          *   added to the object at the point of its creation, reading a
          *   definite property before it is assigned could incorrectly hit.
          */
         RootedId id(cx, NameToId(get->name()));
         if (!baseobj->lookup(cx, id) && !accessedProperties->append(get->name()))
             return false;
 
-        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
+        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, group, id)) {
             // The |this| value can escape if any property reads it does go
             // through a getter.
             return true;
         }
 
         *phandled = true;
         return true;
     }
@@ -3011,17 +3011,17 @@ static int
 CmpInstructions(const void *a, const void *b)
 {
     return (*static_cast<MInstruction * const *>(a))->id() -
            (*static_cast<MInstruction * const *>(b))->id();
 }
 
 bool
 jit::AnalyzeNewScriptDefiniteProperties(JSContext *cx, JSFunction *fun,
-                                        types::TypeObject *type, HandlePlainObject baseobj,
+                                        types::ObjectGroup *group, HandlePlainObject baseobj,
                                         Vector<types::TypeNewScript::Initializer> *initializerList)
 {
     MOZ_ASSERT(cx->zone()->types.activeAnalysis);
 
     // When invoking 'new' on the specified script, try to find some properties
     // which will definitely be added to the created object before it has a
     // chance to escape and be accessed elsewhere.
 
@@ -3048,17 +3048,17 @@ jit::AnalyzeNewScriptDefiniteProperties(
     if (!script->hasBaselineScript()) {
         MethodStatus status = BaselineCompile(cx, script);
         if (status == Method_Error)
             return false;
         if (status != Method_Compiled)
             return true;
     }
 
-    types::TypeScript::SetThis(cx, script, types::Type::ObjectType(type));
+    types::TypeScript::SetThis(cx, script, types::Type::ObjectType(group));
 
     MIRGraph graph(&temp);
     InlineScriptTree *inlineScriptTree = InlineScriptTree::New(&temp, nullptr, nullptr, script);
     if (!inlineScriptTree)
         return false;
 
     CompileInfo info(script, fun,
                      /* osrPc = */ nullptr, /* constructing = */ false,
@@ -3153,17 +3153,17 @@ jit::AnalyzeNewScriptDefiniteProperties(
         // an access will always execute in the script, if it executes multiple
         // times then we can get confused when rolling back objects while
         // clearing the new script information.
         if (ins->block()->loopDepth() != 0)
             definitelyExecuted = false;
 
         bool handled = false;
         size_t slotSpan = baseobj->slotSpan();
-        if (!AnalyzePoppedThis(cx, type, thisValue, ins, definitelyExecuted,
+        if (!AnalyzePoppedThis(cx, group, thisValue, ins, definitelyExecuted,
                                baseobj, initializerList, &accessedProperties, &handled))
         {
             return false;
         }
         if (!handled)
             break;
 
         if (slotSpan != baseobj->slotSpan()) {
@@ -3181,17 +3181,17 @@ jit::AnalyzeNewScriptDefiniteProperties(
         for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
             // Inlining decisions made after the last new property was added to
             // the object don't need to be frozen.
             if (block->id() > lastAddedBlock)
                 break;
             if (MResumePoint *rp = block->callerResumePoint()) {
                 if (block->numPredecessors() == 1 && block->getPredecessor(0) == rp->block()) {
                     JSScript *script = rp->block()->info().script();
-                    if (!types::AddClearDefiniteFunctionUsesInScript(cx, type, script, block->info().script()))
+                    if (!types::AddClearDefiniteFunctionUsesInScript(cx, group, script, block->info().script()))
                         return false;
                 }
             }
         }
     }
 
     return true;
 }
--- a/js/src/jit/IonAnalysis.h
+++ b/js/src/jit/IonAnalysis.h
@@ -165,17 +165,17 @@ ConvertLinearSum(TempAllocator &alloc, M
 
 // Convert the test 'sum >= 0' to a comparison, adding any necessary
 // instructions to the end of block.
 MCompare *
 ConvertLinearInequality(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum);
 
 bool
 AnalyzeNewScriptDefiniteProperties(JSContext *cx, JSFunction *fun,
-                                   types::TypeObject *type, HandlePlainObject baseobj,
+                                   types::ObjectGroup *group, HandlePlainObject baseobj,
                                    Vector<types::TypeNewScript::Initializer> *initializerList);
 
 bool
 AnalyzeArgumentsUsage(JSContext *cx, JSScript *script);
 
 bool
 DeadIfUnused(const MDefinition *def);
 
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -65,17 +65,17 @@ jit::NewBaselineFrameInspector(TempAlloc
         return nullptr;
 
     // Note: copying the actual values into a temporary structure for use
     // during compilation could capture nursery pointers, so the values' types
     // are recorded instead.
 
     inspector->thisType = types::GetMaybeUntrackedValueType(frame->thisValue());
 
-    if (frame->scopeChain()->hasSingletonType())
+    if (frame->scopeChain()->isSingleton())
         inspector->singletonScopeChain = frame->scopeChain();
 
     JSScript *script = frame->script();
 
     if (script->functionNonDelazifying()) {
         if (!inspector->argTypes.reserve(frame->numFormalArgs()))
             return nullptr;
         for (size_t i = 0; i < frame->numFormalArgs(); i++) {
@@ -311,17 +311,17 @@ IonBuilder::CFGState::TableSwitch(jsbyte
 }
 
 JSFunction *
 IonBuilder::getSingleCallTarget(types::TemporaryTypeSet *calleeTypes)
 {
     if (!calleeTypes)
         return nullptr;
 
-    JSObject *obj = calleeTypes->getSingleton();
+    JSObject *obj = calleeTypes->maybeSingleton();
     if (!obj || !obj->is<JSFunction>())
         return nullptr;
 
     return &obj->as<JSFunction>();
 }
 
 bool
 IonBuilder::getPolyCallTargets(types::TemporaryTypeSet *calleeTypes, bool constructing,
@@ -338,31 +338,31 @@ IonBuilder::getPolyCallTargets(types::Te
     unsigned objCount = calleeTypes->getObjectCount();
 
     if (objCount == 0 || objCount > maxTargets)
         return true;
 
     if (!targets.reserve(objCount))
         return false;
     for (unsigned i = 0; i < objCount; i++) {
-        JSObject *obj = calleeTypes->getSingleObject(i);
+        JSObject *obj = calleeTypes->getSingleton(i);
         if (obj) {
-            MOZ_ASSERT(obj->hasSingletonType());
+            MOZ_ASSERT(obj->isSingleton());
         } else {
-            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
-            if (!typeObj)
+            types::ObjectGroup *group = calleeTypes->getGroup(i);
+            if (!group)
                 continue;
 
-            obj = typeObj->maybeInterpretedFunction();
+            obj = group->maybeInterpretedFunction();
             if (!obj) {
                 targets.clear();
                 return true;
             }
 
-            MOZ_ASSERT(!obj->hasSingletonType());
+            MOZ_ASSERT(!obj->isSingleton());
         }
 
         // Don't optimize if the callee is not callable or constructable per
         // the manner it is being invoked, so that CallKnown does not have to
         // handle these cases (they will always throw).
         if (constructing ? !obj->isConstructor() : !obj->isCallable()) {
             targets.clear();
             return true;
@@ -484,18 +484,18 @@ IonBuilder::canInlineTarget(JSFunction *
         return DontInline(inlineScript, "Script that needs an arguments object");
     }
 
     if (inlineScript->isDebuggee()) {
         trackOptimizationOutcome(TrackedOutcome::CantInlineDebuggee);
         return DontInline(inlineScript, "Script is debuggee");
     }
 
-    types::TypeObjectKey *targetType = types::TypeObjectKey::get(target);
-    if (targetType->unknownProperties()) {
+    types::ObjectGroupKey *targetKey = types::ObjectGroupKey::get(target);
+    if (targetKey->unknownProperties()) {
         trackOptimizationOutcome(TrackedOutcome::CantInlineUnknownProps);
         return DontInline(inlineScript, "Target type has unknown properties");
     }
 
     return InliningDecision_Inline;
 }
 
 void
@@ -841,17 +841,17 @@ IonBuilder::build()
     replaceMaybeFallbackFunctionGetter(nullptr);
 
     if (!maybeAddOsrTypeBarriers())
         return false;
 
     if (!processIterators())
         return false;
 
-    if (!abortedNewScriptPropertiesTypes().empty()) {
+    if (!abortedNewScriptPropertiesGroups().empty()) {
         MOZ_ASSERT(!info().isAnalysis());
         abortReason_ = AbortReason_NewScriptProperties;
         return false;
     }
 
     if (shouldForceAbort()) {
         abortReason_ = AbortReason_Disable;
         return false;
@@ -996,17 +996,17 @@ IonBuilder::buildInline(IonBuilder *call
     insertRecompileCheck();
 
     if (!traverseBytecode())
         return false;
 
     // Discard unreferenced & pre-allocated resume points.
     replaceMaybeFallbackFunctionGetter(nullptr);
 
-    if (!abortedNewScriptPropertiesTypes().empty()) {
+    if (!abortedNewScriptPropertiesGroups().empty()) {
         MOZ_ASSERT(!info().isAnalysis());
         abortReason_ = AbortReason_NewScriptProperties;
         return false;
     }
 
     if (shouldForceAbort()) {
         abortReason_ = AbortReason_Disable;
         return false;
@@ -3449,17 +3449,17 @@ IonBuilder::improveTypesAtCompare(MCompa
             // If TypeSet emulates undefined, then we cannot filter the objects.
             if (subject->resultTypeSet()->maybeEmulatesUndefined(constraints()))
                 flags |= types::TYPE_FLAG_ANYOBJECT;
         }
 
         if (altersNull)
             flags |= types::TYPE_FLAG_NULL;
 
-        types::TemporaryTypeSet base(flags, static_cast<types::TypeObjectKey**>(nullptr));
+        types::TemporaryTypeSet base(flags, static_cast<types::ObjectGroupKey**>(nullptr));
         type = types::TypeSet::intersectSets(&base, subject->resultTypeSet(), alloc_->lifoAlloc());
     }
 
     if (!type)
         return false;
 
     return replaceTypeSet(subject, type, test);
 }
@@ -3578,17 +3578,17 @@ IonBuilder::improveTypesAtTest(MDefiniti
         // typeset. The first part takes care of primitives and AnyObject,
         // while the second line specific (type)objects.
         if (!oldType->hasAnyFlag(~flags & types::TYPE_FLAG_BASE_MASK) &&
             (oldType->maybeEmulatesUndefined(constraints()) || !oldType->maybeObject()))
         {
             return true;
         }
 
-        types::TemporaryTypeSet base(flags, static_cast<types::TypeObjectKey**>(nullptr));
+        types::TemporaryTypeSet base(flags, static_cast<types::ObjectGroupKey**>(nullptr));
         type = types::TypeSet::intersectSets(&base, oldType, alloc_->lifoAlloc());
     }
 
     return replaceTypeSet(ins, type, test);
 }
 
 bool
 IonBuilder::jsop_label()
@@ -4486,20 +4486,20 @@ IonBuilder::inlineScriptedCall(CallInfo 
         // Inlining the callee failed. Mark the callee as uninlineable only if
         // the inlining was aborted for a non-exception reason.
         if (inlineBuilder.abortReason_ == AbortReason_Disable) {
             calleeScript->setUninlineable();
             abortReason_ = AbortReason_Inlining;
         } else if (inlineBuilder.abortReason_ == AbortReason_Inlining) {
             abortReason_ = AbortReason_Inlining;
         } else if (inlineBuilder.abortReason_ == AbortReason_NewScriptProperties) {
-            const TypeObjectVector &types = inlineBuilder.abortedNewScriptPropertiesTypes();
-            MOZ_ASSERT(!types.empty());
-            for (size_t i = 0; i < types.length(); i++)
-                addAbortedNewScriptPropertiesType(types[i]);
+            const ObjectGroupVector &groups = inlineBuilder.abortedNewScriptPropertiesGroups();
+            MOZ_ASSERT(!groups.empty());
+            for (size_t i = 0; i < groups.length(); i++)
+                addAbortedNewScriptPropertiesGroup(groups[i]);
             abortReason_ = AbortReason_NewScriptProperties;
         }
 
         return false;
     }
 
     // Create return block.
     jsbytecode *postCall = GetNextPc(pc);
@@ -4732,18 +4732,18 @@ IonBuilder::makeInliningDecision(JSObjec
             trackOptimizationOutcome(TrackedOutcome::CantInlineNotHot);
             JitSpew(JitSpew_Inlining, "Cannot inline %s:%u: callee is insufficiently hot.",
                     targetScript->filename(), targetScript->lineno());
             return InliningDecision_WarmUpCountTooLow;
         }
     }
 
     // TI calls ObjectStateChange to trigger invalidation of the caller.
-    types::TypeObjectKey *targetType = types::TypeObjectKey::get(target);
-    targetType->watchStateChangeForInlinedCall(constraints());
+    types::ObjectGroupKey *targetKey = types::ObjectGroupKey::get(target);
+    targetKey->watchStateChangeForInlinedCall(constraints());
 
     return InliningDecision_Inline;
 }
 
 bool
 IonBuilder::selectInliningTargets(const ObjectVector &targets, CallInfo &callInfo, BoolVector &choiceSet,
                                   uint32_t *numInlineable)
 {
@@ -4859,34 +4859,34 @@ class WrapMGetPropertyCache
     MGetPropertyCache *get() {
         return cache_;
     }
     MGetPropertyCache *operator->() {
         return get();
     }
 
     // This function returns the cache given to the constructor if the
-    // GetPropertyCache can be moved into the TypeObject fallback path.
+    // GetPropertyCache can be moved into the ObjectGroup fallback path.
     MGetPropertyCache *moveableCache(bool hasTypeBarrier, MDefinition *thisDef) {
         // If we have unhandled uses of the MGetPropertyCache, then we cannot
-        // move it to the TypeObject fallback path.
+        // move it to the ObjectGroup fallback path.
         if (!hasTypeBarrier) {
             if (cache_->hasUses())
                 return nullptr;
         } else {
             // There is the TypeBarrier consumer, so we check that this is the
             // only consumer.
             MOZ_ASSERT(cache_->hasUses());
             if (!cache_->hasOneUse())
                 return nullptr;
         }
 
         // If the this-object is not identical to the object of the
         // MGetPropertyCache, then we cannot use the InlinePropertyTable, or if
-        // we do not yet have enough information from the TypeObject.
+        // we do not yet have enough information from the ObjectGroup.
         if (!CanInlineGetPropertyCache(cache_, thisDef))
             return nullptr;
 
         MGetPropertyCache *ret = cache_;
         cache_ = nullptr;
         return ret;
     }
 };
@@ -4960,17 +4960,17 @@ IonBuilder::inlineCallsite(const ObjectV
     if (targets.empty()) {
         trackOptimizationAttempt(TrackedStrategy::Call_Inline);
         trackOptimizationOutcome(TrackedOutcome::CantInlineNoTarget);
         return InliningStatus_NotInlined;
     }
 
     // Is the function provided by an MGetPropertyCache?
     // If so, the cache may be movable to a fallback path, with a dispatch
-    // instruction guarding on the incoming TypeObject.
+    // instruction guarding on the incoming ObjectGroup.
     WrapMGetPropertyCache propCache(getInlineableGetPropertyCache(callInfo));
     keepFallbackFunctionGetter(propCache.get());
 
     // Inline single targets -- unless they derive from a cache, in which case
     // avoiding the cache and guarding is still faster.
     if (!propCache.get() && targets.length() == 1) {
         JSObject *target = targets[0];
 
@@ -4992,17 +4992,17 @@ IonBuilder::inlineCallsite(const ObjectV
         // Inlining will elminate uses of the original callee, but it needs to
         // be preserved in phis if we bail out.  Mark the old callee definition as
         // implicitly used to ensure this happens.
         callInfo.fun()->setImplicitlyUsedUnchecked();
 
         // If the callee is not going to be a lambda (which may vary across
         // different invocations), then the callee definition can be replaced by a
         // constant.
-        if (target->hasSingletonType()) {
+        if (target->isSingleton()) {
             // Replace the function with an MConstant.
             MConstant *constFun = constant(ObjectValue(*target));
             callInfo.setFun(constFun);
         }
 
         return inlineSingleCall(callInfo, target);
     }
 
@@ -5042,26 +5042,26 @@ IonBuilder::inlineGenericFallback(JSFunc
     if (!makeCall(target, fallbackInfo, clonedAtCallsite))
         return false;
 
     // Pass return block to caller as |current|.
     return true;
 }
 
 bool
-IonBuilder::inlineTypeObjectFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock,
-                                     MTypeObjectDispatch *dispatch, MGetPropertyCache *cache,
+IonBuilder::inlineObjectGroupFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock,
+                                     MObjectGroupDispatch *dispatch, MGetPropertyCache *cache,
                                      MBasicBlock **fallbackTarget)
 {
     // Getting here implies the following:
     // 1. The call function is an MGetPropertyCache, or an MGetPropertyCache
     //    followed by an MTypeBarrier.
     MOZ_ASSERT(callInfo.fun()->isGetPropertyCache() || callInfo.fun()->isTypeBarrier());
 
-    // 2. The MGetPropertyCache has inlineable cases by guarding on the TypeObject.
+    // 2. The MGetPropertyCache has inlineable cases by guarding on the ObjectGroup.
     MOZ_ASSERT(dispatch->numCases() > 0);
 
     // 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only
     //    have at most a single use.
     MOZ_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses());
     MOZ_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse());
 
     // This means that no resume points yet capture the MGetPropertyCache,
@@ -5172,17 +5172,17 @@ IonBuilder::inlineCalls(CallInfo &callIn
         propTable->trimToTargets(originals);
         if (propTable->numEntries() == 0)
             maybeCache = nullptr;
     }
 
     // Generate a dispatch based on guard kind.
     MDispatchInstruction *dispatch;
     if (maybeCache) {
-        dispatch = MTypeObjectDispatch::New(alloc(), maybeCache->object(), maybeCache->propTable());
+        dispatch = MObjectGroupDispatch::New(alloc(), maybeCache->object(), maybeCache->propTable());
         callInfo.fun()->setImplicitlyUsedUnchecked();
     } else {
         dispatch = MFunctionDispatch::New(alloc(), callInfo.fun());
     }
 
     // Generate a return block to host the rval-collecting MPhi.
     jsbytecode *postCall = GetNextPc(pc);
     MBasicBlock *returnBlock = newBlock(nullptr, postCall);
@@ -5206,17 +5206,17 @@ IonBuilder::inlineCalls(CallInfo &callIn
     uint32_t count = 1; // Possible fallback block.
     for (uint32_t i = 0; i < targets.length(); i++) {
         if (choiceSet[i])
             count++;
     }
     retPhi->reserveLength(count);
 
     // During inlining the 'this' value is assigned a type set which is
-    // specialized to the type objects which can generate that inlining target.
+    // specialized to the groups which can generate that inlining target.
     // After inlining the original type set is restored.
     types::TemporaryTypeSet *cacheObjectTypeSet =
         maybeCache ? maybeCache->object()->resultTypeSet() : nullptr;
 
     // Inline each of the inlineable targets.
     MOZ_ASSERT(targets.length() == originals.length());
     for (uint32_t i = 0; i < targets.length(); i++) {
         // Target must be inlineable.
@@ -5243,17 +5243,17 @@ IonBuilder::inlineCalls(CallInfo &callIn
         MBasicBlock *inlineBlock = newBlock(dispatchBlock, pc);
         if (!inlineBlock)
             return false;
 
         // Create a function MConstant to use in the entry ResumePoint. If we
         // can't use a constant, add a no-op MPolyInlineGuard, to prevent
         // hoisting scope chain gets above the dispatch instruction.
         MInstruction *funcDef;
-        if (target->hasSingletonType())
+        if (target->isSingleton())
             funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints());
         else
             funcDef = MPolyInlineGuard::New(alloc(), callInfo.fun());
 
         funcDef->setImplicitlyUsedUnchecked();
         dispatchBlock->add(funcDef);
 
         // Use the inlined callee in the inline resume point and on stack.
@@ -5296,18 +5296,18 @@ IonBuilder::inlineCalls(CallInfo &callIn
         // inlineSingleCall() changed |current| to the inline return block.
         MBasicBlock *inlineReturnBlock = current;
         setCurrent(dispatchBlock);
 
         // Connect the inline path to the returnBlock.
         //
         // Note that guarding is on the original function pointer even
         // if there is a clone, since cloning occurs at the callsite.
-        types::TypeObject *funType = original->hasSingletonType() ? nullptr : original->type();
-        dispatch->addCase(original, funType, inlineBlock);
+        types::ObjectGroup *funcGroup = original->isSingleton() ? nullptr : original->group();
+        dispatch->addCase(original, funcGroup, inlineBlock);
 
         MDefinition *retVal = inlineReturnBlock->peek(-1);
         retPhi->addInput(retVal);
         inlineReturnBlock->end(MGoto::New(alloc(), returnBlock));
         if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock))
             return false;
     }
 
@@ -5323,22 +5323,22 @@ IonBuilder::inlineCalls(CallInfo &callIn
         // If all paths were vetoed, output only a generic fallback path.
         if (propTable->numEntries() == 0) {
             MOZ_ASSERT(dispatch->numCases() == 0);
             maybeCache = nullptr;
         }
     }
 
     // If necessary, generate a fallback path.
-    // MTypeObjectDispatch always uses a fallback path.
+    // MObjectGroupDispatch always uses a fallback path.
     if (maybeCache || dispatch->numCases() < targets.length()) {
         // Generate fallback blocks, and set |current| to the fallback return block.
         if (maybeCache) {
             MBasicBlock *fallbackTarget;
-            if (!inlineTypeObjectFallback(callInfo, dispatchBlock, (MTypeObjectDispatch *)dispatch,
+            if (!inlineObjectGroupFallback(callInfo, dispatchBlock, (MObjectGroupDispatch *)dispatch,
                                           maybeCache, &fallbackTarget))
             {
                 return false;
             }
             dispatch->addFallback(fallbackTarget);
         } else {
             JSFunction *remaining = nullptr;
             bool clonedAtCallsite = false;
@@ -5349,17 +5349,17 @@ IonBuilder::inlineCalls(CallInfo &callIn
                 for (uint32_t i = 0; i < originals.length(); i++) {
                     if (choiceSet[i])
                         continue;
 
                     MOZ_ASSERT(!remaining);
 
                     if (targets[i]->is<JSFunction>()) {
                         JSFunction *target = &targets[i]->as<JSFunction>();
-                        if (target->hasSingletonType()) {
+                        if (target->isSingleton()) {
                             remaining = target;
                             clonedAtCallsite = target != originals[i];
                         }
                     }
                     break;
                 }
             }
 
@@ -5490,22 +5490,22 @@ IonBuilder::createThisScripted(MDefiniti
     current->add(createThis);
 
     return createThis;
 }
 
 JSObject *
 IonBuilder::getSingletonPrototype(JSFunction *target)
 {
-    types::TypeObjectKey *targetType = types::TypeObjectKey::get(target);
-    if (targetType->unknownProperties())
+    types::ObjectGroupKey *targetKey = types::ObjectGroupKey::get(target);
+    if (targetKey->unknownProperties())
         return nullptr;
 
     jsid protoid = NameToId(names().prototype);
-    types::HeapTypeSetKey protoProperty = targetType->property(protoid);
+    types::HeapTypeSetKey protoProperty = targetKey->property(protoid);
 
     return protoProperty.singleton(constraints());
 }
 
 MDefinition *
 IonBuilder::createThisScriptedSingleton(JSFunction *target, MDefinition *callee)
 {
     // Get the singleton prototype (if exists)
@@ -5516,30 +5516,30 @@ IonBuilder::createThisScriptedSingleton(
     JSObject *templateObject = inspector->getTemplateObject(pc);
     if (!templateObject)
         return nullptr;
     if (!templateObject->is<PlainObject>() && !templateObject->is<UnboxedPlainObject>())
         return nullptr;
     if (!templateObject->hasTenuredProto() || templateObject->getProto() != proto)
         return nullptr;
 
-    types::TypeObjectKey *templateObjectType = types::TypeObjectKey::get(templateObject->type());
-    if (templateObjectType->hasFlags(constraints(), types::OBJECT_FLAG_NEW_SCRIPT_CLEARED))
+    types::ObjectGroupKey *templateObjectKey = types::ObjectGroupKey::get(templateObject->group());
+    if (templateObjectKey->hasFlags(constraints(), types::OBJECT_FLAG_NEW_SCRIPT_CLEARED))
         return nullptr;
 
     types::StackTypeSet *thisTypes = types::TypeScript::ThisTypes(target->nonLazyScript());
     if (!thisTypes || !thisTypes->hasType(types::Type::ObjectType(templateObject)))
         return nullptr;
 
     // Generate an inline path to create a new |this| object with
     // the given singleton prototype.
     MConstant *templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
     MCreateThisWithTemplate *createThis =
         MCreateThisWithTemplate::New(alloc(), constraints(), templateConst,
-                                     templateObject->type()->initialHeap(constraints()));
+                                     templateObject->group()->initialHeap(constraints()));
     current->add(templateConst);
     current->add(createThis);
 
     return createThis;
 }
 
 MDefinition *
 IonBuilder::createThis(JSFunction *target, MDefinition *callee)
@@ -5883,24 +5883,24 @@ IonBuilder::testShouldDOMCall(types::Typ
     DOMInstanceClassHasProtoAtDepth instanceChecker =
         compartment->runtime()->DOMcallbacks()->instanceClassMatchesProto;
 
     const JSJitInfo *jinfo = func->jitInfo();
     if (jinfo->type() != opType)
         return false;
 
     for (unsigned i = 0; i < inTypes->getObjectCount(); i++) {
-        types::TypeObjectKey *curType = inTypes->getObject(i);
-        if (!curType)
+        types::ObjectGroupKey *key = inTypes->getObject(i);
+        if (!key)
             continue;
 
-        if (!curType->hasStableClassAndProto(constraints()))
+        if (!key->hasStableClassAndProto(constraints()))
             return false;
 
-        if (!instanceChecker(curType->clasp(), jinfo->protoID, jinfo->depth))
+        if (!instanceChecker(key->clasp(), jinfo->protoID, jinfo->depth))
             return false;
     }
 
     return true;
 }
 
 static bool
 ArgumentTypesMatch(MDefinition *def, types::StackTypeSet *calleeTypes)
@@ -6190,33 +6190,33 @@ IonBuilder::jsop_newarray(uint32_t count
             current->add(unknown);
             current->push(unknown);
             return true;
         }
         return abort("No template object for NEWARRAY");
     }
 
     MOZ_ASSERT(templateObject->is<ArrayObject>());
-    if (templateObject->type()->unknownProperties()) {
+    if (templateObject->group()->unknownProperties()) {
         if (info().analysisMode() == Analysis_ArgumentsUsage) {
             MUnknownValue *unknown = MUnknownValue::New(alloc());
             current->add(unknown);
             current->push(unknown);
             return true;
         }
         // We will get confused in jsop_initelem_array if we can't find the
-        // type object being initialized.
+        // object group being initialized.
         return abort("New array has unknown properties");
     }
 
     MConstant *templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
     current->add(templateConst);
 
     MNewArray *ins = MNewArray::New(alloc(), constraints(), count, templateConst,
-                                    templateObject->type()->initialHeap(constraints()),
+                                    templateObject->group()->initialHeap(constraints()),
                                     NewArray_FullyAllocating);
     current->add(ins);
     current->push(ins);
 
     types::TemporaryTypeSet::DoubleConversion conversion =
         ins->resultTypeSet()->convertDoubleElements(constraints());
 
     if (conversion == types::TemporaryTypeSet::AlwaysConvertToDoubles)
@@ -6231,21 +6231,21 @@ IonBuilder::jsop_newarray_copyonwrite()
 {
     ArrayObject *templateObject = types::GetCopyOnWriteObject(script(), pc);
 
     // The baseline compiler should have ensured the template object has a type
     // with the copy on write flag set already. During the arguments usage
     // analysis the baseline compiler hasn't run yet, however, though in this
     // case the template object's type doesn't matter.
     MOZ_ASSERT_IF(info().analysisMode() != Analysis_ArgumentsUsage,
-                  templateObject->type()->hasAnyFlags(types::OBJECT_FLAG_COPY_ON_WRITE));
+                  templateObject->group()->hasAnyFlags(types::OBJECT_FLAG_COPY_ON_WRITE));
 
     MNewArrayCopyOnWrite *ins =
         MNewArrayCopyOnWrite::New(alloc(), constraints(), templateObject,
-                                  templateObject->type()->initialHeap(constraints()));
+                                  templateObject->group()->initialHeap(constraints()));
 
     current->add(ins);
     current->push(ins);
 
     return true;
 }
 
 bool
@@ -6261,19 +6261,19 @@ IonBuilder::jsop_newobject()
         }
         return abort("No template object for NEWOBJECT");
     }
 
     MOZ_ASSERT(templateObject->is<PlainObject>());
     MConstant *templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
     current->add(templateConst);
     MNewObject *ins = MNewObject::New(alloc(), constraints(), templateConst,
-                                      templateObject->hasSingletonType()
+                                      templateObject->isSingleton()
                                       ? gc::TenuredHeap
-                                      : templateObject->type()->initialHeap(constraints()),
+                                      : templateObject->group()->initialHeap(constraints()),
                                       MNewObject::ObjectLiteral);
 
     current->add(ins);
     current->push(ins);
 
     return resumeAfter(ins);
 }
 
@@ -6298,17 +6298,17 @@ IonBuilder::jsop_initelem_array()
 
     // Make sure that arrays have the type being written to them by the
     // intializer, and that arrays are marked as non-packed when writing holes
     // to them during initialization.
     bool needStub = false;
     if (obj->isUnknownValue()) {
         needStub = true;
     } else {
-        types::TypeObjectKey *initializer = obj->resultTypeSet()->getObject(0);
+        types::ObjectGroupKey *initializer = obj->resultTypeSet()->getObject(0);
         if (value->type() == MIRType_MagicHole) {
             if (!initializer->hasFlags(constraints(), types::OBJECT_FLAG_NON_PACKED))
                 needStub = true;
         } else if (!initializer->unknownProperties()) {
             types::HeapTypeSetKey elemTypes = initializer->property(JSID_VOID);
             if (!TypeSetIncludes(elemTypes.maybeTypes(), value->type(), value->resultTypeSet())) {
                 elemTypes.freeze(constraints());
                 needStub = true;
@@ -6882,26 +6882,26 @@ IonBuilder::testSingletonProperty(JSObje
     // read on accesses to the object. If the property is later deleted or
     // reconfigured as a getter/setter then the type information for the
     // property will change and trigger invalidation.
 
     while (obj) {
         if (!ClassHasEffectlessLookup(obj->getClass(), name))
             return nullptr;
 
-        types::TypeObjectKey *objType = types::TypeObjectKey::get(obj);
+        types::ObjectGroupKey *objKey = types::ObjectGroupKey::get(obj);
         if (analysisContext)
-            objType->ensureTrackedProperty(analysisContext, NameToId(name));
-
-        if (objType->unknownProperties())
+            objKey->ensureTrackedProperty(analysisContext, NameToId(name));
+
+        if (objKey->unknownProperties())
             return nullptr;
 
-        types::HeapTypeSetKey property = objType->property(NameToId(name));
+        types::HeapTypeSetKey property = objKey->property(NameToId(name));
         if (property.isOwnProperty(constraints())) {
-            if (obj->hasSingletonType())
+            if (obj->isSingleton())
                 return property.singleton(constraints());
             return nullptr;
         }
 
         if (ClassHasResolveHook(compartment, obj->getClass(), name))
             return nullptr;
 
         if (!obj->hasTenuredProto())
@@ -6922,17 +6922,17 @@ IonBuilder::testSingletonPropertyTypes(M
 
     *testObject = false;
     *testString = false;
 
     types::TemporaryTypeSet *types = obj->resultTypeSet();
     if (types && types->unknownObject())
         return false;
 
-    JSObject *objectSingleton = types ? types->getSingleton() : nullptr;
+    JSObject *objectSingleton = types ? types->maybeSingleton() : nullptr;
     if (objectSingleton)
         return testSingletonProperty(objectSingleton, name) == singleton;
 
     JSProtoKey key;
     switch (obj->type()) {
       case MIRType_String:
         key = JSProto_String;
         break;
@@ -6963,34 +6963,34 @@ IonBuilder::testSingletonPropertyTypes(M
 
         if (!types->maybeObject())
             return false;
 
         // For property accesses which may be on many objects, we just need to
         // find a prototype common to all the objects; if that prototype
         // has the singleton property, the access will not be on a missing property.
         for (unsigned i = 0; i < types->getObjectCount(); i++) {
-            types::TypeObjectKey *object = types->getObject(i);
-            if (!object)
+            types::ObjectGroupKey *key = types->getObject(i);
+            if (!key)
                 continue;
             if (analysisContext)
-                object->ensureTrackedProperty(analysisContext, NameToId(name));
-
-            const Class *clasp = object->clasp();
+                key->ensureTrackedProperty(analysisContext, NameToId(name));
+
+            const Class *clasp = key->clasp();
             if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name))
                 return false;
-            if (object->unknownProperties())
+            if (key->unknownProperties())
                 return false;
-            types::HeapTypeSetKey property = object->property(NameToId(name));
+            types::HeapTypeSetKey property = key->property(NameToId(name));
             if (property.isOwnProperty(constraints()))
                 return false;
 
-            if (!object->hasTenuredProto())
+            if (!key->hasTenuredProto())
                 return false;
-            if (JSObject *proto = object->proto().toObjectOrNull()) {
+            if (JSObject *proto = key->proto().toObjectOrNull()) {
                 // Test this type.
                 if (testSingletonProperty(proto, name) != singleton)
                     return false;
             } else {
                 // Can't be on the prototype chain with no prototypes...
                 return false;
             }
         }
@@ -7166,17 +7166,17 @@ NumFixedSlots(JSObject *object)
 
 bool
 IonBuilder::getStaticName(JSObject *staticObject, PropertyName *name, bool *psucceeded,
                           MDefinition *lexicalCheck)
 {
     jsid id = NameToId(name);
 
     MOZ_ASSERT(staticObject->is<GlobalObject>() || staticObject->is<CallObject>());
-    MOZ_ASSERT(staticObject->hasSingletonType());
+    MOZ_ASSERT(staticObject->isSingleton());
 
     *psucceeded = true;
 
     if (staticObject->is<GlobalObject>()) {
         // Known values on the global definitely don't need TDZ checks.
         if (lexicalCheck)
             lexicalCheck->setNotGuardUnchecked();
 
@@ -7192,41 +7192,41 @@ IonBuilder::getStaticName(JSObject *stat
     // When not loading a known value on the global with a lexical check,
     // always emit the lexical check. This could be optimized, but is
     // currently not for simplicity's sake.
     if (lexicalCheck) {
         *psucceeded = false;
         return true;
     }
 
-    types::TypeObjectKey *staticType = types::TypeObjectKey::get(staticObject);
+    types::ObjectGroupKey *staticKey = types::ObjectGroupKey::get(staticObject);
     if (analysisContext)
-        staticType->ensureTrackedProperty(analysisContext, NameToId(name));
-
-    if (staticType->unknownProperties()) {
+        staticKey->ensureTrackedProperty(analysisContext, NameToId(name));
+
+    if (staticKey->unknownProperties()) {
         *psucceeded = false;
         return true;
     }
 
-    types::HeapTypeSetKey property = staticType->property(id);
+    types::HeapTypeSetKey property = staticKey->property(id);
     if (!property.maybeTypes() ||
         !property.maybeTypes()->definiteProperty() ||
         property.nonData(constraints()))
     {
         // The property has been reconfigured as non-configurable, non-enumerable
         // or non-writable.
         *psucceeded = false;
         return true;
     }
 
     types::TemporaryTypeSet *types = bytecodeTypes(pc);
-    BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), staticType,
+    BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), staticKey,
                                                        name, types, /* updateObserved = */ true);
 
-    JSObject *singleton = types->getSingleton();
+    JSObject *singleton = types->maybeSingleton();
 
     MIRType knownType = types->getKnownMIRType();
     if (barrier == BarrierKind::NoBarrier) {
         // Try to inline properties holding a known constant object.
         if (singleton) {
             if (testSingletonProperty(staticObject, name) == singleton)
                 return pushConstant(ObjectValue(*singleton));
         }
@@ -7296,21 +7296,21 @@ bool
 IonBuilder::setStaticName(JSObject *staticObject, PropertyName *name)
 {
     jsid id = NameToId(name);
 
     MOZ_ASSERT(staticObject->is<GlobalObject>() || staticObject->is<CallObject>());
 
     MDefinition *value = current->peek(-1);
 
-    types::TypeObjectKey *staticType = types::TypeObjectKey::get(staticObject);
-    if (staticType->unknownProperties())
+    types::ObjectGroupKey *staticKey = types::ObjectGroupKey::get(staticObject);
+    if (staticKey->unknownProperties())
         return jsop_setprop(name);
 
-    types::HeapTypeSetKey property = staticType->property(id);
+    types::HeapTypeSetKey property = staticKey->property(id);
     if (!property.maybeTypes() ||
         !property.maybeTypes()->definiteProperty() ||
         property.nonData(constraints()) ||
         property.nonWritable(constraints()))
     {
         // The property has been reconfigured as non-configurable, non-enumerable
         // or non-writable.
         return jsop_setprop(name);
@@ -7604,18 +7604,18 @@ IonBuilder::checkTypedObjectIndexInBound
     // Value to int32 using truncation.
     int32_t lenOfAll;
     MDefinition *length;
     if (objPrediction.hasKnownArrayLength(&lenOfAll)) {
         length = constantInt(lenOfAll);
 
         // If we are not loading the length from the object itself, only
         // optimize if the array buffer can't have been neutered.
-        types::TypeObjectKey *globalType = types::TypeObjectKey::get(&script()->global());
-        if (globalType->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED)) {
+        types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+        if (globalKey->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED)) {
             trackOptimizationOutcome(TrackedOutcome::TypedObjectNeutered);
             return false;
         }
     } else {
         trackOptimizationOutcome(TrackedOutcome::TypedObjectArrayRange);
         return false;
     }
 
@@ -7829,21 +7829,20 @@ IonBuilder::pushDerivedTypedObject(bool 
     MOZ_ASSERT_IF(expectedClass, IsTypedObjectClass(expectedClass));
 
     // Determine (if possible) the class/proto that the observed type set
     // describes.
     types::TemporaryTypeSet *observedTypes = bytecodeTypes(pc);
     const Class *observedClass = observedTypes->getKnownClass(constraints());
     JSObject *observedProto = observedTypes->getCommonPrototype(constraints());
 
-    // If expectedClass/expectedProto are both non-null (and hence
-    // known), we can predict precisely what TI type object
-    // derivedTypedObj will have. Therefore, if we observe that this
-    // TI type object is already contained in the set of
-    // observedTypes, we can skip the barrier.
+    // If expectedClass/expectedProto are both non-null (and hence known), we
+    // can predict precisely what object group derivedTypedObj will have.
+    // Therefore, if we observe that this group is already contained in the set
+    // of observedTypes, we can skip the barrier.
     //
     // Barriers still wind up being needed in some relatively
     // rare cases:
     //
     // - if multiple kinds of typed objects flow into this point,
     //   in which case we will not be able to predict expectedClass
     //   nor expectedProto.
     //
@@ -7918,24 +7917,24 @@ IonBuilder::getStaticTypedArrayObject(MD
         return nullptr;
     }
 
     if (!obj->resultTypeSet()) {
         trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
         return nullptr;
     }
 
-    JSObject *tarrObj = obj->resultTypeSet()->getSingleton();
+    JSObject *tarrObj = obj->resultTypeSet()->maybeSingleton();
     if (!tarrObj) {
         trackOptimizationOutcome(TrackedOutcome::NotSingleton);
         return nullptr;
     }
 
-    types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarrObj);
-    if (tarrType->unknownProperties()) {
+    types::ObjectGroupKey *tarrKey = types::ObjectGroupKey::get(tarrObj);
+    if (tarrKey->unknownProperties()) {
         trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
         return nullptr;
     }
 
     return tarrObj;
 }
 
 bool
@@ -7956,18 +7955,18 @@ IonBuilder::getElemTryTypedStatic(bool *
 
     MDefinition *ptr = convertShiftToMaskForStaticTypedArray(index, viewType);
     if (!ptr)
         return true;
 
     // Emit LoadTypedArrayElementStatic.
 
     if (tarrObj->is<TypedArrayObject>()) {
-        types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarrObj);
-        tarrType->watchStateChangeForTypedArrayData(constraints());
+        types::ObjectGroupKey *tarrKey = types::ObjectGroupKey::get(tarrObj);
+        tarrKey->watchStateChangeForTypedArrayData(constraints());
     }
 
     obj->setImplicitlyUsedUnchecked();
     index->setImplicitlyUsedUnchecked();
 
     MLoadTypedArrayElementStatic *load = MLoadTypedArrayElementStatic::New(alloc(), tarrObj, ptr);
     current->add(load);
     current->push(load);
@@ -8297,30 +8296,30 @@ IonBuilder::addTypedArrayLengthAndData(M
                                        MInstruction **length, MInstruction **elements)
 {
     MOZ_ASSERT((index != nullptr) == (elements != nullptr));
     JSObject *tarr = nullptr;
 
     if (obj->isConstantValue() && obj->constantValue().isObject())
         tarr = &obj->constantValue().toObject();
     else if (obj->resultTypeSet())
-        tarr = obj->resultTypeSet()->getSingleton();
+        tarr = obj->resultTypeSet()->maybeSingleton();
 
     if (tarr) {
         void *data = AnyTypedArrayViewData(tarr);
         // Bug 979449 - Optimistically embed the elements and use TI to
         //              invalidate if we move them.
         bool isTenured = !tarr->runtimeFromMainThread()->gc.nursery.isInside(data);
-        if (isTenured && tarr->hasSingletonType()) {
+        if (isTenured && tarr->isSingleton()) {
             // The 'data' pointer of TypedArrayObject can change in rare circumstances
             // (ArrayBufferObject::changeContents).
-            types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarr);
-            if (!tarrType->unknownProperties()) {
+            types::ObjectGroupKey *tarrKey = types::ObjectGroupKey::get(tarr);
+            if (!tarrKey->unknownProperties()) {
                 if (tarr->is<TypedArrayObject>())
-                    tarrType->watchStateChangeForTypedArrayData(constraints());
+                    tarrKey->watchStateChangeForTypedArrayData(constraints());
 
                 obj->setImplicitlyUsedUnchecked();
 
                 int32_t len = AssertedCast<int32_t>(AnyTypedArrayLength(tarr));
                 *length = MConstant::New(alloc(), Int32Value(len));
                 current->add(*length);
 
                 if (index) {
@@ -8667,18 +8666,18 @@ IonBuilder::setElemTryTypedStatic(bool *
     Scalar::Type viewType = AnyTypedArrayType(tarrObj);
     MDefinition *ptr = convertShiftToMaskForStaticTypedArray(index, viewType);
     if (!ptr)
         return true;
 
     // Emit StoreTypedArrayElementStatic.
 
     if (tarrObj->is<TypedArrayObject>()) {
-        types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarrObj);
-        tarrType->watchStateChangeForTypedArrayData(constraints());
+        types::ObjectGroupKey *tarrKey = types::ObjectGroupKey::get(tarrObj);
+        tarrKey->watchStateChangeForTypedArrayData(constraints());
     }
 
     object->setImplicitlyUsedUnchecked();
     index->setImplicitlyUsedUnchecked();
 
     // Clamp value to [0, 255] for Uint8ClampedArray.
     MDefinition *toWrite = value;
     if (viewType == Scalar::Uint8Clamped) {
@@ -9075,18 +9074,18 @@ IonBuilder::jsop_length_fastPath()
             current->add(length);
             current->push(length);
             return true;
         }
 
         // Compute the length for array typed objects.
         TypedObjectPrediction prediction = typedObjectPrediction(obj);
         if (!prediction.isUseless()) {
-            types::TypeObjectKey *globalType = types::TypeObjectKey::get(&script()->global());
-            if (globalType->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
+            types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+            if (globalKey->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
                 return false;
 
             MInstruction *length;
             int32_t sizedLength;
             if (prediction.hasKnownArrayLength(&sizedLength)) {
                 obj->setImplicitlyUsedUnchecked();
                 length = MConstant::New(alloc(), Int32Value(sizedLength));
             } else {
@@ -9138,17 +9137,17 @@ IonBuilder::jsop_rest()
     unsigned numActuals = inlineCallInfo_->argv().length();
     unsigned numFormals = info().nargs() - 1;
     unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0;
 
     MConstant *templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
     current->add(templateConst);
 
     MNewArray *array = MNewArray::New(alloc(), constraints(), numRest, templateConst,
-                                      templateObject->type()->initialHeap(constraints()),
+                                      templateObject->group()->initialHeap(constraints()),
                                       NewArray_FullyAllocating);
     current->add(array);
 
     if (numRest == 0) {
         // No more updating to do. (Note that in this one case the length from
         // the template object is already correct.)
         current->push(array);
         return true;
@@ -9203,47 +9202,47 @@ IonBuilder::getDefiniteSlot(types::Tempo
     // analysis can have side effects so abort the builder and retry later.
     //
     // We always check this, so that even if we aren't able to find a common
     // slot we ensure that the new script analysis is performed on all accessed
     // objects. Later, this will let us elide baseline IC stubs for preliminary
     // objects, which often have a different number of fixed slots from
     // subsequent objects.
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *type = types->getObject(i);
-        if (!type)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
 
-        if (types::TypeObject *typeObject = type->maybeType()) {
-            if (typeObject->newScript() && !typeObject->newScript()->analyzed()) {
-                addAbortedNewScriptPropertiesType(typeObject);
+        if (types::ObjectGroup *group = key->maybeGroup()) {
+            if (group->newScript() && !group->newScript()->analyzed()) {
+                addAbortedNewScriptPropertiesGroup(group);
                 trackOptimizationOutcome(TrackedOutcome::NoAnalysisInfo);
                 return UINT32_MAX;
             }
         }
     }
 
     uint32_t slot = UINT32_MAX;
 
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *type = types->getObject(i);
-        if (!type)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
 
-        if (type->unknownProperties()) {
+        if (key->unknownProperties()) {
             trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
             return UINT32_MAX;
         }
 
-        if (type->singleton()) {
+        if (key->isSingleton()) {
             trackOptimizationOutcome(TrackedOutcome::Singleton);
             return UINT32_MAX;
         }
 
-        types::HeapTypeSetKey property = type->property(NameToId(name));
+        types::HeapTypeSetKey property = key->property(NameToId(name));
         if (!property.maybeTypes() ||
             !property.maybeTypes()->definiteProperty() ||
             property.nonData(constraints()))
         {
             trackOptimizationOutcome(TrackedOutcome::NotFixedSlot);
             return UINT32_MAX;
         }
 
@@ -9265,31 +9264,31 @@ IonBuilder::getUnboxedOffset(types::Temp
     if (!types || types->unknownObject()) {
         trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
         return UINT32_MAX;
     }
 
     uint32_t offset = UINT32_MAX;
 
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *type = types->getObject(i);
-        if (!type)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
 
-        if (type->unknownProperties()) {
+        if (key->unknownProperties()) {
             trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
             return UINT32_MAX;
         }
 
-        if (type->singleton()) {
+        if (key->isSingleton()) {
             trackOptimizationOutcome(TrackedOutcome::Singleton);
             return UINT32_MAX;
         }
 
-        UnboxedLayout *layout = type->asTypeObject()->maybeUnboxedLayout();
+        UnboxedLayout *layout = key->group()->maybeUnboxedLayout();
         if (!layout) {
             trackOptimizationOutcome(TrackedOutcome::NotUnboxed);
             return UINT32_MAX;
         }
 
         const UnboxedLayout::Property *property = layout->lookup(name);
         if (!property) {
             trackOptimizationOutcome(TrackedOutcome::StructNoField);
@@ -9341,31 +9340,31 @@ IonBuilder::objectsHaveCommonPrototype(t
     // chain and do not have a property for name before reaching foundProto.
 
     // No sense looking if we don't know what's going on.
     if (!types || types->unknownObject())
         return false;
     *guardGlobal = false;
 
     for (unsigned i = 0; i < types->getObjectCount(); i++) {
-        if (types->getSingleObject(i) == foundProto)
+        if (types->getSingleton(i) == foundProto)
             continue;
 
-        types::TypeObjectKey *type = types->getObject(i);
-        if (!type)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
 
-        while (type) {
-            if (type->unknownProperties())
+        while (key) {
+            if (key->unknownProperties())
                 return false;
 
-            const Class *clasp = type->clasp();
+            const Class *clasp = key->clasp();
             if (!ClassHasEffectlessLookup(clasp, name))
                 return false;
-            JSObject *singleton = type->singleton();
+            JSObject *singleton = key->isSingleton() ? key->singleton() : nullptr;
             if (ClassHasResolveHook(compartment, clasp, name)) {
                 if (!singleton || !singleton->is<GlobalObject>())
                     return false;
                 *guardGlobal = true;
             }
 
             // Look for a getter/setter on the class itself which may need
             // to be called. Ignore the getGeneric hook for typed arrays, it
@@ -9373,68 +9372,68 @@ IonBuilder::objectsHaveCommonPrototype(t
             if (isGetter && clasp->ops.getGeneric && !IsAnyTypedArrayClass(clasp))
                 return false;
             if (!isGetter && clasp->ops.setGeneric)
                 return false;
 
             // Test for isOwnProperty() without freezing. If we end up
             // optimizing, freezePropertiesForCommonPropFunc will freeze the
             // property type sets later on.
-            types::HeapTypeSetKey property = type->property(NameToId(name));
+            types::HeapTypeSetKey property = key->property(NameToId(name));
             if (types::TypeSet *types = property.maybeTypes()) {
                 if (!types->empty() || types->nonDataProperty())
                     return false;
             }
             if (singleton) {
                 if (types::CanHaveEmptyPropertyTypesForOwnProperty(singleton)) {
                     MOZ_ASSERT(singleton->is<GlobalObject>());
                     *guardGlobal = true;
                 }
             }
 
-            JSObject *proto = type->protoMaybeInNursery().toObjectOrNull();
+            JSObject *proto = key->protoMaybeInNursery().toObjectOrNull();
             if (proto == foundProto)
                 break;
             if (!proto) {
                 // The foundProto being searched for did not show up on the
                 // object's prototype chain.
                 return false;
             }
-            type = types::TypeObjectKey::get(proto);
+            key = types::ObjectGroupKey::get(proto);
         }
     }
 
     return true;
 }
 
 void
 IonBuilder::freezePropertiesForCommonPrototype(types::TemporaryTypeSet *types, PropertyName *name,
                                                JSObject *foundProto,
                                                bool allowEmptyTypesforGlobal/* = false*/)
 {
     for (unsigned i = 0; i < types->getObjectCount(); i++) {
         // If we found a Singleton object's own-property, there's nothing to
         // freeze.
-        if (types->getSingleObject(i) == foundProto)
+        if (types->getSingleton(i) == foundProto)
             continue;
 
-        types::TypeObjectKey *type = types->getObject(i);
-        if (!type)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
 
         while (true) {
-            types::HeapTypeSetKey property = type->property(NameToId(name));
+            types::HeapTypeSetKey property = key->property(NameToId(name));
             JS_ALWAYS_TRUE(!property.isOwnProperty(constraints(), allowEmptyTypesforGlobal));
 
             // Don't mark the proto. It will be held down by the shape
             // guard. This allows us to use properties found on prototypes
             // with properties unknown to TI.
-            if (type->protoMaybeInNursery() == TaggedProto(foundProto))
+            if (key->protoMaybeInNursery() == TaggedProto(foundProto))
                 break;
-            type = types::TypeObjectKey::get(type->protoMaybeInNursery().toObjectOrNull());
+            key = types::ObjectGroupKey::get(key->protoMaybeInNursery().toObjectOrNull());
         }
     }
 }
 
 bool
 IonBuilder::testCommonGetterSetter(types::TemporaryTypeSet *types, PropertyName *name,
                                    bool isGetter, JSObject *foundProto, Shape *lastProperty,
                                    MDefinition **guard,
@@ -9496,59 +9495,59 @@ IonBuilder::annotateGetPropertyCache(MDe
 {
     PropertyName *name = getPropCache->name();
 
     // Ensure every pushed value is a singleton.
     if (pushedTypes->unknownObject() || pushedTypes->baseFlags() != 0)
         return true;
 
     for (unsigned i = 0; i < pushedTypes->getObjectCount(); i++) {
-        if (pushedTypes->getTypeObject(i) != nullptr)
+        if (pushedTypes->getGroup(i) != nullptr)
             return true;
     }
 
     // Object's typeset should be a proper object
     if (!objTypes || objTypes->baseFlags() || objTypes->unknownObject())
         return true;
 
     unsigned int objCount = objTypes->getObjectCount();
     if (objCount == 0)
         return true;
 
     InlinePropertyTable *inlinePropTable = getPropCache->initInlinePropertyTable(alloc(), pc);
     if (!inlinePropTable)
         return false;
 
-    // Ensure that the relevant property typeset for each type object is
+    // Ensure that the relevant property typeset for each group
     // is a single-object typeset containing a JSFunction
     for (unsigned int i = 0; i < objCount; i++) {
-        types::TypeObject *baseTypeObj = objTypes->getTypeObject(i);
-        if (!baseTypeObj)
+        types::ObjectGroup *group = objTypes->getGroup(i);
+        if (!group)
             continue;
-        types::TypeObjectKey *typeObj = types::TypeObjectKey::get(baseTypeObj);
-        if (typeObj->unknownProperties() || !typeObj->hasTenuredProto() || !typeObj->proto().isObject())
+        types::ObjectGroupKey *key = types::ObjectGroupKey::get(group);
+        if (key->unknownProperties() || !key->hasTenuredProto() || !key->proto().isObject())
             continue;
 
-        const Class *clasp = typeObj->clasp();
+        const Class *clasp = key->clasp();
         if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name))
             continue;
 
-        types::HeapTypeSetKey ownTypes = typeObj->property(NameToId(name));
+        types::HeapTypeSetKey ownTypes = key->property(NameToId(name));
         if (ownTypes.isOwnProperty(constraints()))
             continue;
 
-        JSObject *singleton = testSingletonProperty(typeObj->proto().toObject(), name);
+        JSObject *singleton = testSingletonProperty(key->proto().toObject(), name);
         if (!singleton || !singleton->is<JSFunction>())
             continue;
 
         // Don't add cases corresponding to non-observed pushes
         if (!pushedTypes->hasType(types::Type::ObjectType(singleton)))
             continue;
 
-        if (!inlinePropTable->addEntry(alloc(), baseTypeObj, &singleton->as<JSFunction>()))
+        if (!inlinePropTable->addEntry(alloc(), group, &singleton->as<JSFunction>()))
             return false;
     }
 
     if (inlinePropTable->numEntries() == 0) {
         getPropCache->clearInlinePropertyTable();
         return true;
     }
 
@@ -9799,29 +9798,29 @@ IonBuilder::getPropTryInferredConstant(b
 
     // Need a result typeset to optimize.
     types::TemporaryTypeSet *objTypes = obj->resultTypeSet();
     if (!objTypes) {
         trackOptimizationOutcome(TrackedOutcome::NoTypeInfo);
         return true;
     }
 
-    JSObject *singleton = objTypes->getSingleton();
+    JSObject *singleton = objTypes->maybeSingleton();
     if (!singleton) {
         trackOptimizationOutcome(TrackedOutcome::NotSingleton);
         return true;
     }
 
-    types::TypeObjectKey *type = types::TypeObjectKey::get(singleton);
-    if (type->unknownProperties()) {
+    types::ObjectGroupKey *key = types::ObjectGroupKey::get(singleton);
+    if (key->unknownProperties()) {
         trackOptimizationOutcome(TrackedOutcome::UnknownProperties);
         return true;
     }
 
-    types::HeapTypeSetKey property = type->property(NameToId(name));
+    types::HeapTypeSetKey property = key->property(NameToId(name));
 
     Value constantValue = UndefinedValue();
     if (property.constant(constraints(), &constantValue)) {
         spew("Optimized constant property");
         obj->setImplicitlyUsedUnchecked();
         if (!pushConstant(constantValue))
             return false;
         types->addType(types::GetValueType(constantValue), alloc_->lifoAlloc());
@@ -9888,17 +9887,17 @@ IonBuilder::getPropTryArgumentsCallee(bo
 }
 
 bool
 IonBuilder::getPropTryConstant(bool *emitted, MDefinition *obj, PropertyName *name,
                                types::TemporaryTypeSet *types)
 {
     MOZ_ASSERT(*emitted == false);
 
-    JSObject *singleton = types ? types->getSingleton() : nullptr;
+    JSObject *singleton = types ? types->maybeSingleton() : nullptr;
     if (!singleton) {
         trackOptimizationOutcome(TrackedOutcome::NotSingleton);
         return true;
     }
 
     bool testObject, testString;
     if (!testSingletonPropertyTypes(obj, singleton, name, &testObject, &testString))
         return true;
@@ -9964,18 +9963,18 @@ bool
 IonBuilder::getPropTryScalarPropOfTypedObject(bool *emitted, MDefinition *typedObj,
                                               int32_t fieldOffset,
                                               TypedObjectPrediction fieldPrediction)
 {
     // Must always be loading the same scalar type
     Scalar::Type fieldType = fieldPrediction.scalarType();
 
     // Don't optimize if the typed object might be neutered.
-    types::TypeObjectKey *globalType = types::TypeObjectKey::get(&script()->global());
-    if (globalType->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
+    types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+    if (globalKey->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
         return true;
 
     trackOptimizationSuccess();
     *emitted = true;
 
     LinearSum byteOffset(alloc());
     if (!byteOffset.add(fieldOffset))
         setForceAbort();
@@ -9986,18 +9985,18 @@ IonBuilder::getPropTryScalarPropOfTypedO
 bool
 IonBuilder::getPropTryReferencePropOfTypedObject(bool *emitted, MDefinition *typedObj,
                                                  int32_t fieldOffset,
                                                  TypedObjectPrediction fieldPrediction,
                                                  PropertyName *name)
 {
     ReferenceTypeDescr::Type fieldType = fieldPrediction.referenceType();
 
-    types::TypeObjectKey *globalType = types::TypeObjectKey::get(&script()->global());
-    if (globalType->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
+    types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+    if (globalKey->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
         return true;
 
     trackOptimizationSuccess();
     *emitted = true;
 
     LinearSum byteOffset(alloc());
     if (!byteOffset.add(fieldOffset))
         setForceAbort();
@@ -10008,18 +10007,18 @@ IonBuilder::getPropTryReferencePropOfTyp
 bool
 IonBuilder::getPropTryComplexPropOfTypedObject(bool *emitted,
                                                MDefinition *typedObj,
                                                int32_t fieldOffset,
                                                TypedObjectPrediction fieldPrediction,
                                                size_t fieldIndex)
 {
     // Don't optimize if the typed object might be neutered.
-    types::TypeObjectKey *globalType = types::TypeObjectKey::get(&script()->global());
-    if (globalType->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
+    types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+    if (globalKey->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
         return true;
 
     // OK, perform the optimization
 
     // Identify the type object for the field.
     MDefinition *type = loadTypedObjectType(typedObj);
     MDefinition *fieldTypeObj = typeObjectForFieldFromStructType(type, fieldIndex);
 
@@ -10184,17 +10183,17 @@ IonBuilder::getPropTryCommonGetter(bool 
 
     if (isDOM && testShouldDOMCall(objTypes, commonGetter, JSJitInfo::Getter)) {
         const JSJitInfo *jitinfo = commonGetter->jitInfo();
         MInstruction *get;
         if (jitinfo->isAlwaysInSlot) {
             // If our object is a singleton and we know the property is
             // constant (which is true if and only if the get doesn't alias
             // anything), we can just read the slot here and use that constant.
-            JSObject *singleton = objTypes->getSingleton();
+            JSObject *singleton = objTypes->maybeSingleton();
             if (singleton && jitinfo->aliasSet() == JSJitInfo::AliasNone) {
                 size_t slot = jitinfo->slotIndex;
                 *emitted = true;
                 return pushConstant(GetReservedSlot(singleton, slot));
             }
 
             // We can't use MLoadFixedSlot here because it might not have the
             // right aliasing behavior; we want to alias DOM setters as needed.
@@ -10282,19 +10281,19 @@ IonBuilder::getPropTryCommonGetter(bool 
         trackOptimizationSuccess();
 
     *emitted = true;
     return true;
 }
 
 bool
 IonBuilder::canInlinePropertyOpShapes(const BaselineInspector::ShapeVector &nativeShapes,
-                                      const BaselineInspector::TypeObjectVector &unboxedTypes)
-{
-    if (nativeShapes.empty() && unboxedTypes.empty()) {
+                                      const BaselineInspector::ObjectGroupVector &unboxedGroups)
+{
+    if (nativeShapes.empty() && unboxedGroups.empty()) {
         trackOptimizationOutcome(TrackedOutcome::NoShapeInfo);
         return false;
     }
 
     for (size_t i = 0; i < nativeShapes.length(); i++) {
         // We inline the property access as long as the shape is not in
         // dictionary mode. We cannot be sure that the shape is still a
         // lastProperty, and calling Shape::search() on dictionary mode
@@ -10343,28 +10342,28 @@ IonBuilder::getPropTryInlineAccess(bool 
     MOZ_ASSERT(*emitted == false);
 
     if (obj->type() != MIRType_Object) {
         trackOptimizationOutcome(TrackedOutcome::NotObject);
         return true;
     }
 
     BaselineInspector::ShapeVector nativeShapes(alloc());
-    BaselineInspector::TypeObjectVector unboxedTypes(alloc());
-    if (!inspector->maybeInfoForPropertyOp(pc, nativeShapes, unboxedTypes))
-        return false;
-
-    if (!canInlinePropertyOpShapes(nativeShapes, unboxedTypes))
+    BaselineInspector::ObjectGroupVector unboxedGroups(alloc());
+    if (!inspector->maybeInfoForPropertyOp(pc, nativeShapes, unboxedGroups))
+        return false;
+
+    if (!canInlinePropertyOpShapes(nativeShapes, unboxedGroups))
         return true;
 
     MIRType rvalType = types->getKnownMIRType();
     if (barrier != BarrierKind::NoBarrier || IsNullOrUndefined(rvalType))
         rvalType = MIRType_Value;
 
-    if (nativeShapes.length() == 1 && unboxedTypes.empty()) {
+    if (nativeShapes.length() == 1 && unboxedGroups.empty()) {
         // In the monomorphic case, use separate ShapeGuard and LoadSlot
         // instructions.
         spew("Inlining monomorphic GETPROP");
 
         Shape *objShape = nativeShapes[0];
         obj = addShapeGuard(obj, objShape, Bailout_ShapeGuard);
 
         Shape *shape = objShape->searchLinear(NameToId(name));
@@ -10373,49 +10372,49 @@ IonBuilder::getPropTryInlineAccess(bool 
         if (!loadSlot(obj, shape, rvalType, barrier, types))
             return false;
 
         trackOptimizationOutcome(TrackedOutcome::Monomorphic);
         *emitted = true;
         return true;
     }
 
-    if (nativeShapes.empty() && unboxedTypes.length() == 1) {
+    if (nativeShapes.empty() && unboxedGroups.length() == 1) {
         spew("Inlining monomorphic unboxed GETPROP");
 
-        types::TypeObject *unboxedType = unboxedTypes[0];
-
-        // Failures in this type guard should be treated the same as a shape guard failure.
-        obj = MGuardObjectType::New(alloc(), obj, unboxedType, /* bailOnEquality = */ false,
-                                    Bailout_ShapeGuard);
+        types::ObjectGroup *group = unboxedGroups[0];
+
+        // Failures in this group guard should be treated the same as a shape guard failure.
+        obj = MGuardObjectGroup::New(alloc(), obj, group, /* bailOnEquality = */ false,
+                                     Bailout_ShapeGuard);
         current->add(obj->toInstruction());
 
         if (failedShapeGuard_)
-            obj->toGuardObjectType()->setNotMovable();
-
-        const UnboxedLayout::Property *property = unboxedType->unboxedLayout().lookup(name);
+            obj->toGuardObjectGroup()->setNotMovable();
+
+        const UnboxedLayout::Property *property = group->unboxedLayout().lookup(name);
         MInstruction *load = loadUnboxedProperty(obj, property->offset, property->type, barrier, types);
         current->push(load);
 
         if (!pushTypeBarrier(load, types, barrier))
             return false;
 
         *emitted = true;
         return true;
     }
 
-    MOZ_ASSERT(nativeShapes.length() + unboxedTypes.length() > 1);
+    MOZ_ASSERT(nativeShapes.length() + unboxedGroups.length() > 1);
     spew("Inlining polymorphic GETPROP");
 
     BaselineInspector::ShapeVector propShapes(alloc());
     bool sameSlot;
     if (!GetPropertyShapes(NameToId(name), nativeShapes, propShapes, &sameSlot))
         return false;
 
-    if (sameSlot && unboxedTypes.empty()) {
+    if (sameSlot && unboxedGroups.empty()) {
         MGuardShapePolymorphic *guard = MGuardShapePolymorphic::New(alloc(), obj);
         current->add(guard);
         obj = guard;
 
         if (failedShapeGuard_)
             guard->setNotMovable();
 
         for (size_t i = 0; i < nativeShapes.length(); i++) {
@@ -10435,18 +10434,18 @@ IonBuilder::getPropTryInlineAccess(bool 
     current->add(load);
     current->push(load);
 
     for (size_t i = 0; i < nativeShapes.length(); i++) {
         if (!load->addShape(nativeShapes[i], propShapes[i]))
             return false;
     }
 
-    for (size_t i = 0; i < unboxedTypes.length(); i++) {
-        if (!load->addUnboxedType(unboxedTypes[i]))
+    for (size_t i = 0; i < unboxedGroups.length(); i++) {
+        if (!load->addUnboxedGroup(unboxedGroups[i]))
             return false;
     }
 
     if (failedShapeGuard_)
         load->setNotMovable();
 
     load->setResultType(rvalType);
     if (!pushTypeBarrier(load, types, barrier))
@@ -10503,17 +10502,17 @@ IonBuilder::getPropTryCache(bool *emitte
     // the InlinePropertyTable of the GetPropertyCache.  This information is
     // then used in inlineCallsite and inlineCalls, if the "this" definition is
     // matching the "object" definition of the GetPropertyCache (see
     // CanInlineGetPropertyCache).
     //
     // If this GetPropertyCache is idempotent, then we can dispatch to the right
     // function only by checking the typed object, instead of querying the value
     // of the property.  Thus this GetPropertyCache can be moved into the
-    // fallback path (see inlineTypeObjectFallback).  Otherwise, we always have
+    // fallback path (see inlineObjectGroupFallback).  Otherwise, we always have
     // to do the GetPropertyCache, and we can dispatch based on the JSFunction
     // value.
     if (JSOp(*pc) == JSOP_CALLPROP && load->idempotent()) {
         if (!annotateGetPropertyCache(obj, load, obj->resultTypeSet(), types))
             return false;
     }
 
     current->add(load);
@@ -10546,29 +10545,29 @@ IonBuilder::tryInnerizeWindow(MDefinitio
 
     if (obj->type() != MIRType_Object)
         return obj;
 
     types::TemporaryTypeSet *types = obj->resultTypeSet();
     if (!types)
         return obj;
 
-    JSObject *singleton = types->getSingleton();
+    JSObject *singleton = types->maybeSingleton();
     if (!singleton)
         return obj;
 
     JSObject *inner = GetInnerObject(singleton);
     if (inner == singleton || inner != &script()->global())
         return obj;
 
     // When we navigate, the outer object is brain transplanted and we'll mark
-    // its TypeObject as having unknown properties. The type constraint we add
+    // its ObjectGroup as having unknown properties. The type constraint we add
     // here will invalidate JIT code when this happens.
-    types::TypeObjectKey *objType = types::TypeObjectKey::get(singleton);
-    if (objType->hasFlags(constraints(), types::OBJECT_FLAG_UNKNOWN_PROPERTIES))
+    types::ObjectGroupKey *key = types::ObjectGroupKey::get(singleton);
+    if (key->hasFlags(constraints(), types::OBJECT_FLAG_UNKNOWN_PROPERTIES))
         return obj;
 
     obj->setImplicitlyUsedUnchecked();
     return constant(ObjectValue(script()->global()));
 }
 
 bool
 IonBuilder::getPropTryInnerize(bool *emitted, MDefinition *obj, PropertyName *name,
@@ -10829,18 +10828,18 @@ IonBuilder::setPropTryReferencePropOfTyp
                                                  MDefinition *obj,
                                                  int32_t fieldOffset,
                                                  MDefinition *value,
                                                  TypedObjectPrediction fieldPrediction,
                                                  PropertyName *name)
 {
     ReferenceTypeDescr::Type fieldType = fieldPrediction.referenceType();
 
-    types::TypeObjectKey *globalType = types::TypeObjectKey::get(&script()->global());
-    if (globalType->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
+    types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+    if (globalKey->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
         return true;
 
     LinearSum byteOffset(alloc());
     if (!byteOffset.add(fieldOffset))
         setForceAbort();
 
     if (!storeReferenceTypedObjectValue(obj, byteOffset, fieldType, value, name))
         return true;
@@ -10858,18 +10857,18 @@ IonBuilder::setPropTryScalarPropOfTypedO
                                               int32_t fieldOffset,
                                               MDefinition *value,
                                               TypedObjectPrediction fieldPrediction)
 {
     // Must always be loading the same scalar type
     Scalar::Type fieldType = fieldPrediction.scalarType();
 
     // Don't optimize if the typed object might be neutered.
-    types::TypeObjectKey *globalType = types::TypeObjectKey::get(&script()->global());
-    if (globalType->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
+    types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+    if (globalKey->hasFlags(constraints(), types::OBJECT_FLAG_TYPED_OBJECT_NEUTERED))
         return true;
 
     LinearSum byteOffset(alloc());
     if (!byteOffset.add(fieldOffset))
         setForceAbort();
 
     if (!storeScalarTypedObjectValue(obj, byteOffset, fieldType, false, value))
         return false;
@@ -10894,21 +10893,21 @@ IonBuilder::setPropTryDefiniteSlot(bool 
     }
 
     uint32_t slot = getDefiniteSlot(obj->resultTypeSet(), name);
     if (slot == UINT32_MAX)
         return true;
 
     bool writeBarrier = false;
     for (size_t i = 0; i < obj->resultTypeSet()->getObjectCount(); i++) {
-        types::TypeObjectKey *type = obj->resultTypeSet()->getObject(i);
-        if (!type)
+        types::ObjectGroupKey *key = obj->resultTypeSet()->getObject(i);
+        if (!key)
             continue;
 
-        types::HeapTypeSetKey property = type->property(NameToId(name));
+        types::HeapTypeSetKey property = key->property(NameToId(name));
         if (property.nonWritable(constraints())) {
             trackOptimizationOutcome(TrackedOutcome::NonWritableProperty);
             return true;
         }
         writeBarrier |= property.needsBarrier(constraints());
     }
 
     MInstruction *store;
@@ -11024,24 +11023,24 @@ IonBuilder::setPropTryInlineAccess(bool 
     MOZ_ASSERT(*emitted == false);
 
     if (barrier) {
         trackOptimizationOutcome(TrackedOutcome::NeedsTypeBarrier);
         return true;
     }
 
     BaselineInspector::ShapeVector nativeShapes(alloc());
-    BaselineInspector::TypeObjectVector unboxedTypes(alloc());
-    if (!inspector->maybeInfoForPropertyOp(pc, nativeShapes, unboxedTypes))
-        return false;
-
-    if (!canInlinePropertyOpShapes(nativeShapes, unboxedTypes))
-        return true;
-
-    if (nativeShapes.length() == 1 && unboxedTypes.empty()) {
+    BaselineInspector::ObjectGroupVector unboxedGroups(alloc());
+    if (!inspector->maybeInfoForPropertyOp(pc, nativeShapes, unboxedGroups))
+        return false;
+
+    if (!canInlinePropertyOpShapes(nativeShapes, unboxedGroups))
+        return true;
+
+    if (nativeShapes.length() == 1 && unboxedGroups.empty()) {
         spew("Inlining monomorphic SETPROP");
 
         // The Baseline IC was monomorphic, so we inline the property access as
         // long as the shape is not in dictionary mode. We cannot be sure
         // that the shape is still a lastProperty, and calling Shape::search
         // on dictionary mode shapes that aren't lastProperty is invalid.
         Shape *objShape = nativeShapes[0];
         obj = addShapeGuard(obj, objShape, Bailout_ShapeGuard);
@@ -11053,47 +11052,47 @@ IonBuilder::setPropTryInlineAccess(bool 
         if (!storeSlot(obj, shape, value, needsBarrier))
             return false;
 
         trackOptimizationOutcome(TrackedOutcome::Monomorphic);
         *emitted = true;
         return true;
     }
 
-    if (nativeShapes.empty() && unboxedTypes.length() == 1) {
+    if (nativeShapes.empty() && unboxedGroups.length() == 1) {
         spew("Inlining monomorphic unboxed SETPROP");
 
-        types::TypeObject *unboxedType = unboxedTypes[0];
-
-        // Failures in this type guard should be treated the same as a shape guard failure.
-        obj = MGuardObjectType::New(alloc(), obj, unboxedType, /* bailOnEquality = */ false,
+        types::ObjectGroup *group = unboxedGroups[0];
+
+        // Failures in this group guard should be treated the same as a shape guard failure.
+        obj = MGuardObjectGroup::New(alloc(), obj, group, /* bailOnEquality = */ false,
                                     Bailout_ShapeGuard);
         current->add(obj->toInstruction());
 
         if (failedShapeGuard_)
-            obj->toGuardObjectType()->setNotMovable();
-
-        const UnboxedLayout::Property *property = unboxedType->unboxedLayout().lookup(name);
+            obj->toGuardObjectGroup()->setNotMovable();
+
+        const UnboxedLayout::Property *property = group->unboxedLayout().lookup(name);
         storeUnboxedProperty(obj, property->offset, property->type, value);
 
         current->push(value);
 
         *emitted = true;
         return true;
     }
 
-    MOZ_ASSERT(nativeShapes.length() + unboxedTypes.length() > 1);
+    MOZ_ASSERT(nativeShapes.length() + unboxedGroups.length() > 1);
     spew("Inlining polymorphic SETPROP");
 
     BaselineInspector::ShapeVector propShapes(alloc());
     bool sameSlot;
     if (!GetPropertyShapes(NameToId(name), nativeShapes, propShapes, &sameSlot))
         return false;
 
-    if (sameSlot && unboxedTypes.empty()) {
+    if (sameSlot && unboxedGroups.empty()) {
         MGuardShapePolymorphic *guard = MGuardShapePolymorphic::New(alloc(), obj);
         current->add(guard);
         obj = guard;
 
         if (failedShapeGuard_)
             guard->setNotMovable();
 
         for (size_t i = 0; i < nativeShapes.length(); i++) {
@@ -11117,18 +11116,18 @@ IonBuilder::setPropTryInlineAccess(bool 
     for (size_t i = 0; i < nativeShapes.length(); i++) {
         Shape *objShape = nativeShapes[i];
         Shape *shape =  objShape->searchLinear(NameToId(name));
         MOZ_ASSERT(shape);
         if (!ins->addShape(objShape, shape))
             return false;
     }
 
-    for (size_t i = 0; i < unboxedTypes.length(); i++) {
-        if (!ins->addUnboxedType(unboxedTypes[i]))
+    for (size_t i = 0; i < unboxedGroups.length(); i++) {
+        if (!ins->addUnboxedGroup(unboxedGroups[i]))
             return false;
     }
 
     if (objTypes->propertyNeedsBarrier(constraints(), NameToId(name)))
         ins->setNeedsBarrier();
 
     if (!resumeAfter(ins))
         return false;
@@ -11198,18 +11197,18 @@ IonBuilder::jsop_regexp(RegExpObject *re
     // script could actually observe the effect of such cloning, for instance
     // by getting or setting properties on it.
     //
     // First, make sure the regex is one we can safely optimize. Lowering can
     // then check if this regex object only flows into known natives and can
     // avoid cloning in this case.
 
     bool mustClone = true;
-    types::TypeObjectKey *typeObj = types::TypeObjectKey::get(&script()->global());
-    if (!typeObj->hasFlags(constraints(), types::OBJECT_FLAG_REGEXP_FLAGS_SET)) {
+    types::ObjectGroupKey *globalKey = types::ObjectGroupKey::get(&script()->global());
+    if (!globalKey->hasFlags(constraints(), types::OBJECT_FLAG_REGEXP_FLAGS_SET)) {
 #ifdef DEBUG
         // Only compare the statics if the one on script()->global() has been
         // instantiated.
         if (script()->global().hasRegExpStatics()) {
             RegExpStatics *res = script()->global().getAlreadyCreatedRegExpStatics();
             MOZ_ASSERT(res);
             uint32_t origFlags = reobj->getFlags();
             uint32_t staticsFlags = res->getFlags();
@@ -11586,19 +11585,19 @@ IonBuilder::walkScopeChain(unsigned hops
 
 bool
 IonBuilder::hasStaticScopeObject(ScopeCoordinate sc, JSObject **pcall)
 {
     JSScript *outerScript = ScopeCoordinateFunctionScript(script(), pc);
     if (!outerScript || !outerScript->treatAsRunOnce())
         return false;
 
-    types::TypeObjectKey *funType =
-            types::TypeObjectKey::get(outerScript->functionNonDelazifying());
-    if (funType->hasFlags(constraints(), types::OBJECT_FLAG_RUNONCE_INVALIDATED))
+    types::ObjectGroupKey *funKey =
+        types::ObjectGroupKey::get(outerScript->functionNonDelazifying());
+    if (funKey->hasFlags(constraints(), types::OBJECT_FLAG_RUNONCE_INVALIDATED))
         return false;
 
     // The script this aliased var operation is accessing will run only once,
     // so there will be only one call object and the aliased var access can be
     // compiled in the same manner as a global access. We still need to find
     // the call object though.
 
     // Look for the call object on the current script's function's scope chain.
@@ -11609,17 +11608,17 @@ IonBuilder::hasStaticScopeObject(ScopeCo
     scope->setImplicitlyUsedUnchecked();
 
     JSObject *environment = script()->functionNonDelazifying()->environment();
     while (environment && !environment->is<GlobalObject>()) {
         if (environment->is<CallObject>() &&
             !environment->as<CallObject>().isForEval() &&
             environment->as<CallObject>().callee().nonLazyScript() == outerScript)
         {
-            MOZ_ASSERT(environment->hasSingletonType());
+            MOZ_ASSERT(environment->isSingleton());
             *pcall = environment;
             return true;
         }
         environment = environment->enclosingScope();
     }
 
     // Look for the call object on the current frame, if we are compiling the
     // outer script itself. Don't do this if we are at entry to the outer
@@ -11627,17 +11626,17 @@ IonBuilder::hasStaticScopeObject(ScopeCo
     // entering the Ion code a different call object will be created.
 
     if (script() == outerScript && baselineFrame_ && info().osrPc()) {
         JSObject *singletonScope = baselineFrame_->singletonScopeChain;
         if (singletonScope &&
             singletonScope->is<CallObject>() &&
             singletonScope->as<CallObject>().callee().nonLazyScript() == outerScript)
         {
-            MOZ_ASSERT(singletonScope->hasSingletonType());
+            MOZ_ASSERT(singletonScope->isSingleton());
             *pcall = singletonScope;
             return true;
         }
     }
 
     return true;
 }
 
@@ -11789,41 +11788,41 @@ IonBuilder::jsop_in_dense()
 
     current->add(ins);
     current->push(ins);
 
     return true;
 }
 
 static bool
-HasOnProtoChain(types::CompilerConstraintList *constraints, types::TypeObjectKey *object,
+HasOnProtoChain(types::CompilerConstraintList *constraints, types::ObjectGroupKey *key,
                 JSObject *protoObject, bool *hasOnProto)
 {
     MOZ_ASSERT(protoObject);
 
     while (true) {
-        if (!object->hasStableClassAndProto(constraints) ||
-            !object->clasp()->isNative() ||
-            !object->hasTenuredProto())
+        if (!key->hasStableClassAndProto(constraints) ||
+            !key->clasp()->isNative() ||
+            !key->hasTenuredProto())
         {
             return false;
         }
 
-        JSObject *proto = object->proto().toObjectOrNull();
+        JSObject *proto = key->proto().toObjectOrNull();
         if (!proto) {
             *hasOnProto = false;
             return true;
         }
 
         if (proto == protoObject) {
             *hasOnProto = true;
             return true;
         }
 
-        object = types::TypeObjectKey::get(proto);
+        key = types::ObjectGroupKey::get(proto);
     }
 
     MOZ_CRASH("Unreachable");
 }
 
 bool
 IonBuilder::tryFoldInstanceOf(MDefinition *lhs, JSObject *protoObject)
 {
@@ -11841,22 +11840,22 @@ IonBuilder::tryFoldInstanceOf(MDefinitio
         return false;
 
     // We can fold if either all objects have protoObject on their proto chain
     // or none have.
     bool isFirst = true;
     bool knownIsInstance = false;
 
     for (unsigned i = 0; i < lhsTypes->getObjectCount(); i++) {
-        types::TypeObjectKey *object = lhsTypes->getObject(i);
-        if (!object)
+        types::ObjectGroupKey *key = lhsTypes->getObject(i);
+        if (!key)
             continue;
 
         bool isInstance;
-        if (!HasOnProtoChain(constraints(), object, protoObject, &isInstance))
+        if (!HasOnProtoChain(constraints(), key, protoObject, &isInstance))
             return false;
 
         if (isFirst) {
             knownIsInstance = isInstance;
             isFirst = false;
         } else if (knownIsInstance != isInstance) {
             // Some of the objects have protoObject on their proto chain and
             // others don't, so we can't optimize this.
@@ -11884,26 +11883,26 @@ IonBuilder::jsop_instanceof()
 {
     MDefinition *rhs = current->pop();
     MDefinition *obj = current->pop();
 
     // If this is an 'x instanceof function' operation and we can determine the
     // exact function and prototype object being tested for, use a typed path.
     do {
         types::TemporaryTypeSet *rhsTypes = rhs->resultTypeSet();
-        JSObject *rhsObject = rhsTypes ? rhsTypes->getSingleton() : nullptr;
+        JSObject *rhsObject = rhsTypes ? rhsTypes->maybeSingleton() : nullptr;
         if (!rhsObject || !rhsObject->is<JSFunction>() || rhsObject->isBoundFunction())
             break;
 
-        types::TypeObjectKey *rhsType = types::TypeObjectKey::get(rhsObject);
-        if (rhsType->unknownProperties())
+        types::ObjectGroupKey *rhsKey = types::ObjectGroupKey::get(rhsObject);
+        if (rhsKey->unknownProperties())
             break;
 
         types::HeapTypeSetKey protoProperty =
-            rhsType->property(NameToId(names().prototype));
+            rhsKey->property(NameToId(names().prototype));
         JSObject *protoObject = protoProperty.singleton(constraints());
         if (!protoObject)
             break;
 
         rhs->setImplicitlyUsedUnchecked();
 
         if (tryFoldInstanceOf(obj, protoObject))
             return true;
@@ -12036,24 +12035,24 @@ IonBuilder::typedObjectPrediction(types:
         return TypedObjectPrediction();
 
     // And only known objects.
     if (types->unknownObject())
         return TypedObjectPrediction();
 
     TypedObjectPrediction out;
     for (uint32_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObject *type = types->getTypeObject(i);
-        if (!type || !types::TypeObjectKey::get(type)->hasStableClassAndProto(constraints()))
+        types::ObjectGroup *group = types->getGroup(i);
+        if (!group || !types::ObjectGroupKey::get(group)->hasStableClassAndProto(constraints()))
             return TypedObjectPrediction();
 
-        if (!IsTypedObjectClass(type->clasp()))
+        if (!IsTypedObjectClass(group->clasp()))
             return TypedObjectPrediction();
 
-        out.addDescr(type->typeDescr());
+        out.addDescr(group->typeDescr());
     }
 
     return out;
 }
 
 MDefinition *
 IonBuilder::loadTypedObjectType(MDefinition *typedObj)
 {
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -836,19 +836,19 @@ class IonBuilder
     InliningStatus inlineCallsite(const ObjectVector &targets, ObjectVector &originals,
                                   CallInfo &callInfo);
     bool inlineCalls(CallInfo &callInfo, const ObjectVector &targets, ObjectVector &originals,
                      BoolVector &choiceSet, MGetPropertyCache *maybeCache);
 
     // Inlining helpers.
     bool inlineGenericFallback(JSFunction *target, CallInfo &callInfo, MBasicBlock *dispatchBlock,
                                bool clonedAtCallsite);
-    bool inlineTypeObjectFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock,
-                                  MTypeObjectDispatch *dispatch, MGetPropertyCache *cache,
-                                  MBasicBlock **fallbackTarget);
+    bool inlineObjectGroupFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock,
+                                   MObjectGroupDispatch *dispatch, MGetPropertyCache *cache,
+                                   MBasicBlock **fallbackTarget);
 
     bool atomicsMeetsPreconditions(CallInfo &callInfo, Scalar::Type *arrayElementType);
     void atomicsCheckBounds(CallInfo &callInfo, MInstruction **elements, MDefinition **index);
 
     bool testNeedsArgumentCheck(JSFunction *target, CallInfo &callInfo);
 
     MDefinition *makeCallsiteClone(JSFunction *target, MDefinition *fun);
     MCall *makeCallHelper(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite);
@@ -887,17 +887,17 @@ class IonBuilder
                               JSValueType *punboxedType);
     MInstruction *loadUnboxedProperty(MDefinition *obj, size_t offset, JSValueType unboxedType,
                                       BarrierKind barrier, types::TemporaryTypeSet *types);
     MInstruction *storeUnboxedProperty(MDefinition *obj, size_t offset, JSValueType unboxedType,
                                        MDefinition *value);
     bool freezePropTypeSets(types::TemporaryTypeSet *types,
                             JSObject *foundProto, PropertyName *name);
     bool canInlinePropertyOpShapes(const BaselineInspector::ShapeVector &nativeShapes,
-                                   const BaselineInspector::TypeObjectVector &unboxedTypes);
+                                   const BaselineInspector::ObjectGroupVector &unboxedGroups);
 
     types::TemporaryTypeSet *bytecodeTypes(jsbytecode *pc);
 
     // Use one of the below methods for updating the current block, rather than
     // updating |current| directly. setCurrent() should only be used in cases
     // where the block cannot have phis whose type needs to be computed.
 
     bool setCurrentAndSpecializePhis(MBasicBlock *block) {
@@ -1071,17 +1071,17 @@ class IonBuilder
 
     // When compiling a call with multiple targets, we are first creating a
     // MGetPropertyCache.  This MGetPropertyCache is following the bytecode, and
     // is used to recover the JSFunction.  In some cases, the Type of the object
     // which own the property is enough for dispatching to the right function.
     // In such cases we do not have read the property, except when the type
     // object is unknown.
     //
-    // As an optimization, we can dispatch a call based on the type object,
+    // As an optimization, we can dispatch a call based on the object group,
     // without doing the MGetPropertyCache.  This is what is achieved by
     // |IonBuilder::inlineCalls|.  As we might not know all the functions, we
     // are adding a fallback path, where this MGetPropertyCache would be moved
     // into.
     //
     // In order to build the fallback path, we have to capture a resume point
     // ahead, for the potential fallback path.  This resume point is captured
     // while building MGetPropertyCache.  It is capturing the state of Baseline
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -482,33 +482,33 @@ GeneratePrototypeGuards(JSContext *cx, I
      * it, and any other change to the holder, or adding a shadowing property will result
      * in reshaping the holder, and thus the failure of the shape guard.
      */
     MOZ_ASSERT(obj != holder);
 
     if (obj->hasUncacheableProto()) {
         // Note: objectReg and scratchReg may be the same register, so we cannot
         // use objectReg in the rest of this function.
-        masm.loadPtr(Address(objectReg, JSObject::offsetOfType()), scratchReg);
-        Address proto(scratchReg, types::TypeObject::offsetOfProto());
+        masm.loadPtr(Address(objectReg, JSObject::offsetOfGroup()), scratchReg);
+        Address proto(scratchReg, types::ObjectGroup::offsetOfProto());
         masm.branchPtr(Assembler::NotEqual, proto,
                        ImmMaybeNurseryPtr(obj->getProto()), failures);
     }
 
     JSObject *pobj = IsCacheableDOMProxy(obj)
                      ? obj->getTaggedProto().toObjectOrNull()
                      : obj->getProto();
     if (!pobj)
         return;
     while (pobj != holder) {
         if (pobj->hasUncacheableProto()) {
-            MOZ_ASSERT(!pobj->hasSingletonType());
+            MOZ_ASSERT(!pobj->isSingleton());
             masm.movePtr(ImmMaybeNurseryPtr(pobj), scratchReg);
-            Address objType(scratchReg, JSObject::offsetOfType());
-            masm.branchPtr(Assembler::NotEqual, objType, ImmGCPtr(pobj->type()), failures);
+            Address groupAddr(scratchReg, JSObject::offsetOfGroup());
+            masm.branchPtr(Assembler::NotEqual, groupAddr, ImmGCPtr(pobj->group()), failures);
         }
         pobj = pobj->getProto();
     }
 }
 
 // Note: This differs from IsCacheableProtoChain in BaselineIC.cpp in that
 // Ion caches can deal with objects on the proto chain that have uncacheable
 // prototypes.
@@ -783,18 +783,18 @@ GenerateReadSlot(JSContext *cx, IonScrip
     // Guard on the shape or type of the object, depending on whether it is native.
     if (obj->isNative()) {
         attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
                                        Address(object, JSObject::offsetOfShape()),
                                        ImmGCPtr(obj->lastProperty()),
                                        failures);
     } else {
         attacher.branchNextStubOrLabel(masm, Assembler::NotEqual,
-                                       Address(object, JSObject::offsetOfType()),
-                                       ImmGCPtr(obj->type()),
+                                       Address(object, JSObject::offsetOfGroup()),
+                                       ImmGCPtr(obj->group()),
                                        failures);
     }
 
     // If we need a scratch register, use either an output register or the
     // object register. After this point, we cannot jump directly to
     // |failures| since we may still have to pop the object register.
     bool restoreScratch = false;
     Register scratchReg = Register::FromCode(0); // Quell compiler warning.
@@ -885,18 +885,18 @@ GenerateReadSlot(JSContext *cx, IonScrip
 static void
 GenerateReadUnboxed(JSContext *cx, IonScript *ion, MacroAssembler &masm,
                     IonCache::StubAttacher &attacher, JSObject *obj,
                     const UnboxedLayout::Property *property,
                     Register object, TypedOrValueRegister output)
 {
     // Guard on the type of the object.
     attacher.branchNextStub(masm, Assembler::NotEqual,
-                            Address(object, JSObject::offsetOfType()),
-                            ImmGCPtr(obj->type()));
+                            Address(object, JSObject::offsetOfGroup()),
+                            ImmGCPtr(obj->group()));
 
     Address address(object, UnboxedPlainObject::offsetOfData() + property->offset);
 
     masm.loadUnboxedProperty(address, property->type, output);
 
     attacher.jumpRejoin(masm);
 }
 
@@ -1892,20 +1892,20 @@ IonCache::destroy()
 
 // Jump to failure if a value being written is not a property for obj/id.
 // This might clobber |object|.
 static void
 CheckTypeSetForWrite(MacroAssembler &masm, JSObject *obj, jsid id,
                      Register object, ConstantOrRegister value, Label *failure)
 {
     TypedOrValueRegister valReg = value.reg();
-    types::TypeObject *type = obj->type();
-    if (type->unknownProperties())
+    types::ObjectGroup *group = obj->group();
+    if (group->unknownProperties())
         return;
-    types::HeapTypeSet *propTypes = type->maybeGetProperty(id);
+    types::HeapTypeSet *propTypes = group->maybeGetProperty(id);
     MOZ_ASSERT(propTypes);
 
     // guardTypeSet can read from type sets without triggering read barriers.
     types::TypeSet::readBarrier(propTypes);
 
     Register scratch = object;
     masm.guardTypeSet(valReg, propTypes, BarrierKind::TypeSet, scratch, failure);
 }
@@ -1923,21 +1923,21 @@ GenerateSetSlot(JSContext *cx, MacroAsse
                    ImmGCPtr(obj->lastProperty()), &failures);
 
     // Guard that the incoming value is in the type set for the property
     // if a type barrier is required.
     if (needsTypeBarrier) {
         // We can't do anything that would change the HeapTypeSet, so
         // just guard that it's already there.
 
-        // Obtain and guard on the TypeObject of the object.
-        types::TypeObject *type = obj->type();
+        // Obtain and guard on the ObjectGroup of the object.
+        types::ObjectGroup *group = obj->group();
         masm.branchPtr(Assembler::NotEqual,
-                       Address(object, JSObject::offsetOfType()),
-                       ImmGCPtr(type), &failures);
+                       Address(object, JSObject::offsetOfGroup()),
+                       ImmGCPtr(group), &failures);
 
         if (checkTypeset) {
             masm.push(object);
             CheckTypeSetForWrite(masm, obj, shape->propid(), object, value, &barrierFailure);
             masm.pop(object);
         }
     }
 
@@ -2458,27 +2458,27 @@ SetPropertyIC::attachCallSetter(JSContex
     masm.bind(&failure);
     attacher.jumpNextStub(masm);
 
     return linkAndAttachStub(cx, masm, attacher, ion, "setter call");
 }
 
 static void
 GenerateAddSlot(JSContext *cx, MacroAssembler &masm, IonCache::StubAttacher &attacher,
-                NativeObject *obj, Shape *oldShape, types::TypeObject *oldType,
+                NativeObject *obj, Shape *oldShape, types::ObjectGroup *oldGroup,
                 Register object, ConstantOrRegister value,
                 bool checkTypeset)
 {
     MOZ_ASSERT(obj->isNative());
 
     Label failures;
 
     // Guard the type of the object
-    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfType()),
-                   ImmGCPtr(oldType), &failures);
+    masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfGroup()),
+                   ImmGCPtr(oldGroup), &failures);
 
     // Guard shapes along prototype chain.
     masm.branchTestObjShape(Assembler::NotEqual, object, oldShape, &failures);
 
     Label failuresPopObject;
     masm.push(object);    // save object reg because we clobber it
 
     // Guard that the incoming value is in the type set for the property
@@ -2506,34 +2506,34 @@ GenerateAddSlot(JSContext *cx, MacroAsse
 
     // Changing object shape.  Write the object's new shape.
     Shape *newShape = obj->lastProperty();
     Address shapeAddr(object, JSObject::offsetOfShape());
     if (cx->zone()->needsIncrementalBarrier())
         masm.callPreBarrier(shapeAddr, MIRType_Shape);
     masm.storePtr(ImmGCPtr(newShape), shapeAddr);
 
-    if (oldType != obj->type()) {
-        // Changing object's type from a partially to fully initialized type,
-        // per the acquired properties analysis. Only change the type if the
-        // old type still has a newScript.
+    if (oldGroup != obj->group()) {
+        // Changing object's group from a partially to fully initialized group,
+        // per the acquired properties analysis. Only change the group if the
+        // old group still has a newScript.
         Label noTypeChange, skipPop;
 
         masm.push(object);
-        masm.loadPtr(Address(object, JSObject::offsetOfType()), object);
+        masm.loadPtr(Address(object, JSObject::offsetOfGroup()), object);
         masm.branchPtr(Assembler::Equal,
-                       Address(object, types::TypeObject::offsetOfAddendum()),
+                       Address(object, types::ObjectGroup::offsetOfAddendum()),
                        ImmWord(0),
                        &noTypeChange);
         masm.pop(object);
 
-        Address typeAddr(object, JSObject::offsetOfType());
+        Address groupAddr(object, JSObject::offsetOfGroup());
         if (cx->zone()->needsIncrementalBarrier())
-            masm.callPreBarrier(typeAddr, MIRType_TypeObject);
-        masm.storePtr(ImmGCPtr(obj->type()), typeAddr);
+            masm.callPreBarrier(groupAddr, MIRType_ObjectGroup);
+        masm.storePtr(ImmGCPtr(obj->group()), groupAddr);
 
         masm.jump(&skipPop);
         masm.bind(&noTypeChange);
         masm.pop(object);
         masm.bind(&skipPop);
     }
 
     // Set the value on the object. Since this is an add, obj->lastProperty()
@@ -2559,38 +2559,38 @@ GenerateAddSlot(JSContext *cx, MacroAsse
     masm.pop(object);
     masm.bind(&failures);
 
     attacher.jumpNextStub(masm);
 }
 
 bool
 SetPropertyIC::attachAddSlot(JSContext *cx, HandleScript outerScript, IonScript *ion,
-                             HandleNativeObject obj, HandleShape oldShape, HandleTypeObject oldType,
+                             HandleNativeObject obj, HandleShape oldShape, HandleObjectGroup oldGroup,
                              bool checkTypeset)
 {
     MOZ_ASSERT_IF(!needsTypeBarrier(), !checkTypeset);
 
     MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
     RepatchStubAppender attacher(*this);
-    GenerateAddSlot(cx, masm, attacher, obj, oldShape, oldType, object(), value(), checkTypeset);
+    GenerateAddSlot(cx, masm, attacher, obj, oldShape, oldGroup, object(), value(), checkTypeset);
     return linkAndAttachStub(cx, masm, attacher, ion, "adding");
 }
 
 static bool
 CanInlineSetPropTypeCheck(JSObject *obj, jsid id, ConstantOrRegister val, bool *checkTypeset)
 {
     bool shouldCheck = false;
-    types::TypeObject *type = obj->type();
-    if (!type->unknownProperties()) {
-        types::HeapTypeSet *propTypes = type->maybeGetProperty(id);
+    types::ObjectGroup *group = obj->group();
+    if (!group->unknownProperties()) {
+        types::HeapTypeSet *propTypes = group->maybeGetProperty(id);
         if (!propTypes)
             return false;
         if (!propTypes->unknown()) {
-            if (obj->hasSingletonType() && !propTypes->nonConstantProperty())
+            if (obj->isSingleton() && !propTypes->nonConstantProperty())
                 return false;
             shouldCheck = true;
             if (val.constant()) {
                 // If the input is a constant, then don't bother if the barrier will always fail.
                 if (!propTypes->hasType(types::GetValueType(val.value())))
                     return false;
                 shouldCheck = false;
             } else {
@@ -2690,17 +2690,17 @@ IsPropertyAddInlineable(NativeObject *ob
     // changed.  Need to ensure that a shape change for a subsequent object
     // won't involve reallocating the slot array.
     if (obj->numDynamicSlots() != oldSlots)
         return false;
 
     // Don't attach if we are adding a property to an object which the new
     // script properties analysis hasn't been performed for yet, as there
     // may be a shape change required here afterwards.
-    if (obj->type()->newScript() && !obj->type()->newScript()->analyzed())
+    if (obj->group()->newScript() && !obj->group()->newScript()->analyzed())
         return false;
 
     if (needsTypeBarrier)
         return CanInlineSetPropTypeCheck(obj, id, val, checkTypeset);
 
     *checkTypeset = false;
     return true;
 }
@@ -2742,18 +2742,18 @@ static void
 GenerateSetUnboxed(JSContext *cx, MacroAssembler &masm, IonCache::StubAttacher &attacher,
                    JSObject *obj, jsid id, uint32_t unboxedOffset, JSValueType unboxedType,
                    Register object, ConstantOrRegister value, bool checkTypeset)
 {
     Label failure, failurePopObject;
 
     // Guard on the type of the object.
     masm.branchPtr(Assembler::NotEqual,
-                   Address(object, JSObject::offsetOfType()),
-                   ImmGCPtr(obj->type()), &failure);
+                   Address(object, JSObject::offsetOfGroup()),
+                   ImmGCPtr(obj->group()), &failure);
 
     if (checkTypeset) {
         masm.push(object);
         CheckTypeSetForWrite(masm, obj, id, object, value, &failurePopObject);
         masm.pop(object);
     }
 
     Address address(object, UnboxedPlainObject::offsetOfData() + unboxedOffset);
@@ -2766,17 +2766,17 @@ GenerateSetUnboxed(JSContext *cx, MacroA
         else
             MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(unboxedType));
     }
 
     // If the unboxed object's type has known properties, then instances have
     // never been converted to native objects and the type set check performed
     // above ensures the value being written can be stored in the unboxed
     // object.
-    Label *storeFailure = obj->type()->unknownProperties() ? &failure : nullptr;
+    Label *storeFailure = obj->group()->unknownProperties() ? &failure : nullptr;
 
     masm.storeUnboxedProperty(address, unboxedType, value, storeFailure);
 
     attacher.jumpRejoin(masm);
 
     masm.bind(&failurePopObject);
     masm.pop(object);
     masm.bind(&failure);
@@ -2823,18 +2823,18 @@ SetPropertyIC::update(JSContext *cx, siz
 {
     void *returnAddr;
     RootedScript script(cx, GetTopJitJSScript(cx, &returnAddr));
     IonScript *ion = script->ionScript();
     SetPropertyIC &cache = ion->getCache(cacheIndex).toSetProperty();
     RootedPropertyName name(cx, cache.name());
     RootedId id(cx, AtomToId(name));
 
-    RootedTypeObject oldType(cx, obj->getType(cx));
-    if (!oldType)
+    RootedObjectGroup oldGroup(cx, obj->getGroup(cx));
+    if (!oldGroup)
         return false;
 
     // Stop generating new stubs once we hit the stub count limit, see
     // GetPropertyCache.
     NativeSetPropCacheability canCache = CanAttachNone;
     bool addedSetterStub = false;
     if (cache.canAttachStub() && !obj->watched()) {
         if (!addedSetterStub && obj->is<ProxyObject>()) {
@@ -2908,17 +2908,17 @@ SetPropertyIC::update(JSContext *cx, siz
     // The property did not exist before, now we can try to inline the property add.
     bool checkTypeset;
     if (!addedSetterStub && canCache == MaybeCanAttachAddSlot &&
         IsPropertyAddInlineable(&obj->as<NativeObject>(), id,
                                 cache.value(), oldSlots, oldShape, cache.needsTypeBarrier(),
                                 &checkTypeset))
     {
         RootedNativeObject nobj(cx, &obj->as<NativeObject>());
-        if (!cache.attachAddSlot(cx, script, ion, nobj, oldShape, oldType, checkTypeset))
+        if (!cache.attachAddSlot(cx, script, ion, nobj, oldShape, oldGroup, checkTypeset))
             return false;
         addedSetterStub = true;
     }
 
     if (!addedSetterStub)
         JitSpew(JitSpew_IonIC, "Failed to attach SETPROP cache");
 
     return true;
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -743,17 +743,17 @@ class SetPropertyIC : public RepatchIonC
     bool attachSetSlot(JSContext *cx, HandleScript outerScript, IonScript *ion,
                        HandleNativeObject obj, HandleShape shape, bool checkTypeset);
 
     bool attachCallSetter(JSContext *cx, HandleScript outerScript, IonScript *ion,
                           HandleObject obj, HandleObject holder, HandleShape shape,
                           void *returnAddr);
 
     bool attachAddSlot(JSContext *cx, HandleScript outerScript, IonScript *ion,
-                       HandleNativeObject obj, HandleShape oldShape, HandleTypeObject oldType,
+                       HandleNativeObject obj, HandleShape oldShape, HandleObjectGroup oldGroup,
                        bool checkTypeset);
 
     bool attachSetUnboxed(JSContext *cx, HandleScript outerScript, IonScript *ion,
                           HandleObject obj, HandleId id,
                           uint32_t unboxedOffset, JSValueType unboxedType,
                           bool checkTypeset);
 
     bool attachGenericProxy(JSContext *cx, HandleScript outerScript, IonScript *ion,
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -374,18 +374,18 @@ enum MIRType
     MIRType_MagicUninitializedLexical, // JS_UNINITIALIZED_LEXICAL magic value.
     MIRType_Value,
     MIRType_ObjectOrNull,
     MIRType_None,                      // Invalid, used as a placeholder.
     MIRType_Slots,                     // A slots vector
     MIRType_Elements,                  // An elements vector
     MIRType_Pointer,                   // An opaque pointer that receives no special treatment
     MIRType_Shape,                     // A Shape pointer.
-    MIRType_TypeObject,                // A TypeObject pointer.
-    MIRType_Last = MIRType_TypeObject,
+    MIRType_ObjectGroup,               // An ObjectGroup pointer.
+    MIRType_Last = MIRType_ObjectGroup,
     MIRType_Float32x4 = MIRType_Float32 | (2 << VECTOR_SCALE_SHIFT),
     MIRType_Int32x4   = MIRType_Int32   | (2 << VECTOR_SCALE_SHIFT),
     MIRType_Doublex2  = MIRType_Double  | (1 << VECTOR_SCALE_SHIFT)
 };
 
 static inline MIRType
 MIRTypeFromValueType(JSValueType type)
 {
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -181,17 +181,17 @@ class JitRuntime
     // Thunk that invalides an (Ion compiled) caller on the Ion stack.
     JitCode *invalidator_;
 
     // Thunk that calls the GC pre barrier.
     JitCode *valuePreBarrier_;
     JitCode *stringPreBarrier_;
     JitCode *objectPreBarrier_;
     JitCode *shapePreBarrier_;
-    JitCode *typeObjectPreBarrier_;
+    JitCode *objectGroupPreBarrier_;
 
     // Thunk to call malloc/free.
     JitCode *mallocStub_;
     JitCode *freeStub_;
 
     // Thunk called to finish compilation of an IonScript.
     JitCode *lazyLinkStub_;
 
@@ -356,17 +356,17 @@ class JitRuntime
     }
 
     JitCode *preBarrier(MIRType type) const {
         switch (type) {
           case MIRType_Value: return valuePreBarrier_;
           case MIRType_String: return stringPreBarrier_;
           case MIRType_Object: return objectPreBarrier_;
           case MIRType_Shape: return shapePreBarrier_;
-          case MIRType_TypeObject: return typeObjectPreBarrier_;
+          case MIRType_ObjectGroup: return objectGroupPreBarrier_;
           default: MOZ_CRASH();
         }
     }
 
     JitCode *mallocStub() const {
         return mallocStub_;
     }
 
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -2093,35 +2093,35 @@ class LFunctionDispatch : public LInstru
         setOperand(0, in);
     }
 
     MFunctionDispatch *mir() {
         return mir_->toFunctionDispatch();
     }
 };
 
-class LTypeObjectDispatch : public LInstructionHelper<0, 1, 1>
-{
-    // Dispatch is performed based on a TypeObject -> block
+class LObjectGroupDispatch : public LInstructionHelper<0, 1, 1>
+{
+    // Dispatch is performed based on an ObjectGroup -> block
     // map inferred by the MIR.
 
   public:
-    LIR_HEADER(TypeObjectDispatch);
-
-    LTypeObjectDispatch(const LAllocation &in, const LDefinition &temp) {
+    LIR_HEADER(ObjectGroupDispatch);
+
+    LObjectGroupDispatch(const LAllocation &in, const LDefinition &temp) {
         setOperand(0, in);
         setTemp(0, temp);
     }
 
     const LDefinition *temp() {
         return getTemp(0);
     }
 
-    MTypeObjectDispatch *mir() {
-        return mir_->toTypeObjectDispatch();
+    MObjectGroupDispatch *mir() {
+        return mir_->toObjectGroupDispatch();
     }
 };
 
 // Compares two integral values of the same JS type, either integer or object.
 // For objects, both operands are in registers.
 class LCompare : public LInstructionHelper<1, 2, 0>
 {
     JSOp jsop_;
--- a/js/src/jit/LOpcodes.h
+++ b/js/src/jit/LOpcodes.h
@@ -98,17 +98,17 @@
     _(Throw)                        \
     _(Phi)                          \
     _(TestIAndBranch)               \
     _(TestDAndBranch)               \
     _(TestFAndBranch)               \
     _(TestVAndBranch)               \
     _(TestOAndBranch)               \
     _(FunctionDispatch)             \
-    _(TypeObjectDispatch)           \
+    _(ObjectGroupDispatch)          \
     _(Compare)                      \
     _(CompareAndBranch)             \
     _(CompareD)                     \
     _(CompareDAndBranch)            \
     _(CompareF)                     \
     _(CompareFAndBranch)            \
     _(CompareS)                     \
     _(CompareStrictS)               \
@@ -196,17 +196,17 @@
     _(MaybeToDoubleElement)         \
     _(MaybeCopyElementsForWrite)    \
     _(LoadSlotV)                    \
     _(LoadSlotT)                    \
     _(StoreSlotV)                   \
     _(StoreSlotT)                   \
     _(GuardShape)                   \
     _(GuardShapePolymorphic)        \
-    _(GuardObjectType)              \
+    _(GuardObjectGroup)             \
     _(GuardObjectIdentity)          \
     _(GuardClass)                   \
     _(TypeBarrierV)                 \
     _(TypeBarrierO)                 \
     _(MonitorTypes)                 \
     _(PostWriteBarrierO)            \
     _(PostWriteBarrierV)            \
     _(InitializedLength)            \
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -196,17 +196,17 @@ LIRGenerator::visitNewDeclEnvObject(MNew
     define(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
 LIRGenerator::visitNewCallObject(MNewCallObject *ins)
 {
     LInstruction *lir;
-    if (ins->templateObject()->hasSingletonType()) {
+    if (ins->templateObject()->isSingleton()) {
         LNewSingletonCallObject *singletonLir = new(alloc()) LNewSingletonCallObject(temp());
         define(singletonLir, ins);
         lir = singletonLir;
     } else {
         LNewCallObject *normalLir = new(alloc()) LNewCallObject(temp());
         define(normalLir, ins);
         lir = normalLir;
     }
@@ -839,19 +839,19 @@ LIRGenerator::visitGotoWithFake(MGotoWit
 void
 LIRGenerator::visitFunctionDispatch(MFunctionDispatch *ins)
 {
     LFunctionDispatch *lir = new(alloc()) LFunctionDispatch(useRegister(ins->input()));
     add(lir, ins);
 }
 
 void
-LIRGenerator::visitTypeObjectDispatch(MTypeObjectDispatch *ins)
-{
-    LTypeObjectDispatch *lir = new(alloc()) LTypeObjectDispatch(useRegister(ins->input()), temp());
+LIRGenerator::visitObjectGroupDispatch(MObjectGroupDispatch *ins)
+{
+    LObjectGroupDispatch *lir = new(alloc()) LObjectGroupDispatch(useRegister(ins->input()), temp());
     add(lir, ins);
 }
 
 static inline bool
 CanEmitCompareAtUses(MInstruction *ins)
 {
     if (!ins->canEmitAtUses())
         return false;
@@ -2119,21 +2119,21 @@ LIRGenerator::visitStringReplace(MString
                                                       useRegisterOrConstantAtStart(ins->replacement()));
     defineReturn(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
 LIRGenerator::visitLambda(MLambda *ins)
 {
-    if (ins->info().singletonType || ins->info().useNewTypeForClone) {
+    if (ins->info().singletonType || ins->info().useSingletonForClone) {
         // If the function has a singleton type, this instruction will only be
         // executed once so we don't bother inlining it.
         //
-        // If UseNewTypeForClone is true, we will assign a singleton type to
+        // If UseSingletonForClone is true, we will assign a singleton type to
         // the clone and we have to clone the script, we can't do that inline.
         LLambdaForSingleton *lir = new(alloc()) LLambdaForSingleton(useRegisterAtStart(ins->scopeChain()));
         defineReturn(lir, ins);
         assignSafepoint(lir, ins);
     } else {
         LLambda *lir = new(alloc()) LLambda(useRegister(ins->scopeChain()), temp());
         define(lir, ins);
         assignSafepoint(lir, ins);
@@ -2307,17 +2307,17 @@ LIRGenerator::visitTypeBarrier(MTypeBarr
         LTypeBarrierV *barrier = new(alloc()) LTypeBarrierV(tmp);
         useBox(barrier, LTypeBarrierV::Input, ins->input());
         assignSnapshot(barrier, Bailout_TypeBarrierV);
         redefine(ins, ins->input());
         add(barrier, ins);
         return;
     }
 
-    // Handle typebarrier with specific TypeObject/SingleObjects.
+    // Handle typebarrier with specific ObjectGroup/SingleObjects.
     if (inputType == MIRType_Object && !types->hasType(types::Type::AnyObjectType()) &&
         ins->barrierKind() != BarrierKind::TypeTagOnly)
     {
         LDefinition tmp = needTemp ? temp() : LDefinition::BogusTemp();
         LTypeBarrierO *barrier = new(alloc()) LTypeBarrierO(useRegister(ins->getOperand(0)), tmp);
         assignSnapshot(barrier, Bailout_TypeBarrierO);
         redefine(ins, ins->getOperand(0));
         add(barrier, ins);
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -104,17 +104,17 @@ class LIRGenerator : public LIRGenerator
     void visitUnreachable(MUnreachable *unreachable);
     void visitAssertFloat32(MAssertFloat32 *ins);
     void visitGetDynamicName(MGetDynamicName *ins);
     void visitFilterArgumentsOrEval(MFilterArgumentsOrEval *ins);
     void visitCallDirectEval(MCallDirectEval *ins);
     void visitTest(MTest *test);
     void visitGotoWithFake(MGotoWithFake *ins);
     void visitFunctionDispatch(MFunctionDispatch *ins);
-    void visitTypeObjectDispatch(MTypeObjectDispatch *ins);
+    void visitObjectGroupDispatch(MObjectGroupDispatch *ins);
     void visitCompare(MCompare *comp);
     void visitTypeOf(MTypeOf *ins);
     void visitToId(MToId *ins);
     void visitBitNot(MBitNot *ins);
     void visitBitAnd(MBitAnd *ins);
     void visitBitOr(MBitOr *ins);
     void visitBitXor(MBitXor *ins);
     void visitLsh(MLsh *ins);
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -366,19 +366,19 @@ IonBuilder::inlineArray(CallInfo &callIn
     }
     ArrayObject *templateArray = &templateObject->as<ArrayObject>();
 
     // Multiple arguments imply array initialization, not just construction.
     if (callInfo.argc() >= 2) {
         initLength = callInfo.argc();
         allocating = NewArray_FullyAllocating;
 
-        types::TypeObjectKey *type = types::TypeObjectKey::get(templateArray);
-        if (!type->unknownProperties()) {
-            types::HeapTypeSetKey elemTypes = type->property(JSID_VOID);
+        types::ObjectGroupKey *key = types::ObjectGroupKey::get(templateArray);
+        if (!key->unknownProperties()) {
+            types::HeapTypeSetKey elemTypes = key->property(JSID_VOID);
 
             for (uint32_t i = 0; i < initLength; i++) {
                 MDefinition *value = callInfo.getArg(i);
                 if (!TypeSetIncludes(elemTypes.maybeTypes(), value->type(), value->resultTypeSet())) {
                     elemTypes.freeze(constraints());
                     return InliningStatus_NotInlined;
                 }
             }
@@ -397,17 +397,17 @@ IonBuilder::inlineArray(CallInfo &callIn
         if (callInfo.getArg(0)->type() != MIRType_Int32)
             return InliningStatus_NotInlined;
 
         MDefinition *arg = callInfo.getArg(0);
         if (!arg->isConstantValue()) {
             callInfo.setImplicitlyUsedUnchecked();
             MNewArrayDynamicLength *ins =
                 MNewArrayDynamicLength::New(alloc(), constraints(), templateArray,
-                                            templateArray->type()->initialHeap(constraints()),
+                                            templateArray->group()->initialHeap(constraints()),
                                             arg);
             current->add(ins);
             current->push(ins);
             return InliningStatus_Inlined;
         }
 
         // The next several checks all may fail due to range conditions.
         trackOptimizationOutcome(TrackedOutcome::ArrayRange);
@@ -431,17 +431,17 @@ IonBuilder::inlineArray(CallInfo &callIn
     }
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MConstant *templateConst = MConstant::NewConstraintlessObject(alloc(), templateArray);
     current->add(templateConst);
 
     MNewArray *ins = MNewArray::New(alloc(), constraints(), initLength, templateConst,
-                                    templateArray->type()->initialHeap(constraints()),
+                                    templateArray->group()->initialHeap(constraints()),
                                     allocating);
     current->add(ins);
     current->push(ins);
 
     if (callInfo.argc() >= 2) {
         // Get the elements vector.
         MElements *elements = MElements::New(alloc(), ins);
         current->add(elements);
@@ -495,17 +495,17 @@ IonBuilder::inlineArrayPopShift(CallInfo
     if (returnType == MIRType_Undefined || returnType == MIRType_Null)
         return InliningStatus_NotInlined;
     if (callInfo.thisArg()->type() != MIRType_Object)
         return InliningStatus_NotInlined;
 
     // Pop and shift are only handled for dense arrays that have never been
     // used in an iterator: popping elements does not account for suppressing
     // deleted properties in active iterators.
-    types::TypeObjectFlags unhandledFlags =
+    types::ObjectGroupFlags unhandledFlags =
         types::OBJECT_FLAG_SPARSE_INDEXES |
         types::OBJECT_FLAG_LENGTH_OVERFLOW |
         types::OBJECT_FLAG_ITERATED;
 
     MDefinition *obj = callInfo.thisArg();
     types::TemporaryTypeSet *thisTypes = obj->resultTypeSet();
     if (!thisTypes || thisTypes->getKnownClass(constraints()) != &ArrayObject::class_)
         return InliningStatus_NotInlined;
@@ -727,65 +727,65 @@ IonBuilder::inlineArrayConcat(CallInfo &
         return InliningStatus_NotInlined;
     }
 
     // Require the 'this' types to have a specific type matching the current
     // global, so we can create the result object inline.
     if (thisTypes->getObjectCount() != 1)
         return InliningStatus_NotInlined;
 
-    types::TypeObject *baseThisType = thisTypes->getTypeObject(0);
-    if (!baseThisType)
+    types::ObjectGroup *thisGroup = thisTypes->getGroup(0);
+    if (!thisGroup)
         return InliningStatus_NotInlined;
-    types::TypeObjectKey *thisType = types::TypeObjectKey::get(baseThisType);
-    if (thisType->unknownProperties())
+    types::ObjectGroupKey *thisKey = types::ObjectGroupKey::get(thisGroup);
+    if (thisKey->unknownProperties())
         return InliningStatus_NotInlined;
 
     // Don't inline if 'this' is packed and the argument may not be packed
     // (the result array will reuse the 'this' type).
     if (!thisTypes->hasObjectFlags(constraints(), types::OBJECT_FLAG_NON_PACKED) &&
         argTypes->hasObjectFlags(constraints(), types::OBJECT_FLAG_NON_PACKED))
     {
         trackOptimizationOutcome(TrackedOutcome::ArrayBadFlags);
         return InliningStatus_NotInlined;
     }
 
     // Constraints modeling this concat have not been generated by inference,
     // so check that type information already reflects possible side effects of
     // this call.
-    types::HeapTypeSetKey thisElemTypes = thisType->property(JSID_VOID);
+    types::HeapTypeSetKey thisElemTypes = thisKey->property(JSID_VOID);
 
     types::TemporaryTypeSet *resTypes = getInlineReturnTypeSet();
-    if (!resTypes->hasType(types::Type::ObjectType(thisType)))
+    if (!resTypes->hasType(types::Type::ObjectType(thisKey)))
         return InliningStatus_NotInlined;
 
     for (unsigned i = 0; i < argTypes->getObjectCount(); i++) {
-        types::TypeObjectKey *argType = argTypes->getObject(i);
-        if (!argType)
+        types::ObjectGroupKey *key = argTypes->getObject(i);
+        if (!key)
             continue;
 
-        if (argType->unknownProperties())
+        if (key->unknownProperties())
             return InliningStatus_NotInlined;
 
-        types::HeapTypeSetKey elemTypes = argType->property(JSID_VOID);
+        types::HeapTypeSetKey elemTypes = key->property(JSID_VOID);
         if (!elemTypes.knownSubset(constraints(), thisElemTypes))
             return InliningStatus_NotInlined;
     }
 
     // Inline the call.
     JSObject *templateObj = inspector->getTemplateObjectForNative(pc, js::array_concat);
-    if (!templateObj || templateObj->type() != baseThisType)
+    if (!templateObj || templateObj->group() != thisGroup)
         return InliningStatus_NotInlined;
     MOZ_ASSERT(templateObj->is<ArrayObject>());
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MArrayConcat *ins = MArrayConcat::New(alloc(), constraints(), callInfo.thisArg(), callInfo.getArg(0),
                                           &templateObj->as<ArrayObject>(),
-                                          templateObj->type()->initialHeap(constraints()));
+                                          templateObj->group()->initialHeap(constraints()));
     current->add(ins);
     current->push(ins);
 
     if (!resumeAfter(ins))
         return InliningStatus_Error;
     return InliningStatus_Inlined;
 }
 
@@ -1366,21 +1366,21 @@ IonBuilder::inlineStringSplit(CallInfo &
     if (callInfo.getArg(0)->type() != MIRType_String)
         return InliningStatus_NotInlined;
 
     JSObject *templateObject = inspector->getTemplateObjectForNative(pc, js::str_split);
     if (!templateObject)
         return InliningStatus_NotInlined;
     MOZ_ASSERT(templateObject->is<ArrayObject>());
 
-    types::TypeObjectKey *retType = types::TypeObjectKey::get(templateObject);
-    if (retType->unknownProperties())
+    types::ObjectGroupKey *retKey = types::ObjectGroupKey::get(templateObject);
+    if (retKey->unknownProperties())
         return InliningStatus_NotInlined;
 
-    types::HeapTypeSetKey key = retType->property(JSID_VOID);
+    types::HeapTypeSetKey key = retKey->property(JSID_VOID);
     if (!key.maybeTypes())
         return InliningStatus_NotInlined;
 
     if (!key.maybeTypes()->hasType(types::Type::StringType())) {
         key.freeze(constraints());
         return InliningStatus_NotInlined;
     }
 
@@ -1685,40 +1685,40 @@ IonBuilder::inlineObjectCreate(CallInfo 
     if (callInfo.argc() != 1 || callInfo.constructing())
         return InliningStatus_NotInlined;
 
     JSObject *templateObject = inspector->getTemplateObjectForNative(pc, obj_create);
     if (!templateObject)
         return InliningStatus_NotInlined;
 
     MOZ_ASSERT(templateObject->is<PlainObject>());
-    MOZ_ASSERT(!templateObject->hasSingletonType());
+    MOZ_ASSERT(!templateObject->isSingleton());
 
     // Ensure the argument matches the template object's prototype.
     MDefinition *arg = callInfo.getArg(0);
     if (JSObject *proto = templateObject->getProto()) {
         if (IsInsideNursery(proto))
             return InliningStatus_NotInlined;
 
         types::TemporaryTypeSet *types = arg->resultTypeSet();
-        if (!types || types->getSingleton() != proto)
+        if (!types || types->maybeSingleton() != proto)
             return InliningStatus_NotInlined;
 
         MOZ_ASSERT(types->getKnownMIRType() == MIRType_Object);
     } else {
         if (arg->type() != MIRType_Null)
             return InliningStatus_NotInlined;
     }
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MConstant *templateConst = MConstant::NewConstraintlessObject(alloc(), templateObject);
     current->add(templateConst);
     MNewObject *ins = MNewObject::New(alloc(), constraints(), templateConst,
-                                      templateObject->type()->initialHeap(constraints()),
+                                      templateObject->group()->initialHeap(constraints()),
                                       MNewObject::ObjectCreate);
     current->add(ins);
     current->push(ins);
     if (!resumeAfter(ins))
         return InliningStatus_Error;
 
     return InliningStatus_Inlined;
 }
@@ -2659,17 +2659,17 @@ IonBuilder::inlineConstructTypedObject(C
 
     InlineTypedObject *templateObject = &obj->as<InlineTypedObject>();
     if (&templateObject->typeDescr() != descr)
         return InliningStatus_NotInlined;
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MNewTypedObject *ins = MNewTypedObject::New(alloc(), constraints(), templateObject,
-                                                templateObject->type()->initialHeap(constraints()));
+                                                templateObject->group()->initialHeap(constraints()));
     current->add(ins);
     current->push(ins);
 
     return InliningStatus_Inlined;
 }
 
 IonBuilder::InliningStatus
 IonBuilder::inlineConstructSimdObject(CallInfo &callInfo, SimdTypeDescr *descr)
@@ -2708,17 +2708,17 @@ IonBuilder::inlineConstructSimdObject(Ca
     MOZ_ASSERT(&inlineTypedObject->typeDescr() == descr);
 
     MSimdValueX4 *values = MSimdValueX4::New(alloc(), simdType,
                                              callInfo.getArg(0), callInfo.getArg(1),
                                              callInfo.getArg(2), callInfo.getArg(3));
     current->add(values);
 
     MSimdBox *obj = MSimdBox::New(alloc(), constraints(), values, inlineTypedObject,
-                                  inlineTypedObject->type()->initialHeap(constraints()));
+                                  inlineTypedObject->group()->initialHeap(constraints()));
     current->add(obj);
     current->push(obj);
 
     callInfo.setImplicitlyUsedUnchecked();
     return InliningStatus_Inlined;
 }
 
 IonBuilder::InliningStatus
@@ -2738,17 +2738,17 @@ IonBuilder::inlineSimdInt32x4BinaryArith
     // If the type of any of the arguments is neither a SIMD type, an Object
     // type, or a Value, then the applyTypes phase will add a fallible box &
     // unbox sequence.  This does not matter much as the binary arithmetic
     // instruction is supposed to produce a TypeError once it is called.
     MSimdBinaryArith *ins = MSimdBinaryArith::New(alloc(), callInfo.getArg(0), callInfo.getArg(1),
                                                   op, MIRType_Int32x4);
 
     MSimdBox *obj = MSimdBox::New(alloc(), constraints(), ins, inlineTypedObject,
-                                  inlineTypedObject->type()->initialHeap(constraints()));
+                                  inlineTypedObject->group()->initialHeap(constraints()));
 
     current->add(ins);
     current->add(obj);
     current->push(obj);
 
     callInfo.setImplicitlyUsedUnchecked();
     return InliningStatus_Inlined;
 }
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -645,23 +645,23 @@ MConstant *
 MConstant::NewConstraintlessObject(TempAllocator &alloc, JSObject *v)
 {
     return new(alloc) MConstant(v);
 }
 
 types::TemporaryTypeSet *
 jit::MakeSingletonTypeSet(types::CompilerConstraintList *constraints, JSObject *obj)
 {
-    // Invalidate when this object's TypeObject gets unknown properties. This
+    // Invalidate when this object's ObjectGroup gets unknown properties. This
     // happens for instance when we mutate an object's __proto__, in this case
     // we want to invalidate and mark this TypeSet as containing AnyObject
-    // (because mutating __proto__ will change an object's TypeObject).
+    // (because mutating __proto__ will change an object's ObjectGroup).
     MOZ_ASSERT(constraints);
-    types::TypeObjectKey *objType = types::TypeObjectKey::get(obj);
-    objType->hasStableClassAndProto(constraints);
+    types::ObjectGroupKey *key = types::ObjectGroupKey::get(obj);
+    key->hasStableClassAndProto(constraints);
 
     LifoAlloc *alloc = GetJitContext()->temp->lifoAlloc();
     return alloc->new_<types::TemporaryTypeSet>(alloc, types::Type::ObjectType(obj));
 }
 
 static types::TemporaryTypeSet *
 MakeUnknownTypeSet()
 {
@@ -807,17 +807,17 @@ MConstant::canProduceFloat32() const
 }
 
 MNurseryObject::MNurseryObject(JSObject *obj, uint32_t index, types::CompilerConstraintList *constraints)
   : index_(index)
 {
     setResultType(MIRType_Object);
 
     MOZ_ASSERT(IsInsideNursery(obj));
-    MOZ_ASSERT(!obj->hasSingletonType());
+    MOZ_ASSERT(!obj->isSingleton());
     setResultTypeSet(MakeSingletonTypeSet(constraints, obj));
 
     setMovable();
 }
 
 MNurseryObject *
 MNurseryObject::New(TempAllocator &alloc, JSObject *obj, uint32_t index,
                     types::CompilerConstraintList *constraints)
@@ -3636,17 +3636,17 @@ MBeta::printOpcode(FILE *fp) const
         fprintf(fp, " ???");
     }
 }
 
 bool
 MNewObject::shouldUseVM() const
 {
     PlainObject *obj = templateObject();
-    return obj->hasSingletonType() || obj->hasDynamicSlots();
+    return obj->isSingleton() || obj->hasDynamicSlots();
 }
 
 bool
 MCreateThisWithTemplate::canRecoverOnBailout() const
 {
     MOZ_ASSERT(templateObject()->is<PlainObject>() || templateObject()->is<UnboxedPlainObject>());
     MOZ_ASSERT_IF(templateObject()->is<PlainObject>(),
                   !templateObject()->as<PlainObject>().denseElementsAreCopyOnWrite());
@@ -3754,17 +3754,17 @@ MNewArray::shouldUseVM() const
 
     size_t arraySlots =
         gc::GetGCKindSlots(templateObject()->asTenured().getAllocKind()) - ObjectElements::VALUES_PER_HEADER;
 
     // Allocate space using the VMCall when mir hints it needs to get allocated
     // immediately, but only when data doesn't fit the available array slots.
     bool allocating = allocatingBehaviour() != NewArray_Unallocating && count() > arraySlots;
 
-    return templateObject()->hasSingletonType() || allocating;
+    return templateObject()->isSingleton() || allocating;
 }
 
 bool
 MLoadFixedSlot::mightAlias(const MDefinition *store) const
 {
     if (store->isStoreFixedSlot() && store->toStoreFixedSlot()->slot() != slot())
         return false;
     return true;
@@ -4034,17 +4034,17 @@ types::TemporaryTypeSet *
 InlinePropertyTable::buildTypeSetForFunction(JSFunction *func) const
 {
     LifoAlloc *alloc = GetJitContext()->temp->lifoAlloc();
     types::TemporaryTypeSet *types = alloc->new_<types::TemporaryTypeSet>();
     if (!types)
         return nullptr;
     for (size_t i = 0; i < numEntries(); i++) {
         if (entries_[i]->func == func)
-            types->addType(types::Type::ObjectType(entries_[i]->typeObj), alloc);
+            types->addType(types::Type::ObjectType(entries_[i]->group), alloc);
     }
     return types;
 }
 
 void *
 MLoadTypedArrayElementStatic::base() const
 {
     return AnyTypedArrayViewData(someTypedArray_);
@@ -4346,114 +4346,115 @@ jit::ElementAccessHasExtraIndexedPropert
 MIRType
 jit::DenseNativeElementType(types::CompilerConstraintList *constraints, MDefinition *obj)
 {
     types::TemporaryTypeSet *types = obj->resultTypeSet();
     MIRType elementType = MIRType_None;
     unsigned count = types->getObjectCount();
 
     for (unsigned i = 0; i < count; i++) {
-        types::TypeObjectKey *object = types->getObject(i);
-        if (!object)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
 
-        if (object->unknownProperties())
+        if (key->unknownProperties())
             return MIRType_None;
 
-        types::HeapTypeSetKey elementTypes = object->property(JSID_VOID);
+        types::HeapTypeSetKey elementTypes = key->property(JSID_VOID);
 
         MIRType type = elementTypes.knownMIRType(constraints);
         if (type == MIRType_None)
             return MIRType_None;
 
         if (elementType == MIRType_None)
             elementType = type;
         else if (elementType != type)
             return MIRType_None;
     }
 
     return elementType;
 }
 
 static BarrierKind
 PropertyReadNeedsTypeBarrier(types::CompilerConstraintList *constraints,
-                             types::TypeObjectKey *object, PropertyName *name,
+                             types::ObjectGroupKey *key, PropertyName *name,
                              types::TypeSet *observed)
 {
     // If the object being read from has types for the property which haven't
     // been observed at this access site, the read could produce a new type and
     // a barrier is needed. Note that this only covers reads from properties
     // which are accounted for by type information, i.e. native data properties
     // and elements.
     //
     // We also need a barrier if the object is a proxy, because then all bets
     // are off, just as if it has unknown properties.
-    if (object->unknownProperties() || observed->empty() ||
-        object->clasp()->isProxy())
+    if (key->unknownProperties() || observed->empty() ||
+        key->clasp()->isProxy())
     {
         return BarrierKind::TypeSet;
     }
 
     jsid id = name ? NameToId(name) : JSID_VOID;
-    types::HeapTypeSetKey property = object->property(id);
+    types::HeapTypeSetKey property = key->property(id);
     if (property.maybeTypes()) {
         if (!TypeSetIncludes(observed, MIRType_Value, property.maybeTypes())) {
             // If all possible objects have been observed, we don't have to
             // guard on the specific object types.
             if (property.maybeTypes()->objectsAreSubset(observed)) {
                 property.freeze(constraints);
                 return BarrierKind::TypeTagOnly;
             }
             return BarrierKind::TypeSet;
         }
     }
 
     // Type information for global objects is not required to reflect the
     // initial 'undefined' value for properties, in particular global
     // variables declared with 'var'. Until the property is assigned a value
     // other than undefined, a barrier is required.
-    if (JSObject *obj = object->singleton()) {
+    if (key->isSingleton()) {
+        JSObject *obj = key->singleton();
         if (name && types::CanHaveEmptyPropertyTypesForOwnProperty(obj) &&
             (!property.maybeTypes() || property.maybeTypes()->empty()))
         {
             return BarrierKind::TypeSet;
         }
     }
 
     property.freeze(constraints);
     return BarrierKind::NoBarrier;
 }
 
 BarrierKind
 jit::PropertyReadNeedsTypeBarrier(JSContext *propertycx,
                                   types::CompilerConstraintList *constraints,
-                                  types::TypeObjectKey *object, PropertyName *name,
+                                  types::ObjectGroupKey *key, PropertyName *name,
                                   types::TemporaryTypeSet *observed, bool updateObserved)
 {
     // If this access has never executed, try to add types to the observed set
     // according to any property which exists on the object or its prototype.
     if (updateObserved && observed->empty() && name) {
         JSObject *obj;
-        if (object->singleton())
-            obj = object->singleton();
-        else if (object->hasTenuredProto())
-            obj = object->proto().toObjectOrNull();
+        if (key->isSingleton())
+            obj = key->singleton();
+        else if (key->hasTenuredProto())
+            obj = key->proto().toObjectOrNull();
         else
             obj = nullptr;
 
         while (obj) {
             if (!obj->getClass()->isNative())
                 break;
 
-            types::TypeObjectKey *typeObj = types::TypeObjectKey::get(obj);
+            types::ObjectGroupKey *protoKey = types::ObjectGroupKey::get(obj);
             if (propertycx)
-                typeObj->ensureTrackedProperty(propertycx, NameToId(name));
-
-            if (!typeObj->unknownProperties()) {
-                types::HeapTypeSetKey property = typeObj->property(NameToId(name));
+                protoKey->ensureTrackedProperty(propertycx, NameToId(name));
+
+            if (!protoKey->unknownProperties()) {
+                types::HeapTypeSetKey property = protoKey->property(NameToId(name));
                 if (property.maybeTypes()) {
                     types::TypeSet::TypeList types;
                     if (!property.maybeTypes()->enumerateTypes(&types))
                         break;
                     if (types.length()) {
                         // Note: the return value here is ignored.
                         observed->addType(types[0], GetJitContext()->temp->lifoAlloc());
                         break;
@@ -4462,17 +4463,17 @@ jit::PropertyReadNeedsTypeBarrier(JSCont
             }
 
             if (!obj->hasTenuredProto())
                 break;
             obj = obj->getProto();
         }
     }
 
-    return PropertyReadNeedsTypeBarrier(constraints, object, name, observed);
+    return PropertyReadNeedsTypeBarrier(constraints, key, name, observed);
 }
 
 BarrierKind
 jit::PropertyReadNeedsTypeBarrier(JSContext *propertycx,
                                   types::CompilerConstraintList *constraints,
                                   MDefinition *obj, PropertyName *name,
                                   types::TemporaryTypeSet *observed)
 {
@@ -4482,19 +4483,19 @@ jit::PropertyReadNeedsTypeBarrier(JSCont
     types::TypeSet *types = obj->resultTypeSet();
     if (!types || types->unknownObject())
         return BarrierKind::TypeSet;
 
     BarrierKind res = BarrierKind::NoBarrier;
 
     bool updateObserved = types->getObjectCount() == 1;
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *object = types->getObject(i);
-        if (object) {
-            BarrierKind kind = PropertyReadNeedsTypeBarrier(propertycx, constraints, object, name,
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (key) {
+            BarrierKind kind = PropertyReadNeedsTypeBarrier(propertycx, constraints, key, name,
                                                             observed, updateObserved);
             if (kind == BarrierKind::TypeSet)
                 return BarrierKind::TypeSet;
 
             if (kind == BarrierKind::TypeTagOnly) {
                 MOZ_ASSERT(res == BarrierKind::NoBarrier || res == BarrierKind::TypeTagOnly);
                 res = BarrierKind::TypeTagOnly;
             } else {
@@ -4516,26 +4517,26 @@ jit::PropertyReadOnPrototypeNeedsTypeBar
 
     types::TypeSet *types = obj->resultTypeSet();
     if (!types || types->unknownObject())
         return BarrierKind::TypeSet;
 
     BarrierKind res = BarrierKind::NoBarrier;
 
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *object = types->getObject(i);
-        if (!object)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
         while (true) {
-            if (!object->hasStableClassAndProto(constraints) || !object->hasTenuredProto())
+            if (!key->hasStableClassAndProto(constraints) || !key->hasTenuredProto())
                 return BarrierKind::TypeSet;
-            if (!object->proto().isObject())
+            if (!key->proto().isObject())
                 break;
-            object = types::TypeObjectKey::get(object->proto().toObject());
-            BarrierKind kind = PropertyReadNeedsTypeBarrier(constraints, object, name, observed);
+            key = types::ObjectGroupKey::get(key->proto().toObject());
+            BarrierKind kind = PropertyReadNeedsTypeBarrier(constraints, key, name, observed);
             if (kind == BarrierKind::TypeSet)
                 return BarrierKind::TypeSet;
 
             if (kind == BarrierKind::TypeTagOnly) {
                 MOZ_ASSERT(res == BarrierKind::NoBarrier || res == BarrierKind::TypeTagOnly);
                 res = BarrierKind::TypeTagOnly;
             } else {
                 MOZ_ASSERT(kind == BarrierKind::NoBarrier);
@@ -4552,23 +4553,23 @@ jit::PropertyReadIsIdempotent(types::Com
 {
     // Determine if reading a property from obj is likely to be idempotent.
 
     types::TypeSet *types = obj->resultTypeSet();
     if (!types || types->unknownObject())
         return false;
 
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *object = types->getObject(i);
-        if (object) {
-            if (object->unknownProperties())
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (key) {
+            if (key->unknownProperties())
                 return false;
 
             // Check if the property has been reconfigured or is a getter.
-            types::HeapTypeSetKey property = object->property(NameToId(name));
+            types::HeapTypeSetKey property = key->property(NameToId(name));
             if (property.nonData(constraints))
                 return false;
         }
     }
 
     return true;
 }
 
@@ -4583,40 +4584,40 @@ jit::AddObjectsForPropertyRead(MDefiniti
 
     types::TemporaryTypeSet *types = obj->resultTypeSet();
     if (!types || types->unknownObject()) {
         observed->addType(types::Type::AnyObjectType(), alloc);
         return;
     }
 
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *object = types->getObject(i);
-        if (!object)
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key)
             continue;
 
-        if (object->unknownProperties()) {
+        if (key->unknownProperties()) {
             observed->addType(types::Type::AnyObjectType(), alloc);
             return;
         }
 
         jsid id = name ? NameToId(name) : JSID_VOID;
-        types::HeapTypeSetKey property = object->property(id);
+        types::HeapTypeSetKey property = key->property(id);
         types::HeapTypeSet *types = property.maybeTypes();
         if (!types)
             continue;
 
         if (types->unknownObject()) {
             observed->addType(types::Type::AnyObjectType(), alloc);
             return;
         }
 
         for (size_t i = 0; i < types->getObjectCount(); i++) {
-            types::TypeObjectKey *object = types->getObject(i);
-            if (object)
-                observed->addType(types::Type::ObjectType(object), alloc);
+            types::ObjectGroupKey *valueKey = types->getObject(i);
+            if (valueKey)
+                observed->addType(types::Type::ObjectType(valueKey), alloc);
         }
     }
 }
 
 static bool
 PropertyTypeIncludes(TempAllocator &alloc, types::HeapTypeSetKey property,
                      MDefinition *value, MIRType implicitType)
 {
@@ -4646,25 +4647,25 @@ TryAddTypeBarrierForWrite(TempAllocator 
     // information.
 
     // All objects in the set must have the same types for name. Otherwise, we
     // could bail out without subsequently triggering a type change that
     // invalidates the compiled code.
     Maybe<types::HeapTypeSetKey> aggregateProperty;
 
     for (size_t i = 0; i < objTypes->getObjectCount(); i++) {
-        types::TypeObjectKey *object = objTypes->getObject(i);
-        if (!object)
+        types::ObjectGroupKey *key = objTypes->getObject(i);
+        if (!key)
             continue;
 
-        if (object->unknownProperties())
+        if (key->unknownProperties())
             return false;
 
         jsid id = name ? NameToId(name) : JSID_VOID;
-        types::HeapTypeSetKey property = object->property(id);
+        types::HeapTypeSetKey property = key->property(id);
         if (!property.maybeTypes() || property.couldBeConstant(constraints))
             return false;
 
         if (PropertyTypeIncludes(alloc, property, *pvalue, implicitType))
             return false;
 
         // This freeze is not required for correctness, but ensures that we
         // will recompile if the property types change and the barrier can
@@ -4718,31 +4719,31 @@ TryAddTypeBarrierForWrite(TempAllocator 
         kind = BarrierKind::TypeTagOnly;
 
     MInstruction *ins = MMonitorTypes::New(alloc, *pvalue, types, kind);
     current->add(ins);
     return true;
 }
 
 static MInstruction *
-AddTypeGuard(TempAllocator &alloc, MBasicBlock *current, MDefinition *obj,
-             types::TypeObjectKey *type, bool bailOnEquality)
+AddGroupGuard(TempAllocator &alloc, MBasicBlock *current, MDefinition *obj,
+              types::ObjectGroupKey *key, bool bailOnEquality)
 {
     MInstruction *guard;
 
-    if (type->isTypeObject()) {
-        guard = MGuardObjectType::New(alloc, obj, type->asTypeObject(), bailOnEquality,
-                                      Bailout_ObjectIdentityOrTypeGuard);
+    if (key->isGroup()) {
+        guard = MGuardObjectGroup::New(alloc, obj, key->group(), bailOnEquality,
+                                       Bailout_ObjectIdentityOrTypeGuard);
     } else {
-        guard = MGuardObjectIdentity::New(alloc, obj, type->asSingleObject(), bailOnEquality);
+        guard = MGuardObjectIdentity::New(alloc, obj, key->singleton(), bailOnEquality);
     }
 
     current->add(guard);
 
-    // For now, never move type object guards.
+    // For now, never move object group / identity guards.
     guard->setNotMovable();
 
     return guard;
 }
 
 // Whether value can be written to property without changing type information.
 bool
 jit::CanWriteProperty(TempAllocator &alloc, types::CompilerConstraintList *constraints,
@@ -4772,27 +4773,27 @@ jit::PropertyWriteNeedsTypeBarrier(TempA
 
     // If all of the objects being written to have property types which already
     // reflect the value, no barrier at all is needed. Additionally, if all
     // objects being written to have the same types for the property, and those
     // types do *not* reflect the value, add a type barrier for the value.
 
     bool success = true;
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *object = types->getObject(i);
-        if (!object || object->unknownProperties())
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key || key->unknownProperties())
             continue;
 
-        // TI doesn't track TypedArray objects and should never insert a type
+        // TI doesn't track TypedArray indexes and should never insert a type
         // barrier for them.
-        if (!name && IsAnyTypedArrayClass(object->clasp()))
+        if (!name && IsAnyTypedArrayClass(key->clasp()))
             continue;
 
         jsid id = name ? NameToId(name) : JSID_VOID;
-        types::HeapTypeSetKey property = object->property(id);
+        types::HeapTypeSetKey property = key->property(id);
         if (!CanWriteProperty(alloc, constraints, property, *pvalue, implicitType)) {
             // Either pobj or pvalue needs to be modified to filter out the
             // types which the value could have but are not in the property,
             // or a VM call is required. A VM call is always required if pobj
             // and pvalue cannot be modified.
             if (!canModify)
                 return true;
             success = TryAddTypeBarrierForWrite(alloc, constraints, current, types, name, pvalue,
@@ -4806,31 +4807,31 @@ jit::PropertyWriteNeedsTypeBarrier(TempA
 
     // If all of the objects except one have property types which reflect the
     // value, and the remaining object has no types at all for the property,
     // add a guard that the object does not have that remaining object's type.
 
     if (types->getObjectCount() <= 1)
         return true;
 
-    types::TypeObjectKey *excluded = nullptr;
+    types::ObjectGroupKey *excluded = nullptr;
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        types::TypeObjectKey *object = types->getObject(i);
-        if (!object || object->unknownProperties())
+        types::ObjectGroupKey *key = types->getObject(i);
+        if (!key || key->unknownProperties())
             continue;
-        if (!name && IsAnyTypedArrayClass(object->clasp()))
+        if (!name && IsAnyTypedArrayClass(key->clasp()))
             continue;
 
         jsid id = name ? NameToId(name) : JSID_VOID;
-        types::HeapTypeSetKey property = object->property(id);
+        types::HeapTypeSetKey property = key->property(id);
         if (CanWriteProperty(alloc, constraints, property, *pvalue, implicitType))
             continue;
 
         if ((property.maybeTypes() && !property.maybeTypes()->empty()) || excluded)
             return true;
-        excluded = object;
+        excluded = key;
     }
 
     MOZ_ASSERT(excluded);
 
-    *pobj = AddTypeGuard(alloc, current, *pobj, excluded, /* bailOnEquality = */ true);
+    *pobj = AddGroupGuard(alloc, current, *pobj, excluded, /* bailOnEquality = */ true);
     return false;
 }
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -2687,17 +2687,17 @@ class MNewArray
               gc::InitialHeap initialHeap, AllocatingBehaviour allocating)
       : MUnaryInstruction(templateConst),
         count_(count),
         initialHeap_(initialHeap),
         allocating_(allocating)
     {
         ArrayObject *obj = templateObject();
         setResultType(MIRType_Object);
-        if (!obj->hasSingletonType())
+        if (!obj->isSingleton())
             setResultTypeSet(MakeSingletonTypeSet(constraints, obj));
     }
 
   public:
     INSTRUCTION_HEADER(NewArray)
 
     static MNewArray *New(TempAllocator &alloc, types::CompilerConstraintList *constraints,
                           uint32_t count, MConstant *templateConst,
@@ -2749,17 +2749,17 @@ class MNewArrayCopyOnWrite : public MNul
     AlwaysTenured<ArrayObject*> templateObject_;
     gc::InitialHeap initialHeap_;
 
     MNewArrayCopyOnWrite(types::CompilerConstraintList *constraints, ArrayObject *templateObject,
               gc::InitialHeap initialHeap)
       : templateObject_(templateObject),
         initialHeap_(initialHeap)
     {
-        MOZ_ASSERT(!templateObject->hasSingletonType());
+        MOZ_ASSERT(!templateObject->isSingleton());
         setResultType(MIRType_Object);
         setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
     }
 
   public:
     INSTRUCTION_HEADER(NewArrayCopyOnWrite)
 
     static MNewArrayCopyOnWrite *New(TempAllocator &alloc,
@@ -2793,17 +2793,17 @@ class MNewArrayDynamicLength
     MNewArrayDynamicLength(types::CompilerConstraintList *constraints, ArrayObject *templateObject,
                            gc::InitialHeap initialHeap, MDefinition *length)
       : MUnaryInstruction(length),
         templateObject_(templateObject),
         initialHeap_(initialHeap)
     {
         setGuard(); // Need to throw if length is negative.
         setResultType(MIRType_Object);
-        if (!templateObject->hasSingletonType())
+        if (!templateObject->isSingleton())
             setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
     }
 
   public:
     INSTRUCTION_HEADER(NewArrayDynamicLength)
 
     static MNewArrayDynamicLength *New(TempAllocator &alloc, types::CompilerConstraintList *constraints,
                                        ArrayObject *templateObject, gc::InitialHeap initialHeap,
@@ -2842,17 +2842,17 @@ class MNewObject
                gc::InitialHeap initialHeap, Mode mode)
       : MUnaryInstruction(templateConst),
         initialHeap_(initialHeap),
         mode_(mode)
     {
         PlainObject *obj = templateObject();
         MOZ_ASSERT_IF(mode != ObjectLiteral, !shouldUseVM());
         setResultType(MIRType_Object);
-        if (!obj->hasSingletonType())
+        if (!obj->isSingleton())
             setResultTypeSet(MakeSingletonTypeSet(constraints, obj));
 
         // The constant is kept separated in a MConstant, this way we can safely
         // mark it during GC if we recover the object allocation.  Otherwise, by
         // making it emittedAtUses, we do not produce register allocations for
         // it and inline its content inside the code produced by the
         // CodeGenerator.
         templateConst->setEmittedAtUses();
@@ -6308,18 +6308,18 @@ class MStringSplit
         return getOperand(0);
     }
     MDefinition *separator() const {
         return getOperand(1);
     }
     JSObject *templateObject() const {
         return &getOperand(2)->toConstant()->value().toObject();
     }
-    types::TypeObject *typeObject() const {
-        return templateObject()->type();
+    types::ObjectGroup *group() const {
+        return templateObject()->group();
     }
     bool possiblyCalls() const MOZ_OVERRIDE {
         return true;
     }
     virtual AliasSet getAliasSet() const MOZ_OVERRIDE {
         // Although this instruction returns a new array, we don't have to mark
         // it as store instruction, see also MNewArray.
         return AliasSet::None();
@@ -7147,46 +7147,46 @@ struct LambdaFunctionInfo
 {
     // The functions used in lambdas are the canonical original function in
     // the script, and are immutable except for delazification. Record this
     // information while still on the main thread to avoid races.
     AlwaysTenuredFunction fun;
     uint16_t flags;
     gc::Cell *scriptOrLazyScript;
     bool singletonType;
-    bool useNewTypeForClone;
+    bool useSingletonForClone;
 
     explicit LambdaFunctionInfo(JSFunction *fun)
       : fun(fun), flags(fun->flags()),
         scriptOrLazyScript(fun->hasScript()
                            ? (gc::Cell *) fun->nonLazyScript()
                            : (gc::Cell *) fun->lazyScript()),
-        singletonType(fun->hasSingletonType()),
-        useNewTypeForClone(types::UseNewTypeForClone(fun))
+        singletonType(fun->isSingleton()),
+        useSingletonForClone(types::UseSingletonForClone(fun))
     {}
 
     LambdaFunctionInfo(const LambdaFunctionInfo &info)
       : fun((JSFunction *) info.fun), flags(info.flags),
         scriptOrLazyScript(info.scriptOrLazyScript),
         singletonType(info.singletonType),
-        useNewTypeForClone(info.useNewTypeForClone)
+        useSingletonForClone(info.useSingletonForClone)
     {}
 };
 
 class MLambda
   : public MBinaryInstruction,
     public SingleObjectPolicy::Data
 {
     LambdaFunctionInfo info_;
 
     MLambda(types::CompilerConstraintList *constraints, MDefinition *scopeChain, MConstant *cst)
       : MBinaryInstruction(scopeChain, cst), info_(&cst->value().toObject().as<JSFunction>())
     {
         setResultType(MIRType_Object);
-        if (!info().fun->hasSingletonType() && !types::UseNewTypeForClone(info().fun))
+        if (!info().fun->isSingleton() && !types::UseSingletonForClone(info().fun))
             setResultTypeSet(MakeSingletonTypeSet(constraints, info().fun));
     }
 
   public:
     INSTRUCTION_HEADER(Lambda)
 
     static MLambda *New(TempAllocator &alloc, types::CompilerConstraintList *constraints,
                         MDefinition *scopeChain, MConstant *fun)
@@ -7214,18 +7214,18 @@ class MLambdaArrow
 {
     LambdaFunctionInfo info_;
 
     MLambdaArrow(types::CompilerConstraintList *constraints, MDefinition *scopeChain,
                  MDefinition *this_, JSFunction *fun)
       : MBinaryInstruction(scopeChain, this_), info_(fun)
     {
         setResultType(MIRType_Object);
-        MOZ_ASSERT(!types::UseNewTypeForClone(fun));
-        if (!fun->hasSingletonType())
+        MOZ_ASSERT(!types::UseSingletonForClone(fun));
+        if (!fun->isSingleton())
             setResultTypeSet(MakeSingletonTypeSet(constraints, fun));
     }
 
   public:
     INSTRUCTION_HEADER(LambdaArrow)
 
     static MLambdaArrow *New(TempAllocator &alloc, types::CompilerConstraintList *constraints,
                              MDefinition *scopeChain, MDefinition *this_, JSFunction *fun)
@@ -9190,21 +9190,21 @@ class MStoreFixedSlot
 };
 
 typedef Vector<JSObject *, 4, JitAllocPolicy> ObjectVector;
 typedef Vector<bool, 4, JitAllocPolicy> BoolVector;
 
 class InlinePropertyTable : public TempObject
 {
     struct Entry : public TempObject {
-        AlwaysTenured<types::TypeObject *> typeObj;
+        AlwaysTenured<types::ObjectGroup *> group;
         AlwaysTenuredFunction func;
 
-        Entry(types::TypeObject *typeObj, JSFunction *func)
-          : typeObj(typeObj), func(func)
+        Entry(types::ObjectGroup *group, JSFunction *func)
+          : group(group), func(func)
         { }
     };
 
     jsbytecode *pc_;
     MResumePoint *priorResumePoint_;
     Vector<Entry *, 4, JitAllocPolicy> entries_;
 
   public:
@@ -9221,27 +9221,27 @@ class InlinePropertyTable : public TempO
         priorResumePoint_ = nullptr;
         return rp;
     }
 
     jsbytecode *pc() const {
         return pc_;
     }
 
-    bool addEntry(TempAllocator &alloc, types::TypeObject *typeObj, JSFunction *func) {
-        return entries_.append(new(alloc) Entry(typeObj, func));
+    bool addEntry(TempAllocator &alloc, types::ObjectGroup *group, JSFunction *func) {
+        return entries_.append(new(alloc) Entry(group, func));
     }
 
     size_t numEntries() const {
         return entries_.length();
     }
 
-    types::TypeObject *getTypeObject(size_t i) const {
+    types::ObjectGroup *getObjectGroup(size_t i) const {
         MOZ_ASSERT(i < numEntries());
-        return entries_[i]->typeObj;
+        return entries_[i]->group;
     }
 
     JSFunction *getFunction(size_t i) const {
         MOZ_ASSERT(i < numEntries());
         return entries_[i]->func;
     }
 
     bool hasFunction(JSFunction *func) const;
@@ -9354,38 +9354,38 @@ class MGetPropertyCache
         }
         return AliasSet::Store(AliasSet::Any);
     }
 
     void setBlock(MBasicBlock *block) MOZ_OVERRIDE;
     bool updateForReplacement(MDefinition *ins) MOZ_OVERRIDE;
 };
 
-// Emit code to load a value from an object if its shape/type matches one of
-// the shapes/types observed by the baseline IC, else bails out.
+// Emit code to load a value from an object if its shape/group matches one of
+// the shapes/groups observed by the baseline IC, else bails out.
 class MGetPropertyPolymorphic
   : public MUnaryInstruction,
     public SingleObjectPolicy::Data
 {
     struct Entry {
         // The shape to guard against.
         Shape *objShape;
 
         // The property to laod.
         Shape *shape;
     };
 
     Vector<Entry, 4, JitAllocPolicy> nativeShapes_;
-    Vector<types::TypeObject *, 4, JitAllocPolicy> unboxedTypes_;
+    Vector<types::ObjectGroup *, 4, JitAllocPolicy> unboxedGroups_;
     AlwaysTenuredPropertyName name_;
 
     MGetPropertyPolymorphic(TempAllocator &alloc, MDefinition *obj, PropertyName *name)
       : MUnaryInstruction(obj),
         nativeShapes_(alloc),
-        unboxedTypes_(alloc),
+        unboxedGroups_(alloc),
         name_(name)
     {
         setGuard();
         setMovable();
         setResultType(MIRType_Value);
     }
 
   public:
@@ -9404,72 +9404,72 @@ class MGetPropertyPolymorphic
     }
 
     bool addShape(Shape *objShape, Shape *shape) {
         Entry entry;
         entry.objShape = objShape;
         entry.shape = shape;
         return nativeShapes_.append(entry);
     }
-    bool addUnboxedType(types::TypeObject *type) {
-        return unboxedTypes_.append(type);
+    bool addUnboxedGroup(types::ObjectGroup *group) {
+        return unboxedGroups_.append(group);
     }
     size_t numShapes() const {
         return nativeShapes_.length();
     }
     Shape *objShape(size_t i) const {
         return nativeShapes_[i].objShape;
     }
     Shape *shape(size_t i) const {
         return nativeShapes_[i].shape;
     }
-    size_t numUnboxedTypes() const {
-        return unboxedTypes_.length();
-    }
-    types::TypeObject *unboxedType(size_t i) const {
-        return unboxedTypes_[i];
+    size_t numUnboxedGroups() const {
+        return unboxedGroups_.length();
+    }
+    types::ObjectGroup *unboxedGroup(size_t i) const {
+        return unboxedGroups_[i];
     }
     PropertyName *name() const {
         return name_;
     }
     MDefinition *obj() const {
         return getOperand(0);
     }
     AliasSet getAliasSet() const MOZ_OVERRIDE {
         return AliasSet::Load(AliasSet::ObjectFields | AliasSet::FixedSlot | AliasSet::DynamicSlot |
-                              (unboxedTypes_.empty() ? 0 : (AliasSet::TypedArrayElement | AliasSet::Element)));
+                              (unboxedGroups_.empty() ? 0 : (AliasSet::TypedArrayElement | AliasSet::Element)));
     }
 
     bool mightAlias(const MDefinition *store) const MOZ_OVERRIDE;
 };
 
-// Emit code to store a value to an object's slots if its shape matches
-// one of the shapes observed by the baseline IC, else bails out.
+// Emit code to store a value to an object's slots if its shape/group matches
+// one of the shapes/groups observed by the baseline IC, else bails out.
 class MSetPropertyPolymorphic
   : public MBinaryInstruction,
     public MixPolicy<SingleObjectPolicy, NoFloatPolicy<1> >::Data
 {
     struct Entry {
         // The shape to guard against.
         Shape *objShape;
 
         // The property to load.
         Shape *shape;
     };
 
     Vector<Entry, 4, JitAllocPolicy> nativeShapes_;
-    Vector<types::TypeObject *, 4, JitAllocPolicy> unboxedTypes_;
+    Vector<types::ObjectGroup *, 4, JitAllocPolicy> unboxedGroups_;
     AlwaysTenuredPropertyName name_;
     bool needsBarrier_;
 
     MSetPropertyPolymorphic(TempAllocator &alloc, MDefinition *obj, MDefinition *value,
                             PropertyName *name)
       : MBinaryInstruction(obj, value),
         nativeShapes_(alloc),
-        unboxedTypes_(alloc),
+        unboxedGroups_(alloc),
         name_(name),
         needsBarrier_(false)
     {
     }
 
   public:
     INSTRUCTION_HEADER(SetPropertyPolymorphic)
 
@@ -9479,33 +9479,33 @@ class MSetPropertyPolymorphic
     }
 
     bool addShape(Shape *objShape, Shape *shape) {
         Entry entry;
         entry.objShape = objShape;
         entry.shape = shape;
         return nativeShapes_.append(entry);
     }
-    bool addUnboxedType(types::TypeObject *type) {
-        return unboxedTypes_.append(type);
+    bool addUnboxedGroup(types::ObjectGroup *group) {
+        return unboxedGroups_.append(group);
     }
     size_t numShapes() const {
         return nativeShapes_.length();
     }
     Shape *objShape(size_t i) const {
         return nativeShapes_[i].objShape;
     }
     Shape *shape(size_t i) const {
         return nativeShapes_[i].shape;
     }
-    size_t numUnboxedTypes() const {
-        return unboxedTypes_.length();
-    }
-    types::TypeObject *unboxedType(size_t i) const {
-        return unboxedTypes_[i];
+    size_t numUnboxedGroups() const {
+        return unboxedGroups_.length();
+    }
+    types::ObjectGroup *unboxedGroup(size_t i) const {
+        return unboxedGroups_[i];
     }
     PropertyName *name() const {
         return name_;
     }
     MDefinition *obj() const {
         return getOperand(0);
     }
     MDefinition *value() const {
@@ -9514,35 +9514,35 @@ class MSetPropertyPolymorphic
     bool needsBarrier() const {
         return needsBarrier_;
     }
     void setNeedsBarrier() {
         needsBarrier_ = true;
     }
     AliasSet getAliasSet() const MOZ_OVERRIDE {
         return AliasSet::Store(AliasSet::ObjectFields | AliasSet::FixedSlot | AliasSet::DynamicSlot |
-                               (unboxedTypes_.empty() ? 0 : (AliasSet::TypedArrayElement | AliasSet::Element)));
+                               (unboxedGroups_.empty() ? 0 : (AliasSet::TypedArrayElement | AliasSet::Element)));
     }
 };
 
 class MDispatchInstruction
   : public MControlInstruction,
     public SingleObjectPolicy::Data
 {
     // Map from JSFunction* -> MBasicBlock.
     struct Entry {
         JSFunction *func;
-        // If |func| has a singleton type, |funcType| is null. Otherwise,
-        // |funcType| holds the TypeObject for |func|, and dispatch guards
-        // on the type instead of directly on the function.
-        types::TypeObject *funcType;
+        // If |func| has a singleton group, |funcGroup| is null. Otherwise,
+        // |funcGroup| holds the ObjectGroup for |func|, and dispatch guards
+        // on the group instead of directly on the function.
+        types::ObjectGroup *funcGroup;
         MBasicBlock *block;
 
-        Entry(JSFunction *func, types::TypeObject *funcType, MBasicBlock *block)
-          : func(func), funcType(funcType), block(block)
+        Entry(JSFunction *func, types::ObjectGroup *funcGroup, MBasicBlock *block)
+          : func(func), funcGroup(funcGroup), block(block)
         { }
     };
     Vector<Entry, 4, JitAllocPolicy> map_;
 
     // An optional fallback path that uses MCall.
     MBasicBlock *fallback_;
     MUse operand_;
 
@@ -9600,27 +9600,27 @@ class MDispatchInstruction
     MBasicBlock *getSuccessor(size_t i) const MOZ_FINAL MOZ_OVERRIDE {
         MOZ_ASSERT(i < numSuccessors());
         if (i == map_.length())
             return fallback_;
         return map_[i].block;
     }
 
   public:
-    void addCase(JSFunction *func, types::TypeObject *funcType, MBasicBlock *block) {
-        map_.append(Entry(func, funcType, block));
+    void addCase(JSFunction *func, types::ObjectGroup *funcGroup, MBasicBlock *block) {
+        map_.append(Entry(func, funcGroup, block));
     }
     uint32_t numCases() const {
         return map_.length();
     }
     JSFunction *getCase(uint32_t i) const {
         return map_[i].func;
     }
-    types::TypeObject *getCaseTypeObject(uint32_t i) const {
-        return map_[i].funcType;
+    types::ObjectGroup *getCaseObjectGroup(uint32_t i) const {
+        return map_[i].funcGroup;
     }
     MBasicBlock *getCaseBlock(uint32_t i) const {
         return map_[i].block;
     }
 
     bool hasFallback() const {
         return bool(fallback_);
     }
@@ -9634,34 +9634,34 @@ class MDispatchInstruction
     }
 
   public:
     MDefinition *input() const {
         return getOperand(0);
     }
 };
 
-// Polymorphic dispatch for inlining, keyed off incoming TypeObject.
-class MTypeObjectDispatch : public MDispatchInstruction
-{
-    // Map TypeObject (of CallProp's Target Object) -> JSFunction (yielded by the CallProp).
+// Polymorphic dispatch for inlining, keyed off incoming ObjectGroup.
+class MObjectGroupDispatch : public MDispatchInstruction
+{
+    // Map ObjectGroup (of CallProp's Target Object) -> JSFunction (yielded by the CallProp).
     InlinePropertyTable *inlinePropertyTable_;
 
-    MTypeObjectDispatch(TempAllocator &alloc, MDefinition *input, InlinePropertyTable *table)
+    MObjectGroupDispatch(TempAllocator &alloc, MDefinition *input, InlinePropertyTable *table)
       : MDispatchInstruction(alloc, input),
         inlinePropertyTable_(table)
     { }
 
   public:
-    INSTRUCTION_HEADER(TypeObjectDispatch)
-
-    static MTypeObjectDispatch *New(TempAllocator &alloc, MDefinition *ins,
-                                    InlinePropertyTable *table)
-    {
-        return new(alloc) MTypeObjectDispatch(alloc, ins, table);
+    INSTRUCTION_HEADER(ObjectGroupDispatch)
+
+    static MObjectGroupDispatch *New(TempAllocator &alloc, MDefinition *ins,
+                                     InlinePropertyTable *table)
+    {
+        return new(alloc) MObjectGroupDispatch(alloc, ins, table);
     }
 
     InlinePropertyTable *propTable() const {
         return inlinePropertyTable_;
     }
 };
 
 // Polymorphic dispatch for inlining, keyed off incoming JSFunction*.
@@ -9842,65 +9842,65 @@ class MGuardShapePolymorphic
 
     bool congruentTo(const MDefinition *ins) const MOZ_OVERRIDE;
 
     AliasSet getAliasSet() const MOZ_OVERRIDE {
         return AliasSet::Load(AliasSet::ObjectFields);
     }
 };
 
-// Guard on an object's type, inclusively or exclusively.
-class MGuardObjectType
+// Guard on an object's group, inclusively or exclusively.
+class MGuardObjectGroup
   : public MUnaryInstruction,
     public SingleObjectPolicy::Data
 {
-    AlwaysTenured<types::TypeObject *> typeObject_;
+    AlwaysTenured<types::ObjectGroup *> group_;
     bool bailOnEquality_;
     BailoutKind bailoutKind_;
 
-    MGuardObjectType(MDefinition *obj, types::TypeObject *typeObject, bool bailOnEquality,
-                     BailoutKind bailoutKind)
+    MGuardObjectGroup(MDefinition *obj, types::ObjectGroup *group, bool bailOnEquality,
+                      BailoutKind bailoutKind)
       : MUnaryInstruction(obj),
-        typeObject_(typeObject),
+        group_(group),
         bailOnEquality_(bailOnEquality),
         bailoutKind_(bailoutKind)
     {
         setGuard();
         setMovable();
         setResultType(MIRType_Object);
     }
 
   public:
-    INSTRUCTION_HEADER(GuardObjectType)
-
-    static MGuardObjectType *New(TempAllocator &alloc, MDefinition *obj, types::TypeObject *typeObject,
-                                 bool bailOnEquality, BailoutKind bailoutKind) {
-        return new(alloc) MGuardObjectType(obj, typeObject, bailOnEquality, bailoutKind);
+    INSTRUCTION_HEADER(GuardObjectGroup)
+
+    static MGuardObjectGroup *New(TempAllocator &alloc, MDefinition *obj, types::ObjectGroup *group,
+                                  bool bailOnEquality, BailoutKind bailoutKind) {
+        return new(alloc) MGuardObjectGroup(obj, group, bailOnEquality, bailoutKind);
     }
 
     MDefinition *obj() const {
         return getOperand(0);
     }
-    const types::TypeObject *typeObject() const {
-        return typeObject_;
+    const types::ObjectGroup *group() const {
+        return group_;
     }
     bool bailOnEquality() const {
         return bailOnEquality_;
     }
     BailoutKind bailoutKind() const {
         return bailoutKind_;
     }
     bool congruentTo(const MDefinition *ins) const MOZ_OVERRIDE {
-        if (!ins->isGuardObjectType())
-            return false;
-        if (typeObject() != ins->toGuardObjectType()->typeObject())
-            return false;
-        if (bailOnEquality() != ins->toGuardObjectType()->bailOnEquality())
-            return false;
-        if (bailoutKind() != ins->toGuardObjectType()->bailoutKind())
+        if (!ins->isGuardObjectGroup())
+            return false;
+        if (group() != ins->toGuardObjectGroup()->group())
+            return false;
+        if (bailOnEquality() != ins->toGuardObjectGroup()->bailOnEquality())
+            return false;
+        if (bailoutKind() != ins->toGuardObjectGroup()->bailoutKind())
             return false;
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const MOZ_OVERRIDE {
         return AliasSet::Load(AliasSet::ObjectFields);
     }
 };
 
@@ -12687,17 +12687,17 @@ bool ElementAccessIsAnyTypedArray(types:
                                   Scalar::Type *arrayType);
 bool ElementAccessIsPacked(types::CompilerConstraintList *constraints, MDefinition *obj);
 bool ElementAccessMightBeCopyOnWrite(types::CompilerConstraintList *constraints, MDefinition *obj);
 bool ElementAccessHasExtraIndexedProperty(types::CompilerConstraintList *constraints,
                                           MDefinition *obj);
 MIRType DenseNativeElementType(types::CompilerConstraintList *constraints, MDefinition *obj);
 BarrierKind PropertyReadNeedsTypeBarrier(JSContext *propertycx,
                                          types::CompilerConstraintList *constraints,
-                                         types::TypeObjectKey *object, PropertyName *name,
+                                         types::ObjectGroupKey *object, PropertyName *name,
                                          types::TemporaryTypeSet *observed, bool updateObserved);
 BarrierKind PropertyReadNeedsTypeBarrier(JSContext *propertycx,
                                          types::CompilerConstraintList *constraints,
                                          MDefinition *obj, PropertyName *name,
                                          types::TemporaryTypeSet *observed);
 BarrierKind PropertyReadOnPrototypeNeedsTypeBarrier(types::CompilerConstraintList *constraints,
                                                     MDefinition *obj, PropertyName *name,
                                                     types::TemporaryTypeSet *observed);
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -149,37 +149,37 @@ class MIRGenerator
     uint32_t minAsmJSHeapLength() const {
         return minAsmJSHeapLength_;
     }
 
     bool modifiesFrameArguments() const {
         return modifiesFrameArguments_;
     }
 
-    typedef Vector<types::TypeObject *, 0, JitAllocPolicy> TypeObjectVector;
+    typedef Vector<types::ObjectGroup *, 0, JitAllocPolicy> ObjectGroupVector;
 
     // When abortReason() == AbortReason_NewScriptProperties, all types which
     // the new script properties analysis hasn't been performed on yet.
-    const TypeObjectVector &abortedNewScriptPropertiesTypes() const {
-        return abortedNewScriptPropertiesTypes_;
+    const ObjectGroupVector &abortedNewScriptPropertiesGroups() const {
+        return abortedNewScriptPropertiesGroups_;
     }
 
   public:
     CompileCompartment *compartment;
 
   protected:
     CompileInfo *info_;
     const OptimizationInfo *optimizationInfo_;
     TempAllocator *alloc_;
     JSFunction *fun_;
     uint32_t nslots_;
     MIRGraph *graph_;
     AbortReason abortReason_;
     bool shouldForceAbort_; // Force AbortReason_Disable
-    TypeObjectVector abortedNewScriptPropertiesTypes_;
+    ObjectGroupVector abortedNewScriptPropertiesGroups_;
     bool error_;
     mozilla::Atomic<bool, mozilla::Relaxed> *pauseBuild_;
     mozilla::Atomic<bool, mozilla::Relaxed> cancelBuild_;
 
     uint32_t maxAsmJSStackArgBytes_;
     bool performsCall_;
     bool usesSimd_;
     bool usesSimdCached_;
@@ -194,17 +194,17 @@ class MIRGenerator
     bool instrumentedProfilingIsCached_;
 
     // List of nursery objects used by this compilation. Can be traced by a
     // minor GC while compilation happens off-thread. This Vector should only
     // be accessed on the main thread (IonBuilder, nursery GC or
     // CodeGenerator::link).
     ObjectVector nurseryObjects_;
 
-    void addAbortedNewScriptPropertiesType(types::TypeObject *type);
+    void addAbortedNewScriptPropertiesGroup(types::ObjectGroup *group);
     void setForceAbort() {
         shouldForceAbort_ = true;
     }
     bool shouldForceAbort() {
         return shouldForceAbort_;
     }
 
 #if defined(JS_ION_PERF)
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -22,17 +22,17 @@ MIRGenerator::MIRGenerator(CompileCompar
                            const OptimizationInfo *optimizationInfo)
   : compartment(compartment),
     info_(info),
     optimizationInfo_(optimizationInfo),
     alloc_(alloc),
     graph_(graph),
     abortReason_(AbortReason_NoAbort),
     shouldForceAbort_(false),
-    abortedNewScriptPropertiesTypes_(*alloc_),
+    abortedNewScriptPropertiesGroups_(*alloc_),
     error_(false),
     pauseBuild_(nullptr),
     cancelBuild_(false),
     maxAsmJSStackArgBytes_(0),
     performsCall_(false),
     usesSimd_(false),
     usesSimdCached_(false),
     minAsmJSHeapLength_(0),
@@ -88,24 +88,24 @@ MIRGenerator::abort(const char *message,
     va_list ap;
     va_start(ap, message);
     abortFmt(message, ap);
     va_end(ap);
     return false;
 }
 
 void
-MIRGenerator::addAbortedNewScriptPropertiesType(types::TypeObject *type)
+MIRGenerator::addAbortedNewScriptPropertiesGroup(types::ObjectGroup *group)
 {
-    for (size_t i = 0; i < abortedNewScriptPropertiesTypes_.length(); i++) {
-        if (type == abortedNewScriptPropertiesTypes_[i])
+    for (size_t i = 0; i < abortedNewScriptPropertiesGroups_.length(); i++) {
+        if (group == abortedNewScriptPropertiesGroups_[i])
             return;
     }
-    if (!abortedNewScriptPropertiesTypes_.append(type))
-        CrashAtUnhandlableOOM("addAbortedNewScriptPropertiesType");
+    if (!abortedNewScriptPropertiesGroups_.append(group))
+        CrashAtUnhandlableOOM("addAbortedNewScriptPropertiesGroup");
 }
 
 void
 MIRGraph::addBlock(MBasicBlock *block)
 {
     MOZ_ASSERT(block);
     block->setId(blockIdGen_++);
     blocks_.pushBack(block);
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -34,17 +34,17 @@ namespace jit {
     _(CloneLiteral)                                                         \
     _(Parameter)                                                            \
     _(Callee)                                                               \
     _(IsConstructing)                                                       \
     _(TableSwitch)                                                          \
     _(Goto)                                                                 \
     _(Test)                                                                 \
     _(GotoWithFake)                                                         \
-    _(TypeObjectDispatch)                                                   \
+    _(ObjectGroupDispatch)                                                  \
     _(FunctionDispatch)                                                     \
     _(Compare)                                                              \
     _(Phi)                                                                  \
     _(Beta)                                                                 \
     _(OsrValue)                                                             \
     _(OsrScopeChain)                                                        \
     _(OsrReturnValue)                                                       \
     _(OsrArgumentsObject)                                                   \
@@ -155,17 +155,17 @@ namespace jit {
     _(GetPropertyCache)                                                     \
     _(GetPropertyPolymorphic)                                               \
     _(SetPropertyPolymorphic)                                               \
     _(GetElementCache)                                                      \
     _(SetElementCache)                                                      \
     _(BindNameCache)                                                        \
     _(GuardShape)                                                           \
     _(GuardShapePolymorphic)                                                \
-    _(GuardObjectType)                                                      \
+    _(GuardObjectGroup)                                                     \
     _(GuardObjectIdentity)                                                  \
     _(GuardClass)                                                           \
     _(ArrayLength)                                                          \
     _(SetArrayLength)                                                       \
     _(TypedArrayLength)                                                     \
     _(TypedArrayElements)                                                   \
     _(TypedObjectDescr)                                                     \
     _(TypedObjectElements)                                                  \
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -50,24 +50,24 @@ class TypeWrapper {
             return t == t_ || t_ == types::Type::DoubleType();
         return t == t_;
     }
     inline unsigned getObjectCount() const {
         if (t_.isAnyObject() || t_.isUnknown() || !t_.isObject())
             return 0;
         return 1;
     }
-    inline JSObject *getSingleObjectNoBarrier(unsigned) const {
-        if (t_.isSingleObject())
-            return t_.singleObjectNoBarrier();
+    inline JSObject *getSingletonNoBarrier(unsigned) const {
+        if (t_.isSingleton())
+            return t_.singletonNoBarrier();
         return nullptr;
     }
-    inline types::TypeObject *getTypeObjectNoBarrier(unsigned) const {
-        if (t_.isTypeObject())
-            return t_.typeObjectNoBarrier();
+    inline types::ObjectGroup *getGroupNoBarrier(unsigned) const {
+        if (t_.isGroup())
+            return t_.groupNoBarrier();
         return nullptr;
     }
 };
 
 } /* anonymous namespace */
 
 template <typename Source, typename TypeSet> void
 MacroAssembler::guardTypeSet(const Source &address, const TypeSet *types, BarrierKind kind,
@@ -139,19 +139,19 @@ MacroAssembler::guardTypeSet(const Sourc
         guardObjectType(obj, types, scratch, &fail);
         jump(&matched);
         bind(&fail);
 
         // Type set guards might miss when an object's type changes and its
         // properties become unknown, so check for this case.
         if (obj == scratch)
             extractObject(address, scratch);
-        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
+        loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
         branchTestPtr(Assembler::NonZero,
-                      Address(scratch, types::TypeObject::offsetOfFlags()),
+                      Address(scratch, types::ObjectGroup::offsetOfFlags()),
                       Imm32(types::OBJECT_FLAG_UNKNOWN_PROPERTIES), &matched);
 
         assumeUnreachable("Unexpected object type");
 #endif
     }
 
     bind(&matched);
 }
@@ -170,53 +170,53 @@ MacroAssembler::guardObjectType(Register
     // safe to do as the final JitCode object will be allocated during the
     // incremental GC (or the compilation canceled before we start sweeping),
     // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
     // to trigger the barrier on the contents of type sets passed in here.
     Label matched;
 
     BranchGCPtr lastBranch;
     MOZ_ASSERT(!lastBranch.isInitialized());
-    bool hasTypeObjects = false;
+    bool hasObjectGroups = false;
     unsigned count = types->getObjectCount();
     for (unsigned i = 0; i < count; i++) {
-        if (!types->getSingleObjectNoBarrier(i)) {
-            hasTypeObjects = hasTypeObjects || types->getTypeObjectNoBarrier(i);
+        if (!types->getSingletonNoBarrier(i)) {
+            hasObjectGroups = hasObjectGroups || types->getGroupNoBarrier(i);
             continue;
         }
 
         if (lastBranch.isInitialized())
             lastBranch.emit(*this);
 
-        JSObject *object = types->getSingleObjectNoBarrier(i);
+        JSObject *object = types->getSingletonNoBarrier(i);
         lastBranch = BranchGCPtr(Equal, obj, ImmGCPtr(object), &matched);
     }
 
-    if (hasTypeObjects) {
+    if (hasObjectGroups) {
         // We are possibly going to overwrite the obj register. So already
         // emit the branch, since branch depends on previous value of obj
         // register and there is definitely a branch following. So no need
         // to invert the condition.
         if (lastBranch.isInitialized())
             lastBranch.emit(*this);
         lastBranch = BranchGCPtr();
 
         // Note: Some platforms give the same register for obj and scratch.
         // Make sure when writing to scratch, the obj register isn't used anymore!
-        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
+        loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
 
         for (unsigned i = 0; i < count; i++) {
-            if (!types->getTypeObjectNoBarrier(i))
+            if (!types->getGroupNoBarrier(i))
                 continue;
 
             if (lastBranch.isInitialized())
                 lastBranch.emit(*this);
 
-            types::TypeObject *object = types->getTypeObjectNoBarrier(i);
-            lastBranch = BranchGCPtr(Equal, scratch, ImmGCPtr(object), &matched);
+            types::ObjectGroup *group = types->getGroupNoBarrier(i);
+            lastBranch = BranchGCPtr(Equal, scratch, ImmGCPtr(group), &matched);
         }
     }
 
     if (!lastBranch.isInitialized()) {
         jump(miss);
         return;
     }
 
@@ -1187,17 +1187,17 @@ MacroAssembler::initGCSlots(Register obj
 
 void
 MacroAssembler::initGCThing(Register obj, Register slots, JSObject *templateObj,
                             bool initFixedSlots)
 {
     // Fast initialization of an empty object returned by allocateObject().
 
     storePtr(ImmGCPtr(templateObj->lastProperty()), Address(obj, JSObject::offsetOfShape()));
-    storePtr(ImmGCPtr(templateObj->type()), Address(obj, JSObject::offsetOfType()));
+    storePtr(ImmGCPtr(templateObj->group()), Address(obj, JSObject::offsetOfGroup()));
 
     if (templateObj->isNative()) {
         NativeObject *ntemplate = &templateObj->as<NativeObject>();
         MOZ_ASSERT_IF(!ntemplate->denseElementsAreCopyOnWrite(), !ntemplate->hasDynamicElements());
 
         if (ntemplate->hasDynamicSlots())
             storePtr(slots, Address(obj, NativeObject::offsetOfSlots()));
         else
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -284,23 +284,23 @@ class MacroAssembler : public MacroAssem
         loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
     }
     void loadBaseShape(Register objReg, Register dest) {
         loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
 
         loadPtr(Address(dest, Shape::offsetOfBase()), dest);
     }
     void loadObjClass(Register objReg, Register dest) {
-        loadPtr(Address(objReg, JSObject::offsetOfType()), dest);
-        loadPtr(Address(dest, types::TypeObject::offsetOfClasp()), dest);
+        loadPtr(Address(objReg, JSObject::offsetOfGroup()), dest);
+        loadPtr(Address(dest, types::ObjectGroup::offsetOfClasp()), dest);
     }
     void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class *clasp,
                             Label *label) {
-        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
-        branchPtr(cond, Address(scratch, types::TypeObject::offsetOfClasp()), ImmPtr(clasp), label);
+        loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+        branchPtr(cond, Address(scratch, types::ObjectGroup::offsetOfClasp()), ImmPtr(clasp), label);
     }
     void branchTestObjShape(Condition cond, Register obj, const Shape *shape, Label *label) {
         branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape), label);
     }
     void branchTestObjShape(Condition cond, Register obj, Register shape, Label *label) {
         branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
     }
     void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
@@ -342,18 +342,18 @@ class MacroAssembler : public MacroAssem
         branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
     }
 
     void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
         loadPtr(Address(obj, NativeObject::getPrivateDataOffset(nfixed)), dest);
     }
 
     void loadObjProto(Register obj, Register dest) {
-        loadPtr(Address(obj, JSObject::offsetOfType()), dest);
-        loadPtr(Address(dest, types::TypeObject::offsetOfProto()), dest);
+        loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
+        loadPtr(Address(dest, types::ObjectGroup::offsetOfProto()), dest);
     }
 
     void loadStringLength(Register str, Register dest) {
         load32(Address(str, JSString::offsetOfLength()), dest);
     }
 
     void loadFunctionFromCalleeToken(Address token, Register dest) {
         loadPtr(token, dest);
--- a/js/src/jit/OptimizationTracking.cpp
+++ b/js/src/jit/OptimizationTracking.cpp
@@ -194,17 +194,17 @@ CombineHash(HashNumber h, HashNumber n)
     h ^= (h >> 6);
     return h;
 }
 
 static inline HashNumber
 HashType(types::Type ty)
 {
     if (ty.isObjectUnchecked())
-        return PointerHasher<types::TypeObjectKey *, 3>::hash(ty.objectKey());
+        return PointerHasher<types::ObjectGroupKey *, 3>::hash(ty.objectKey());
     return HashNumber(ty.raw());
 }
 
 static HashNumber
 HashTypeList(const types::TypeSet::TypeList &types)
 {
     HashNumber h = 0;
     for (uint32_t i = 0; i < types.length(); i++)
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -966,20 +966,20 @@ MStringSplit::writeRecoverData(CompactBu
 RStringSplit::RStringSplit(CompactBufferReader &reader)
 {}
 
 bool
 RStringSplit::recover(JSContext *cx, SnapshotIterator &iter) const
 {
     RootedString str(cx, iter.read().toString());
     RootedString sep(cx, iter.read().toString());
-    RootedTypeObject typeObj(cx, iter.read().toObject().type());
+    RootedObjectGroup group(cx, iter.read().toObject().group());
     RootedValue result(cx);
 
-    JSObject *res = str_split_string(cx, typeObj, str, sep);
+    JSObject *res = str_split_string(cx, group, str, sep);
     if (!res)
         return false;
 
     result.setObject(*res);
     iter.storeInstructionResult(result);
     return true;
 }
 
@@ -1206,23 +1206,23 @@ RNewArray::RNewArray(CompactBufferReader
     allocatingBehaviour_ = AllocatingBehaviour(reader.readByte());
 }
 
 bool
 RNewArray::recover(JSContext *cx, SnapshotIterator &iter) const
 {
     RootedObject templateObject(cx, &iter.read().toObject());
     RootedValue result(cx);
-    RootedTypeObject type(cx);
+    RootedObjectGroup group(cx);
 
     // See CodeGenerator::visitNewArrayCallVM
-    if (!templateObject->hasSingletonType())
-        type = templateObject->type();
+    if (!templateObject->isSingleton())
+        group = templateObject->group();
 
-    JSObject *resultObject = NewDenseArray(cx, count_, type, allocatingBehaviour_);
+    JSObject *resultObject = NewDenseArray(cx, count_, group, allocatingBehaviour_);
     if (!resultObject)
         return false;
 
     result.setObject(*resultObject);
     iter.storeInstructionResult(result);
     return true;
 }
 
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -282,26 +282,26 @@ StringsEqual(JSContext *cx, HandleString
 }
 
 template bool StringsEqual<true>(JSContext *cx, HandleString lhs, HandleString rhs, bool *res);
 template bool StringsEqual<false>(JSContext *cx, HandleString lhs, HandleString rhs, bool *res);
 
 JSObject*
 NewInitObject(JSContext *cx, HandlePlainObject templateObject)
 {
-    NewObjectKind newKind = templateObject->hasSingletonType() ? SingletonObject : GenericObject;
-    if (!templateObject->hasLazyType() && templateObject->type()->shouldPreTenure())
+    NewObjectKind newKind = templateObject->isSingleton() ? SingletonObject : GenericObject;
+    if (!templateObject->hasLazyGroup() && templateObject->group()->shouldPreTenure())
         newKind = TenuredObject;
     RootedObject obj(cx, CopyInitializerObject(cx, templateObject, newKind));
 
     if (!obj)
         return nullptr;
 
-    if (!templateObject->hasSingletonType())
-        obj->setType(templateObject->type());
+    if (!templateObject->isSingleton())
+        obj->setGroup(templateObject->group());
 
     return obj;
 }
 
 bool
 ArraySpliceDense(JSContext *cx, HandleObject obj, uint32_t start, uint32_t deleteCount)
 {
     JS::AutoValueArray<4> argv(cx);
@@ -514,19 +514,19 @@ InterruptCheck(JSContext *cx)
 
 void *
 MallocWrapper(JSRuntime *rt, size_t nbytes)
 {
     return rt->pod_malloc<uint8_t>(nbytes);
 }
 
 JSObject *
-NewCallObject(JSContext *cx, HandleShape shape, HandleTypeObject type, uint32_t lexicalBegin)
+NewCallObject(JSContext *cx, HandleShape shape, HandleObjectGroup group, uint32_t lexicalBegin)
 {
-    JSObject *obj = CallObject::create(cx, shape, type, lexicalBegin);
+    JSObject *obj = CallObject::create(cx, shape, group, lexicalBegin);
     if (!obj)
         return nullptr;
 
     // The JIT creates call objects in the nursery, so elides barriers for
     // the initializing writes. The interpreter, however, may have allocated
     // the call object tenured, so barrier as needed before re-entering.
     if (!IsInsideNursery(obj))
         cx->runtime()->gc.storeBuffer.putWholeCellFromMainThread(obj);
@@ -908,36 +908,36 @@ NewArgumentsObject(JSContext *cx, Baseli
 JSObject *
 InitRestParameter(JSContext *cx, uint32_t length, Value *rest, HandleObject templateObj,
                   HandleObject objRes)
 {
     if (objRes) {
         Rooted<ArrayObject*> arrRes(cx, &objRes->as<ArrayObject>());
 
         MOZ_ASSERT(!arrRes->getDenseInitializedLength());
-        MOZ_ASSERT(arrRes->type() == templateObj->type());
+        MOZ_ASSERT(arrRes->group() == templateObj->group());
 
         // Fast path: we managed to allocate the array inline; initialize the
         // slots.
         if (length > 0) {
             if (!arrRes->ensureElements(cx, length))
                 return nullptr;
             arrRes->setDenseInitializedLength(length);
             arrRes->initDenseElements(0, rest, length);
             arrRes->setLengthInt32(length);
         }
         return arrRes;
     }
 
-    NewObjectKind newKind = templateObj->type()->shouldPreTenure()
+    NewObjectKind newKind = templateObj->group()->shouldPreTenure()
                             ? TenuredObject
                             : GenericObject;
     ArrayObject *arrRes = NewDenseCopiedArray(cx, length, rest, nullptr, newKind);
     if (arrRes)
-        arrRes->setType(templateObj->type());
+        arrRes->setGroup(templateObj->group());
     return arrRes;
 }
 
 bool
 HandleDebugTrap(JSContext *cx, BaselineFrame *frame, uint8_t *retAddr, bool *mustReturn)
 {
     *mustReturn = false;
 
@@ -1172,18 +1172,18 @@ AutoDetectInvalidation::setReturnOverrid
 void
 AssertValidObjectPtr(JSContext *cx, JSObject *obj)
 {
     // Check what we can, so that we'll hopefully assert/crash if we get a
     // bogus object (pointer).
     MOZ_ASSERT(obj->compartment() == cx->compartment());
     MOZ_ASSERT(obj->runtimeFromMainThread() == cx->runtime());
 
-    MOZ_ASSERT_IF(!obj->hasLazyType(),
-                  obj->type()->clasp() == obj->lastProperty()->getObjectClass());
+    MOZ_ASSERT_IF(!obj->hasLazyGroup(),
+                  obj->group()->clasp() == obj->lastProperty()->getObjectClass());
 
     if (obj->isTenured()) {
         MOZ_ASSERT(obj->isAligned());
         gc::AllocKind kind = obj->asTenured().getAllocKind();
         MOZ_ASSERT(kind >= js::gc::FINALIZE_OBJECT0 && kind <= js::gc::FINALIZE_OBJECT_LAST);
         MOZ_ASSERT(obj->asTenured().zone() == cx->zone());
     }
 }
@@ -1283,19 +1283,19 @@ MarkObjectFromIon(JSRuntime *rt, JSObjec
 
 void
 MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
 {
     gc::MarkShapeUnbarriered(&rt->gc.marker, shapep, "write barrier");
 }
 
 void
-MarkTypeObjectFromIon(JSRuntime *rt, types::TypeObject **typep)
+MarkObjectGroupFromIon(JSRuntime *rt, types::ObjectGroup **groupp)
 {
-    gc::MarkTypeObjectUnbarriered(&rt->gc.marker, typep, "write barrier");
+    gc::MarkObjectGroupUnbarriered(&rt->gc.marker, groupp, "write barrier");
 }
 
 bool
 ThrowUninitializedLexical(JSContext *cx)
 {
     ScriptFrameIter iter(cx);
     RootedScript script(cx, iter.script());
     ReportUninitializedLexical(cx, script, iter.pc());
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -339,18 +339,18 @@ template <> struct TypeToArgProperties<H
     static const uint32_t result = TypeToArgProperties<Value>::result | VMFunction::ByRef;
 };
 template <> struct TypeToArgProperties<MutableHandleValue> {
     static const uint32_t result = TypeToArgProperties<Value>::result | VMFunction::ByRef;
 };
 template <> struct TypeToArgProperties<HandleShape> {
     static const uint32_t result = TypeToArgProperties<Shape *>::result | VMFunction::ByRef;
 };
-template <> struct TypeToArgProperties<HandleTypeObject> {
-    static const uint32_t result = TypeToArgProperties<types::TypeObject *>::result | VMFunction::ByRef;
+template <> struct TypeToArgProperties<HandleObjectGroup> {
+    static const uint32_t result = TypeToArgProperties<types::ObjectGroup *>::result | VMFunction::ByRef;
 };
 
 // Convert argument type to whether or not it should be passed in a float
 // register on platforms that have them, like x64.
 template <class T> struct TypeToPassInFloatReg {
     static const uint32_t result = 0;
 };
 template <> struct TypeToPassInFloatReg<double> {
@@ -377,17 +377,17 @@ template <> struct TypeToRootType<Handle
     static const uint32_t result = VMFunction::RootValue;
 };
 template <> struct TypeToRootType<MutableHandleValue> {
     static const uint32_t result = VMFunction::RootValue;
 };
 template <> struct TypeToRootType<HandleShape> {
     static const uint32_t result = VMFunction::RootCell;
 };
-template <> struct TypeToRootType<HandleTypeObject> {
+template <> struct TypeToRootType<HandleObjectGroup> {
     static const uint32_t result = VMFunction::RootCell;
 };
 template <> struct TypeToRootType<HandleScript> {
     static const uint32_t result = VMFunction::RootCell;
 };
 template <> struct TypeToRootType<Handle<NativeObject *> > {
     static const uint32_t result = VMFunction::RootObject;
 };
@@ -669,17 +669,17 @@ bool CharCodeAt(JSContext *cx, HandleStr
 JSFlatString *StringFromCharCode(JSContext *cx, int32_t code);
 
 bool SetProperty(JSContext *cx, HandleObject obj, HandlePropertyName name, HandleValue value,
                  bool strict, jsbytecode *pc);
 
 bool InterruptCheck(JSContext *cx);
 
 void *MallocWrapper(JSRuntime *rt, size_t nbytes);
-JSObject *NewCallObject(JSContext *cx, HandleShape shape, HandleTypeObject type,
+JSObject *NewCallObject(JSContext *cx, HandleShape shape, HandleObjectGroup group,
                         uint32_t lexicalBegin);
 JSObject *NewSingletonCallObject(JSContext *cx, HandleShape shape, uint32_t lexicalBegin);
 JSObject *NewStringObject(JSContext *cx, HandleString str);
 
 bool OperatorIn(JSContext *cx, HandleValue key, HandleObject obj, bool *out);
 bool OperatorInI(JSContext *cx, uint32_t index, HandleObject obj, bool *out);
 
 bool GetIntrinsicValue(JSContext *cx, HandlePropertyName name, MutableHandleValue rval);
@@ -754,33 +754,33 @@ void AssertValidStringPtr(JSContext *cx,
 void AssertValidSymbolPtr(JSContext *cx, JS::Symbol *sym);
 void AssertValidValue(JSContext *cx, Value *v);
 #endif
 
 void MarkValueFromIon(JSRuntime *rt, Value *vp);
 void MarkStringFromIon(JSRuntime *rt, JSString **stringp);
 void MarkObjectFromIon(JSRuntime *rt, JSObject **objp);
 void MarkShapeFromIon(JSRuntime *rt, Shape **shapep);
-void MarkTypeObjectFromIon(JSRuntime *rt, types::TypeObject **typep);
+void MarkObjectGroupFromIon(JSRuntime *rt, types::ObjectGroup **groupp);
 
 // Helper for generatePreBarrier.
 inline void *
 IonMarkFunction(MIRType type)
 {
     switch (type) {
       case MIRType_Value:
         return JS_FUNC_TO_DATA_PTR(void *, MarkValueFromIon);
       case MIRType_String:
         return JS_FUNC_TO_DATA_PTR(void *, MarkStringFromIon);
       case MIRType_Object:
         return JS_FUNC_TO_DATA_PTR(void *, MarkObjectFromIon);
       case MIRType_Shape:
         return JS_FUNC_TO_DATA_PTR(void *, MarkShapeFromIon);
-      case MIRType_TypeObject:
-        return JS_FUNC_TO_DATA_PTR(void *, MarkTypeObjectFromIon);
+      case MIRType_ObjectGroup:
+        return JS_FUNC_TO_DATA_PTR(void *, MarkObjectGroupFromIon);
       default: MOZ_CRASH();
     }
 }
 
 bool ObjectIsCallable(JSObject *obj);
 
 bool ThrowUninitializedLexical(JSContext *cx);
 
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1649,23 +1649,23 @@ CodeGeneratorARM::visitGuardShape(LGuard
 
     masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfShape())), tmp);
     masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()));
 
     bailoutIf(Assembler::NotEqual, guard->snapshot());
 }
 
 void
-CodeGeneratorARM::visitGuardObjectType(LGuardObjectType *guard)
+CodeGeneratorARM::visitGuardObjectGroup(LGuardObjectGroup *guard)
 {
     Register obj = ToRegister(guard->input());
     Register tmp = ToRegister(guard->tempInt());
 
-    masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfType())), tmp);
-    masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->typeObject()));
+    masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfGroup())), tmp);
+    masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->group()));
 
     Assembler::Condition cond =
         guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
     bailoutIf(cond, guard->snapshot());
 }
 
 void
 CodeGeneratorARM::visitGuardClass(LGuardClass *guard)
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -190,17 +190,17 @@ class CodeGeneratorARM : public CodeGene
     void visitBox(LBox *box);
     void visitBoxFloatingPoint(LBoxFloatingPoint *box);
     void visitUnbox(LUnbox *unbox);
     void visitValue(LValue *value);
     void visitDouble(LDouble *ins);
     void visitFloat32(LFloat32 *ins);
 
     void visitGuardShape(LGuardShape *guard);
-    void visitGuardObjectType(LGuardObjectType *guard);
+    void visitGuardObjectGroup(LGuardObjectGroup *guard);
     void visitGuardClass(LGuardClass *guard);
 
     void visitNegI(LNegI *lir);
     void visitNegD(LNegD *lir);
     void visitNegF(LNegF *lir);
     void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins);
     void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins);
     void visitAsmJSCall(LAsmJSCall *ins);
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -375,27 +375,27 @@ class LGuardShape : public LInstructionH
     const MGuardShape *mir() const {
         return mir_->toGuardShape();
     }
     const LDefinition *tempInt() {
         return getTemp(0);
     }
 };
 
-class LGuardObjectType : public LInstructionHelper<0, 1, 1>
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 1>
 {
   public:
-    LIR_HEADER(GuardObjectType);
+    LIR_HEADER(GuardObjectGroup);
 
-    LGuardObjectType(const LAllocation &in, const LDefinition &temp) {
+    LGuardObjectGroup(const LAllocation &in, const LDefinition &temp) {
         setOperand(0, in);
         setTemp(0, temp);
     }
-    const MGuardObjectType *mir() const {
-        return mir_->toGuardObjectType();
+    const MGuardObjectGroup *mir() const {
+        return mir_->toGuardObjectGroup();
     }
     const LDefinition *tempInt() {
         return getTemp(0);
     }
 };
 
 class LMulI : public LBinaryMath<0>
 {
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -382,22 +382,22 @@ LIRGeneratorARM::visitGuardShape(MGuardS
     LDefinition tempObj = temp(LDefinition::OBJECT);
     LGuardShape *guard = new(alloc()) LGuardShape(useRegister(ins->obj()), tempObj);
     assignSnapshot(guard, ins->bailoutKind());
     add(guard, ins);
     redefine(ins, ins->obj());
 }
 
 void
-LIRGeneratorARM::visitGuardObjectType(MGuardObjectType *ins)
+LIRGeneratorARM::visitGuardObjectGroup(MGuardObjectGroup *ins)
 {
     MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
 
     LDefinition tempObj = temp(LDefinition::OBJECT);
-    LGuardObjectType *guard = new(alloc()) LGuardObjectType(useRegister(ins->obj()), tempObj);
+    LGuardObjectGroup *guard = new(alloc()) LGuardObjectGroup(useRegister(ins->obj()), tempObj);
     assignSnapshot(guard, ins->bailoutKind());
     add(guard, ins);
     redefine(ins, ins->obj());
 }
 
 void
 LIRGeneratorARM::lowerUrshD(MUrsh *mir)
 {
--- a/js/src/jit/arm/Lowering-arm.h
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -86,17 +86,17 @@ class LIRGeneratorARM : public LIRGenera
 
   public:
     void visitConstant(MConstant *ins);
     void visitBox(MBox *box);
     void visitUnbox(MUnbox *unbox);
     void visitReturn(MReturn *ret);
     void lowerPhi(MPhi *phi);
     void visitGuardShape(MGuardShape *ins);
-    void visitGuardObjectType(MGuardObjectType *ins);
+    void visitGuardObjectGroup(MGuardObjectGroup *ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
--- a/js/src/jit/mips/CodeGenerator-mips.cpp
+++ b/js/src/jit/mips/CodeGenerator-mips.cpp
@@ -1739,26 +1739,26 @@ CodeGeneratorMIPS::visitGuardShape(LGuar
     Register tmp = ToRegister(guard->tempInt());
 
     masm.loadPtr(Address(obj, JSObject::offsetOfShape()), tmp);
     bailoutCmpPtr(Assembler::NotEqual, tmp, ImmGCPtr(guard->mir()->shape()),
                   guard->snapshot());
 }
 
 void
-CodeGeneratorMIPS::visitGuardObjectType(LGuardObjectType *guard)
+CodeGeneratorMIPS::visitGuardObjectGroup(LGuardObjectGroup *guard)
 {
     Register obj = ToRegister(guard->input());
     Register tmp = ToRegister(guard->tempInt());
 
-    masm.loadPtr(Address(obj, JSObject::offsetOfType()), tmp);
+    masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), tmp);
     Assembler::Condition cond = guard->mir()->bailOnEquality()
                                 ? Assembler::Equal
                                 : Assembler::NotEqual;
-    bailoutCmpPtr(cond, tmp, ImmGCPtr(guard->mir()->typeObject()), guard->snapshot());
+    bailoutCmpPtr(cond, tmp, ImmGCPtr(guard->mir()->group()), guard->snapshot());
 }
 
 void
 CodeGeneratorMIPS::visitGuardClass(LGuardClass *guard)
 {
     Register obj = ToRegister(guard->input());
     Register tmp = ToRegister(guard->tempInt());
 
--- a/js/src/jit/mips/CodeGenerator-mips.h
+++ b/js/src/jit/mips/CodeGenerator-mips.h
@@ -239,17 +239,17 @@ class CodeGeneratorMIPS : public CodeGen
     void visitBox(LBox *box);
     void visitBoxFloatingPoint(LBoxFloatingPoint *box);
     void visitUnbox(LUnbox *unbox);
     void visitValue(LValue *value);
     void visitDouble(LDouble *ins);
     void visitFloat32(LFloat32 *ins);
 
     void visitGuardShape(LGuardShape *guard);
-    void visitGuardObjectType(LGuardObjectType *guard);
+    void visitGuardObjectGroup(LGuardObjectGroup *guard);
     void visitGuardClass(LGuardClass *guard);
 
     void visitNegI(LNegI *lir);
     void visitNegD(LNegD *lir);
     void visitNegF(LNegF *lir);
     void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins);
     void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins);
     void visitAsmJSCall(LAsmJSCall *ins);
--- a/js/src/jit/mips/LIR-mips.h
+++ b/js/src/jit/mips/LIR-mips.h
@@ -323,27 +323,27 @@ class LGuardShape : public LInstructionH
     const MGuardShape *mir() const {
         return mir_->toGuardShape();
     }
     const LDefinition *tempInt() {
         return getTemp(0);
     }
 };
 
-class LGuardObjectType : public LInstructionHelper<0, 1, 1>
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 1>
 {
   public:
-    LIR_HEADER(GuardObjectType);
+    LIR_HEADER(GuardObjectGroup);
 
-    LGuardObjectType(const LAllocation &in, const LDefinition &temp) {
+    LGuardObjectGroup(const LAllocation &in, const LDefinition &temp) {
         setOperand(0, in);
         setTemp(0, temp);
     }
-    const MGuardObjectType *mir() const {
-        return mir_->toGuardObjectType();
+    const MGuardObjectGroup *mir() const {
+        return mir_->toGuardObjectGroup();
     }
     const LDefinition *tempInt() {
         return getTemp(0);
     }
 };
 
 class LMulI : public LBinaryMath<0>
 {
--- a/js/src/jit/mips/Lowering-mips.cpp
+++ b/js/src/jit/mips/Lowering-mips.cpp
@@ -373,22 +373,22 @@ LIRGeneratorMIPS::visitGuardShape(MGuard
     LDefinition tempObj = temp(LDefinition::OBJECT);
     LGuardShape *guard = new(alloc()) LGuardShape(useRegister(ins->obj()), tempObj);
     assignSnapshot(guard, ins->bailoutKind());
     add(guard, ins);
     redefine(ins, ins->obj());
 }
 
 void
-LIRGeneratorMIPS::visitGuardObjectType(MGuardObjectType *ins)
+LIRGeneratorMIPS::visitGuardObjectGroup(MGuardObjectGroup *ins)
 {
     MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
 
     LDefinition tempObj = temp(LDefinition::OBJECT);
-    LGuardObjectType *guard = new(alloc()) LGuardObjectType(useRegister(ins->obj()), tempObj);
+    LGuardObjectGroup *guard = new(alloc()) LGuardObjectGroup(useRegister(ins->obj()), tempObj);
     assignSnapshot(guard, ins->bailoutKind());
     add(guard, ins);
     redefine(ins, ins->obj());
 }
 
 void
 LIRGeneratorMIPS::lowerUrshD(MUrsh *mir)
 {
--- a/js/src/jit/mips/Lowering-mips.h
+++ b/js/src/jit/mips/Lowering-mips.h
@@ -86,17 +86,17 @@ class LIRGeneratorMIPS : public LIRGener
 
   public:
     void visitConstant(MConstant *ins);
     void visitBox(MBox *box);
     void visitUnbox(MUnbox *unbox);
     void visitReturn(MReturn *ret);
     void lowerPhi(MPhi *phi);
     void visitGuardShape(MGuardShape *ins);
-    void visitGuardObjectType(MGuardObjectType *ins);
+    void visitGuardObjectGroup(MGuardObjectGroup *ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
--- a/js/src/jit/none/LIR-none.h
+++ b/js/src/jit/none/LIR-none.h
@@ -39,15 +39,15 @@ class LTableSwitchV : public LInstructio
     const LDefinition *tempInt() { MOZ_CRASH(); }
     const LDefinition *tempFloat() { MOZ_CRASH(); }
     const LDefinition *tempPointer() { MOZ_CRASH(); }
 
     static const size_t InputValue = 0;
 };
 
 class LGuardShape : public LInstruction {};
-class LGuardObjectType : public LInstruction {};
+class LGuardObjectGroup : public LInstruction {};
 class LMulI : public LInstruction {};
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_none_LIR_none_h */
--- a/js/src/jit/none/Lowering-none.h
+++ b/js/src/jit/none/Lowering-none.h
@@ -65,17 +65,17 @@ class LIRGeneratorNone : public LIRGener
     void lowerUDiv(MDiv *) { MOZ_CRASH(); }
     void lowerUMod(MMod *) { MOZ_CRASH(); }
     void visitBox(MBox *box) { MOZ_CRASH(); }
     void visitUnbox(MUnbox *unbox) { MOZ_CRASH(); }
     void visitReturn(MReturn *ret) { MOZ_CRASH(); }
     void visitPowHalf(MPowHalf *) { MOZ_CRASH(); }
     void visitAsmJSNeg(MAsmJSNeg *) { MOZ_CRASH(); }
     void visitGuardShape(MGuardShape *ins) { MOZ_CRASH(); }
-    void visitGuardObjectType(MGuardObjectType *ins) { MOZ_CRASH(); }
+    void visitGuardObjectGroup(MGuardObjectGroup *ins) { MOZ_CRASH(); }
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins) { MOZ_CRASH(); }
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins) { MOZ_CRASH(); }
     void visitAsmJSLoadHeap(MAsmJSLoadHeap *ins) { MOZ_CRASH(); }
     void visitAsmJSStoreHeap(MAsmJSStoreHeap *ins) { MOZ_CRASH(); }
     void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins) { MOZ_CRASH(); }
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins) { MOZ_CRASH(); }
     void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins) { MOZ_CRASH(); }
     void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins) { MOZ_CRASH(); }
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -1973,34 +1973,34 @@ CodeGeneratorX86Shared::visitGuardShape(
 {
     Register obj = ToRegister(guard->input());
     masm.cmpPtr(Operand(obj, JSObject::offsetOfShape()), ImmGCPtr(guard->mir()->shape()));
 
     bailoutIf(Assembler::NotEqual, guard->snapshot());
 }
 
 void
-CodeGeneratorX86Shared::visitGuardObjectType(LGuardObjectType *guard)
+CodeGeneratorX86Shared::visitGuardObjectGroup(LGuardObjectGroup *guard)
 {
     Register obj = ToRegister(guard->input());
-    masm.cmpPtr(Operand(obj, JSObject::offsetOfType()), ImmGCPtr(guard->mir()->typeObject()));
+    masm.cmpPtr(Operand(obj, JSObject::offsetOfGroup()), ImmGCPtr(guard->mir()->group()));
 
     Assembler::Condition cond =
         guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
     bailoutIf(cond, guard->snapshot());
 }
 
 void
 CodeGeneratorX86Shared::visitGuardClass(LGuardClass *guard)
 {
     Register obj = ToRegister(guard->input());
     Register tmp = ToRegister(guard->tempInt());
 
-    masm.loadPtr(Address(obj, JSObject::offsetOfType()), tmp);
-    masm.cmpPtr(Operand(tmp, types::TypeObject::offsetOfClasp()), ImmPtr(guard->mir()->getClass()));
+    masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), tmp);
+    masm.cmpPtr(Operand(tmp, types::ObjectGroup::offsetOfClasp()), ImmPtr(guard->mir()->getClass()));
     bailoutIf(Assembler::NotEqual, guard->snapshot());
 }
 
 void
 CodeGeneratorX86Shared::visitEffectiveAddress(LEffectiveAddress *ins)
 {
     const MEffectiveAddress *mir = ins->mir();
     Register base = ToRegister(ins->base());
--- a/js/src/jit/shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.h
@@ -193,17 +193,17 @@ class CodeGeneratorX86Shared : public Co
     virtual void visitMathF(LMathF *math);
     virtual void visitFloor(LFloor *lir);
     virtual void visitFloorF(LFloorF *lir);
     virtual void visitCeil(LCeil *lir);
     virtual void visitCeilF(LCeilF *lir);
     virtual void visitRound(LRound *lir);
     virtual void visitRoundF(LRoundF *lir);
     virtual void visitGuardShape(LGuardShape *guard);
-    virtual void visitGuardObjectType(LGuardObjectType *guard);
+    virtual void visitGuardObjectGroup(LGuardObjectGroup *guard);
     virtual void visitGuardClass(LGuardClass *guard);
     virtual void visitEffectiveAddress(LEffectiveAddress *ins);
     virtual void visitUDivOrMod(LUDivOrMod *ins);
     virtual void visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
     virtual void visitMemoryBarrier(LMemoryBarrier *ins);
 
     void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool);
 
--- a/js/src/jit/shared/LIR-x86-shared.h
+++ b/js/src/jit/shared/LIR-x86-shared.h
@@ -277,26 +277,26 @@ class LGuardShape : public LInstructionH
     explicit LGuardShape(const LAllocation &in) {
         setOperand(0, in);
     }
     const MGuardShape *mir() const {
         return mir_->toGuardShape();
     }
 };
 
-class LGuardObjectType : public LInstructionHelper<0, 1, 0>
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 0>
 {
   public:
-    LIR_HEADER(GuardObjectType)
+    LIR_HEADER(GuardObjectGroup)
 
-    explicit LGuardObjectType(const LAllocation &in) {
+    explicit LGuardObjectGroup(const LAllocation &in) {
         setOperand(0, in);
     }
-    const MGuardObjectType *mir() const {
-        return mir_->toGuardObjectType();
+    const MGuardObjectGroup *mir() const {
+        return mir_->toGuardObjectGroup();
     }
 };
 
 class LMulI : public LBinaryMath<0, 1>
 {
   public:
     LIR_HEADER(MulI)
 
--- a/js/src/jit/shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/shared/Lowering-x86-shared.cpp
@@ -39,21 +39,21 @@ LIRGeneratorX86Shared::visitGuardShape(M
 
     LGuardShape *guard = new(alloc()) LGuardShape(useRegis