Bug 1384808 - Implement a linear cache for searching the shape lineage r=djvj
author Denis Palmeiro <dpalmeiro@mozilla.com>
Sun, 03 Feb 2019 00:03:35 +0000
changeset 456671 a8bb75678922337675d05ec4c7261ab34dfb995b
parent 456670 3d38215a0b16586db0207b4fb9a4b2553fb1ba03
child 456672 1b1ecaa655a3e0a8481c538c95bb996672c58de0
push id 111689
push user ccoroiu@mozilla.com
push date Mon, 04 Feb 2019 21:54:26 +0000
treeherder mozilla-inbound@840cee0ce7b0
reviewers djvj
bugs 1384808
milestone 67.0a1
Bug 1384808 - Implement a linear cache for searching the shape lineage r=djvj

Linearly searching the shape lineage can be expensive: traversing a linked list causes branch misses and cache misses. Since these searches happen frequently, it is worthwhile to cache their results. This patch lazily allocates a small linear cache for a shape once it has been linearly searched enough times; the result of each subsequent linear search is placed into the cache. If the jsid being searched for is looked up frequently, a quick scan of the cache yields a "cache hit". Otherwise, we fall back to a linear search and append the new entry to the cache. Once the cache is full, it is converted into a shape hash table, as in the previous approach.

Differential Revision: https://phabricator.services.mozilla.com/D12155
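
To make the lookup policy concrete, here is a minimal standalone C++ sketch of the behavior described above. The names below (MiniShapeIC, the jsid alias) are simplified stand-ins invented for illustration, not the real SpiderMonkey types; the actual ShapeIC added by this patch holds up to MAX_SIZE (7) entries and, once appendEntry starts failing, Shape::hashify replaces it with a ShapeTable.

// Illustration only: simplified stand-ins, not the real js::Shape / jsid.
#include <cstdint>

struct Shape;            // opaque stand-in for js::Shape
using jsid = uintptr_t;  // stand-in for the real jsid type

struct MiniShapeIC {
  static constexpr uint8_t MAX_SIZE = 7;
  struct Entry { jsid id; Shape* shape; };
  Entry entries[MAX_SIZE];
  uint8_t nextFreeIndex = 0;

  // Cache hit path: scan at most MAX_SIZE entries for a matching id.
  bool search(jsid id, Shape** foundShape) {
    for (uint8_t i = 0; i < nextFreeIndex; i++) {
      if (entries[i].id == id) {
        *foundShape = entries[i].shape;
        return true;
      }
    }
    return false;
  }

  // Cache miss path: record the result of the linear lineage walk.
  // Returns false when full; in the patch, that failure triggers hashify().
  bool appendEntry(jsid id, Shape* shape) {
    if (nextFreeIndex == MAX_SIZE) {
      return false;
    }
    entries[nextFreeIndex].id = id;
    entries[nextFreeIndex].shape = shape;
    nextFreeIndex++;
    return true;
  }
};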
js/src/gc/GC.cpp
js/src/gc/GenerateStatsPhases.py
js/src/gc/Marking.cpp
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/vm/JSObject.cpp
js/src/vm/NativeObject.h
js/src/vm/Shape-inl.h
js/src/vm/Shape.cpp
js/src/vm/Shape.h
js/src/vm/TypeInference.cpp
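
Before the diff itself, one note on the central data-structure change: BaseShape's raw ShapeTable* member is replaced by ShapeCachePtr, a single word whose two low bits record whether it currently points at a ShapeIC or a ShapeTable, so sizeof(BaseShape) does not grow. The following is a hypothetical reconstruction of that tagging scheme, assuming (as the patch asserts) that both objects are at least 4-byte aligned; it is a sketch, not the patch's exact class.

#include <cassert>
#include <cstdint>

struct ShapeIC;     // only pointers are used here, so forward
struct ShapeTable;  // declarations suffice for the sketch

class TaggedCachePtr {
  uintptr_t p = 0;  // sole data member: one word total
  static constexpr uintptr_t TYPE_MASK = 0x3;
  static constexpr uintptr_t TYPE_IC = 0x1;
  static constexpr uintptr_t TYPE_TABLE = 0x2;

 public:
  bool isIC() const { return (p & TYPE_MASK) == TYPE_IC; }
  bool isTable() const { return (p & TYPE_MASK) == TYPE_TABLE; }

  void setIC(ShapeIC* ic) {
    // Low-bit tagging is only sound if pointers are >= 4-byte aligned.
    assert((uintptr_t(ic) & TYPE_MASK) == 0);
    p = uintptr_t(ic) | TYPE_IC;
  }
  void setTable(ShapeTable* table) {
    assert((uintptr_t(table) & TYPE_MASK) == 0);
    p = uintptr_t(table) | TYPE_TABLE;
  }

  ShapeIC* asIC() const {
    assert(isIC());
    return reinterpret_cast<ShapeIC*>(p & ~TYPE_MASK);
  }
  ShapeTable* asTable() const {
    assert(isTable());
    return reinterpret_cast<ShapeTable*>(p & ~TYPE_MASK);
  }
};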
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -4233,25 +4233,25 @@ static void RelazifyFunctionsForShrinkin
     if (zone->isSelfHostingZone()) {
       continue;
     }
     RelazifyFunctions(zone, AllocKind::FUNCTION);
     RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
   }
 }
 
-static void PurgeShapeTablesForShrinkingGC(JSRuntime* rt) {
-  gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::PURGE_SHAPE_TABLES);
+static void PurgeShapeCachesForShrinkingGC(JSRuntime* rt) {
+  gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::PURGE_SHAPE_CACHES);
   for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-    if (!CanRelocateZone(zone) || zone->keepShapeTables()) {
+    if (!CanRelocateZone(zone) || zone->keepShapeCaches()) {
       continue;
     }
     for (auto baseShape = zone->cellIter<BaseShape>(); !baseShape.done();
          baseShape.next()) {
-      baseShape->maybePurgeTable();
+      baseShape->maybePurgeCache();
     }
   }
 }
 
 static void UnmarkCollectedZones(GCParallelTask* task) {
   JSRuntime* rt = task->runtime();
   for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
     /* Unmark everything in the zones being collected. */
@@ -4332,17 +4332,17 @@ bool GCRuntime::beginMarkPhase(JS::GCRea
      * functions with JIT code) and before the actual mark phase, so that
      * the current GC can collect the JSScripts we're unlinking here.  We do
      * this only when we're performing a shrinking GC, as too much
      * relazification can cause performance issues when we have to reparse
      * the same functions over and over.
      */
     if (invocationKind == GC_SHRINK) {
       RelazifyFunctionsForShrinkingGC(rt);
-      PurgeShapeTablesForShrinkingGC(rt);
+      PurgeShapeCachesForShrinkingGC(rt);
     }
 
     /*
      * We must purge the runtime at the beginning of an incremental GC. The
      * danger if we purge later is that the snapshot invariant of
      * incremental GC will be broken, as follows. If some object is
      * reachable only through some cache (say the dtoaCache) then it will
      * not be part of the snapshot.  If we purge after root marking, then
@@ -8451,19 +8451,18 @@ void js::gc::CheckHashTablesAfterMovingG
   for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
     zone->checkUniqueIdTableAfterMovingGC();
     zone->checkInitialShapesTableAfterMovingGC();
     zone->checkBaseShapeTableAfterMovingGC();
 
     JS::AutoCheckCannotGC nogc;
     for (auto baseShape = zone->cellIter<BaseShape>(); !baseShape.done();
          baseShape.next()) {
-      if (ShapeTable* table = baseShape->maybeTable(nogc)) {
-        table->checkAfterMovingGC();
-      }
+      ShapeCachePtr p = baseShape->getCache(nogc);
+      p.checkAfterMovingGC();
     }
   }
 
   for (CompartmentsIter c(rt); !c.done(); c.next()) {
     c->checkWrapperMapAfterMovingGC();
 
     for (RealmsInCompartmentIter r(c); !r.done(); r.next()) {
       r->checkObjectGroupTablesAfterMovingGC();
--- a/js/src/gc/GenerateStatsPhases.py
+++ b/js/src/gc/GenerateStatsPhases.py
@@ -84,17 +84,17 @@ PhaseKindGraphRoots = [
     ]),
     PhaseKind("WAIT_BACKGROUND_THREAD", "Wait Background Thread", 2),
     PhaseKind("PREPARE", "Prepare For Collection", 69, [
         PhaseKind("UNMARK", "Unmark", 7),
         PhaseKind("BUFFER_GRAY_ROOTS", "Buffer Gray Roots", 49),
         PhaseKind("MARK_DISCARD_CODE", "Mark Discard Code", 3),
         PhaseKind("RELAZIFY_FUNCTIONS", "Relazify Functions", 4),
         PhaseKind("PURGE", "Purge", 5),
-        PhaseKind("PURGE_SHAPE_TABLES", "Purge ShapeTables", 60),
+        PhaseKind("PURGE_SHAPE_CACHES", "Purge ShapeCaches", 60),
         JoinParallelTasksPhaseKind
     ]),
     PhaseKind("MARK", "Mark", 6, [
         MarkRootsPhaseKind,
         UnmarkGrayPhaseKind,
         PhaseKind("MARK_DELAYED", "Mark Delayed", 8, [
             UnmarkGrayPhaseKind,
         ]),
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -1091,18 +1091,18 @@ inline void js::GCMarker::eagerlyMarkChi
 
   do {
     // Special case: if a base shape has a shape table then all its pointers
    // must point to this shape or an ancestor.  Since these pointers will
     // be traced by this loop they do not need to be traced here as well.
     BaseShape* base = shape->base();
     CheckTraversedEdge(shape, base);
     if (mark(base)) {
-      MOZ_ASSERT(base->canSkipMarkingShapeTable(shape));
-      base->traceChildrenSkipShapeTable(this);
+      MOZ_ASSERT(base->canSkipMarkingShapeCache(shape));
+      base->traceChildrenSkipShapeCache(this);
     }
 
     traverseEdge(shape, shape->propidRef().get());
 
     // When triggered between slices on behalf of a barrier, these
     // objects may reside in the nursery, so require an extra check.
     // FIXME: Bug 1157967 - remove the isTenured checks.
     if (shape->hasGetterObject() && shape->getterObject()->isTenured()) {
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -65,17 +65,17 @@ JS::Zone::Zone(JSRuntime* rt)
       isSystem(this, false),
 #ifdef DEBUG
       gcLastSweepGroupIndex(0),
 #endif
       jitZone_(this, nullptr),
       gcScheduled_(false),
       gcScheduledSaved_(false),
       gcPreserveCode_(false),
-      keepShapeTables_(this, false),
+      keepShapeCaches_(this, false),
       listNext_(NotOnList) {
   /* Ensure that there are no vtables to mess us up here. */
   MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
              static_cast<JS::shadow::Zone*>(this));
 
   AutoLockGC lock(rt);
   threshold.updateAfterGC(8192, GC_NORMAL, rt->gc.tunables,
                           rt->gc.schedulingState, lock);
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -198,17 +198,17 @@ class Zone : public JS::shadow::Zone,
       js::FreeOp* fop,
       ShouldDiscardBaselineCode discardBaselineCode = DiscardBaselineCode,
       ShouldReleaseTypes releaseTypes = KeepTypes);
 
   void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                               size_t* typePool, size_t* regexpZone,
                               size_t* jitZone, size_t* baselineStubsOptimized,
                               size_t* cachedCFG, size_t* uniqueIdMap,
-                              size_t* shapeTables, size_t* atomsMarkBitmaps,
+                              size_t* shapeCaches, size_t* atomsMarkBitmaps,
                               size_t* compartmentObjects,
                               size_t* crossCompartmentWrappersTables,
                               size_t* compartmentsPrivateData);
 
   // Iterate over all cells in the zone. See the definition of ZoneCellIter
   // in gc/GC-inl.h for the possible arguments and documentation.
   template <typename T, typename... Args>
   js::gc::ZoneCellIter<T> cellIter(Args&&... args) {
@@ -640,18 +640,18 @@ class Zone : public JS::shadow::Zone,
   // off-thread zone into the target zone.
   void adoptUniqueIds(JS::Zone* source);
 
 #ifdef JSGC_HASH_TABLE_CHECKS
   // Assert that the UniqueId table has been redirected successfully.
   void checkUniqueIdTableAfterMovingGC();
 #endif
 
-  bool keepShapeTables() const { return keepShapeTables_; }
-  void setKeepShapeTables(bool b) { keepShapeTables_ = b; }
+  bool keepShapeCaches() const { return keepShapeCaches_; }
+  void setKeepShapeCaches(bool b) { keepShapeCaches_ = b; }
 
   // Delete an empty compartment after its contents have been merged.
   void deleteEmptyCompartment(JS::Compartment* comp);
 
   // Non-zero if the storage underlying any typed object in this zone might
   // be detached. This is stored in Zone because IC stubs bake in a pointer
   // to this field and Baseline IC code is shared across realms within a
   // Zone. Furthermore, it's not entirely clear if this flag is ever set to
@@ -659,17 +659,17 @@ class Zone : public JS::shadow::Zone,
   uint32_t detachedTypedObjects = 0;
 
  private:
   js::ZoneOrGCTaskData<js::jit::JitZone*> jitZone_;
 
   js::MainThreadData<bool> gcScheduled_;
   js::MainThreadData<bool> gcScheduledSaved_;
   js::MainThreadData<bool> gcPreserveCode_;
-  js::ZoneData<bool> keepShapeTables_;
+  js::ZoneData<bool> keepShapeCaches_;
 
   // Allow zones to be linked into a list
   friend class js::gc::ZoneList;
   static Zone* const NotOnList;
   js::MainThreadOrGCTaskData<Zone*> listNext_;
   bool isOnList() const;
   Zone* nextZone() const;
 
--- a/js/src/vm/JSObject.cpp
+++ b/js/src/vm/JSObject.cpp
@@ -3649,16 +3649,19 @@ void JSObject::dump(js::GenericPrinter& 
       obj->isNative() ? &obj->as<NativeObject>() : nullptr;
   if (nobj) {
     if (nobj->inDictionaryMode()) {
       out.put(" inDictionaryMode");
     }
     if (nobj->hasShapeTable()) {
       out.put(" hasShapeTable");
     }
+    if (nobj->hasShapeIC()) {
+      out.put(" hasShapeCache");
+    }
     if (nobj->hadElementsAccess()) {
       out.put(" had_elements_access");
     }
     if (nobj->isIndexed()) {
       out.put(" indexed");
     }
   } else {
     out.put(" not_native\n");
--- a/js/src/vm/NativeObject.h
+++ b/js/src/vm/NativeObject.h
@@ -501,16 +501,18 @@ class NativeObject : public ShapedObject
     MOZ_ASSERT(shape());
     return shape();
   }
 
   uint32_t propertyCount() const { return lastProperty()->entryCount(); }
 
   bool hasShapeTable() const { return lastProperty()->hasTable(); }
 
+  bool hasShapeIC() const { return lastProperty()->hasIC(); }
+
   HeapSlotArray getDenseElements() {
     return HeapSlotArray(elements_, !getElementsHeader()->isCopyOnWrite());
   }
   HeapSlotArray getDenseElementsAllowCopyOnWrite() {
     // Backdoor allowing direct access to copy on write elements.
     return HeapSlotArray(elements_, true);
   }
   const Value& getDenseElement(uint32_t idx) const {
@@ -873,17 +875,17 @@ class NativeObject : public ShapedObject
       JSContext* cx, HandleNativeObject obj, HandleShape parent,
       MutableHandle<StackShape> child);
   static MOZ_ALWAYS_INLINE Shape* getChildAccessorProperty(
       JSContext* cx, HandleNativeObject obj, HandleShape parent,
       MutableHandle<StackShape> child);
 
   static MOZ_ALWAYS_INLINE bool maybeConvertToOrGrowDictionaryForAdd(
       JSContext* cx, HandleNativeObject obj, HandleId id, ShapeTable** table,
-      ShapeTable::Entry** entry, const AutoKeepShapeTables& keep);
+      ShapeTable::Entry** entry, const AutoKeepShapeCaches& keep);
 
   static bool maybeToDictionaryModeForPut(JSContext* cx, HandleNativeObject obj,
                                           MutableHandleShape shape);
 
  public:
   /* Add a property whose id is not yet in this scope. */
   static MOZ_ALWAYS_INLINE Shape* addDataProperty(JSContext* cx,
                                                   HandleNativeObject obj,
@@ -928,22 +930,22 @@ class NativeObject : public ShapedObject
    * Notes:
    * 1. getter and setter must be normalized based on flags (see jsscope.cpp).
    * 2. Checks for non-extensibility must be done by callers.
    */
   static Shape* addDataPropertyInternal(JSContext* cx, HandleNativeObject obj,
                                         HandleId id, uint32_t slot,
                                         unsigned attrs, ShapeTable* table,
                                         ShapeTable::Entry* entry,
-                                        const AutoKeepShapeTables& keep);
+                                        const AutoKeepShapeCaches& keep);
 
   static Shape* addAccessorPropertyInternal(
       JSContext* cx, HandleNativeObject obj, HandleId id, JSGetterOp getter,
       JSSetterOp setter, unsigned attrs, ShapeTable* table,
-      ShapeTable::Entry* entry, const AutoKeepShapeTables& keep);
+      ShapeTable::Entry* entry, const AutoKeepShapeCaches& keep);
 
   static MOZ_MUST_USE bool fillInAfterSwap(JSContext* cx,
                                            HandleNativeObject obj,
                                            const AutoValueVector& values,
                                            void* priv);
 
  public:
   // Return true if this object has been converted from shared-immutable
--- a/js/src/vm/Shape-inl.h
+++ b/js/src/vm/Shape-inl.h
@@ -17,52 +17,52 @@
 #include "vm/TypedArrayObject.h"
 
 #include "gc/Marking-inl.h"
 #include "vm/JSAtom-inl.h"
 #include "vm/JSContext-inl.h"
 
 namespace js {
 
-inline AutoKeepShapeTables::AutoKeepShapeTables(JSContext* cx)
-    : cx_(cx), prev_(cx->zone()->keepShapeTables()) {
-  cx->zone()->setKeepShapeTables(true);
+inline AutoKeepShapeCaches::AutoKeepShapeCaches(JSContext* cx)
+    : cx_(cx), prev_(cx->zone()->keepShapeCaches()) {
+  cx->zone()->setKeepShapeCaches(true);
 }
 
-inline AutoKeepShapeTables::~AutoKeepShapeTables() {
-  cx_->zone()->setKeepShapeTables(prev_);
+inline AutoKeepShapeCaches::~AutoKeepShapeCaches() {
+  cx_->zone()->setKeepShapeCaches(prev_);
 }
 
 inline StackBaseShape::StackBaseShape(const Class* clasp, uint32_t objectFlags)
     : flags(objectFlags), clasp(clasp) {}
 
 MOZ_ALWAYS_INLINE Shape* Shape::search(JSContext* cx, jsid id) {
   return search(cx, this, id);
 }
 
-MOZ_ALWAYS_INLINE bool Shape::maybeCreateTableForLookup(JSContext* cx) {
-  if (hasTable()) {
+MOZ_ALWAYS_INLINE bool Shape::maybeCreateCacheForLookup(JSContext* cx) {
+  if (hasTable() || hasIC()) {
     return true;
   }
 
   if (!inDictionary() && numLinearSearches() < LINEAR_SEARCHES_MAX) {
     incrementNumLinearSearches();
     return true;
   }
 
   if (!isBigEnoughForAShapeTable()) {
     return true;
   }
 
-  return Shape::hashify(cx, this);
+  return Shape::cachify(cx, this);
 }
 
 template <MaybeAdding Adding>
 /* static */ inline bool Shape::search(JSContext* cx, Shape* start, jsid id,
-                                       const AutoKeepShapeTables& keep,
+                                       const AutoKeepShapeCaches& keep,
                                        Shape** pshape, ShapeTable** ptable,
                                        ShapeTable::Entry** pentry) {
   if (start->inDictionary()) {
     ShapeTable* table = start->ensureTableForDictionary(cx, keep);
     if (!table) {
       return false;
     }
     *ptable = table;
@@ -75,28 +75,41 @@ template <MaybeAdding Adding>
   *pentry = nullptr;
   *pshape = Shape::search<Adding>(cx, start, id);
   return true;
 }
 
 template <MaybeAdding Adding>
 /* static */ MOZ_ALWAYS_INLINE Shape* Shape::search(JSContext* cx, Shape* start,
                                                     jsid id) {
-  if (start->maybeCreateTableForLookup(cx)) {
+  Shape* foundShape = nullptr;
+  if (start->maybeCreateCacheForLookup(cx)) {
     JS::AutoCheckCannotGC nogc;
-    if (ShapeTable* table = start->maybeTable(nogc)) {
-      ShapeTable::Entry& entry = table->search<Adding>(id, nogc);
-      return entry.shape();
+    ShapeCachePtr cache = start->getCache(nogc);
+    if (cache.search<Adding>(id, start, &foundShape)) {
+      return foundShape;
     }
   } else {
     // Just do a linear search.
     cx->recoverFromOutOfMemory();
   }
 
-  return start->searchLinear(id);
+  foundShape = start->searchLinear(id);
+  if (start->hasIC()) {
+    JS::AutoCheckCannotGC nogc;
+    if (!start->appendShapeToIC(id, foundShape, nogc)) {
+      // Failure indicates that the cache is full, which means we missed
+      // the cache ShapeIC::MAX_SIZE times. This indicates the cache is no
+      // longer useful, so convert it into a ShapeTable.
+      if (!Shape::hashify(cx, start)) {
+        cx->recoverFromOutOfMemory();
+      }
+    }
+  }
+  return foundShape;
 }
 
 inline Shape* Shape::new_(JSContext* cx, Handle<StackShape> other,
                           uint32_t nfixed) {
   Shape* shape = other.isAccessorShape() ? js::Allocate<AccessorShape>(cx)
                                          : js::Allocate<Shape>(cx);
   if (!shape) {
     ReportOutOfMemory(cx);
@@ -338,52 +351,53 @@ MOZ_ALWAYS_INLINE ShapeTable::Entry& Sha
 #endif
   }
 
   MOZ_CRASH("Shape::search failed to find an expected entry.");
 }
 
 template <MaybeAdding Adding>
 MOZ_ALWAYS_INLINE ShapeTable::Entry& ShapeTable::search(
-    jsid id, const AutoKeepShapeTables&) {
+    jsid id, const AutoKeepShapeCaches&) {
   return searchUnchecked<Adding>(id);
 }
 
 template <MaybeAdding Adding>
 MOZ_ALWAYS_INLINE ShapeTable::Entry& ShapeTable::search(
     jsid id, const JS::AutoCheckCannotGC&) {
   return searchUnchecked<Adding>(id);
 }
 
 /*
  * Keep this function in sync with search. It neither hashifies the start
  * shape nor increments linear search count.
  */
 MOZ_ALWAYS_INLINE Shape* Shape::searchNoHashify(Shape* start, jsid id) {
   /*
-   * If we have a table, search in the shape table, else do a linear
-   * search. We never hashify into a table in parallel.
+   * If we have a cache, search in the shape cache, else do a linear
+   * search. We never hashify or cachify into a table in parallel.
    */
+  Shape* foundShape;
   JS::AutoCheckCannotGC nogc;
-  if (ShapeTable* table = start->maybeTable(nogc)) {
-    ShapeTable::Entry& entry = table->search<MaybeAdding::NotAdding>(id, nogc);
-    return entry.shape();
+  ShapeCachePtr cache = start->getCache(nogc);
+  if (!cache.search<MaybeAdding::NotAdding>(id, start, &foundShape)) {
+    foundShape = start->searchLinear(id);
   }
 
-  return start->searchLinear(id);
+  return foundShape;
 }
 
 /* static */ MOZ_ALWAYS_INLINE Shape* NativeObject::addDataProperty(
     JSContext* cx, HandleNativeObject obj, HandleId id, uint32_t slot,
     unsigned attrs) {
   MOZ_ASSERT(!JSID_IS_VOID(id));
   MOZ_ASSERT(obj->uninlinedNonProxyIsExtensible());
   MOZ_ASSERT(!obj->containsPure(id));
 
-  AutoKeepShapeTables keep(cx);
+  AutoKeepShapeCaches keep(cx);
   ShapeTable* table = nullptr;
   ShapeTable::Entry* entry = nullptr;
   if (obj->inDictionaryMode()) {
     table = obj->lastProperty()->ensureTableForDictionary(cx, keep);
     if (!table) {
       return nullptr;
     }
     entry = &table->search<MaybeAdding::Adding>(id, keep);
@@ -394,17 +408,17 @@ MOZ_ALWAYS_INLINE Shape* Shape::searchNo
 
 /* static */ MOZ_ALWAYS_INLINE Shape* NativeObject::addAccessorProperty(
     JSContext* cx, HandleNativeObject obj, HandleId id, GetterOp getter,
     SetterOp setter, unsigned attrs) {
   MOZ_ASSERT(!JSID_IS_VOID(id));
   MOZ_ASSERT(obj->uninlinedNonProxyIsExtensible());
   MOZ_ASSERT(!obj->containsPure(id));
 
-  AutoKeepShapeTables keep(cx);
+  AutoKeepShapeCaches keep(cx);
   ShapeTable* table = nullptr;
   ShapeTable::Entry* entry = nullptr;
   if (obj->inDictionaryMode()) {
     table = obj->lastProperty()->ensureTableForDictionary(cx, keep);
     if (!table) {
       return nullptr;
     }
     entry = &table->search<MaybeAdding::Adding>(id, keep);
--- a/js/src/vm/Shape.cpp
+++ b/js/src/vm/Shape.cpp
@@ -33,28 +33,34 @@ using namespace js;
 using mozilla::CeilingLog2Size;
 using mozilla::PodZero;
 
 using JS::AutoCheckCannotGC;
 
 Shape* const ShapeTable::Entry::SHAPE_REMOVED =
     (Shape*)ShapeTable::Entry::SHAPE_COLLISION;
 
+bool ShapeIC::init(JSContext* cx) {
+  size_ = MAX_SIZE;
+  entries_.reset(cx->pod_calloc<Entry>(size_));
+  return !!entries_;
+}
+
 bool ShapeTable::init(JSContext* cx, Shape* lastProp) {
   uint32_t sizeLog2 = CeilingLog2Size(entryCount_);
   uint32_t size = JS_BIT(sizeLog2);
   if (entryCount_ >= size - (size >> 2)) {
     sizeLog2++;
   }
   if (sizeLog2 < MIN_SIZE_LOG2) {
     sizeLog2 = MIN_SIZE_LOG2;
   }
 
   size = JS_BIT(sizeLog2);
-  entries_ = cx->pod_calloc<Entry>(size);
+  entries_.reset(cx->pod_calloc<Entry>(size));
   if (!entries_) {
     return false;
   }
 
   MOZ_ASSERT(sizeLog2 <= HASH_BITS);
   hashShift_ = HASH_BITS - sizeLog2;
 
   for (Shape::Range<NoGC> r(lastProp); !r.empty(); r.popFront()) {
@@ -150,27 +156,47 @@ void Shape::handoffTableTo(Shape* shape)
 
 /* static */ bool Shape::hashify(JSContext* cx, Shape* shape) {
   MOZ_ASSERT(!shape->hasTable());
 
   if (!shape->ensureOwnBaseShape(cx)) {
     return false;
   }
 
-  ShapeTable* table = cx->new_<ShapeTable>(shape->entryCount());
+  UniquePtr<ShapeTable> table =
+      cx->make_unique<ShapeTable>(shape->entryCount());
   if (!table) {
     return false;
   }
 
   if (!table->init(cx, shape)) {
-    js_free(table);
     return false;
   }
 
-  shape->base()->setTable(table);
+  shape->base()->setTable(table.release());
+  return true;
+}
+
+/* static */ bool Shape::cachify(JSContext* cx, Shape* shape) {
+  MOZ_ASSERT(!shape->hasTable() && !shape->hasIC());
+
+  if (!shape->ensureOwnBaseShape(cx)) {
+    return false;
+  }
+
+  UniquePtr<ShapeIC> ic = cx->make_unique<ShapeIC>();
+  if (!ic) {
+    return false;
+  }
+
+  if (!ic->init(cx)) {
+    return false;
+  }
+
+  shape->base()->setIC(ic.release());
   return true;
 }
 
 bool ShapeTable::change(JSContext* cx, int log2Delta) {
   MOZ_ASSERT(entries_);
   MOZ_ASSERT(-1 <= log2Delta && log2Delta <= 1);
 
   /*
@@ -184,18 +210,18 @@ bool ShapeTable::change(JSContext* cx, i
   if (!newTable) {
     return false;
   }
 
   /* Now that we have newTable allocated, update members. */
   MOZ_ASSERT(newLog2 <= HASH_BITS);
   hashShift_ = HASH_BITS - newLog2;
   removedCount_ = 0;
-  Entry* oldTable = entries_;
-  entries_ = newTable;
+  Entry* oldTable = entries_.release();
+  entries_.reset(newTable);
 
   /* Copy only live entries, leaving removed and free ones behind. */
   AutoCheckCannotGC nogc;
   for (Entry* oldEntry = oldTable; oldSize != 0; oldEntry++) {
     if (Shape* shape = oldEntry->shape()) {
       Entry& entry = search<MaybeAdding::Adding>(shape->propid(), nogc);
       MOZ_ASSERT(entry.isFree());
       entry.setShape(shape);
@@ -223,31 +249,66 @@ bool ShapeTable::grow(JSContext* cx) {
       ReportOutOfMemory(cx);
       return false;
     }
   }
 
   return true;
 }
 
+void ShapeCachePtr::trace(JSTracer* trc) {
+  if (isIC()) {
+    getICPointer()->trace(trc);
+  } else if (isTable()) {
+    getTablePointer()->trace(trc);
+  }
+}
+
+void ShapeIC::trace(JSTracer* trc) {
+  for (size_t i = 0; i < entryCount(); i++) {
+    Entry& entry = entries_[i];
+    if (entry.shape_) {
+      TraceManuallyBarrieredEdge(trc, &entry.shape_, "ShapeIC shape");
+    }
+  }
+}
+
 void ShapeTable::trace(JSTracer* trc) {
   for (size_t i = 0; i < capacity(); i++) {
     Entry& entry = getEntry(i);
     Shape* shape = entry.shape();
     if (shape) {
       TraceManuallyBarrieredEdge(trc, &shape, "ShapeTable shape");
       if (shape != entry.shape()) {
         entry.setPreservingCollision(shape);
       }
     }
   }
 }
 
 #ifdef JSGC_HASH_TABLE_CHECKS
 
+void ShapeCachePtr::checkAfterMovingGC() {
+  if (isIC()) {
+    getICPointer()->checkAfterMovingGC();
+  } else if (isTable()) {
+    getTablePointer()->checkAfterMovingGC();
+  }
+}
+
+void ShapeIC::checkAfterMovingGC() {
+  for (size_t i = 0; i < entryCount(); i++) {
+    Entry& entry = entries_[i];
+    Shape* shape = entry.shape_;
+    if (shape) {
+      CheckGCThingAfterMovingGC(shape);
+    }
+  }
+}
+
 void ShapeTable::checkAfterMovingGC() {
   for (size_t i = 0; i < capacity(); i++) {
     Entry& entry = getEntry(i);
     Shape* shape = entry.shape();
     if (shape) {
       CheckGCThingAfterMovingGC(shape);
     }
   }
@@ -495,17 +556,17 @@ class MOZ_RAII AutoCheckShapeConsistency
 #endif
 };
 
 }  // namespace js
 
 /* static */ MOZ_ALWAYS_INLINE bool
 NativeObject::maybeConvertToOrGrowDictionaryForAdd(
     JSContext* cx, HandleNativeObject obj, HandleId id, ShapeTable** table,
-    ShapeTable::Entry** entry, const AutoKeepShapeTables& keep) {
+    ShapeTable::Entry** entry, const AutoKeepShapeCaches& keep) {
   MOZ_ASSERT(!!*table == !!*entry);
 
   // The code below deals with either converting obj to dictionary mode or
   // growing an object that's already in dictionary mode.
   if (!obj->inDictionaryMode()) {
     if (!ShouldConvertToDictionary(obj)) {
       return true;
     }
@@ -524,17 +585,17 @@ NativeObject::maybeConvertToOrGrowDictio
 
   *entry = &(*table)->search<MaybeAdding::Adding>(id, keep);
   MOZ_ASSERT(!(*entry)->shape());
   return true;
 }
 
 MOZ_ALWAYS_INLINE void Shape::updateDictionaryTable(
     ShapeTable* table, ShapeTable::Entry* entry,
-    const AutoKeepShapeTables& keep) {
+    const AutoKeepShapeCaches& keep) {
   MOZ_ASSERT(table);
   MOZ_ASSERT(entry);
   MOZ_ASSERT(inDictionary());
 
   // Store this Shape in the table entry.
   entry->setPreservingCollision(this);
   table->incEntryCount();
 
@@ -554,17 +615,17 @@ static void AssertValidPropertyOp(Native
     MOZ_ASSERT(obj->is<ArrayObject>() || obj->is<ArgumentsObject>());
   }
 #endif
 }
 
 /* static */ Shape* NativeObject::addAccessorPropertyInternal(
     JSContext* cx, HandleNativeObject obj, HandleId id, GetterOp getter,
     SetterOp setter, unsigned attrs, ShapeTable* table,
-    ShapeTable::Entry* entry, const AutoKeepShapeTables& keep) {
+    ShapeTable::Entry* entry, const AutoKeepShapeCaches& keep) {
   AutoCheckShapeConsistency check(obj);
   AutoRooterGetterSetter gsRoot(cx, attrs, &getter, &setter);
 
   AssertValidPropertyOp(obj, getter, setter, attrs);
 
   if (!maybeConvertToOrGrowDictionaryForAdd(cx, obj, id, &table, &entry,
                                             keep)) {
     return nullptr;
@@ -595,17 +656,17 @@ static void AssertValidPropertyOp(Native
   }
 
   return shape;
 }
 
 /* static */ Shape* NativeObject::addDataPropertyInternal(
     JSContext* cx, HandleNativeObject obj, HandleId id, uint32_t slot,
     unsigned attrs, ShapeTable* table, ShapeTable::Entry* entry,
-    const AutoKeepShapeTables& keep) {
+    const AutoKeepShapeCaches& keep) {
   AutoCheckShapeConsistency check(obj);
 
   // The slot, if any, must be a reserved slot.
   MOZ_ASSERT(slot == SHAPE_INVALID_SLOT ||
              slot < JSCLASS_RESERVED_SLOTS(obj->getClass()));
 
   if (!maybeConvertToOrGrowDictionaryForAdd(cx, obj, id, &table, &entry,
                                             keep)) {
@@ -703,17 +764,17 @@ static MOZ_ALWAYS_INLINE Shape* Property
     }
 
     if (!obj->setLastProperty(cx, kid)) {
       return nullptr;
     }
     return kid;
   } while (0);
 
-  AutoKeepShapeTables keep(cx);
+  AutoKeepShapeCaches keep(cx);
   ShapeTable* table = nullptr;
   ShapeTable::Entry* entry = nullptr;
 
   if (!obj->inDictionaryMode()) {
     if (MOZ_UNLIKELY(ShouldConvertToDictionary(obj))) {
       if (!toDictionaryMode(cx, obj)) {
         return nullptr;
       }
@@ -895,17 +956,17 @@ static void AssertValidArrayIndex(Native
                                                   HandleNativeObject obj,
                                                   HandleId id, unsigned attrs) {
   MOZ_ASSERT(!JSID_IS_VOID(id));
 
   AutoCheckShapeConsistency check(obj);
   AssertValidArrayIndex(obj, id);
 
   // Search for id in order to claim its entry if table has been allocated.
-  AutoKeepShapeTables keep(cx);
+  AutoKeepShapeCaches keep(cx);
   RootedShape shape(cx);
   {
     ShapeTable* table;
     ShapeTable::Entry* entry;
     if (!Shape::search<MaybeAdding::Adding>(cx, obj->lastProperty(), id, keep,
                                             shape.address(), &table, &entry)) {
       return nullptr;
     }
@@ -1011,17 +1072,17 @@ static void AssertValidArrayIndex(Native
 
   AutoCheckShapeConsistency check(obj);
   AssertValidArrayIndex(obj, id);
   AssertValidPropertyOp(obj, getter, setter, attrs);
 
   AutoRooterGetterSetter gsRoot(cx, attrs, &getter, &setter);
 
   // Search for id in order to claim its entry if table has been allocated.
-  AutoKeepShapeTables keep(cx);
+  AutoKeepShapeCaches keep(cx);
   RootedShape shape(cx);
   {
     ShapeTable* table;
     ShapeTable::Entry* entry;
     if (!Shape::search<MaybeAdding::Adding>(cx, obj->lastProperty(), id, keep,
                                             shape.address(), &table, &entry)) {
       return nullptr;
     }
@@ -1149,17 +1210,17 @@ static void AssertValidArrayIndex(Native
   return putAccessorProperty(cx, obj, propid, getter, setter, attrs);
 }
 
 /* static */ bool NativeObject::removeProperty(JSContext* cx,
                                                HandleNativeObject obj,
                                                jsid id_) {
   RootedId id(cx, id_);
 
-  AutoKeepShapeTables keep(cx);
+  AutoKeepShapeCaches keep(cx);
   ShapeTable* table;
   ShapeTable::Entry* entry;
   RootedShape shape(cx);
   if (!Shape::search(cx, obj->lastProperty(), id, keep, shape.address(), &table,
                      &entry)) {
     return false;
   }
 
@@ -1458,21 +1519,17 @@ static void AssertValidArrayIndex(Native
   StackBaseShape base(last);
   base.flags |= flags;
 
   RootedShape lastRoot(cx, last);
   return replaceLastProperty(cx, base, proto, lastRoot);
 }
 
 inline BaseShape::BaseShape(const StackBaseShape& base)
-    : clasp_(base.clasp),
-      flags(base.flags),
-      slotSpan_(0),
-      unowned_(nullptr),
-      table_(nullptr) {}
+    : clasp_(base.clasp), flags(base.flags), slotSpan_(0), unowned_(nullptr) {}
 
 /* static */ void BaseShape::copyFromUnowned(BaseShape& dest,
                                              UnownedBaseShape& src) {
   dest.clasp_ = src.clasp_;
   dest.slotSpan_ = src.slotSpan_;
   dest.unowned_ = &src;
   dest.flags = src.flags | OWNED_SHAPE;
 }
@@ -1520,57 +1577,55 @@ void BaseShape::assertConsistency() {
   if (isOwned()) {
     UnownedBaseShape* unowned = baseUnowned();
     MOZ_ASSERT(getObjectFlags() == unowned->getObjectFlags());
   }
 #endif
 }
 
 void BaseShape::traceChildren(JSTracer* trc) {
-  traceChildrenSkipShapeTable(trc);
-  traceShapeTable(trc);
+  traceChildrenSkipShapeCache(trc);
+  traceShapeCache(trc);
 }
 
-void BaseShape::traceChildrenSkipShapeTable(JSTracer* trc) {
+void BaseShape::traceChildrenSkipShapeCache(JSTracer* trc) {
   if (isOwned()) {
     TraceEdge(trc, &unowned_, "base");
   }
 
   assertConsistency();
 }
 
-void BaseShape::traceShapeTable(JSTracer* trc) {
+void BaseShape::traceShapeCache(JSTracer* trc) {
   AutoCheckCannotGC nogc;
-  if (ShapeTable* table = maybeTable(nogc)) {
-    table->trace(trc);
-  }
+  cache_.trace(trc);
 }
 
 #ifdef DEBUG
-bool BaseShape::canSkipMarkingShapeTable(Shape* lastShape) {
+bool BaseShape::canSkipMarkingShapeCache(Shape* lastShape) {
   // Check that every shape in the shape table will be marked by marking
   // |lastShape|.
-
   AutoCheckCannotGC nogc;
-  ShapeTable* table = maybeTable(nogc);
-  if (!table) {
+  ShapeCachePtr cache = getCache(nogc);
+  if (!cache.isTable()) {
     return true;
   }
 
   uint32_t count = 0;
   for (Shape::Range<NoGC> r(lastShape); !r.empty(); r.popFront()) {
     Shape* shape = &r.front();
     ShapeTable::Entry& entry =
-        table->search<MaybeAdding::NotAdding>(shape->propid(), nogc);
+        cache.getTablePointer()->search<MaybeAdding::NotAdding>(shape->propid(),
+                                                                nogc);
     if (entry.isLive()) {
       count++;
     }
   }
 
-  return count == table->entryCount();
+  return count == cache.getTablePointer()->entryCount();
 }
 #endif
 
 #ifdef JSGC_HASH_TABLE_CHECKS
 
 void Zone::checkBaseShapeTableAfterMovingGC() {
   for (auto r = baseShapes().all(); !r.empty(); r.popFront()) {
     UnownedBaseShape* base = r.front().unbarrieredGet();
@@ -1579,19 +1634,18 @@ void Zone::checkBaseShapeTableAfterMovin
     BaseShapeSet::Ptr ptr = baseShapes().lookup(base);
     MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
   }
 }
 
 #endif  // JSGC_HASH_TABLE_CHECKS
 
 void BaseShape::finalize(FreeOp* fop) {
-  if (table_) {
-    fop->delete_(table_);
-    table_ = nullptr;
+  if (cache_.isInitialized()) {
+    cache_.destroy(fop);
   }
 }
 
 inline InitialShapeEntry::InitialShapeEntry() : shape(nullptr), proto() {}
 
 inline InitialShapeEntry::InitialShapeEntry(Shape* shape,
                                             const TaggedProto& proto)
     : shape(shape), proto(proto) {}
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -16,16 +16,17 @@
 #include "mozilla/TemplateLib.h"
 
 #include "jsapi.h"
 #include "jsfriendapi.h"
 #include "jstypes.h"
 #include "NamespaceImports.h"
 
 #include "gc/Barrier.h"
+#include "gc/FreeOp.h"
 #include "gc/Heap.h"
 #include "gc/Rooting.h"
 #include "js/HashTable.h"
 #include "js/MemoryMetrics.h"
 #include "js/RootingAPI.h"
 #include "js/UbiNode.h"
 #include "vm/JSAtom.h"
 #include "vm/ObjectGroup.h"
@@ -87,19 +88,19 @@
  * lineage has (excluding the EmptyShape) at least MIN_ENTRIES, we create an
  * auxiliary hash table -- the ShapeTable -- that allows faster lookup.
  * Furthermore, a ShapeTable is always created for dictionary mode lists,
  * and it is attached to the last Shape in the lineage. Shape tables for
  * property tree Shapes never change, but shape tables for dictionary mode
  * Shapes can grow and shrink.
  *
  * To save memory, shape tables can be discarded on GC and recreated when
- * needed. AutoKeepShapeTables can be used to avoid discarding shape tables
+ * needed. AutoKeepShapeCaches can be used to avoid discarding shape tables
  * for a particular zone. Methods operating on ShapeTables take either an
- * AutoCheckCannotGC or AutoKeepShapeTables argument, to help ensure tables
+ * AutoCheckCannotGC or AutoKeepShapeCaches argument, to help ensure tables
  * are not purged while we're using them.
  *
  * There used to be a long, math-heavy comment here explaining why property
  * trees are more space-efficient than alternatives.  This was removed in bug
  * 631138; see that bug for the full details.
  *
  * For getters/setters, an AccessorShape is allocated. This is a slightly fatter
  * type with extra fields for the getter/setter data.
@@ -206,28 +207,93 @@ typedef JSGetterOp GetterOp;
 typedef JSSetterOp SetterOp;
 
 /* Limit on the number of slotful properties in an object. */
 static const uint32_t SHAPE_INVALID_SLOT = JS_BIT(24) - 1;
 static const uint32_t SHAPE_MAXIMUM_SLOT = JS_BIT(24) - 2;
 
 enum class MaybeAdding { Adding = true, NotAdding = false };
 
-class AutoKeepShapeTables;
+class AutoKeepShapeCaches;
 
 /*
- * Shapes use multiplicative hashing, but specialized to
+ * ShapeIC uses a small array that is linearly searched.
+ */
+class ShapeIC {
+ public:
+  friend class NativeObject;
+  friend class BaseShape;
+  friend class Shape;
+
+  ShapeIC() : size_(0), nextFreeIndex_(0), entries_(nullptr) {}
+
+  ~ShapeIC() = default;
+
+  bool isFull() const {
+    MOZ_ASSERT(nextFreeIndex_ <= size_);
+    return size_ == nextFreeIndex_;
+  }
+
+  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+    return mallocSizeOf(this) + mallocSizeOf(entries_.get());
+  }
+
+  uint32_t entryCount() { return nextFreeIndex_; }
+
+  bool init(JSContext* cx);
+  void trace(JSTracer* trc);
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+  void checkAfterMovingGC();
+#endif
+
+  MOZ_ALWAYS_INLINE bool search(jsid id, Shape** foundShape);
+
+  MOZ_ALWAYS_INLINE bool appendEntry(jsid id, Shape* shape) {
+    MOZ_ASSERT(nextFreeIndex_ <= size_);
+    if (nextFreeIndex_ == size_) {
+      return false;
+    }
+
+    entries_[nextFreeIndex_].id_ = id;
+    entries_[nextFreeIndex_].shape_ = shape;
+    nextFreeIndex_++;
+    return true;
+  }
+
+ private:
+  static const uint32_t MAX_SIZE = 7;
+
+  class Entry {
+   public:
+    jsid id_;
+    Shape* shape_;
+
+    Entry() = delete;
+    Entry(const Entry&) = delete;
+    Entry& operator=(const Entry&) = delete;
+  };
+
+  uint8_t size_;
+  uint8_t nextFreeIndex_;
+
+  /* inline array of {jsid, Shape*} pairs */
+  UniquePtr<Entry[], JS::FreePolicy> entries_;
+};
+
+/*
+ * ShapeTable uses multiplicative hashing, but specialized to
  * minimize footprint.
  */
 class ShapeTable {
  public:
   friend class NativeObject;
   friend class BaseShape;
   friend class Shape;
-  static const uint32_t MIN_ENTRIES = 11;
+  friend class ShapeCachePtr;
 
   class Entry {
     // js::Shape pointer tag bit indicating a collision.
     static const uintptr_t SHAPE_COLLISION = 1;
     static Shape* const SHAPE_REMOVED;  // = SHAPE_COLLISION
 
     Shape* shape_;
 
@@ -277,54 +343,55 @@ class ShapeTable {
 
   uint32_t entryCount_;   /* number of entries in table */
   uint32_t removedCount_; /* removed entry sentinels in table */
 
   uint32_t freeList_; /* SHAPE_INVALID_SLOT or head of slot
                          freelist in owning dictionary-mode
                          object */
 
-  Entry* entries_; /* table of ptrs to shared tree nodes */
+  UniquePtr<Entry[], JS::FreePolicy>
+      entries_; /* table of ptrs to shared tree nodes */
 
   template <MaybeAdding Adding>
   MOZ_ALWAYS_INLINE Entry& searchUnchecked(jsid id);
 
  public:
   explicit ShapeTable(uint32_t nentries)
       : hashShift_(HASH_BITS - MIN_SIZE_LOG2),
         entryCount_(nentries),
         removedCount_(0),
         freeList_(SHAPE_INVALID_SLOT),
         entries_(nullptr) {
     /* NB: entries is set by init, which must be called. */
   }
 
-  ~ShapeTable() { js_free(entries_); }
+  ~ShapeTable() = default;
 
   uint32_t entryCount() const { return entryCount_; }
 
   uint32_t freeList() const { return freeList_; }
   void setFreeList(uint32_t slot) { freeList_ = slot; }
 
   /*
    * This counts the ShapeTable object itself (which must be
    * heap-allocated) and its |entries| array.
    */
   size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
-    return mallocSizeOf(this) + mallocSizeOf(entries_);
+    return mallocSizeOf(this) + mallocSizeOf(entries_.get());
   }
 
   // init() is fallible and reports OOM to the context.
   bool init(JSContext* cx, Shape* lastProp);
 
   // change() is fallible but does not report OOM.
   bool change(JSContext* cx, int log2Delta);
 
   template <MaybeAdding Adding>
-  MOZ_ALWAYS_INLINE Entry& search(jsid id, const AutoKeepShapeTables&);
+  MOZ_ALWAYS_INLINE Entry& search(jsid id, const AutoKeepShapeCaches&);
 
   template <MaybeAdding Adding>
   MOZ_ALWAYS_INLINE Entry& search(jsid id, const JS::AutoCheckCannotGC&);
 
   void trace(JSTracer* trc);
 #ifdef JSGC_HASH_TABLE_CHECKS
   void checkAfterMovingGC();
 #endif
@@ -358,27 +425,161 @@ class ShapeTable {
   }
 
   // Try to grow the table.  On failure, reports out of memory on cx
   // and returns false.  This will make any extant pointers into the
   // table invalid.  Don't call this unless needsToGrow() is true.
   bool grow(JSContext* cx);
 };
 
+/*
+ * Wrapper class pointing to either a ShapeTable or a ShapeIC.
+ *
+ * Shapes are initially cached in a small linear cache (a ShapeIC) that is
+ * lazily allocated once LINEAR_SEARCHES_MAX searches have been reached and
+ * the Shape has at least MIN_ENTRIES parents in its lineage.
+ *
+ * The population of the cache indicates whether the ShapeIC is effective:
+ * once it is full, it is destroyed and a ShapeTable is created instead.
+ *
+ * For dictionaries, the linear cache is skipped entirely and hashify is
+ * used to generate the ShapeTable immediately.
+ */
+class ShapeCachePtr {
+  // To reduce impact on memory usage, p is the only data member for this class.
+  uintptr_t p;
+
+  enum class CacheType {
+    IC = 0x1,
+    Table = 0x2,
+  };
+
+  static const uint32_t MASK_BITS = 0x3;
+  static const uintptr_t CACHETYPE_MASK = 0x3;
+
+  void* getPointer() const {
+    uintptr_t ptrVal = p & ~CACHETYPE_MASK;
+    return reinterpret_cast<void*>(ptrVal);
+  }
+
+  CacheType getType() const {
+    return static_cast<CacheType>(p & CACHETYPE_MASK);
+  }
+
+ public:
+  static const uint32_t MIN_ENTRIES = 3;
+
+  ShapeCachePtr() : p(0) {}
+
+  template <MaybeAdding Adding>
+  MOZ_ALWAYS_INLINE bool search(jsid id, Shape* start, Shape** foundShape);
+
+  bool isIC() const { return (getType() == CacheType::IC); }
+  bool isTable() const { return (getType() == CacheType::Table); }
+  bool isInitialized() const { return isTable() || isIC(); }
+
+  ShapeTable* getTablePointer() const {
+    MOZ_ASSERT(isTable());
+    return reinterpret_cast<ShapeTable*>(getPointer());
+  }
+
+  ShapeIC* getICPointer() const {
+    MOZ_ASSERT(isIC());
+    return reinterpret_cast<ShapeIC*>(getPointer());
+  }
+
+  // Use ShapeTable implementation.
+  // This will clobber an existing IC implementation.
+  void initializeTable(ShapeTable* table) {
+    MOZ_ASSERT(!isTable());
+    maybePurgeCache();
+
+    uintptr_t tableptr = uintptr_t(table);
+
+    // Double check that pointer is 4 byte aligned.
+    MOZ_ASSERT((tableptr & CACHETYPE_MASK) == 0);
+
+    tableptr |= static_cast<uintptr_t>(CacheType::Table);
+    p = tableptr;
+  }
+
+  // Use ShapeIC implementation.
+  // This cannot clobber an existing Table implementation.
+  void initializeIC(ShapeIC* ic) {
+    MOZ_ASSERT(!isTable() && !isIC());
+
+    uintptr_t icptr = uintptr_t(ic);
+
+    // Double check that pointer is 4 byte aligned.
+    MOZ_ASSERT((icptr & CACHETYPE_MASK) == 0);
+
+    icptr |= static_cast<uintptr_t>(CacheType::IC);
+    p = icptr;
+  }
+
+  void destroy(FreeOp* fop) {
+    if (isTable()) {
+      fop->delete_<ShapeTable>(getTablePointer());
+    } else if (isIC()) {
+      fop->delete_<ShapeIC>(getICPointer());
+    }
+    p = 0;
+  }
+
+  void maybePurgeCache() {
+    if (isTable()) {
+      ShapeTable* table = getTablePointer();
+      if (table->freeList() == SHAPE_INVALID_SLOT) {
+        js_delete<ShapeTable>(getTablePointer());
+        p = 0;
+      }
+    } else if (isIC()) {
+      js_delete<ShapeIC>(getICPointer());
+      p = 0;
+    }
+  }
+
+  void trace(JSTracer* trc);
+
+  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+    size_t size = 0;
+    if (isIC()) {
+      size = getICPointer()->sizeOfIncludingThis(mallocSizeOf);
+    } else if (isTable()) {
+      size = getTablePointer()->sizeOfIncludingThis(mallocSizeOf);
+    }
+    return size;
+  }
+
+  uint32_t entryCount() {
+    uint32_t count = 0;
+    if (isIC()) {
+      count = getICPointer()->entryCount();
+    } else if (isTable()) {
+      count = getTablePointer()->entryCount();
+    }
+    return count;
+  }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+  void checkAfterMovingGC();
+#endif
+};
+
 // Ensures no shape tables are purged in the current zone.
-class MOZ_RAII AutoKeepShapeTables {
+class MOZ_RAII AutoKeepShapeCaches {
   JSContext* cx_;
   bool prev_;
 
-  AutoKeepShapeTables(const AutoKeepShapeTables&) = delete;
-  void operator=(const AutoKeepShapeTables&) = delete;
-
  public:
-  explicit inline AutoKeepShapeTables(JSContext* cx);
-  inline ~AutoKeepShapeTables();
+  void operator=(const AutoKeepShapeCaches&) = delete;
+  AutoKeepShapeCaches(const AutoKeepShapeCaches&) = delete;
+  explicit inline AutoKeepShapeCaches(JSContext* cx);
+  inline ~AutoKeepShapeCaches();
 };
 
 /*
  * Shapes encode information about both a property lineage *and* a particular
  * property. This information is split across the Shape and the BaseShape
  * at shape->base(). Both Shape and BaseShape can be either owned or unowned
  * by, respectively, the Object or Shape referring to them.
  *
@@ -476,17 +677,17 @@ class BaseShape : public gc::TenuredCell
   uint32_t flags;      /* Vector of above flags. */
   uint32_t slotSpan_;  /* Object slot span for BaseShapes at
                         * dictionary last properties. */
 
   /* For owned BaseShapes, the canonical unowned BaseShape. */
   GCPtrUnownedBaseShape unowned_;
 
   /* For owned BaseShapes, the shape's shape table. */
-  ShapeTable* table_;
+  ShapeCachePtr cache_;
 
   BaseShape(const BaseShape& base) = delete;
   BaseShape& operator=(const BaseShape& other) = delete;
 
  public:
   void finalize(FreeOp* fop);
 
   explicit inline BaseShape(const StackBaseShape& base);
@@ -504,39 +705,67 @@ class BaseShape : public gc::TenuredCell
   void setOwned(UnownedBaseShape* unowned) {
     flags |= OWNED_SHAPE;
     unowned_ = unowned;
   }
 
   uint32_t getObjectFlags() const { return flags & OBJECT_FLAG_MASK; }
 
   bool hasTable() const {
-    MOZ_ASSERT_IF(table_, isOwned());
-    return table_ != nullptr;
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return cache_.isTable();
   }
+
+  bool hasIC() const {
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return cache_.isIC();
+  }
+
   void setTable(ShapeTable* table) {
     MOZ_ASSERT(isOwned());
-    table_ = table;
+    cache_.initializeTable(table);
+  }
+
+  void setIC(ShapeIC* ic) {
+    MOZ_ASSERT(isOwned());
+    cache_.initializeIC(ic);
+  }
+
+  ShapeCachePtr getCache(const AutoKeepShapeCaches&) const {
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return cache_;
   }
 
-  ShapeTable* maybeTable(const AutoKeepShapeTables&) const {
-    MOZ_ASSERT_IF(table_, isOwned());
-    return table_;
+  ShapeCachePtr getCache(const JS::AutoCheckCannotGC&) const {
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return cache_;
   }
+
+  ShapeTable* maybeTable(const AutoKeepShapeCaches&) const {
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return (cache_.isTable()) ? cache_.getTablePointer() : nullptr;
+  }
+
   ShapeTable* maybeTable(const JS::AutoCheckCannotGC&) const {
-    MOZ_ASSERT_IF(table_, isOwned());
-    return table_;
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return (cache_.isTable()) ? cache_.getTablePointer() : nullptr;
+  }
+
+  ShapeIC* maybeIC(const AutoKeepShapeCaches&) const {
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return (cache_.isIC()) ? cache_.getICPointer() : nullptr;
   }
-  void maybePurgeTable() {
-    if (table_ && table_->freeList() == SHAPE_INVALID_SLOT) {
-      js_delete(table_);
-      table_ = nullptr;
-    }
+
+  ShapeIC* maybeIC(const JS::AutoCheckCannotGC&) const {
+    MOZ_ASSERT_IF(cache_.isInitialized(), isOwned());
+    return (cache_.isIC()) ? cache_.getICPointer() : nullptr;
   }
 
+  void maybePurgeCache() { cache_.maybePurgeCache(); }
+
   uint32_t slotSpan() const {
     MOZ_ASSERT(isOwned());
     return slotSpan_;
   }
   void setSlotSpan(uint32_t slotSpan) {
     MOZ_ASSERT(isOwned());
     slotSpan_ = slotSpan;
   }
@@ -560,32 +789,32 @@ class BaseShape : public gc::TenuredCell
   void assertConsistency();
 
   /* For JIT usage */
   static inline size_t offsetOfFlags() { return offsetof(BaseShape, flags); }
 
   static const JS::TraceKind TraceKind = JS::TraceKind::BaseShape;
 
   void traceChildren(JSTracer* trc);
-  void traceChildrenSkipShapeTable(JSTracer* trc);
+  void traceChildrenSkipShapeCache(JSTracer* trc);
 
 #ifdef DEBUG
-  bool canSkipMarkingShapeTable(Shape* lastShape);
+  bool canSkipMarkingShapeCache(Shape* lastShape);
 #endif
 
  private:
   static void staticAsserts() {
     JS_STATIC_ASSERT(offsetof(BaseShape, clasp_) ==
                      offsetof(js::shadow::BaseShape, clasp_));
     static_assert(sizeof(BaseShape) % gc::CellAlignBytes == 0,
                   "Things inheriting from gc::Cell must have a size that's "
                   "a multiple of gc::CellAlignBytes");
   }
 
-  void traceShapeTable(JSTracer* trc);
+  void traceShapeCache(JSTracer* trc);
 };
 
 class UnownedBaseShape : public BaseShape {};
 
 UnownedBaseShape* BaseShape::unowned() {
   return isOwned() ? baseUnowned() : toUnowned();
 }
 
@@ -708,20 +937,21 @@ class Shape : public gc::TenuredCell {
     // getter/setter information.
     ACCESSOR_SHAPE = 1 << 30,
   };
 
   // Flags stored in mutableFlags.
   enum MutableFlags : uint8_t {
     // numLinearSearches starts at zero and is incremented initially on
     // search() calls. Once numLinearSearches reaches LINEAR_SEARCHES_MAX,
-    // the table is created on the next search() call. The table can also
-    // be created when hashifying for dictionary mode.
-    LINEAR_SEARCHES_MAX = 0x7,
-    LINEAR_SEARCHES_MASK = LINEAR_SEARCHES_MAX,
+    // the inline cache is created on the next search() call.  Once the
+    // cache is full, it is converted into a hash table. The hash table
+    // can also be created directly when hashifying for dictionary mode.
+    LINEAR_SEARCHES_MAX = 0x5,
+    LINEAR_SEARCHES_MASK = 0x7,
 
     // Slotful property was stored to more than once. This is used as a
     // hint for type inference.
     OVERWRITTEN = 0x08,
 
     // Flags used to speed up isBigEnoughForAShapeTable().
     HAS_CACHED_BIG_ENOUGH_FOR_SHAPE_TABLE = 0x10,
     CACHED_BIG_ENOUGH_FOR_SHAPE_TABLE = 0x20,
@@ -742,17 +972,17 @@ class Shape : public gc::TenuredCell {
                           last, else to obj->shape_ */
   };
 
   template <MaybeAdding Adding = MaybeAdding::NotAdding>
   static MOZ_ALWAYS_INLINE Shape* search(JSContext* cx, Shape* start, jsid id);
 
   template <MaybeAdding Adding = MaybeAdding::NotAdding>
   static inline MOZ_MUST_USE bool search(JSContext* cx, Shape* start, jsid id,
-                                         const AutoKeepShapeTables&,
+                                         const AutoKeepShapeCaches&,
                                          Shape** pshape, ShapeTable** ptable,
                                          ShapeTable::Entry** pentry);
 
   static inline Shape* searchNoHashify(Shape* start, jsid id);
 
   void removeFromDictionary(NativeObject* obj);
   void insertIntoDictionary(GCPtrShape* dictp);
 
@@ -765,16 +995,17 @@ class Shape : public gc::TenuredCell {
                                     TaggedProto proto, HandleShape shape);
 
   /*
    * This function is thread safe if every shape in the lineage of |shape|
    * is thread local, which is the case when we clone the entire shape
    * lineage in preparation for converting an object to dictionary mode.
    */
   static bool hashify(JSContext* cx, Shape* shape);
+  static bool cachify(JSContext* cx, Shape* shape);
   void handoffTableTo(Shape* newShape);
 
   void setParent(Shape* p) {
     MOZ_ASSERT_IF(p && !p->hasMissingSlot() && !inDictionary(),
                   p->maybeSlot() <= maybeSlot());
     MOZ_ASSERT_IF(p && !inDictionary(),
                   isDataProperty() == (p->maybeSlot() != maybeSlot()));
     parent = p;
@@ -784,31 +1015,51 @@ class Shape : public gc::TenuredCell {
     if (base()->isOwned()) {
       return true;
     }
     return makeOwnBaseShape(cx);
   }
 
   bool makeOwnBaseShape(JSContext* cx);
 
-  MOZ_ALWAYS_INLINE MOZ_MUST_USE bool maybeCreateTableForLookup(JSContext* cx);
+  MOZ_ALWAYS_INLINE MOZ_MUST_USE bool maybeCreateCacheForLookup(JSContext* cx);
 
   MOZ_ALWAYS_INLINE void updateDictionaryTable(ShapeTable* table,
                                                ShapeTable::Entry* entry,
-                                               const AutoKeepShapeTables& keep);
+                                               const AutoKeepShapeCaches& keep);
 
  public:
   bool hasTable() const { return base()->hasTable(); }
+  bool hasIC() const { return base()->hasIC(); }
 
-  ShapeTable* maybeTable(const AutoKeepShapeTables& keep) const {
+  ShapeIC* maybeIC(const AutoKeepShapeCaches& keep) const {
+    return base()->maybeIC(keep);
+  }
+  ShapeIC* maybeIC(const JS::AutoCheckCannotGC& check) const {
+    return base()->maybeIC(check);
+  }
+  ShapeTable* maybeTable(const AutoKeepShapeCaches& keep) const {
     return base()->maybeTable(keep);
   }
   ShapeTable* maybeTable(const JS::AutoCheckCannotGC& check) const {
     return base()->maybeTable(check);
   }
+  ShapeCachePtr getCache(const AutoKeepShapeCaches& keep) const {
+    return base()->getCache(keep);
+  }
+  ShapeCachePtr getCache(const JS::AutoCheckCannotGC& check) const {
+    return base()->getCache(check);
+  }
+
+  bool appendShapeToIC(jsid id, Shape* shape,
+                       const JS::AutoCheckCannotGC& check) {
+    MOZ_ASSERT(hasIC());
+    ShapeCachePtr cache = getCache(check);
+    return cache.getICPointer()->appendEntry(id, shape);
+  }
 
   template <typename T>
   MOZ_MUST_USE ShapeTable* ensureTableForDictionary(JSContext* cx,
                                                     const T& nogc) {
     MOZ_ASSERT(inDictionary());
     if (ShapeTable* table = maybeTable(nogc)) {
       return table;
     }
@@ -818,24 +1069,22 @@ class Shape : public gc::TenuredCell {
     ShapeTable* table = maybeTable(nogc);
     MOZ_ASSERT(table);
     return table;
   }
 
   void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
                               JS::ShapeInfo* info) const {
     JS::AutoCheckCannotGC nogc;
-    if (ShapeTable* table = maybeTable(nogc)) {
-      if (inDictionary()) {
-        info->shapesMallocHeapDictTables +=
-            table->sizeOfIncludingThis(mallocSizeOf);
-      } else {
-        info->shapesMallocHeapTreeTables +=
-            table->sizeOfIncludingThis(mallocSizeOf);
-      }
+    if (inDictionary()) {
+      info->shapesMallocHeapDictTables +=
+          getCache(nogc).sizeOfExcludingThis(mallocSizeOf);
+    } else {
+      info->shapesMallocHeapTreeTables +=
+          getCache(nogc).sizeOfExcludingThis(mallocSizeOf);
     }
 
     if (!inDictionary() && kids.isHash()) {
       info->shapesMallocHeapTreeKids +=
           kids.toHash()->shallowSizeOfIncludingThis(mallocSizeOf);
     }
   }
 
@@ -1078,17 +1327,17 @@ class Shape : public gc::TenuredCell {
     return count;
   }
 
  private:
   bool isBigEnoughForAShapeTableSlow() {
     uint32_t count = 0;
     for (Shape::Range<NoGC> r(this); !r.empty(); r.popFront()) {
       ++count;
-      if (count >= ShapeTable::MIN_ENTRIES) {
+      if (count >= ShapeCachePtr::MIN_ENTRIES) {
         return true;
       }
     }
     return false;
   }
   void clearCachedBigEnoughForShapeTable() {
     mutableFlags &= ~(HAS_CACHED_BIG_ENOUGH_FOR_SHAPE_TABLE |
                       CACHED_BIG_ENOUGH_FOR_SHAPE_TABLE);
@@ -1512,16 +1761,47 @@ inline Shape* Shape::searchLinear(jsid i
 }
 
 inline bool Shape::matches(const StackShape& other) const {
   return propid_.get() == other.propid &&
          matchesParamsAfterId(other.base, other.maybeSlot(), other.attrs,
                               other.rawGetter, other.rawSetter);
 }
 
+template <MaybeAdding Adding>
+MOZ_ALWAYS_INLINE bool ShapeCachePtr::search(jsid id, Shape* start,
+                                             Shape** foundShape) {
+  bool found = false;
+  if (isIC()) {
+    ShapeIC* ic = getICPointer();
+    found = ic->search(id, foundShape);
+  } else if (isTable()) {
+    ShapeTable* table = getTablePointer();
+    ShapeTable::Entry& entry = table->searchUnchecked<Adding>(id);
+    *foundShape = entry.shape();
+    found = true;
+  }
+  return found;
+}
+
+MOZ_ALWAYS_INLINE bool ShapeIC::search(jsid id, Shape** foundShape) {
+  // This loop needs to be as fast as possible, so use a direct pointer
+  // to the array instead of going through the UniquePtr methods.
+  Entry* entriesArray = entries_.get();
+  for (uint8_t i = 0; i < nextFreeIndex_; i++) {
+    Entry& entry = entriesArray[i];
+    if (entry.id_ == id) {
+      *foundShape = entry.shape_;
+      return true;
+    }
+  }
+
+  return false;
+}
+
 Shape* ReshapeForAllocKind(JSContext* cx, Shape* shape, TaggedProto proto,
                            gc::AllocKind allocKind);
 
 }  // namespace js
 
 // JS::ubi::Nodes can point to Shapes and BaseShapes; they're js::gc::Cell
 // instances that occupy a compartment.
 namespace JS {
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -4753,27 +4753,27 @@ void TypeScript::destroy(Zone* zone) {
   icScript_->prepareForDestruction(zone);
 
   js_delete(this);
 }
 
 void Zone::addSizeOfIncludingThis(
     mozilla::MallocSizeOf mallocSizeOf, size_t* typePool, size_t* regexpZone,
     size_t* jitZone, size_t* baselineStubsOptimized, size_t* cachedCFG,
-    size_t* uniqueIdMap, size_t* shapeTables, size_t* atomsMarkBitmaps,
+    size_t* uniqueIdMap, size_t* shapeCaches, size_t* atomsMarkBitmaps,
     size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
     size_t* compartmentsPrivateData) {
   *typePool += types.typeLifoAlloc().sizeOfExcludingThis(mallocSizeOf);
   *regexpZone += regExps().sizeOfExcludingThis(mallocSizeOf);
   if (jitZone_) {
     jitZone_->addSizeOfIncludingThis(mallocSizeOf, jitZone,
                                      baselineStubsOptimized, cachedCFG);
   }
   *uniqueIdMap += uniqueIds().shallowSizeOfExcludingThis(mallocSizeOf);
-  *shapeTables += baseShapes().sizeOfExcludingThis(mallocSizeOf) +
+  *shapeCaches += baseShapes().sizeOfExcludingThis(mallocSizeOf) +
                   initialShapes().sizeOfExcludingThis(mallocSizeOf);
   *atomsMarkBitmaps += markedAtoms().sizeOfExcludingThis(mallocSizeOf);
 
   for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
     comp->addSizeOfIncludingThis(mallocSizeOf, compartmentObjects,
                                  crossCompartmentWrappersTables,
                                  compartmentsPrivateData);
   }