Bug 1359342 - Record objects and groups that need to be barriered after being read from type sets r=nbp
authorJon Coppeard <jcoppeard@mozilla.com>
Tue, 27 Feb 2018 12:14:47 +0000
changeset 405571 87e6042a409d0cd33d5fbf7a2abda73c046e796a
parent 405570 c3d6247ece759ea353bf49815b3688406e6e37bf
child 405572 f361cfca3755aae660df9131540d3b469f9e29e6
push id33524
push userapavel@mozilla.com
push dateTue, 27 Feb 2018 22:24:31 +0000
treeherdermozilla-central@529754159078 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersnbp
bugs1359342
milestone60.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1359342 - Record objects and groups that need to be barriered after being read from type sets r=nbp
js/src/jit/Linker.cpp
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/vm/TypeInference-inl.h
js/src/vm/TypeInference.h
--- a/js/src/jit/Linker.cpp
+++ b/js/src/jit/Linker.cpp
@@ -17,16 +17,18 @@ JitCode*
 Linker::newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges /* = false */)
 {
     MOZ_ASSERT_IF(hasPatchableBackedges, kind == CodeKind::Ion);
 
     JS::AutoAssertNoGC nogc(cx);
     if (masm.oom())
         return fail(cx);
 
+    masm.performPendingReadBarriers();
+
     static const size_t ExecutableAllocatorAlignment = sizeof(void*);
     static_assert(CodeAlignment >= ExecutableAllocatorAlignment,
                   "Unexpected alignment requirements");
 
     // We require enough bytes for the code, header, and worst-case alignment padding.
     size_t bytesNeeded = masm.bytesNeeded() +
                          sizeof(JitCodeHeader) +
                          (CodeAlignment - ExecutableAllocatorAlignment);
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -24,16 +24,17 @@
 #include "js/Conversions.h"
 #include "js/Printf.h"
 #include "vm/TraceLogging.h"
 
 #include "gc/Nursery-inl.h"
 #include "jit/shared/Lowering-shared-inl.h"
 #include "vm/Interpreter-inl.h"
 #include "vm/JSObject-inl.h"
+#include "vm/TypeInference-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using JS::GenericNaN;
 using JS::ToInt32;
 
 using mozilla::CheckedUint32;
@@ -194,20 +195,20 @@ MacroAssembler::guardTypeSetMightBeIncom
 
     loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
     load32(Address(scratch, ObjectGroup::offsetOfFlags()), scratch);
     and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
     branch32(Assembler::Equal,
              scratch, Imm32(ObjectGroup::addendumOriginalUnboxedGroupValue()), label);
 
     for (size_t i = 0; i < types->getObjectCount(); i++) {
-        if (JSObject* singleton = types->getSingletonNoBarrier(i)) {
+        if (JSObject* singleton = getSingletonAndDelayBarrier(types, i)) {
             movePtr(ImmGCPtr(singleton), scratch);
             loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
-        } else if (ObjectGroup* group = types->getGroupNoBarrier(i)) {
+        } else if (ObjectGroup* group = getGroupAndDelayBarrier(types, i)) {
             movePtr(ImmGCPtr(group), scratch);
         } else {
             continue;
         }
         branchTest32(Assembler::NonZero, Address(scratch, ObjectGroup::offsetOfFlags()),
                      Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
     }
 }
@@ -231,36 +232,36 @@ MacroAssembler::guardObjectType(Register
     Label matched;
 
     bool hasSingletons = false;
     bool hasObjectGroups = false;
     unsigned numBranches = 0;
 
     unsigned count = types->getObjectCount();
     for (unsigned i = 0; i < count; i++) {
-        if (types->getGroupNoBarrier(i)) {
+        if (types->hasGroup(i)) {
             hasObjectGroups = true;
             numBranches++;
-        } else if (types->getSingletonNoBarrier(i)) {
+        } else if (types->hasSingleton(i)) {
             hasSingletons = true;
             numBranches++;
         }
     }
 
     if (numBranches == 0) {
         jump(miss);
         return;
     }
 
     if (JitOptions.spectreObjectMitigationsBarriers)
         move32(Imm32(0), scratch);
 
     if (hasSingletons) {
         for (unsigned i = 0; i < count; i++) {
-            JSObject* singleton = types->getSingletonNoBarrier(i);
+            JSObject* singleton = getSingletonAndDelayBarrier(types, i);
             if (!singleton)
                 continue;
 
             if (JitOptions.spectreObjectMitigationsBarriers) {
                 if (--numBranches > 0) {
                     Label next;
                     branchPtr(NotEqual, obj, ImmGCPtr(singleton), &next);
                     spectreMovePtr(NotEqual, scratch, spectreRegToZero);
@@ -284,20 +285,20 @@ MacroAssembler::guardObjectType(Register
 
         // If Spectre mitigations are enabled, we use the scratch register as
         // zero register. Without mitigations we can use it to store the group.
         Address groupAddr(obj, JSObject::offsetOfGroup());
         if (!JitOptions.spectreObjectMitigationsBarriers)
             loadPtr(groupAddr, scratch);
 
         for (unsigned i = 0; i < count; i++) {
-            ObjectGroup* group = types->getGroupNoBarrier(i);
+            ObjectGroup* group = getGroupAndDelayBarrier(types, i);
             if (!group)
                 continue;
 
             if (JitOptions.spectreObjectMitigationsBarriers) {
                 if (--numBranches > 0) {
                     Label next;
                     branchPtr(NotEqual, groupAddr, ImmGCPtr(group), &next);
                     spectreMovePtr(NotEqual, scratch, spectreRegToZero);
                     jump(&matched);
                     bind(&next);
                 } else {
@@ -3617,16 +3623,68 @@ MacroAssembler::boundsCheck32PowerOfTwo(
     branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);
 
     // Note: it's fine to clobber the input register, as this is a no-op: it
     // only affects speculative execution.
     if (JitOptions.spectreIndexMasking)
         and32(Imm32(length - 1), index);
 }
 
+template <typename T, size_t N, typename P>
+static bool
+AddPendingReadBarrier(Vector<T*, N, P>& list, T* value)
+{
+    // Check if value is already present in tail of list.
+    // TODO: Consider using a hash table here.
+    const size_t TailWindow = 4;
+
+    size_t len = list.length();
+    for (size_t i = 0; i < Min(len, TailWindow); i++) {
+        if (list[len - i - 1] == value)
+            return true;
+    }
+
+    return list.append(value);
+}
+
+JSObject*
+MacroAssembler::getSingletonAndDelayBarrier(const TypeSet* types, size_t i)
+{
+    JSObject* object = types->getSingletonNoBarrier(i);
+    if (!object)
+        return nullptr;
+
+    if (!AddPendingReadBarrier(pendingObjectReadBarriers_, object))
+        setOOM();
+
+    return object;
+}
+
+ObjectGroup*
+MacroAssembler::getGroupAndDelayBarrier(const TypeSet* types, size_t i)
+{
+    ObjectGroup* group = types->getGroupNoBarrier(i);
+    if (!group)
+        return nullptr;
+
+    if (!AddPendingReadBarrier(pendingObjectGroupReadBarriers_, group))
+        setOOM();
+
+    return group;
+}
+
+void
+MacroAssembler::performPendingReadBarriers()
+{
+    for (JSObject* object : pendingObjectReadBarriers_)
+        JSObject::readBarrier(object);
+    for (ObjectGroup* group : pendingObjectGroupReadBarriers_)
+        ObjectGroup::readBarrier(group);
+}
+
 namespace js {
 namespace jit {
 
 #ifdef DEBUG
 template <class RegisterType>
 AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
   : RegisterType(reg), masm_(masm), released_(false)
 {
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -2612,16 +2612,28 @@ class MacroAssembler : public MacroAssem
 
     // Align the stack pointer based on the number of arguments which are pushed
     // on the stack, such that the JitFrameLayout would be correctly aligned on
     // the JitStackAlignment.
     void alignJitStackBasedOnNArgs(Register nargs);
     void alignJitStackBasedOnNArgs(uint32_t nargs);
 
     inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);
+
+    void performPendingReadBarriers();
+
+  private:
+    // Methods to get a singleton object or object group from a type set without
+    // a read barrier, and record the result so that we can perform the barrier
+    // later.
+    JSObject* getSingletonAndDelayBarrier(const TypeSet* types, size_t i);
+    ObjectGroup* getGroupAndDelayBarrier(const TypeSet* types, size_t i);
+
+    Vector<JSObject*, 0, SystemAllocPolicy> pendingObjectReadBarriers_;
+    Vector<ObjectGroup*, 0, SystemAllocPolicy> pendingObjectGroupReadBarriers_;
 };
 
 //{{{ check_macroassembler_style
 inline uint32_t
 MacroAssembler::framePushed() const
 {
     return framePushed_;
 }
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -572,17 +572,17 @@ CodeGeneratorX86Shared::bailoutIf(Assemb
 {
     MOZ_ASSERT(Assembler::NaNCondFromDoubleCondition(condition) == Assembler::NaN_HandledByCond);
     bailoutIf(Assembler::ConditionFromDoubleCondition(condition), snapshot);
 }
 
 void
 CodeGeneratorX86Shared::bailoutFrom(Label* label, LSnapshot* snapshot)
 {
-    MOZ_ASSERT(label->used() && !label->bound());
+    MOZ_ASSERT_IF(!masm.oom(), label->used() && !label->bound());
     bailout(BailoutLabel(label), snapshot);
 }
 
 void
 CodeGeneratorX86Shared::bailout(LSnapshot* snapshot)
 {
     Label label;
     masm.jump(&label);
--- a/js/src/vm/TypeInference-inl.h
+++ b/js/src/vm/TypeInference-inl.h
@@ -1070,16 +1070,28 @@ TypeSet::getSingletonNoBarrier(unsigned 
 
 inline ObjectGroup*
 TypeSet::getGroupNoBarrier(unsigned i) const
 {
     ObjectKey* key = getObject(i);
     return (key && key->isGroup()) ? key->groupNoBarrier() : nullptr;
 }
 
+inline bool
+TypeSet::hasGroup(unsigned i) const
+{
+    return getGroupNoBarrier(i);
+}
+
+inline bool
+TypeSet::hasSingleton(unsigned i) const
+{
+    return getSingletonNoBarrier(i);
+}
+
 inline const Class*
 TypeSet::getObjectClass(unsigned i) const
 {
     if (JSObject* object = getSingleton(i))
         return object->getClass();
     if (ObjectGroup* group = getGroup(i))
         return group->clasp();
     return nullptr;
--- a/js/src/vm/TypeInference.h
+++ b/js/src/vm/TypeInference.h
@@ -446,19 +446,30 @@ class TypeSet
     template <class TypeListT> bool enumerateTypes(TypeListT* list) const;
 
     /*
      * Iterate through the objects in this set. getObjectCount overapproximates
      * in the hash case (see SET_ARRAY_SIZE in TypeInference-inl.h), and
      * getObject may return nullptr.
      */
     inline unsigned getObjectCount() const;
+    inline bool hasGroup(unsigned i) const;
+    inline bool hasSingleton(unsigned i) const;
     inline ObjectKey* getObject(unsigned i) const;
     inline JSObject* getSingleton(unsigned i) const;
     inline ObjectGroup* getGroup(unsigned i) const;
+
+    /*
+     * Get the objects in the set without triggering barriers. This is
+     * potentially unsafe and you should only call these methods if you know
+     * what you're doing. For JIT compilation, use the following methods
+     * instead:
+     *  - MacroAssembler::getSingletonAndDelayBarrier()
+     *  - MacroAssembler::getGroupAndDelayBarrier()
+     */
     inline JSObject* getSingletonNoBarrier(unsigned i) const;
     inline ObjectGroup* getGroupNoBarrier(unsigned i) const;
 
     /* The Class of an object in this set. */
     inline const Class* getObjectClass(unsigned i) const;
 
     bool canSetDefinite(unsigned slot) {
         // Note: the cast is required to work around an MSVC issue.