Bug 1522075 part 3 - Add MacroAssembler::guardedCallPreBarrierAnyZone for use in trampolines. r=djvj
author: Jan de Mooij <jdemooij@mozilla.com>
Thu, 24 Jan 2019 17:36:12 +0000
changeset 515316 eff43158adb80f26363c448b52f5d37dbfae7575
parent 515315 59e188c6e83500abb433777237af9ba231901445
child 515317 ced24b663e6593c79917f8af47256e2ea9ca83d1
push id: 1953
push user: ffxbld-merge
push date: Mon, 11 Mar 2019 12:10:20 +0000
treeherder: mozilla-release@9c35dcbaa899 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: djvj
bugs: 1522075
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1522075 part 3 - Add MacroAssembler::guardedCallPreBarrierAnyZone for use in trampolines. r=djvj When generating the interpreter, we assert in guardedCallPreBarrier because JitContext::realm is nullptr. This adds guardedCallPreBarrierAnyZone for that use case: it loads cx->zone dynamically instead of baking it in. Differential Revision: https://phabricator.services.mozilla.com/D17367
js/src/gc/Zone.h
js/src/jit/BaselineCompiler.cpp
js/src/jit/CompileWrappers.cpp
js/src/jit/CompileWrappers.h
js/src/jit/MacroAssembler-inl.h
js/src/jit/MacroAssembler.h
js/src/vm/JSContext.h
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -266,16 +266,20 @@ class Zone : public JS::shadow::Zone,
   // possibly at other times too.
   uint64_t gcNumber();
 
   void setNeedsIncrementalBarrier(bool needs);
   const uint32_t* addressOfNeedsIncrementalBarrier() const {
     return &needsIncrementalBarrier_;
   }
 
+  static constexpr size_t offsetOfNeedsIncrementalBarrier() {
+    return offsetof(Zone, needsIncrementalBarrier_);
+  }
+
   js::jit::JitZone* getJitZone(JSContext* cx) {
     return jitZone_ ? jitZone_ : createJitZone(cx);
   }
   js::jit::JitZone* jitZone() { return jitZone_; }
 
   bool isAtomsZone() const { return runtimeFromAnyThread()->isAtomsZone(this); }
   bool isSelfHostingZone() const {
     return runtimeFromAnyThread()->isSelfHostingZone(this);
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -3455,24 +3455,23 @@ bool BaselineCompilerCodeGen::emitFormal
   masm.loadPrivate(Address(reg, ArgumentsObject::getDataSlotOffset()), reg);
 
   // Load/store the argument.
   Address argAddr(reg, ArgumentsData::offsetOfArgs() + arg * sizeof(Value));
   if (op == JSOP_GETARG) {
     masm.loadValue(argAddr, R0);
     frame.push(R0);
   } else {
-    masm.guardedCallPreBarrier(argAddr, MIRType::Value);
+    Register temp = R1.scratchReg();
+    masm.guardedCallPreBarrierAnyZone(argAddr, MIRType::Value, temp);
     masm.loadValue(frame.addressOfStackValue(-1), R0);
     masm.storeValue(R0, argAddr);
 
     MOZ_ASSERT(frame.numUnsyncedSlots() == 0);
 
-    Register temp = R1.scratchReg();
-
     // Reload the arguments object
     Register reg = R2.scratchReg();
     masm.loadPtr(
         Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfArgsObj()),
         reg);
 
     Label skipBarrier;
 
@@ -4917,22 +4916,22 @@ bool BaselineCodeGen<Handler>::emit_JSOP
   Register genObj = R2.scratchReg();
   masm.unboxObject(frame.addressOfStackValue(-1), genObj);
 
   MOZ_ASSERT_IF(handler.maybePC(), GET_RESUMEINDEX(handler.maybePC()) == 0);
   masm.storeValue(Int32Value(0),
                   Address(genObj, GeneratorObject::offsetOfResumeIndexSlot()));
 
   Register envObj = R0.scratchReg();
+  Register temp = R1.scratchReg();
   Address envChainSlot(genObj, GeneratorObject::offsetOfEnvironmentChainSlot());
   masm.loadPtr(frame.addressOfEnvironmentChain(), envObj);
-  masm.guardedCallPreBarrier(envChainSlot, MIRType::Value);
+  masm.guardedCallPreBarrierAnyZone(envChainSlot, MIRType::Value, temp);
   masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot);
 
-  Register temp = R1.scratchReg();
   Label skipBarrier;
   masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier);
   masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp, &skipBarrier);
   masm.push(genObj);
   MOZ_ASSERT(genObj == R2.scratchReg());
   masm.call(&postBarrierSlot_);
   masm.pop(genObj);
   masm.bind(&skipBarrier);
@@ -5396,21 +5395,21 @@ bool BaselineCodeGen<Handler>::emit_JSOP
   // Load HomeObject in R0.
   frame.popRegsAndSync(1);
 
   // Load function off stack
   Register func = R2.scratchReg();
   masm.unboxObject(frame.addressOfStackValue(-1), func);
 
   // Set HOMEOBJECT_SLOT
+  Register temp = R1.scratchReg();
   Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
-  masm.guardedCallPreBarrier(addr, MIRType::Value);
+  masm.guardedCallPreBarrierAnyZone(addr, MIRType::Value, temp);
   masm.storeValue(R0, addr);
 
-  Register temp = R1.scratchReg();
   Label skipBarrier;
   masm.branchPtrInNurseryChunk(Assembler::Equal, func, temp, &skipBarrier);
   masm.branchValueIsNurseryObject(Assembler::NotEqual, R0, temp, &skipBarrier);
   masm.call(&postBarrierSlot_);
   masm.bind(&skipBarrier);
 
   return true;
 }
--- a/js/src/jit/CompileWrappers.cpp
+++ b/js/src/jit/CompileWrappers.cpp
@@ -76,16 +76,20 @@ uint32_t* CompileRuntime::addressOfTenur
 const void* CompileRuntime::addressOfJitStackLimit() {
   return runtime()->mainContextFromAnyThread()->addressOfJitStackLimit();
 }
 
 const void* CompileRuntime::addressOfInterruptBits() {
   return runtime()->mainContextFromAnyThread()->addressOfInterruptBits();
 }
 
+const void* CompileRuntime::addressOfZone() {
+  return runtime()->mainContextFromAnyThread()->addressOfZone();
+}
+
 #ifdef DEBUG
 bool CompileRuntime::isInsideNursery(gc::Cell* cell) {
   return UninlinedIsInsideNursery(cell);
 }
 #endif
 
 const DOMCallbacks* CompileRuntime::DOMcallbacks() {
   return runtime()->DOMcallbacks;
--- a/js/src/jit/CompileWrappers.h
+++ b/js/src/jit/CompileWrappers.h
@@ -45,16 +45,17 @@ class CompileRuntime {
   const Value& NaNValue();
   const Value& positiveInfinityValue();
   const WellKnownSymbols& wellKnownSymbols();
 
   const void* mainContextPtr();
   uint32_t* addressOfTenuredAllocCount();
   const void* addressOfJitStackLimit();
   const void* addressOfInterruptBits();
+  const void* addressOfZone();
 
 #ifdef DEBUG
   bool isInsideNursery(gc::Cell* cell);
 #endif
 
   // DOM callbacks must be threadsafe (and will hopefully be removed soon).
   const DOMCallbacks* DOMcallbacks();
 
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -602,16 +602,31 @@ void MacroAssembler::branchTestProxyHand
 void MacroAssembler::branchTestNeedsIncrementalBarrier(Condition cond,
                                                        Label* label) {
   MOZ_ASSERT(cond == Zero || cond == NonZero);
   CompileZone* zone = GetJitContext()->realm->zone();
   const uint32_t* needsBarrierAddr = zone->addressOfNeedsIncrementalBarrier();
   branchTest32(cond, AbsoluteAddress(needsBarrierAddr), Imm32(0x1), label);
 }
 
+void MacroAssembler::branchTestNeedsIncrementalBarrierAnyZone(
+    Condition cond, Label* label, Register scratch) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero);
+  if (GetJitContext()->realm) {
+    branchTestNeedsIncrementalBarrier(cond, label);
+  } else {
+    // We are compiling the interpreter or another runtime-wide trampoline, so
+    // we have to load cx->zone.
+    loadPtr(AbsoluteAddress(GetJitContext()->runtime->addressOfZone()),
+            scratch);
+    Address needsBarrierAddr(scratch, Zone::offsetOfNeedsIncrementalBarrier());
+    branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
+  }
+}
+
 void MacroAssembler::branchTestMagicValue(Condition cond,
                                           const ValueOperand& val,
                                           JSWhyMagic why, Label* label) {
   MOZ_ASSERT(cond == Equal || cond == NotEqual);
   branchTestValue(cond, val, MagicValue(why), label);
 }
 
 void MacroAssembler::branchDoubleNotInInt64Range(Address src, Register temp,
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1396,16 +1396,19 @@ class MacroAssembler : public MacroAssem
   void loadTypedObjectLength(Register obj, Register dest);
 
   // Emit type case branch on tag matching if the type tag in the definition
   // might actually be that type.
   void maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag,
                            Label* label);
 
   inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);
+  inline void branchTestNeedsIncrementalBarrierAnyZone(Condition cond,
+                                                       Label* label,
+                                                       Register scratch);
 
   // Perform a type-test on a tag of a Value (32bits boxing), or the tagged
   // value (64bits boxing).
   inline void branchTestUndefined(Condition cond, Register tag,
                                   Label* label) PER_SHARED_ARCH;
   inline void branchTestInt32(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
   inline void branchTestDouble(Condition cond, Register tag, Label* label)
@@ -2587,37 +2590,55 @@ class MacroAssembler : public MacroAssem
     }
 #else
 #  error "Bad architecture"
 #endif
   }
 
   inline void storeCallResultValue(TypedOrValueRegister dest);
 
+ private:
   template <typename T>
-  void guardedCallPreBarrier(const T& address, MIRType type) {
+  void unguardedCallPreBarrier(const T& address, MIRType type) {
     Label done;
-
-    branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
-
     if (type == MIRType::Value) {
       branchTestGCThing(Assembler::NotEqual, address, &done);
     } else if (type == MIRType::Object || type == MIRType::String) {
       branchPtr(Assembler::Equal, address, ImmWord(0), &done);
     }
 
     Push(PreBarrierReg);
     computeEffectiveAddress(address, PreBarrierReg);
 
     const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
     TrampolinePtr preBarrier = rt->preBarrier(type);
 
     call(preBarrier);
     Pop(PreBarrierReg);
-
+    bind(&done);
+  }
+
+ public:
+  template <typename T>
+  void guardedCallPreBarrier(const T& address, MIRType type) {
+    Label done;
+    branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
+    unguardedCallPreBarrier(address, type);
+    bind(&done);
+  }
+
+  // Like guardedCallPreBarrier, but unlike guardedCallPreBarrier this can be
+  // called from runtime-wide trampolines because it loads cx->zone (instead of
+  // baking in the current Zone) if JitContext::realm is nullptr.
+  template <typename T>
+  void guardedCallPreBarrierAnyZone(const T& address, MIRType type,
+                                    Register scratch) {
+    Label done;
+    branchTestNeedsIncrementalBarrierAnyZone(Assembler::Zero, &done, scratch);
+    unguardedCallPreBarrier(address, type);
     bind(&done);
   }
 
   template <typename T>
   void loadFromTypedArray(Scalar::Type arrayType, const T& src,
                           AnyRegister dest, Register temp, Label* fail,
                           bool canonicalizeDoubles = true);
 
--- a/js/src/vm/JSContext.h
+++ b/js/src/vm/JSContext.h
@@ -838,16 +838,17 @@ struct JSContext : public JS::RootingCon
   }
 
  public:
   void* addressOfInterruptBits() { return &interruptBits_; }
   void* addressOfJitStackLimit() { return &jitStackLimit; }
   void* addressOfJitStackLimitNoInterrupt() {
     return &jitStackLimitNoInterrupt;
   }
+  void* addressOfZone() { return &zone_; }
 
   // Futex state, used by Atomics.wait() and Atomics.wake() on the Atomics
   // object.
   js::FutexThread fx;
 
   // Buffer for OSR from baseline to Ion. To avoid holding on to this for
   // too long, it's also freed in EnterBaseline (after returning from JIT code).
   js::ThreadData<uint8_t*> osrTempData_;