Bug 932627 - Create VMFunctions wrappers under CodeGenerator::link. r=h4writer
☠☠ backed out by 474be0bab26b ☠ ☠
authorNicolas Pierron <nicolas.b.pierron@mozilla.com>
Fri, 29 Nov 2013 07:59:42 -0800
changeset 172783 f76604a4f0259e4ce4539853d3936777b80b4bf1
parent 172782 3ed81454baf97bfcb2b9551f3b146598deedd43a
child 172784 4231aceecfe0d2758798fbbd831cb4973c2fd407
push id3224
push userlsblakk@mozilla.com
push dateTue, 04 Feb 2014 01:06:49 +0000
treeherdermozilla-beta@60c04d0987f1 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersh4writer
bugs932627
milestone28.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 932627 - Create VMFunctions wrappers under CodeGenerator::link. r=h4writer ---
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineIC.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/Ion.cpp
js/src/jit/IonCode.h
js/src/jit/JitCompartment.h
js/src/jit/VMFunctions.cpp
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/shared/BaselineCompiler-shared.cpp
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/x64/Assembler-x64.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x64/Trampoline-x64.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/MacroAssembler-x86.h
js/src/jit/x86/Trampoline-x86.cpp
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -684,16 +684,18 @@ BaselineCompiler::emitDebugTrap()
 {
     JS_ASSERT(debugMode_);
     JS_ASSERT(frame.numUnsyncedSlots() == 0);
 
     bool enabled = script->stepModeEnabled() || script->hasBreakpointsAt(pc);
 
     // Emit patchable call to debug trap handler.
     IonCode *handler = cx->runtime()->jitRuntime()->debugTrapHandler(cx);
+    if (!handler)
+        return false;
     mozilla::DebugOnly<CodeOffsetLabel> offset = masm.toggledCall(handler, enabled);
 
 #ifdef DEBUG
     // Patchable call offset has to match the pc mapping offset.
     PCMappingEntry &entry = pcMappingEntries_[pcMappingEntries_.length() - 1];
     JS_ASSERT((&offset)->offset() == entry.nativeOffset);
 #endif
 
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -610,40 +610,40 @@ ICStubCompiler::getStubCode()
 #endif
 
     return newStubCode;
 }
 
 bool
 ICStubCompiler::tailCallVM(const VMFunction &fun, MacroAssembler &masm)
 {
-    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(fun);
+    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(cx, fun);
     if (!code)
         return false;
 
     uint32_t argSize = fun.explicitStackSlots() * sizeof(void *);
     EmitTailCallVM(code, masm, argSize);
     return true;
 }
 
 bool
 ICStubCompiler::callVM(const VMFunction &fun, MacroAssembler &masm)
 {
-    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(fun);
+    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(cx, fun);
     if (!code)
         return false;
 
     EmitCallVM(code, masm);
     return true;
 }
 
 bool
 ICStubCompiler::callTypeUpdateIC(MacroAssembler &masm, uint32_t objectOffset)
 {
-    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(DoTypeUpdateFallbackInfo);
+    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(cx, DoTypeUpdateFallbackInfo);
     if (!code)
         return false;
 
     EmitCallTypeUpdateIC(masm, code, objectOffset);
     return true;
 }
 
 void
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -5804,16 +5804,36 @@ CodeGenerator::generate()
     perfSpewer_.noteEndInlineCode(masm);
 #endif
     if (!generateOutOfLineCode())
         return false;
 
     return !masm.oom();
 }
 
+
+bool
+CodeGenerator::initUsedVMWrappers(JSContext *cx)
+{
+    // VM Wrappers are shared stubs created in the atoms compartment.
+    AutoLockForExclusiveAccess atomsLock(cx);
+    JSRuntime::AutoLockForOperationCallback lock(cx->runtime());
+    AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
+
+    JitRuntime *rt = cx->runtime()->jitRuntime();
+    LinkVMWrapper *end = patchableVMCalls_.end();
+    for (LinkVMWrapper *it = patchableVMCalls_.begin(); it != end; it++) {
+        IonCode *wrapper = rt->getVMWrapper(cx, it->fun, atomsLock);
+        if (!wrapper)
+            return false;
+    }
+
+    return true;
+}
+
 bool
 CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
 {
     RootedScript script(cx, gen->info().script());
     ExecutionMode executionMode = gen->info().executionMode();
     JS_ASSERT(!HasIonScript(script, executionMode));
 
     // Check to make sure we didn't have a mid-build invalidation. If so, we
@@ -5841,16 +5861,23 @@ CodeGenerator::link(JSContext *cx, types
                      bailouts_.length(), graph.numConstants(),
                      safepointIndices_.length(), osiIndices_.length(),
                      cacheList_.length(), runtimeData_.length(),
                      safepoints_.size(), callTargets.length(),
                      patchableBackedges_.length());
     if (!ionScript)
         return false;
 
+    if (!initUsedVMWrappers(cx)) {
+        // Use js_free instead of IonScript::Destroy: the cache list and
+        // backedge list are still uninitialized.
+        js_free(ionScript);
+        return false;
+    }
+
     // Lock the runtime against operation callbacks during the link.
     // We don't want an operation callback to protect the code for the script
     // before it has been filled in, as we could segv before the runtime's
     // patchable backedges have been fully updated.
     JSRuntime::AutoLockForOperationCallback lock(cx->runtime());
 
     // Make sure we don't segv while filling in the code, to avoid deadlocking
     // inside the signal handler.
@@ -5927,16 +5954,21 @@ CodeGenerator::link(JSContext *cx, types
         ionScript->copySnapshots(&snapshots_);
     if (graph.numConstants())
         ionScript->copyConstants(graph.constantPool());
     if (callTargets.length() > 0)
         ionScript->copyCallTargetEntries(callTargets.begin());
     if (patchableBackedges_.length() > 0)
         ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin());
 
+    // Link VM wrappers needed by the current Ion Script.
+    if (patchableVMCalls_.length() > 0)
+        ionScript->patchVMCalls(cx, code, patchableVMCalls_.begin(),
+                                patchableVMCalls_.length(), masm);
+
     switch (executionMode) {
       case SequentialExecution:
         // The correct state for prebarriers is unknown until the end of compilation,
         // since a GC can occur during code generation. All barriers are emitted
         // off-by-default, and are toggled on here if necessary.
         if (cx->zone()->needsBarrier())
             ionScript->toggleBarriers(true);
         break;
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -428,14 +428,23 @@ class CodeGenerator : public CodeGenerat
     bool emitAssertRangeD(const Range *r, FloatRegister input, FloatRegister temp);
 
     // Script counts created when compiling code with no associated JSScript.
     IonScriptCounts *unassociatedScriptCounts_;
 
 #if defined(JS_ION_PERF)
     PerfSpewer perfSpewer_;
 #endif
+
+    // The callVM function might not be able to produce a call to the
+    // corresponding VM function, as code generation may be running concurrently
+    // on a separate thread. When the code is linked, we lock the main thread
+    // and use initUsedVMWrappers to compile the VM wrappers of the VMFunctions
+    // which were not yet compiled. The list |patchableVMCalls_| contains the
+    // call sites and their corresponding VMFunctions for wrappers that were
+    // not yet compiled at code-generation time.
+    bool initUsedVMWrappers(JSContext *cx);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_CodeGenerator_h */
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -284,34 +284,33 @@ JitRuntime::initialize(JSContext *cx)
     if (!valuePreBarrier_)
         return false;
 
     IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Shape");
     shapePreBarrier_ = generatePreBarrier(cx, MIRType_Shape);
     if (!shapePreBarrier_)
         return false;
 
-    IonSpew(IonSpew_Codegen, "# Emitting VM function wrappers");
-    for (VMFunction *fun = VMFunction::functions; fun; fun = fun->next) {
-        if (!generateVMWrapper(cx, *fun))
-            return false;
-    }
+    IonSpew(IonSpew_Codegen, "# Emitting Unreachable Trap");
+    unreachableTrap_ = generateUnreachableTrap(cx);
+    if (!unreachableTrap_)
+        return false;
 
     return true;
 }
 
 IonCode *
 JitRuntime::debugTrapHandler(JSContext *cx)
 {
     if (!debugTrapHandler_) {
         // JitRuntime code stubs are shared across compartments and have to
         // be allocated in the atoms compartment.
         AutoLockForExclusiveAccess lock(cx);
         AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
-        debugTrapHandler_ = generateDebugTrapHandler(cx);
+        debugTrapHandler_ = generateDebugTrapHandler(cx, lock);
     }
     return debugTrapHandler_;
 }
 
 uint8_t *
 JitRuntime::allocateOsrTempData(size_t size)
 {
     osrTempData_ = (uint8_t *)js_realloc(osrTempData_, size);
@@ -591,24 +590,73 @@ JitCompartment::sweep(FreeOp *fop)
 IonCode *
 JitRuntime::getBailoutTable(const FrameSizeClass &frameClass) const
 {
     JS_ASSERT(frameClass != FrameSizeClass::None());
     return bailoutTables_[frameClass.classId()];
 }
 
 IonCode *
-JitRuntime::getVMWrapper(const VMFunction &f) const
+JitRuntime::maybeGetVMWrapper(const VMFunction &f) const
 {
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     JitRuntime::VMWrapperMap::Ptr p = functionWrappers_->readonlyThreadsafeLookup(&f);
-    JS_ASSERT(p);
-
-    return p->value;
+    return p ? p->value : unreachableTrap();
+}
+
+IonCode *
+JitRuntime::getVMWrapper(JSContext *cx, const VMFunction &f)
+{
+    JS_ASSERT(functionWrappers_);
+    JS_ASSERT(functionWrappers_->initialized());
+    VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+    if (p)
+        return p->value;
+
+    AutoLockForExclusiveAccess atomsLock(cx);
+    AutoCompartment ac(cx, cx->atomsCompartment());
+    IonContext ictx(cx, nullptr);
+
+    IonCode *wrapper = generateVMWrapper(cx, f);
+    if (!wrapper) {
+        js_ReportOutOfMemory(cx);
+        return nullptr;
+    }
+
+    // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
+    // use relookupOrAdd instead of add.
+    if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+        return nullptr;
+
+    return wrapper;
+}
+
+IonCode *
+JitRuntime::getVMWrapper(JSContext *cx, const VMFunction &f,
+                         AutoLockForExclusiveAccess &atomsLock)
+{
+    JS_ASSERT(functionWrappers_);
+    JS_ASSERT(functionWrappers_->initialized());
+    VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+    if (p)
+        return p->value;
+
+    IonCode *wrapper = generateVMWrapper(cx, f);
+    if (!wrapper) {
+        js_ReportOutOfMemory(cx);
+        return nullptr;
+    }
+
+    // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
+    // use relookupOrAdd instead of add.
+    if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+        return nullptr;
+
+    return wrapper;
 }
 
 template <AllowGC allowGC>
 IonCode *
 IonCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool)
 {
     IonCode *codeObj = gc::NewGCThing<IonCode, allowGC>(cx, gc::FINALIZE_IONCODE, sizeof(IonCode), gc::DefaultHeap);
     if (!codeObj) {
@@ -964,16 +1012,31 @@ IonScript::copyCacheEntries(const uint32
 
     // Jumps in the caches reflect the offset of those jumps in the compiled
     // code, not the absolute positions of the jumps. Update according to the
     // final code address now.
     for (size_t i = 0; i < numCaches(); i++)
         getCacheFromIndex(i).updateBaseAddress(method_, masm);
 }
 
+void
+IonScript::patchVMCalls(JSContext *cx, IonCode *code, LinkVMWrapper *it, size_t length,
+                        MacroAssembler &masm)
+{
+    JitRuntime *rt = cx->runtime()->jitRuntime();
+    LinkVMWrapper *end = it + length;
+    for (; it != end; it++) {
+        it->offset.fixup(&masm);
+        // VM Wrappers should be compiled before linking them.
+        IonCode *wrapper = rt->maybeGetVMWrapper(it->fun);
+        JS_ASSERT(wrapper != rt->unreachableTrap());
+        it->patchCall(masm, code, rt->unreachableTrap(), wrapper);
+    }
+}
+
 const SafepointIndex *
 IonScript::getSafepointIndex(uint32_t disp) const
 {
     JS_ASSERT(safepointIndexEntries_ > 0);
 
     const SafepointIndex *table = safepointIndices();
     if (safepointIndexEntries_ == 1) {
         JS_ASSERT(disp == table[0].displacement());
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -140,16 +140,17 @@ class IonCode : public gc::BarrieredCell
 
 class SnapshotWriter;
 class SafepointWriter;
 class SafepointIndex;
 class OsiIndex;
 class IonCache;
 struct PatchableBackedgeInfo;
 struct CacheLocation;
+struct LinkVMWrapper;
 
 // Describes a single AsmJSModule which jumps (via an FFI exit with the given
 // index) directly into an IonScript.
 struct DependentAsmJSModuleExit
 {
     const AsmJSModule *module;
     size_t exitIndex;
 
@@ -499,16 +500,18 @@ struct IonScript
     void copySafepointIndices(const SafepointIndex *firstSafepointIndex, MacroAssembler &masm);
     void copyOsiIndices(const OsiIndex *firstOsiIndex, MacroAssembler &masm);
     void copyRuntimeData(const uint8_t *data);
     void copyCacheEntries(const uint32_t *caches, MacroAssembler &masm);
     void copySafepoints(const SafepointWriter *writer);
     void copyCallTargetEntries(JSScript **callTargets);
     void copyPatchableBackedges(JSContext *cx, IonCode *code,
                                 PatchableBackedgeInfo *backedges);
+    void patchVMCalls(JSContext *cx, IonCode *code, LinkVMWrapper *begin, size_t length,
+                      MacroAssembler &masm);
 
     bool invalidated() const {
         return refcount_ != 0;
     }
     size_t refcount() const {
         return refcount_;
     }
     void incref() {
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -16,16 +16,20 @@
 #include "jit/CompileInfo.h"
 #include "jit/IonCode.h"
 #include "jit/IonFrames.h"
 #include "jit/shared/Assembler-shared.h"
 #include "js/Value.h"
 #include "vm/Stack.h"
 
 namespace js {
+
+// defined in jscntxtinlines.h
+class AutoLockForExclusiveAccess;
+
 namespace jit {
 
 class FrameSizeClass;
 
 enum EnterJitType {
     EnterJitBaseline = 0,
     EnterJitOptimized = 1
 };
@@ -206,27 +210,32 @@ class JitRuntime
     // Whether all Ion code in the runtime is protected, and will fault if it
     // is accessed.
     bool ionCodeProtected_;
 
     // If signal handlers are installed, this contains all loop backedges for
     // IonScripts in the runtime.
     InlineList<PatchableBackedge> backedgeList_;
 
+    // Thunk that is not supposed to be called and crashes the application if it
+    // is ever called.
+    IonCode *unreachableTrap_;
+
   private:
     IonCode *generateExceptionTailStub(JSContext *cx);
     IonCode *generateBailoutTailStub(JSContext *cx);
     IonCode *generateEnterJIT(JSContext *cx, EnterJitType type);
     IonCode *generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut);
     IonCode *generateBailoutTable(JSContext *cx, uint32_t frameClass);
     IonCode *generateBailoutHandler(JSContext *cx);
     IonCode *generateInvalidator(JSContext *cx);
     IonCode *generatePreBarrier(JSContext *cx, MIRType type);
-    IonCode *generateDebugTrapHandler(JSContext *cx);
+    IonCode *generateDebugTrapHandler(JSContext *cx, AutoLockForExclusiveAccess &atomsLock);
     IonCode *generateVMWrapper(JSContext *cx, const VMFunction &f);
+    IonCode *generateUnreachableTrap(JSContext *cx);
 
     JSC::ExecutableAllocator *createIonAlloc(JSContext *cx);
 
   public:
     JitRuntime();
     ~JitRuntime();
     bool initialize(JSContext *cx);
 
@@ -270,17 +279,22 @@ class JitRuntime
     };
 
     void ensureIonCodeProtected(JSRuntime *rt);
     void ensureIonCodeAccessible(JSRuntime *rt);
     void patchIonBackedges(JSRuntime *rt, BackedgeTarget target);
 
     bool handleAccessViolation(JSRuntime *rt, void *faultingAddress);
 
-    IonCode *getVMWrapper(const VMFunction &f) const;
+    // Return the VM wrapper for |f| if it has already been compiled, or the
+    // unreachable trap code otherwise; call sites emitted against the trap are
+    // patched to the real wrapper at link time (see IonScript::patchVMCalls).
+    IonCode *maybeGetVMWrapper(const VMFunction &f) const;
+    IonCode *getVMWrapper(JSContext *cx, const VMFunction &f);
+    IonCode *getVMWrapper(JSContext *cx, const VMFunction &f,
+                          AutoLockForExclusiveAccess &atomsLock);
     IonCode *debugTrapHandler(JSContext *cx);
 
     IonCode *getGenericBailoutHandler() const {
         return bailoutHandler_;
     }
 
     IonCode *getExceptionTail() const {
         return exceptionTail_;
@@ -318,16 +332,20 @@ class JitRuntime
 
     IonCode *valuePreBarrier() const {
         return valuePreBarrier_;
     }
 
     IonCode *shapePreBarrier() const {
         return shapePreBarrier_;
     }
+
+    IonCode *unreachableTrap() const {
+        return unreachableTrap_;
+    }
 };
 
 class JitCompartment
 {
     friend class JitActivation;
 
     // Ion state for the compartment's runtime.
     JitRuntime *rt;
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -917,11 +917,10 @@ InitBaselineFrameForOsr(BaselineFrame *f
 }
 
 JSObject *CreateDerivedTypedObj(JSContext *cx, HandleObject type,
                                 HandleObject owner, int32_t offset)
 {
     return TypedObject::createDerived(cx, type, owner, offset);
 }
 
-
 } // namespace jit
 } // namespace js
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -71,16 +71,24 @@ CodeGeneratorARM::generateEpilogue()
         JS_ASSERT(masm.framePushed() == 0);
         masm.ma_pop(pc);
     }
     masm.dumpPool();
     return true;
 }
 
 void
+LinkVMWrapper::patchCall(MacroAssembler &masm, IonCode *code,
+                         IonCode *trap, IonCode *wrapper) const
+{
+    CodeLocationLabel vmcall(code, offset);
+    masm.patchDataWithValueCheck(vmcall, ImmPtr(wrapper->raw()), ImmPtr(trap->raw()));
+}
+
+void
 CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue, MBasicBlock *mirFalse)
 {
     if (isNextBlock(mirFalse->lir())) {
         jumpToBlock(mirTrue, cond);
     } else {
         jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
         jumpToBlock(mirTrue);
     }
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1776,16 +1776,23 @@ MacroAssemblerARMCompat::callWithExitFra
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs);
     ma_callIonHalfPush(ScratchRegister);
 }
 
+MacroAssemblerARMCompat::CodeOffsetCall
+MacroAssemblerARMCompat::lastPatchableCall(uint32_t callOffset)
+{
+    JS_ASSERT(jumps_.back().offset.getOffset() < callOffset);
+    return CodeOffsetCall(jumps_.back().offset.getOffset());
+}
+
 void
 MacroAssemblerARMCompat::callIon(const Register &callee)
 {
     JS_ASSERT((framePushed() & 3) == 0);
     if ((framePushed() & 7) == 4) {
         ma_callIonHalfPush(callee);
     } else {
         adjustFrame(sizeof(void*));
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -1207,16 +1207,19 @@ class MacroAssemblerARMCompat : public M
 
     // Builds an exit frame on the stack, with a return address to an internal
     // non-function. Returns offset to be passed to markSafepointAt().
     bool buildFakeExitFrame(const Register &scratch, uint32_t *offset);
 
     void callWithExitFrame(IonCode *target);
     void callWithExitFrame(IonCode *target, Register dynStack);
 
+    typedef CodeOffsetLabel CodeOffsetCall;
+    CodeOffsetCall lastPatchableCall(uint32_t callOffset);
+
     // Makes an Ion call using the only two methods that it is sane for
     // indep code to make a call
     void callIon(const Register &callee);
 
     void reserveStack(uint32_t amount);
     void freeStack(uint32_t amount);
     void freeStack(Register amount);
 
--- a/js/src/jit/arm/Trampoline-arm.cpp
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -651,22 +651,16 @@ JitRuntime::generateBailoutHandler(JSCon
     return code;
 }
 
 IonCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
     typedef MoveResolver::MoveOperand MoveOperand;
 
-    JS_ASSERT(functionWrappers_);
-    JS_ASSERT(functionWrappers_->initialized());
-    VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
-    if (p)
-        return p->value;
-
     // Generate a separated code for the wrapper.
     MacroAssembler masm(cx);
     GeneralRegisterSet regs = GeneralRegisterSet(Register::Codes::WrapperMask);
 
     // Wrapper register set is a superset of Volatile register set.
     JS_STATIC_ASSERT((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0);
 
     // The context is the first argument; r0 is the first argument register.
@@ -814,21 +808,16 @@ JitRuntime::generateVMWrapper(JSContext 
                     f.explicitStackSlots() * sizeof(void *) +
                     f.extraValuesToPop * sizeof(Value)));
 
     Linker linker(masm);
     IonCode *wrapper = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
     if (!wrapper)
         return nullptr;
 
-    // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
-    // use relookupOrAdd instead of add.
-    if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
-        return nullptr;
-
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
 #endif
 
     return wrapper;
 }
 
 IonCode *
@@ -871,34 +860,34 @@ JitRuntime::generatePreBarrier(JSContext
 
     return code;
 }
 
 typedef bool (*HandleDebugTrapFn)(JSContext *, BaselineFrame *, uint8_t *, bool *);
 static const VMFunction HandleDebugTrapInfo = FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap);
 
 IonCode *
-JitRuntime::generateDebugTrapHandler(JSContext *cx)
+JitRuntime::generateDebugTrapHandler(JSContext *cx, AutoLockForExclusiveAccess &atomsLock)
 {
     MacroAssembler masm;
 
     Register scratch1 = r0;
     Register scratch2 = r1;
 
     // Load BaselineFrame pointer in scratch1.
     masm.mov(r11, scratch1);
     masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
 
     // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
     // the stub frame has a nullptr ICStub pointer, since this pointer is
     // marked during GC.
     masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
     EmitEnterStubFrame(masm, scratch2);
 
-    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(cx, HandleDebugTrapInfo, atomsLock);
     if (!code)
         return nullptr;
 
     masm.push(lr);
     masm.push(scratch1);
     EmitCallVM(code, masm);
 
     EmitLeaveStubFrame(masm);
@@ -955,8 +944,29 @@ JitRuntime::generateBailoutTailStub(JSCo
     IonCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(code, "BailoutTailStub");
 #endif
 
     return code;
 }
+
+IonCode *
+JitRuntime::generateUnreachableTrap(JSContext *cx)
+{
+    // Generate separate code for the unreachable trap.
+    MacroAssembler masm;
+
+    masm.breakpoint();
+    masm.ret();
+
+    Linker linker(masm);
+    IonCode *trap = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+    if (!trap)
+        return nullptr;
+
+#ifdef JS_ION_PERF
+    writePerfSpewerIonCodeProfile(trap, "Unreachable Trap");
+#endif
+
+    return trap;
+}
--- a/js/src/jit/shared/BaselineCompiler-shared.cpp
+++ b/js/src/jit/shared/BaselineCompiler-shared.cpp
@@ -29,17 +29,17 @@ BaselineCompilerShared::BaselineCompiler
     pushedBeforeCall_(0),
     inCall_(false),
     spsPushToggleOffset_()
 { }
 
 bool
 BaselineCompilerShared::callVM(const VMFunction &fun, CallVMPhase phase)
 {
-    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(fun);
+    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(cx, fun);
     if (!code)
         return false;
 
 #ifdef DEBUG
     // Assert prepareVMCall() has been called.
     JS_ASSERT(inCall_);
     inCall_ = false;
 #endif
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -387,19 +387,32 @@ CodeGeneratorShared::markSafepoint(LInst
     return markSafepointAt(masm.currentOffset(), ins);
 }
 
 bool
 CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction *ins)
 {
     JS_ASSERT_IF(safepointIndices_.length(),
                  offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
+
     return safepointIndices_.append(SafepointIndex(offset, ins->safepoint()));
 }
 
+bool
+CodeGeneratorShared::markVMCall(uint32_t offset, LInstruction *ins,
+                                const VMFunction &fun, const IonCode *wrapper)
+{
+    if (wrapper == gen->jitRuntime()->unreachableTrap()) {
+        if (!patchableVMCalls_.append(LinkVMWrapper(masm, offset, fun)))
+            return false;
+    }
+
+    return markSafepointAt(offset, ins);
+}
+
 void
 CodeGeneratorShared::ensureOsiSpace()
 {
     // For a refresher, an invalidation point is of the form:
     // 1: call <target>
     // 2: ...
     // 3: <osipoint>
     //
@@ -637,36 +650,34 @@ CodeGeneratorShared::callVM(const VMFunc
     //    ... frame ...
     //    [args]
 #ifdef DEBUG
     JS_ASSERT(pushedArgs_ == fun.explicitArgs);
     pushedArgs_ = 0;
 #endif
 
     // Get the wrapper of the VM function.
-    IonCode *wrapper = gen->jitRuntime()->getVMWrapper(fun);
-    if (!wrapper)
-        return false;
+    IonCode *wrapper = gen->jitRuntime()->maybeGetVMWrapper(fun);
 
 #ifdef CHECK_OSIPOINT_REGISTERS
     if (shouldVerifyOsiPointRegs(ins->safepoint()))
         StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
 #endif
 
     // Call the wrapper function.  The wrapper is in charge to unwind the stack
     // when returning from the call.  Failures are handled with exceptions based
     // on the return value of the C functions.  To guard the outcome of the
     // returned value, use another LIR instruction.
     uint32_t callOffset;
     if (dynStack)
         callOffset = masm.callWithExitFrame(wrapper, *dynStack);
     else
         callOffset = masm.callWithExitFrame(wrapper);
 
-    if (!markSafepointAt(callOffset, ins))
+    if (!markVMCall(callOffset, ins, fun, wrapper))
         return false;
 
     // Remove rest of the frame left on the stack. We remove the return address
     // which is implicitly poped when returning.
     int framePop = sizeof(IonExitFrameLayout) - sizeof(void*);
 
     // Pop arguments from framePushed.
     masm.implicitPop(fun.explicitStackSlots() * sizeof(void *) + framePop);
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -40,16 +40,28 @@ struct PatchableBackedgeInfo
     Label *loopHeader;
     Label *interruptCheck;
 
     PatchableBackedgeInfo(CodeOffsetJump backedge, Label *loopHeader, Label *interruptCheck)
       : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
     {}
 };
 
+struct LinkVMWrapper {
+    MacroAssembler::CodeOffsetCall offset;
+    const VMFunction &fun;
+
+    LinkVMWrapper(MacroAssembler &masm, uint32_t offset, const VMFunction &fun)
+      : offset(masm.lastPatchableCall(offset)), fun(fun)
+    { }
+
+    void patchCall(MacroAssembler &masm, IonCode *code,
+                   IonCode *trap, IonCode *wrapper) const;
+};
+
 class CodeGeneratorShared : public LInstructionVisitor
 {
     js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
     OutOfLineCode *oolIns;
 
     MacroAssembler &ensureMasm(MacroAssembler *masm);
     mozilla::Maybe<MacroAssembler> maybeMasm_;
 
@@ -65,16 +77,17 @@ class CodeGeneratorShared : public LInst
 #ifdef DEBUG
     uint32_t pushedArgs_;
 #endif
     uint32_t lastOsiPointOffset_;
     SafepointWriter safepoints_;
     Label invalidate_;
     CodeOffsetLabel invalidateEpilogueData_;
 
+    js::Vector<LinkVMWrapper, 0, SystemAllocPolicy> patchableVMCalls_;
     js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
     js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
 
     // Mapping from bailout table ID to an offset in the snapshot buffer.
     js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;
 
     // Allocated data space needed at runtime.
     js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;
@@ -269,16 +282,21 @@ class CodeGeneratorShared : public LInst
     // safepoint offsets.
     void encodeSafepoints();
 
     // Mark the safepoint on |ins| as corresponding to the current assembler location.
     // The location should be just after a call.
     bool markSafepoint(LInstruction *ins);
     bool markSafepointAt(uint32_t offset, LInstruction *ins);
 
+    // Mark the safepoint of a VM function call. It also registers the location
+    // to be patched if the wrapper corresponds to the unreachable trap.
+    bool markVMCall(uint32_t offset, LInstruction *ins,
+                    const VMFunction &fun, const IonCode *wrapper);
+
     // Mark the OSI point |ins| as corresponding to the current
     // assembler location inside the |osiIndices_|. Return the assembler
     // location for the OSI point return location within
     // |returnPointOffset|.
     bool markOsiPoint(LOsiPoint *ins, uint32_t *returnPointOffset);
 
     // Ensure that there is enough room between the last OSI point and the
     // current instruction, such that:
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -54,16 +54,25 @@ CodeGeneratorX86Shared::generateEpilogue
     // Pop the stack we allocated at the start of the function.
     masm.freeStack(frameSize());
     JS_ASSERT(masm.framePushed() == 0);
 
     masm.ret();
     return true;
 }
 
+void
+LinkVMWrapper::patchCall(MacroAssembler &masm, IonCode *code,
+                         IonCode *trap, IonCode *wrapper) const
+{
+    CodeLocationJump vmcall(code, offset);
+    CodeLocationLabel target(wrapper);
+    PatchJump(vmcall, target);
+}
+
 bool
 OutOfLineBailout::accept(CodeGeneratorX86Shared *codegen)
 {
     return codegen->visitOutOfLineBailout(this);
 }
 
 void
 CodeGeneratorX86Shared::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue,
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -255,9 +255,8 @@ Assembler::TraceJumpRelocations(JSTracer
 {
     RelocationIterator iter(reader);
     while (iter.read()) {
         IonCode *child = CodeFromJump(code, code->raw() + iter.offset());
         MarkIonCodeUnbarriered(trc, &child, "rel32");
         JS_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
     }
 }
-
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -229,16 +229,21 @@ class Assembler : public AssemblerX86Sha
 
     Assembler()
       : extendedJumpTable_(0)
     {
     }
 
     static void TraceJumpRelocations(JSTracer *trc, IonCode *code, CompactBufferReader &reader);
 
+    // GenerateVMWrapperOnCall needs to find the location of its call site so
+    // that we can link the call site to the newly allocated VM wrapper instead
+    // of the trampoline to this lazy function.
+    static CodeOffsetJump findPatchableCall(IonCode *method, size_t returnOffset, IonCode **target);
+
     // The buffer is about to be linked, make sure any constant pools or excess
     // bookkeeping has been flushed to the instruction stream.
     void finish();
 
     // Copy the assembly code to the given buffer, and perform any pending
     // relocations relying on the target address.
     void executableCopy(uint8_t *buffer);
 
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -1233,16 +1233,22 @@ class MacroAssemblerX64 : public MacroAs
 
     void callWithExitFrame(IonCode *target, Register dynStack) {
         addPtr(Imm32(framePushed()), dynStack);
         makeFrameDescriptor(dynStack, IonFrame_OptimizedJS);
         Push(dynStack);
         call(target);
     }
 
+    // On x64 a patchable call goes through the extended jump table, so the
+    // patch site is described by both the call's code offset and its entry in
+    // jumps_.  Assumes the call being described is the most recently recorded
+    // jump (jumps_.length() - 1) — the JS_ASSERT guards the non-empty case.
+    typedef CodeOffsetJump CodeOffsetCall;
+    CodeOffsetCall lastPatchableCall(uint32_t callOffset) {
+        JS_ASSERT(jumps_.length());
+        return CodeOffsetCall(callOffset, jumps_.length() - 1);
+    }
+
     // Save an exit frame to the thread data of the current thread, given a
     // register that holds a PerThreadData *.
     void linkParallelExitFrame(const Register &pt) {
         storePtr(StackPointer, Address(pt, offsetof(PerThreadData, ionTop)));
     }
 
     void enterOsr(Register calleeToken, Register code) {
         push(Imm32(0)); // num actual args.
--- a/js/src/jit/x64/Trampoline-x64.cpp
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -501,21 +501,16 @@ JitRuntime::generateBailoutHandler(JSCon
 }
 
 IonCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
     typedef MoveResolver::MoveOperand MoveOperand;
 
     JS_ASSERT(!StackKeptAligned);
-    JS_ASSERT(functionWrappers_);
-    JS_ASSERT(functionWrappers_->initialized());
-    VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
-    if (p)
-        return p->value;
 
     // Generate a separated code for the wrapper.
     MacroAssembler masm;
 
     // Avoid conflicts with argument registers while discarding the result after
     // the function call.
     GeneralRegisterSet regs = GeneralRegisterSet(Register::Codes::WrapperMask);
 
@@ -674,21 +669,16 @@ JitRuntime::generateVMWrapper(JSContext 
     IonCode *wrapper = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
     if (!wrapper)
         return nullptr;
 
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
 #endif
 
-    // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
-    // use relookupOrAdd instead of add.
-    if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
-        return nullptr;
-
     return wrapper;
 }
 
 IonCode *
 JitRuntime::generatePreBarrier(JSContext *cx, MIRType type)
 {
     MacroAssembler masm;
 
@@ -721,17 +711,17 @@ JitRuntime::generatePreBarrier(JSContext
 
     return code;
 }
 
 typedef bool (*HandleDebugTrapFn)(JSContext *, BaselineFrame *, uint8_t *, bool *);
 static const VMFunction HandleDebugTrapInfo = FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap);
 
 IonCode *
-JitRuntime::generateDebugTrapHandler(JSContext *cx)
+JitRuntime::generateDebugTrapHandler(JSContext *cx, AutoLockForExclusiveAccess &atomsLock)
 {
     MacroAssembler masm;
 
     Register scratch1 = rax;
     Register scratch2 = rcx;
     Register scratch3 = rdx;
 
     // Load the return address in scratch1.
@@ -742,17 +732,17 @@ JitRuntime::generateDebugTrapHandler(JSC
     masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
 
     // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
     // the stub frame has a nullptr ICStub pointer, since this pointer is marked
     // during GC.
     masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
     EmitEnterStubFrame(masm, scratch3);
 
-    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(cx, HandleDebugTrapInfo, atomsLock);
     if (!code)
         return nullptr;
 
     masm.push(scratch1);
     masm.push(scratch2);
     EmitCallVM(code, masm);
 
     EmitLeaveStubFrame(masm);
@@ -809,8 +799,29 @@ JitRuntime::generateBailoutTailStub(JSCo
     IonCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(code, "BailoutTailStub");
 #endif
 
     return code;
 }
+
+IonCode *
+JitRuntime::generateUnreachableTrap(JSContext *cx)
+{
+    // Generate a stand-alone stub that must never actually run: reaching it
+    // executes a breakpoint instruction.  It serves as a placeholder target
+    // for patchable VM calls until the real wrapper is linked in.
+    MacroAssembler masm;
+
+    masm.breakpoint();
+    masm.ret();
+
+    Linker linker(masm);
+    IonCode *trap = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+    if (!trap)
+        return nullptr;
+
+#ifdef JS_ION_PERF
+    // Fix: profile the code object actually created here (`trap`); the
+    // original referenced `wrapper`, which is not in scope and would not
+    // compile in JS_ION_PERF builds.
+    writePerfSpewerIonCodeProfile(trap, "Unreachable Trap");
+#endif
+
+    return trap;
+}
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -115,22 +115,24 @@ static const Scale ScalePointer = TimesF
 
 namespace js {
 namespace jit {
 
 static inline void
 PatchJump(CodeLocationJump jump, CodeLocationLabel label)
 {
 #ifdef DEBUG
-    // Assert that we're overwriting a jump instruction, either:
+    // Assert that we're overwriting a jump/call instruction, either:
     //   0F 80+cc <imm32>, or
-    //   E9 <imm32>
+    //   E9 <imm32>, or
+    //   E8 <imm32>
     unsigned char *x = (unsigned char *)jump.raw() - 5;
     JS_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
-              (*x == 0xE9));
+              (*x == 0xE9) ||
+              (*x == 0xE8));
 #endif
     JSC::X86Assembler::setRel32(jump.raw(), label.raw());
 }
 
 // Return operand from a JS -> JS call.
 static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
 
 class Assembler : public AssemblerX86Shared
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -1077,16 +1077,21 @@ class MacroAssemblerX86 : public MacroAs
 
     void callWithExitFrame(IonCode *target, Register dynStack) {
         addPtr(Imm32(framePushed()), dynStack);
         makeFrameDescriptor(dynStack, IonFrame_OptimizedJS);
         Push(dynStack);
         call(target);
     }
 
+    // On x86 a patchable call site is fully described by its code offset; no
+    // jump-table index is needed (contrast with the x64 version, which also
+    // records an entry in jumps_).
+    typedef CodeOffsetJump CodeOffsetCall;
+    CodeOffsetCall lastPatchableCall(uint32_t callOffset) {
+        return CodeOffsetCall(callOffset);
+    }
+
     // Save an exit frame to the thread data of the current thread, given a
     // register that holds a PerThreadData *.
     void linkParallelExitFrame(const Register &pt) {
         movl(StackPointer, Operand(pt, offsetof(PerThreadData, ionTop)));
     }
 
     void enterOsr(Register calleeToken, Register code) {
         push(Imm32(0)); // num actual args.
--- a/js/src/jit/x86/Trampoline-x86.cpp
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -757,17 +757,17 @@ JitRuntime::generatePreBarrier(JSContext
 
     return code;
 }
 
 typedef bool (*HandleDebugTrapFn)(JSContext *, BaselineFrame *, uint8_t *, bool *);
 static const VMFunction HandleDebugTrapInfo = FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap);
 
 IonCode *
-JitRuntime::generateDebugTrapHandler(JSContext *cx)
+JitRuntime::generateDebugTrapHandler(JSContext *cx, AutoLockForExclusiveAccess &atomsLock)
 {
     MacroAssembler masm;
 
     Register scratch1 = eax;
     Register scratch2 = ecx;
     Register scratch3 = edx;
 
     // Load the return address in scratch1.
@@ -778,17 +778,17 @@ JitRuntime::generateDebugTrapHandler(JSC
     masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
 
     // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
     // the stub frame has a nullptr ICStub pointer, since this pointer is
     // marked during GC.
     masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
     EmitEnterStubFrame(masm, scratch3);
 
-    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+    IonCode *code = cx->runtime()->jitRuntime()->getVMWrapper(cx, HandleDebugTrapInfo, atomsLock);
     if (!code)
         return nullptr;
 
     masm.push(scratch1);
     masm.push(scratch2);
     EmitCallVM(code, masm);
 
     EmitLeaveStubFrame(masm);
@@ -845,8 +845,29 @@ JitRuntime::generateBailoutTailStub(JSCo
     IonCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(code, "BailoutTailStub");
 #endif
 
     return code;
 }
+
+IonCode *
+JitRuntime::generateUnreachableTrap(JSContext *cx)
+{
+    // Generate a stand-alone stub that must never actually run: reaching it
+    // executes a breakpoint instruction.  It serves as a placeholder target
+    // for patchable VM calls until the real wrapper is linked in.
+    MacroAssembler masm;
+
+    masm.breakpoint();
+    masm.ret();
+
+    Linker linker(masm);
+    IonCode *trap = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+    if (!trap)
+        return nullptr;
+
+#ifdef JS_ION_PERF
+    // Fix: profile the code object actually created here (`trap`); the
+    // original referenced `wrapper`, which is not in scope and would not
+    // compile in JS_ION_PERF builds.
+    writePerfSpewerIonCodeProfile(trap, "Unreachable Trap");
+#endif
+
+    return trap;
+}