Bug 988789. r=luke
author: Douglas Crosher <dtc-moz@scieneer.com>
Thu, 15 May 2014 13:43:04 +1000
changeset 183662 32eea829181b1f885fffbdbba6c64ef4873fbe5e
parent 183661 61d0065e67a634c96b75d0c9001f5557dbc8c031
child 183663 7db77c1072cb0b191509c8c8d7654b141b3b1e4d
push id: 26799
push user: philringnalda@gmail.com
push date: Sun, 18 May 2014 00:55:16 +0000
treeherder: mozilla-central@00ef3a7d7aa7
reviewers: luke
bugs: 988789
milestone: 32.0a1
Bug 988789. r=luke
js/src/gc/Zone.cpp
js/src/jit/AsmJS.cpp
js/src/jit/AsmJSLink.cpp
js/src/jit/AsmJSModule.cpp
js/src/jit/AsmJSModule.h
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineDebugModeOSR.cpp
js/src/jit/BaselineIC.cpp
js/src/jit/BaselineJIT.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/Ion.cpp
js/src/jit/Ion.h
js/src/jit/IonCaches.cpp
js/src/jit/IonCode.h
js/src/jit/JitCompartment.h
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/mips/Assembler-mips.cpp
js/src/jit/mips/Trampoline-mips.cpp
js/src/jit/shared/Assembler-x86-shared.cpp
js/src/jsgc.cpp
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -199,17 +199,17 @@ Zone::discardJitCode(FreeOp *fop)
             JSScript *script = i.get<JSScript>();
             jit::FinishInvalidation<SequentialExecution>(fop, script);
 
             // Preserve JIT code that have been recently used in
             // parallel. Note that we mark their baseline scripts as active as
             // well to preserve them.
             if (script->hasParallelIonScript()) {
                 if (jit::ShouldPreserveParallelJITCode(runtimeFromMainThread(), script)) {
-                    script->parallelIonScript()->purgeCaches(this);
+                    script->parallelIonScript()->purgeCaches();
                     script->baselineScript()->setActive();
                 } else {
                     jit::FinishInvalidation<ParallelExecution>(fop, script);
                 }
             }
 
             /*
              * Discard baseline script if it's not marked as active. Note that
--- a/js/src/jit/AsmJS.cpp
+++ b/js/src/jit/AsmJS.cpp
@@ -7017,16 +7017,19 @@ CheckModule(ExclusiveContext *cx, AsmJSP
 
     if (!CheckModuleReturn(m))
         return false;
 
     TokenKind tk = PeekToken(m.parser());
     if (tk != TOK_EOF && tk != TOK_RC)
         return m.fail(nullptr, "top-level export (return) must be the last statement");
 
+    // The instruction cache is flushed when the module is dynamically linked, so flushing can be inhibited here.
+    AutoFlushICache afc("CheckModule", /* inhibit= */ true);
+
     ScopedJSDeletePtr<AsmJSModule> module;
     if (!FinishModule(m, &module))
         return false;
 
     bool storedInCache = StoreAsmJSModuleInCache(parser, *module, cx);
     module->staticallyLink(cx);
 
     m.buildCompilationTimeReport(storedInCache, compilationTimeReport);
--- a/js/src/jit/AsmJSLink.cpp
+++ b/js/src/jit/AsmJSLink.cpp
@@ -764,22 +764,34 @@ LinkAsmJS(JSContext *cx, unsigned argc, 
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     // The LinkAsmJS builtin (created by NewAsmJSModuleFunction) is an extended
     // function and stores its module in an extended slot.
     RootedFunction fun(cx, &args.callee().as<JSFunction>());
     Rooted<AsmJSModuleObject*> moduleObj(cx, &ModuleFunctionToModuleObject(fun));
 
+    // All ICache flushing of the module being linked has been inhibited under the
+    // assumption that the module is flushed after dynamic linking (when the last code
+    // mutation occurs).  Thus, enter an AutoFlushICache context for the entire module
+    // now.  The module range is set below.
+    AutoFlushICache afc("LinkAsmJS");
+
     // When a module is linked, it is dynamically specialized to the given
     // arguments (buffer, ffis). Thus, if the module is linked again (it is just
     // a function so it can be called multiple times), we need to clone a new
     // module.
-    if (moduleObj->module().isDynamicallyLinked() && !CloneModule(cx, &moduleObj))
-        return false;
+    if (moduleObj->module().isDynamicallyLinked()) {
+        if (!CloneModule(cx, &moduleObj))
+            return false;
+    } else {
+        // CloneModule already calls setAutoFlushICacheRange internally before patching
+        // the cloned module, so avoid calling twice.
+        moduleObj->module().setAutoFlushICacheRange();
+    }
 
     AsmJSModule &module = moduleObj->module();
 
     // Link the module by performing the link-time validation checks in the
     // asm.js spec and then patching the generated module to associate it with
     // the given heap (ArrayBuffer) and a new global data segment (the closure
     // state shared by the inner asm.js functions).
     if (!DynamicallyLinkModule(cx, args, module)) {
--- a/js/src/jit/AsmJSModule.cpp
+++ b/js/src/jit/AsmJSModule.cpp
@@ -59,19 +59,16 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
         JSC::X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         jit::Assembler::updateBoundsCheck(heapLength,
                                           (jit::Instruction*)(heapAccesses_[i].offset() + code_));
     }
-    // We already know the exact extent of areas that need to be patched, just make sure we
-    // flush all of them at once.
-    jit::AutoFlushCache::updateTop(uintptr_t(code_), pod.codeBytes_);
 #endif
 }
 
 static uint8_t *
 AllocateExecutableMemory(ExclusiveContext *cx, size_t totalBytes)
 {
     JS_ASSERT(totalBytes % AsmJSPageSize == 0);
 
@@ -297,16 +294,22 @@ AsmJSModule::restoreToInitialState(Array
             JS_ASSERT(ptr >= ptrBase);
             JSC::X86Assembler::setPointer(addr, (void *)(ptr - ptrBase));
         }
 #endif
     }
 }
 
 void
+AsmJSModule::setAutoFlushICacheRange()
+{
+    AutoFlushICache::setRange(uintptr_t(code_), pod.codeBytes_);
+}
+
+void
 AsmJSModule::staticallyLink(ExclusiveContext *cx)
 {
     // Process staticLinkData_
 
     interruptExit_ = code_ + staticLinkData_.interruptExitOffset;
 
     for (size_t i = 0; i < staticLinkData_.relativeLinks.length(); i++) {
         RelativeLink link = staticLinkData_.relativeLinks[i];
@@ -865,16 +868,17 @@ AsmJSModule::deserialize(ExclusiveContex
     (cursor = DeserializeVector(cx, cursor, &functionNames_)) &&
     (cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) &&
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     (cursor = DeserializeVector(cx, cursor, &profiledFunctions_)) &&
 #endif
     (cursor = staticLinkData_.deserialize(cx, cursor));
 
     loadedFromCache_ = true;
+
     return cursor;
 }
 
 // When a module is cloned, we memcpy its executable code. If, right before or
 // during the clone, another thread calls AsmJSModule::protectCode() then the
 // executable code will become inaccessible. In theory, we could take away only
 // PROT_EXEC, but this seems to break emulators.
 class AutoUnprotectCodeForClone
@@ -935,16 +939,20 @@ AsmJSModule::clone(JSContext *cx, Scoped
         !ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) ||
         !staticLinkData_.clone(cx, &out.staticLinkData_))
     {
         return false;
     }
 
     out.loadedFromCache_ = loadedFromCache_;
 
+    // We already know the exact extent of areas that need to be patched, just make sure we
+    // flush all of them at once.
+    out.setAutoFlushICacheRange();
+
     out.restoreToInitialState(maybeHeap_, cx);
     return true;
 }
 
 void
 AsmJSModule::protectCode(JSRuntime *rt) const
 {
     JS_ASSERT(rt->currentThreadOwnsInterruptLock());
@@ -1347,16 +1355,23 @@ js::LookupAsmJSModuleInCache(ExclusiveCo
     uint32_t funcStart = parser.pc->maybeFunction->pn_body->pn_pos.begin;
     uint32_t offsetToEndOfUseAsm = parser.tokenStream.currentToken().pos.end;
     bool strict = parser.pc->sc->strict && !parser.pc->sc->hasExplicitUseStrict();
     ScopedJSDeletePtr<AsmJSModule> module(
         cx->new_<AsmJSModule>(parser.ss, funcStart, offsetToEndOfUseAsm, strict));
     if (!module)
         return false;
     cursor = module->deserialize(cx, cursor);
+
+    // No need to flush the instruction cache now; it will be flushed when the module is dynamically linked.
+    AutoFlushICache afc("LookupAsmJSModuleInCache", /* inhibit= */ true);
+    // We already know the exact extent of areas that need to be patched, just make sure we
+    // flush all of them at once.
+    module->setAutoFlushICacheRange();
+
     if (!cursor)
         return false;
 
     bool atEnd = cursor == entry.memory + entry.serializedSize;
     MOZ_ASSERT(atEnd, "Corrupt cache file");
     if (!atEnd)
         return true;
 
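The asm.js changes above and in AsmJSLink.cpp defer all instruction-cache flushing of a module until it is dynamically linked: compilation and cache deserialization run under an inhibited AutoFlushICache context, and LinkAsmJS opens the one context whose destructor performs the real flush over the whole code range. Below is a minimal sketch of that flow, not part of the changeset; the wrapper names are placeholders, and the real entry points are CheckModule, LookupAsmJSModuleInCache and LinkAsmJS.

#include "jit/AsmJSModule.h"
#include "jit/IonCode.h"   // AutoFlushICache

using namespace js;
using namespace js::jit;

// Sketch only: the compile side inhibits flushing for a module that will be
// flushed again at dynamic-link time.
static void
CompileSideSketch(AsmJSModule &module)
{
    AutoFlushICache afc("CompileSideSketch", /* inhibit = */ true);
    module.setAutoFlushICacheRange();
    // ... emit code and patch links; deferred flushes are dropped because
    // the context is inhibited ...
}

// Sketch only: the link side opens the context that performs the single
// merged flush of the whole module when it goes out of scope.
static void
LinkSideSketch(AsmJSModule &module)
{
    AutoFlushICache afc("LinkSideSketch");
    module.setAutoFlushICacheRange();
    // ... patch heap accesses and global data ...
}   // ~AutoFlushICache flushes the module's whole code range once.
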
--- a/js/src/jit/AsmJSModule.h
+++ b/js/src/jit/AsmJSModule.h
@@ -818,16 +818,17 @@ class AsmJSModule
     bool addAbsoluteLink(AbsoluteLink link) {
         return staticLinkData_.absoluteLinks.append(link);
     }
     void setInterruptOffset(uint32_t offset) {
         staticLinkData_.interruptExitOffset = offset;
     }
 
     void restoreToInitialState(ArrayBufferObject *maybePrevBuffer, ExclusiveContext *cx);
+    void setAutoFlushICacheRange();
     void staticallyLink(ExclusiveContext *cx);
 
     uint8_t *codeBase() const {
         JS_ASSERT(code_);
         JS_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
         return code_;
     }
 
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -102,16 +102,17 @@ BaselineCompiler::compile()
     if (!emitOutOfLinePostBarrierSlot())
         return Method_Error;
 #endif
 
     if (masm.oom())
         return Method_Error;
 
     Linker linker(masm);
+    AutoFlushICache afc("Baseline");
     JitCode *code = linker.newCode<CanGC>(cx, JSC::BASELINE_CODE);
     if (!code)
         return Method_Error;
 
     JSObject *templateScope = nullptr;
     if (script->functionNonDelazifying()) {
         RootedFunction fun(cx, script->functionNonDelazifying());
         if (fun->isHeavyweight()) {
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -785,16 +785,17 @@ JitRuntime::generateBaselineDebugModeOSR
     masm.pop(target);
     masm.pop(BaselineFrameReg);
     masm.popValue(R1);
     masm.popValue(R0);
 
     masm.jump(target);
 
     Linker linker(masm);
+    AutoFlushICache afc("BaselineDebugModeOSRHandler");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
     if (!code)
         return nullptr;
 
     noFrameRegPopOffset.fixup(&masm);
     *noFrameRegPopOffsetOut = noFrameRegPopOffset.offset();
 
 #ifdef JS_ION_PERF
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -586,20 +586,20 @@ ICStubCompiler::getStubCode()
 
     // Compile new stubcode.
     IonContext ictx(cx, nullptr);
     MacroAssembler masm;
 #ifdef JS_CODEGEN_ARM
     masm.setSecondScratchReg(BaselineSecondScratchReg);
 #endif
 
-    AutoFlushCache afc("ICStubCompiler::getStubCode", cx->runtime()->jitRuntime());
     if (!generateStubCode(masm))
         return nullptr;
     Linker linker(masm);
+    AutoFlushICache afc("getStubCode");
     Rooted<JitCode *> newStubCode(cx, linker.newCode<CanGC>(cx, JSC::BASELINE_CODE));
     if (!newStubCode)
         return nullptr;
 
     // After generating code, run postGenerateStubCode()
     if (!postGenerateStubCode(masm, newStubCode))
         return nullptr;
 
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -108,17 +108,16 @@ EnterBaseline(JSContext *cx, EnterJitDat
 
     // Caller must construct |this| before invoking the Ion function.
     JS_ASSERT_IF(data.constructing, data.maxArgv[0].isObject());
 
     data.result.setInt32(data.numActualArgs);
     {
         AssertCompartmentUnchanged pcc(cx);
         JitActivation activation(cx, data.constructing);
-        AutoFlushInhibitor afi(cx->runtime()->jitRuntime());
 
         if (data.osrFrame)
             data.osrFrame->setRunningInJit();
 
         JS_ASSERT_IF(data.osrFrame, !IsJSDEnabled(cx));
 
         // Single transition point from Interpreter to Baseline.
         CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv, data.osrFrame, data.calleeToken,
@@ -230,17 +229,16 @@ jit::BaselineCompile(JSContext *cx, JSSc
         return Method_Error;
 
     IonContext ictx(cx, temp);
 
     BaselineCompiler compiler(cx, *temp, script);
     if (!compiler.init())
         return Method_Error;
 
-    AutoFlushCache afc("BaselineJIT", cx->runtime()->jitRuntime());
     MethodStatus status = compiler.compile();
 
     JS_ASSERT_IF(status == Method_Compiled, script->hasBaselineScript());
     JS_ASSERT_IF(status != Method_Compiled, !script->hasBaselineScript());
 
     if (status == Method_CantCompile)
         script->setBaselineScript(cx, BASELINE_DISABLED_SCRIPT);
 
@@ -759,22 +757,16 @@ BaselineScript::toggleDebugTraps(JSScrip
     JS_ASSERT(script->baselineScript() == this);
 
     // Only scripts compiled for debug mode have toggled calls.
     if (!debugMode())
         return;
 
     SrcNoteLineScanner scanner(script->notes(), script->lineno());
 
-    JSRuntime *rt = script->runtimeFromMainThread();
-    IonContext ictx(CompileRuntime::get(rt),
-                    CompileCompartment::get(script->compartment()),
-                    nullptr);
-    AutoFlushCache afc("DebugTraps", rt->jitRuntime());
-
     for (uint32_t i = 0; i < numPCMappingIndexEntries(); i++) {
         PCMappingIndexEntry &entry = pcMappingIndexEntry(i);
 
         CompactBufferReader reader(pcMappingReader(i));
         jsbytecode *curPC = script->offsetToPC(entry.pcOffset);
         uint32_t nativeOffset = entry.nativeOffset;
 
         JS_ASSERT(script->containsPC(curPC));
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -5234,16 +5234,17 @@ JitCompartment::generateStringConcatStub
     masm.pop(temp2);
     masm.pop(temp1);
 
     masm.bind(&failure);
     masm.movePtr(ImmPtr(nullptr), output);
     masm.ret();
 
     Linker linker(masm);
+    AutoFlushICache afc("StringConcatStub");
     JitCode *code = linker.newCode<CanGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "StringConcatStub");
 #endif
 
     return code;
 }
@@ -5268,16 +5269,17 @@ JitRuntime::generateMallocStub(JSContext
     masm.passABIArg(regNBytes);
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MallocWrapper));
     masm.storeCallResult(regReturn);
 
     masm.PopRegsInMask(regs);
     masm.ret();
 
     Linker linker(masm);
+    AutoFlushICache afc("MallocStub");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "MallocStub");
 #endif
 
     return code;
 }
@@ -5298,16 +5300,17 @@ JitRuntime::generateFreeStub(JSContext *
     masm.passABIArg(regSlots);
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js_free));
 
     masm.PopRegsInMask(regs);
 
     masm.ret();
 
     Linker linker(masm);
+    AutoFlushICache afc("FreeStub");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "FreeStub");
 #endif
 
     return code;
 }
@@ -6567,16 +6570,17 @@ CodeGenerator::link(JSContext *cx, types
     // Make sure we don't segv while filling in the code, to avoid deadlocking
     // inside the signal handler.
     cx->runtime()->jitRuntime()->ensureIonCodeAccessible(cx->runtime());
 
     // Implicit interrupts are used only for sequential code. In parallel mode
     // use the normal executable allocator so that we cannot segv during
     // execution off the main thread.
     Linker linker(masm);
+    AutoFlushICache afc("IonLink");
     JitCode *code = (executionMode == SequentialExecution)
                     ? linker.newCodeForIonScript(cx)
                     : linker.newCode<CanGC>(cx, JSC::ION_CODE);
     if (!code) {
         // Use js_free instead of IonScript::Destroy: the cache list and
         // backedge list are still uninitialized.
         js_free(ionScript);
         recompileInfo.compilerOutput(cx->zone()->types)->invalidate();
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -157,17 +157,16 @@ JitRuntime::JitRuntime()
     argumentsRectifierReturnAddr_(nullptr),
     parallelArgumentsRectifier_(nullptr),
     invalidator_(nullptr),
     debugTrapHandler_(nullptr),
     forkJoinGetSliceStub_(nullptr),
     baselineDebugModeOSRHandler_(nullptr),
     functionWrappers_(nullptr),
     osrTempData_(nullptr),
-    flusher_(nullptr),
     ionCodeProtected_(false),
     ionReturnOverride_(MagicValue(JS_ARG_POISON))
 {
 }
 
 JitRuntime::~JitRuntime()
 {
     js_delete(functionWrappers_);
@@ -182,17 +181,16 @@ bool
 JitRuntime::initialize(JSContext *cx)
 {
     JS_ASSERT(cx->runtime()->currentThreadHasExclusiveAccess());
     JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());
 
     AutoCompartment ac(cx, cx->atomsCompartment());
 
     IonContext ictx(cx, nullptr);
-    AutoFlushCache afc("JitRuntime::initialize", this);
 
     execAlloc_ = cx->runtime()->getExecAlloc(cx);
     if (!execAlloc_)
         return false;
 
     if (!cx->compartment()->ensureJitCompartmentExists(cx))
         return false;
 
@@ -1171,29 +1169,26 @@ IonScript::Destroy(FreeOp *fop, IonScrip
 
 void
 IonScript::toggleBarriers(bool enabled)
 {
     method()->togglePreBarriers(enabled);
 }
 
 void
-IonScript::purgeCaches(Zone *zone)
+IonScript::purgeCaches()
 {
     // Don't reset any ICs if we're invalidated, otherwise, repointing the
     // inline jump could overwrite an invalidation marker. These ICs can
     // no longer run, however, the IC slow paths may be active on the stack.
     // ICs therefore are required to check for invalidation before patching,
     // to ensure the same invariant.
     if (invalidated())
         return;
 
-    JSRuntime *rt = zone->runtimeFromMainThread();
-    IonContext ictx(CompileRuntime::get(rt));
-    AutoFlushCache afc("purgeCaches", rt->jitRuntime());
     for (size_t i = 0; i < numCaches(); i++)
         getCacheFromIndex(i).reset();
 }
 
 void
 IonScript::destroyCaches()
 {
     for (size_t i = 0; i < numCaches(); i++)
@@ -1243,18 +1238,16 @@ IonScript::unlinkFromRuntime(FreeOp *fop
 
 void
 jit::ToggleBarriers(JS::Zone *zone, bool needs)
 {
     JSRuntime *rt = zone->runtimeFromMainThread();
     if (!rt->hasJitRuntime())
         return;
 
-    IonContext ictx(CompileRuntime::get(rt));
-    AutoFlushCache afc("ToggleBarriers", rt->jitRuntime());
     for (gc::ZoneCellIterUnderGC i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
         JSScript *script = i.get<JSScript>();
         if (script->hasIonScript())
             script->ionScript()->toggleBarriers(needs);
         if (script->hasBaselineScript())
             script->baselineScript()->toggleBarriers(needs);
     }
 
@@ -1729,17 +1722,16 @@ AttachFinishedCompilations(JSContext *cx
             // previously, though any GC activity would discard the builder.
             codegen->masm.constructRoot(cx);
 
             bool success;
             {
                 // Release the worker thread lock and root the compiler for GC.
                 AutoTempAllocatorRooter root(cx, &builder->alloc());
                 AutoUnlockWorkerThreadState unlock;
-                AutoFlushCache afc("AttachFinishedCompilations", cx->runtime()->jitRuntime());
                 success = codegen->link(cx, builder->constraints());
             }
 
             if (!success) {
                 // Silently ignore OOM during code generation. The caller is
                 // InvokeInterruptCallback, which always runs at a
                 // nondeterministic time. It's not OK to throw a catchable
                 // exception from there.
@@ -1875,18 +1867,16 @@ IonCompile(JSContext *cx, JSScript *scri
 
     BaselineFrameInspector *baselineFrameInspector = nullptr;
     if (baselineFrame) {
         baselineFrameInspector = NewBaselineFrameInspector(temp, baselineFrame, info);
         if (!baselineFrameInspector)
             return AbortReason_Alloc;
     }
 
-    AutoFlushCache afc("IonCompile", cx->runtime()->jitRuntime());
-
     AutoTempAllocatorRooter root(cx, temp);
     types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(*temp);
     if (!constraints)
         return AbortReason_Alloc;
 
     const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(optimizationLevel);
     const JitCompileOptions options(cx);
 
@@ -2412,17 +2402,16 @@ EnterIon(JSContext *cx, EnterJitData &da
 
     // Caller must construct |this| before invoking the Ion function.
     JS_ASSERT_IF(data.constructing, data.maxArgv[0].isObject());
 
     data.result.setInt32(data.numActualArgs);
     {
         AssertCompartmentUnchanged pcc(cx);
         JitActivation activation(cx, data.constructing);
-        AutoFlushInhibitor afi(cx->runtime()->jitRuntime());
 
         CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv, /* osrFrame = */nullptr, data.calleeToken,
                             /* scopeChain = */ nullptr, 0, data.result.address());
     }
 
     JS_ASSERT(!cx->runtime()->jitRuntime()->hasIonReturnOverride());
 
     // Jit callers wrap primitive constructor return.
@@ -2596,17 +2585,17 @@ InvalidateActivation(FreeOp *fop, uint8_
         if (!invalidateAll && !script->ionScript()->invalidated())
             continue;
 
         IonScript *ionScript = script->ionScript();
 
         // Purge ICs before we mark this script as invalidated. This will
         // prevent lastJump_ from appearing to be a bogus pointer, just
         // in case anyone tries to read it.
-        ionScript->purgeCaches(script->zone());
+        ionScript->purgeCaches();
 
         // Clean up any pointers from elsewhere in the runtime to this IonScript
         // which is about to become disconnected from its JSScript.
         ionScript->unlinkFromRuntime(fop);
 
         // This frame needs to be invalidated. We do the following:
         //
         // 1. Increment the reference counter to keep the ionScript alive
@@ -2677,32 +2666,29 @@ jit::StopAllOffThreadCompilations(JSComp
 void
 jit::InvalidateAll(FreeOp *fop, Zone *zone)
 {
     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
         StopAllOffThreadCompilations(comp);
 
     for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter) {
         if (iter->compartment()->zone() == zone) {
-            IonContext ictx(CompileRuntime::get(fop->runtime()));
-            AutoFlushCache afc("InvalidateAll", fop->runtime()->jitRuntime());
             IonSpew(IonSpew_Invalidate, "Invalidating all frames for GC");
             InvalidateActivation(fop, iter.jitTop(), true);
         }
     }
 }
 
 
 void
 jit::Invalidate(types::TypeZone &types, FreeOp *fop,
                 const Vector<types::RecompileInfo> &invalid, bool resetUses,
                 bool cancelOffThread)
 {
     IonSpew(IonSpew_Invalidate, "Start invalidation.");
-    AutoFlushCache afc ("Invalidate", fop->runtime()->jitRuntime());
 
     // Add an invalidation reference to all invalidated IonScripts to indicate
     // to the traversal which frames have been invalidated.
     size_t numInvalidations = 0;
     for (size_t i = 0; i < invalid.length(); i++) {
         const types::CompilerOutput &co = *invalid[i].compilerOutput(types);
         if (!co.isValid())
             continue;
@@ -2934,76 +2920,159 @@ jit::ForbidCompilation(JSContext *cx, JS
 
       default:
         MOZ_ASSUME_UNREACHABLE("No such execution mode");
     }
 
     MOZ_ASSUME_UNREACHABLE("No such execution mode");
 }
 
+AutoFlushICache *
+PerThreadData::autoFlushICache() const
+{
+    return autoFlushICache_;
+}
+
 void
-AutoFlushCache::updateTop(uintptr_t p, size_t len)
+PerThreadData::setAutoFlushICache(AutoFlushICache *afc)
+{
+    autoFlushICache_ = afc;
+}
+
+// Set the range for the merging of flushes.  The flushing is deferred until the end of
+// the AutoFlushICache context.  Subsequent flushing within this range is also
+// deferred.  The range is only expected to be set once for each AutoFlushICache
+// context, and setting it is required to occur within an AutoFlushICache context
+// (hence the assertions below).
+void
+AutoFlushICache::setRange(uintptr_t start, size_t len)
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
+    JS_ASSERT(afc);
+    JS_ASSERT(!afc->start_);
+    IonSpewCont(IonSpew_CacheFlush, "(%x %x):", start, len);
+
+    uintptr_t stop = start + len;
+    afc->start_ = start;
+    afc->stop_ = stop;
+#endif
+}
+
+// Flush the instruction cache.
+//
+// If called within a dynamic AutoFlushICache context and if the range is already pending
+// flushing for this AutoFlushICache context then the request is ignored with the
+// understanding that it will be flushed on exit from the AutoFlushICache context.
+// Otherwise the range is flushed immediately.
+//
+// Updates outside the current code object are typically the exception, so they are
+// flushed immediately rather than merged.
+//
+// For efficiency it is expected that all large ranges will be flushed within an
+// AutoFlushICache context, hence the assertion below.  Hitting the assertion does not
+// necessarily indicate a program fault, but it may point to a lost opportunity to merge
+// cache flushing; it can be corrected by wrapping the call in an AutoFlushICache context.
+void
+AutoFlushICache::flush(uintptr_t start, size_t len)
 {
-    IonContext *ictx = MaybeGetIonContext();
-    JitRuntime *jrt = (ictx != nullptr) ? const_cast<JitRuntime *>(ictx->runtime->jitRuntime()) : nullptr;
-    if (!jrt || !jrt->flusher())
-        JSC::ExecutableAllocator::cacheFlush((void*)p, len);
-    else
-        jrt->flusher()->update(p, len);
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
+    if (!afc) {
+        IonSpewCont(IonSpew_CacheFlush, "#");
+        JSC::ExecutableAllocator::cacheFlush((void*)start, len);
+        JS_ASSERT(len <= 16);
+        return;
+    }
+
+    uintptr_t stop = start + len;
+    if (start >= afc->start_ && stop <= afc->stop_) {
+        // Update is within the pending flush range, so defer to the end of the context.
+        IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "-" : "=");
+        return;
+    }
+
+    IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
+    JSC::ExecutableAllocator::cacheFlush((void *)start, len);
+#endif
 }
 
-AutoFlushCache::AutoFlushCache(const char *nonce, JitRuntime *rt)
+// Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in error paths
+// where the changes are being abandoned.
+void
+AutoFlushICache::setInhibit()
+{
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
+    JS_ASSERT(afc);
+    JS_ASSERT(afc->start_);
+    IonSpewCont(IonSpew_CacheFlush, "I");
+    afc->inhibit_ = true;
+#endif
+}
+
+// The common use case is merging cache flushes when preparing a code object.  In this
+// case the entire range of the code object is flushed, and as the code is patched
+// smaller redundant flushes could occur.  The design allows an AutoFlushICache dynamic
+// thread-local context to be declared, in which the range of the code object can be
+// set.  Flushing is then deferred until the end of this dynamic context, and smaller
+// flushes within the code range are folded into that single flush.  Flushing outside
+// this code range is not affected and proceeds immediately.
+//
+// In some cases flushing is not necessary, such as when compiling an asm.js module which
+// is flushed again when dynamically linked, and also in error paths that abandon the
+// code.  Flushing within the set code range can be inhibited within the AutoFlushICache
+// dynamic context by setting an inhibit flag.
+//
+// The JS compiler can be re-entered while within an AutoFlushICache dynamic context and
+// it is assumed that code being assembled or patched is not executed before the exit of
+// the respective AutoFlushICache dynamic context.
+//
+AutoFlushICache::AutoFlushICache(const char *nonce, bool inhibit)
   : start_(0),
     stop_(0),
     name_(nonce),
-    runtime_(rt),
-    used_(false)
-{
-    if (rt->flusher())
-        IonSpew(IonSpew_CacheFlush, "<%s ", nonce);
-    else
-        IonSpewCont(IonSpew_CacheFlush, "<%s ", nonce);
-
-    rt->setFlusher(this);
-}
-
-AutoFlushInhibitor::AutoFlushInhibitor(JitRuntime *rt)
-  : runtime_(rt),
-    afc(nullptr)
+    inhibit_(inhibit)
 {
-    afc = rt->flusher();
-
-    // Ensure that called functions get a fresh flusher.
-    rt->setFlusher(nullptr);
-
-    // Ensure the current flusher has been flushed.
-    if (afc) {
-        afc->flushAnyway();
-        IonSpewCont(IonSpew_CacheFlush, "}");
-    }
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+    PerThreadData *pt = TlsPerThreadData.get();
+    AutoFlushICache *afc = pt->PerThreadData::autoFlushICache();
+    if (afc)
+        IonSpew(IonSpew_CacheFlush, "<%s,%s%s ", nonce, afc->name_, inhibit ? " I" : "");
+    else
+        IonSpewCont(IonSpew_CacheFlush, "<%s%s ", nonce, inhibit ? " I" : "");
+
+    prev_ = afc;
+    pt->PerThreadData::setAutoFlushICache(this);
+#endif
 }
-AutoFlushInhibitor::~AutoFlushInhibitor()
+
+AutoFlushICache::~AutoFlushICache()
 {
-    JS_ASSERT(runtime_->flusher() == nullptr);
-
-    // Ensure any future modifications are recorded.
-    runtime_->setFlusher(afc);
-
-    if (afc)
-        IonSpewCont(IonSpew_CacheFlush, "{");
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+    PerThreadData *pt = TlsPerThreadData.get();
+    JS_ASSERT(pt->PerThreadData::autoFlushICache() == this);
+
+    if (!inhibit_ && start_)
+        JSC::ExecutableAllocator::cacheFlush((void *)start_, size_t(stop_ - start_));
+
+    IonSpewCont(IonSpew_CacheFlush, "%s%s>", name_, start_ ? "" : " U");
+    IonSpewFin(IonSpew_CacheFlush);
+    pt->PerThreadData::setAutoFlushICache(prev_);
+#endif
 }
 
 void
-jit::PurgeCaches(JSScript *script, Zone *zone)
+jit::PurgeCaches(JSScript *script)
 {
     if (script->hasIonScript())
-        script->ionScript()->purgeCaches(zone);
+        script->ionScript()->purgeCaches();
 
     if (script->hasParallelIonScript())
-        script->parallelIonScript()->purgeCaches(zone);
+        script->parallelIonScript()->purgeCaches();
 }
 
 size_t
 jit::SizeOfIonData(JSScript *script, mozilla::MallocSizeOf mallocSizeOf)
 {
     size_t result = 0;
 
     if (script->hasIonScript())
@@ -3100,17 +3169,16 @@ AutoDebugModeInvalidation::~AutoDebugMod
     // Don't discard active baseline scripts. They are recompiled for debug
     // mode.
     jit::MarkActiveBaselineScripts(zone);
 
     for (JitActivationIterator iter(rt); !iter.done(); ++iter) {
         JSCompartment *comp = iter->compartment();
         if (comp_ == comp || zone_ == comp->zone()) {
             IonContext ictx(CompileRuntime::get(rt));
-            AutoFlushCache afc("AutoDebugModeInvalidation", rt->jitRuntime());
             IonSpew(IonSpew_Invalidate, "Invalidating frames for debug mode toggle");
             InvalidateActivation(fop, iter.jitTop(), true);
         }
     }
 
     for (gc::ZoneCellIter i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
         JSScript *script = i.get<JSScript>();
         if (script->compartment() == comp_ || zone_) {
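The block comment added to Ion.cpp above describes the deferred-flush protocol in prose. Below is a minimal sketch, not part of the changeset, of the pattern the patch applies at each code-generation call site; the stub name is a placeholder, and on builds other than JS_CODEGEN_ARM/JS_CODEGEN_MIPS every AutoFlushICache operation is a no-op.

#include "jit/Ion.h"
#include "jit/IonCode.h"   // AutoFlushICache
#include "jit/Linker.h"

using namespace js::jit;

static JitCode *
GenerateStubSketch(JSContext *cx, MacroAssembler &masm)
{
    Linker linker(masm);
    // Open a per-thread flush context.  Assembler::executableCopy, run under
    // newCode, calls AutoFlushICache::setRange on the freshly copied buffer,
    // so AutoFlushICache::flush requests that land inside that range are
    // deferred and merged into one flush when afc goes out of scope.
    AutoFlushICache afc("GenerateStubSketch");
    return linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
}
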
--- a/js/src/jit/Ion.h
+++ b/js/src/jit/Ion.h
@@ -185,17 +185,17 @@ NumLocalsAndArgs(JSScript *script)
     if (JSFunction *fun = script->functionNonDelazifying())
         num += fun->nargs();
     return num;
 }
 
 void ForbidCompilation(JSContext *cx, JSScript *script);
 void ForbidCompilation(JSContext *cx, JSScript *script, ExecutionMode mode);
 
-void PurgeCaches(JSScript *script, JS::Zone *zone);
+void PurgeCaches(JSScript *script);
 size_t SizeOfIonData(JSScript *script, mozilla::MallocSizeOf mallocSizeOf);
 void DestroyIonScripts(FreeOp *fop, JSScript *script);
 void TraceIonScripts(JSTracer* trc, JSScript *script);
 
 void RequestInterruptForIonCode(JSRuntime *rt, JSRuntime::InterruptMode mode);
 
 bool RematerializeAllFrames(JSContext *cx, JSCompartment *comp);
 bool UpdateForDebugMode(JSContext *maybecx, JSCompartment *comp,
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -398,16 +398,17 @@ IonCache::attachStub(MacroAssembler &mas
     attacher.patchStubCodePointer(masm, code);
 }
 
 bool
 IonCache::linkAndAttachStub(JSContext *cx, MacroAssembler &masm, StubAttacher &attacher,
                             IonScript *ion, const char *attachKind)
 {
     Rooted<JitCode *> code(cx);
+    AutoFlushICache afc("IonCache");
     LinkStatus status = linkCode(cx, masm, ion, code.address());
     if (status != LINK_GOOD)
         return status != LINK_ERROR;
 
     attachStub(masm, attacher, code);
 
     if (pc_) {
         IonSpew(IonSpew_InlineCaches, "Cache %p(%s:%d/%d) generated %s %s stub at %p",
@@ -1687,18 +1688,16 @@ GetPropertyIC::update(JSContext *cx, siz
 {
     void *returnAddr;
     RootedScript outerScript(cx, GetTopIonJSScript(cx, &returnAddr));
     IonScript *ion = outerScript->ionScript();
 
     GetPropertyIC &cache = ion->getCache(cacheIndex).toGetProperty();
     RootedPropertyName name(cx, cache.name());
 
-    AutoFlushCache afc ("GetPropertyCache", cx->runtime()->jitRuntime());
-
     // Override the return value if we are invalidated (bug 728188).
     AutoDetectInvalidation adi(cx, vp.address(), ion);
 
     // If the cache is idempotent, we will redo the op in the interpreter.
     if (cache.idempotent())
         adi.disable();
 
     // For now, just stop generating new stubs once we hit the stub count
@@ -1863,19 +1862,16 @@ GetPropertyParIC::update(ForkJoinContext
         return true;
 
     {
         // Lock the context before mutating the cache. Ideally we'd like to do
         // finer-grained locking, with one lock per cache. However, generating
         // new jitcode uses a global ExecutableAllocator tied to the runtime.
         LockedJSContext ncx(cx);
 
-        // The flusher needs to be under lock.
-        AutoFlushCache afc("GetPropertyParCache", cx->runtime()->jitRuntime());
-
         if (cache.canAttachStub()) {
             bool alreadyStubbed;
             if (!cache.hasOrAddStubbedShape(ncx, obj->lastProperty(), &alreadyStubbed))
                 return cx->setPendingAbortFatal(ParallelBailoutFailedIC);
             if (alreadyStubbed)
                 return true;
 
             // See note about the stub limit in GetPropertyCache.
@@ -2743,18 +2739,16 @@ CanAttachNativeSetProp(HandleObject obj,
 
     return SetPropertyIC::CanAttachNone;
 }
 
 bool
 SetPropertyIC::update(JSContext *cx, size_t cacheIndex, HandleObject obj,
                       HandleValue value)
 {
-    AutoFlushCache afc ("SetPropertyCache", cx->runtime()->jitRuntime());
-
     void *returnAddr;
     RootedScript script(cx, GetTopIonJSScript(cx, &returnAddr));
     IonScript *ion = script->ionScript();
     SetPropertyIC &cache = ion->getCache(cacheIndex).toSetProperty();
     RootedPropertyName name(cx, cache.name());
     RootedId id(cx, AtomToId(name));
 
     // Stop generating new stubs once we hit the stub count limit, see
@@ -2860,17 +2854,16 @@ SetPropertyParIC::update(ForkJoinContext
     }
 
     SetPropertyIC::NativeSetPropCacheability canCache = SetPropertyIC::CanAttachNone;
     bool attachedStub = false;
 
     {
         // See note about locking context in GetPropertyParIC::update.
         LockedJSContext ncx(cx);
-        AutoFlushCache afc("SetPropertyParCache", cx->runtime()->jitRuntime());
 
         if (cache.canAttachStub()) {
             bool alreadyStubbed;
             if (!cache.hasOrAddStubbedShape(ncx, obj->lastProperty(), &alreadyStubbed))
                 return cx->setPendingAbortFatal(ParallelBailoutFailedIC);
             if (alreadyStubbed) {
                 return baseops::SetPropertyHelper<ParallelExecution>(
                     cx, obj, obj, id, baseops::Qualified, &v, cache.strict());
@@ -3420,18 +3413,16 @@ GetElementIC::update(JSContext *cx, size
     if (cache.isDisabled()) {
         if (!GetObjectElementOperation(cx, JSOp(*pc), obj, /* wasObject = */true, idval, res))
             return false;
         if (!cache.monitoredResult())
             types::TypeScript::Monitor(cx, script, pc, res);
         return true;
     }
 
-    AutoFlushCache afc("GetElementCache", cx->runtime()->jitRuntime());
-
     RootedId id(cx);
     if (!ValueToId<CanGC>(cx, idval, &id))
         return false;
 
     bool attachedStub = false;
     if (cache.canAttachStub()) {
         if (IsOptimizableArgumentsObjectForGetElem(obj, idval) &&
             !cache.hasArgumentsStub(obj->is<StrictArgumentsObject>()) &&
@@ -3963,17 +3954,16 @@ GetElementParIC::update(ForkJoinContext 
 
     // Avoid unnecessary locking if cannot attach stubs.
     if (!cache.canAttachStub())
         return true;
 
     {
         // See note about locking context in GetPropertyParIC::update.
         LockedJSContext ncx(cx);
-        AutoFlushCache afc("GetElementParCache", cx->runtime()->jitRuntime());
 
         if (cache.canAttachStub()) {
             bool alreadyStubbed;
             if (!cache.hasOrAddStubbedShape(ncx, obj->lastProperty(), &alreadyStubbed))
                 return cx->setPendingAbortFatal(ParallelBailoutFailedIC);
             if (alreadyStubbed)
                 return true;
 
@@ -4154,18 +4144,16 @@ IsCacheableScopeChain(JSObject *scopeCha
     }
 
     MOZ_ASSUME_UNREACHABLE("Invalid scope chain");
 }
 
 JSObject *
 BindNameIC::update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain)
 {
-    AutoFlushCache afc ("BindNameCache", cx->runtime()->jitRuntime());
-
     RootedScript outerScript(cx, GetTopIonJSScript(cx));
     IonScript *ion = outerScript->ionScript();
     BindNameIC &cache = ion->getCache(cacheIndex).toBindName();
     HandlePropertyName name = cache.name();
 
     RootedObject holder(cx);
     if (scopeChain->is<GlobalObject>()) {
         holder = scopeChain;
@@ -4285,18 +4273,16 @@ IsCacheableNameCallGetter(JSObject *scop
     return IsCacheableGetPropCallNative(obj, holder, shape) ||
         IsCacheableGetPropCallPropertyOp(obj, holder, shape);
 }
 
 bool
 NameIC::update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain,
                MutableHandleValue vp)
 {
-    AutoFlushCache afc ("GetNameCache", cx->runtime()->jitRuntime());
-
     void *returnAddr;
     RootedScript outerScript(cx, GetTopIonJSScript(cx, &returnAddr));
     IonScript *ion = outerScript->ionScript();
 
     NameIC &cache = ion->getCache(cacheIndex).toName();
     RootedPropertyName name(cx, cache.name());
 
     RootedScript script(cx);
@@ -4349,18 +4335,16 @@ CallsiteCloneIC::attach(JSContext *cx, H
     attacher.jumpRejoin(masm);
 
     return linkAndAttachStub(cx, masm, attacher, ion, "generic");
 }
 
 JSObject *
 CallsiteCloneIC::update(JSContext *cx, size_t cacheIndex, HandleObject callee)
 {
-    AutoFlushCache afc ("CallsiteCloneCache", cx->runtime()->jitRuntime());
-
     // Act as the identity for functions that are not clone-at-callsite, as we
     // generate this cache as long as some callees are clone-at-callsite.
     RootedFunction fun(cx, &callee->as<JSFunction>());
     if (!fun->hasScript() || !fun->nonLazyScript()->shouldCloneAtCallsite())
         return fun;
 
     RootedScript outerScript(cx, GetTopIonJSScript(cx));
     IonScript *ion = outerScript->ionScript();
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -535,17 +535,17 @@ struct IonScript
     size_t runtimeSize() const {
         return runtimeSize_;
     }
     CacheLocation *getCacheLocs(uint32_t locIndex) {
         JS_ASSERT(locIndex < runtimeSize_);
         return (CacheLocation *) &runtimeData()[locIndex];
     }
     void toggleBarriers(bool enabled);
-    void purgeCaches(JS::Zone *zone);
+    void purgeCaches();
     void destroyCaches();
     void unlinkFromRuntime(FreeOp *fop);
     void copySnapshots(const SnapshotWriter *writer);
     void copyRecovers(const RecoverWriter *writer);
     void copyBailoutTable(const SnapshotOffset *table);
     void copyConstants(const Value *vp);
     void copySafepointIndices(const SafepointIndex *firstSafepointIndex, MacroAssembler &masm);
     void copyOsiIndices(const OsiIndex *firstOsiIndex, MacroAssembler &masm);
@@ -769,50 +769,33 @@ struct IonScriptCounts
     }
 };
 
 struct VMFunction;
 
 class JitCompartment;
 class JitRuntime;
 
-struct AutoFlushCache
+struct AutoFlushICache
 {
   private:
     uintptr_t start_;
     uintptr_t stop_;
     const char *name_;
-    JitRuntime *runtime_;
-    bool used_;
+    bool inhibit_;
+    AutoFlushICache *prev_;
 
   public:
-    void update(uintptr_t p, size_t len);
-    static void updateTop(uintptr_t p, size_t len);
-    ~AutoFlushCache();
-    AutoFlushCache(const char *nonce, JitRuntime *rt);
-    void flushAnyway();
+    static void setRange(uintptr_t p, size_t len);
+    static void flush(uintptr_t p, size_t len);
+    static void setInhibit();
+    ~AutoFlushICache();
+    AutoFlushICache(const char *nonce, bool inhibit=false);
 };
 
-// If you are currently in the middle of modifing Ion-compiled code, which
-// is going to be flushed at *some* point, but determine that you *must*
-// call a function *right* *now*, two things can go wrong:
-//   1)  The flusher that you were using is still active, but you are about to
-//       enter jitted code, so it needs to be flushed
-//   2) the called function can re-enter a compilation/modification path which
-//       will use your AFC, and thus not flush when his compilation is done
-
-struct AutoFlushInhibitor
-{
-  private:
-    JitRuntime *runtime_;
-    AutoFlushCache *afc;
-  public:
-    AutoFlushInhibitor(JitRuntime *rt);
-    ~AutoFlushInhibitor();
-};
 } // namespace jit
 
 namespace gc {
 
 inline bool
 IsMarked(const jit::VMFunction *)
 {
     // VMFunction are only static objects which are used by WeakMaps as keys.
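AutoFlushICache contexts chain through prev_, so they nest: a re-entered compilation gets its own context and the outer one is restored on exit, giving each code object exactly one merged flush. A small sketch of that nesting behaviour, not part of the changeset, with hypothetical start/length arguments (meaningful only on ARM/MIPS; elsewhere these calls compile away):

#include "jit/IonCode.h"   // AutoFlushICache

using js::jit::AutoFlushICache;

static void
NestedFlushSketch(uintptr_t outerStart, size_t outerLen,
                  uintptr_t innerStart, size_t innerLen)
{
    AutoFlushICache outer("outerSketch");
    AutoFlushICache::setRange(outerStart, outerLen);
    {
        // A nested compilation opens its own context; flushes that fall in
        // the inner range are deferred against it, not the outer context.
        AutoFlushICache inner("innerSketch");
        AutoFlushICache::setRange(innerStart, innerLen);
    }   // The inner range is flushed once and the outer context is restored.
}       // The outer range is flushed once here.
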
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -204,19 +204,16 @@ class JitRuntime
     typedef WeakCache<const VMFunction *, JitCode *> VMWrapperMap;
     VMWrapperMap *functionWrappers_;
 
     // Buffer for OSR from baseline to Ion. To avoid holding on to this for
     // too long, it's also freed in JitCompartment::mark and in EnterBaseline
     // (after returning from JIT code).
     uint8_t *osrTempData_;
 
-    // Keep track of memoryregions that are going to be flushed.
-    AutoFlushCache *flusher_;
-
     // Whether all Ion code in the runtime is protected, and will fault if it
     // is accessed.
     bool ionCodeProtected_;
 
     // If signal handlers are installed, this contains all loop backedges for
     // IonScripts in the runtime.
     InlineList<PatchableBackedge> backedgeList_;
 
@@ -257,24 +254,16 @@ class JitRuntime
     ~JitRuntime();
     bool initialize(JSContext *cx);
 
     uint8_t *allocateOsrTempData(size_t size);
     void freeOsrTempData();
 
     static void Mark(JSTracer *trc);
 
-    AutoFlushCache *flusher() {
-        return flusher_;
-    }
-    void setFlusher(AutoFlushCache *fl) {
-        if (!flusher_ || !fl)
-            flusher_ = fl;
-    }
-
     JSC::ExecutableAllocator *execAlloc() const {
         return execAlloc_;
     }
 
     JSC::ExecutableAllocator *getIonAlloc(JSContext *cx) {
         JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());
         return ionAlloc_ ? ionAlloc_ : createIonAlloc(cx);
     }
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -563,17 +563,17 @@ Assembler::finish()
     }
 }
 
 void
 Assembler::executableCopy(uint8_t *buffer)
 {
     JS_ASSERT(isFinished);
     m_buffer.executableCopy(buffer);
-    AutoFlushCache::updateTop((uintptr_t)buffer, m_buffer.size());
+    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
 }
 
 void
 Assembler::resetCounter()
 {
     m_buffer.resetCounter();
 }
 
@@ -2410,26 +2410,26 @@ Assembler::retargetNearBranch(Instructio
     JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>());
     if (i->is<InstBLImm>())
         new (i) InstBLImm(BOffImm(offset), cond);
     else
         new (i) InstBImm(BOffImm(offset), cond);
 
     // Flush the cache, since an instruction was overwritten
     if (final)
-        AutoFlushCache::updateTop(uintptr_t(i), 4);
+        AutoFlushICache::flush(uintptr_t(i), 4);
 }
 
 void
 Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond)
 {
     int32_t offset = reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i);
     if (!i->is<InstLDR>()) {
         new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
-        AutoFlushCache::updateTop(uintptr_t(i), 4);
+        AutoFlushICache::flush(uintptr_t(i), 4);
     }
     *slot = dest;
 
 }
 
 struct PoolHeader : Instruction {
     struct Header
     {
@@ -2521,35 +2521,35 @@ Assembler::patchWrite_NearCall(CodeLocat
     Instruction *inst = (Instruction *) start.raw();
     // Overwrite whatever instruction used to be here with a call.
     // Since the destination is in the same function, it will be within range of the 24<<2 byte
     // bl instruction.
     uint8_t *dest = toCall.raw();
     new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always);
     // Ensure everyone sees the code that was just written into memory.
 
-    AutoFlushCache::updateTop(uintptr_t(inst), 4);
+    AutoFlushICache::flush(uintptr_t(inst), 4);
 
 }
 void
 Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                    PatchedImmPtr expectedValue)
 {
     Instruction *ptr = (Instruction *) label.raw();
     InstructionIterator iter(ptr);
     Register dest;
     Assembler::RelocStyle rs;
     DebugOnly<const uint32_t *> val = getPtr32Target(&iter, &dest, &rs);
     JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value));
     reinterpret_cast<MacroAssemblerARM*>(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
                                                                  dest, Always, rs, ptr);
     // L_LDR won't cause any instructions to be updated.
     if (rs != L_LDR) {
-        AutoFlushCache::updateTop(uintptr_t(ptr), 4);
-        AutoFlushCache::updateTop(uintptr_t(ptr->next()), 4);
+        AutoFlushICache::flush(uintptr_t(ptr), 4);
+        AutoFlushICache::flush(uintptr_t(ptr->next()), 4);
     }
 }
 
 void
 Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
 {
     patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
 }
@@ -2671,17 +2671,17 @@ Assembler::ToggleToJmp(CodeLocationLabel
     uint32_t *ptr = (uint32_t *)inst_.raw();
 
     DebugOnly<Instruction *> inst = (Instruction *)inst_.raw();
     JS_ASSERT(inst->is<InstCMP>());
 
     // Zero bits 20-27, then set 24-27 to be correct for a branch.
     // 20-23 will be party of the B's immediate, and should be 0.
     *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
-    AutoFlushCache::updateTop((uintptr_t)ptr, 4);
+    AutoFlushICache::flush(uintptr_t(ptr), 4);
 }
 
 void
 Assembler::ToggleToCmp(CodeLocationLabel inst_)
 {
     uint32_t *ptr = (uint32_t *)inst_.raw();
 
     DebugOnly<Instruction *> inst = (Instruction *)inst_.raw();
@@ -2694,17 +2694,17 @@ Assembler::ToggleToCmp(CodeLocationLabel
     // Also make sure that the CMP is valid. Part of having a valid CMP is that
     // all of the bits describing the destination in most ALU instructions are
     // all unset (looks like it is encoding r0).
     JS_ASSERT(toRD(*inst) == r0);
 
     // Zero out bits 20-27, then set them to be correct for a compare.
     *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
 
-    AutoFlushCache::updateTop((uintptr_t)ptr, 4);
+    AutoFlushICache::flush(uintptr_t(ptr), 4);
 }
 
 void
 Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
 {
     Instruction *inst = (Instruction *)inst_.raw();
     JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
 
@@ -2724,17 +2724,17 @@ Assembler::ToggleCall(CodeLocationLabel 
         return;
     }
 
     if (enabled)
         *inst = InstBLXReg(ScratchRegister, Always);
     else
         *inst = InstNOP();
 
-    AutoFlushCache::updateTop(uintptr_t(inst), 4);
+    AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
 {
     JS_ASSERT(inst->is<InstCMP>());
     InstCMP *cmp = inst->as<InstCMP>();
 
     Register index;
@@ -2746,88 +2746,16 @@ void Assembler::updateBoundsCheck(uint32
     Imm8 imm8 = Imm8(heapSize);
     JS_ASSERT(!imm8.invalid);
 
     *inst = InstALU(InvalidReg, index, imm8, op_cmp, SetCond, Always);
     // NOTE: we don't update the Auto Flush Cache!  this function is currently only called from
     // within AsmJSModule::patchHeapAccesses, which does that for us.  Don't call this!
 }
 
-static uintptr_t
-PageStart(uintptr_t p)
-{
-    static const size_t PageSize = 4096;
-    return p & ~(PageSize - 1);
-}
-
-static bool
-OnSamePage(uintptr_t start1, uintptr_t stop1, uintptr_t start2, uintptr_t stop2)
-{
-    // Return true if (parts of) the two ranges are on the same memory page.
-    return PageStart(stop1) == PageStart(start2) || PageStart(stop2) == PageStart(start1);
-}
-
-void
-AutoFlushCache::update(uintptr_t newStart, size_t len)
-{
-    uintptr_t newStop = newStart + len;
-    used_ = true;
-    if (!start_) {
-        IonSpewCont(IonSpew_CacheFlush,  ".");
-        start_ = newStart;
-        stop_ = newStop;
-        return;
-    }
-
-    if (!OnSamePage(start_, stop_, newStart, newStop)) {
-        // Flush now if the two ranges have no memory page in common, to avoid
-        // problems on Linux where the kernel only flushes the first VMA that
-        // covers the range. This also ensures we don't add too many pages to
-        // the range.
-        IonSpewCont(IonSpew_CacheFlush, "*");
-        JSC::ExecutableAllocator::cacheFlush((void*)newStart, len);
-        return;
-    }
-
-    start_ = Min(start_, newStart);
-    stop_ = Max(stop_, newStop);
-    IonSpewCont(IonSpew_CacheFlush, ".");
-}
-
-AutoFlushCache::~AutoFlushCache()
-{
-   if (!runtime_)
-        return;
-
-    flushAnyway();
-    IonSpewCont(IonSpew_CacheFlush, ">", name_);
-    if (runtime_->flusher() == this) {
-        IonSpewFin(IonSpew_CacheFlush);
-        runtime_->setFlusher(nullptr);
-    }
-}
-
-void
-AutoFlushCache::flushAnyway()
-{
-    if (!runtime_)
-        return;
-
-    IonSpewCont(IonSpew_CacheFlush, "|", name_);
-
-    if (!used_)
-        return;
-
-    if (start_) {
-        JSC::ExecutableAllocator::cacheFlush((void *)start_, size_t(stop_ - start_ + sizeof(Instruction)));
-    } else {
-        JSC::ExecutableAllocator::cacheFlush(nullptr, 0xff000000);
-    }
-    used_ = false;
-}
 InstructionIterator::InstructionIterator(Instruction *i_) : i(i_) {
     const PoolHeader *ph;
     // If this is a guard, and the next instruction is a header, always work around the pool
     // If it isn't a guard, then start looking ahead.
     if (InstIsArtificialGuard(i, &ph)) {
         i = i->next();
     }
 }
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -1101,16 +1101,17 @@ Simulator::setLastDebuggerInput(char *in
 {
     js_free(lastDebuggerInput_);
     lastDebuggerInput_ = input;
 }
 
 void
 Simulator::FlushICache(void *start_addr, size_t size)
 {
+    IonSpewCont(IonSpew_CacheFlush, "[%p %zx]", start_addr, size);
     if (!Simulator::ICacheCheckingEnabled)
         return;
     SimulatorRuntime *srt = Simulator::Current()->srt_;
     AutoLockSimulatorRuntime alsr(srt);
     js::jit::FlushICache(srt->icache(), start_addr, size);
 }
 
 Simulator::~Simulator()
--- a/js/src/jit/arm/Trampoline-arm.cpp
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -108,17 +108,16 @@ JitCode *
 JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
 {
     const Address slot_token(sp, offsetof(EnterJITStack, token));
     const Address slot_vp(sp, offsetof(EnterJITStack, vp));
 
     JS_ASSERT(OsrFrameReg == r3);
 
     MacroAssembler masm(cx);
-    AutoFlushCache afc("GenerateEnterJIT", cx->runtime()->jitRuntime());
     Assembler *aasm = &masm;
 
     // Save non-volatile registers. These must be saved by the trampoline,
     // rather than the JIT'd code, because they are scanned by the conservative
     // scanner.
     masm.startDataTransferM(IsStore, sp, DB, WriteBack);
     masm.transferReg(r4); // [sp,0]
     masm.transferReg(r5); // [sp,4]
@@ -329,16 +328,17 @@ JitRuntime::generateEnterJIT(JSContext *
     //   ASSERT(JSReturnReg_Type.code() == JSReturnReg_Data.code()+1);
     //   aasm->as_extdtr(IsStore, 64, true, Offset,
     //                   JSReturnReg_Data, EDtrAddr(r5, EDtrOffImm(0)));
 
     // Restore non-volatile registers and return.
     GenerateReturn(masm, true, &cx->runtime()->spsProfiler);
 
     Linker linker(masm);
+    AutoFlushICache afc("EnterJIT");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "EnterJIT");
 #endif
 
     return code;
 }
@@ -390,16 +390,17 @@ JitRuntime::generateInvalidator(JSContex
     // (computed by InvalidationBailout)
     masm.ma_add(sp, r1, sp);
 
     // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
     JitCode *bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
     masm.branch(bailoutTail);
 
     Linker linker(masm);
+    AutoFlushICache afc("Invalidator");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
     IonSpew(IonSpew_Invalidate, "   invalidation thunk created at %p", (void *) code->raw());
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "Invalidator");
 #endif
 
     return code;
@@ -492,16 +493,17 @@ JitRuntime::generateArgumentsRectifier(J
     // sizeDescriptor
     // return address
 
     // Discard pushed arguments.
     masm.ma_alu(sp, lsr(r4, FRAMESIZE_SHIFT), sp, op_add);
 
     masm.ret();
     Linker linker(masm);
+    AutoFlushICache afc("ArgumentsRectifier");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
     CodeOffsetLabel returnLabel(returnOffset);
     returnLabel.fixup(&masm);
     if (returnAddrOut)
         *returnAddrOut = (void *) (code->raw() + returnLabel.offset());
 
 #ifdef JS_ION_PERF
@@ -616,32 +618,34 @@ JitRuntime::generateBailoutTable(JSConte
     Label bailout;
     for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++)
         masm.ma_bl(&bailout);
     masm.bind(&bailout);
 
     GenerateBailoutThunk(cx, masm, frameClass);
 
     Linker linker(masm);
+    AutoFlushICache afc("BailoutTable");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutTable");
 #endif
 
     return code;
 }
 
 JitCode *
 JitRuntime::generateBailoutHandler(JSContext *cx)
 {
     MacroAssembler masm(cx);
     GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
 
     Linker linker(masm);
+    AutoFlushICache afc("BailoutHandler");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutHandler");
 #endif
 
     return code;
 }
@@ -804,16 +808,17 @@ JitRuntime::generateVMWrapper(JSContext 
         break;
     }
     masm.leaveExitFrame();
     masm.retn(Imm32(sizeof(IonExitFrameLayout) +
                     f.explicitStackSlots() * sizeof(void *) +
                     f.extraValuesToPop * sizeof(Value)));
 
     Linker linker(masm);
+    AutoFlushICache afc("VMWrapper");
     JitCode *wrapper = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
     if (!wrapper)
         return nullptr;
 
     // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
     // use relookupOrAdd instead of add.
     if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
         return nullptr;
@@ -852,16 +857,17 @@ JitRuntime::generatePreBarrier(JSContext
         JS_ASSERT(type == MIRType_Shape);
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MarkShapeFromIon));
     }
 
     masm.PopRegsInMask(save);
     masm.ret();
 
     Linker linker(masm);
+    AutoFlushICache afc("PreBarrier");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "PreBarrier");
 #endif
 
     return code;
 }
@@ -907,16 +913,17 @@ JitRuntime::generateDebugTrapHandler(JSC
     masm.bind(&forcedReturn);
     masm.loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()),
                    JSReturnOperand);
     masm.mov(r11, sp);
     masm.pop(r11);
     masm.ret();
 
     Linker linker(masm);
+    AutoFlushICache afc("DebugTrapHandler");
     JitCode *codeDbg = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
 #endif
 
     return codeDbg;
 }
@@ -924,16 +931,17 @@ JitRuntime::generateDebugTrapHandler(JSC
 JitCode *
 JitRuntime::generateExceptionTailStub(JSContext *cx)
 {
     MacroAssembler masm;
 
     masm.handleFailureWithHandlerTail();
 
     Linker linker(masm);
+    AutoFlushICache afc("ExceptionTailStub");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
 #endif
 
     return code;
 }
@@ -941,16 +949,17 @@ JitRuntime::generateExceptionTailStub(JS
 JitCode *
 JitRuntime::generateBailoutTailStub(JSContext *cx)
 {
     MacroAssembler masm;
 
     masm.generateBailoutTail(r1, r2);
 
     Linker linker(masm);
+    AutoFlushICache afc("BailoutTailStub");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
 #endif
 
     return code;
 }
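
Every ARM trampoline generator above follows the same pattern: the old per-runtime AutoFlushCache guard at the top of the function is dropped, and an AutoFlushICache scope is opened immediately before Linker::newCode so that the freshly copied stub is covered by an instruction-cache flush when the scope closes. A minimal sketch of the assumed interface, inferred only from the calls visible in this patch (member names and bodies are illustrative, not the actual jit/Ion.h declaration):

    // Illustrative sketch only; the real class is declared elsewhere in js/src/jit.
    class AutoFlushICache
    {
        uintptr_t start_;     // start of the range recorded by setRange()
        uintptr_t stop_;      // end of the recorded range
        const char *name_;    // label used for cache-flush spew only
        bool inhibit_;        // when true, the destructor performs no flush

      public:
        explicit AutoFlushICache(const char *name, bool inhibit = false);
        ~AutoFlushICache();   // flushes the recorded range unless inhibited

        static void setRange(uintptr_t start, size_t len);  // called from executableCopy
        static void flush(uintptr_t start, size_t len);     // flush a patched span now
    };
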
--- a/js/src/jit/mips/Assembler-mips.cpp
+++ b/js/src/jit/mips/Assembler-mips.cpp
@@ -121,17 +121,17 @@ InstImm::extractImm16(BOffImm16 *dest)
 void
 jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
 {
     Instruction *inst1 = (Instruction *)jump_.raw();
     Instruction *inst2 = inst1->next();
 
     Assembler::updateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
 
-    AutoFlushCache::updateTop(uintptr_t(inst1), 8);
+    AutoFlushICache::flush(uintptr_t(inst1), 8);
 }
 
 void
 Assembler::finish()
 {
     MOZ_ASSERT(!isFinished);
     isFinished = true;
 }
@@ -145,17 +145,17 @@ Assembler::executableCopy(uint8_t *buffe
     // Patch all long jumps during code copy.
     for (size_t i = 0; i < longJumps_.length(); i++) {
         Instruction *inst1 = (Instruction *) ((uint32_t)buffer + longJumps_[i]);
 
         uint32_t value = extractLuiOriValue(inst1, inst1->next());
         updateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
     }
 
-    AutoFlushCache::updateTop((uintptr_t)buffer, m_buffer.size());
+    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
 }
 
 uint32_t
 Assembler::actualOffset(uint32_t off_) const
 {
     return off_;
 }
 
@@ -1331,17 +1331,17 @@ Assembler::patchWrite_NearCall(CodeLocat
     // - Jump has to be the same size because of patchWrite_NearCallSize.
     // - Return address has to be at the end of replaced block.
     // Short jump wouldn't be more efficient.
     writeLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
     inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
     inst[3] = InstNOP();
 
     // Ensure everyone sees the code that was just written into memory.
-    AutoFlushCache::updateTop(uintptr_t(inst), patchWrite_NearCallSize());
+    AutoFlushICache::flush(uintptr_t(inst), patchWrite_NearCallSize());
 }
 
 uint32_t
 Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1)
 {
     InstImm *i0 = (InstImm *) inst0;
     InstImm *i1 = (InstImm *) inst1;
     MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
@@ -1378,17 +1378,17 @@ Assembler::patchDataWithValueCheck(CodeL
 
     // Extract old Value
     DebugOnly<uint32_t> value = Assembler::extractLuiOriValue(&inst[0], &inst[1]);
     MOZ_ASSERT(value == uint32_t(expectedValue.value));
 
     // Replace with new value
     Assembler::updateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
 
-    AutoFlushCache::updateTop(uintptr_t(inst), 8);
+    AutoFlushICache::flush(uintptr_t(inst), 8);
 }
 
 void
 Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
 {
     patchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                             PatchedImmPtr(expectedValue.value));
 }
@@ -1480,30 +1480,30 @@ void
 Assembler::ToggleToJmp(CodeLocationLabel inst_)
 {
     InstImm * inst = (InstImm *)inst_.raw();
 
     MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
     // We converted beq to andi, so now we restore it.
     inst->setOpcode(op_beq);
 
-    AutoFlushCache::updateTop((uintptr_t)inst, 4);
+    AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void
 Assembler::ToggleToCmp(CodeLocationLabel inst_)
 {
     InstImm * inst = (InstImm *)inst_.raw();
 
     // toggledJump is always used for short jumps.
     MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
     // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
     inst->setOpcode(op_andi);
 
-    AutoFlushCache::updateTop((uintptr_t)inst, 4);
+    AutoFlushICache::flush(uintptr_t(inst), 4);
 }
 
 void
 Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
 {
     Instruction *inst = (Instruction *)inst_.raw();
     InstImm *i0 = (InstImm *) inst;
     InstImm *i1 = (InstImm *) i0->next();
@@ -1515,80 +1515,15 @@ Assembler::ToggleCall(CodeLocationLabel 
     if (enabled) {
         InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
         *i2 = jalr;
     } else {
         InstNOP nop;
         *i2 = nop;
     }
 
-    AutoFlushCache::updateTop((uintptr_t)i2, 4);
+    AutoFlushICache::flush(uintptr_t(i2), 4);
 }
 
 void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
 {
     MOZ_ASSUME_UNREACHABLE("NYI");
 }
-
-void
-AutoFlushCache::update(uintptr_t newStart, size_t len)
-{
-    uintptr_t newStop = newStart + len;
-    if (this == nullptr) {
-        // just flush right here and now.
-        JSC::ExecutableAllocator::cacheFlush((void*)newStart, len);
-        return;
-    }
-    used_ = true;
-    if (!start_) {
-        IonSpewCont(IonSpew_CacheFlush, ".");
-        start_ = newStart;
-        stop_ = newStop;
-        return;
-    }
-
-    if (newStop < start_ - 4096 || newStart > stop_ + 4096) {
-        // If this would add too many pages to the range. Flush recorded range
-        // and make a new range.
-        IonSpewCont(IonSpew_CacheFlush, "*");
-        JSC::ExecutableAllocator::cacheFlush((void*)start_, stop_);
-        start_ = newStart;
-        stop_ = newStop;
-        return;
-    }
-    start_ = Min(start_, newStart);
-    stop_ = Max(stop_, newStop);
-    IonSpewCont(IonSpew_CacheFlush, ".");
-}
-
-AutoFlushCache::~AutoFlushCache()
-{
-    if (!runtime_)
-        return;
-
-    flushAnyway();
-    IonSpewCont(IonSpew_CacheFlush, ">", name_);
-    if (runtime_->flusher() == this) {
-        IonSpewFin(IonSpew_CacheFlush);
-        runtime_->setFlusher(nullptr);
-    }
-}
-
-void
-AutoFlushCache::flushAnyway()
-{
-    if (!runtime_)
-        return;
-
-    IonSpewCont(IonSpew_CacheFlush, "|", name_);
-
-    if (!used_)
-        return;
-
-    if (start_) {
-        JSC::ExecutableAllocator::cacheFlush((void *)start_,
-                                             size_t(stop_ - start_ + sizeof(Instruction)));
-    } else {
-        JSC::ExecutableAllocator::cacheFlush(nullptr, 0xff000000);
-    }
-    used_ = false;
-}
-
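
In Assembler-mips.cpp the patching helpers switch from AutoFlushCache::updateTop, which accumulated a range on the runtime's current flusher, to the static AutoFlushICache::flush, and executableCopy now registers the copied buffer via setRange. A hedged sketch of the assumed flush behaviour (hypothetical; the real logic lives with the AutoFlushICache implementation): if the patched address already falls inside the range recorded by the innermost open scope on this thread, the flush is deferred to that scope's destructor; otherwise the instruction cache is flushed immediately.

    // Hypothetical sketch of AutoFlushICache::flush(), assuming a thread-local
    // pointer to the current scope (the autoFlushICache_ field added to
    // PerThreadData by this patch).
    void
    AutoFlushICache::flush(uintptr_t start, size_t len)
    {
        AutoFlushICache *afc = TlsPerThreadData.get()->autoFlushICache();
        if (afc && start >= afc->start_ && start + len <= afc->stop_)
            return;  // covered by the open scope; its destructor will flush
        JSC::ExecutableAllocator::cacheFlush((void *)start, len);
    }
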
--- a/js/src/jit/mips/Trampoline-mips.cpp
+++ b/js/src/jit/mips/Trampoline-mips.cpp
@@ -132,18 +132,16 @@ JitRuntime::generateEnterJIT(JSContext *
     const Register reg_code = a0;
     const Register reg_argc = a1;
     const Register reg_argv = a2;
     const Register reg_frame = a3;
 
     MOZ_ASSERT(OsrFrameReg == reg_frame);
 
     MacroAssembler masm(cx);
-    AutoFlushCache afc("GenerateEnterJIT", cx->runtime()->jitRuntime());
-
     GeneratePrologue(masm);
 
     const Address slotToken(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, calleeToken));
     const Address slotVp(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, vp));
 
     // Save stack pointer into s4
     masm.movePtr(StackPointer, s4);
 
@@ -299,16 +297,17 @@ JitRuntime::generateEnterJIT(JSContext *
     // Store the returned value into the slotVp
     masm.loadPtr(slotVp, s1);
     masm.storeValue(JSReturnOperand, Address(s1, 0));
 
     // Restore non-volatile registers and return.
     GenerateReturn(masm, ShortJump);
 
     Linker linker(masm);
+    AutoFlushICache afc("GenerateEnterJIT");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "EnterJIT");
 #endif
 
     return code;
 }
@@ -371,16 +370,17 @@ JitRuntime::generateInvalidator(JSContex
     // (computed by InvalidationBailout)
     masm.addPtr(a1, StackPointer);
 
     // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
     JitCode *bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
     masm.branch(bailoutTail);
 
     Linker linker(masm);
+    AutoFlushICache afc("Invalidator");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
     IonSpew(IonSpew_Invalidate, "   invalidation thunk created at %p", (void *) code->raw());
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "Invalidator");
 #endif
 
     return code;
@@ -502,16 +502,17 @@ JitRuntime::generateArgumentsRectifier(J
     // sizeDescriptor
     // return address
 
     // Discard pushed arguments.
     masm.addPtr(t0, StackPointer);
 
     masm.ret();
     Linker linker(masm);
+    AutoFlushICache afc("ArgumentsRectifier");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
     CodeOffsetLabel returnLabel(returnOffset);
     returnLabel.fixup(&masm);
     if (returnAddrOut)
         *returnAddrOut = (void *) (code->raw() + returnLabel.offset());
 
 #ifdef JS_ION_PERF
@@ -625,32 +626,34 @@ JitRuntime::generateBailoutTable(JSConte
         masm.as_bal(BOffImm16(offset));
         masm.nop();
     }
     masm.bind(&bailout);
 
     GenerateBailoutThunk(cx, masm, frameClass);
 
     Linker linker(masm);
+    AutoFlushICache afc("BailoutTable");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutTable");
 #endif
 
     return code;
 }
 
 JitCode *
 JitRuntime::generateBailoutHandler(JSContext *cx)
 {
     MacroAssembler masm(cx);
     GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
 
     Linker linker(masm);
+    AutoFlushICache afc("BailoutHandler");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutHandler");
 #endif
 
     return code;
 }
@@ -837,16 +840,17 @@ JitRuntime::generateVMWrapper(JSContext 
         break;
     }
     masm.leaveExitFrame();
     masm.retn(Imm32(sizeof(IonExitFrameLayout) +
                     f.explicitStackSlots() * sizeof(uintptr_t) +
                     f.extraValuesToPop * sizeof(Value)));
 
     Linker linker(masm);
+    AutoFlushICache afc("VMWrapper");
     JitCode *wrapper = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
     if (!wrapper)
         return nullptr;
 
     // linker.newCode may trigger a GC and sweep functionWrappers_ so we have
     // to use relookupOrAdd instead of add.
     if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
         return nullptr;
@@ -886,16 +890,17 @@ JitRuntime::generatePreBarrier(JSContext
         MOZ_ASSERT(type == MIRType_Shape);
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MarkShapeFromIon));
     }
 
     masm.PopRegsInMask(save);
     masm.ret();
 
     Linker linker(masm);
+    AutoFlushICache afc("PreBarrier");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "PreBarrier");
 #endif
 
     return code;
 }
@@ -945,16 +950,17 @@ JitRuntime::generateDebugTrapHandler(JSC
     masm.bind(&forcedReturn);
     masm.loadValue(Address(s5, BaselineFrame::reverseOffsetOfReturnValue()),
                    JSReturnOperand);
     masm.movePtr(s5, StackPointer);
     masm.pop(s5);
     masm.ret();
 
     Linker linker(masm);
+    AutoFlushICache afc("DebugTrapHandler");
     JitCode *codeDbg = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
 #endif
 
     return codeDbg;
 }
@@ -963,16 +969,17 @@ JitRuntime::generateDebugTrapHandler(JSC
 JitCode *
 JitRuntime::generateExceptionTailStub(JSContext *cx)
 {
     MacroAssembler masm;
 
     masm.handleFailureWithHandlerTail();
 
     Linker linker(masm);
+    AutoFlushICache afc("ExceptionTailStub");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
 #endif
 
     return code;
 }
@@ -980,16 +987,17 @@ JitRuntime::generateExceptionTailStub(JS
 JitCode *
 JitRuntime::generateBailoutTailStub(JSContext *cx)
 {
     MacroAssembler masm;
 
     masm.generateBailoutTail(a1, a2);
 
     Linker linker(masm);
+    AutoFlushICache afc("BailoutTailStub");
     JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
 #endif
 
     return code;
 }
--- a/js/src/jit/shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/shared/Assembler-x86-shared.cpp
@@ -126,27 +126,8 @@ AssemblerX86Shared::InvertCondition(Cond
       case Below:
         return AboveOrEqual;
       case BelowOrEqual:
         return Above;
       default:
         MOZ_ASSUME_UNREACHABLE("unexpected condition");
     }
 }
-
-void
-AutoFlushCache::update(uintptr_t newStart, size_t len)
-{
-}
-
-void
-AutoFlushCache::flushAnyway()
-{
-}
-
-AutoFlushCache::~AutoFlushCache()
-{
-    if (!runtime_)
-        return;
-
-    if (runtime_->flusher() == this)
-        runtime_->setFlusher(nullptr);
-}
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -5383,17 +5383,17 @@ js::PurgePCCounts(JSContext *cx)
 void
 js::PurgeJITCaches(Zone *zone)
 {
 #ifdef JS_ION
     for (ZoneCellIterUnderGC i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
         JSScript *script = i.get<JSScript>();
 
         /* Discard Ion caches. */
-        jit::PurgeCaches(script, zone);
+        jit::PurgeCaches(script);
     }
 #endif
 }
 
 void
 ArenaLists::normalizeBackgroundFinalizeState(AllocKind thingKind)
 {
     ArenaLists::BackgroundFinalizeState *bfs = &backgroundFinalizeState[thingKind];
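
The Zone argument to jit::PurgeCaches is dropped because the purge no longer needs a zone against which to record a pending flush range; the assumed updated declaration (hypothetical, mirroring the call site above) is simply:

    namespace js {
    namespace jit {
    void PurgeCaches(JSScript *script);   // was: PurgeCaches(JSScript *, Zone *)
    } // namespace jit
    } // namespace js
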
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -75,16 +75,17 @@ PerThreadData::PerThreadData(JSRuntime *
     jitTop(nullptr),
     jitJSContext(nullptr),
     jitStackLimit(0),
 #ifdef JS_TRACE_LOGGING
     traceLogger(nullptr),
 #endif
     activation_(nullptr),
     asmJSActivationStack_(nullptr),
+    autoFlushICache_(nullptr),
 #if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
     simulator_(nullptr),
     simulatorStackLimit_(0),
 #endif
     dtoaState(nullptr),
     suppressGC(0),
     activeCompilations(0)
 {}
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -89,16 +89,17 @@ class AsmJSActivation;
 class MathCache;
 
 namespace jit {
 class JitRuntime;
 class JitActivation;
 struct PcScriptCache;
 class Simulator;
 class SimulatorRuntime;
+class AutoFlushICache;
 }
 
 /*
  * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
  * given pc in a script. We use the script->code pointer to tag the cache,
  * instead of the script address itself, so that source notes are always found
  * by offset from the bytecode with which they were generated.
  */
@@ -528,16 +529,19 @@ class PerThreadData : public PerThreadDa
      * Points to the most recent activation running on the thread.
      * See Activation comment in vm/Stack.h.
      */
     js::Activation *activation_;
 
     /* See AsmJSActivation comment. Protected by rt->interruptLock. */
     js::AsmJSActivation *asmJSActivationStack_;
 
+    /* Pointer to the current AutoFlushICache. */
+    js::jit::AutoFlushICache *autoFlushICache_;
+
 #if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
     js::jit::Simulator *simulator_;
     uintptr_t simulatorStackLimit_;
 #endif
 
   public:
     js::Activation *const *addressOfActivation() const {
         return &activation_;
@@ -603,16 +607,19 @@ class PerThreadData : public PerThreadDa
             pt->runtime_ = rt;
         }
 
         ~AutoEnterRuntime() {
             pt->runtime_ = nullptr;
         }
     };
 
+    js::jit::AutoFlushICache *autoFlushICache() const;
+    void setAutoFlushICache(js::jit::AutoFlushICache *afc);
+
 #if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
     js::jit::Simulator *simulator() const;
     void setSimulator(js::jit::Simulator *sim);
     js::jit::SimulatorRuntime *simulatorRuntime() const;
     uintptr_t *addressOfSimulatorStackLimit();
 #endif
 };
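
Runtime.h only declares the new per-thread accessors; their definitions are not part of these hunks. A minimal sketch of what they presumably look like (hypothetical; likely defined alongside the AutoFlushICache implementation rather than in Runtime.cpp):

    // Hypothetical accessor definitions matching the declarations above.
    js::jit::AutoFlushICache *
    PerThreadData::autoFlushICache() const
    {
        return autoFlushICache_;
    }

    void
    PerThreadData::setAutoFlushICache(js::jit::AutoFlushICache *afc)
    {
        autoFlushICache_ = afc;
    }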