Bug 977805 - Add an option to mark JIT pages as non-writable. r=luke
author: Jan de Mooij <jdemooij@mozilla.com>
date: Fri, 12 Jun 2015 10:20:59 +0200
changeset: 279349 b46d6692fe50b2698f217002e69d3cb0b088517f
parent: 279348 bf79317108012383f42b6e28adf2ed5efa7e5784
child: 279350 6df4c4d057bc063a488c9c90f2a9f5b8616c9691
push id: 4932
push user: jlund@mozilla.com
push date: Mon, 10 Aug 2015 18:23:06 +0000
reviewers: luke
bugs: 977805
milestone: 41.0a1
Bug 977805 - Add an option to mark JIT pages as non-writable. r=luke
js/src/asmjs/AsmJSLink.cpp
js/src/asmjs/AsmJSModule.cpp
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineJIT.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/ExecutableAllocator.cpp
js/src/jit/ExecutableAllocator.h
js/src/jit/ExecutableAllocatorPosix.cpp
js/src/jit/ExecutableAllocatorWin.cpp
js/src/jit/Ion.cpp
js/src/jit/IonCaches.cpp
js/src/jit/IonCaches.h
js/src/jit/IonCode.h
js/src/jit/JitCompartment.h
js/src/jit/Linker.h
js/src/jit/Lowering.cpp
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/x64/Assembler-x64.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x86-shared/Assembler-x86-shared.cpp
js/src/jit/x86/Assembler-x86.h
js/src/shell/js.cpp
js/src/tests/lib/tests.py
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
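
The core of the patch is a W^X (writable XOR executable) discipline: when
--non-writable-jitcode is enabled, JIT pages live as RX, and every site that
patches code wraps the write in an RAII guard that flips the affected pages to
RW and back. A minimal standalone sketch of that pattern, assuming POSIX
mprotect and 4K pages (the names are illustrative, not the SpiderMonkey API):

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    static const uintptr_t kPageSize = 4096;  // assumed page size for the sketch

    // Scoped W^X toggle: pages are RW inside the scope, RX again on exit.
    class ScopedWritable
    {
        void* pageStart_;
        size_t size_;

      public:
        ScopedWritable(void* addr, size_t size) {
            uintptr_t start = reinterpret_cast<uintptr_t>(addr);
            uintptr_t page = start & ~(kPageSize - 1);
            pageStart_ = reinterpret_cast<void*>(page);
            size_ = ((start - page) + size + kPageSize - 1) & ~(kPageSize - 1);
            mprotect(pageStart_, size_, PROT_READ | PROT_WRITE);  // writable while patching
        }
        ~ScopedWritable() {
            mprotect(pageStart_, size_, PROT_READ | PROT_EXEC);   // executable again
        }
    };

AutoWritableJitCode in js/src/jit/JitCompartment.h below is the real version of
this guard; it additionally records on the runtime that a reprotection is in
progress and becomes a no-op when the option is off.
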
--- a/js/src/asmjs/AsmJSLink.cpp
+++ b/js/src/asmjs/AsmJSLink.cpp
@@ -593,16 +593,19 @@ DynamicallyLinkModule(JSContext* cx, Cal
         }
     }
 
     for (unsigned i = 0; i < module.numExits(); i++)
         module.exitIndexToGlobalDatum(i).fun = &ffis[module.exit(i).ffiIndex()]->as<JSFunction>();
 
     module.initGlobalNaN();
 
+    // See the comment in AllocateExecutableMemory.
+    ExecutableAllocator::makeExecutable(module.codeBase(), module.codeBytes());
+
     return true;
 }
 
 static bool
 ChangeHeap(JSContext* cx, AsmJSModule& module, CallArgs args)
 {
     HandleValue bufferArg = args.get(0);
     if (!IsArrayBuffer(bufferArg)) {
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -57,21 +57,21 @@ using mozilla::Compression::LZ4;
 using mozilla::PodCopy;
 using mozilla::PodEqual;
 using mozilla::PodZero;
 using mozilla::Swap;
 
 static uint8_t*
 AllocateExecutableMemory(ExclusiveContext* cx, size_t bytes)
 {
-#ifdef XP_WIN
-    unsigned permissions = PAGE_EXECUTE_READWRITE;
-#else
-    unsigned permissions = PROT_READ | PROT_WRITE | PROT_EXEC;
-#endif
+    // On most platforms, this will allocate RWX memory. On iOS, or when
+    // --non-writable-jitcode is used, this will allocate RW memory. In this
+    // case, DynamicallyLinkModule will reprotect the code as RX.
+    unsigned permissions =
+        ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);
     void* p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", AsmJSPageSize);
     if (!p)
         ReportOutOfMemory(cx);
     return (uint8_t*)p;
 }
 
 AsmJSModule::AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t srcBodyStart,
                          bool strict, bool canUseSignalHandlers)
@@ -290,20 +290,19 @@ AsmJSModule::finish(ExclusiveContext* cx
     if (!tokenStream.peekTokenPos(&pos))
         return false;
     uint32_t endAfterCurly = pos.end;
     MOZ_ASSERT(endBeforeCurly >= srcBodyStart_);
     MOZ_ASSERT(endAfterCurly >= srcBodyStart_);
     pod.srcLength_ = endBeforeCurly - srcStart_;
     pod.srcLengthWithRightBrace_ = endAfterCurly - srcStart_;
 
-    // The global data section sits immediately after the executable (and
-    // other) data allocated by the MacroAssembler, so ensure it is
-    // SIMD-aligned.
-    pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), SimdMemoryAlignment);
+    // Start global data on a new page so JIT code may be given independent
+    // protection flags.
+    pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), AsmJSPageSize);
 
     // The entire region is allocated via mmap/VirtualAlloc which requires
     // units of pages.
     pod.totalBytes_ = AlignBytes(pod.codeBytes_ + globalDataBytes(), AsmJSPageSize);
 
     MOZ_ASSERT(!code_);
     code_ = AllocateExecutableMemory(cx, pod.totalBytes_);
     if (!code_)
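
A note on the finish() change above: page protections apply to whole pages, so
for the global data section to stay RW while the code before it becomes RX, the
code region has to end on a page boundary; the old SimdMemoryAlignment (16
bytes on x86/x64) was no longer enough. A worked example with illustrative
sizes:

    #include <assert.h>
    #include <stddef.h>

    static size_t AlignBytes(size_t bytes, size_t align) {
        return (bytes + align - 1) & ~(align - 1);  // align must be a power of two
    }

    int main() {
        const size_t AsmJSPageSize = 4096;     // typical page size
        size_t masmBytes = 10000;              // hypothetical generated-code size
        size_t codeBytes = AlignBytes(masmBytes, AsmJSPageSize);
        assert(codeBytes == 12288);            // code occupies three whole pages
        // Global data begins at codeBytes, i.e. on a fresh page, so it keeps RW
        // permissions when the code pages are reprotected to RX.
        size_t totalBytes = AlignBytes(codeBytes + 512, AsmJSPageSize);
        assert(totalBytes == 16384);           // the mmap'd region stays page-granular
        return 0;
    }
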
@@ -942,16 +941,34 @@ AsmJSModule::restoreToInitialState(Array
                                                PatchedImmPtr(originalValue));
         }
     }
 #endif
 
     restoreHeapToInitialState(maybePrevBuffer);
 }
 
+namespace {
+
+class MOZ_STACK_CLASS AutoMutateCode
+{
+    AutoWritableJitCode awjc_;
+    AutoFlushICache afc_;
+
+  public:
+    AutoMutateCode(JSContext* cx, AsmJSModule& module, const char* name)
+      : awjc_(cx->runtime(), module.codeBase(), module.codeBytes()),
+        afc_(name)
+    {
+        module.setAutoFlushICacheRange();
+    }
+};
+
+} // anonymous namespace
+
 bool
 AsmJSModule::detachHeap(JSContext* cx)
 {
     MOZ_ASSERT(isDynamicallyLinked());
     MOZ_ASSERT(maybeHeap_);
 
     // Content JS should not be able to run (and detach heap) from within an
     // interrupt callback, but in case it does, fail. Otherwise, the heap can
@@ -962,19 +979,17 @@ AsmJSModule::detachHeap(JSContext* cx)
     }
 
     // Even if this->active(), to reach here, the activation must have called
     // out via an FFI stub. FFI stubs check if heapDatum() is null on reentry
     // and throw an exception if so.
     MOZ_ASSERT_IF(active(), activation()->exitReason() == AsmJSExit::Reason_JitFFI ||
                             activation()->exitReason() == AsmJSExit::Reason_SlowFFI);
 
-    AutoFlushICache afc("AsmJSModule::detachHeap");
-    setAutoFlushICacheRange();
-
+    AutoMutateCode amc(cx, *this, "AsmJSModule::detachHeap");
     restoreHeapToInitialState(maybeHeap_);
 
     MOZ_ASSERT(hasDetachedHeap());
     return true;
 }
 
 bool
 js::OnDetachAsmJSArrayBuffer(JSContext* cx, Handle<ArrayBufferObject*> buffer)
@@ -1712,19 +1727,17 @@ AsmJSModule::changeHeap(Handle<ArrayBuff
 
     // Content JS should not be able to run (and change heap) from within an
     // interrupt callback, but in case it does, fail to change heap. Otherwise,
     // the heap can change at every single instruction which would prevent
     // future optimizations like heap-base hoisting.
     if (interrupted_)
         return false;
 
-    AutoFlushICache afc("AsmJSModule::changeHeap");
-    setAutoFlushICacheRange();
-
+    AutoMutateCode amc(cx, *this, "AsmJSModule::changeHeap");
     restoreHeapToInitialState(maybeHeap_);
     initHeap(newHeap, cx);
     return true;
 }
 
 void
 AsmJSModule::setProfilingEnabled(bool enabled, JSContext* cx)
 {
@@ -1751,19 +1764,17 @@ AsmJSModule::setProfilingEnabled(bool en
                 name->hasLatin1Chars()
                 ? JS_smprintf("%s (%s:%u)", name->latin1Chars(nogc), filename, lineno)
                 : JS_smprintf("%hs (%s:%u)", name->twoByteChars(nogc), filename, lineno));
         }
     } else {
         profilingLabels_.clear();
     }
 
-    // Conservatively flush the icache for the entire module.
-    AutoFlushICache afc("AsmJSModule::setProfilingEnabled");
-    setAutoFlushICacheRange();
+    AutoMutateCode amc(cx, *this, "AsmJSModule::setProfilingEnabled");
 
     // Patch all internal (asm.js->asm.js) callsites to call the profiling
     // prologues:
     for (size_t i = 0; i < callSites_.length(); i++) {
         CallSite& cs = callSites_[i];
         if (cs.kind() != CallSite::Relative)
             continue;
 
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -437,16 +437,18 @@ NativeRegExpMacroAssembler::GenerateCode
     JitCode* code = linker.newCode<NoGC>(cx, REGEXP_CODE);
     if (!code)
         return RegExpCode();
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "RegExp");
 #endif
 
+    AutoWritableJitCode awjc(code);
+
     for (size_t i = 0; i < labelPatches.length(); i++) {
         LabelPatch& v = labelPatches[i];
         MOZ_ASSERT(!v.label);
         v.patchOffset.fixup(&masm);
         uintptr_t offset = masm.actualOffset(v.labelOffset);
         Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
                                            ImmPtr(code->raw() + offset),
                                            ImmPtr(0));
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -227,34 +227,40 @@ BaselineCompiler::compile()
 
     // Copy IC entries
     if (icEntries_.length())
         baselineScript->copyICEntries(script, &icEntries_[0], masm);
 
     // Adopt fallback stubs from the compiler into the baseline script.
     baselineScript->adoptFallbackStubs(&stubSpace_);
 
-    // Patch IC loads using IC entries
+    // All barriers are emitted off by default; toggle them on if needed.
+    if (cx->zone()->needsIncrementalBarrier())
+        baselineScript->toggleBarriers(true);
+
+    // If profiler instrumentation is enabled, toggle instrumentation on.
+    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+        baselineScript->toggleProfilerInstrumentation(true);
+
+    AutoWritableJitCode awjc(code);
+
+    // Patch IC loads using IC entries.
     for (size_t i = 0; i < icLoadLabels_.length(); i++) {
         CodeOffsetLabel label = icLoadLabels_[i].label;
         label.fixup(&masm);
         size_t icEntry = icLoadLabels_[i].icEntry;
         ICEntry* entryAddr = &(baselineScript->icEntry(icEntry));
         Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
                                            ImmPtr(entryAddr),
                                            ImmPtr((void*)-1));
     }
 
     if (modifiesArguments_)
         baselineScript->setModifiesArguments();
 
-    // All barriers are emitted off-by-default, toggle them on if needed.
-    if (cx->zone()->needsIncrementalBarrier())
-        baselineScript->toggleBarriers(true);
-
 #ifdef JS_TRACE_LOGGING
     // Initialize the tracelogger instrumentation.
     baselineScript->initTraceLogger(cx->runtime(), script);
 #endif
 
     uint32_t* bytecodeMap = baselineScript->bytecodeTypeMap();
     FillBytecodeTypeMap(script, bytecodeMap);
 
@@ -262,20 +268,16 @@ BaselineCompiler::compile()
     // searches for the sought entry when queries are in linear order.
     bytecodeMap[script->nTypeSets()] = 0;
 
     baselineScript->copyYieldEntries(script, yieldOffsets_);
 
     if (compileDebugInstrumentation_)
         baselineScript->setHasDebugInstrumentation();
 
-    // If profiler instrumentation is enabled, toggle instrumentation on.
-    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
-        baselineScript->toggleProfilerInstrumentation(true);
-
     // Always register a native => bytecode mapping entry, since profiler can be
     // turned on with baseline jitcode on stack, and baseline jitcode cannot be invalidated.
     {
         JitSpew(JitSpew_Profiling, "Added JitcodeGlobalEntry for baseline script %s:%d (%p)",
                     script->filename(), script->lineno(), baselineScript.get());
 
         // Generate profiling string.
         char* str = JitcodeGlobalEntry::createScriptString(cx, script);
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -859,16 +859,18 @@ BaselineScript::toggleDebugTraps(JSScrip
     MOZ_ASSERT(script->baselineScript() == this);
 
     // Only scripts compiled for debug mode have toggled calls.
     if (!hasDebugInstrumentation())
         return;
 
     SrcNoteLineScanner scanner(script->notes(), script->lineno());
 
+    AutoWritableJitCode awjc(method());
+
     for (uint32_t i = 0; i < numPCMappingIndexEntries(); i++) {
         PCMappingIndexEntry& entry = pcMappingIndexEntry(i);
 
         CompactBufferReader reader(pcMappingReader(i));
         jsbytecode* curPC = script->offsetToPC(entry.pcOffset);
         uint32_t nativeOffset = entry.nativeOffset;
 
         MOZ_ASSERT(script->containsPC(curPC));
@@ -905,16 +907,17 @@ BaselineScript::initTraceLogger(JSRuntim
 
     TraceLoggerThread* logger = TraceLoggerForMainThread(runtime);
     if (TraceLogTextIdEnabled(TraceLogger_Scripts))
         traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts, script);
     else
         traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts);
 
     if (TraceLogTextIdEnabled(TraceLogger_Engine) || TraceLogTextIdEnabled(TraceLogger_Scripts)) {
+        AutoWritableJitCode awjc(method_);
         CodeLocationLabel enter(method_, CodeOffsetLabel(traceLoggerEnterToggleOffset_));
         CodeLocationLabel exit(method_, CodeOffsetLabel(traceLoggerExitToggleOffset_));
         Assembler::ToggleToCmp(enter);
         Assembler::ToggleToCmp(exit);
     }
 }
 
 void
@@ -928,16 +931,18 @@ BaselineScript::toggleTraceLoggerScripts
     // Patch the logging script textId to be correct.
     // When enabled, log the script-specific textId; otherwise the global Scripts textId.
     TraceLoggerThread* logger = TraceLoggerForMainThread(runtime);
     if (enable)
         traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts, script);
     else
         traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts);
 
+    AutoWritableJitCode awjc(method());
+
     // Enable/Disable the traceLogger prologue and epilogue.
     CodeLocationLabel enter(method_, CodeOffsetLabel(traceLoggerEnterToggleOffset_));
     CodeLocationLabel exit(method_, CodeOffsetLabel(traceLoggerExitToggleOffset_));
     if (!engineEnabled) {
         if (enable) {
             Assembler::ToggleToCmp(enter);
             Assembler::ToggleToCmp(exit);
         } else {
@@ -954,16 +959,18 @@ BaselineScript::toggleTraceLoggerScripts
 void
 BaselineScript::toggleTraceLoggerEngine(bool enable)
 {
     bool scriptsEnabled = TraceLogTextIdEnabled(TraceLogger_Scripts);
 
     MOZ_ASSERT(enable == !traceLoggerEngineEnabled_);
     MOZ_ASSERT(scriptsEnabled == traceLoggerScriptsEnabled_);
 
+    AutoWritableJitCode awjc(method());
+
     // Enable/Disable the traceLogger prologue and epilogue.
     CodeLocationLabel enter(method_, CodeOffsetLabel(traceLoggerEnterToggleOffset_));
     CodeLocationLabel exit(method_, CodeOffsetLabel(traceLoggerExitToggleOffset_));
     if (!scriptsEnabled) {
         if (enable) {
             Assembler::ToggleToCmp(enter);
             Assembler::ToggleToCmp(exit);
         } else {
@@ -982,16 +989,18 @@ void
 BaselineScript::toggleProfilerInstrumentation(bool enable)
 {
     if (enable == isProfilerInstrumentationOn())
         return;
 
     JitSpew(JitSpew_BaselineIC, "  toggling profiling %s for BaselineScript %p",
             enable ? "on" : "off", this);
 
+    AutoWritableJitCode awjc(method());
+
     // Toggle the jump
     CodeLocationLabel enterToggleLocation(method_, CodeOffsetLabel(profilerEnterToggleOffset_));
     CodeLocationLabel exitToggleLocation(method_, CodeOffsetLabel(profilerExitToggleOffset_));
     if (enable) {
         Assembler::ToggleToCmp(enterToggleLocation);
         Assembler::ToggleToCmp(exitToggleLocation);
         flags_ |= uint32_t(PROFILER_INSTRUMENTATION_ON);
     } else {
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -7991,20 +7991,53 @@ CodeGenerator::link(JSContext* cx, Compi
     ionScript->setSkipArgCheckEntryOffset(getSkipArgCheckEntryOffset());
 
     // If SPS is enabled, mark IonScript as having been instrumented with SPS
     if (isProfilerInstrumentationEnabled())
         ionScript->setHasProfilingInstrumentation();
 
     script->setIonScript(cx, ionScript);
 
-    invalidateEpilogueData_.fixup(&masm);
-    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
-                                       ImmPtr(ionScript),
-                                       ImmPtr((void*)-1));
+    {
+        AutoWritableJitCode awjc(code);
+        invalidateEpilogueData_.fixup(&masm);
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
+                                           ImmPtr(ionScript),
+                                           ImmPtr((void*)-1));
+
+        for (size_t i = 0; i < ionScriptLabels_.length(); i++) {
+            ionScriptLabels_[i].fixup(&masm);
+            Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
+                                               ImmPtr(ionScript),
+                                               ImmPtr((void*)-1));
+        }
+
+#ifdef JS_TRACE_LOGGING
+        TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
+        for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) {
+            patchableTraceLoggers_[i].fixup(&masm);
+            Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
+                                               ImmPtr(logger),
+                                               ImmPtr(nullptr));
+        }
+
+        if (patchableTLScripts_.length() > 0) {
+            MOZ_ASSERT(TraceLogTextIdEnabled(TraceLogger_Scripts));
+            TraceLoggerEvent event(logger, TraceLogger_Scripts, script);
+            ionScript->setTraceLoggerEvent(event);
+            uint32_t textId = event.payload()->textId();
+            for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) {
+                patchableTLScripts_[i].fixup(&masm);
+                Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
+                                                   ImmPtr((void*) uintptr_t(textId)),
+                                                   ImmPtr((void*)0));
+            }
+        }
+#endif
+    }
 
     JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)",
             (void*) ionScript, (void*) code->raw());
 
     ionScript->setInvalidationEpilogueDataOffset(invalidateEpilogueData_.offset());
     ionScript->setOsrPc(gen->info().osrPc());
     ionScript->setOsrEntryOffset(getOsrEntryOffset());
     ptrdiff_t real_invalidate = masm.actualOffset(invalidate_.offset());
@@ -8012,23 +8045,16 @@ CodeGenerator::link(JSContext* cx, Compi
 
     ionScript->setDeoptTable(deoptTable_);
 
 #if defined(JS_ION_PERF)
     if (PerfEnabled())
         perfSpewer_.writeProfile(script, code, masm);
 #endif
 
-    for (size_t i = 0; i < ionScriptLabels_.length(); i++) {
-        ionScriptLabels_[i].fixup(&masm);
-        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
-                                           ImmPtr(ionScript),
-                                           ImmPtr((void*)-1));
-    }
-
     // for generating inline caches during the execution.
     if (runtimeData_.length())
         ionScript->copyRuntimeData(&runtimeData_[0]);
     if (cacheList_.length())
         ionScript->copyCacheEntries(&cacheList_[0], masm);
 
     // for marking during GC.
     if (safepointIndices_.length())
@@ -8046,39 +8072,16 @@ CodeGenerator::link(JSContext* cx, Compi
     MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
     if (recovers_.size())
         ionScript->copyRecovers(&recovers_);
     if (graph.numConstants())
         ionScript->copyConstants(graph.constantPool());
     if (patchableBackedges_.length() > 0)
         ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin(), masm);
 
-#ifdef JS_TRACE_LOGGING
-    TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());
-    for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) {
-        patchableTraceLoggers_[i].fixup(&masm);
-        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
-                                           ImmPtr(logger),
-                                           ImmPtr(nullptr));
-    }
-
-    if (patchableTLScripts_.length() > 0) {
-        MOZ_ASSERT(TraceLogTextIdEnabled(TraceLogger_Scripts));
-        TraceLoggerEvent event(logger, TraceLogger_Scripts, script);
-        ionScript->setTraceLoggerEvent(event);
-        uint32_t textId = event.payload()->textId();
-        for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) {
-            patchableTLScripts_[i].fixup(&masm);
-            Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
-                                               ImmPtr((void*) uintptr_t(textId)),
-                                               ImmPtr((void*)0));
-        }
-    }
-#endif
-
     // Replace dummy JSObject pointers embedded by LNurseryObject.
     code->fixupNurseryObjects(cx, gen->nurseryObjects());
 
     // The correct state for prebarriers is unknown until the end of compilation,
     // since a GC can occur during code generation. All barriers are emitted
     // off-by-default, and are toggled on here if necessary.
     if (cx->zone()->needsIncrementalBarrier())
         ionScript->toggleBarriers(true);
--- a/js/src/jit/ExecutableAllocator.cpp
+++ b/js/src/jit/ExecutableAllocator.cpp
@@ -57,8 +57,13 @@ ExecutableAllocator::addSizeOfCode(JS::C
             sizes->unused   += pool->m_allocation.size - pool->m_ionCodeBytes
                                                        - pool->m_baselineCodeBytes
                                                        - pool->m_regexpCodeBytes
                                                        - pool->m_otherCodeBytes;
         }
     }
 }
 
+#ifdef TARGET_OS_IPHONE
+bool ExecutableAllocator::nonWritableJitCode = true;
+#else
+bool ExecutableAllocator::nonWritableJitCode = false;
+#endif
--- a/js/src/jit/ExecutableAllocator.h
+++ b/js/src/jit/ExecutableAllocator.h
@@ -1,9 +1,11 @@
-/*
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
  * Copyright (C) 2008 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -52,24 +54,16 @@ static void sync_instruction_memory(cadd
 extern  "C" void sync_instruction_memory(caddr_t v, u_int len);
 #endif
 #endif
 
 #if defined(JS_CODEGEN_MIPS) && defined(__linux__) && !defined(JS_MIPS_SIMULATOR)
 #include <sys/cachectl.h>
 #endif
 
-#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
-#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
-#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
-#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
-#else
-#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
-#endif
-
 namespace JS {
     struct CodeSizes;
 }
 
 namespace js {
 namespace jit {
   enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
 
@@ -172,22 +166,24 @@ namespace jit {
     }
 
     size_t available() const {
         MOZ_ASSERT(m_end >= m_freePtr);
         return m_end - m_freePtr;
     }
 };
 
-class ExecutableAllocator {
+class ExecutableAllocator
+{
     typedef void (*DestroyCallback)(void* addr, size_t size);
-    enum ProtectionSetting { Writable, Executable };
     DestroyCallback destroyCallback;
 
   public:
+    enum ProtectionSetting { Writable, Executable };
+
     ExecutableAllocator()
       : destroyCallback(nullptr)
     {
         if (!pageSize) {
             pageSize = determinePageSize();
             // On Windows, VirtualAlloc effectively allocates in 64K chunks.
             // (Technically, it allocates in page chunks, but the starting
             // address is always a multiple of 64K, so each allocation uses up
@@ -262,16 +258,18 @@ class ExecutableAllocator {
     }
 
     void addSizeOfCode(JS::CodeSizes* sizes) const;
 
     void setDestroyCallback(DestroyCallback destroyCallback) {
         this->destroyCallback = destroyCallback;
     }
 
+    static bool nonWritableJitCode;
+
   private:
     static size_t pageSize;
     static size_t largeAllocSize;
 #ifdef XP_WIN
     static uint64_t rngSeed;
 #endif
 
     static const size_t OVERSIZE_ALLOCATION = size_t(-1);
@@ -374,31 +372,29 @@ class ExecutableAllocator {
                 pool->addRef();
             }
         }
 
         // Pass ownership to the caller.
         return pool;
     }
 
-#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
     static void makeWritable(void* start, size_t size)
     {
-        reprotectRegion(start, size, Writable);
+        if (nonWritableJitCode)
+            reprotectRegion(start, size, Writable);
     }
 
     static void makeExecutable(void* start, size_t size)
     {
-        reprotectRegion(start, size, Executable);
+        if (nonWritableJitCode)
+            reprotectRegion(start, size, Executable);
     }
-#else
-    static void makeWritable(void*, size_t) {}
-    static void makeExecutable(void*, size_t) {}
-#endif
 
+    static unsigned initialProtectionFlags(ProtectionSetting protection);
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     static void cacheFlush(void*, size_t)
     {
     }
 #elif defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
     static void cacheFlush(void* code, size_t size)
     {
@@ -441,19 +437,17 @@ class ExecutableAllocator {
         sync_instruction_memory((caddr_t)code, size);
     }
 #endif
 
   private:
     ExecutableAllocator(const ExecutableAllocator&) = delete;
     void operator=(const ExecutableAllocator&) = delete;
 
-#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
     static void reprotectRegion(void*, size_t, ProtectionSetting);
-#endif
 
     // These are strong references;  they keep pools alive.
     static const size_t maxSmallPools = 4;
     typedef js::Vector<ExecutablePool*, maxSmallPools, js::SystemAllocPolicy> SmallExecPoolVector;
     SmallExecPoolVector m_smallPools;
 
     // All live pools are recorded here, just for stats purposes.  These are
     // weak references;  they don't keep pools alive.  When a pool is destroyed
--- a/js/src/jit/ExecutableAllocatorPosix.cpp
+++ b/js/src/jit/ExecutableAllocatorPosix.cpp
@@ -1,9 +1,11 @@
-/*
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
  * Copyright (C) 2008 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -30,17 +32,18 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
 #include "jit/ExecutableAllocator.h"
 #include "js/Utility.h"
 
 using namespace js::jit;
 
-size_t ExecutableAllocator::determinePageSize()
+size_t
+ExecutableAllocator::determinePageSize()
 {
     return getpagesize();
 }
 
 void*
 js::jit::AllocateExecutableMemory(void* addr, size_t bytes, unsigned permissions, const char* tag,
                                   size_t pageSize)
 {
@@ -52,42 +55,54 @@ js::jit::AllocateExecutableMemory(void* 
 void
 js::jit::DeallocateExecutableMemory(void* addr, size_t bytes, size_t pageSize)
 {
     MOZ_ASSERT(bytes % pageSize == 0);
     mozilla::DebugOnly<int> result = munmap(addr, bytes);
     MOZ_ASSERT(!result || errno == ENOMEM);
 }
 
-ExecutablePool::Allocation ExecutableAllocator::systemAlloc(size_t n)
+ExecutablePool::Allocation
+ExecutableAllocator::systemAlloc(size_t n)
 {
-    void* allocation = AllocateExecutableMemory(nullptr, n, INITIAL_PROTECTION_FLAGS,
+    void* allocation = AllocateExecutableMemory(nullptr, n, initialProtectionFlags(Executable),
                                                 "js-jit-code", pageSize);
     ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
     return alloc;
 }
 
-void ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
+void
+ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
 {
     DeallocateExecutableMemory(alloc.pages, alloc.size, pageSize);
 }
 
-#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
-void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
+static const unsigned FLAGS_RW = PROT_READ | PROT_WRITE;
+static const unsigned FLAGS_RX = PROT_READ | PROT_EXEC;
+
+void
+ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
 {
-    if (!pageSize)
-        intializePageSize();
+    MOZ_ASSERT(nonWritableJitCode);
+    MOZ_ASSERT(pageSize);
 
     // Calculate the start of the page containing this region,
     // and account for this extra memory within size.
     intptr_t startPtr = reinterpret_cast<intptr_t>(start);
     intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
     void* pageStart = reinterpret_cast<void*>(pageStartPtr);
     size += (startPtr - pageStartPtr);
 
     // Round size up
     size += (pageSize - 1);
     size &= ~(pageSize - 1);
 
-    mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+    mprotect(pageStart, size, (setting == Writable) ? FLAGS_RW : FLAGS_RX);
 }
-#endif
 
+/* static */ unsigned
+ExecutableAllocator::initialProtectionFlags(ProtectionSetting protection)
+{
+    if (!nonWritableJitCode)
+        return FLAGS_RW | FLAGS_RX;
+
+    return (protection == Writable) ? FLAGS_RW : FLAGS_RX;
+}
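
The rounding in reprotectRegion is worth a worked example: mprotect (and
VirtualProtect below) only accept page-granular ranges, so an arbitrary
[start, start + size) region is first widened to whole pages. With 4K pages
and hypothetical addresses:

    #include <stdint.h>
    #include <assert.h>

    int main() {
        const uintptr_t pageSize = 0x1000;
        uintptr_t startPtr = 0x1234;           // region starts mid-page
        uintptr_t size = 0x2000;

        uintptr_t pageStart = startPtr & ~(pageSize - 1);
        size += startPtr - pageStart;          // absorb the leading partial page
        size = (size + pageSize - 1) & ~(pageSize - 1);  // round up to whole pages

        assert(pageStart == 0x1000);
        assert(size == 0x3000);                // three pages cover the region
        return 0;
    }
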
--- a/js/src/jit/ExecutableAllocatorWin.cpp
+++ b/js/src/jit/ExecutableAllocatorWin.cpp
@@ -1,9 +1,11 @@
-/*
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
  * Copyright (C) 2008 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -30,24 +32,26 @@
 #include "jswin.h"
 
 #include "jit/ExecutableAllocator.h"
 
 using namespace js::jit;
 
 uint64_t ExecutableAllocator::rngSeed;
 
-size_t ExecutableAllocator::determinePageSize()
+size_t
+ExecutableAllocator::determinePageSize()
 {
     SYSTEM_INFO system_info;
     GetSystemInfo(&system_info);
     return system_info.dwPageSize;
 }
 
-void* ExecutableAllocator::computeRandomAllocationAddress()
+void*
+ExecutableAllocator::computeRandomAllocationAddress()
 {
     /*
      * Inspiration is V8's OS::Allocate in platform-win32.cc.
      *
      * VirtualAlloc takes 64K chunks out of the virtual address space, so we
      * keep 16b alignment.
      *
      * x86: V8 comments say that keeping addresses in the [64MiB, 1GiB) range
@@ -181,17 +185,16 @@ UnregisterExecutableMemory(void* p, size
 }
 #endif
 
 void*
 js::jit::AllocateExecutableMemory(void* addr, size_t bytes, unsigned permissions, const char* tag,
                                   size_t pageSize)
 {
     MOZ_ASSERT(bytes % pageSize == 0);
-    MOZ_ASSERT(permissions == PAGE_EXECUTE_READWRITE);
 
 #ifdef JS_CPU_X64
     if (sJitExceptionHandler)
         bytes += pageSize;
 #endif
 
     void* p = VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, permissions);
     if (!p)
@@ -221,32 +224,62 @@ js::jit::DeallocateExecutableMemory(void
         addr = (uint8_t*)addr - pageSize;
         UnregisterExecutableMemory(addr, bytes, pageSize);
     }
 #endif
 
     VirtualFree(addr, 0, MEM_RELEASE);
 }
 
-ExecutablePool::Allocation ExecutableAllocator::systemAlloc(size_t n)
+ExecutablePool::Allocation
+ExecutableAllocator::systemAlloc(size_t n)
 {
     void* allocation = nullptr;
     if (!RandomizeIsBroken()) {
         void* randomAddress = computeRandomAllocationAddress();
-        allocation = AllocateExecutableMemory(randomAddress, n, PAGE_EXECUTE_READWRITE,
+        allocation = AllocateExecutableMemory(randomAddress, n, initialProtectionFlags(Executable),
                                               "js-jit-code", pageSize);
     }
     if (!allocation) {
-        allocation = AllocateExecutableMemory(nullptr, n, PAGE_EXECUTE_READWRITE,
+        allocation = AllocateExecutableMemory(nullptr, n, initialProtectionFlags(Executable),
                                               "js-jit-code", pageSize);
     }
     ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
     return alloc;
 }
 
-void ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
+void
+ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
 {
     DeallocateExecutableMemory(alloc.pages, alloc.size, pageSize);
 }
 
-#if ENABLE_ASSEMBLER_WX_EXCLUSIVE
-#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
-#endif
+void
+ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
+{
+    MOZ_ASSERT(nonWritableJitCode);
+    MOZ_ASSERT(pageSize);
+
+    // Calculate the start of the page containing this region,
+    // and account for this extra memory within size.
+    intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+    intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+    void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+    size += (startPtr - pageStartPtr);
+
+    // Round size up
+    size += (pageSize - 1);
+    size &= ~(pageSize - 1);
+
+    DWORD oldProtect;
+    int flags = (setting == Writable) ? PAGE_READWRITE : PAGE_EXECUTE_READ;
+    if (!VirtualProtect(pageStart, size, flags, &oldProtect))
+        MOZ_CRASH();
+}
+
+/* static */ unsigned
+ExecutableAllocator::initialProtectionFlags(ProtectionSetting protection)
+{
+    if (!nonWritableJitCode)
+        return PAGE_EXECUTE_READWRITE;
+
+    return (protection == Writable) ? PAGE_READWRITE : PAGE_EXECUTE_READ;
+}
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -762,16 +762,22 @@ JitCode::copyFrom(MacroAssembler& masm)
 void
 JitCode::traceChildren(JSTracer* trc)
 {
     // Note that we cannot mark invalidated scripts, since we've basically
     // corrupted the code stream by injecting bailouts.
     if (invalidated())
         return;
 
+    // If we're moving objects, we need writable JIT code.
+    ReprotectCode reprotect = (trc->runtime()->isHeapMinorCollecting() || zone()->isGCCompacting())
+                              ? Reprotect
+                              : DontReprotect;
+    MaybeAutoWritableJitCode awjc(this, reprotect);
+
     if (jumpRelocTableBytes_) {
         uint8_t* start = code_ + jumpRelocTableOffset();
         CompactBufferReader reader(start, start + jumpRelocTableBytes_);
         MacroAssembler::TraceJumpRelocations(trc, this, reader);
     }
     if (dataRelocTableBytes_) {
         uint8_t* start = code_ + dataRelocTableOffset();
         CompactBufferReader reader(start, start + dataRelocTableBytes_);
@@ -780,16 +786,18 @@ JitCode::traceChildren(JSTracer* trc)
 }
 
 void
 JitCode::fixupNurseryObjects(JSContext* cx, const ObjectVector& nurseryObjects)
 {
     if (nurseryObjects.empty() || !dataRelocTableBytes_)
         return;
 
+    AutoWritableJitCode awjc(this);
+
     uint8_t* start = code_ + dataRelocTableOffset();
     CompactBufferReader reader(start, start + dataRelocTableBytes_);
     MacroAssembler::FixupNurseryObjects(cx, this, reader, nurseryObjects);
 }
 
 void
 JitCode::finalize(FreeOp* fop)
 {
@@ -801,33 +809,37 @@ JitCode::finalize(FreeOp* fop)
         MOZ_ASSERT(rt->jitRuntime()->hasJitcodeGlobalTable());
         MOZ_ASSERT(!rt->jitRuntime()->getJitcodeGlobalTable()->lookup(raw(), &result, rt));
     }
 #endif
 
     // Buffer can be freed at any time hereafter. Catch use-after-free bugs.
     // Don't do this if the Ion code is protected, as the signal handler will
     // deadlock trying to reacquire the interrupt lock.
-    memset(code_, JS_SWEPT_CODE_PATTERN, bufferSize_);
-    code_ = nullptr;
+    {
+        AutoWritableJitCode awjc(this);
+        memset(code_, JS_SWEPT_CODE_PATTERN, bufferSize_);
+        code_ = nullptr;
+    }
 
     // Code buffers are stored inside JSC pools.
     // Pools are refcounted. Releasing the pool may free it.
     if (pool_) {
         // Horrible hack: if we are using perf integration, we don't
         // want to reuse code addresses, so we just leak the memory instead.
         if (!PerfEnabled())
             pool_->release(headerSize_ + bufferSize_, CodeKind(kind_));
         pool_ = nullptr;
     }
 }
 
 void
 JitCode::togglePreBarriers(bool enabled)
 {
+    AutoWritableJitCode awjc(this);
     uint8_t* start = code_ + preBarrierTableOffset();
     CompactBufferReader reader(start, start + preBarrierTableBytes_);
 
     while (reader.more()) {
         size_t offset = reader.readUnsigned();
         CodeLocationLabel loc(this, CodeOffsetLabel(offset));
         if (enabled)
             Assembler::ToggleToCmp(loc);
@@ -1210,16 +1222,17 @@ IonScript::purgeCaches()
     // Don't reset any ICs if we're invalidated, otherwise, repointing the
     // inline jump could overwrite an invalidation marker. These ICs can
     // no longer run, however, the IC slow paths may be active on the stack.
     // ICs therefore are required to check for invalidation before patching,
     // to ensure the same invariant.
     if (invalidated())
         return;
 
+    AutoWritableJitCode awjc(method());
     for (size_t i = 0; i < numCaches(); i++)
         getCacheFromIndex(i).reset();
 }
 
 void
 IonScript::unlinkFromRuntime(FreeOp* fop)
 {
     // The writes to the executable buffer below may clobber backedge jumps, so
@@ -2824,16 +2837,17 @@ InvalidateActivation(FreeOp* fop, const 
             continue;
 
         // Write the delta (from the return address offset to the
         // IonScript pointer embedded into the invalidation epilogue)
         // where the safepointed call instruction used to be. We rely on
         // the call sequence causing the safepoint being >= the size of
         // a uint32, which is checked during safepoint index
         // construction.
+        AutoWritableJitCode awjc(ionCode);
         const SafepointIndex* si = ionScript->getSafepointIndex(it.returnAddressToFp());
         CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
         ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
                           (it.returnAddressToFp() - ionCode->raw());
         Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta));
 
         CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
         CodeLocationLabel invalidateEpilogue(ionCode, CodeOffsetLabel(ionScript->invalidateEpilogueOffset()));
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -241,35 +241,38 @@ class IonCache::StubAttacher
         MOZ_ASSERT(!hasStubCodePatchOffset_);
         stubCodePatchOffset_ = masm.PushWithPatch(STUB_ADDR);
         hasStubCodePatchOffset_ = true;
     }
 
     void patchRejoinJump(MacroAssembler& masm, JitCode* code) {
         rejoinOffset_.fixup(&masm);
         CodeLocationJump rejoinJump(code, rejoinOffset_);
+        AutoWritableJitCode awjc(code);
         PatchJump(rejoinJump, rejoinLabel_);
     }
 
     void patchStubCodePointer(MacroAssembler& masm, JitCode* code) {
         if (hasStubCodePatchOffset_) {
+            AutoWritableJitCode awjc(code);
             stubCodePatchOffset_.fixup(&masm);
             Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
                                                ImmPtr(code), STUB_ADDR);
         }
     }
 
     void patchNextStubJump(MacroAssembler& masm, JitCode* code) {
         // Patch the previous nextStubJump of the last stub, or the jump from the
         // codeGen, to jump into the newly allocated code.
-        PatchJump(cache_.lastJump_, CodeLocationLabel(code));
+        PatchJump(cache_.lastJump_, CodeLocationLabel(code), Reprotect);
 
         // If this path is not taken, we are producing an entry which can no
         // longer go back into the update function.
         if (hasNextStubOffset_) {
+            AutoWritableJitCode awjc(code);
             nextStubOffset_.fixup(&masm);
             CodeLocationJump nextStubJump(code, nextStubOffset_);
             PatchJump(nextStubJump, cache_.fallbackLabel_);
 
             // When the last stub fails, it fallback to the ool call which can
             // produce a stub. Next time we generate a stub, we will patch the
             // nextStub jump to try the new stub.
             cache_.lastJump_ = nextStubJump;
@@ -366,16 +369,17 @@ IonCache::linkAndAttachStub(JSContext* c
     }
 
     return true;
 }
 
 void
 IonCache::updateBaseAddress(JitCode* code, MacroAssembler& masm)
 {
+    AutoWritableJitCode awjc(code);
     fallbackLabel_.repoint(code, &masm);
     initialJump_.repoint(code, &masm);
     lastJump_.repoint(code, &masm);
     rejoinLabel_.repoint(code, &masm);
 }
 
 static void*
 GetReturnAddressToIonCode(JSContext* cx)
@@ -1995,18 +1999,19 @@ GetPropertyIC::reset()
     hasTypedArrayLengthStub_ = false;
     hasSharedTypedArrayLengthStub_ = false;
     hasStrictArgumentsLengthStub_ = false;
     hasNormalArgumentsLengthStub_ = false;
     hasGenericProxyStub_ = false;
 }
 
 void
-IonCache::disable()
+IonCache::disable(IonScript* ion)
 {
+    AutoWritableJitCode awjc(ion->method());
     reset();
     this->disabled_ = 1;
 }
 
 void
 IonCache::reset()
 {
     this->stubCount_ = 0;
@@ -4037,17 +4042,17 @@ GetElementIC::update(JSContext* cx, Hand
     if (!GetObjectElementOperation(cx, JSOp(*pc), obj, obj, idval, res))
         return false;
 
     // Disable cache when we reach max stubs or update failed too much.
     if (!attachedStub) {
         cache.incFailedUpdates();
         if (cache.shouldDisable()) {
             JitSpew(JitSpew_IonIC, "Disable inline cache");
-            cache.disable();
+            cache.disable(ion);
         }
     } else {
         cache.resetFailedUpdates();
     }
 
     if (!cache.monitoredResult())
         TypeScript::Monitor(cx, script, pc, res);
     return true;
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -238,17 +238,17 @@ class IonCache
         pc_(nullptr),
         profilerLeavePc_(nullptr),
         initialJump_(),
         lastJump_(),
         rejoinLabel_()
     {
     }
 
-    virtual void disable();
+    void disable(IonScript* ion);
     inline bool isDisabled() const {
         return disabled_;
     }
 
     // Set the initial 'out-of-line' jump state of the cache. The fallbackLabel is
     // the location of the out-of-line update (slow) path.  This location will
     // be set to the exitJump of the last generated stub.
     void setFallbackLabel(CodeOffsetLabel fallbackLabel) {
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -98,16 +98,20 @@ class JitCode : public gc::TenuredCell
     }
     bool containsNativePC(const void* addr) const {
         const uint8_t* addr_u8 = (const uint8_t*) addr;
         return raw() <= addr_u8 && addr_u8 < rawEnd();
     }
     size_t instructionsSize() const {
         return insnSize_;
     }
+    size_t bufferSize() const {
+        return bufferSize_;
+    }
+
     void traceChildren(JSTracer* trc);
     void finalize(FreeOp* fop);
     void fixupAfterMovingGC() {}
     void setInvalidated() {
         invalidated_ = true;
     }
 
     void fixupNurseryObjects(JSContext* cx, const ObjectVector& nurseryObjects);
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -552,12 +552,57 @@ void InvalidateAll(FreeOp* fop, JS::Zone
 void FinishInvalidation(FreeOp* fop, JSScript* script);
 
 // On windows systems, really large frames need to be incrementally touched.
 // The following constant defines the minimum increment of the touch.
 #ifdef XP_WIN
 const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
 #endif
 
+// If ExecutableAllocator::nonWritableJitCode is |true|, this class will ensure
+// JIT code is writable (has RW permissions) in its scope. If nonWritableJitCode
+// is |false|, it's a no-op.
+class MOZ_STACK_CLASS AutoWritableJitCode
+{
+    JSRuntime* rt_;
+    void* addr_;
+    size_t size_;
+
+  public:
+    AutoWritableJitCode(JSRuntime* rt, void* addr, size_t size)
+      : rt_(rt), addr_(addr), size_(size)
+    {
+        rt_->toggleAutoWritableJitCodeActive(true);
+        ExecutableAllocator::makeWritable(addr_, size_);
+    }
+    AutoWritableJitCode(void* addr, size_t size)
+      : AutoWritableJitCode(TlsPerThreadData.get()->runtimeFromMainThread(), addr, size)
+    {}
+    explicit AutoWritableJitCode(JitCode* code)
+      : AutoWritableJitCode(code->runtimeFromMainThread(), code->raw(), code->bufferSize())
+    {}
+    ~AutoWritableJitCode() {
+        ExecutableAllocator::makeExecutable(addr_, size_);
+        rt_->toggleAutoWritableJitCodeActive(false);
+    }
+};
+
+enum ReprotectCode { Reprotect = true, DontReprotect = false };
+
+class MOZ_STACK_CLASS MaybeAutoWritableJitCode
+{
+    mozilla::Maybe<AutoWritableJitCode> awjc_;
+
+  public:
+    MaybeAutoWritableJitCode(void* addr, size_t size, ReprotectCode reprotect) {
+        if (reprotect)
+            awjc_.emplace(addr, size);
+    }
+    MaybeAutoWritableJitCode(JitCode* code, ReprotectCode reprotect) {
+        if (reprotect)
+            awjc_.emplace(code);
+    }
+};
+
 } // namespace jit
 } // namespace js
 
 #endif /* jit_JitCompartment_h */
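
A hedged usage sketch of the two guards declared above (the caller is
illustrative; it assumes the JitCode type and the enum from this patch):

    void patchExample(js::jit::JitCode* code)
    {
        using namespace js::jit;
        {
            AutoWritableJitCode awjc(code);  // code's pages are RW in this scope
            // ... patch immediates, toggle jumps, etc. ...
        }                                    // destructor reprotects to RX

        // When a call site only sometimes needs reprotection (e.g. tracing
        // during minor GC), the Maybe variant keeps the decision in one enum:
        MaybeAutoWritableJitCode mawjc(code, DontReprotect);  // no-op here
    }
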
--- a/js/src/jit/Linker.h
+++ b/js/src/jit/Linker.h
@@ -63,16 +63,17 @@ class Linker
         codeStart = (uint8_t*)AlignBytes((uintptr_t)codeStart, CodeAlignment);
         uint32_t headerSize = codeStart - result;
         JitCode* code = JitCode::New<allowGC>(cx, codeStart, bytesNeeded - headerSize,
                                               headerSize, pool, kind);
         if (!code)
             return nullptr;
         if (masm.oom())
             return fail(cx);
+        AutoWritableJitCode awjc(result, bytesNeeded);
         code->copyFrom(masm);
         masm.link(code);
         if (masm.embedsNurseryPointers())
             cx->runtime()->gc.storeBuffer.putWholeCellFromMainThread(code);
         return code;
     }
 };
 
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -2245,21 +2245,28 @@ LIRGenerator::visitFunctionEnvironment(M
 {
     define(new(alloc()) LFunctionEnvironment(useRegisterAtStart(ins->function())), ins);
 }
 
 void
 LIRGenerator::visitInterruptCheck(MInterruptCheck* ins)
 {
     // Implicit interrupt checks require asm.js signal handlers to be installed.
+    // They also require writable JIT code: reprotecting in patchIonBackedges
+    // would be expensive and using AutoWritableJitCode in the signal handler
+    // is complicated because there could be another AutoWritableJitCode on the
+    // stack.
     LInstructionHelper<0, 0, 0>* lir;
-    if (GetJitContext()->runtime->canUseSignalHandlers())
+    if (GetJitContext()->runtime->canUseSignalHandlers() &&
+        !ExecutableAllocator::nonWritableJitCode)
+    {
         lir = new(alloc()) LInterruptCheckImplicit();
-    else
+    } else {
         lir = new(alloc()) LInterruptCheck();
+    }
     add(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
 LIRGenerator::visitAsmJSInterruptCheck(MAsmJSInterruptCheck* ins)
 {
     gen->setPerformsCall();
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -526,35 +526,42 @@ Imm16::Imm16(uint32_t imm)
     MOZ_ASSERT(decode() == imm);
 }
 
 Imm16::Imm16()
   : invalid(0xfff)
 { }
 
 void
-jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
 {
     // We need to determine if this jump can fit into the standard 24+2 bit
     // address or if we need a larger branch (or just need to use our pool
     // entry).
     Instruction* jump = (Instruction*)jump_.raw();
     // jumpWithPatch() returns the offset of the jump and never a pool or nop.
     Assembler::Condition c;
     jump->extractCond(&c);
     MOZ_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());
 
     int jumpOffset = label.raw() - jump_.raw();
     if (BOffImm::IsInRange(jumpOffset)) {
         // This instruction started off as a branch, and will remain one.
+        MaybeAutoWritableJitCode awjc(jump, sizeof(Instruction), reprotect);
         Assembler::RetargetNearBranch(jump, jumpOffset, c);
     } else {
         // This instruction started off as a branch, but now needs to be demoted
         // to an ldr.
         uint8_t** slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());
+
+        // Ensure both the branch and the slot are writable.
+        MOZ_ASSERT(uintptr_t(slot) > uintptr_t(jump));
+        size_t size = uintptr_t(slot) - uintptr_t(jump) + sizeof(void*);
+        MaybeAutoWritableJitCode awjc(jump, size, reprotect);
+
         Assembler::RetargetFarBranch(jump, slot, label.raw(), c);
     }
 }
 
 void
 Assembler::finish()
 {
     flush();
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1096,17 +1096,18 @@ class Operand
         return DTRAddr(baseReg(), DtrOffImm(offset));
     }
     VFPAddr toVFPAddr() const {
         return VFPAddr(baseReg(), VFPOffImm(offset));
     }
 };
 
 void
-PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
+          ReprotectCode reprotect = DontReprotect);
 
 static inline void
 PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     PatchJump(jump_, label);
 }
 
 class InstructionIterator;
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -488,18 +488,24 @@ class ABIArgGenerator
 
     static const Register NonArgReturnReg0;
     static const Register NonArgReturnReg1;
     static const Register NonArg_VolatileReg;
     static const Register NonReturn_VolatileReg0;
     static const Register NonReturn_VolatileReg1;
 };
 
-static inline void PatchJump(CodeLocationJump&, CodeLocationLabel) { MOZ_CRASH(); }
+static inline void
+PatchJump(CodeLocationJump&, CodeLocationLabel, ReprotectCode reprotect = DontReprotect)
+{
+    MOZ_CRASH();
+}
+
 static inline bool GetTempRegForIntArg(uint32_t, uint32_t, Register*) { MOZ_CRASH(); }
+
 static inline
 void PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     MOZ_CRASH();
 }
 
 } // namespace jit
 } // namespace js
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -170,19 +170,20 @@ Assembler::PatchableJumpAddress(JitCode*
     jumpOffset += index * SizeOfJumpTableEntry;
 
     MOZ_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
     return code->raw() + jumpOffset;
 }
 
 /* static */
 void
-Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target)
+Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect)
 {
     uint8_t** index = (uint8_t**) (entry + SizeOfExtendedJump - sizeof(void*));
+    MaybeAutoWritableJitCode awjc(index, sizeof(void*), reprotect);
     *index = target;
 }
 
 void
 Assembler::finish()
 {
     if (!jumps_.length() || oom())
         return;
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -260,17 +260,17 @@ class Assembler : public AssemblerX86Sha
   public:
     using AssemblerX86Shared::j;
     using AssemblerX86Shared::jmp;
     using AssemblerX86Shared::push;
     using AssemblerX86Shared::pop;
     using AssemblerX86Shared::vmovq;
 
     static uint8_t* PatchableJumpAddress(JitCode* code, size_t index);
-    static void PatchJumpEntry(uint8_t* entry, uint8_t* target);
+    static void PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect);
 
     Assembler()
       : extendedJumpTable_(0)
     {
     }
 
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
@@ -789,25 +789,30 @@ class Assembler : public AssemblerX86Sha
         masm.vcvtsq2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
     }
     void vcvtsq2ss(Register src1, FloatRegister src0, FloatRegister dest) {
         masm.vcvtsq2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
     }
 };
 
 static inline void
-PatchJump(CodeLocationJump jump, CodeLocationLabel label)
+PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
 {
     if (X86Encoding::CanRelinkJump(jump.raw(), label.raw())) {
+        MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
         X86Encoding::SetRel32(jump.raw(), label.raw());
     } else {
-        X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
-        Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw());
+        {
+            MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
+            X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
+        }
+        Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw(), reprotect);
     }
 }
+
 static inline void
 PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     PatchJump(jump_, label);
 }
 
 static inline bool
 GetIntArgReg(uint32_t intArg, uint32_t floatArg, Register* out)
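
The 8-byte writable window used by PatchJump above is sized to cover the whole
patched instruction: SetRel32 rewrites the 4-byte immediate that ends at
jump.raw(), and the opcode bytes (E9 for jmp rel32, 0F 8x for Jcc rel32) sit
immediately before it. A worked sketch, assuming those two encodings:

    #include <stdint.h>
    #include <assert.h>

    int main() {
        uintptr_t raw = 0x7000;            // hypothetical patch point (end of insn)
        uintptr_t windowStart = raw - 8;   // window is [raw - 8, raw)
        assert(raw - 5 >= windowStart);    // E9 <imm32>: 5 bytes, fits
        assert(raw - 6 >= windowStart);    // 0F 8x <imm32>: 6 bytes, fits
        return 0;
    }
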
--- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -57,17 +57,18 @@ TraceDataRelocations(JSTracer* trc, uint
         // All pointers on x64 will have the top bits cleared. If those bits
         // are not cleared, this must be a Value.
         uintptr_t* word = reinterpret_cast<uintptr_t*>(ptr);
         if (*word >> JSVAL_TAG_SHIFT) {
             jsval_layout layout;
             layout.asBits = *word;
             Value v = IMPL_TO_JSVAL(layout);
             TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value");
-            *word = JSVAL_TO_IMPL(v).asBits;
+            if (*word != JSVAL_TO_IMPL(v).asBits)
+                *word = JSVAL_TO_IMPL(v).asBits;
             continue;
         }
 #endif
 
         // The low bit shouldn't be set. If it is, we probably got a dummy
         // pointer inserted by CodeGenerator::visitNurseryObject, but we
         // shouldn't be able to trigger GC before those are patched to their
         // real values.
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -154,26 +154,27 @@ static const Scale ScalePointer = TimesF
 } // namespace js
 
 #include "jit/x86-shared/Assembler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 static inline void
-PatchJump(CodeLocationJump jump, CodeLocationLabel label)
+PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
 {
 #ifdef DEBUG
     // Assert that we're overwriting a jump instruction, either:
     //   0F 80+cc <imm32>, or
     //   E9 <imm32>
     unsigned char* x = (unsigned char*)jump.raw() - 5;
     MOZ_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
                (*x == 0xE9));
 #endif
+    MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
     X86Encoding::SetRel32(jump.raw(), label.raw());
 }
 static inline void
 PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     PatchJump(jump_, label);
 }
 
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -3585,23 +3585,21 @@ EscapeForShell(AutoCStringVector& argv)
         argv.replace(i, escaped);
     }
     return true;
 }
 #endif
 
 static Vector<const char*, 4, js::SystemAllocPolicy> sPropagatedFlags;
 
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
 static bool
 PropagateFlagToNestedShells(const char* flag)
 {
     return sPropagatedFlags.append(flag);
 }
-#endif
 
 static bool
 NestedShell(JSContext* cx, unsigned argc, jsval* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     AutoCStringVector argv(cx);
 
@@ -6301,16 +6299,17 @@ main(int argc, char** argv, char** envp)
         || !op.addStringOption('\0', "ion-parallel-compile", "on/off",
                                "--ion-parallel compile is deprecated. Use --ion-offthread-compile.")
         || !op.addBoolOption('\0', "baseline", "Enable baseline compiler (default)")
         || !op.addBoolOption('\0', "no-baseline", "Disable baseline compiler")
         || !op.addBoolOption('\0', "baseline-eager", "Always baseline-compile methods")
         || !op.addIntOption('\0', "baseline-warmup-threshold", "COUNT",
                             "Wait for COUNT calls or iterations before baseline-compiling "
                             "(default: 10)", -1)
+        || !op.addBoolOption('\0', "non-writable-jitcode", "Allocate JIT code as non-writable memory.")
         || !op.addBoolOption('\0', "no-fpu", "Pretend CPU does not support floating-point operations "
                              "to test JIT codegen (no-op on platforms other than x86).")
         || !op.addBoolOption('\0', "no-sse3", "Pretend CPU does not support SSE3 instructions and above "
                              "to test JIT codegen (no-op on platforms other than x86 and x64).")
         || !op.addBoolOption('\0', "no-sse4", "Pretend CPU does not support SSE4 instructions"
                              "to test JIT codegen (no-op on platforms other than x86 and x64).")
         || !op.addBoolOption('\0', "enable-avx", "AVX is disabled by default. Enable AVX. "
                              "(no-op on platforms other than x86 and x64).")
@@ -6376,16 +6375,21 @@ main(int argc, char** argv, char** envp)
 #ifdef DEBUG
     /*
      * Process OOM options as early as possible so that we can observe as many
      * allocations as possible.
      */
     OOM_printAllocationCount = op.getBoolOption('O');
 #endif
 
+    if (op.getBoolOption("non-writable-jitcode")) {
+        js::jit::ExecutableAllocator::nonWritableJitCode = true;
+        PropagateFlagToNestedShells("--non-writable-jitcode");
+    }
+
 #ifdef JS_CODEGEN_X86
     if (op.getBoolOption("no-fpu"))
         js::jit::CPUInfo::SetFloatingPointDisabled();
 #endif
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     if (op.getBoolOption("no-sse3")) {
         js::jit::CPUInfo::SetSSE3Disabled();
--- a/js/src/tests/lib/tests.py
+++ b/js/src/tests/lib/tests.py
@@ -10,17 +10,17 @@ from threading import Thread
 from results import TestOutput
 
 # When run on tbpl, we run each test multiple times with the following
 # arguments.
 JITFLAGS = {
     'all': [
         [], # no flags, normal baseline and ion
         ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
-        ['--ion-eager', '--ion-offthread-compile=off',
+        ['--ion-eager', '--ion-offthread-compile=off', '--non-writable-jitcode',
          '--ion-check-range-analysis', '--ion-extra-checks', '--no-sse3', '--no-threads'],
         ['--baseline-eager'],
         ['--baseline-eager', '--no-fpu'],
         ['--no-baseline', '--no-ion'],
     ],
     # used by jit_test.py
     'ion': [
         ['--baseline-eager'],
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -207,16 +207,17 @@ JSRuntime::JSRuntime(JSRuntime* parentRu
     preserveWrapperCallback(nullptr),
     jitSupportsFloatingPoint(false),
     jitSupportsSimd(false),
     ionPcScriptCache(nullptr),
     scriptEnvironmentPreparer(nullptr),
     ctypesActivityCallback(nullptr),
     offthreadIonCompilationEnabled_(true),
     parallelParsingEnabled_(true),
+    autoWritableJitCodeActive_(false),
 #ifdef DEBUG
     enteredPolicy(nullptr),
 #endif
     largeAllocationFailureCallback(nullptr),
     oomCallback(nullptr),
     debuggerMallocSizeOf(ReturnZeroSize),
     lastAnimationTime(0)
 {
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -1376,16 +1376,18 @@ struct JSRuntime : public JS::shadow::Ru
 
   private:
     JS::RuntimeOptions options_;
 
     // Settings for how helper threads can be used.
     bool offthreadIonCompilationEnabled_;
     bool parallelParsingEnabled_;
 
+    bool autoWritableJitCodeActive_;
+
   public:
 
     // Note: these values may be toggled dynamically (in response to about:config
     // prefs changing).
     void setOffthreadIonCompilationEnabled(bool value) {
         offthreadIonCompilationEnabled_ = value;
     }
     bool canUseOffthreadIonCompilation() const {
@@ -1393,16 +1395,22 @@ struct JSRuntime : public JS::shadow::Ru
     }
     void setParallelParsingEnabled(bool value) {
         parallelParsingEnabled_ = value;
     }
     bool canUseParallelParsing() const {
         return parallelParsingEnabled_;
     }
 
+    void toggleAutoWritableJitCodeActive(bool b) {
+        MOZ_ASSERT(autoWritableJitCodeActive_ != b, "AutoWritableJitCode should not be nested.");
+        MOZ_ASSERT(CurrentThreadCanAccessRuntime(this));
+        autoWritableJitCodeActive_ = b;
+    }
+
     const JS::RuntimeOptions& options() const {
         return options_;
     }
     JS::RuntimeOptions& options() {
         return options_;
     }
 
 #ifdef DEBUG
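
Finally, a hedged sketch of the misuse that the toggleAutoWritableJitCodeActive
assertion above catches: the guards are meant to be used one at a time, so
nesting two AutoWritableJitCode scopes (directly, or via a helper that opens
its own guard) trips the assert in debug builds:

    void misuse(js::jit::JitCode* a, js::jit::JitCode* b)
    {
        js::jit::AutoWritableJitCode awjcA(a);  // flag flips false -> true
        js::jit::AutoWritableJitCode awjcB(b);  // asserts: flag is already true
    }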