Backed out 3 changesets (bug 1435360) for Android XPCShell failures a=backout on a CLOSED TREE
authorCiure Andrei <aciure@mozilla.com>
Sat, 10 Mar 2018 02:35:41 +0200
changeset 459958 55c87e7ea09d4d9d6a38603360d57523b3c4c821
parent 459957 12afe6be329fe78619bbc35772da7b7ce1ada2b5
child 459959 a97bab3e3c901e13d2b1cd0c0a4fa12f1b941d8c
push id8824
push userarchaeopteryx@coole-files.de
push dateMon, 12 Mar 2018 14:54:48 +0000
treeherdermozilla-beta@8d9daab95d68 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersbackout
bugs1435360
milestone60.0a1
backs oute2a6bd47f69707533d8cb6eda535e9a011bfa8bd
8cdf945be534dacae33245106e6718055a80bd7f
a463d224c412529aa8d7b02103506f9a714a6dd9
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 3 changesets (bug 1435360) for Android XPCShell failures a=backout on a CLOSED TREE Backed out changeset e2a6bd47f697 (bug 1435360) Backed out changeset 8cdf945be534 (bug 1435360) Backed out changeset a463d224c412 (bug 1435360)
js/public/MemoryMetrics.h
js/src/jit/AsyncInterrupt.cpp
js/src/jit/AsyncInterrupt.h
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/Ion.cpp
js/src/jit/JitOptions.cpp
js/src/jit/JitOptions.h
js/src/jit/Lowering.cpp
js/src/jit/Lowering.h
js/src/jit/MIR.h
js/src/jit/MOpcodes.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/arm/Simulator-arm.h
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
js/src/jit/arm64/vixl/Simulator-vixl.h
js/src/jit/mips32/Assembler-mips32.h
js/src/jit/mips32/Simulator-mips32.cpp
js/src/jit/mips32/Simulator-mips32.h
js/src/jit/mips64/Assembler-mips64.h
js/src/jit/mips64/Simulator-mips64.cpp
js/src/jit/mips64/Simulator-mips64.h
js/src/jit/none/Architecture-none.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/LIR-shared.h
js/src/jit/shared/LOpcodes-shared.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x86/Assembler-x86.h
js/src/jsapi.cpp
js/src/moz.build
js/src/vm/JSCompartment.cpp
js/src/vm/JSContext.cpp
js/src/vm/MutexIDs.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmBuiltins.cpp
js/src/wasm/WasmCode.cpp
js/src/wasm/WasmCode.h
js/src/wasm/WasmCompartment.cpp
js/src/wasm/WasmCompartment.h
js/src/wasm/WasmFrameIter.cpp
js/src/wasm/WasmFrameIter.h
js/src/wasm/WasmGenerator.cpp
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmModule.h
js/src/wasm/WasmProcess.cpp
js/src/wasm/WasmProcess.h
js/src/wasm/WasmSignalHandlers.cpp
js/src/wasm/WasmSignalHandlers.h
js/src/wasm/WasmStubs.cpp
js/src/wasm/WasmTypes.cpp
js/src/wasm/WasmTypes.h
js/xpconnect/src/XPCJSRuntime.cpp
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -511,18 +511,17 @@ struct RuntimeSizes
     macro(_, MallocHeap, contexts) \
     macro(_, MallocHeap, temporary) \
     macro(_, MallocHeap, interpreterStack) \
     macro(_, MallocHeap, mathCache) \
     macro(_, MallocHeap, sharedImmutableStringsCache) \
     macro(_, MallocHeap, sharedIntlData) \
     macro(_, MallocHeap, uncompressedSourceCache) \
     macro(_, MallocHeap, scriptData) \
-    macro(_, MallocHeap, tracelogger) \
-    macro(_, MallocHeap, wasmRuntime)
+    macro(_, MallocHeap, tracelogger)
 
     RuntimeSizes()
       : FOR_EACH_SIZE(ZERO_SIZE)
         scriptSourceInfo(),
         code(),
         gc(),
         notableScriptSources()
     {
deleted file mode 100644
--- a/js/src/jit/AsyncInterrupt.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/AsyncInterrupt.h"
-
-#include "jit/JitCompartment.h"
-#include "util/Windows.h"
-
-#if defined(ANDROID)
-# include <sys/system_properties.h>
-#endif
-
-using namespace js;
-using namespace js::jit;
-
-using mozilla::PodArrayZero;
-
-static void
-RedirectIonBackedgesToInterruptCheck(JSContext* cx)
-{
-    // Jitcode may only be modified on the runtime's active thread.
-    if (cx != cx->runtime()->activeContext())
-        return;
-
-    // The faulting thread is suspended so we can access cx fields that can
-    // normally only be accessed by the cx's active thread.
-    AutoNoteSingleThreadedRegion anstr;
-
-    Zone* zone = cx->zoneRaw();
-    if (zone && !zone->isAtomsZone()) {
-        jit::JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
-        if (!jitRuntime)
-            return;
-
-        // If the backedge list is being mutated, the pc must be in C++ code and
-        // thus not in a JIT iloop. We assume that the interrupt flag will be
-        // checked at least once before entering JIT code (if not, no big deal;
-        // the browser will just request another interrupt in a second).
-        if (!jitRuntime->preventBackedgePatching()) {
-            jit::JitZoneGroup* jzg = zone->group()->jitZoneGroup;
-            jzg->patchIonBackedges(cx, jit::JitZoneGroup::BackedgeInterruptCheck);
-        }
-    }
-}
-
-#if !defined(XP_WIN)
-// For the interrupt signal, pick a signal number that:
-//  - is not otherwise used by mozilla or standard libraries
-//  - defaults to nostop and noprint on gdb/lldb so that noone is bothered
-// SIGVTALRM a relative of SIGALRM, so intended for user code, but, unlike
-// SIGALRM, not used anywhere else in Mozilla.
-static const int sJitAsyncInterruptSignal = SIGVTALRM;
-
-static void
-JitAsyncInterruptHandler(int signum, siginfo_t*, void*)
-{
-    MOZ_RELEASE_ASSERT(signum == sJitAsyncInterruptSignal);
-
-    JSContext* cx = TlsContext.get();
-    if (!cx)
-        return;
-
-#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
-    SimulatorProcess::ICacheCheckingDisableCount++;
-#endif
-
-    RedirectIonBackedgesToInterruptCheck(cx);
-
-#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
-    SimulatorProcess::cacheInvalidatedBySignalHandler_ = true;
-    SimulatorProcess::ICacheCheckingDisableCount--;
-#endif
-
-    cx->finishHandlingJitInterrupt();
-}
-#endif
-
-static bool sTriedInstallAsyncInterrupt = false;
-static bool sHaveAsyncInterrupt = false;
-
-void
-jit::EnsureAsyncInterrupt(JSContext* cx)
-{
-    // We assume that there are no races creating the first JSRuntime of the process.
-    if (sTriedInstallAsyncInterrupt)
-        return;
-    sTriedInstallAsyncInterrupt = true;
-
-#if defined(ANDROID) && !defined(__aarch64__)
-    // Before Android 4.4 (SDK version 19), there is a bug
-    //   https://android-review.googlesource.com/#/c/52333
-    // in Bionic's pthread_join which causes pthread_join to return early when
-    // pthread_kill is used (on any thread). Nobody expects the pthread_cond_wait
-    // EINTRquisition.
-    char version_string[PROP_VALUE_MAX];
-    PodArrayZero(version_string);
-    if (__system_property_get("ro.build.version.sdk", version_string) > 0) {
-        if (atol(version_string) < 19)
-            return;
-    }
-#endif
-
-#if defined(XP_WIN)
-    // Windows uses SuspendThread to stop the active thread from another thread.
-#else
-    struct sigaction interruptHandler;
-    interruptHandler.sa_flags = SA_SIGINFO;
-    interruptHandler.sa_sigaction = &JitAsyncInterruptHandler;
-    sigemptyset(&interruptHandler.sa_mask);
-    struct sigaction prev;
-    if (sigaction(sJitAsyncInterruptSignal, &interruptHandler, &prev))
-        MOZ_CRASH("unable to install interrupt handler");
-
-    // There shouldn't be any other handlers installed for
-    // sJitAsyncInterruptSignal. If there are, we could always forward, but we
-    // need to understand what we're doing to avoid problematic interference.
-    if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
-        (prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
-    {
-        MOZ_CRASH("contention for interrupt signal");
-    }
-#endif // defined(XP_WIN)
-
-    sHaveAsyncInterrupt = true;
-}
-
-bool
-jit::HaveAsyncInterrupt()
-{
-    MOZ_ASSERT(sTriedInstallAsyncInterrupt);
-    return sHaveAsyncInterrupt;
-}
-
-// JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
-// C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
-// checked at every Baseline and Ion JIT function prologue). The remaining
-// sources of potential iloops (Ion loop backedges) are handled by this
-// function: Ion loop backedges are patched to instead point to a stub that
-// handles the interrupt;
-void
-jit::InterruptRunningCode(JSContext* cx)
-{
-    // If signal handlers weren't installed, then Ion emit normal interrupt
-    // checks and don't need asynchronous interruption.
-    MOZ_ASSERT(sTriedInstallAsyncInterrupt);
-    if (!sHaveAsyncInterrupt)
-        return;
-
-    // Do nothing if we're already handling an interrupt here, to avoid races
-    // below and in JitRuntime::patchIonBackedges.
-    if (!cx->startHandlingJitInterrupt())
-        return;
-
-    // If we are on context's thread, then we can patch Ion backedges without
-    // any special synchronization.
-    if (cx == TlsContext.get()) {
-        RedirectIonBackedgesToInterruptCheck(cx);
-        cx->finishHandlingJitInterrupt();
-        return;
-    }
-
-    // We are not on the runtime's active thread, so we need to halt the
-    // runtime's active thread first.
-#if defined(XP_WIN)
-    // On Windows, we can simply suspend the active thread. SuspendThread can
-    // sporadically fail if the thread is in the middle of a syscall. Rather
-    // than retrying in a loop, just wait for the next request for interrupt.
-    HANDLE thread = (HANDLE)cx->threadNative();
-    if (SuspendThread(thread) != (DWORD)-1) {
-        RedirectIonBackedgesToInterruptCheck(cx);
-        ResumeThread(thread);
-    }
-    cx->finishHandlingJitInterrupt();
-#else
-    // On Unix, we instead deliver an async signal to the active thread which
-    // halts the thread and callers our JitAsyncInterruptHandler (which has
-    // already been installed by EnsureSignalHandlersInstalled).
-    pthread_t thread = (pthread_t)cx->threadNative();
-    pthread_kill(thread, sJitAsyncInterruptSignal);
-#endif
-}
-
deleted file mode 100644
--- a/js/src/jit/AsyncInterrupt.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_AsyncInterrupt_h
-#define jit_AsyncInterrupt_h
-
-#include "NamespaceImports.h"
-
-namespace js {
-namespace jit {
-
-// Ensure the given JSRuntime is set up to use async interrupts. Failure to
-// enable signal handlers indicates some catastrophic failure and creation of
-// the runtime must fail.
-void
-EnsureAsyncInterrupt(JSContext* cx);
-
-// Return whether the async interrupt can be used to interrupt Ion code.
-bool
-HaveAsyncInterrupt();
-
-// Force any currently-executing JIT code to call HandleExecutionInterrupt.
-extern void
-InterruptRunningCode(JSContext* cx);
-
-} // namespace jit
-} // namespace js
-
-#endif // jit_AsyncInterrupt_h
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -12706,24 +12706,16 @@ CodeGenerator::visitInterruptCheck(LInte
     const void* contextAddr = gen->compartment->zone()->addressOfJSContext();
     masm.loadPtr(AbsoluteAddress(contextAddr), temp);
     masm.branch32(Assembler::NotEqual, Address(temp, offsetof(JSContext, interrupt_)),
                   Imm32(0), ool->entry());
     masm.bind(ool->rejoin());
 }
 
 void
-CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir)
-{
-    MOZ_ASSERT(gen->compilingWasm());
-
-    masm.wasmInterruptCheck(ToRegister(lir->tlsPtr()), lir->mir()->bytecodeOffset());
-}
-
-void
 CodeGenerator::visitWasmTrap(LWasmTrap* lir)
 {
     MOZ_ASSERT(gen->compilingWasm());
     const MWasmTrap* mir = lir->mir();
 
     masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
 }
 
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -460,17 +460,16 @@ class CodeGenerator final : public CodeG
 
 #ifdef DEBUG
     void emitAssertResultV(const ValueOperand output, const TemporaryTypeSet* typeset);
     void emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset);
 #endif
 
     void visitInterruptCheck(LInterruptCheck* lir);
     void visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ins);
-    void visitWasmInterruptCheck(LWasmInterruptCheck* lir);
     void visitWasmTrap(LWasmTrap* lir);
     void visitWasmLoadTls(LWasmLoadTls* ins);
     void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
     void visitWasmAlignmentCheck(LWasmAlignmentCheck* ins);
     void visitRecompileCheck(LRecompileCheck* ins);
     void visitRotate(LRotate* ins);
 
     void visitRandom(LRandom* ins);
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -387,17 +387,17 @@ void
 JitZoneGroup::patchIonBackedges(JSContext* cx, BackedgeTarget target)
 {
     if (target == BackedgeLoopHeader) {
         // We must be on the active thread. The caller must use
         // AutoPreventBackedgePatching to ensure we don't reenter.
         MOZ_ASSERT(cx->runtime()->jitRuntime()->preventBackedgePatching());
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
     } else {
-        // We must be called from jit::InterruptRunningCode, or a signal handler
+        // We must be called from InterruptRunningJitCode, or a signal handler
         // triggered there. rt->handlingJitInterrupt() ensures we can't reenter
         // this code.
         MOZ_ASSERT(!cx->runtime()->jitRuntime()->preventBackedgePatching());
         MOZ_ASSERT(cx->handlingJitInterrupt());
     }
 
     // Do nothing if we know all backedges are already jumping to `target`.
     if (backedgeTarget_ == target)
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -182,16 +182,20 @@ DefaultJitOptions::DefaultJitOptions()
 
     // The bytecode length limit for small function.
     SET_DEFAULT(smallFunctionMaxBytecodeLength_, 130);
 
     // An artificial testing limit for the maximum supported offset of
     // pc-relative jump and call instructions.
     SET_DEFAULT(jumpThreshold, UINT32_MAX);
 
+    // Whether the (ARM) simulators should always interrupt before executing any
+    // instruction.
+    SET_DEFAULT(simulatorAlwaysInterrupt, false);
+
     // Branch pruning heuristic is based on a scoring system, which is look at
     // different metrics and provide a score. The score is computed as a
     // projection where each factor defines the weight of each metric. Then this
     // score is compared against a threshold to prevent a branch from being
     // removed.
     SET_DEFAULT(branchPruningHitCountFactor, 1);
     SET_DEFAULT(branchPruningInstFactor, 10);
     SET_DEFAULT(branchPruningBlockSpanFactor, 100);
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -71,16 +71,17 @@ struct DefaultJitOptions
     bool forceInlineCaches;
     bool fullDebugChecks;
     bool limitScriptSize;
     bool osr;
     bool asmJSAtomicsEnable;
     bool wasmFoldOffsets;
     bool wasmDelayTier2;
     bool ionInterruptWithoutSignals;
+    bool simulatorAlwaysInterrupt;
     uint32_t baselineWarmUpThreshold;
     uint32_t exceptionBailoutThreshold;
     uint32_t frequentBailoutThreshold;
     uint32_t maxStackArgs;
     uint32_t osrPcMismatchesBeforeRecompile;
     uint32_t smallFunctionMaxBytecodeLength_;
     uint32_t jumpThreshold;
     uint32_t branchPruningHitCountFactor;
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -4,21 +4,21 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/Lowering.h"
 
 #include "mozilla/DebugOnly.h"
 #include "mozilla/EndianUtils.h"
 
-#include "jit/AsyncInterrupt.h"
 #include "jit/JitSpewer.h"
 #include "jit/LIR.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
+#include "wasm/WasmSignalHandlers.h"
 
 #include "jit/shared/Lowering-shared-inl.h"
 #include "vm/BytecodeUtil-inl.h"
 #include "vm/JSObject-inl.h"
 
 using namespace js;
 using namespace jit;
 
@@ -88,18 +88,18 @@ void
 LIRGenerator::visitIsConstructing(MIsConstructing* ins)
 {
     define(new(alloc()) LIsConstructing(), ins);
 }
 
 static void
 TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
 {
-    // Implicit interrupt checks require JIT async interrupt support.
-    if (!jit::HaveAsyncInterrupt() || JitOptions.ionInterruptWithoutSignals)
+    // Implicit interrupt checks require wasm signal handlers to be installed.
+    if (!wasm::HaveSignalHandlers() || JitOptions.ionInterruptWithoutSignals)
         return;
 
     // To avoid triggering expensive interrupts (backedge patching) in
     // requestMajorGC and requestMinorGC, use an implicit interrupt check only
     // if the loop body can not trigger GC or affect GC state like the store
     // buffer. We do this by checking there are no safepoints attached to LIR
     // instructions inside the loop.
 
@@ -2733,23 +2733,16 @@ void
 LIRGenerator::visitInterruptCheck(MInterruptCheck* ins)
 {
     LInstruction* lir = new(alloc()) LInterruptCheck(temp());
     add(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
-LIRGenerator::visitWasmInterruptCheck(MWasmInterruptCheck* ins)
-{
-    auto* lir = new(alloc()) LWasmInterruptCheck(useRegisterAtStart(ins->tlsPtr()));
-    add(lir, ins);
-}
-
-void
 LIRGenerator::visitWasmTrap(MWasmTrap* ins)
 {
     add(new(alloc()) LWasmTrap, ins);
 }
 
 void
 LIRGenerator::visitWasmReinterpret(MWasmReinterpret* ins)
 {
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -206,17 +206,16 @@ class LIRGenerator : public LIRGenerator
     void visitMaybeToDoubleElement(MMaybeToDoubleElement* ins) override;
     void visitMaybeCopyElementsForWrite(MMaybeCopyElementsForWrite* ins) override;
     void visitLoadSlot(MLoadSlot* ins) override;
     void visitLoadFixedSlotAndUnbox(MLoadFixedSlotAndUnbox* ins) override;
     void visitFunctionEnvironment(MFunctionEnvironment* ins) override;
     void visitHomeObject(MHomeObject* ins) override;
     void visitHomeObjectSuperBase(MHomeObjectSuperBase* ins) override;
     void visitInterruptCheck(MInterruptCheck* ins) override;
-    void visitWasmInterruptCheck(MWasmInterruptCheck* ins) override;
     void visitWasmTrap(MWasmTrap* ins) override;
     void visitWasmReinterpret(MWasmReinterpret* ins) override;
     void visitStoreSlot(MStoreSlot* ins) override;
     void visitFilterTypeSet(MFilterTypeSet* ins) override;
     void visitTypeBarrier(MTypeBarrier* ins) override;
     void visitPostWriteBarrier(MPostWriteBarrier* ins) override;
     void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins) override;
     void visitArrayLength(MArrayLength* ins) override;
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -8298,43 +8298,16 @@ class MInterruptCheck : public MNullaryI
     INSTRUCTION_HEADER(InterruptCheck)
     TRIVIAL_NEW_WRAPPERS
 
     AliasSet getAliasSet() const override {
         return AliasSet::None();
     }
 };
 
-// Check whether we need to fire the interrupt handler (in wasm code).
-class MWasmInterruptCheck
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    wasm::BytecodeOffset bytecodeOffset_;
-
-    MWasmInterruptCheck(MDefinition* tlsPointer, wasm::BytecodeOffset bytecodeOffset)
-      : MUnaryInstruction(classOpcode, tlsPointer),
-        bytecodeOffset_(bytecodeOffset)
-    {
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmInterruptCheck)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, tlsPtr))
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-    wasm::BytecodeOffset bytecodeOffset() const {
-        return bytecodeOffset_;
-    }
-};
-
 // Directly jumps to the indicated trap, leaving Wasm code and reporting a
 // runtime error.
 
 class MWasmTrap
   : public MAryControlInstruction<0, 0>,
     public NoTypePolicy::Data
 {
     wasm::Trap trap_;
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -272,17 +272,16 @@ namespace jit {
     _(Ceil)                                                                 \
     _(Round)                                                                \
     _(NearbyInt)                                                            \
     _(InCache)                                                              \
     _(HasOwnCache)                                                          \
     _(InstanceOf)                                                           \
     _(InstanceOfCache)                                                      \
     _(InterruptCheck)                                                       \
-    _(WasmInterruptCheck)                                                   \
     _(GetDOMProperty)                                                       \
     _(GetDOMMember)                                                         \
     _(SetDOMProperty)                                                       \
     _(IsConstructor)                                                        \
     _(IsCallable)                                                           \
     _(IsArray)                                                              \
     _(IsTypedArray)                                                         \
     _(IsObject)                                                             \
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -3352,29 +3352,17 @@ MacroAssembler::maybeBranchTestType(MIRT
             MOZ_CRASH("Unsupported type");
         }
     }
 }
 
 void
 MacroAssembler::wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset)
 {
-    uint32_t trapOffset = wasmTrapInstruction().offset();
-    MOZ_ASSERT_IF(!oom(), currentOffset() - trapOffset == WasmTrapInstructionLength);
-
-    append(trap, wasm::TrapSite(trapOffset, bytecodeOffset));
-}
-
-void
-MacroAssembler::wasmInterruptCheck(Register tls, wasm::BytecodeOffset bytecodeOffset)
-{
-    Label ok;
-    branch32(Assembler::Equal, Address(tls, offsetof(wasm::TlsData, interrupt)), Imm32(0), &ok);
-    wasmTrap(wasm::Trap::CheckInterrupt, bytecodeOffset);
-    bind(&ok);
+    append(trap, wasm::TrapSite(wasmTrapInstruction().offset(), bytecodeOffset));
 }
 
 void
 MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee)
 {
     // Load the callee, before the caller's registers are clobbered.
     uint32_t globalDataOffset = callee.importGlobalDataOffset();
     loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code), ABINonArgReg0);
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1488,17 +1488,16 @@ class MacroAssembler : public MacroAssem
 
   public:
     // ========================================================================
     // wasm support
 
     CodeOffset wasmTrapInstruction() PER_SHARED_ARCH;
 
     void wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset);
-    void wasmInterruptCheck(Register tls, wasm::BytecodeOffset bytecodeOffset);
 
     // Emit a bounds check against the wasm heap limit, jumping to 'label' if
     // 'cond' holds. Required when WASM_HUGE_MEMORY is not defined. If
     // JitOptions.spectreMaskIndex is true, in speculative executions 'index' is
     // saturated in-place to 'boundsCheckLimit'.
     template <class L>
     inline void wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, L label)
         DEFINED_ON(arm, arm64, mips32, mips64, x86);
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -123,17 +123,16 @@ static constexpr Register ABINonArgReg2 
 // This register may be volatile or nonvolatile. Avoid d15 which is the
 // ScratchDoubleReg.
 static constexpr FloatRegister ABINonArgDoubleReg { FloatRegisters::d8, VFPRegister::Double };
 
 // These registers may be volatile or nonvolatile.
 // Note: these three registers are all guaranteed to be different
 static constexpr Register ABINonArgReturnReg0 = r4;
 static constexpr Register ABINonArgReturnReg1 = r5;
-static constexpr Register ABINonVolatileReg = r6;
 
 // This register is guaranteed to be clobberable during the prologue and
 // epilogue of an ABI call which must preserve both ABI argument, return
 // and non-volatile registers.
 static constexpr Register ABINonArgReturnVolatileReg = lr;
 
 // TLS pointer argument register for WebAssembly functions. This must not alias
 // any other register used for passing function arguments or return values.
@@ -245,17 +244,16 @@ static_assert(CodeAlignment % SimdMemory
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
   "Stack alignment should be larger than any of the alignments which are used for "
   "spilled values.  Thus it should be larger than the alignment for SIMD accesses.");
 
 static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
-static const uint32_t WasmTrapInstructionLength = 4;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -1155,16 +1155,17 @@ Simulator::Simulator(JSContext* cx)
 
     // Note, allocation and anything that depends on allocated memory is
     // deferred until init(), in order to handle OOM properly.
 
     stack_ = nullptr;
     stackLimit_ = 0;
     pc_modified_ = false;
     icount_ = 0L;
+    wasm_interrupt_ = false;
     break_pc_ = nullptr;
     break_instr_ = 0;
     single_stepping_ = false;
     single_step_callback_ = nullptr;
     single_step_callback_arg_ = nullptr;
     skipCalleeSavedRegsCheck = false;
 
     // Set up architecture state.
@@ -1588,16 +1589,39 @@ Simulator::registerState()
     wasm::RegisterState state;
     state.pc = (void*) get_pc();
     state.fp = (void*) get_register(fp);
     state.sp = (void*) get_register(sp);
     state.lr = (void*) get_register(lr);
     return state;
 }
 
+// The signal handler only redirects the PC to the interrupt stub when the PC is
+// in function code. However, this guard is racy for the ARM simulator since the
+// signal handler samples PC in the middle of simulating an instruction and thus
+// the current PC may have advanced once since the signal handler's guard. So we
+// re-check here.
+void
+Simulator::handleWasmInterrupt()
+{
+    if (!wasm::CodeExists)
+        return;
+
+    uint8_t* pc = (uint8_t*)get_pc();
+
+    const wasm::ModuleSegment* ms = nullptr;
+    if (!wasm::InInterruptibleCode(cx_, pc, &ms))
+        return;
+
+    if (!cx_->activation()->asJit()->startWasmInterrupt(registerState()))
+        return;
+
+    set_pc(int32_t(ms->interruptCode()));
+}
+
 static inline JitActivation*
 GetJitActivation(JSContext* cx)
 {
     if (!wasm::CodeExists)
         return nullptr;
     if (!cx->activation() || !cx->activation()->isJit())
         return nullptr;
     return cx->activation()->asJit();
@@ -1630,17 +1654,17 @@ Simulator::handleWasmSegFault(int32_t ad
 
     MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code());
 
     if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
         return false;
 
     const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
     if (!memoryAccess) {
-        act->asJit()->startWasmTrap(wasm::Trap::OutOfBounds, 0, registerState());
+        MOZ_ALWAYS_TRUE(act->asJit()->startWasmInterrupt(registerState()));
         if (!instance->code().containsCodePC(pc))
             MOZ_CRASH("Cannot map PC to trap handler");
         set_pc(int32_t(moduleSegment->outOfBoundsCode()));
         return true;
     }
 
     MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
     set_pc(int32_t(memoryAccess->trapOutOfLineCode(moduleSegment->base())));
@@ -4896,16 +4920,29 @@ Simulator::disable_single_stepping()
     if (!single_stepping_)
         return;
     single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
     single_stepping_ = false;
     single_step_callback_ = nullptr;
     single_step_callback_arg_ = nullptr;
 }
 
+static void
+FakeInterruptHandler()
+{
+    JSContext* cx = TlsContext.get();
+    uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
+
+    const wasm::ModuleSegment* ms= nullptr;
+    if (!wasm::InInterruptibleCode(cx, pc, &ms))
+        return;
+
+    cx->simulator()->trigger_wasm_interrupt();
+}
+
 template<bool EnableStopSimAt>
 void
 Simulator::execute()
 {
     if (single_stepping_)
         single_step_callback_(single_step_callback_arg_, this, nullptr);
 
     // Get the PC to simulate. Cannot use the accessor here as we need the raw
@@ -4915,19 +4952,26 @@ Simulator::execute()
     while (program_counter != end_sim_pc) {
         if (EnableStopSimAt && (icount_ == Simulator::StopSimAt)) {
             fprintf(stderr, "\nStopped simulation at icount %lld\n", icount_);
             ArmDebugger dbg(this);
             dbg.debug();
         } else {
             if (single_stepping_)
                 single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
+            if (MOZ_UNLIKELY(JitOptions.simulatorAlwaysInterrupt))
+                FakeInterruptHandler();
             SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
             instructionDecode(instr);
             icount_++;
+
+            if (MOZ_UNLIKELY(wasm_interrupt_)) {
+                handleWasmInterrupt();
+                wasm_interrupt_ = false;
+            }
         }
         program_counter = get_pc();
     }
 
     if (single_stepping_)
         single_step_callback_(single_step_callback_arg_, this, nullptr);
 }
 
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -192,16 +192,23 @@ class Simulator
 
     // Special case of set_register and get_register to access the raw PC value.
     void set_pc(int32_t value);
     int32_t get_pc() const;
 
     template <typename T>
     T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
 
+    void trigger_wasm_interrupt() {
+        // This can be called several times if a single interrupt isn't caught
+        // and handled by the simulator, but this is fine; once the current
+        // instruction is done executing, the interrupt will be handled anyhow.
+        wasm_interrupt_ = true;
+    }
+
     void enable_single_stepping(SingleStepCallback cb, void* arg);
     void disable_single_stepping();
 
     uintptr_t stackLimit() const;
     bool overRecursed(uintptr_t newsp = 0) const;
     bool overRecursedWithExtra(uint32_t extra) const;
 
     // Executes ARM instructions until the PC reaches end_sim_pc.
@@ -281,16 +288,17 @@ class Simulator
     inline bool isWatchedStop(uint32_t bkpt_code);
     inline bool isEnabledStop(uint32_t bkpt_code);
     inline void enableStop(uint32_t bkpt_code);
     inline void disableStop(uint32_t bkpt_code);
     inline void increaseStopCounter(uint32_t bkpt_code);
     void printStopInfo(uint32_t code);
 
     // Handle a wasm interrupt triggered by an async signal handler.
+    void handleWasmInterrupt();
     JS::ProfilingFrameIterator::RegisterState registerState();
 
     // Handle any wasm faults, returning true if the fault was handled.
     bool handleWasmSegFault(int32_t addr, unsigned numBytes);
     bool handleWasmIllFault();
 
     // Read and write memory.
     inline uint8_t readBU(int32_t addr);
@@ -413,16 +421,19 @@ class Simulator
     bool inexact_vfp_flag_;
 
     // Simulator support.
     char* stack_;
     uintptr_t stackLimit_;
     bool pc_modified_;
     int64_t icount_;
 
+    // wasm async interrupt / fault support
+    bool wasm_interrupt_;
+
     // Debugger input.
     char* lastDebuggerInput_;
 
     // Registered breakpoints.
     SimInstruction* break_pc_;
     Instr break_instr_;
 
     // Single-stepping support
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -167,17 +167,16 @@ static constexpr bool SupportsSimd = fal
 static constexpr uint32_t SimdMemoryAlignment = 16;
 
 static_assert(CodeAlignment % SimdMemoryAlignment == 0,
   "Code alignment should be larger than any of the alignments which are used for "
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
 static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
-static const uint32_t WasmTrapInstructionLength = 4;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
@@ -453,17 +452,16 @@ static constexpr Register ABINonArgReg2 
 // This register may be volatile or nonvolatile. Avoid d31 which is the
 // ScratchDoubleReg.
 static constexpr FloatRegister ABINonArgDoubleReg = { FloatRegisters::s16, FloatRegisters::Single };
 
 // These registers may be volatile or nonvolatile.
 // Note: these three registers are all guaranteed to be different
 static constexpr Register ABINonArgReturnReg0 = r8;
 static constexpr Register ABINonArgReturnReg1 = r9;
-static constexpr Register ABINonVolatileReg { Registers::x19 };
 
 // This register is guaranteed to be clobberable during the prologue and
 // epilogue of an ABI call which must preserve both ABI argument, return
 // and non-volatile registers.
 static constexpr Register ABINonArgReturnVolatileReg = lr;
 
 // TLS pointer argument register for WebAssembly functions. This must not alias
 // any other register used for passing function arguments or return values.
--- a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -81,16 +81,17 @@ Simulator::~Simulator() {
 void Simulator::ResetState() {
   // Reset the system registers.
   nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
   fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
 
   // Reset registers to 0.
   pc_ = nullptr;
   pc_modified_ = false;
+  wasm_interrupt_ = false;
   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
     set_xreg(i, 0xbadbeef);
   }
   // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
   uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1);
   VIXL_ASSERT(IsSignallingNaN(rawbits_to_double(nan_bits & kDRegMask)));
   VIXL_ASSERT(IsSignallingNaN(rawbits_to_float(nan_bits & kSRegMask)));
   for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
@@ -189,16 +190,25 @@ void Simulator::Destroy(Simulator* sim) 
 }
 
 
 void Simulator::ExecuteInstruction() {
   // The program counter should always be aligned.
   VIXL_ASSERT(IsWordAligned(pc_));
   decoder_->Decode(pc_);
   increment_pc();
+
+  if (MOZ_UNLIKELY(wasm_interrupt_)) {
+    handle_wasm_interrupt();
+    // Just calling set_pc turns the pc_modified_ flag on, which means it doesn't
+    // auto-step after executing the next instruction.  Force that to off so it
+    // will auto-step after executing the first instruction of the handler.
+    pc_modified_ = false;
+    wasm_interrupt_ = false;
+  }
 }
 
 
 uintptr_t Simulator::stackLimit() const {
   return reinterpret_cast<uintptr_t>(stack_limit_);
 }
 
 
@@ -215,16 +225,22 @@ bool Simulator::overRecursed(uintptr_t n
 
 
 bool Simulator::overRecursedWithExtra(uint32_t extra) const {
   uintptr_t newsp = get_sp() - extra;
   return newsp <= stackLimit();
 }
 
 
+void Simulator::trigger_wasm_interrupt() {
+  MOZ_ASSERT(!wasm_interrupt_);
+  wasm_interrupt_ = true;
+}
+
+
 static inline JitActivation*
 GetJitActivation(JSContext* cx)
 {
     if (!js::wasm::CodeExists)
         return nullptr;
     if (!cx->activation() || !cx->activation()->isJit())
         return nullptr;
     return cx->activation()->asJit();
@@ -236,16 +252,42 @@ Simulator::registerState()
   JS::ProfilingFrameIterator::RegisterState state;
   state.pc = (uint8_t*) get_pc();
   state.fp = (uint8_t*) get_fp();
   state.lr = (uint8_t*) get_lr();
   state.sp = (uint8_t*) get_sp();
   return state;
 }
 
+// The signal handler only redirects the PC to the interrupt stub when the PC is
+// in function code. However, this guard is racy for the ARM simulator since the
+// signal handler samples PC in the middle of simulating an instruction and thus
+// the current PC may have advanced once since the signal handler's guard. So we
+// re-check here.
+void Simulator::handle_wasm_interrupt()
+{
+  if (!js::wasm::CodeExists)
+    return;
+
+  uint8_t* pc = (uint8_t*)get_pc();
+
+  const js::wasm::ModuleSegment* ms = nullptr;
+  if (!js::wasm::InInterruptibleCode(cx_, pc, &ms))
+      return;
+
+  JitActivation* act = GetJitActivation(cx_);
+  if (!act)
+      return;
+
+  if (!act->startWasmInterrupt(registerState()))
+      return;
+
+  set_pc((Instruction*)ms->interruptCode());
+}
+
 bool
 Simulator::handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes)
 {
     JitActivation* act = GetJitActivation(cx_);
     if (!act)
         return false;
 
     uint8_t* pc = (uint8_t*)get_pc();
@@ -262,17 +304,18 @@ Simulator::handle_wasm_seg_fault(uintptr
 
     MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code());
 
     if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
         return false;
 
     const js::wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
     if (!memoryAccess) {
-        act->startWasmTrap(js::wasm::Trap::OutOfBounds, 0, registerState());
+        if (!act->startWasmInterrupt(registerState()))
+            MOZ_CRASH("Cannot start interrupt");
         if (!instance->code().containsCodePC(pc))
             MOZ_CRASH("Cannot map PC to trap handler");
         set_pc((Instruction*)moduleSegment->outOfBoundsCode());
         return true;
     }
 
     MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
     set_pc((Instruction*)memoryAccess->trapOutOfLineCode(moduleSegment->base()));
--- a/js/src/jit/arm64/vixl/Simulator-vixl.h
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -741,16 +741,18 @@ class Simulator : public DecoderVisitor 
   template <typename T>
   T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); }
 
   void set_pc(const Instruction* new_pc) {
     pc_ = Memory::AddressUntag(new_pc);
     pc_modified_ = true;
   }
 
+  void trigger_wasm_interrupt();
+  void handle_wasm_interrupt();
   bool handle_wasm_ill_fault();
   bool handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes);
 
   void increment_pc() {
     if (!pc_modified_) {
       pc_ = pc_->NextInstruction();
     }
 
@@ -2576,16 +2578,17 @@ class Simulator : public DecoderVisitor 
   static const int stack_size_ = (2 * MBytes) + (2 * stack_protection_size_);
   byte* stack_limit_;
 
   Decoder* decoder_;
   // Indicates if the pc has been modified by the instruction and should not be
   // automatically incremented.
   bool pc_modified_;
   const Instruction* pc_;
+  bool wasm_interrupt_;
 
   static const char* xreg_names[];
   static const char* wreg_names[];
   static const char* sreg_names[];
   static const char* dreg_names[];
   static const char* vreg_names[];
 
   static const Instruction* kEndOfSimAddress;
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -130,17 +130,16 @@ static constexpr uint32_t JitStackValueA
 static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
   "Stack alignment should be a non-zero multiple of sizeof(Value)");
 
 // TODO this is just a filler to prevent a build failure. The MIPS SIMD
 // alignment requirements still need to be explored.
 // TODO Copy the static_asserts from x64/x86 assembler files.
 static constexpr uint32_t SimdMemoryAlignment = 8;
 static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
-static const uint32_t WasmTrapInstructionLength = 4;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/mips32/Simulator-mips32.cpp
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -1264,16 +1264,17 @@ Simulator::Simulator()
     // Note, allocation and anything that depends on allocated memory is
     // deferred until init(), in order to handle OOM properly.
 
     stack_ = nullptr;
     stackLimit_ = 0;
     pc_modified_ = false;
     icount_ = 0;
     break_count_ = 0;
+    wasm_interrupt_ = false;
     break_pc_ = nullptr;
     break_instr_ = 0;
 
     // Set up architecture state.
     // All registers are initialized to zero to start with.
     for (int i = 0; i < Register::kNumSimuRegisters; i++) {
         registers_[i] = 0;
     }
@@ -1636,16 +1637,42 @@ Simulator::registerState()
     wasm::RegisterState state;
     state.pc = (void*) get_pc();
     state.fp = (void*) getRegister(fp);
     state.sp = (void*) getRegister(sp);
     state.lr = (void*) getRegister(ra);
     return state;
 }
 
+// The signal handler only redirects the PC to the interrupt stub when the PC is
+// in function code. However, this guard is racy for the simulator since the
+// signal handler samples PC in the middle of simulating an instruction and thus
+// the current PC may have advanced once since the signal handler's guard. So we
+// re-check here.
+void
+Simulator::handleWasmInterrupt()
+{
+    if (!wasm::CodeExists)
+        return;
+
+    void* pc = (void*)get_pc();
+    void* fp = (void*)getRegister(Register::fp);
+
+    JitActivation* activation = TlsContext.get()->activation()->asJit();
+    const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
+    if (!segment || !segment->isModule() || !segment->containsCodePC(pc))
+        return;
+
+    if (!activation->startWasmInterrupt(registerState()))
+        return;
+
+    set_pc(int32_t(segment->asModule()->interruptCode()));
+}
+
+
 // WebAssembly memories contain an extra region of guard pages (see
 // WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
 // using a signal handler that redirects PC to a stub that safely reports an
 // error. However, if the handler is hit by the simulator, the PC is in C++ code
 // and cannot be redirected. Therefore, we must avoid hitting the handler by
 // redirecting in the simulator before the real handler would have been hit.
 bool
 Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
@@ -1674,17 +1701,17 @@ Simulator::handleWasmFault(int32_t addr,
 
     if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
          return false;
 
     LLBit_ = false;
 
     const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
     if (!memoryAccess) {
-        act->startWasmTrap(wasm::Trap::OutOfBounds, 0, registerState());
+        MOZ_ALWAYS_TRUE(act->startWasmInterrupt(registerState()));
         if (!instance->code().containsCodePC(pc))
             MOZ_CRASH("Cannot map PC to trap handler");
         set_pc(int32_t(moduleSegment->outOfBoundsCode()));
         return true;
     }
 
     MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
     set_pc(int32_t(memoryAccess->trapOutOfLineCode(moduleSegment->base())));
@@ -3641,32 +3668,52 @@ Simulator::branchDelayInstructionDecode(
     }
 
     if (instr->isForbiddenInBranchDelay()) {
         MOZ_CRASH("Eror:Unexpected opcode in a branch delay slot.");
     }
     instructionDecode(instr);
 }
 
+static void
+FakeInterruptHandler()
+{
+    JSContext* cx = TlsContext.get();
+    uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
+
+    const wasm::ModuleSegment* ms = nullptr;
+    if (!wasm::InInterruptibleCode(cx, pc, &ms))
+        return;
+
+    cx->simulator()->trigger_wasm_interrupt();
+}
+
 template<bool enableStopSimAt>
 void
 Simulator::execute()
 {
     // Get the PC to simulate. Cannot use the accessor here as we need the
     // raw PC value and not the one used as input to arithmetic instructions.
     int program_counter = get_pc();
 
     while (program_counter != end_sim_pc) {
         if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
             MipsDebugger dbg(this);
             dbg.debug();
         } else {
+            if (MOZ_UNLIKELY(JitOptions.simulatorAlwaysInterrupt))
+                FakeInterruptHandler();
             SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
             instructionDecode(instr);
             icount_++;
+
+            if (MOZ_UNLIKELY(wasm_interrupt_)) {
+                handleWasmInterrupt();
+                wasm_interrupt_ = false;
+            }
         }
         program_counter = get_pc();
     }
 }
 
 void
 Simulator::callInternal(uint8_t* entry)
 {
--- a/js/src/jit/mips32/Simulator-mips32.h
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -197,16 +197,23 @@ class Simulator {
 
     // Special case of set_register and get_register to access the raw PC value.
     void set_pc(int32_t value);
     int32_t get_pc() const;
 
     template <typename T>
     T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
 
+    void trigger_wasm_interrupt() {
+        // This can be called several times if a single interrupt isn't caught
+        // and handled by the simulator, but this is fine; once the current
+        // instruction is done executing, the interrupt will be handled anyhow.
+        wasm_interrupt_ = true;
+    }
+
     // Accessor to the internal simulator stack area.
     uintptr_t stackLimit() const;
     bool overRecursed(uintptr_t newsp = 0) const;
     bool overRecursedWithExtra(uint32_t extra) const;
 
     // Executes MIPS instructions until the PC reaches end_sim_pc.
     template<bool enableStopSimAt>
     void execute();
@@ -292,16 +299,18 @@ class Simulator {
     void handleStop(uint32_t code, SimInstruction* instr);
     bool isStopInstruction(SimInstruction* instr);
     bool isEnabledStop(uint32_t code);
     void enableStop(uint32_t code);
     void disableStop(uint32_t code);
     void increaseStopCounter(uint32_t code);
     void printStopInfo(uint32_t code);
 
+    // Handle a wasm interrupt triggered by an async signal handler.
+    void handleWasmInterrupt();
     JS::ProfilingFrameIterator::RegisterState registerState();
 
     // Handle any wasm faults, returning true if the fault was handled.
     bool handleWasmFault(int32_t addr, unsigned numBytes);
     bool handleWasmTrapFault();
 
     // Executes one instruction.
     void instructionDecode(SimInstruction* instr);
@@ -351,16 +360,19 @@ class Simulator {
 
     // Simulator support.
     char* stack_;
     uintptr_t stackLimit_;
     bool pc_modified_;
     int icount_;
     int break_count_;
 
+    // wasm async interrupt / fault support
+    bool wasm_interrupt_;
+
     // Debugger input.
     char* lastDebuggerInput_;
 
     // Registered breakpoints.
     SimInstruction* break_pc_;
     Instr break_instr_;
 
     // A stop is watched if its code is less than kNumOfWatchedStops.
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -141,17 +141,16 @@ static_assert(JitStackAlignment % sizeof
   "Stack alignment should be a non-zero multiple of sizeof(Value)");
 
 // TODO this is just a filler to prevent a build failure. The MIPS SIMD
 // alignment requirements still need to be explored.
 // TODO Copy the static_asserts from x64/x86 assembler files.
 static constexpr uint32_t SimdMemoryAlignment = 16;
 
 static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
-static const uint32_t WasmTrapInstructionLength = 4;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/mips64/Simulator-mips64.cpp
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -1273,16 +1273,17 @@ Simulator::Simulator()
     // Note, allocation and anything that depends on allocated memory is
     // deferred until init(), in order to handle OOM properly.
 
     stack_ = nullptr;
     stackLimit_ = 0;
     pc_modified_ = false;
     icount_ = 0;
     break_count_ = 0;
+    wasm_interrupt_ = false;
     break_pc_ = nullptr;
     break_instr_ = 0;
     single_stepping_ = false;
     single_step_callback_ = nullptr;
     single_step_callback_arg_ = nullptr;
 
     // Set up architecture state.
     // All registers are initialized to zero to start with.
@@ -1639,16 +1640,45 @@ Simulator::registerState()
     wasm::RegisterState state;
     state.pc = (void*) get_pc();
     state.fp = (void*) getRegister(fp);
     state.sp = (void*) getRegister(sp);
     state.lr = (void*) getRegister(ra);
     return state;
 }
 
+// The signal handler only redirects the PC to the interrupt stub when the PC is
+// in function code. However, this guard is racy for the simulator since the
+// signal handler samples PC in the middle of simulating an instruction and thus
+// the current PC may have advanced once since the signal handler's guard. So we
+// re-check here.
+void
+Simulator::handleWasmInterrupt()
+{
+    if (!wasm::CodeExists)
+        return;
+
+    void* pc = (void*)get_pc();
+    void* fp = (void*)getRegister(Register::fp);
+
+    JitActivation* activation = TlsContext.get()->activation()->asJit();
+    const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
+    if (!segment || !segment->isModule() || !segment->containsCodePC(pc))
+        return;
+
+    // fp can be null during the prologue/epilogue of the entry function.
+    if (!fp)
+        return;
+
+    if (!activation->startWasmInterrupt(registerState()))
+        return;
+
+    set_pc(int64_t(segment->asModule()->interruptCode()));
+}
+
 // WebAssembly memories contain an extra region of guard pages (see
 // WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
 // using a signal handler that redirects PC to a stub that safely reports an
 // error. However, if the handler is hit by the simulator, the PC is in C++ code
 // and cannot be redirected. Therefore, we must avoid hitting the handler by
 // redirecting in the simulator before the real handler would have been hit.
 bool
 Simulator::handleWasmFault(uint64_t addr, unsigned numBytes)
@@ -1677,17 +1707,17 @@ Simulator::handleWasmFault(uint64_t addr
 
     if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
          return false;
 
     LLBit_ = false;
 
     const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
     if (!memoryAccess) {
-        act->startWasmTrap(wasm::Trap::OutOfBounds, 0, registerState());
+        MOZ_ALWAYS_TRUE(act->startWasmInterrupt(registerState()));
         if (!instance->code().containsCodePC(pc))
             MOZ_CRASH("Cannot map PC to trap handler");
         set_pc(int64_t(moduleSegment->outOfBoundsCode()));
         return true;
     }
 
     MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
     set_pc(int64_t(memoryAccess->trapOutOfLineCode(moduleSegment->base())));
@@ -4027,16 +4057,29 @@ Simulator::disable_single_stepping()
     if (!single_stepping_)
         return;
     single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
     single_stepping_ = false;
     single_step_callback_ = nullptr;
     single_step_callback_arg_ = nullptr;
 }
 
+static void
+FakeInterruptHandler()
+{
+    JSContext* cx = TlsContext.get();
+    uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
+
+    const wasm::ModuleSegment* ms = nullptr;
+    if (!wasm::InInterruptibleCode(cx, pc, &ms))
+        return;
+
+    cx->simulator()->trigger_wasm_interrupt();
+}
+
 template<bool enableStopSimAt>
 void
 Simulator::execute()
 {
     if (single_stepping_)
         single_step_callback_(single_step_callback_arg_, this, nullptr);
 
     // Get the PC to simulate. Cannot use the accessor here as we need the
@@ -4045,19 +4088,26 @@ Simulator::execute()
 
     while (program_counter != end_sim_pc) {
         if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
             MipsDebugger dbg(this);
             dbg.debug();
         } else {
             if (single_stepping_)
                 single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
+            if (MOZ_UNLIKELY(JitOptions.simulatorAlwaysInterrupt))
+                FakeInterruptHandler();
             SimInstruction* instr = reinterpret_cast<SimInstruction *>(program_counter);
             instructionDecode(instr);
             icount_++;
+
+            if (MOZ_UNLIKELY(wasm_interrupt_)) {
+                handleWasmInterrupt();
+                wasm_interrupt_ = false;
+            }
         }
         program_counter = get_pc();
     }
 
     if (single_stepping_)
         single_step_callback_(single_step_callback_arg_, this, nullptr);
 }
 
--- a/js/src/jit/mips64/Simulator-mips64.h
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -201,16 +201,23 @@ class Simulator {
 
     // Special case of set_register and get_register to access the raw PC value.
     void set_pc(int64_t value);
     int64_t get_pc() const;
 
     template <typename T>
     T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
 
+    void trigger_wasm_interrupt() {
+        // This can be called several times if a single interrupt isn't caught
+        // and handled by the simulator, but this is fine; once the current
+        // instruction is done executing, the interrupt will be handled anyhow.
+        wasm_interrupt_ = true;
+    }
+
     void enable_single_stepping(SingleStepCallback cb, void* arg);
     void disable_single_stepping();
 
     // Accessor to the internal simulator stack area.
     uintptr_t stackLimit() const;
     bool overRecursed(uintptr_t newsp = 0) const;
     bool overRecursedWithExtra(uint32_t extra) const;
 
@@ -307,16 +314,18 @@ class Simulator {
     void handleStop(uint32_t code, SimInstruction* instr);
     bool isStopInstruction(SimInstruction* instr);
     bool isEnabledStop(uint32_t code);
     void enableStop(uint32_t code);
     void disableStop(uint32_t code);
     void increaseStopCounter(uint32_t code);
     void printStopInfo(uint32_t code);
 
+    // Handle a wasm interrupt triggered by an async signal handler.
+    void handleWasmInterrupt();
     JS::ProfilingFrameIterator::RegisterState registerState();
 
     // Handle any wasm faults, returning true if the fault was handled.
     bool handleWasmFault(uint64_t addr, unsigned numBytes);
     bool handleWasmTrapFault();
 
     // Executes one instruction.
     void instructionDecode(SimInstruction* instr);
@@ -364,16 +373,19 @@ class Simulator {
 
     // Simulator support.
     char* stack_;
     uintptr_t stackLimit_;
     bool pc_modified_;
     int64_t icount_;
     int64_t break_count_;
 
+    // wasm async interrupt support
+    bool wasm_interrupt_;
+
     // Debugger input.
     char* lastDebuggerInput_;
 
     // Registered breakpoints.
     SimInstruction* break_pc_;
     Instr break_instr_;
 
     // Single-stepping support
--- a/js/src/jit/none/Architecture-none.h
+++ b/js/src/jit/none/Architecture-none.h
@@ -14,17 +14,16 @@
 #include "jit/shared/Architecture-shared.h"
 
 namespace js {
 namespace jit {
 
 static const bool SupportsSimd = false;
 static const uint32_t SimdMemoryAlignment = 4; // Make it 4 to avoid a bunch of div-by-zero warnings
 static const uint32_t WasmStackAlignment = 8;
-static const uint32_t WasmTrapInstructionLength = 0;
 
 // Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
 static constexpr bool SupportsUint32x4FloatConversions = false;
 
 // Does this architecture support comparisons of unsigned integer vectors?
 static constexpr bool SupportsUint8x16Compares = false;
 static constexpr bool SupportsUint16x8Compares = false;
 static constexpr bool SupportsUint32x4Compares = false;
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -76,17 +76,16 @@ static constexpr Register64 ReturnReg64(
 #error "Bad architecture"
 #endif
 
 static constexpr Register ABINonArgReg0 { Registers::invalid_reg };
 static constexpr Register ABINonArgReg1 { Registers::invalid_reg };
 static constexpr Register ABINonArgReg2 { Registers::invalid_reg };
 static constexpr Register ABINonArgReturnReg0 { Registers::invalid_reg };
 static constexpr Register ABINonArgReturnReg1 { Registers::invalid_reg };
-static constexpr Register ABINonVolatileReg { Registers::invalid_reg };
 static constexpr Register ABINonArgReturnVolatileReg { Registers::invalid_reg };
 
 static constexpr FloatRegister ABINonArgDoubleReg = { FloatRegisters::invalid_reg };
 
 static constexpr Register WasmTableCallScratchReg { Registers::invalid_reg };
 static constexpr Register WasmTableCallSigReg { Registers::invalid_reg };
 static constexpr Register WasmTableCallIndexReg { Registers::invalid_reg };
 static constexpr Register WasmTlsReg { Registers::invalid_reg };
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -1658,34 +1658,16 @@ class LInterruptCheck : public LInstruct
         return implicit_;
     }
 
     const LDefinition* temp() {
         return getTemp(0);
     }
 };
 
-class LWasmInterruptCheck : public LInstructionHelper<0, 1, 0>
-{
-  public:
-    LIR_HEADER(WasmInterruptCheck)
-
-    explicit LWasmInterruptCheck(const LAllocation& tlsData)
-      : LInstructionHelper(classOpcode)
-    {
-        setOperand(0, tlsData);
-    }
-    MWasmInterruptCheck* mir() const {
-        return mir_->toWasmInterruptCheck();
-    }
-    const LAllocation* tlsPtr() {
-        return getOperand(0);
-    }
-};
-
 class LDefVar : public LCallInstructionHelper<0, 1, 0>
 {
   public:
     LIR_HEADER(DefVar)
 
     explicit LDefVar(const LAllocation& envChain)
       : LCallInstructionHelper(classOpcode)
     {
--- a/js/src/jit/shared/LOpcodes-shared.h
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -384,17 +384,16 @@
     _(NearbyIntF)                   \
     _(InCache)                      \
     _(InArray)                      \
     _(HasOwnCache)                  \
     _(InstanceOfO)                  \
     _(InstanceOfV)                  \
     _(InstanceOfCache)              \
     _(InterruptCheck)               \
-    _(WasmInterruptCheck)           \
     _(Rotate)                       \
     _(RotateI64)                    \
     _(GetDOMProperty)               \
     _(GetDOMMemberV)                \
     _(GetDOMMemberT)                \
     _(SetDOMProperty)               \
     _(CallDOMNative)                \
     _(IsCallableO)                  \
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -245,17 +245,16 @@ static_assert(CodeAlignment % SimdMemory
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
   "Stack alignment should be larger than any of the alignments which are used for "
   "spilled values.  Thus it should be larger than the alignment for SIMD accesses.");
 
 static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
-static const uint32_t WasmTrapInstructionLength = 2;
 
 static const Scale ScalePointer = TimesEight;
 
 } // namespace jit
 } // namespace js
 
 #include "jit/x86-shared/Assembler-x86-shared.h"
 
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -162,17 +162,16 @@ static_assert(CodeAlignment % SimdMemory
   "the constant sections of the code buffer.  Thus it should be larger than the "
   "alignment for SIMD constants.");
 
 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
   "Stack alignment should be larger than any of the alignments which are used for "
   "spilled values.  Thus it should be larger than the alignment for SIMD accesses.");
 
 static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
-static const uint32_t WasmTrapInstructionLength = 2;
 
 struct ImmTag : public Imm32
 {
     explicit ImmTag(JSValueTag mask)
       : Imm32(int32_t(mask))
     { }
 };
 
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -7267,16 +7267,19 @@ JS_SetGlobalJitCompilerOption(JSContext*
         break;
       case JSJITCOMPILER_JUMP_THRESHOLD:
         if (value == uint32_t(-1)) {
             jit::DefaultJitOptions defaultValues;
             value = defaultValues.jumpThreshold;
         }
         jit::JitOptions.jumpThreshold = value;
         break;
+      case JSJITCOMPILER_SIMULATOR_ALWAYS_INTERRUPT:
+        jit::JitOptions.simulatorAlwaysInterrupt = !!value;
+        break;
       case JSJITCOMPILER_SPECTRE_INDEX_MASKING:
         jit::JitOptions.spectreIndexMasking = !!value;
         break;
       case JSJITCOMPILER_SPECTRE_OBJECT_MITIGATIONS_BARRIERS:
         jit::JitOptions.spectreObjectMitigationsBarriers = !!value;
         break;
       case JSJITCOMPILER_SPECTRE_STRING_MITIGATIONS:
         jit::JitOptions.spectreStringMitigations = !!value;
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -207,17 +207,16 @@ UNIFIED_SOURCES += [
     'irregexp/RegExpEngine.cpp',
     'irregexp/RegExpInterpreter.cpp',
     'irregexp/RegExpMacroAssembler.cpp',
     'irregexp/RegExpParser.cpp',
     'irregexp/RegExpStack.cpp',
     'jit/AliasAnalysis.cpp',
     'jit/AliasAnalysisShared.cpp',
     'jit/AlignmentMaskAnalysis.cpp',
-    'jit/AsyncInterrupt.cpp',
     'jit/BacktrackingAllocator.cpp',
     'jit/Bailouts.cpp',
     'jit/BaselineBailouts.cpp',
     'jit/BaselineCacheIRCompiler.cpp',
     'jit/BaselineCompiler.cpp',
     'jit/BaselineDebugModeOSR.cpp',
     'jit/BaselineFrame.cpp',
     'jit/BaselineFrameInfo.cpp',
--- a/js/src/vm/JSCompartment.cpp
+++ b/js/src/vm/JSCompartment.cpp
@@ -68,17 +68,17 @@ JSCompartment::JSCompartment(Zone* zone,
     arraySpeciesLookup(),
     globalWriteBarriered(0),
     detachedTypedObjects(0),
     objectMetadataState(ImmediateMetadata()),
     selfHostingScriptSource(nullptr),
     objectMetadataTable(nullptr),
     innerViews(zone),
     lazyArrayBuffers(nullptr),
-    wasm(zone->runtimeFromActiveCooperatingThread()),
+    wasm(zone),
     nonSyntacticLexicalEnvironments_(nullptr),
     gcIncomingGrayPointers(nullptr),
     debugModeBits(0),
     validAccessPtr(nullptr),
     randomKeyGenerator_(runtime_->forkRandomKeyGenerator()),
     scriptCountsMap(nullptr),
     scriptNameMap(nullptr),
     debugScriptMap(nullptr),
@@ -178,17 +178,17 @@ JSRuntime::createJitRuntime(JSContext* c
         ReportOutOfMemory(cx);
         return nullptr;
     }
 
     jit::JitRuntime* jrt = cx->new_<jit::JitRuntime>(cx->runtime());
     if (!jrt)
         return nullptr;
 
-    // Protect jitRuntime_ from being observed (by jit::InterruptRunningCode)
+    // Protect jitRuntime_ from being observed (by InterruptRunningJitCode)
     // while it is being initialized. Unfortunately, initialization depends on
     // jitRuntime_ being non-null, so we can't just wait to assign jitRuntime_.
     JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime(), jrt);
     jitRuntime_ = jrt;
 
     AutoEnterOOMUnsafeRegion noOOM;
     if (!jitRuntime_->initialize(cx, atomsLock)) {
         // Handling OOM here is complicated: if we delete jitRuntime_ now, we
--- a/js/src/vm/JSContext.cpp
+++ b/js/src/vm/JSContext.cpp
@@ -30,17 +30,16 @@
 
 #include "jsexn.h"
 #include "jspubtd.h"
 #include "jstypes.h"
 
 #include "builtin/String.h"
 #include "gc/FreeOp.h"
 #include "gc/Marking.h"
-#include "jit/AsyncInterrupt.h"
 #include "jit/Ion.h"
 #include "jit/PcScriptCache.h"
 #include "js/CharacterEncoding.h"
 #include "js/Printf.h"
 #include "util/DoubleToString.h"
 #include "util/NativeStack.h"
 #include "util/Windows.h"
 #include "vm/BytecodeUtil.h"
@@ -48,16 +47,17 @@
 #include "vm/HelperThreads.h"
 #include "vm/Iteration.h"
 #include "vm/JSAtom.h"
 #include "vm/JSCompartment.h"
 #include "vm/JSFunction.h"
 #include "vm/JSObject.h"
 #include "vm/JSScript.h"
 #include "vm/Shape.h"
+#include "wasm/WasmSignalHandlers.h"
 
 #include "vm/JSObject-inl.h"
 #include "vm/JSScript-inl.h"
 #include "vm/Stack-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
@@ -97,17 +97,17 @@ js::AutoCycleDetector::~AutoCycleDetecto
     }
 }
 
 bool
 JSContext::init(ContextKind kind)
 {
     // Skip most of the initialization if this thread will not be running JS.
     if (kind == ContextKind::Cooperative) {
-        // Get a platform-native handle for this thread, used by jit::InterruptRunningCode.
+        // Get a platform-native handle for this thread, used by js::InterruptRunningJitCode.
 #ifdef XP_WIN
         size_t openFlags = THREAD_GET_CONTEXT | THREAD_SET_CONTEXT | THREAD_SUSPEND_RESUME |
                            THREAD_QUERY_INFORMATION;
         HANDLE self = OpenThread(openFlags, false, GetCurrentThreadId());
         if (!self)
         return false;
         static_assert(sizeof(HANDLE) <= sizeof(threadNative_), "need bigger field");
         threadNative_ = (size_t)self;
@@ -118,22 +118,21 @@ JSContext::init(ContextKind kind)
 
         if (!regexpStack.ref().init())
             return false;
 
         if (!fx.initInstance())
             return false;
 
 #ifdef JS_SIMULATOR
-        simulator_ = jit::Simulator::Create(this);
+        simulator_ = js::jit::Simulator::Create(this);
         if (!simulator_)
             return false;
 #endif
 
-        jit::EnsureAsyncInterrupt(this);
         if (!wasm::EnsureSignalHandlers(this))
             return false;
     }
 
     // Set the ContextKind last, so that ProtectedData checks will allow us to
     // initialize this context before it becomes the runtime's active context.
     kind_ = kind;
 
--- a/js/src/vm/MutexIDs.h
+++ b/js/src/vm/MutexIDs.h
@@ -31,38 +31,37 @@
                                       \
   _(WasmLazyStubsTier1,          475) \
   _(WasmLazyStubsTier2,          476) \
                                       \
   _(SharedImmutableStringsCache, 500) \
   _(FutexThread,                 500) \
   _(GeckoProfilerStrings,        500) \
   _(ProtectedRegionTree,         500) \
+  _(WasmSigIdSet,                500) \
   _(ShellOffThreadState,         500) \
   _(SimulatorCacheLock,          500) \
   _(Arm64SimulatorLock,          500) \
   _(IonSpewer,                   500) \
   _(PerfSpewer,                  500) \
   _(CacheIRSpewer,               500) \
   _(TraceLoggerThreadState,      500) \
   _(DateTimeInfoMutex,           500) \
   _(IcuTimeZoneStateMutex,       500) \
   _(ProcessExecutableRegion,     500) \
   _(OffThreadPromiseState,       500) \
   _(BufferStreamState,           500) \
-  _(SharedArrayGrow,             500) \
-  _(RuntimeScriptData,           500) \
-  _(WasmSigIdSet,                500) \
   _(WasmCodeProfilingLabels,     500) \
   _(WasmModuleTieringLock,       500) \
   _(WasmCompileTaskState,        500) \
   _(WasmCodeStreamEnd,           500) \
   _(WasmTailBytesPtr,            500) \
   _(WasmStreamStatus,            500) \
-  _(WasmRuntimeInstances,        500) \
+  _(SharedArrayGrow,             500) \
+  _(RuntimeScriptData,           500) \
                                       \
   _(ThreadId,                    600) \
   _(WasmCodeSegmentMap,          600) \
   _(TraceLoggerGraphState,       600) \
   _(VTuneLock,                   600)
 
 namespace js {
 namespace mutexid {
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -25,31 +25,31 @@
 #include "jsmath.h"
 
 #include "builtin/Promise.h"
 #include "gc/FreeOp.h"
 #include "gc/GCInternals.h"
 #include "gc/PublicIterators.h"
 #include "jit/arm/Simulator-arm.h"
 #include "jit/arm64/vixl/Simulator-vixl.h"
-#include "jit/AsyncInterrupt.h"
 #include "jit/JitCompartment.h"
 #include "jit/mips32/Simulator-mips32.h"
 #include "jit/mips64/Simulator-mips64.h"
 #include "js/Date.h"
 #include "js/MemoryMetrics.h"
 #include "js/SliceBudget.h"
 #include "js/Wrapper.h"
 #include "util/Windows.h"
 #include "vm/Debugger.h"
 #include "vm/JSAtom.h"
 #include "vm/JSObject.h"
 #include "vm/JSScript.h"
 #include "vm/TraceLogging.h"
 #include "vm/TraceLoggingGraph.h"
+#include "wasm/WasmSignalHandlers.h"
 
 #include "gc/GC-inl.h"
 #include "vm/JSContext-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 using mozilla::Atomic;
@@ -172,35 +172,32 @@ JSRuntime::JSRuntime(JSRuntime* parentRu
     offthreadIonCompilationEnabled_(true),
     parallelParsingEnabled_(true),
     autoWritableJitCodeActive_(false),
     oomCallback(nullptr),
     debuggerMallocSizeOf(ReturnZeroSize),
     lastAnimationTime(0),
     performanceMonitoring_(),
     stackFormat_(parentRuntime ? js::StackFormat::Default
-                               : js::StackFormat::SpiderMonkey),
-    wasmInstances(mutexid::WasmRuntimeInstances)
+                               : js::StackFormat::SpiderMonkey)
 {
     liveRuntimesCount++;
 
     /* Initialize infallibly first, so we can goto bad and JS_DestroyRuntime. */
 
     PodZero(&asmJSCacheOps);
     lcovOutput().init();
 }
 
 JSRuntime::~JSRuntime()
 {
     MOZ_ASSERT(!initialized_);
 
     DebugOnly<size_t> oldCount = liveRuntimesCount--;
     MOZ_ASSERT(oldCount > 0);
-
-    MOZ_ASSERT(wasmInstances.lock()->empty());
 }
 
 bool
 JSRuntime::init(JSContext* cx, uint32_t maxbytes, uint32_t maxNurseryBytes)
 {
 #ifdef DEBUG
     MOZ_ASSERT(!initialized_);
     initialized_ = true;
@@ -507,18 +504,16 @@ JSRuntime::addSizeOfIncludingThis(mozill
         for (ScriptDataTable::Range r = scriptDataTable(lock).all(); !r.empty(); r.popFront())
             rtSizes->scriptData += mallocSizeOf(r.front());
     }
 
     if (jitRuntime_) {
         jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
         jitRuntime_->backedgeExecAlloc().addSizeOfCode(&rtSizes->code);
     }
-
-    rtSizes->wasmRuntime += wasmInstances.lock()->sizeOfExcludingThis(mallocSizeOf);
 }
 
 static bool
 InvokeInterruptCallback(JSContext* cx)
 {
     MOZ_ASSERT(cx->requestDepth >= 1);
     MOZ_ASSERT(!cx->compartment()->isAtomsCompartment());
 
@@ -598,18 +593,17 @@ JSContext::requestInterrupt(InterruptMod
         // additional steps to interrupt corner cases where the above fields are
         // not regularly polled. Wake ilooping Ion code, irregexp JIT code and
         // Atomics.wait()
         interruptRegExpJit_ = true;
         fx.lock();
         if (fx.isWaiting())
             fx.wake(FutexThread::WakeForJSInterrupt);
         fx.unlock();
-        jit::InterruptRunningCode(this);
-        wasm::InterruptRunningCode(this);
+        InterruptRunningJitCode(this);
     }
 }
 
 bool
 JSContext::handleInterrupt()
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
     if (interrupt_ || jitStackLimit == UINTPTR_MAX) {
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -994,24 +994,28 @@ struct JSRuntime : public js::MallocProv
     friend class js::gc::AutoTraceSession;
     friend class JS::AutoEnterCycleCollection;
 
   private:
     js::ActiveThreadData<js::RuntimeCaches> caches_;
   public:
     js::RuntimeCaches& caches() { return caches_.ref(); }
 
-    // When wasm traps, the signal handler records some data for unwinding
-    // purposes. Wasm code can't trap reentrantly.
-    js::ActiveThreadData<mozilla::Maybe<js::wasm::TrapData>> wasmTrapData;
+    // When wasm traps or is interrupted, the signal handler records some data
+    // for unwinding purposes. Wasm code can't interrupt or trap reentrantly.
+    js::ActiveThreadData<
+        mozilla::MaybeOneOf<js::wasm::TrapData, js::wasm::InterruptData>
+    > wasmUnwindData;
 
-    // List of all the live wasm::Instances in the runtime. Equal to the union
-    // of all instances registered in all JSCompartments. Accessed from watchdog
-    // threads for purposes of wasm::InterruptRunningCode().
-    js::ExclusiveData<js::wasm::InstanceVector> wasmInstances;
+    js::wasm::TrapData& wasmTrapData() {
+        return wasmUnwindData.ref().ref<js::wasm::TrapData>();
+    }
+    js::wasm::InterruptData& wasmInterruptData() {
+        return wasmUnwindData.ref().ref<js::wasm::InterruptData>();
+    }
 
   public:
 #if defined(NIGHTLY_BUILD)
     // Support for informing the embedding of any error thrown.
     // This mechanism is designed to let the embedding
     // log/report/fail in case certain errors are thrown
     // (e.g. SyntaxError, ReferenceError or TypeError
     // in critical code).
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -1569,17 +1569,17 @@ jit::JitActivation::~JitActivation()
 
     // All reocvered value are taken from activation during the bailout.
     MOZ_ASSERT(ionRecovery_.empty());
 
     // The BailoutFrameInfo should have unregistered itself from the
     // JitActivations.
     MOZ_ASSERT(!bailoutData_);
 
-    // Traps get handled immediately.
+    MOZ_ASSERT(!isWasmInterrupted());
     MOZ_ASSERT(!isWasmTrapping());
 
     clearRematerializedFrames();
     js_delete(rematerializedFrames_);
 }
 
 void
 jit::JitActivation::setBailoutData(jit::BailoutFrameInfo* bailoutData)
@@ -1737,16 +1737,96 @@ jit::JitActivation::removeIonFrameRecove
 
 void
 jit::JitActivation::traceIonRecovery(JSTracer* trc)
 {
     for (RInstructionResults* it = ionRecovery_.begin(); it != ionRecovery_.end(); it++)
         it->trace(trc);
 }
 
+bool
+jit::JitActivation::startWasmInterrupt(const JS::ProfilingFrameIterator::RegisterState& state)
+{
+    // fp may be null when first entering wasm code from an interpreter entry
+    // stub.
+    if (!state.fp)
+        return false;
+
+    MOZ_ASSERT(state.pc);
+
+    // Execution can only be interrupted in function code. Afterwards, control
+    // flow does not reenter function code and thus there can be no
+    // interrupt-during-interrupt.
+
+    bool unwound;
+    wasm::UnwindState unwindState;
+    MOZ_ALWAYS_TRUE(wasm::StartUnwinding(state, &unwindState, &unwound));
+
+    void* pc = unwindState.pc;
+
+    if (unwound) {
+        // In the prologue/epilogue, FP might have been fixed up to the
+        // caller's FP, and the caller could be the jit entry. Ignore this
+        // interrupt, in this case, because FP points to a jit frame and not a
+        // wasm one.
+        if (!wasm::LookupCode(pc)->lookupFuncRange(pc))
+            return false;
+    }
+
+    cx_->runtime()->wasmUnwindData.ref().construct<wasm::InterruptData>(pc, state.pc);
+    setWasmExitFP(unwindState.fp);
+
+    MOZ_ASSERT(compartment() == unwindState.fp->tls->instance->compartment());
+    MOZ_ASSERT(isWasmInterrupted());
+    return true;
+}
+
+void
+jit::JitActivation::finishWasmInterrupt()
+{
+    MOZ_ASSERT(isWasmInterrupted());
+
+    cx_->runtime()->wasmUnwindData.ref().destroy();
+    packedExitFP_ = nullptr;
+}
+
+bool
+jit::JitActivation::isWasmInterrupted() const
+{
+    JSRuntime* rt = cx_->runtime();
+    if (!rt->wasmUnwindData.ref().constructed<wasm::InterruptData>())
+        return false;
+
+    Activation* act = cx_->activation();
+    while (act && !act->hasWasmExitFP())
+        act = act->prev();
+
+    if (act != this)
+        return false;
+
+    DebugOnly<const wasm::Frame*> fp = wasmExitFP();
+    DebugOnly<void*> unwindPC = rt->wasmInterruptData().unwindPC;
+    MOZ_ASSERT(fp->instance()->code().containsCodePC(unwindPC));
+    return true;
+}
+
+void*
+jit::JitActivation::wasmInterruptUnwindPC() const
+{
+    MOZ_ASSERT(isWasmInterrupted());
+    return cx_->runtime()->wasmInterruptData().unwindPC;
+}
+
+void*
+jit::JitActivation::wasmInterruptResumePC() const
+{
+    MOZ_ASSERT(isWasmInterrupted());
+    return cx_->runtime()->wasmInterruptData().resumePC;
+}
+
 void
 jit::JitActivation::startWasmTrap(wasm::Trap trap, uint32_t bytecodeOffset,
                                   const wasm::RegisterState& state)
 {
     bool unwound;
     wasm::UnwindState unwindState;
     MOZ_ALWAYS_TRUE(wasm::StartUnwinding(state, &unwindState, &unwound));
     MOZ_ASSERT(unwound == (trap == wasm::Trap::IndirectCallBadSig));
@@ -1757,72 +1837,61 @@ jit::JitActivation::startWasmTrap(wasm::
     const wasm::Code& code = fp->tls->instance->code();
     MOZ_RELEASE_ASSERT(&code == wasm::LookupCode(pc));
 
     // If the frame was unwound, the bytecodeOffset must be recovered from the
     // callsite so that it is accurate.
     if (unwound)
         bytecodeOffset = code.lookupCallSite(pc)->lineOrBytecode();
 
-    wasm::TrapData trapData;
-    trapData.resumePC = ((uint8_t*)state.pc) + jit::WasmTrapInstructionLength;
-    trapData.unwoundPC = pc;
-    trapData.trap = trap;
-    trapData.bytecodeOffset = bytecodeOffset;
-
-    cx_->runtime()->wasmTrapData = Some(trapData);
+    cx_->runtime()->wasmUnwindData.ref().construct<wasm::TrapData>(pc, trap, bytecodeOffset);
     setWasmExitFP(fp);
 }
 
 void
 jit::JitActivation::finishWasmTrap()
 {
     MOZ_ASSERT(isWasmTrapping());
 
-    cx_->runtime()->wasmTrapData.ref().reset();
+    cx_->runtime()->wasmUnwindData.ref().destroy();
     packedExitFP_ = nullptr;
 }
 
 bool
 jit::JitActivation::isWasmTrapping() const
 {
     JSRuntime* rt = cx_->runtime();
-    if (!rt->wasmTrapData.ref())
+    if (!rt->wasmUnwindData.ref().constructed<wasm::TrapData>())
         return false;
 
     Activation* act = cx_->activation();
     while (act && !act->hasWasmExitFP())
         act = act->prev();
 
     if (act != this)
         return false;
 
-    MOZ_ASSERT(wasmExitFP()->instance()->code().containsCodePC(rt->wasmTrapData->unwoundPC));
+    DebugOnly<const wasm::Frame*> fp = wasmExitFP();
+    DebugOnly<void*> unwindPC = rt->wasmTrapData().pc;
+    MOZ_ASSERT(fp->instance()->code().containsCodePC(unwindPC));
     return true;
 }
 
 void*
-jit::JitActivation::wasmTrapResumePC() const
+jit::JitActivation::wasmTrapPC() const
 {
     MOZ_ASSERT(isWasmTrapping());
-    return cx_->runtime()->wasmTrapData->resumePC;
-}
-
-void*
-jit::JitActivation::wasmTrapUnwoundPC() const
-{
-    MOZ_ASSERT(isWasmTrapping());
-    return cx_->runtime()->wasmTrapData->unwoundPC;
+    return cx_->runtime()->wasmTrapData().pc;
 }
 
 uint32_t
 jit::JitActivation::wasmTrapBytecodeOffset() const
 {
     MOZ_ASSERT(isWasmTrapping());
-    return cx_->runtime()->wasmTrapData->bytecodeOffset;
+    return cx_->runtime()->wasmTrapData().bytecodeOffset;
 }
 
 InterpreterFrameIterator&
 InterpreterFrameIterator::operator++()
 {
     MOZ_ASSERT(!done());
     if (fp_ != activation_->entryFrame_) {
         pc_ = fp_->prevpc();
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -1705,21 +1705,31 @@ class JitActivation : public Activation
     wasm::ExitReason wasmExitReason() const {
         MOZ_ASSERT(hasWasmExitFP());
         return wasm::ExitReason::Decode(encodedWasmExitReason_);
     }
     static size_t offsetOfEncodedWasmExitReason() {
         return offsetof(JitActivation, encodedWasmExitReason_);
     }
 
+    // Interrupts are started from the interrupt signal handler (or the ARM
+    // simulator) and cleared by WasmHandleExecutionInterrupt or WasmHandleThrow
+    // when the interrupt is handled.
+
+    // Returns true iff we've entered interrupted state.
+    bool startWasmInterrupt(const wasm::RegisterState& state);
+    void finishWasmInterrupt();
+    bool isWasmInterrupted() const;
+    void* wasmInterruptUnwindPC() const;
+    void* wasmInterruptResumePC() const;
+
     void startWasmTrap(wasm::Trap trap, uint32_t bytecodeOffset, const wasm::RegisterState& state);
     void finishWasmTrap();
     bool isWasmTrapping() const;
-    void* wasmTrapResumePC() const;
-    void* wasmTrapUnwoundPC() const;
+    void* wasmTrapPC() const;
     uint32_t wasmTrapBytecodeOffset() const;
 };
 
 // A filtering of the ActivationIterator to only stop at JitActivations.
 class JitActivationIterator : public ActivationIterator
 {
     void settle() {
         while (!done() && !activation_->isJit())
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -3366,20 +3366,20 @@ class BaseCompiler final : public BaseCo
     void moveImmF32(float f, RegF32 dest) {
         masm.loadConstantFloat32(f, dest);
     }
 
     void moveImmF64(double d, RegF64 dest) {
         masm.loadConstantDouble(d, dest);
     }
 
-    void addInterruptCheck() {
-        ScratchI32 tmp(*this);
-        masm.loadWasmTlsRegFromFrame(tmp);
-        masm.wasmInterruptCheck(tmp, bytecodeOffset());
+    void addInterruptCheck()
+    {
+        // Always use signals for interrupts with Asm.JS/Wasm
+        MOZ_RELEASE_ASSERT(HaveSignalHandlers());
     }
 
     void jumpTable(const LabelVector& labels, Label* theTable) {
         // Flush constant pools to ensure that the table is never interrupted by
         // constant pool entries.
         masm.flush();
 
         masm.bind(theTable);
@@ -9486,16 +9486,18 @@ BaseCompiler::init()
         !SigPILL_.append(MIRType::Int64) || !SigPILL_.append(MIRType::Int64))
     {
         return false;
     }
 
     if (!fr.setupLocals(locals_, sig().args(), debugEnabled_, &localInfo_))
         return false;
 
+    addInterruptCheck();
+
     return true;
 }
 
 FuncOffsets
 BaseCompiler::finish()
 {
     MOZ_ASSERT(done(), "all bytes must be consumed");
     MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -64,16 +64,38 @@ extern MOZ_EXPORT int64_t
 static JitActivation*
 CallingActivation()
 {
     Activation* act = TlsContext.get()->activation();
     MOZ_ASSERT(act->asJit()->hasWasmExitFP());
     return act->asJit();
 }
 
+static void*
+WasmHandleExecutionInterrupt()
+{
+    JitActivation* activation = CallingActivation();
+    MOZ_ASSERT(activation->isWasmInterrupted());
+
+    if (!CheckForInterrupt(activation->cx())) {
+        // If CheckForInterrupt failed, it is time to interrupt execution.
+        // Returning nullptr to the caller will jump to the throw stub which
+        // will call HandleThrow. The JitActivation must stay in the
+        // interrupted state until then so that stack unwinding works in
+        // HandleThrow.
+        return nullptr;
+    }
+
+    // If CheckForInterrupt succeeded, then execution can proceed and the
+    // interrupt is over.
+    void* resumePC = activation->wasmInterruptResumePC();
+    activation->finishWasmInterrupt();
+    return resumePC;
+}
+
 static bool
 WasmHandleDebugTrap()
 {
     JitActivation* activation = CallingActivation();
     JSContext* cx = activation->cx();
     Frame* fp = activation->wasmExitFP();
     Instance* instance = fp->tls->instance;
     const Code& code = instance->code();
@@ -192,118 +214,86 @@ wasm::HandleThrow(JSContext* cx, WasmFra
             // Unexpected success from the handler onLeaveFrame -- raising error
             // since throw recovery is not yet implemented in the wasm baseline.
             // TODO properly handle success and resume wasm execution.
             JS_ReportErrorASCII(cx, "Unexpected success from onLeaveFrame");
         }
         frame->leave(cx);
     }
 
+    MOZ_ASSERT(!cx->activation()->asJit()->isWasmInterrupted(), "unwinding clears the interrupt");
     MOZ_ASSERT(!cx->activation()->asJit()->isWasmTrapping(), "unwinding clears the trapping state");
 
     return iter.unwoundAddressOfReturnAddress();
 }
 
 static void*
 WasmHandleThrow()
 {
     JitActivation* activation = CallingActivation();
     JSContext* cx = activation->cx();
     WasmFrameIter iter(activation);
     return HandleThrow(cx, iter);
 }
 
-// Unconditionally returns nullptr per calling convention of OnTrap().
-static void*
-ReportError(JSContext* cx, unsigned errorNumber)
+static void
+WasmOldReportTrap(int32_t trapIndex)
 {
-    JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
-    return nullptr;
-};
-
-// Has the same return-value convention as OnTrap().
-static void*
-CheckInterrupt(JSContext* cx, JitActivation* activation)
-{
-    ResetInterruptState(cx);
-
-    if (!CheckForInterrupt(cx))
-        return nullptr;
+    JSContext* cx = TlsContext.get();
 
-    void* resumePC = activation->wasmTrapResumePC();
-    activation->finishWasmTrap();
-    return resumePC;
-}
+    MOZ_ASSERT(trapIndex < int32_t(Trap::Limit) && trapIndex >= 0);
+    Trap trap = Trap(trapIndex);
 
-// The calling convention between this function and its caller in the stub
-// generated by GenerateTrapExit() is:
-//   - return nullptr if the stub should jump to the throw stub to unwind
-//     the activation;
-//   - return the (non-null) resumePC that should be jumped if execution should
-//     resume after the trap.
-static void*
-OnTrap(Trap trap)
-{
-    JitActivation* activation = CallingActivation();
-    JSContext* cx = activation->cx();
-
+    unsigned errorNumber;
     switch (trap) {
       case Trap::Unreachable:
-        return ReportError(cx, JSMSG_WASM_UNREACHABLE);
+        errorNumber = JSMSG_WASM_UNREACHABLE;
+        break;
       case Trap::IntegerOverflow:
-        return ReportError(cx, JSMSG_WASM_INTEGER_OVERFLOW);
+        errorNumber = JSMSG_WASM_INTEGER_OVERFLOW;
+        break;
       case Trap::InvalidConversionToInteger:
-        return ReportError(cx, JSMSG_WASM_INVALID_CONVERSION);
+        errorNumber = JSMSG_WASM_INVALID_CONVERSION;
+        break;
       case Trap::IntegerDivideByZero:
-        return ReportError(cx, JSMSG_WASM_INT_DIVIDE_BY_ZERO);
+        errorNumber = JSMSG_WASM_INT_DIVIDE_BY_ZERO;
+        break;
       case Trap::IndirectCallToNull:
-        return ReportError(cx, JSMSG_WASM_IND_CALL_TO_NULL);
+        errorNumber = JSMSG_WASM_IND_CALL_TO_NULL;
+        break;
       case Trap::IndirectCallBadSig:
-        return ReportError(cx, JSMSG_WASM_IND_CALL_BAD_SIG);
+        errorNumber = JSMSG_WASM_IND_CALL_BAD_SIG;
+        break;
       case Trap::ImpreciseSimdConversion:
-        return ReportError(cx, JSMSG_SIMD_FAILED_CONVERSION);
+        errorNumber = JSMSG_SIMD_FAILED_CONVERSION;
+        break;
       case Trap::OutOfBounds:
-        return ReportError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+        errorNumber = JSMSG_WASM_OUT_OF_BOUNDS;
+        break;
       case Trap::UnalignedAccess:
-        return ReportError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
-      case Trap::CheckInterrupt:
-        return CheckInterrupt(cx, activation);
+        errorNumber = JSMSG_WASM_UNALIGNED_ACCESS;
+        break;
       case Trap::StackOverflow:
-        // TlsData::setInterrupt() causes a fake stack overflow. Since
-        // TlsData::setInterrupt() is called racily, it's possible for a real
-        // stack overflow to trap, followed by a racy call to setInterrupt().
-        // Thus, we must check for a real stack overflow first before we
-        // CheckInterrupt() and possibly resume execution.
-        if (!CheckRecursionLimit(cx))
-            return nullptr;
-        if (activation->wasmExitFP()->tls->isInterrupted())
-            return CheckInterrupt(cx, activation);
-        return ReportError(cx, JSMSG_OVER_RECURSED);
+        errorNumber = JSMSG_OVER_RECURSED;
+        break;
       case Trap::ThrowReported:
         // Error was already reported under another name.
-        return nullptr;
-      case Trap::Limit:
-        break;
+        return;
+      default:
+        MOZ_CRASH("unexpected trap");
     }
 
-    MOZ_CRASH("unexpected trap");
+    JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
 }
 
 static void
-WasmOldReportTrap(int32_t trapIndex)
+WasmReportTrap()
 {
-    MOZ_ASSERT(trapIndex < int32_t(Trap::Limit) && trapIndex >= 0);
-    DebugOnly<void*> resumePC = OnTrap(Trap(trapIndex));
-    MOZ_ASSERT(!resumePC);
-}
-
-static void*
-WasmOnTrap()
-{
-    return OnTrap(TlsContext.get()->runtime()->wasmTrapData->trap);
+    Trap trap = TlsContext.get()->runtime()->wasmTrapData().trap;
+    WasmOldReportTrap(int32_t(trap));
 }
 
 static void
 WasmReportOutOfBounds()
 {
     JSContext* cx = TlsContext.get();
     JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_OUT_OF_BOUNDS);
 }
@@ -516,25 +506,28 @@ FuncCast(F* funcPtr, ABIFunctionType abi
 #endif
     return pf;
 }
 
 void*
 wasm::AddressOf(SymbolicAddress imm, ABIFunctionType* abiType)
 {
     switch (imm) {
+      case SymbolicAddress::HandleExecutionInterrupt:
+        *abiType = Args_General0;
+        return FuncCast(WasmHandleExecutionInterrupt, *abiType);
       case SymbolicAddress::HandleDebugTrap:
         *abiType = Args_General0;
         return FuncCast(WasmHandleDebugTrap, *abiType);
       case SymbolicAddress::HandleThrow:
         *abiType = Args_General0;
         return FuncCast(WasmHandleThrow, *abiType);
-      case SymbolicAddress::OnTrap:
+      case SymbolicAddress::ReportTrap:
         *abiType = Args_General0;
-        return FuncCast(WasmOnTrap, *abiType);
+        return FuncCast(WasmReportTrap, *abiType);
       case SymbolicAddress::OldReportTrap:
         *abiType = Args_General1;
         return FuncCast(WasmOldReportTrap, *abiType);
       case SymbolicAddress::ReportOutOfBounds:
         *abiType = Args_General0;
         return FuncCast(WasmReportOutOfBounds, *abiType);
       case SymbolicAddress::ReportUnalignedAccess:
         *abiType = Args_General0;
@@ -694,19 +687,20 @@ wasm::AddressOf(SymbolicAddress imm, ABI
 }
 
 bool
 wasm::NeedsBuiltinThunk(SymbolicAddress sym)
 {
     // Some functions don't want to a thunk, because they already have one or
     // they don't have frame info.
     switch (sym) {
+      case SymbolicAddress::HandleExecutionInterrupt: // GenerateInterruptExit
       case SymbolicAddress::HandleDebugTrap:          // GenerateDebugTrapStub
       case SymbolicAddress::HandleThrow:              // GenerateThrowStub
-      case SymbolicAddress::OnTrap:                   // GenerateTrapExit
+      case SymbolicAddress::ReportTrap:               // GenerateTrapExit
       case SymbolicAddress::OldReportTrap:            // GenerateOldTrapExit
       case SymbolicAddress::ReportOutOfBounds:        // GenerateOutOfBoundsExit
       case SymbolicAddress::ReportUnalignedAccess:    // GenerateUnalignedExit
       case SymbolicAddress::CallImport_Void:          // GenerateImportInterpExit
       case SymbolicAddress::CallImport_I32:
       case SymbolicAddress::CallImport_I64:
       case SymbolicAddress::CallImport_F64:
       case SymbolicAddress::CoerceInPlace_ToInt32:    // GenerateImportJitExit
@@ -875,18 +869,18 @@ PopulateTypedNatives(TypedNativeToFuncPt
 
 // ============================================================================
 // Process-wide builtin thunk set
 //
 // Thunks are inserted between wasm calls and the C++ callee and achieve two
 // things:
 //  - bridging the few differences between the internal wasm ABI and the external
 //    native ABI (viz. float returns on x86 and soft-fp ARM)
-//  - executing an exit prologue/epilogue which in turn allows any profiling
-//    iterator to see the full stack up to the wasm operation that called out
+//  - executing an exit prologue/epilogue which in turn allows any asynchronous
+//    interrupt to see the full stack up to the wasm operation that called out
 //
 // Thunks are created for two kinds of C++ callees, enumerated above:
 //  - SymbolicAddress: for statically compiled calls in the wasm module
 //  - Imported JS builtins: optimized calls to imports
 //
 // All thunks are created up front, lazily, when the first wasm module is
 // compiled in the process. Thunks are kept alive until the JS engine shuts down
 // in the process. No thunks are created at runtime after initialization. This
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -330,23 +330,25 @@ ModuleSegment::initialize(Tier tier,
                           UniqueCodeBytes codeBytes,
                           uint32_t codeLength,
                           const ShareableBytes& bytecode,
                           const LinkDataTier& linkData,
                           const Metadata& metadata,
                           const CodeRangeVector& codeRanges)
 {
     MOZ_ASSERT(bytes_ == nullptr);
+    MOZ_ASSERT(linkData.interruptOffset);
     MOZ_ASSERT(linkData.outOfBoundsOffset);
     MOZ_ASSERT(linkData.unalignedAccessOffset);
     MOZ_ASSERT(linkData.trapOffset);
 
     tier_ = tier;
     bytes_ = Move(codeBytes);
     length_ = codeLength;
+    interruptCode_ = bytes_.get() + linkData.interruptOffset;
     outOfBoundsCode_ = bytes_.get() + linkData.outOfBoundsOffset;
     unalignedAccessCode_ = bytes_.get() + linkData.unalignedAccessOffset;
     trapCode_ = bytes_.get() + linkData.trapOffset;
 
     if (!StaticallyLink(*this, linkData))
         return false;
 
     ExecutableAllocator::cacheFlush(bytes_.get(), RoundupCodeLength(codeLength));
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -139,18 +139,19 @@ class CodeSegment
 
 typedef UniquePtr<ModuleSegment> UniqueModuleSegment;
 typedef UniquePtr<const ModuleSegment> UniqueConstModuleSegment;
 
 class ModuleSegment : public CodeSegment
 {
     Tier            tier_;
 
-    // These are pointers into code for stubs used for signal-handler
-    // control-flow transfer.
+    // These are pointers into code for stubs used for asynchronous
+    // signal-handler control-flow transfer.
+    uint8_t*        interruptCode_;
     uint8_t*        outOfBoundsCode_;
     uint8_t*        unalignedAccessCode_;
     uint8_t*        trapCode_;
 
     bool initialize(Tier tier,
                     UniqueCodeBytes bytes,
                     uint32_t codeLength,
                     const ShareableBytes& bytecode,
@@ -167,16 +168,17 @@ class ModuleSegment : public CodeSegment
                                       const CodeRangeVector& codeRanges);
   public:
     ModuleSegment(const ModuleSegment&) = delete;
     void operator=(const ModuleSegment&) = delete;
 
     ModuleSegment()
       : CodeSegment(),
         tier_(Tier(-1)),
+        interruptCode_(nullptr),
         outOfBoundsCode_(nullptr),
         unalignedAccessCode_(nullptr),
         trapCode_(nullptr)
     {}
 
     static UniqueModuleSegment create(Tier tier,
                                       jit::MacroAssembler& masm,
                                       const ShareableBytes& bytecode,
@@ -188,16 +190,17 @@ class ModuleSegment : public CodeSegment
                                       const Bytes& unlinkedBytes,
                                       const ShareableBytes& bytecode,
                                       const LinkDataTier& linkData,
                                       const Metadata& metadata,
                                       const CodeRangeVector& codeRanges);
 
     Tier tier() const { return tier_; }
 
+    uint8_t* interruptCode() const { return interruptCode_; }
     uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; }
     uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }
     uint8_t* trapCode() const { return trapCode_; }
 
     // Structured clone support:
 
     size_t serializedSize() const;
     uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkDataTier) const;
--- a/js/src/wasm/WasmCompartment.cpp
+++ b/js/src/wasm/WasmCompartment.cpp
@@ -21,18 +21,17 @@
 #include "vm/JSCompartment.h"
 #include "wasm/WasmInstance.h"
 
 #include "vm/Debugger-inl.h"
 
 using namespace js;
 using namespace wasm;
 
-Compartment::Compartment(JSRuntime* rt)
-  : runtime_(rt)
+Compartment::Compartment(Zone* zone)
 {}
 
 Compartment::~Compartment()
 {
     MOZ_ASSERT(instances_.empty());
 }
 
 struct InstanceComparator
@@ -58,85 +57,50 @@ struct InstanceComparator
 
         return target.codeBase(targetTier) < instance->codeBase(instanceTier) ? -1 : 1;
     }
 };
 
 bool
 Compartment::registerInstance(JSContext* cx, HandleWasmInstanceObject instanceObj)
 {
-    MOZ_ASSERT(runtime_ == cx->runtime());
-
     Instance& instance = instanceObj->instance();
     MOZ_ASSERT(this == &instance.compartment()->wasm);
 
     instance.ensureProfilingLabels(cx->runtime()->geckoProfiler().enabled());
 
     if (instance.debugEnabled() && instance.compartment()->debuggerObservesAllExecution())
         instance.ensureEnterFrameTrapsState(cx, true);
 
-    {
-        if (!instances_.reserve(instances_.length() + 1))
-            return false;
-
-        auto runtimeInstances = cx->runtime()->wasmInstances.lock();
-        if (!runtimeInstances->reserve(runtimeInstances->length() + 1))
-            return false;
+    size_t index;
+    if (BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
+        MOZ_CRASH("duplicate registration");
 
-        // To avoid implementing rollback, do not fail after mutations start.
-
-        InstanceComparator cmp(instance);
-        size_t index;
-
-        MOZ_ALWAYS_FALSE(BinarySearchIf(instances_, 0, instances_.length(), cmp, &index));
-        MOZ_ALWAYS_TRUE(instances_.insert(instances_.begin() + index, &instance));
-
-        MOZ_ALWAYS_FALSE(BinarySearchIf(runtimeInstances.get(), 0, runtimeInstances->length(), cmp, &index));
-        MOZ_ALWAYS_TRUE(runtimeInstances->insert(runtimeInstances->begin() + index, &instance));
+    if (!instances_.insert(instances_.begin() + index, &instance)) {
+        ReportOutOfMemory(cx);
+        return false;
     }
 
-    // Notify the debugger after wasmInstances is unlocked.
     Debugger::onNewWasmInstance(cx, instanceObj);
     return true;
 }
 
 void
 Compartment::unregisterInstance(Instance& instance)
 {
-    InstanceComparator cmp(instance);
     size_t index;
-
-    if (BinarySearchIf(instances_, 0, instances_.length(), cmp, &index))
-        instances_.erase(instances_.begin() + index);
-
-    auto runtimeInstances = runtime_->wasmInstances.lock();
-    if (BinarySearchIf(runtimeInstances.get(), 0, runtimeInstances->length(), cmp, &index))
-        runtimeInstances->erase(runtimeInstances->begin() + index);
+    if (!BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
+        return;
+    instances_.erase(instances_.begin() + index);
 }
 
 void
 Compartment::ensureProfilingLabels(bool profilingEnabled)
 {
     for (Instance* instance : instances_)
         instance->ensureProfilingLabels(profilingEnabled);
 }
 
 void
 Compartment::addSizeOfExcludingThis(MallocSizeOf mallocSizeOf, size_t* compartmentTables)
 {
     *compartmentTables += instances_.sizeOfExcludingThis(mallocSizeOf);
 }
-
-void
-wasm::InterruptRunningCode(JSContext* cx)
-{
-    auto runtimeInstances = cx->runtime()->wasmInstances.lock();
-    for (Instance* instance : runtimeInstances.get())
-        instance->tlsData()->setInterrupt();
-}
-
-void
-wasm::ResetInterruptState(JSContext* cx)
-{
-    auto runtimeInstances = cx->runtime()->wasmInstances.lock();
-    for (Instance* instance : runtimeInstances.get())
-        instance->tlsData()->resetInterrupt(cx);
-}
--- a/js/src/wasm/WasmCompartment.h
+++ b/js/src/wasm/WasmCompartment.h
@@ -19,28 +19,29 @@
 #ifndef wasm_compartment_h
 #define wasm_compartment_h
 
 #include "wasm/WasmJS.h"
 
 namespace js {
 namespace wasm {
 
+typedef Vector<Instance*, 0, SystemAllocPolicy> InstanceVector;
+
 // wasm::Compartment lives in JSCompartment and contains the wasm-related
 // per-compartment state. wasm::Compartment tracks every live instance in the
 // compartment and must be notified, via registerInstance(), of any new
 // WasmInstanceObject.
 
 class Compartment
 {
-    JSRuntime* runtime_;
     InstanceVector instances_;
 
   public:
-    explicit Compartment(JSRuntime* rt);
+    explicit Compartment(Zone* zone);
     ~Compartment();
 
     // Before a WasmInstanceObject can be considered fully constructed and
     // valid, it must be registered with the Compartment. If this method fails,
     // an error has been reported and the instance object must be abandoned.
     // After a successful registration, an Instance must call
     // unregisterInstance() before being destroyed.
 
@@ -58,25 +59,12 @@ class Compartment
 
     void ensureProfilingLabels(bool profilingEnabled);
 
     // about:memory reporting
 
     void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf, size_t* compartmentTables);
 };
 
-// Interrupt all running wasm Instances that have been registered with
-// wasm::Compartments in the given JSContext.
-
-extern void
-InterruptRunningCode(JSContext* cx);
-
-// After a wasm Instance sees an interrupt request and calls
-// CheckForInterrupt(), it should call RunningCodeInterrupted() to clear the
-// interrupt request for all wasm Instances to avoid spurious trapping.
-
-void
-ResetInterruptState(JSContext* cx);
-
 } // namespace wasm
 } // namespace js
 
 #endif // wasm_compartment_h
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -45,27 +45,48 @@ WasmFrameIter::WasmFrameIter(JitActivati
     MOZ_ASSERT(fp_);
 
     // When the stack is captured during a trap (viz., to create the .stack
     // for an Error object), use the pc/bytecode information captured by the
     // signal handler in the runtime.
 
     if (activation->isWasmTrapping()) {
         code_ = &fp_->tls->instance->code();
-        MOZ_ASSERT(code_ == LookupCode(activation->wasmTrapUnwoundPC()));
+        MOZ_ASSERT(code_ == LookupCode(activation->wasmTrapPC()));
 
-        codeRange_ = code_->lookupFuncRange(activation->wasmTrapUnwoundPC());
+        codeRange_ = code_->lookupFuncRange(activation->wasmTrapPC());
         MOZ_ASSERT(codeRange_);
 
         lineOrBytecode_ = activation->wasmTrapBytecodeOffset();
 
         MOZ_ASSERT(!done());
         return;
     }
 
+    // When asynchronously interrupted, exitFP is set to the interrupted frame
+    // itself and so we do not want to skip it. Instead, we can recover the
+    // Code and CodeRange from the JitActivation, which are set when control
+    // flow was interrupted. There is no CallSite (b/c the interrupt was
+    // async), but this is fine because CallSite is only used for line number
+    // for which we can use the beginning of the function from the CodeRange
+    // instead.
+
+    if (activation->isWasmInterrupted()) {
+        code_ = &fp_->tls->instance->code();
+        MOZ_ASSERT(code_ == LookupCode(activation->wasmInterruptUnwindPC()));
+
+        codeRange_ = code_->lookupFuncRange(activation->wasmInterruptUnwindPC());
+        MOZ_ASSERT(codeRange_);
+
+        lineOrBytecode_ = codeRange_->funcLineOrBytecode();
+
+        MOZ_ASSERT(!done());
+        return;
+    }
+
     // Otherwise, execution exits wasm code via an exit stub which sets exitFP
     // to the exit stub's frame. Thus, in this case, we want to start iteration
     // at the caller of the exit frame, whose Code, CodeRange and CallSite are
     // indicated by the returnAddress of the exit stub's frame. If the caller
     // was Ion, we can just skip the wasm frames.
 
     popFrame();
     MOZ_ASSERT(!done() || unwoundIonCallerFP_);
@@ -85,22 +106,24 @@ WasmFrameIter::operator++()
     MOZ_ASSERT(!done());
 
     // When the iterator is set to unwind, each time the iterator pops a frame,
     // the JitActivation is updated so that the just-popped frame is no longer
     // visible. This is necessary since Debugger::onLeaveFrame is called before
     // popping each frame and, once onLeaveFrame is called for a given frame,
     // that frame must not be visible to subsequent stack iteration (or it
     // could be added as a "new" frame just as it becomes garbage).  When the
-    // frame is trapping, then exitFP is included in the callstack (otherwise,
-    // it is skipped, as explained above). So to unwind the innermost frame, we
-    // just clear the trapping state.
+    // frame is "interrupted", then exitFP is included in the callstack
+    // (otherwise, it is skipped, as explained above). So to unwind the
+    // innermost frame, we just clear the interrupt state.
 
     if (unwind_ == Unwind::True) {
-        if (activation_->isWasmTrapping())
+        if (activation_->isWasmInterrupted())
+            activation_->finishWasmInterrupt();
+        else if (activation_->isWasmTrapping())
             activation_->finishWasmTrap();
         activation_->setWasmExitFP(fp_);
     }
 
     popFrame();
 }
 
 void
@@ -704,18 +727,20 @@ ProfilingFrameIterator::initFromExitFP(c
     code_ = LookupCode(pc, &codeRange_);
     MOZ_ASSERT(code_);
     MOZ_ASSERT(codeRange_);
 
     // Since we don't have the pc for fp, start unwinding at the caller of fp.
     // This means that the innermost frame is skipped. This is fine because:
     //  - for import exit calls, the innermost frame is a thunk, so the first
     //    frame that shows up is the function calling the import;
-    //  - for Math and other builtin calls, we note the absence of an exit
-    //    reason and inject a fake "builtin" frame; and
+    //  - for Math and other builtin calls as well as interrupts, we note the
+    //    absence of an exit reason and inject a fake "builtin" frame; and
+    //  - for async interrupts, we just accept that we'll lose the innermost
+    //    frame.
     switch (codeRange_->kind()) {
       case CodeRange::InterpEntry:
         callerPC_ = nullptr;
         callerFP_ = nullptr;
         codeRange_ = nullptr;
         exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
         break;
       case CodeRange::JitEntry:
@@ -733,16 +758,17 @@ ProfilingFrameIterator::initFromExitFP(c
       case CodeRange::ImportInterpExit:
       case CodeRange::BuiltinThunk:
       case CodeRange::TrapExit:
       case CodeRange::OldTrapExit:
       case CodeRange::DebugTrap:
       case CodeRange::OutOfBoundsExit:
       case CodeRange::UnalignedExit:
       case CodeRange::Throw:
+      case CodeRange::Interrupt:
       case CodeRange::FarJumpIsland:
         MOZ_CRASH("Unexpected CodeRange kind");
     }
 
     MOZ_ASSERT(!done());
 }
 
 bool
@@ -930,16 +956,21 @@ js::wasm::StartUnwinding(const RegisterS
         if (intptr_t(fixedFP) == (FailFP & ~JitActivation::ExitFpWasmBit))
             return false;
         break;
       case CodeRange::Throw:
         // The throw stub executes a small number of instructions before popping
         // the entire activation. To simplify testing, we simply pretend throw
         // stubs have already popped the entire stack.
         return false;
+      case CodeRange::Interrupt:
+        // When the PC is in the async interrupt stub, the fp may be garbage and
+        // so we cannot blindly unwind it. Since the percent of time spent in
+        // the interrupt stub is extremely small, just ignore the stack.
+        return false;
     }
 
     unwindState->code = code;
     unwindState->codeRange = codeRange;
     unwindState->fp = fixedFP;
     unwindState->pc = fixedPC;
     return true;
 }
@@ -1055,31 +1086,33 @@ ProfilingFrameIterator::operator++()
         callerPC_ = callerFP_->returnAddress;
         AssertMatchesCallSite(callerPC_, callerFP_->callerFP);
         callerFP_ = callerFP_->callerFP;
         break;
       case CodeRange::InterpEntry:
         MOZ_CRASH("should have had null caller fp");
       case CodeRange::JitEntry:
         MOZ_CRASH("should have been guarded above");
+      case CodeRange::Interrupt:
       case CodeRange::Throw:
         MOZ_CRASH("code range doesn't have frame");
     }
 
     MOZ_ASSERT(!done());
 }
 
 static const char*
 ThunkedNativeToDescription(SymbolicAddress func)
 {
     MOZ_ASSERT(NeedsBuiltinThunk(func));
     switch (func) {
+      case SymbolicAddress::HandleExecutionInterrupt:
       case SymbolicAddress::HandleDebugTrap:
       case SymbolicAddress::HandleThrow:
-      case SymbolicAddress::OnTrap:
+      case SymbolicAddress::ReportTrap:
       case SymbolicAddress::OldReportTrap:
       case SymbolicAddress::ReportOutOfBounds:
       case SymbolicAddress::ReportUnalignedAccess:
       case SymbolicAddress::CallImport_Void:
       case SymbolicAddress::CallImport_I32:
       case SymbolicAddress::CallImport_I64:
       case SymbolicAddress::CallImport_F64:
       case SymbolicAddress::CoerceInPlace_ToInt32:
@@ -1222,17 +1255,18 @@ ProfilingFrameIterator::label() const
       case CodeRange::BuiltinThunk:      return builtinNativeDescription;
       case CodeRange::ImportInterpExit:  return importInterpDescription;
       case CodeRange::TrapExit:          return trapDescription;
       case CodeRange::OldTrapExit:       return trapDescription;
       case CodeRange::DebugTrap:         return debugTrapDescription;
       case CodeRange::OutOfBoundsExit:   return "out-of-bounds stub (in wasm)";
       case CodeRange::UnalignedExit:     return "unaligned trap stub (in wasm)";
       case CodeRange::FarJumpIsland:     return "interstitial (in wasm)";
-      case CodeRange::Throw:             MOZ_CRASH("does not have a frame");
+      case CodeRange::Throw:             MOZ_FALLTHROUGH;
+      case CodeRange::Interrupt:         MOZ_CRASH("does not have a frame");
     }
 
     MOZ_CRASH("bad code range kind");
 }
 
 Instance*
 wasm::LookupFaultingInstance(const ModuleSegment& codeSegment, void* pc, void* fp)
 {
--- a/js/src/wasm/WasmFrameIter.h
+++ b/js/src/wasm/WasmFrameIter.h
@@ -44,16 +44,22 @@ struct FuncOffsets;
 struct CallableOffsets;
 
 // Iterates over a linear group of wasm frames of a single wasm JitActivation,
 // called synchronously from C++ in the wasm thread. It will stop at the first
 // frame that is not of the same kind, or at the end of an activation.
 //
 // If you want to handle every kind of frames (including JS jit frames), use
 // JitFrameIter.
+//
+// The one exception is that this iterator may be called from the interrupt
+// callback which may be called asynchronously from asm.js code; in this case,
+// the backtrace may not be correct. That being said, we try our best printing
+// an informative message to the user and at least the name of the innermost
+// function stack frame.
 
 class WasmFrameIter
 {
   public:
     enum class Unwind { True, False };
 
   private:
     jit::JitActivation* activation_;
@@ -147,17 +153,17 @@ class ExitReason
     }
     SymbolicAddress symbolic() const {
         MOZ_ASSERT(!isFixed());
         return SymbolicAddress(payload_ >> 1);
     }
 };
 
 // Iterates over the frames of a single wasm JitActivation, given an
-// asynchronously-profiled thread's state.
+// asynchronously-interrupted thread's state.
 class ProfilingFrameIterator
 {
     const Code* code_;
     const CodeRange* codeRange_;
     Frame* callerFP_;
     void* callerPC_;
     void* stackAddress_;
     uint8_t* unwoundIonCallerFP_;
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -545,16 +545,20 @@ ModuleGenerator::noteCodeRange(uint32_t 
       case CodeRange::OutOfBoundsExit:
         MOZ_ASSERT(!linkDataTier_->outOfBoundsOffset);
         linkDataTier_->outOfBoundsOffset = codeRange.begin();
         break;
       case CodeRange::UnalignedExit:
         MOZ_ASSERT(!linkDataTier_->unalignedAccessOffset);
         linkDataTier_->unalignedAccessOffset = codeRange.begin();
         break;
+      case CodeRange::Interrupt:
+        MOZ_ASSERT(!linkDataTier_->interruptOffset);
+        linkDataTier_->interruptOffset = codeRange.begin();
+        break;
       case CodeRange::TrapExit:
         MOZ_ASSERT(!linkDataTier_->trapOffset);
         linkDataTier_->trapOffset = codeRange.begin();
         break;
       case CodeRange::Throw:
         // Jumped to by other stubs, so nothing to do.
         break;
       case CodeRange::FarJumpIsland:
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -401,17 +401,17 @@ Instance::Instance(JSContext* cx,
     MOZ_ASSERT(tables_.length() == metadata().tables.length());
 
     tlsData()->memoryBase = memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
 #ifndef WASM_HUGE_MEMORY
     tlsData()->boundsCheckLimit = memory ? memory->buffer().wasmBoundsCheckLimit() : 0;
 #endif
     tlsData()->instance = this;
     tlsData()->cx = cx;
-    tlsData()->resetInterrupt(cx);
+    tlsData()->stackLimit = cx->stackLimitForJitCode(JS::StackForUntrustedScript);
     tlsData()->jumpTable = code_->tieringJumpTable();
 
     Tier callerTier = code_->bestTier();
 
     for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) {
         HandleFunction f = funcImports[i];
         const FuncImport& fi = metadata(callerTier).funcImports[i];
         FuncImportTls& import = funcImportTls(fi);
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -246,16 +246,18 @@ class FunctionCompiler
             }
 
             curBlock_->add(ins);
             curBlock_->initSlot(info().localSlot(i), ins);
             if (!mirGen_.ensureBallast())
                 return false;
         }
 
+        addInterruptCheck();
+
         return true;
     }
 
     void finish()
     {
         mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
 
         MOZ_ASSERT(callStack_.empty());
@@ -1028,19 +1030,18 @@ class FunctionCompiler
     {
         if (inDeadCode())
             return;
         curBlock_->add(MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v, tlsPointer_));
     }
 
     void addInterruptCheck()
     {
-        if (inDeadCode())
-            return;
-        curBlock_->add(MWasmInterruptCheck::New(alloc(), tlsPointer_, bytecodeOffset()));
+        // We rely on signal handlers for interrupts on Asm.JS/Wasm
+        MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
     }
 
     MDefinition* extractSimdElement(unsigned lane, MDefinition* base, MIRType type, SimdSign sign)
     {
         if (inDeadCode())
             return nullptr;
 
         MOZ_ASSERT(IsSimdType(base->type()));
--- a/js/src/wasm/WasmModule.h
+++ b/js/src/wasm/WasmModule.h
@@ -37,16 +37,17 @@ struct CompileArgs;
 //
 // LinkData is built incrementally by ModuleGenerator and then stored immutably
 // in Module. LinkData is distinct from Metadata in that LinkData is owned and
 // destroyed by the Module since it is not needed after instantiation; Metadata
 // is needed at runtime.
 
 struct LinkDataTierCacheablePod
 {
+    uint32_t interruptOffset;
     uint32_t outOfBoundsOffset;
     uint32_t unalignedAccessOffset;
     uint32_t trapOffset;
 
     LinkDataTierCacheablePod() { mozilla::PodZero(this); }
 };
 
 struct LinkDataTier : LinkDataTierCacheablePod
--- a/js/src/wasm/WasmProcess.cpp
+++ b/js/src/wasm/WasmProcess.cpp
@@ -45,24 +45,25 @@ class ProcessCodeSegmentMap
     // Since writes (insertions or removals) can happen on any background
     // thread at the same time, we need a lock here.
 
     Mutex mutatorsMutex_;
 
     CodeSegmentVector segments1_;
     CodeSegmentVector segments2_;
 
-    // Because of profiling, the thread running wasm might need to know to which
-    // CodeSegment the current PC belongs, during a call to lookup(). A lookup
-    // is a read-only operation, and we don't want to take a lock then
+    // Because of sampling/interruptions/stack iteration in general, the
+    // thread running wasm might need to know to which CodeSegment the
+    // current PC belongs, during a call to lookup(). A lookup is a
+    // read-only operation, and we don't want to take a lock then
     // (otherwise, we could have a deadlock situation if an async lookup
     // happened on a given thread that was holding mutatorsMutex_ while getting
-    // sampled). Since the writer could be modifying the data that is getting
-    // looked up, the writer functions use spin-locks to know if there are any
-    // observers (i.e. calls to lookup()) of the atomic data.
+    // interrupted/sampled). Since the writer could be modifying the data that
+    // is getting looked up, the writer functions use spin-locks to know if
+    // there are any observers (i.e. calls to lookup()) of the atomic data.
 
     Atomic<size_t> observers_;
 
     // Except during swapAndWait(), there are no lookup() observers of the
     // vector pointed to by mutableCodeSegments_
 
     CodeSegmentVector* mutableCodeSegments_;
     Atomic<const CodeSegmentVector*> readonlyCodeSegments_;
--- a/js/src/wasm/WasmProcess.h
+++ b/js/src/wasm/WasmProcess.h
@@ -25,17 +25,17 @@ namespace js {
 namespace wasm {
 
 class Code;
 class CodeRange;
 class CodeSegment;
 
 // These methods return the wasm::CodeSegment (resp. wasm::Code) containing
 // the given pc, if any exist in the process. These methods do not take a lock,
-// and thus are safe to use in a profiling context.
+// and thus are safe to use in a profiling or async interrupt context.
 
 const CodeSegment*
 LookupCodeSegment(const void* pc, const CodeRange** codeRange = nullptr);
 
 const Code*
 LookupCode(const void* pc, const CodeRange** codeRange = nullptr);
 
 // A bool member that can be used as a very fast lookup to know if there is any
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -26,42 +26,30 @@
 #include "jit/AtomicOperations.h"
 #include "jit/Disassembler.h"
 #include "vm/Runtime.h"
 #include "wasm/WasmBuiltins.h"
 #include "wasm/WasmInstance.h"
 
 #include "vm/ArrayBufferObject-inl.h"
 
-#if defined(XP_WIN)
-# include "util/Windows.h"
-#else
-# include <signal.h>
-# include <sys/mman.h>
-#endif
-
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
-# include <sys/ucontext.h> // for ucontext_t, mcontext_t
-#endif
-
-#if defined(__x86_64__)
-# if defined(__DragonFly__)
-#  include <machine/npx.h> // for union savefpu
-# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
-       defined(__NetBSD__) || defined(__OpenBSD__)
-#  include <machine/fpu.h> // for struct savefpu/fxsave64
-# endif
-#endif
-
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using JS::GenericNaN;
 using mozilla::DebugOnly;
+using mozilla::PodArrayZero;
+
+#if defined(ANDROID)
+# include <sys/system_properties.h>
+# if defined(MOZ_LINKER)
+extern "C" MFBT_API bool IsSignalHandlingBroken();
+# endif
+#endif
 
 // Crashing inside the signal handler can cause the handler to be recursively
 // invoked, eventually blowing the stack without actually showing a crash
 // report dialog via Breakpad. To guard against this we watch for such
 // recursion and fall through to the next handler immediately rather than
 // trying to handle it.
 
 static MOZ_THREAD_LOCAL(bool) sAlreadyInSignalHandler;
@@ -264,30 +252,48 @@ struct AutoSignalHandler
 #  define RLR_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_lr)
 #  define R31_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_sp)
 # endif
 # if defined(__FreeBSD__) && defined(__mips__)
 #  define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
 #  define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
 # endif
 #elif defined(XP_DARWIN)
-# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
-# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
-# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
-# define RIP_sig(p) ((p)->thread.__rip)
-# define RBP_sig(p) ((p)->thread.__rbp)
-# define RSP_sig(p) ((p)->thread.__rsp)
-# define R11_sig(p) ((p)->thread.__r[11])
-# define R13_sig(p) ((p)->thread.__sp)
-# define R14_sig(p) ((p)->thread.__lr)
-# define R15_sig(p) ((p)->thread.__pc)
+# define EIP_sig(p) ((p)->uc_mcontext->__ss.__eip)
+# define EBP_sig(p) ((p)->uc_mcontext->__ss.__ebp)
+# define ESP_sig(p) ((p)->uc_mcontext->__ss.__esp)
+# define RIP_sig(p) ((p)->uc_mcontext->__ss.__rip)
+# define RBP_sig(p) ((p)->uc_mcontext->__ss.__rbp)
+# define RSP_sig(p) ((p)->uc_mcontext->__ss.__rsp)
+# define R14_sig(p) ((p)->uc_mcontext->__ss.__lr)
+# define R15_sig(p) ((p)->uc_mcontext->__ss.__pc)
 #else
 # error "Don't know how to read/write to the thread state via the mcontext_t."
 #endif
 
+#if defined(XP_WIN)
+# include "util/Windows.h"
+#else
+# include <signal.h>
+# include <sys/mman.h>
+#endif
+
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# include <sys/ucontext.h> // for ucontext_t, mcontext_t
+#endif
+
+#if defined(__x86_64__)
+# if defined(__DragonFly__)
+#  include <machine/npx.h> // for union savefpu
+# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
+       defined(__NetBSD__) || defined(__OpenBSD__)
+#  include <machine/fpu.h> // for struct savefpu/fxsave64
+# endif
+#endif
+
 #if defined(ANDROID)
 // Not all versions of the Android NDK define ucontext_t or mcontext_t.
 // Detect this and provide custom but compatible definitions. Note that these
 // follow the GLibc naming convention to access register values from
 // mcontext_t.
 //
 // See: https://chromiumcodereview.appspot.com/10829122/
 // See: http://code.google.com/p/android/issues/detail?id=34784
@@ -358,40 +364,48 @@ typedef struct ucontext {
     mcontext_t uc_mcontext;
     // Other fields are not used by V8, don't define them here.
 } ucontext_t;
 enum { REG_EIP = 14 };
 #  endif  // defined(__i386__)
 # endif  // !defined(__BIONIC_HAVE_UCONTEXT_T)
 #endif // defined(ANDROID)
 
+#if !defined(XP_WIN)
+# define CONTEXT ucontext_t
+#endif
+
+// Define a context type for use in the emulator code. This is usually just
+// the same as CONTEXT, but on Mac we use a different structure since we call
+// into the emulator code from a Mach exception handler rather than a
+// sigaction-style signal handler.
 #if defined(XP_DARWIN)
 # if defined(__x86_64__)
 struct macos_x64_context {
     x86_thread_state64_t thread;
     x86_float_state64_t float_;
 };
-#  define CONTEXT macos_x64_context
+#  define EMULATOR_CONTEXT macos_x64_context
 # elif defined(__i386__)
 struct macos_x86_context {
     x86_thread_state_t thread;
     x86_float_state_t float_;
 };
-#  define CONTEXT macos_x86_context
+#  define EMULATOR_CONTEXT macos_x86_context
 # elif defined(__arm__)
 struct macos_arm_context {
     arm_thread_state_t thread;
     arm_neon_state_t float_;
 };
-#  define CONTEXT macos_arm_context
+#  define EMULATOR_CONTEXT macos_arm_context
 # else
 #  error Unsupported architecture
 # endif
-#elif !defined(XP_WIN)
-# define CONTEXT ucontext_t
+#else
+# define EMULATOR_CONTEXT CONTEXT
 #endif
 
 #if defined(_M_X64) || defined(__x86_64__)
 # define PC_sig(p) RIP_sig(p)
 # define FP_sig(p) RBP_sig(p)
 # define SP_sig(p) RSP_sig(p)
 #elif defined(_M_IX86) || defined(__i386__)
 # define PC_sig(p) EIP_sig(p)
@@ -409,71 +423,143 @@ struct macos_arm_context {
 # define LR_sig(p) RLR_sig(p)
 #elif defined(__mips__)
 # define PC_sig(p) EPC_sig(p)
 # define FP_sig(p) RFP_sig(p)
 # define SP_sig(p) RSP_sig(p)
 # define LR_sig(p) R31_sig(p)
 #endif
 
+#if defined(PC_sig) && defined(FP_sig) && defined(SP_sig)
+# define KNOWS_MACHINE_STATE
+#endif
+
 static uint8_t**
 ContextToPC(CONTEXT* context)
 {
-#ifdef PC_sig
+#ifdef KNOWS_MACHINE_STATE
     return reinterpret_cast<uint8_t**>(&PC_sig(context));
 #else
     MOZ_CRASH();
 #endif
 }
 
 static uint8_t*
 ContextToFP(CONTEXT* context)
 {
-#ifdef FP_sig
+#ifdef KNOWS_MACHINE_STATE
     return reinterpret_cast<uint8_t*>(FP_sig(context));
 #else
     MOZ_CRASH();
 #endif
 }
 
+#ifdef KNOWS_MACHINE_STATE
 static uint8_t*
 ContextToSP(CONTEXT* context)
 {
-#ifdef SP_sig
     return reinterpret_cast<uint8_t*>(SP_sig(context));
-#else
-    MOZ_CRASH();
-#endif
 }
 
-#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+# if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
 static uint8_t*
 ContextToLR(CONTEXT* context)
 {
-# ifdef LR_sig
     return reinterpret_cast<uint8_t*>(LR_sig(context));
+}
+# endif
+#endif // KNOWS_MACHINE_STATE
+
+#if defined(XP_DARWIN)
+
+static uint8_t**
+ContextToPC(EMULATOR_CONTEXT* context)
+{
+# if defined(__x86_64__)
+    static_assert(sizeof(context->thread.__rip) == sizeof(void*),
+                  "stored IP should be compile-time pointer-sized");
+    return reinterpret_cast<uint8_t**>(&context->thread.__rip);
+# elif defined(__i386__)
+    static_assert(sizeof(context->thread.uts.ts32.__eip) == sizeof(void*),
+                  "stored IP should be compile-time pointer-sized");
+    return reinterpret_cast<uint8_t**>(&context->thread.uts.ts32.__eip);
+# elif defined(__arm__)
+    static_assert(sizeof(context->thread.__pc) == sizeof(void*),
+                  "stored IP should be compile-time pointer-sized");
+    return reinterpret_cast<uint8_t**>(&context->thread.__pc);
 # else
-    MOZ_CRASH();
+#  error Unsupported architecture
 # endif
 }
-#endif
+
+static uint8_t*
+ContextToFP(EMULATOR_CONTEXT* context)
+{
+# if defined(__x86_64__)
+    return (uint8_t*)context->thread.__rbp;
+# elif defined(__i386__)
+    return (uint8_t*)context->thread.uts.ts32.__ebp;
+# elif defined(__arm__)
+    return (uint8_t*)context->thread.__r[11];
+# else
+#  error Unsupported architecture
+# endif
+}
+
+# if defined(__arm__) || defined(__aarch64__)
+static uint8_t*
+ContextToLR(EMULATOR_CONTEXT* context)
+{
+    return (uint8_t*)context->thread.__lr;
+}
+# endif
+
+static uint8_t*
+ContextToSP(EMULATOR_CONTEXT* context)
+{
+# if defined(__x86_64__)
+    return (uint8_t*)context->thread.__rsp;
+# elif defined(__i386__)
+    return (uint8_t*)context->thread.uts.ts32.__esp;
+# elif defined(__arm__)
+    return (uint8_t*)context->thread.__sp;
+# else
+#  error Unsupported architecture
+# endif
+}
 
 static JS::ProfilingFrameIterator::RegisterState
-ToRegisterState(CONTEXT* context)
+ToRegisterState(EMULATOR_CONTEXT* context)
 {
     JS::ProfilingFrameIterator::RegisterState state;
     state.fp = ContextToFP(context);
     state.pc = *ContextToPC(context);
     state.sp = ContextToSP(context);
-#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+# if defined(__arm__) || defined(__aarch64__)
     state.lr = ContextToLR(context);
+# endif
+    return state;
+}
+#endif // XP_DARWIN
+
+static JS::ProfilingFrameIterator::RegisterState
+ToRegisterState(CONTEXT* context)
+{
+#ifdef KNOWS_MACHINE_STATE
+    JS::ProfilingFrameIterator::RegisterState state;
+    state.fp = ContextToFP(context);
+    state.pc = *ContextToPC(context);
+    state.sp = ContextToSP(context);
+# if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+    state.lr = ContextToLR(context);
+# endif
+    return state;
 #else
-    state.lr = (void*)UINTPTR_MAX;
+    MOZ_CRASH();
 #endif
-    return state;
 }
 
 #if defined(WASM_HUGE_MEMORY)
 MOZ_COLD static void
 SetFPRegToNaN(size_t size, void* fp_reg)
 {
     MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
     memset(fp_reg, 0, Simd128DataSize);
@@ -562,17 +648,17 @@ AddressOfFPRegisterSlot(CONTEXT* context
       case X86Encoding::xmm14: return &XMM_sig(context, 14);
       case X86Encoding::xmm15: return &XMM_sig(context, 15);
       default: break;
     }
     MOZ_CRASH();
 }
 
 MOZ_COLD static void*
-AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
+AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
 {
     switch (code) {
       case X86Encoding::rax: return &RAX_sig(context);
       case X86Encoding::rcx: return &RCX_sig(context);
       case X86Encoding::rdx: return &RDX_sig(context);
       case X86Encoding::rbx: return &RBX_sig(context);
       case X86Encoding::rsp: return &RSP_sig(context);
       case X86Encoding::rbp: return &RBP_sig(context);
@@ -587,17 +673,17 @@ AddressOfGPRegisterSlot(CONTEXT* context
       case X86Encoding::r14: return &R14_sig(context);
       case X86Encoding::r15: return &R15_sig(context);
       default: break;
     }
     MOZ_CRASH();
 }
 # else
 MOZ_COLD static void*
-AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
+AddressOfFPRegisterSlot(EMULATOR_CONTEXT* context, FloatRegisters::Encoding encoding)
 {
     switch (encoding) {
       case X86Encoding::xmm0:  return &context->float_.__fpu_xmm0;
       case X86Encoding::xmm1:  return &context->float_.__fpu_xmm1;
       case X86Encoding::xmm2:  return &context->float_.__fpu_xmm2;
       case X86Encoding::xmm3:  return &context->float_.__fpu_xmm3;
       case X86Encoding::xmm4:  return &context->float_.__fpu_xmm4;
       case X86Encoding::xmm5:  return &context->float_.__fpu_xmm5;
@@ -612,17 +698,17 @@ AddressOfFPRegisterSlot(CONTEXT* context
       case X86Encoding::xmm14: return &context->float_.__fpu_xmm14;
       case X86Encoding::xmm15: return &context->float_.__fpu_xmm15;
       default: break;
     }
     MOZ_CRASH();
 }
 
 MOZ_COLD static void*
-AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
+AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
 {
     switch (code) {
       case X86Encoding::rax: return &context->thread.__rax;
       case X86Encoding::rcx: return &context->thread.__rcx;
       case X86Encoding::rdx: return &context->thread.__rdx;
       case X86Encoding::rbx: return &context->thread.__rbx;
       case X86Encoding::rsp: return &context->thread.__rsp;
       case X86Encoding::rbp: return &context->thread.__rbp;
@@ -638,69 +724,69 @@ AddressOfGPRegisterSlot(CONTEXT* context
       case X86Encoding::r15: return &context->thread.__r15;
       default: break;
     }
     MOZ_CRASH();
 }
 # endif  // !XP_DARWIN
 #elif defined(JS_CODEGEN_ARM64)
 MOZ_COLD static void*
-AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
+AddressOfFPRegisterSlot(EMULATOR_CONTEXT* context, FloatRegisters::Encoding encoding)
 {
     MOZ_CRASH("NYI - asm.js not supported yet on this platform");
 }
 
 MOZ_COLD static void*
-AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
+AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
 {
     MOZ_CRASH("NYI - asm.js not supported yet on this platform");
 }
 #endif
 
 MOZ_COLD static void
-SetRegisterToCoercedUndefined(CONTEXT* context, size_t size,
+SetRegisterToCoercedUndefined(EMULATOR_CONTEXT* context, size_t size,
                               const Disassembler::OtherOperand& value)
 {
     if (value.kind() == Disassembler::OtherOperand::FPR)
         SetFPRegToNaN(size, AddressOfFPRegisterSlot(context, value.fpr()));
     else
         SetGPRegToZero(AddressOfGPRegisterSlot(context, value.gpr()));
 }
 
 MOZ_COLD static void
-SetRegisterToLoadedValue(CONTEXT* context, SharedMem<void*> addr, size_t size,
+SetRegisterToLoadedValue(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                          const Disassembler::OtherOperand& value)
 {
     if (value.kind() == Disassembler::OtherOperand::FPR)
         SetFPRegToLoadedValue(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
     else
         SetGPRegToLoadedValue(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
 }
 
 MOZ_COLD static void
-SetRegisterToLoadedValueSext32(CONTEXT* context, SharedMem<void*> addr, size_t size,
+SetRegisterToLoadedValueSext32(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                                const Disassembler::OtherOperand& value)
 {
     SetGPRegToLoadedValueSext32(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
 }
 
 MOZ_COLD static void
-StoreValueFromRegister(CONTEXT* context, SharedMem<void*> addr, size_t size,
+StoreValueFromRegister(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                        const Disassembler::OtherOperand& value)
 {
     if (value.kind() == Disassembler::OtherOperand::FPR)
         StoreValueFromFPReg(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
     else if (value.kind() == Disassembler::OtherOperand::GPR)
         StoreValueFromGPReg(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
     else
         StoreValueFromGPImm(addr, size, value.imm());
 }
 
 MOZ_COLD static uint8_t*
-ComputeAccessAddress(CONTEXT* context, const Disassembler::ComplexAddress& address)
+ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddress& address)
 {
     MOZ_RELEASE_ASSERT(!address.isPCRelative(), "PC-relative addresses not supported yet");
 
     uintptr_t result = address.disp();
 
     if (address.hasBase()) {
         uintptr_t base;
         StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
@@ -715,29 +801,29 @@ ComputeAccessAddress(CONTEXT* context, c
         MOZ_ASSERT(address.scale() < 32, "address shift overflow");
         result += index * (uintptr_t(1) << address.scale());
     }
 
     return reinterpret_cast<uint8_t*>(result);
 }
 
 MOZ_COLD static void
-HandleMemoryAccess(CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
+HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
                    const ModuleSegment* segment, const Instance& instance, JitActivation* activation,
                    uint8_t** ppc)
 {
     MOZ_RELEASE_ASSERT(instance.code().containsCodePC(pc));
 
     const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc);
     if (!memoryAccess) {
         // If there is no associated MemoryAccess for the faulting PC, this must be
         // experimental SIMD.js or Atomics. When these are converted to
         // non-experimental wasm features, this case, as well as outOfBoundsCode,
         // can be removed.
-        activation->startWasmTrap(wasm::Trap::OutOfBounds, 0, ToRegisterState(context));
+        MOZ_ALWAYS_TRUE(activation->startWasmInterrupt(ToRegisterState(context)));
         *ppc = segment->outOfBoundsCode();
         return;
     }
 
     MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - segment->base()));
 
     // On WASM_HUGE_MEMORY platforms, asm.js code may fault. asm.js does not
     // trap on fault and so has no trap out-of-line path. Instead, stores are
@@ -871,26 +957,26 @@ HandleMemoryAccess(CONTEXT* context, uin
     }
 
     *ppc = end;
 }
 
 #else // WASM_HUGE_MEMORY
 
 MOZ_COLD static void
-HandleMemoryAccess(CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
+HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
                    const ModuleSegment* segment, const Instance& instance, JitActivation* activation,
                    uint8_t** ppc)
 {
     MOZ_RELEASE_ASSERT(instance.code().containsCodePC(pc));
 
     const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc);
     if (!memoryAccess) {
         // See explanation in the WASM_HUGE_MEMORY HandleMemoryAccess.
-        activation->startWasmTrap(wasm::Trap::OutOfBounds, 0, ToRegisterState(context));
+        MOZ_ALWAYS_TRUE(activation->startWasmInterrupt(ToRegisterState(context)));
         *ppc = segment->outOfBoundsCode();
         return;
     }
 
     MOZ_RELEASE_ASSERT(memoryAccess->hasTrapOutOfLineCode());
     *ppc = memoryAccess->trapOutOfLineCode(segment->base());
 }
 
@@ -928,18 +1014,43 @@ HandleFault(PEXCEPTION_POINTERS exceptio
         return false;
 
     const ModuleSegment* moduleSegment = codeSegment->asModule();
 
     JitActivation* activation = TlsContext.get()->activation()->asJit();
     MOZ_ASSERT(activation);
 
     const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(context));
-    if (!instance)
-        return false;
+    if (!instance) {
+        // On Windows, it is possible for InterruptRunningJitCode to execute
+        // between a faulting instruction and the handling of the fault due
+        // to InterruptRunningJitCode's use of SuspendThread. When this happens,
+        // after ResumeThread, the exception handler is called with pc equal to
+        // ModuleSegment.interrupt, which is logically wrong. The Right Thing would
+        // be for the OS to make fault-handling atomic (so that CONTEXT.pc was
+        // always the logically-faulting pc). Fortunately, we can detect this
+        // case and silence the exception ourselves (the exception will
+        // retrigger after the interrupt jumps back to resumePC).
+        return activation->isWasmInterrupted() &&
+               pc == moduleSegment->interruptCode() &&
+               moduleSegment->containsCodePC(activation->wasmInterruptResumePC());
+    }
+
+    // In the same race-with-interrupt situation above, it's *also* possible
+    // that the reported 'pc' is the pre-interrupt pc, not post-interrupt
+    // moduleSegment->interruptCode (this may be windows-version-specific). In
+    // this case, lookupTrap()/lookupMemoryAccess() will all succeed causing the
+    // pc to be redirected *again* (to a trap stub), leading to the interrupt
+    // stub never being called. Since the goal of the async interrupt is to break
+    // out iloops and trapping does just that, this is fine, we just clear the
+    // "interrupted" state.
+    if (activation->isWasmInterrupted()) {
+        MOZ_ASSERT(activation->wasmInterruptResumePC() == pc);
+        activation->finishWasmInterrupt();
+    }
 
     if (record->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
         Trap trap;
         BytecodeOffset bytecode;
         if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode))
             return false;
 
         activation->startWasmTrap(trap, bytecode.offset, ToRegisterState(context));
@@ -1009,17 +1120,17 @@ struct ExceptionRequest
 
 static bool
 HandleMachException(JSContext* cx, const ExceptionRequest& request)
 {
     // Get the port of the JSContext's thread from the message.
     mach_port_t cxThread = request.body.thread.name;
 
     // Read out the JSRuntime thread's register state.
-    CONTEXT context;
+    EMULATOR_CONTEXT context;
 # if defined(__x86_64__)
     unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
     unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
     int thread_state = x86_THREAD_STATE64;
     int float_state = x86_FLOAT_STATE64;
 # elif defined(__i386__)
     unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
     unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
@@ -1330,17 +1441,17 @@ HandleFault(int signum, siginfo_t* info,
     }
 
 #ifdef JS_CODEGEN_ARM
     if (signum == SIGBUS) {
         // TODO: We may see a bus error for something that is an unaligned access that
         // partly overlaps the end of the heap.  In this case, it is an out-of-bounds
         // error and we should signal that properly, but to do so we must inspect
         // the operand of the failed access.
-        activation->startWasmTrap(wasm::Trap::UnalignedAccess, 0, ToRegisterState(context));
+        MOZ_ALWAYS_TRUE(activation->startWasmInterrupt(ToRegisterState(context)));
         *ppc = moduleSegment->unalignedAccessCode();
         return true;
     }
 #endif
 
     HandleMemoryAccess(context, pc, faultingAddress, moduleSegment, *instance, activation, ppc);
     return true;
 }
@@ -1379,91 +1490,240 @@ WasmFaultHandler(int signum, siginfo_t* 
         previousSignal->sa_sigaction(signum, info, context);
     else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN)
         sigaction(signum, previousSignal, nullptr);
     else
         previousSignal->sa_handler(signum);
 }
 # endif // XP_WIN || XP_DARWIN || assume unix
 
-#if defined(ANDROID) && defined(MOZ_LINKER)
-extern "C" MFBT_API bool IsSignalHandlingBroken();
+static void
+RedirectIonBackedgesToInterruptCheck(JSContext* cx)
+{
+    if (!cx->runtime()->hasJitRuntime())
+        return;
+    jit::JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
+    Zone* zone = cx->zoneRaw();
+    if (zone && !zone->isAtomsZone()) {
+        // If the backedge list is being mutated, the pc must be in C++ code and
+        // thus not in a JIT iloop. We assume that the interrupt flag will be
+        // checked at least once before entering JIT code (if not, no big deal;
+        // the browser will just request another interrupt in a second).
+        if (!jitRuntime->preventBackedgePatching()) {
+            jit::JitZoneGroup* jzg = zone->group()->jitZoneGroup;
+            jzg->patchIonBackedges(cx, jit::JitZoneGroup::BackedgeInterruptCheck);
+        }
+    }
+}
+
+bool
+wasm::InInterruptibleCode(JSContext* cx, uint8_t* pc, const ModuleSegment** ms)
+{
+    // Only interrupt in function code so that the frame iterators have the
+    // invariant that resumePC always has a function CodeRange and we can't
+    // get into any weird interrupt-during-interrupt-stub cases.
+
+    if (!cx->compartment())
+        return false;
+
+    const CodeSegment* cs = LookupCodeSegment(pc);
+    if (!cs || !cs->isModule())
+        return false;
+
+    *ms = cs->asModule();
+    return !!(*ms)->code().lookupFuncRange(pc);
+}
+
+// The return value indicates whether the PC was changed, not whether there was
+// a failure.
+static bool
+RedirectJitCodeToInterruptCheck(JSContext* cx, CONTEXT* context)
+{
+    // Jitcode may only be modified on the runtime's active thread.
+    if (cx != cx->runtime()->activeContext())
+        return false;
+
+    // The faulting thread is suspended so we can access cx fields that can
+    // normally only be accessed by the cx's active thread.
+    AutoNoteSingleThreadedRegion anstr;
+
+    RedirectIonBackedgesToInterruptCheck(cx);
+
+#ifdef JS_SIMULATOR
+    uint8_t* pc = cx->simulator()->get_pc_as<uint8_t*>();
+#else
+    uint8_t* pc = *ContextToPC(context);
+#endif
+
+    const ModuleSegment* moduleSegment = nullptr;
+    if (!InInterruptibleCode(cx, pc, &moduleSegment))
+        return false;
+
+#ifdef JS_SIMULATOR
+    // The checks performed by the !JS_SIMULATOR path happen in
+    // Simulator::handleWasmInterrupt.
+    cx->simulator()->trigger_wasm_interrupt();
+#else
+    // Only probe cx->activation() after we know the pc is in wasm code. This
+    // way we don't depend on signal-safe update of cx->activation().
+    JitActivation* activation = cx->activation()->asJit();
+
+    // The out-of-bounds/unaligned trap paths which call startWasmInterrupt() go
+    // through function code, so test if already interrupted. These paths are
+    // temporary though, so this case can be removed later.
+    if (activation->isWasmInterrupted())
+        return false;
+
+    if (!activation->startWasmInterrupt(ToRegisterState(context)))
+        return false;
+
+    *ContextToPC(context) = moduleSegment->interruptCode();
+#endif
+
+    return true;
+}
+
+#if !defined(XP_WIN)
+// For the interrupt signal, pick a signal number that:
+//  - is not otherwise used by mozilla or standard libraries
+//  - defaults to nostop and noprint on gdb/lldb so that no one is bothered
+// SIGVTALRM is a relative of SIGALRM, so intended for user code, but, unlike
+// SIGALRM, not used anywhere else in Mozilla.
+static const int sInterruptSignal = SIGVTALRM;
+
+static void
+JitInterruptHandler(int signum, siginfo_t* info, void* context)
+{
+    if (JSContext* cx = TlsContext.get()) {
+
+#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+        SimulatorProcess::ICacheCheckingDisableCount++;
+#endif
+
+        RedirectJitCodeToInterruptCheck(cx, (CONTEXT*)context);
+
+#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+        SimulatorProcess::cacheInvalidatedBySignalHandler_ = true;
+        SimulatorProcess::ICacheCheckingDisableCount--;
+#endif
+
+        cx->finishHandlingJitInterrupt();
+    }
+}
 #endif
 
 static bool sTriedInstallSignalHandlers = false;
 static bool sHaveSignalHandlers = false;
 
 static bool
 ProcessHasSignalHandlers()
 {
     // We assume that there are no races creating the first JSRuntime of the process.
     if (sTriedInstallSignalHandlers)
         return sHaveSignalHandlers;
     sTriedInstallSignalHandlers = true;
 
-#if defined(ANDROID) && defined(MOZ_LINKER)
+#if defined(ANDROID)
+# if !defined(__aarch64__)
+    // Before Android 4.4 (SDK version 19), there is a bug
+    //   https://android-review.googlesource.com/#/c/52333
+    // in Bionic's pthread_join which causes pthread_join to return early when
+    // pthread_kill is used (on any thread). Nobody expects the pthread_cond_wait
+    // EINTRquisition.
+    char version_string[PROP_VALUE_MAX];
+    PodArrayZero(version_string);
+    if (__system_property_get("ro.build.version.sdk", version_string) > 0) {
+        if (atol(version_string) < 19)
+            return false;
+    }
+# endif
+# if defined(MOZ_LINKER)
     // Signal handling is broken on some android systems.
     if (IsSignalHandlingBroken())
         return false;
+# endif
 #endif
 
+    // The interrupt handler allows the active thread to be paused from another
+    // thread (see InterruptRunningJitCode).
+#if defined(XP_WIN)
+    // Windows uses SuspendThread to stop the active thread from another thread.
+#else
+    struct sigaction interruptHandler;
+    interruptHandler.sa_flags = SA_SIGINFO;
+    interruptHandler.sa_sigaction = &JitInterruptHandler;
+    sigemptyset(&interruptHandler.sa_mask);
+    struct sigaction prev;
+    if (sigaction(sInterruptSignal, &interruptHandler, &prev))
+        MOZ_CRASH("unable to install interrupt handler");
+
+    // There shouldn't be any other handlers installed for sInterruptSignal. If
+    // there are, we could always forward, but we need to understand what we're
+    // doing to avoid problematic interference.
+    if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
+        (prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
+    {
+        MOZ_CRASH("contention for interrupt signal");
+    }
+#endif // defined(XP_WIN)
+
     // Initalize ThreadLocal flag used by WasmFaultHandler
     sAlreadyInSignalHandler.infallibleInit();
 
     // Install a SIGSEGV handler to handle safely-out-of-bounds asm.js heap
     // access and/or unaligned accesses.
-#if defined(XP_WIN)
-# if defined(MOZ_ASAN)
+# if defined(XP_WIN)
+#  if defined(MOZ_ASAN)
     // Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
     // in the first handler position. This requires some coordination with
     // MemoryProtectionExceptionHandler::isDisabled().
     const bool firstHandler = false;
-# else
+#  else
     // Otherwise, WasmFaultHandler needs to go first, so that we can recover
     // from wasm faults and continue execution without triggering handlers
     // such as MemoryProtectionExceptionHandler that assume we are crashing.
     const bool firstHandler = true;
-# endif
+#  endif
     if (!AddVectoredExceptionHandler(firstHandler, WasmFaultHandler))
         return false;
-#elif defined(XP_DARWIN)
+# elif defined(XP_DARWIN)
     // OSX handles seg faults via the Mach exception handler above, so don't
     // install WasmFaultHandler.
-#else
+# else
     // SA_NODEFER allows us to reenter the signal handler if we crash while
     // handling the signal, and fall through to the Breakpad handler by testing
     // handlingSegFault.
 
     // Allow handling OOB with signals on all architectures
     struct sigaction faultHandler;
     faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
     faultHandler.sa_sigaction = WasmFaultHandler;
     sigemptyset(&faultHandler.sa_mask);
     if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler))
         MOZ_CRASH("unable to install segv handler");
 
-# if defined(JS_CODEGEN_ARM)
+#  if defined(JS_CODEGEN_ARM)
     // On Arm Handle Unaligned Accesses
     struct sigaction busHandler;
     busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
     busHandler.sa_sigaction = WasmFaultHandler;
     sigemptyset(&busHandler.sa_mask);
     if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler))
         MOZ_CRASH("unable to install sigbus handler");
-# endif
+#  endif
 
     // Install a handler to handle the instructions that are emitted to implement
     // wasm traps.
     struct sigaction wasmTrapHandler;
     wasmTrapHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
     wasmTrapHandler.sa_sigaction = WasmFaultHandler;
     sigemptyset(&wasmTrapHandler.sa_mask);
     if (sigaction(kWasmTrapSignal, &wasmTrapHandler, &sPrevWasmTrapHandler))
         MOZ_CRASH("unable to install wasm trap handler");
-#endif
+# endif
 
     sHaveSignalHandlers = true;
     return true;
 }
 
 bool
 wasm::EnsureSignalHandlers(JSContext* cx)
 {
@@ -1481,8 +1741,66 @@ wasm::EnsureSignalHandlers(JSContext* cx
 }
 
 bool
 wasm::HaveSignalHandlers()
 {
     MOZ_ASSERT(sTriedInstallSignalHandlers);
     return sHaveSignalHandlers;
 }
+
+// JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
+// C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
+// checked at every Baseline and Ion JIT function prologue). The remaining
+// sources of potential iloops (Ion loop backedges and all wasm code) are
+// handled by this function:
+//  1. Ion loop backedges are patched to instead point to a stub that handles
+//     the interrupt;
+//  2. if the active thread's pc is inside wasm code, the pc is updated to point
+//     to a stub that handles the interrupt.
+void
+js::InterruptRunningJitCode(JSContext* cx)
+{
+    // If signal handlers weren't installed, then Ion and wasm emit normal
+    // interrupt checks and don't need asynchronous interruption.
+    if (!HaveSignalHandlers())
+        return;
+
+    // Do nothing if we're already handling an interrupt here, to avoid races
+    // below and in JitRuntime::patchIonBackedges.
+    if (!cx->startHandlingJitInterrupt())
+        return;
+
+    // If we are on context's thread, then: pc is not in wasm code (so nothing
+    // to do for wasm) and we can patch Ion backedges without any special
+    // synchronization.
+    if (cx == TlsContext.get()) {
+        RedirectIonBackedgesToInterruptCheck(cx);
+        cx->finishHandlingJitInterrupt();
+        return;
+    }
+
+    // We are not on the runtime's active thread, so to do 1 and 2 above, we need
+    // to halt the runtime's active thread first.
+#if defined(XP_WIN)
+    // On Windows, we can simply suspend the active thread and work directly on
+    // its context from this thread. SuspendThread can sporadically fail if the
+    // thread is in the middle of a syscall. Rather than retrying in a loop,
+    // just wait for the next request for interrupt.
+    HANDLE thread = (HANDLE)cx->threadNative();
+    if (SuspendThread(thread) != (DWORD)-1) {
+        CONTEXT context;
+        context.ContextFlags = CONTEXT_FULL;
+        if (GetThreadContext(thread, &context)) {
+            if (RedirectJitCodeToInterruptCheck(cx, &context))
+                SetThreadContext(thread, &context);
+        }
+        ResumeThread(thread);
+    }
+    cx->finishHandlingJitInterrupt();
+#else
+    // On Unix, we instead deliver an async signal to the active thread which
+    // halts the thread and calls our JitInterruptHandler (which has already
+    // been installed by EnsureSignalHandlersInstalled).
+    pthread_t thread = (pthread_t)cx->threadNative();
+    pthread_kill(thread, sInterruptSignal);
+#endif
+}
--- a/js/src/wasm/WasmSignalHandlers.h
+++ b/js/src/wasm/WasmSignalHandlers.h
@@ -25,29 +25,41 @@
 # include <mach/mach.h>
 #endif
 
 #include "js/TypeDecls.h"
 #include "threading/Thread.h"
 #include "wasm/WasmTypes.h"
 
 namespace js {
+
+// Force any currently-executing asm.js/ion code to call HandleExecutionInterrupt.
+extern void
+InterruptRunningJitCode(JSContext* cx);
+
 namespace wasm {
 
 // Ensure the given JSRuntime is set up to use signals. Failure to enable signal
 // handlers indicates some catastrophic failure and creation of the runtime must
 // fail.
 MOZ_MUST_USE bool
 EnsureSignalHandlers(JSContext* cx);
 
-// Return whether signals can be used in this process for asm.js/wasm
-// out-of-bounds.
+// Return whether signals can be used in this process for interrupts or
+// asm.js/wasm out-of-bounds.
 bool
 HaveSignalHandlers();
 
+class ModuleSegment;
+
+// Returns true if wasm code is on top of the activation stack (and fills out
+// the code segment outparam in this case), or false otherwise.
+bool
+InInterruptibleCode(JSContext* cx, uint8_t* pc, const ModuleSegment** ms);
+
 #if defined(XP_DARWIN)
 // On OSX we are forced to use the lower-level Mach exception mechanism instead
 // of Unix signals. Mach exceptions are not handled on the victim's stack but
 // rather require an extra thread. For simplicity, we create one such thread
 // per JSContext (upon the first use of wasm in the JSContext). This thread
 // and related resources are owned by AsmJSMachExceptionHandler which is owned
 // by JSContext.
 class MachExceptionHandler
@@ -62,22 +74,41 @@ class MachExceptionHandler
     MachExceptionHandler();
     ~MachExceptionHandler() { uninstall(); }
     mach_port_t port() const { return port_; }
     bool installed() const { return installed_; }
     bool install(JSContext* cx);
 };
 #endif
 
-// On trap, the bytecode offset to be reported in callstacks is saved.
+// Typed wrappers encapsulating the data saved by the signal handler on async
+// interrupt or trap. On interrupt, the PC at which to resume is saved. On trap,
+// the bytecode offset to be reported in callstacks is saved.
+
+struct InterruptData
+{
+    // The pc to use for unwinding purposes which is kept consistent with fp at
+    // call boundaries.
+    void* unwindPC;
+
+    // The pc at which we should return if the interrupt doesn't stop execution.
+    void* resumePC;
+
+    InterruptData(void* unwindPC, void* resumePC)
+      : unwindPC(unwindPC), resumePC(resumePC)
+    {}
+};
 
 struct TrapData
 {
-    void* resumePC;
-    void* unwoundPC;
+    void* pc;
     Trap trap;
     uint32_t bytecodeOffset;
+
+    TrapData(void* pc, Trap trap, uint32_t bytecodeOffset)
+      : pc(pc), trap(trap), bytecodeOffset(bytecodeOffset)
+    {}
 };
 
 } // namespace wasm
 } // namespace js
 
 #endif // wasm_signal_handlers_h
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -1125,16 +1125,18 @@ GenerateImportJitExit(MacroAssembler& ma
     masm.callJitNoProfiler(callee);
 
     // Note that there might be a GC thing in the JSReturnOperand now.
     // In all the code paths from here:
     // - either the value is unboxed because it was a primitive and we don't
     //   need to worry about rooting anymore.
     // - or the value needs to be rooted, but nothing can cause a GC between
     //   here and CoerceInPlace, which roots before coercing to a primitive.
+    //   In particular, this is true because wasm::InInterruptibleCode will
+    //   return false when PC is in the jit exit.
 
     // The JIT callee clobbers all registers, including WasmTlsReg and
     // FramePointer, so restore those here. During this sequence of
     // instructions, FP can't be trusted by the profiling frame iterator.
     offsets->untrustedFPStart = masm.currentOffset();
     AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
 
     masm.loadWasmTlsRegFromFrame();
@@ -1353,76 +1355,35 @@ wasm::GenerateBuiltinThunk(MacroAssemble
     if (!UseHardFpABI() && IsFloatingPointType(retType))
         masm.ma_vxfer(r0, r1, d0);
 #endif
 
     GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
     return FinishOffsets(masm, offsets);
 }
 
-#if defined(JS_CODEGEN_ARM)
-static const LiveRegisterSet RegsToPreserve(
-    GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
-                                              (uint32_t(1) << Registers::pc))),
-    FloatRegisterSet(FloatRegisters::AllDoubleMask));
-static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-static const LiveRegisterSet RegsToPreserve(
-    GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::k0) |
-                                              (uint32_t(1) << Registers::k1) |
-                                              (uint32_t(1) << Registers::sp) |
-                                              (uint32_t(1) << Registers::zero))),
-    FloatRegisterSet(FloatRegisters::AllDoubleMask));
-static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
-#else
-static const LiveRegisterSet RegsToPreserve(
-    GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
-    FloatRegisterSet(FloatRegisters::AllMask));
-#endif
-
 // Generate a stub which calls WasmReportTrap() and can be executed by having
 // the signal handler redirect PC from any trapping instruction.
 static bool
 GenerateTrapExit(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
 {
     masm.haltingAlign(CodeAlignment);
 
     offsets->begin = masm.currentOffset();
 
-    // Traps can only happen at well-defined program points. However, since
-    // traps may resume and the optimal assumption for the surrounding code is
-    // that registers are not clobbered, we need to preserve all registers in
-    // the trap exit. One simplifying assumption is that flags may be clobbered.
-    // Push a dummy word to use as return address below.
-    masm.push(ImmWord(0));
-    masm.setFramePushed(0);
-    masm.PushRegsInMask(RegsToPreserve);
-
     // We know that StackPointer is word-aligned, but not necessarily
     // stack-aligned, so we need to align it dynamically.
-    Register preAlignStackPointer = ABINonVolatileReg;
-    masm.moveStackPtrTo(preAlignStackPointer);
     masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
     if (ShadowStackSpace)
         masm.subFromStackPtr(Imm32(ShadowStackSpace));
 
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(SymbolicAddress::OnTrap);
-
-    // OnTrap returns null if control should transfer to the throw stub.
-    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+    masm.call(SymbolicAddress::ReportTrap);
 
-    // Otherwise, the return value is the TrapData::resumePC we must jump to.
-    // We must restore register state before jumping, which will clobber
-    // ReturnReg, so store ReturnReg in the above-reserved stack slot which we
-    // use to jump to via ret.
-    masm.moveToStackPtr(preAlignStackPointer);
-    masm.storePtr(ReturnReg, Address(masm.getStackPointer(), masm.framePushed()));
-    masm.PopRegsInMask(RegsToPreserve);
-    masm.ret();
+    masm.jump(throwLabel);
 
     return FinishOffsets(masm, offsets);
 }
 
 // Generate a stub that calls into WasmOldReportTrap with the right trap reason.
 // This stub is called with ABIStackAlignment by a trap out-of-line path. An
 // exit prologue/epilogue is used so that stack unwinding picks up the
 // current JitActivation. Unwinding will begin at the caller of this trap exit.
@@ -1496,30 +1457,209 @@ GenerateOutOfBoundsExit(MacroAssembler& 
 
 static bool
 GenerateUnalignedExit(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
 {
     return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportUnalignedAccess, throwLabel,
                                            offsets);
 }
 
+#if defined(JS_CODEGEN_ARM)
+static const LiveRegisterSet AllRegsExceptPCSP(
+    GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
+                                              (uint32_t(1) << Registers::pc))),
+    FloatRegisterSet(FloatRegisters::AllDoubleMask));
+static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+static const LiveRegisterSet AllUserRegsExceptSP(
+    GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::k0) |
+                                              (uint32_t(1) << Registers::k1) |
+                                              (uint32_t(1) << Registers::sp) |
+                                              (uint32_t(1) << Registers::zero))),
+    FloatRegisterSet(FloatRegisters::AllDoubleMask));
+static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
+#else
+static const LiveRegisterSet AllRegsExceptSP(
+    GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
+    FloatRegisterSet(FloatRegisters::AllMask));
+#endif
+
+// The async interrupt-callback exit is called from arbitrarily-interrupted wasm
+// code. It calls into the WasmHandleExecutionInterrupt to determine whether we must
+// really halt execution which can reenter the VM (e.g., to display the slow
+// script dialog). If execution is not interrupted, this stub must carefully
+// preserve *all* register state. If execution is interrupted, the entire
+// activation will be popped by the throw stub, so register state does not need
+// to be restored.
+static bool
+GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
+{
+    masm.haltingAlign(CodeAlignment);
+
+    offsets->begin = masm.currentOffset();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+    // Be very careful here not to perturb the machine state before saving it
+    // to the stack. In particular, add/sub instructions may set conditions in
+    // the flags register.
+    masm.push(Imm32(0));            // space used as return address, updated below
+    masm.setFramePushed(0);         // set to 0 now so that framePushed is offset of return address
+    masm.PushFlags();               // after this we are safe to use sub
+    masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)
+
+    // We know that StackPointer is word-aligned, but not necessarily
+    // stack-aligned, so we need to align it dynamically.
+    masm.moveStackPtrTo(ABINonVolatileReg);
+    masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+    if (ShadowStackSpace)
+        masm.subFromStackPtr(Imm32(ShadowStackSpace));
+
+    // Make the call to C++, which preserves ABINonVolatileReg.
+    masm.assertStackAlignment(ABIStackAlignment);
+    masm.call(SymbolicAddress::HandleExecutionInterrupt);
+
+    // HandleExecutionInterrupt returns null if execution is interrupted and
+    // the resumption pc otherwise.
+    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+    // Restore the stack pointer then store resumePC into the stack slot that
+    // will be popped by the 'ret' below.
+    masm.moveToStackPtr(ABINonVolatileReg);
+    masm.storePtr(ReturnReg, Address(StackPointer, masm.framePushed()));
+
+    // Restore the machine state to before the interrupt. After popping flags,
+    // no instructions can be executed which set flags.
+    masm.PopRegsInMask(AllRegsExceptSP);
+    masm.PopFlags();
+
+    // Return to the resumePC stored into this stack slot above.
+    MOZ_ASSERT(masm.framePushed() == 0);
+    masm.ret();
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+    // Reserve space to store resumePC and HeapReg.
+    masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
+    // Set to zero so we can use masm.framePushed() below.
+    masm.setFramePushed(0);
+
+    // Save all registers, except sp.
+    masm.PushRegsInMask(AllUserRegsExceptSP);
+
+    // Save the stack pointer and FCSR in non-volatile registers.
+    masm.moveStackPtrTo(s0);
+    masm.as_cfc1(s1, Assembler::FCSR);
+
+    // Align the stack.
+    masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+
+    // Store HeapReg into the reserved space.
+    masm.storePtr(HeapReg, Address(s0, masm.framePushed() + sizeof(intptr_t)));
+
+# ifdef USES_O32_ABI
+    // MIPS ABI requires reserving stack for registers $a0 to $a3.
+    masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));
+# endif
+
+    masm.assertStackAlignment(ABIStackAlignment);
+    masm.call(SymbolicAddress::HandleExecutionInterrupt);
+
+    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+    // This will restore stack to the address before the call.
+    masm.moveToStackPtr(s0);
+
+    // Restore FCSR.
+    masm.as_ctc1(s1, Assembler::FCSR);
+
+    // Store resumePC into the reserved space.
+    masm.storePtr(ReturnReg, Address(s0, masm.framePushed()));
+
+    masm.PopRegsInMask(AllUserRegsExceptSP);
+
+    // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
+    // in the jump delay slot.
+    masm.loadPtr(Address(StackPointer, 0), HeapReg);
+    // Reclaim the reserve space.
+    masm.addToStackPtr(Imm32(2 * sizeof(intptr_t)));
+    masm.as_jr(HeapReg);
+    masm.loadPtr(Address(StackPointer, -int32_t(sizeof(intptr_t))), HeapReg);
+#elif defined(JS_CODEGEN_ARM)
+    {
+        // Be careful not to clobber scratch registers before they are saved.
+        ScratchRegisterScope scratch(masm);
+        SecondScratchRegisterScope secondScratch(masm);
+
+        // Reserve a word to receive the return address.
+        masm.as_alu(StackPointer, StackPointer, Imm8(4), OpSub);
+
+        // Set framePushed to 0 now so that framePushed can be used later as the
+        // stack offset to the return-address space reserved above.
+        masm.setFramePushed(0);
+
+        // Save all GP/FP registers (except PC and SP).
+        masm.PushRegsInMask(AllRegsExceptPCSP);
+    }
+
+    // Save SP, APSR and FPSCR in non-volatile registers.
+    masm.as_mrs(r4);
+    masm.as_vmrs(r5);
+    masm.mov(sp, r6);
+
+    // We know that StackPointer is word-aligned, but not necessarily
+    // stack-aligned, so we need to align it dynamically.
+    masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+
+    // Make the call to C++, which preserves the non-volatile registers.
+    masm.assertStackAlignment(ABIStackAlignment);
+    masm.call(SymbolicAddress::HandleExecutionInterrupt);
+
+    // HandleExecutionInterrupt returns null if execution is interrupted and
+    // the resumption pc otherwise.
+    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+    // Restore the stack pointer then store resumePC into the stack slot that
+    // will be popped by the 'ret' below.
+    masm.mov(r6, sp);
+    masm.storePtr(ReturnReg, Address(sp, masm.framePushed()));
+
+    // Restore the machine state to before the interrupt. After popping flags,
+    // no instructions can be executed which set flags.
+    masm.as_vmsr(r5);
+    masm.as_msr(r4);
+    masm.PopRegsInMask(AllRegsExceptPCSP);
+
+    // Return to the resumePC stored into this stack slot above.
+    MOZ_ASSERT(masm.framePushed() == 0);
+    masm.ret();
+#elif defined(JS_CODEGEN_ARM64)
+    MOZ_CRASH();
+#elif defined (JS_CODEGEN_NONE)
+    MOZ_CRASH();
+#else
+# error "Unknown architecture!"
+#endif
+
+    return FinishOffsets(masm, offsets);
+}
+
 // Generate a stub that restores the stack pointer to what it was on entry to
 // the wasm activation, sets the return register to 'false' and then executes a
 // return which will return from this wasm activation to the caller. This stub
-// should only be called after the caller has reported an error.
+// should only be called after the caller has reported an error (or, in the case
+// of the interrupt stub, intends to interrupt execution).
 static bool
 GenerateThrowStub(MacroAssembler& masm, Label* throwLabel, Offsets* offsets)
 {
     masm.haltingAlign(CodeAlignment);
 
     masm.bind(throwLabel);
 
     offsets->begin = masm.currentOffset();
 
-    // Conservatively, the stack pointer can be unaligned and we must align it
+    // The throw stub can be jumped to from an async interrupt that is halting
+    // execution. Thus the stack pointer can be unaligned and we must align it
     // dynamically.
     masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
     if (ShadowStackSpace)
         masm.subFromStackPtr(Imm32(ShadowStackSpace));
 
     // WasmHandleThrow unwinds JitActivation::wasmExitFP() and returns the
     // address of the return address on the stack this stub should return to.
     // Set the FramePointer to a magic value to indicate a return by throw.
@@ -1654,17 +1794,16 @@ wasm::GenerateStubs(const ModuleEnvironm
           case Trap::Unreachable:
           case Trap::IntegerOverflow:
           case Trap::InvalidConversionToInteger:
           case Trap::IntegerDivideByZero:
           case Trap::IndirectCallToNull:
           case Trap::IndirectCallBadSig:
           case Trap::ImpreciseSimdConversion:
           case Trap::StackOverflow:
-          case Trap::CheckInterrupt:
           case Trap::ThrowReported:
             break;
           // The TODO list of "old" traps to convert to new traps:
           case Trap::OutOfBounds:
           case Trap::UnalignedAccess: {
             CallableOffsets offsets;
             if (!GenerateOldTrapExit(masm, trap, &throwLabel, &offsets))
                 return false;
@@ -1691,21 +1830,28 @@ wasm::GenerateStubs(const ModuleEnvironm
     if (!code->codeRanges.emplaceBack(CodeRange::UnalignedExit, offsets))
         return false;
 
     if (!GenerateTrapExit(masm, &throwLabel, &offsets))
         return false;
     if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets))
         return false;
 
-    CallableOffsets callableOffsets;
-    if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets))
+    if (!GenerateInterruptExit(masm, &throwLabel, &offsets))
+        return false;
+    if (!code->codeRanges.emplaceBack(CodeRange::Interrupt, offsets))
         return false;
-    if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets))
-        return false;
+
+    {
+        CallableOffsets offsets;
+        if (!GenerateDebugTrapStub(masm, &throwLabel, &offsets))
+            return false;
+        if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, offsets))
+            return false;
+    }
 
     if (!GenerateThrowStub(masm, &throwLabel, &offsets))
         return false;
     if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets))
         return false;
 
     masm.finish();
     if (masm.oom())
--- a/js/src/wasm/WasmTypes.cpp
+++ b/js/src/wasm/WasmTypes.cpp
@@ -779,16 +779,17 @@ CodeRange::CodeRange(Kind kind, Offsets 
     PodZero(&u);
 #ifdef DEBUG
     switch (kind_) {
       case FarJumpIsland:
       case OutOfBoundsExit:
       case UnalignedExit:
       case TrapExit:
       case Throw:
+      case Interrupt:
         break;
       default:
         MOZ_CRASH("should use more specific constructor");
     }
 #endif
 }
 
 CodeRange::CodeRange(Kind kind, uint32_t funcIndex, Offsets offsets)
@@ -906,28 +907,8 @@ wasm::CreateTlsData(uint32_t globalDataL
     if (!allocatedBase)
         return nullptr;
 
     auto* tlsData = reinterpret_cast<TlsData*>(AlignBytes(uintptr_t(allocatedBase), TlsDataAlign));
     tlsData->allocatedBase = allocatedBase;
 
     return UniqueTlsData(tlsData);
 }
-
-void
-TlsData::setInterrupt()
-{
-    interrupt = true;
-    stackLimit = UINTPTR_MAX;
-}
-
-bool
-TlsData::isInterrupted() const
-{
-    return interrupt || stackLimit == UINTPTR_MAX;
-}
-
-void
-TlsData::resetInterrupt(JSContext* cx)
-{
-    interrupt = false;
-    stackLimit = cx->stackLimitForJitCode(JS::StackForUntrustedScript);
-}
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -80,36 +80,35 @@ using mozilla::Move;
 using mozilla::MallocSizeOf;
 using mozilla::Nothing;
 using mozilla::PodZero;
 using mozilla::PodCopy;
 using mozilla::PodEqual;
 using mozilla::Some;
 using mozilla::Unused;
 
+typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
+typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
+typedef UniquePtr<Bytes> UniqueBytes;
+typedef UniquePtr<const Bytes> UniqueConstBytes;
+typedef Vector<char, 0, SystemAllocPolicy> UTF8Bytes;
+
 typedef int8_t I8x16[16];
 typedef int16_t I16x8[8];
 typedef int32_t I32x4[4];
 typedef float F32x4[4];
 
 class Code;
 class DebugState;
 class GeneratedSourceMap;
 class Memory;
 class Module;
 class Instance;
 class Table;
 
-typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
-typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
-typedef UniquePtr<Bytes> UniqueBytes;
-typedef UniquePtr<const Bytes> UniqueConstBytes;
-typedef Vector<char, 0, SystemAllocPolicy> UTF8Bytes;
-typedef Vector<Instance*, 0, SystemAllocPolicy> InstanceVector;
-
 // To call Vector::podResizeToFit, a type must specialize mozilla::IsPod
 // which is pretty verbose to do within js::wasm, so factor that process out
 // into a macro.
 
 #define WASM_DECLARE_POD_VECTOR(Type, VectorName)                               \
 } } namespace mozilla {                                                         \
 template <> struct IsPod<js::wasm::Type> : TrueType {};                         \
 } namespace js { namespace wasm {                                               \
@@ -924,20 +923,16 @@ enum class Trap
     // (asm.js only) SIMD float to int conversion failed because the input
     // wasn't in bounds.
     ImpreciseSimdConversion,
 
     // The internal stack space was exhausted. For compatibility, this throws
     // the same over-recursed error as JS.
     StackOverflow,
 
-    // The wasm execution has potentially run too long and the engine must call
-    // CheckForInterrupt(). This trap is resumable.
-    CheckInterrupt,
-
     // Signal an error that was reported in C++ code.
     ThrowReported,
 
     Limit
 };
 
 // A wrapper around the bytecode offset of a wasm instruction within a whole
 // module, used for trap offsets or call offsets. These offsets should refer to
@@ -1064,16 +1059,17 @@ class CodeRange
         BuiltinThunk,      // fast-path calling from wasm into a C++ native
         TrapExit,          // calls C++ to report and jumps to throw stub
         OldTrapExit,       // calls C++ to report and jumps to throw stub
         DebugTrap,         // calls C++ to handle debug event
         FarJumpIsland,     // inserted to connect otherwise out-of-range insns
         OutOfBoundsExit,   // stub jumped to by non-standard asm.js SIMD/Atomics
         UnalignedExit,     // stub jumped to by wasm Atomics and non-standard
                            // ARM unaligned trap
+        Interrupt,         // stub executes asynchronously to interrupt wasm
         Throw              // special stack-unwinding stub jumped to by other stubs
     };
 
   private:
     // All fields are treated as cacheable POD:
     uint32_t begin_;
     uint32_t ret_;
     uint32_t end_;
@@ -1372,19 +1368,20 @@ enum class SymbolicAddress
     TruncD,
     TruncF,
     NearbyIntD,
     NearbyIntF,
     ExpD,
     LogD,
     PowD,
     ATan2D,
+    HandleExecutionInterrupt,
     HandleDebugTrap,
     HandleThrow,
-    OnTrap,
+    ReportTrap,
     OldReportTrap,
     ReportOutOfBounds,
     ReportUnalignedAccess,
     ReportInt64JSCall,
     CallImport_Void,
     CallImport_I32,
     CallImport_I64,
     CallImport_F64,
@@ -1527,29 +1524,19 @@ struct TlsData
 #endif
 
     // Pointer to the Instance that contains this TLS data.
     Instance* instance;
 
     // The containing JSContext.
     JSContext* cx;
 
-    // Usually equal to cx->stackLimitForJitCode(JS::StackForUntrustedScript),
-    // but can be racily set to trigger immediate trap as an opportunity to
-    // CheckForInterrupt without an additional branch.
-    Atomic<uintptr_t, mozilla::Relaxed> stackLimit;
-
-    // Set to 1 when wasm should call CheckForInterrupt.
-    Atomic<uint32_t, mozilla::Relaxed> interrupt;
-
-    // Methods to set, test and clear the above two fields. Both interrupt
-    // fields are Relaxed and so no consistency/ordering can be assumed.
-    void setInterrupt();
-    bool isInterrupted() const;
-    void resetInterrupt(JSContext* cx);
+    // The native stack limit which is checked by prologues. Shortcut for
+    // cx->stackLimitForJitCode(JS::StackForUntrustedScript).
+    uintptr_t stackLimit;
 
     // Pointer that should be freed (due to padding before the TlsData).
     void* allocatedBase;
 
     // When compiling with tiering, the jumpTable has one entry for each
     // baseline-compiled function.
     void** jumpTable;
 
--- a/js/xpconnect/src/XPCJSRuntime.cpp
+++ b/js/xpconnect/src/XPCJSRuntime.cpp
@@ -2388,21 +2388,16 @@ JSReporter::CollectReports(WindowPaths* 
         KIND_OTHER, rtTotal,
         "The sum of all measurements under 'explicit/js-non-window/runtime/'.");
 
     // Report the numbers for memory used by tracelogger.
     REPORT_BYTES(NS_LITERAL_CSTRING("tracelogger"),
         KIND_OTHER, rtStats.runtime.tracelogger,
         "The memory used for the tracelogger, including the graph and events.");
 
-    // Report the numbers for memory used by wasm Runtime state.
-    REPORT_BYTES(NS_LITERAL_CSTRING("wasm-runtime"),
-        KIND_OTHER, rtStats.runtime.wasmRuntime,
-        "The memory used for wasm runtime bookkeeping.");
-
     // Report the numbers for memory outside of compartments.
 
     REPORT_BYTES(NS_LITERAL_CSTRING("js-main-runtime/gc-heap/unused-chunks"),
         KIND_OTHER, rtStats.gcHeapUnusedChunks,
         "The same as 'explicit/js-non-window/gc-heap/unused-chunks'.");
 
     REPORT_BYTES(NS_LITERAL_CSTRING("js-main-runtime/gc-heap/unused-arenas"),
         KIND_OTHER, rtStats.gcHeapUnusedArenas,