Bug 1181612: Split AsmJSValidate into AsmJS{Validate,Compile} and different headers; r=luke
author: Benjamin Bouvier <benj@benj.me>
date: Wed, 16 Sep 2015 19:30:23 +0200
changeset 295663 f3b279e0309573e39f44a8285fad5e7b6d6fe002
parent 295662 80822711aca56dac51b0e311bc07f25d551ecead
child 295664 8a26a2abcf6164ea60d8250607ff73fbd1e7b8c8
push id: 5245
push user: raliiev@mozilla.com
push date: Thu, 29 Oct 2015 11:30:51 +0000
reviewers: luke
bugs: 1181612
milestone: 43.0a1
js/src/asmjs/AsmJSCompile.cpp
js/src/asmjs/AsmJSCompile.h
js/src/asmjs/AsmJSGlobals.h
js/src/asmjs/AsmJSValidate.cpp
js/src/devtools/rootAnalysis/annotations.js
js/src/jit/CompileWrappers.h
js/src/moz.build
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/AsmJSCompile.cpp
@@ -0,0 +1,3172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asmjs/AsmJSCompile.h"
+#include "asmjs/AsmJSGlobals.h"
+
+#include "jit/CodeGenerator.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+
+namespace js {
+// ModuleCompiler encapsulates the compilation of an entire asm.js module. Over
+// the course of a ModuleCompiler object's lifetime, many FunctionCompiler
+// objects will be created and destroyed in sequence, one for each function in
+// the module.
+//
+// *** asm.js FFI calls ***
+//
+// asm.js allows calling out to non-asm.js via "FFI calls". The asm.js type
+// system does not place any constraints on the FFI call. In particular:
+//  - an FFI call's target is not known or speculated at module-compile time;
+//  - a single external function can be called with different signatures.
+//
+// If performance didn't matter, all FFI calls could simply box their arguments
+// and call js::Invoke. However, we'd like to be able to specialize FFI calls
+// to be more efficient in several cases:
+//
+//  - for calls to JS functions which have been jitted, we'd like to call
+//    directly into JIT code without going through C++.
+//
+//  - for calls to certain builtins, we'd like to call directly into the C++
+//    code for the builtin without going through the general call path.
+//
+// All of this requires dynamic specialization techniques which must happen
+// after module compilation. To support this, at module-compilation time, each
+// FFI call generates a call signature according to the system ABI, as if the
+// callee was a C++ function taking/returning the same types as the caller was
+// passing/expecting. The callee is loaded from a fixed offset in the global
+// data array which allows the callee to change at runtime. Initially, the
+// callee is a stub which boxes its arguments and calls js::Invoke.
+//
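+// Schematically, an FFI call site then behaves like the following sketch
+// (illustrative pseudo-code, not the actual generated code; 'exit' and
+// 'globalData' are stand-ins for the module's internals):
+//
+//   callee = globalData[exit.globalDataOffset];  // patchable at runtime
+//   callee(args...);                             // system-ABI call
+//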
+// To do this, we need to generate a callee stub for each pairing of FFI callee
+// and signature. We call this pairing an "exit". For example, this code has
+// two external functions and three exits:
+//
+//  function f(global, imports) {
+//    "use asm";
+//    var foo = imports.foo;
+//    var bar = imports.bar;
+//    function g() {
+//      foo(1);      // Exit #1: (int) -> void
+//      foo(1.5);    // Exit #2: (double) -> void
+//      bar(1)|0;    // Exit #3: (int) -> int
+//      bar(2)|0;    // Exit #3: (int) -> int
+//    }
+//  }
+//
+// The ModuleCompiler maintains a hash table (ExitMap) which allows a call site
+// to add a new exit or reuse an existing one. The key is the pairing of FFI
+// callee and signature, and the value is an index into the Vector<Exit> stored
+// in the AsmJSModule.
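+//
+// For instance, for the example above the map would end up holding three
+// entries, roughly (the exact key representation is an internal detail):
+//
+//   (foo, (int) -> void)    -> exit #1
+//   (foo, (double) -> void) -> exit #2
+//   (bar, (int) -> int)     -> exit #3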
+//
+// Although ModuleCompiler isn't a MOZ_STACK_CLASS, it has the same rooting
+// properties as the ModuleValidator, and a shorter lifetime: so it is marked
+// as rooted in the rooting analysis. Don't add non-JSAtom pointers, or this
+// will break!
+class ModuleCompiler
+{
+    ModuleCompileInputs                     compileInputs_;
+    ScopedJSDeletePtr<ModuleCompileResults> compileResults_;
+
+  public:
+    explicit ModuleCompiler(const ModuleCompileInputs& inputs)
+      : compileInputs_(inputs)
+    {}
+
+    bool init() {
+        compileResults_.reset(js_new<ModuleCompileResults>());
+        return !!compileResults_;
+    }
+
+    /*************************************************** Read-only interface */
+
+    MacroAssembler& masm()          { return compileResults_->masm(); }
+    Label& stackOverflowLabel()     { return compileResults_->stackOverflowLabel(); }
+    Label& asyncInterruptLabel()    { return compileResults_->asyncInterruptLabel(); }
+    Label& syncInterruptLabel()     { return compileResults_->syncInterruptLabel(); }
+    Label& onOutOfBoundsLabel()     { return compileResults_->onOutOfBoundsLabel(); }
+    Label& onConversionErrorLabel() { return compileResults_->onConversionErrorLabel(); }
+    int64_t usecBefore()            { return compileResults_->usecBefore(); }
+
+    bool usesSignalHandlersForOOB() const   { return compileInputs_.usesSignalHandlersForOOB; }
+    CompileRuntime* runtime() const         { return compileInputs_.runtime; }
+    CompileCompartment* compartment() const { return compileInputs_.compartment; }
+
+    /***************************************************** Mutable interface */
+
+    bool getOrCreateFunctionEntry(uint32_t funcIndex, Label** label)
+    {
+        return compileResults_->getOrCreateFunctionEntry(funcIndex, label);
+    }
+
+    bool finishGeneratingFunction(AsmFunction& func, CodeGenerator& codegen,
+                                  const AsmJSFunctionLabels& labels)
+    {
+        // Code range
+        unsigned line = func.lineno();
+        unsigned column = func.column();
+        PropertyName* funcName = func.name();
+        if (!compileResults_->addCodeRange(AsmJSModule::FunctionCodeRange(funcName, line, labels)))
+            return false;
+
+        // Script counts
+        jit::IonScriptCounts* counts = codegen.extractScriptCounts();
+        if (counts && !compileResults_->addFunctionCounts(counts)) {
+            js_delete(counts);
+            return false;
+        }
+
+        // Slow functions
+        if (func.compileTime() >= 250) {
+            ModuleCompileResults::SlowFunction sf(funcName, func.compileTime(), line, column);
+            if (!compileResults_->slowFunctions().append(Move(sf)))
+                return false;
+        }
+
+#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+        // Perf and profiling information
+        unsigned begin = labels.begin.offset();
+        unsigned end = labels.end.offset();
+        AsmJSModule::ProfiledFunction profiledFunc(funcName, begin, end, line, column);
+        if (!compileResults_->addProfiledFunction(profiledFunc))
+            return false;
+#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+        return true;
+    }
+
+    void finish(ScopedJSDeletePtr<ModuleCompileResults>* results) {
+        *results = compileResults_.forget();
+    }
+};
+
+} // namespace js
+
+enum class AsmType : uint8_t {
+    Int32,
+    Float32,
+    Float64,
+    Int32x4,
+    Float32x4
+};
+
+typedef Vector<size_t, 1, SystemAllocPolicy> LabelVector;
+typedef Vector<MBasicBlock*, 8, SystemAllocPolicy> BlockVector;
+
+// Encapsulates the compilation of a single function in an asm.js module. The
+// function compiler handles the creation and final backend compilation of the
+// MIR graph. Also see ModuleCompiler comment.
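+//
+// A FunctionCompiler is typically driven as in the following sketch (based on
+// the methods below; the actual caller lives in the Emit* driver and may
+// differ in details):
+//
+//   FunctionCompiler f(m, func, lifo);
+//   if (!f.init() || !f.prepareEmitMIR(argTypes))
+//       return false;
+//   ... decode the function's bytecode via the Emit*Expr helpers,
+//       building MIR as we go ...
+//   f.checkPostconditions();
+//   MIRGenerator* mirGen = f.extractMIR();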
+class FunctionCompiler
+{
+  private:
+    typedef HashMap<uint32_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> LabeledBlockMap;
+    typedef HashMap<size_t, BlockVector, DefaultHasher<size_t>, SystemAllocPolicy> UnlabeledBlockMap;
+    typedef Vector<size_t, 4, SystemAllocPolicy> PositionStack;
+    typedef Vector<Type, 4, SystemAllocPolicy> LocalVarTypes;
+
+    ModuleCompiler &         m_;
+    LifoAlloc &              lifo_;
+    RetType                  retType_;
+
+    const AsmFunction &      func_;
+    size_t                   pc_;
+
+    TempAllocator *          alloc_;
+    MIRGraph *               graph_;
+    CompileInfo *            info_;
+    MIRGenerator *           mirGen_;
+    Maybe<JitContext>        jitContext_;
+
+    MBasicBlock *            curBlock_;
+
+    PositionStack            loopStack_;
+    PositionStack            breakableStack_;
+    UnlabeledBlockMap        unlabeledBreaks_;
+    UnlabeledBlockMap        unlabeledContinues_;
+    LabeledBlockMap          labeledBreaks_;
+    LabeledBlockMap          labeledContinues_;
+
+    LocalVarTypes            localVarTypes_;
+
+  public:
+    FunctionCompiler(ModuleCompiler& m, const AsmFunction& func, LifoAlloc& lifo)
+      : m_(m),
+        lifo_(lifo),
+        retType_(func.returnedType()),
+        func_(func),
+        pc_(0),
+        alloc_(nullptr),
+        graph_(nullptr),
+        info_(nullptr),
+        mirGen_(nullptr),
+        curBlock_(nullptr)
+    {}
+
+    ModuleCompiler &        m() const            { return m_; }
+    TempAllocator &         alloc() const        { return *alloc_; }
+    LifoAlloc &             lifo() const         { return lifo_; }
+    RetType                 returnedType() const { return retType_; }
+
+    bool init()
+    {
+        return unlabeledBreaks_.init() &&
+               unlabeledContinues_.init() &&
+               labeledBreaks_.init() &&
+               labeledContinues_.init();
+    }
+
+    void checkPostconditions()
+    {
+        MOZ_ASSERT(loopStack_.empty());
+        MOZ_ASSERT(unlabeledBreaks_.empty());
+        MOZ_ASSERT(unlabeledContinues_.empty());
+        MOZ_ASSERT(labeledBreaks_.empty());
+        MOZ_ASSERT(labeledContinues_.empty());
+        MOZ_ASSERT(inDeadCode());
+        MOZ_ASSERT(pc_ == func_.size(), "all bytecode must be consumed");
+    }
+
+    /************************* Read-only interface (after local scope setup) */
+
+    MIRGenerator & mirGen() const     { MOZ_ASSERT(mirGen_); return *mirGen_; }
+    MIRGraph &     mirGraph() const   { MOZ_ASSERT(graph_); return *graph_; }
+    CompileInfo &  info() const       { MOZ_ASSERT(info_); return *info_; }
+
+    MDefinition* getLocalDef(unsigned slot)
+    {
+        if (inDeadCode())
+            return nullptr;
+        return curBlock_->getSlot(info().localSlot(slot));
+    }
+
+    /***************************** Code generation (after local scope setup) */
+
+    MDefinition* constant(const SimdConstant& v, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+        MInstruction* constant;
+        constant = MSimdConstant::New(alloc(), v, type);
+        curBlock_->add(constant);
+        return constant;
+    }
+
+    MDefinition* constant(Value v, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+        MConstant* constant = MConstant::NewAsmJS(alloc(), v, type);
+        curBlock_->add(constant);
+        return constant;
+    }
+
+    template <class T>
+    MDefinition* unary(MDefinition* op)
+    {
+        if (inDeadCode())
+            return nullptr;
+        T* ins = T::NewAsmJS(alloc(), op);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition* unary(MDefinition* op, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+        T* ins = T::NewAsmJS(alloc(), op, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition* binary(MDefinition* lhs, MDefinition* rhs)
+    {
+        if (inDeadCode())
+            return nullptr;
+        T* ins = T::New(alloc(), lhs, rhs);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+        T* ins = T::NewAsmJS(alloc(), lhs, rhs, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* unarySimd(MDefinition* input, MSimdUnaryArith::Operation op, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(input->type()) && input->type() == type);
+        MInstruction* ins = MSimdUnaryArith::NewAsmJS(alloc(), input, op, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryArith::Operation op,
+                            MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+        MOZ_ASSERT(lhs->type() == type);
+        MSimdBinaryArith* ins = MSimdBinaryArith::NewAsmJS(alloc(), lhs, rhs, op, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryBitwise::Operation op,
+                            MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+        MOZ_ASSERT(lhs->type() == type);
+        MSimdBinaryBitwise* ins = MSimdBinaryBitwise::NewAsmJS(alloc(), lhs, rhs, op, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template<class T>
+    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, typename T::Operation op)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        T* ins = T::NewAsmJS(alloc(), lhs, rhs, op);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* swizzleSimd(MDefinition* vector, int32_t X, int32_t Y, int32_t Z, int32_t W,
+                             MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MSimdSwizzle* ins = MSimdSwizzle::New(alloc(), vector, type, X, Y, Z, W);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* shuffleSimd(MDefinition* lhs, MDefinition* rhs, int32_t X, int32_t Y,
+                             int32_t Z, int32_t W, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MInstruction* ins = MSimdShuffle::New(alloc(), lhs, rhs, type, X, Y, Z, W);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* insertElementSimd(MDefinition* vec, MDefinition* val, SimdLane lane, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(vec->type()) && vec->type() == type);
+        MOZ_ASSERT(!IsSimdType(val->type()));
+        MSimdInsertElement* ins = MSimdInsertElement::NewAsmJS(alloc(), vec, val, type, lane);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* selectSimd(MDefinition* mask, MDefinition* lhs, MDefinition* rhs, MIRType type,
+                            bool isElementWise)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(mask->type()));
+        MOZ_ASSERT(mask->type() == MIRType_Int32x4);
+        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+        MOZ_ASSERT(lhs->type() == type);
+        MSimdSelect* ins = MSimdSelect::NewAsmJS(alloc(), mask, lhs, rhs, type, isElementWise);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template<class T>
+    MDefinition* convertSimd(MDefinition* vec, MIRType from, MIRType to)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
+        T* ins = T::NewAsmJS(alloc(), vec, from, to);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* splatSimd(MDefinition* v, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(type));
+        MSimdSplatX4* ins = MSimdSplatX4::NewAsmJS(alloc(), v, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type, bool isMax) {
+        if (inDeadCode())
+            return nullptr;
+        MMinMax* ins = MMinMax::New(alloc(), lhs, rhs, type, isMax);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type, MMul::Mode mode)
+    {
+        if (inDeadCode())
+            return nullptr;
+        MMul* ins = MMul::New(alloc(), lhs, rhs, type, mode);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type, bool unsignd)
+    {
+        if (inDeadCode())
+            return nullptr;
+        MDiv* ins = MDiv::NewAsmJS(alloc(), lhs, rhs, type, unsignd);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type, bool unsignd)
+    {
+        if (inDeadCode())
+            return nullptr;
+        MMod* ins = MMod::NewAsmJS(alloc(), lhs, rhs, type, unsignd);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition* bitwise(MDefinition* lhs, MDefinition* rhs)
+    {
+        if (inDeadCode())
+            return nullptr;
+        T* ins = T::NewAsmJS(alloc(), lhs, rhs);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template <class T>
+    MDefinition* bitwise(MDefinition* op)
+    {
+        if (inDeadCode())
+            return nullptr;
+        T* ins = T::NewAsmJS(alloc(), op);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op, MCompare::CompareType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+        MCompare* ins = MCompare::NewAsmJS(alloc(), lhs, rhs, op, type);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    void assign(unsigned slot, MDefinition* def)
+    {
+        if (inDeadCode())
+            return;
+        curBlock_->setSlot(info().localSlot(slot), def);
+    }
+
+    MDefinition* loadHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD loads should use loadSimdHeap");
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck);
+        curBlock_->add(load);
+        return load;
+    }
+
+    MDefinition* loadSimdHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk,
+                              unsigned numElems)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MOZ_ASSERT(Scalar::isSimdType(accessType), "loadSimdHeap can only load from a SIMD view");
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
+                                                   numElems);
+        curBlock_->add(load);
+        return load;
+    }
+
+    void storeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD stores should use storeSimdHeap");
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck);
+        curBlock_->add(store);
+    }
+
+    void storeSimdHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v,
+                       NeedsBoundsCheck chk, unsigned numElems)
+    {
+        if (inDeadCode())
+            return;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MOZ_ASSERT(Scalar::isSimdType(accessType), "storeSimdHeap can only store to a SIMD view");
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
+                                                      numElems);
+        curBlock_->add(store);
+    }
+
+    void memoryBarrier(MemoryBarrierBits type)
+    {
+        if (inDeadCode())
+            return;
+        MMemoryBarrier* ins = MMemoryBarrier::New(alloc(), type);
+        curBlock_->add(ins);
+    }
+
+    MDefinition* atomicLoadHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
+                                                   /* numElems */ 0,
+                                                   MembarBeforeLoad, MembarAfterLoad);
+        curBlock_->add(load);
+        return load;
+    }
+
+    void atomicStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
+                                                      /* numElems = */ 0,
+                                                      MembarBeforeStore, MembarAfterStore);
+        curBlock_->add(store);
+    }
+
+    MDefinition* atomicCompareExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* oldv,
+                                           MDefinition* newv, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MAsmJSCompareExchangeHeap* cas =
+            MAsmJSCompareExchangeHeap::New(alloc(), accessType, ptr, oldv, newv, needsBoundsCheck);
+        curBlock_->add(cas);
+        return cas;
+    }
+
+    MDefinition* atomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value,
+                                    NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MAsmJSAtomicExchangeHeap* ins =
+            MAsmJSAtomicExchangeHeap::New(alloc(), accessType, ptr, value, needsBoundsCheck);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type accessType, MDefinition* ptr,
+                                 MDefinition* v, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
+        MAsmJSAtomicBinopHeap* binop =
+            MAsmJSAtomicBinopHeap::New(alloc(), op, accessType, ptr, v, needsBoundsCheck);
+        curBlock_->add(binop);
+        return binop;
+    }
+
+    MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+        MAsmJSLoadGlobalVar* load = MAsmJSLoadGlobalVar::New(alloc(), type, globalDataOffset,
+                                                             isConst);
+        curBlock_->add(load);
+        return load;
+    }
+
+    void storeGlobalVar(uint32_t globalDataOffset, MDefinition* v)
+    {
+        if (inDeadCode())
+            return;
+        curBlock_->add(MAsmJSStoreGlobalVar::New(alloc(), globalDataOffset, v));
+    }
+
+    void addInterruptCheck(unsigned lineno, unsigned column)
+    {
+        if (inDeadCode())
+            return;
+
+        CallSiteDesc callDesc(lineno, column, CallSiteDesc::Relative);
+        curBlock_->add(MAsmJSInterruptCheck::New(alloc(), &m().syncInterruptLabel(), callDesc));
+    }
+
+    MDefinition* extractSimdElement(SimdLane lane, MDefinition* base, MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(base->type()));
+        MOZ_ASSERT(!IsSimdType(type));
+        MSimdExtractElement* ins = MSimdExtractElement::NewAsmJS(alloc(), base, type, lane);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    MDefinition* extractSignMask(MDefinition* base)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(base->type()));
+        MSimdSignMask* ins = MSimdSignMask::NewAsmJS(alloc(), base);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    template<typename T>
+    MDefinition* constructSimd(MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w,
+                               MIRType type)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        MOZ_ASSERT(IsSimdType(type));
+        T* ins = T::NewAsmJS(alloc(), type, x, y, z, w);
+        curBlock_->add(ins);
+        return ins;
+    }
+
+    /***************************************************************** Calls */
+
+    // The IonMonkey backend maintains a single stack offset (from the stack
+    // pointer to the base of the frame) by adding the total amount of spill
+    // space required plus the maximum stack required for argument passing.
+    // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
+    // manually accumulate, for the entire function, the maximum required stack
+    // space for argument passing. (This is passed to the CodeGenerator via
+    // MIRGenerator::maxAsmJSStackArgBytes.) Naively, this would just be the
+    // maximum of the stack space required for each individual call (as
+    // determined by the call ABI). However, as an optimization, arguments are
+    // stored to the stack immediately after evaluation (to decrease live
+    // ranges and reduce spilling). This introduces the complexity that,
+    // between evaluating an argument and making the call, another argument
+    // evaluation could perform a call that also needs to store to the stack.
+    // When this occurs childClobbers_ = true and the parent expression's
+    // arguments are stored above the maximum depth clobbered by a child
+    // expression.
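+    //
+    // For instance (illustrative numbers, not from a real module): if an
+    // outer call has already stored a stack argument at offset 0 and a later
+    // argument's evaluation performs an inner call needing 16 bytes of stack
+    // arguments, the inner call's stores would clobber the outer argument.
+    // childClobbers_ is then set, and finishCallArgs lifts the outer call's
+    // stack arguments by spIncrement_ = AlignBytes(16, AsmJSStackAlignment).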
+
+    class Call
+    {
+        uint32_t lineno_;
+        uint32_t column_;
+        ABIArgGenerator abi_;
+        uint32_t prevMaxStackBytes_;
+        uint32_t maxChildStackBytes_;
+        uint32_t spIncrement_;
+        MAsmJSCall::Args regArgs_;
+        Vector<MAsmJSPassStackArg*, 0, SystemAllocPolicy> stackArgs_;
+        bool childClobbers_;
+
+        friend class FunctionCompiler;
+
+      public:
+        Call(FunctionCompiler& f, uint32_t lineno, uint32_t column)
+          : lineno_(lineno),
+            column_(column),
+            prevMaxStackBytes_(0),
+            maxChildStackBytes_(0),
+            spIncrement_(0),
+            childClobbers_(false)
+        { }
+    };
+
+    void startCallArgs(Call* call)
+    {
+        if (inDeadCode())
+            return;
+        call->prevMaxStackBytes_ = mirGen().resetAsmJSMaxStackArgBytes();
+    }
+
+    bool passArg(MDefinition* argDef, MIRType mirType, Call* call)
+    {
+        if (inDeadCode())
+            return true;
+
+        uint32_t childStackBytes = mirGen().resetAsmJSMaxStackArgBytes();
+        call->maxChildStackBytes_ = Max(call->maxChildStackBytes_, childStackBytes);
+        if (childStackBytes > 0 && !call->stackArgs_.empty())
+            call->childClobbers_ = true;
+
+        ABIArg arg = call->abi_.next(mirType);
+        if (arg.kind() == ABIArg::Stack) {
+            MAsmJSPassStackArg* mir = MAsmJSPassStackArg::New(alloc(), arg.offsetFromArgBase(),
+                                                              argDef);
+            curBlock_->add(mir);
+            if (!call->stackArgs_.append(mir))
+                return false;
+        } else {
+            if (!call->regArgs_.append(MAsmJSCall::Arg(arg.reg(), argDef)))
+                return false;
+        }
+        return true;
+    }
+
+    void finishCallArgs(Call* call)
+    {
+        if (inDeadCode())
+            return;
+        uint32_t parentStackBytes = call->abi_.stackBytesConsumedSoFar();
+        uint32_t newStackBytes;
+        if (call->childClobbers_) {
+            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, AsmJSStackAlignment);
+            for (unsigned i = 0; i < call->stackArgs_.length(); i++)
+                call->stackArgs_[i]->incrementOffset(call->spIncrement_);
+            newStackBytes = Max(call->prevMaxStackBytes_,
+                                call->spIncrement_ + parentStackBytes);
+        } else {
+            call->spIncrement_ = 0;
+            newStackBytes = Max(call->prevMaxStackBytes_,
+                                Max(call->maxChildStackBytes_, parentStackBytes));
+        }
+        mirGen_->setAsmJSMaxStackArgBytes(newStackBytes);
+    }
+
+  private:
+    bool callPrivate(MAsmJSCall::Callee callee, const Call& call, MIRType returnType, MDefinition** def)
+    {
+        if (inDeadCode()) {
+            *def = nullptr;
+            return true;
+        }
+
+        CallSiteDesc::Kind kind = CallSiteDesc::Kind(-1);  // initialize to silence GCC warning
+        switch (callee.which()) {
+          case MAsmJSCall::Callee::Internal: kind = CallSiteDesc::Relative; break;
+          case MAsmJSCall::Callee::Dynamic:  kind = CallSiteDesc::Register; break;
+          case MAsmJSCall::Callee::Builtin:  kind = CallSiteDesc::Register; break;
+        }
+
+        MAsmJSCall* ins = MAsmJSCall::New(alloc(), CallSiteDesc(call.lineno_, call.column_, kind),
+                                          callee, call.regArgs_, returnType, call.spIncrement_);
+        if (!ins)
+            return false;
+
+        curBlock_->add(ins);
+        *def = ins;
+        return true;
+    }
+
+  public:
+    bool internalCall(const Signature& sig, Label* entry, const Call& call, MDefinition** def)
+    {
+        MIRType returnType = sig.retType().toMIRType();
+        return callPrivate(MAsmJSCall::Callee(entry), call, returnType, def);
+    }
+
+    bool funcPtrCall(const Signature& sig, uint32_t maskLit, uint32_t globalDataOffset, MDefinition* index,
+                     const Call& call, MDefinition** def)
+    {
+        if (inDeadCode()) {
+            *def = nullptr;
+            return true;
+        }
+
+        MConstant* mask = MConstant::New(alloc(), Int32Value(maskLit));
+        curBlock_->add(mask);
+        MBitAnd* maskedIndex = MBitAnd::NewAsmJS(alloc(), index, mask);
+        curBlock_->add(maskedIndex);
+        MAsmJSLoadFuncPtr* ptrFun = MAsmJSLoadFuncPtr::New(alloc(), globalDataOffset, maskedIndex);
+        curBlock_->add(ptrFun);
+
+        MIRType returnType = sig.retType().toMIRType();
+        return callPrivate(MAsmJSCall::Callee(ptrFun), call, returnType, def);
+    }
+
+    bool ffiCall(unsigned globalDataOffset, const Call& call, MIRType returnType, MDefinition** def)
+    {
+        if (inDeadCode()) {
+            *def = nullptr;
+            return true;
+        }
+
+        MAsmJSLoadFFIFunc* ptrFun = MAsmJSLoadFFIFunc::New(alloc(), globalDataOffset);
+        curBlock_->add(ptrFun);
+
+        return callPrivate(MAsmJSCall::Callee(ptrFun), call, returnType, def);
+    }
+
+    bool builtinCall(AsmJSImmKind builtin, const Call& call, MIRType returnType, MDefinition** def)
+    {
+        return callPrivate(MAsmJSCall::Callee(builtin), call, returnType, def);
+    }
+
+    /*********************************************** Control flow generation */
+
+    inline bool inDeadCode() const {
+        return curBlock_ == nullptr;
+    }
+
+    void returnExpr(MDefinition* expr)
+    {
+        if (inDeadCode())
+            return;
+        MAsmJSReturn* ins = MAsmJSReturn::New(alloc(), expr);
+        curBlock_->end(ins);
+        curBlock_ = nullptr;
+    }
+
+    void returnVoid()
+    {
+        if (inDeadCode())
+            return;
+        MAsmJSVoidReturn* ins = MAsmJSVoidReturn::New(alloc());
+        curBlock_->end(ins);
+        curBlock_ = nullptr;
+    }
+
+    bool branchAndStartThen(MDefinition* cond, MBasicBlock** thenBlock, MBasicBlock** elseBlock)
+    {
+        if (inDeadCode())
+            return true;
+
+        bool hasThenBlock = *thenBlock != nullptr;
+        bool hasElseBlock = *elseBlock != nullptr;
+
+        if (!hasThenBlock && !newBlock(curBlock_, thenBlock))
+            return false;
+        if (!hasElseBlock && !newBlock(curBlock_, elseBlock))
+            return false;
+
+        curBlock_->end(MTest::New(alloc(), cond, *thenBlock, *elseBlock));
+
+        // Only add as a predecessor if newBlock hasn't been called (newBlock adds the predecessor itself)
+        if (hasThenBlock && !(*thenBlock)->addPredecessor(alloc(), curBlock_))
+            return false;
+        if (hasElseBlock && !(*elseBlock)->addPredecessor(alloc(), curBlock_))
+            return false;
+
+        curBlock_ = *thenBlock;
+        mirGraph().moveBlockToEnd(curBlock_);
+        return true;
+    }
+
+    void assertCurrentBlockIs(MBasicBlock* block) {
+        if (inDeadCode())
+            return;
+        MOZ_ASSERT(curBlock_ == block);
+    }
+
+    bool appendThenBlock(BlockVector* thenBlocks)
+    {
+        if (inDeadCode())
+            return true;
+        return thenBlocks->append(curBlock_);
+    }
+
+    bool joinIf(const BlockVector& thenBlocks, MBasicBlock* joinBlock)
+    {
+        if (!joinBlock)
+            return true;
+        MOZ_ASSERT_IF(curBlock_, thenBlocks.back() == curBlock_);
+        for (size_t i = 0; i < thenBlocks.length(); i++) {
+            thenBlocks[i]->end(MGoto::New(alloc(), joinBlock));
+            if (!joinBlock->addPredecessor(alloc(), thenBlocks[i]))
+                return false;
+        }
+        curBlock_ = joinBlock;
+        mirGraph().moveBlockToEnd(curBlock_);
+        return true;
+    }
+
+    void switchToElse(MBasicBlock* elseBlock)
+    {
+        if (!elseBlock)
+            return;
+        curBlock_ = elseBlock;
+        mirGraph().moveBlockToEnd(curBlock_);
+    }
+
+    bool joinIfElse(const BlockVector& thenBlocks)
+    {
+        if (inDeadCode() && thenBlocks.empty())
+            return true;
+        MBasicBlock* pred = curBlock_ ? curBlock_ : thenBlocks[0];
+        MBasicBlock* join;
+        if (!newBlock(pred, &join))
+            return false;
+        if (curBlock_)
+            curBlock_->end(MGoto::New(alloc(), join));
+        for (size_t i = 0; i < thenBlocks.length(); i++) {
+            thenBlocks[i]->end(MGoto::New(alloc(), join));
+            if (pred == curBlock_ || i > 0) {
+                if (!join->addPredecessor(alloc(), thenBlocks[i]))
+                    return false;
+            }
+        }
+        curBlock_ = join;
+        return true;
+    }
+
+    void pushPhiInput(MDefinition* def)
+    {
+        if (inDeadCode())
+            return;
+        MOZ_ASSERT(curBlock_->stackDepth() == info().firstStackSlot());
+        curBlock_->push(def);
+    }
+
+    MDefinition* popPhiOutput()
+    {
+        if (inDeadCode())
+            return nullptr;
+        MOZ_ASSERT(curBlock_->stackDepth() == info().firstStackSlot() + 1);
+        return curBlock_->pop();
+    }
+
+    bool startPendingLoop(size_t pos, MBasicBlock** loopEntry)
+    {
+        if (!loopStack_.append(pos) || !breakableStack_.append(pos))
+            return false;
+        if (inDeadCode()) {
+            *loopEntry = nullptr;
+            return true;
+        }
+        MOZ_ASSERT(curBlock_->loopDepth() == loopStack_.length() - 1);
+        *loopEntry = MBasicBlock::NewAsmJS(mirGraph(), info(), curBlock_,
+                                           MBasicBlock::PENDING_LOOP_HEADER);
+        if (!*loopEntry)
+            return false;
+        mirGraph().addBlock(*loopEntry);
+        (*loopEntry)->setLoopDepth(loopStack_.length());
+        curBlock_->end(MGoto::New(alloc(), *loopEntry));
+        curBlock_ = *loopEntry;
+        return true;
+    }
+
+    bool branchAndStartLoopBody(MDefinition* cond, MBasicBlock** afterLoop)
+    {
+        if (inDeadCode()) {
+            *afterLoop = nullptr;
+            return true;
+        }
+        MOZ_ASSERT(curBlock_->loopDepth() > 0);
+        MBasicBlock* body;
+        if (!newBlock(curBlock_, &body))
+            return false;
+        if (cond->isConstant() && cond->toConstant()->valueToBoolean()) {
+            *afterLoop = nullptr;
+            curBlock_->end(MGoto::New(alloc(), body));
+        } else {
+            if (!newBlockWithDepth(curBlock_, curBlock_->loopDepth() - 1, afterLoop))
+                return false;
+            curBlock_->end(MTest::New(alloc(), cond, body, *afterLoop));
+        }
+        curBlock_ = body;
+        return true;
+    }
+
+  private:
+    size_t popLoop()
+    {
+        size_t pos = loopStack_.popCopy();
+        MOZ_ASSERT(!unlabeledContinues_.has(pos));
+        breakableStack_.popBack();
+        return pos;
+    }
+
+  public:
+    bool closeLoop(MBasicBlock* loopEntry, MBasicBlock* afterLoop)
+    {
+        size_t pos = popLoop();
+        if (!loopEntry) {
+            MOZ_ASSERT(!afterLoop);
+            MOZ_ASSERT(inDeadCode());
+            MOZ_ASSERT(!unlabeledBreaks_.has(pos));
+            return true;
+        }
+        MOZ_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1);
+        MOZ_ASSERT_IF(afterLoop, afterLoop->loopDepth() == loopStack_.length());
+        if (curBlock_) {
+            MOZ_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1);
+            curBlock_->end(MGoto::New(alloc(), loopEntry));
+            if (!loopEntry->setBackedgeAsmJS(curBlock_))
+                return false;
+        }
+        curBlock_ = afterLoop;
+        if (curBlock_)
+            mirGraph().moveBlockToEnd(curBlock_);
+        return bindUnlabeledBreaks(pos);
+    }
+
+    bool branchAndCloseDoWhileLoop(MDefinition* cond, MBasicBlock* loopEntry)
+    {
+        size_t pos = popLoop();
+        if (!loopEntry) {
+            MOZ_ASSERT(inDeadCode());
+            MOZ_ASSERT(!unlabeledBreaks_.has(pos));
+            return true;
+        }
+        MOZ_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1);
+        if (curBlock_) {
+            MOZ_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1);
+            if (cond->isConstant()) {
+                if (cond->toConstant()->valueToBoolean()) {
+                    curBlock_->end(MGoto::New(alloc(), loopEntry));
+                    if (!loopEntry->setBackedgeAsmJS(curBlock_))
+                        return false;
+                    curBlock_ = nullptr;
+                } else {
+                    MBasicBlock* afterLoop;
+                    if (!newBlock(curBlock_, &afterLoop))
+                        return false;
+                    curBlock_->end(MGoto::New(alloc(), afterLoop));
+                    curBlock_ = afterLoop;
+                }
+            } else {
+                MBasicBlock* afterLoop;
+                if (!newBlock(curBlock_, &afterLoop))
+                    return false;
+                curBlock_->end(MTest::New(alloc(), cond, loopEntry, afterLoop));
+                if (!loopEntry->setBackedgeAsmJS(curBlock_))
+                    return false;
+                curBlock_ = afterLoop;
+            }
+        }
+        return bindUnlabeledBreaks(pos);
+    }
+
+    bool bindContinues(size_t pos, const LabelVector* maybeLabels)
+    {
+        bool createdJoinBlock = false;
+        if (UnlabeledBlockMap::Ptr p = unlabeledContinues_.lookup(pos)) {
+            if (!bindBreaksOrContinues(&p->value(), &createdJoinBlock))
+                return false;
+            unlabeledContinues_.remove(p);
+        }
+        return bindLabeledBreaksOrContinues(maybeLabels, &labeledContinues_, &createdJoinBlock);
+    }
+
+    bool bindLabeledBreaks(const LabelVector* maybeLabels)
+    {
+        bool createdJoinBlock = false;
+        return bindLabeledBreaksOrContinues(maybeLabels, &labeledBreaks_, &createdJoinBlock);
+    }
+
+    bool addBreak(uint32_t* maybeLabelId) {
+        if (maybeLabelId)
+            return addBreakOrContinue(*maybeLabelId, &labeledBreaks_);
+        return addBreakOrContinue(breakableStack_.back(), &unlabeledBreaks_);
+    }
+
+    bool addContinue(uint32_t* maybeLabelId) {
+        if (maybeLabelId)
+            return addBreakOrContinue(*maybeLabelId, &labeledContinues_);
+        return addBreakOrContinue(loopStack_.back(), &unlabeledContinues_);
+    }
+
+    bool startSwitch(size_t pos, MDefinition* expr, int32_t low, int32_t high,
+                     MBasicBlock** switchBlock)
+    {
+        if (!breakableStack_.append(pos))
+            return false;
+        if (inDeadCode()) {
+            *switchBlock = nullptr;
+            return true;
+        }
+        curBlock_->end(MTableSwitch::New(alloc(), expr, low, high));
+        *switchBlock = curBlock_;
+        curBlock_ = nullptr;
+        return true;
+    }
+
+    bool startSwitchCase(MBasicBlock* switchBlock, MBasicBlock** next)
+    {
+        if (!switchBlock) {
+            *next = nullptr;
+            return true;
+        }
+        if (!newBlock(switchBlock, next))
+            return false;
+        if (curBlock_) {
+            curBlock_->end(MGoto::New(alloc(), *next));
+            if (!(*next)->addPredecessor(alloc(), curBlock_))
+                return false;
+        }
+        curBlock_ = *next;
+        return true;
+    }
+
+    bool startSwitchDefault(MBasicBlock* switchBlock, BlockVector* cases, MBasicBlock** defaultBlock)
+    {
+        if (!startSwitchCase(switchBlock, defaultBlock))
+            return false;
+        if (!*defaultBlock)
+            return true;
+        mirGraph().moveBlockToEnd(*defaultBlock);
+        return true;
+    }
+
+    bool joinSwitch(MBasicBlock* switchBlock, const BlockVector& cases, MBasicBlock* defaultBlock)
+    {
+        size_t pos = breakableStack_.popCopy();
+        if (!switchBlock)
+            return true;
+        MTableSwitch* mir = switchBlock->lastIns()->toTableSwitch();
+        size_t defaultIndex = mir->addDefault(defaultBlock);
+        for (unsigned i = 0; i < cases.length(); i++) {
+            if (!cases[i])
+                mir->addCase(defaultIndex);
+            else
+                mir->addCase(mir->addSuccessor(cases[i]));
+        }
+        if (curBlock_) {
+            MBasicBlock* next;
+            if (!newBlock(curBlock_, &next))
+                return false;
+            curBlock_->end(MGoto::New(alloc(), next));
+            curBlock_ = next;
+        }
+        return bindUnlabeledBreaks(pos);
+    }
+
+    /************************************************************ DECODING ***/
+
+    uint8_t  readU8()              { return func_.readU8(&pc_); }
+    uint32_t readU32()             { return func_.readU32(&pc_); }
+    int32_t  readI32()             { return func_.readI32(&pc_); }
+    float    readF32()             { return func_.readF32(&pc_); }
+    double   readF64()             { return func_.readF64(&pc_); }
+    LifoSignature* readSignature() { return func_.readSignature(&pc_); }
+    SimdConstant readI32X4()       { return func_.readI32X4(&pc_); }
+    SimdConstant readF32X4()       { return func_.readF32X4(&pc_); }
+
+    Stmt readStmtOp()              { return Stmt(readU8()); }
+
+    void assertDebugCheckPoint() {
+#ifdef DEBUG
+        MOZ_ASSERT(Stmt(readU8()) == Stmt::DebugCheckPoint);
+#endif
+    }
+
+    bool done() const { return pc_ == func_.size(); }
+    size_t pc() const { return pc_; }
+
+    bool prepareEmitMIR(const VarTypeVector& argTypes)
+    {
+        const AsmFunction::VarInitializerVector& varInitializers = func_.varInitializers();
+        size_t numLocals = func_.numLocals();
+
+        // Prepare data structures
+        alloc_  = lifo_.new_<TempAllocator>(&lifo_);
+        if (!alloc_)
+            return false;
+        jitContext_.emplace(m().runtime(), /* CompileCompartment = */ nullptr, alloc_);
+        graph_  = lifo_.new_<MIRGraph>(alloc_);
+        if (!graph_)
+            return false;
+        MOZ_ASSERT(numLocals == argTypes.length() + varInitializers.length());
+        info_   = lifo_.new_<CompileInfo>(numLocals);
+        if (!info_)
+            return false;
+        const OptimizationInfo* optimizationInfo = js_IonOptimizations.get(Optimization_AsmJS);
+        const JitCompileOptions options;
+        mirGen_ = lifo_.new_<MIRGenerator>(m().compartment(),
+                                           options, alloc_,
+                                           graph_, info_, optimizationInfo,
+                                           &m().onOutOfBoundsLabel(),
+                                           &m().onConversionErrorLabel(),
+                                           m().usesSignalHandlersForOOB());
+        if (!mirGen_)
+            return false;
+
+        if (!newBlock(/* pred = */ nullptr, &curBlock_))
+            return false;
+
+        // Emit parameters and local variables
+        for (ABIArgTypeIter i(argTypes); !i.done(); i++) {
+            MAsmJSParameter* ins = MAsmJSParameter::New(alloc(), *i, i.mirType());
+            curBlock_->add(ins);
+            curBlock_->initSlot(info().localSlot(i.index()), ins);
+            if (!mirGen_->ensureBallast())
+                return false;
+            if (!localVarTypes_.append(argTypes[i.index()].toType()))
+                return false;
+        }
+
+        unsigned firstLocalSlot = argTypes.length();
+        for (unsigned i = 0; i < varInitializers.length(); i++) {
+            const AsmJSNumLit& lit = varInitializers[i];
+            Type type = Type::Of(lit);
+            MIRType mirType = type.toMIRType();
+
+            MInstruction* ins;
+            if (lit.isSimd())
+               ins = MSimdConstant::New(alloc(), lit.simdValue(), mirType);
+            else
+               ins = MConstant::NewAsmJS(alloc(), lit.scalarValue(), mirType);
+
+            curBlock_->add(ins);
+            curBlock_->initSlot(info().localSlot(firstLocalSlot + i), ins);
+            if (!mirGen_->ensureBallast())
+                return false;
+            if (!localVarTypes_.append(type))
+                return false;
+        }
+
+        return true;
+    }
+
+    /*************************************************************************/
+
+    MIRGenerator* extractMIR()
+    {
+        MOZ_ASSERT(mirGen_ != nullptr);
+        MIRGenerator* mirGen = mirGen_;
+        mirGen_ = nullptr;
+        return mirGen;
+    }
+
+    /*************************************************************************/
+  private:
+    bool newBlockWithDepth(MBasicBlock* pred, unsigned loopDepth, MBasicBlock** block)
+    {
+        *block = MBasicBlock::NewAsmJS(mirGraph(), info(), pred, MBasicBlock::NORMAL);
+        if (!*block)
+            return false;
+        mirGraph().addBlock(*block);
+        (*block)->setLoopDepth(loopDepth);
+        return true;
+    }
+
+    bool newBlock(MBasicBlock* pred, MBasicBlock** block)
+    {
+        return newBlockWithDepth(pred, loopStack_.length(), block);
+    }
+
+    bool bindBreaksOrContinues(BlockVector* preds, bool* createdJoinBlock)
+    {
+        for (unsigned i = 0; i < preds->length(); i++) {
+            MBasicBlock* pred = (*preds)[i];
+            if (*createdJoinBlock) {
+                pred->end(MGoto::New(alloc(), curBlock_));
+                if (!curBlock_->addPredecessor(alloc(), pred))
+                    return false;
+            } else {
+                MBasicBlock* next;
+                if (!newBlock(pred, &next))
+                    return false;
+                pred->end(MGoto::New(alloc(), next));
+                if (curBlock_) {
+                    curBlock_->end(MGoto::New(alloc(), next));
+                    if (!next->addPredecessor(alloc(), curBlock_))
+                        return false;
+                }
+                curBlock_ = next;
+                *createdJoinBlock = true;
+            }
+            MOZ_ASSERT(curBlock_->begin() == curBlock_->end());
+            if (!mirGen_->ensureBallast())
+                return false;
+        }
+        preds->clear();
+        return true;
+    }
+
+    bool bindLabeledBreaksOrContinues(const LabelVector* maybeLabels, LabeledBlockMap* map,
+                                      bool* createdJoinBlock)
+    {
+        if (!maybeLabels)
+            return true;
+        const LabelVector& labels = *maybeLabels;
+        for (unsigned i = 0; i < labels.length(); i++) {
+            if (LabeledBlockMap::Ptr p = map->lookup(labels[i])) {
+                if (!bindBreaksOrContinues(&p->value(), createdJoinBlock))
+                    return false;
+                map->remove(p);
+            }
+            if (!mirGen_->ensureBallast())
+                return false;
+        }
+        return true;
+    }
+
+    template <class Key, class Map>
+    bool addBreakOrContinue(Key key, Map* map)
+    {
+        if (inDeadCode())
+            return true;
+        typename Map::AddPtr p = map->lookupForAdd(key);
+        if (!p) {
+            BlockVector empty;
+            if (!map->add(p, key, Move(empty)))
+                return false;
+        }
+        if (!p->value().append(curBlock_))
+            return false;
+        curBlock_ = nullptr;
+        return true;
+    }
+
+    bool bindUnlabeledBreaks(size_t pos)
+    {
+        bool createdJoinBlock = false;
+        if (UnlabeledBlockMap::Ptr p = unlabeledBreaks_.lookup(pos)) {
+            if (!bindBreaksOrContinues(&p->value(), &createdJoinBlock))
+                return false;
+            unlabeledBreaks_.remove(p);
+        }
+        return true;
+    }
+};
+
+static bool
+EmitLiteral(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    switch (type) {
+      case AsmType::Int32: {
+        int32_t val = f.readI32();
+        *def = f.constant(Int32Value(val), MIRType_Int32);
+        return true;
+      }
+      case AsmType::Float32: {
+        float val = f.readF32();
+        *def = f.constant(Float32Value(val), MIRType_Float32);
+        return true;
+      }
+      case AsmType::Float64: {
+        double val = f.readF64();
+        *def = f.constant(DoubleValue(val), MIRType_Double);
+        return true;
+      }
+      case AsmType::Int32x4: {
+        SimdConstant lit(f.readI32X4());
+        *def = f.constant(lit, MIRType_Int32x4);
+        return true;
+      }
+      case AsmType::Float32x4: {
+        SimdConstant lit(f.readF32X4());
+        *def = f.constant(lit, MIRType_Float32x4);
+        return true;
+      }
+    }
+    MOZ_CRASH("unexpected literal type");
+}
+
+static bool
+EmitGetLoc(FunctionCompiler& f, const DebugOnly<MIRType>& type, MDefinition** def)
+{
+    uint32_t slot = f.readU32();
+    *def = f.getLocalDef(slot);
+    MOZ_ASSERT_IF(*def, (*def)->type() == type);
+    return true;
+}
+
+static bool
+EmitGetGlo(FunctionCompiler& f, MIRType type, MDefinition** def)
+{
+    uint32_t globalDataOffset = f.readU32();
+    bool isConst = bool(f.readU8());
+    *def = f.loadGlobalVar(globalDataOffset, isConst, type);
+    return true;
+}
+
+static bool EmitI32Expr(FunctionCompiler& f, MDefinition** def);
+static bool EmitF32Expr(FunctionCompiler& f, MDefinition** def);
+static bool EmitF64Expr(FunctionCompiler& f, MDefinition** def);
+static bool EmitI32X4Expr(FunctionCompiler& f, MDefinition** def);
+static bool EmitF32X4Expr(FunctionCompiler& f, MDefinition** def);
+static bool EmitExpr(FunctionCompiler& f, AsmType type, MDefinition** def);
+
+static bool
+EmitLoadArray(FunctionCompiler& f, Scalar::Type scalarType, MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    MDefinition* ptr;
+    if (!EmitI32Expr(f, &ptr))
+        return false;
+    *def = f.loadHeap(scalarType, ptr, needsBoundsCheck);
+    return true;
+}
+
+static bool
+EmitSignMask(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* in;
+    if (!EmitExpr(f, type, &in))
+        return false;
+    *def = f.extractSignMask(in);
+    return true;
+}
+
+static bool
+EmitStore(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+
+    MDefinition* ptr;
+    if (!EmitI32Expr(f, &ptr))
+        return false;
+
+    MDefinition* rhs = nullptr;
+    switch (viewType) {
+      case Scalar::Int8:
+      case Scalar::Int16:
+      case Scalar::Int32:
+        if (!EmitI32Expr(f, &rhs))
+            return false;
+        break;
+      case Scalar::Float32:
+        if (!EmitF32Expr(f, &rhs))
+            return false;
+        break;
+      case Scalar::Float64:
+        if (!EmitF64Expr(f, &rhs))
+            return false;
+        break;
+      default: MOZ_CRASH("unexpected scalar type");
+    }
+
+    f.storeHeap(viewType, ptr, rhs, needsBoundsCheck);
+    *def = rhs;
+    return true;
+}
+
+static bool
+EmitStoreWithCoercion(FunctionCompiler& f, Scalar::Type rhsType, Scalar::Type viewType,
+                      MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    MDefinition* ptr;
+    if (!EmitI32Expr(f, &ptr))
+        return false;
+
+    MDefinition* rhs = nullptr;
+    MDefinition* coerced = nullptr;
+    if (rhsType == Scalar::Float32 && viewType == Scalar::Float64) {
+        if (!EmitF32Expr(f, &rhs))
+            return false;
+        coerced = f.unary<MToDouble>(rhs);
+    } else if (rhsType == Scalar::Float64 && viewType == Scalar::Float32) {
+        if (!EmitF64Expr(f, &rhs))
+            return false;
+        coerced = f.unary<MToFloat32>(rhs);
+    } else {
+        MOZ_CRASH("unexpected coerced store");
+    }
+
+    f.storeHeap(viewType, ptr, coerced, needsBoundsCheck);
+    *def = rhs;
+    return true;
+}
+
+static bool
+EmitSetLoc(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    uint32_t slot = f.readU32();
+    MDefinition* expr;
+    if (!EmitExpr(f, type, &expr))
+        return false;
+    f.assign(slot, expr);
+    *def = expr;
+    return true;
+}
+
+static bool
+EmitSetGlo(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    uint32_t globalDataOffset = f.readU32();
+    MDefinition* expr;
+    if (!EmitExpr(f, type, &expr))
+        return false;
+    f.storeGlobalVar(globalDataOffset, expr);
+    *def = expr;
+    return true;
+}
+
+static MIRType
+MIRTypeFromAsmType(AsmType type)
+{
+    switch (type) {
+      case AsmType::Int32:     return MIRType_Int32;
+      case AsmType::Float32:   return MIRType_Float32;
+      case AsmType::Float64:   return MIRType_Double;
+      case AsmType::Int32x4:   return MIRType_Int32x4;
+      case AsmType::Float32x4: return MIRType_Float32x4;
+    }
+    MOZ_CRASH("unexpected type in binary arith");
+}
+
+typedef bool IsMax;
+
+static bool
+EmitMathMinMax(FunctionCompiler& f, AsmType type, bool isMax, MDefinition** def)
+{
+    size_t numArgs = f.readU8();
+    MOZ_ASSERT(numArgs >= 2);
+    MDefinition* lastDef;
+    if (!EmitExpr(f, type, &lastDef))
+        return false;
+    MIRType mirType = MIRTypeFromAsmType(type);
+    for (size_t i = 1; i < numArgs; i++) {
+        MDefinition* next;
+        if (!EmitExpr(f, type, &next))
+            return false;
+        lastDef = f.minMax(lastDef, next, mirType, isMax);
+    }
+    *def = lastDef;
+    return true;
+}
+
+static bool
+EmitAtomicsLoad(FunctionCompiler& f, MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    Scalar::Type viewType = Scalar::Type(f.readU8());
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+    *def = f.atomicLoadHeap(viewType, index, needsBoundsCheck);
+    return true;
+}
+
+static bool
+EmitAtomicsStore(FunctionCompiler& f, MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    Scalar::Type viewType = Scalar::Type(f.readU8());
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+    MDefinition* value;
+    if (!EmitI32Expr(f, &value))
+        return false;
+    f.atomicStoreHeap(viewType, index, value, needsBoundsCheck);
+    *def = value;
+    return true;
+}
+
+static bool
+EmitAtomicsBinOp(FunctionCompiler& f, MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    Scalar::Type viewType = Scalar::Type(f.readU8());
+    js::jit::AtomicOp op = js::jit::AtomicOp(f.readU8());
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+    MDefinition* value;
+    if (!EmitI32Expr(f, &value))
+        return false;
+    *def = f.atomicBinopHeap(op, viewType, index, value, needsBoundsCheck);
+    return true;
+}
+
+static bool
+EmitAtomicsCompareExchange(FunctionCompiler& f, MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    Scalar::Type viewType = Scalar::Type(f.readU8());
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+    MDefinition* oldValue;
+    if (!EmitI32Expr(f, &oldValue))
+        return false;
+    MDefinition* newValue;
+    if (!EmitI32Expr(f, &newValue))
+        return false;
+    *def = f.atomicCompareExchangeHeap(viewType, index, oldValue, newValue, needsBoundsCheck);
+    return true;
+}
+
+static bool
+EmitAtomicsExchange(FunctionCompiler& f, MDefinition** def)
+{
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    Scalar::Type viewType = Scalar::Type(f.readU8());
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+    MDefinition* value;
+    if (!EmitI32Expr(f, &value))
+        return false;
+    *def = f.atomicExchangeHeap(viewType, index, value, needsBoundsCheck);
+    return true;
+}
+
+static bool
+EmitCallArgs(FunctionCompiler& f, const Signature& sig, FunctionCompiler::Call* call)
+{
+    f.startCallArgs(call);
+    for (unsigned i = 0; i < sig.args().length(); i++) {
+        MDefinition* arg = nullptr;
+        switch (sig.arg(i).which()) {
+          case VarType::Int:       if (!EmitI32Expr(f, &arg))   return false; break;
+          case VarType::Float:     if (!EmitF32Expr(f, &arg))   return false; break;
+          case VarType::Double:    if (!EmitF64Expr(f, &arg))   return false; break;
+          case VarType::Int32x4:   if (!EmitI32X4Expr(f, &arg)) return false; break;
+          case VarType::Float32x4: if (!EmitF32X4Expr(f, &arg)) return false; break;
+          default: MOZ_CRASH("unexpected vartype");
+        }
+        if (!f.passArg(arg, sig.arg(i).toMIRType(), call))
+            return false;
+    }
+    f.finishCallArgs(call);
+    return true;
+}
+
+static void
+ReadCallLineCol(FunctionCompiler& f, uint32_t* line, uint32_t* column)
+{
+    *line = f.readU32();
+    *column = f.readU32();
+}
+
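+// Every call form records its source position as [u32 line] [u32 column],
+// read by ReadCallLineCol above. An internal call is thus encoded as
+// [u32 funcIndex] [signature] [position] [args per the signature].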
+static bool
+EmitInternalCall(FunctionCompiler& f, RetType retType, MDefinition** def)
+{
+    uint32_t funcIndex = f.readU32();
+
+    Label* entry;
+    if (!f.m().getOrCreateFunctionEntry(funcIndex, &entry))
+        return false;
+
+    const Signature& sig = *f.readSignature();
+
+    MOZ_ASSERT_IF(sig.retType() != RetType::Void, sig.retType() == retType);
+
+    uint32_t lineno, column;
+    ReadCallLineCol(f, &lineno, &column);
+
+    FunctionCompiler::Call call(f, lineno, column);
+    if (!EmitCallArgs(f, sig, &call))
+        return false;
+
+    return f.internalCall(sig, entry, call, def);
+}
+
+static bool
+EmitFuncPtrCall(FunctionCompiler& f, RetType retType, MDefinition** def)
+{
+    uint32_t mask = f.readU32();
+    uint32_t globalDataOffset = f.readU32();
+
+    const Signature& sig = *f.readSignature();
+    MOZ_ASSERT_IF(sig.retType() != RetType::Void, sig.retType() == retType);
+
+    uint32_t lineno, column;
+    ReadCallLineCol(f, &lineno, &column);
+
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+
+    FunctionCompiler::Call call(f, lineno, column);
+    if (!EmitCallArgs(f, sig, &call))
+        return false;
+
+    return f.funcPtrCall(sig, mask, globalDataOffset, index, call, def);
+}
+
+static bool
+EmitFFICall(FunctionCompiler& f, RetType retType, MDefinition** def)
+{
+    uint32_t globalDataOffset = f.readU32();
+
+    const Signature& sig = *f.readSignature();
+    MOZ_ASSERT_IF(sig.retType() != RetType::Void, sig.retType() == retType);
+
+    uint32_t lineno, column;
+    ReadCallLineCol(f, &lineno, &column);
+
+    FunctionCompiler::Call call(f, lineno, column);
+    if (!EmitCallArgs(f, sig, &call))
+        return false;
+
+    return f.ffiCall(globalDataOffset, call, retType.toMIRType(), def);
+}
+
+static bool
+EmitMathBuiltinCall(FunctionCompiler& f, F32 f32, MDefinition** def)
+{
+    MOZ_ASSERT(f32 == F32::Ceil || f32 == F32::Floor);
+
+    uint32_t lineno, column;
+    ReadCallLineCol(f, &lineno, &column);
+
+    FunctionCompiler::Call call(f, lineno, column);
+    f.startCallArgs(&call);
+
+    MDefinition* firstArg;
+    if (!EmitF32Expr(f, &firstArg) || !f.passArg(firstArg, MIRType_Float32, &call))
+        return false;
+
+    f.finishCallArgs(&call);
+
+    AsmJSImmKind callee = f32 == F32::Ceil ? AsmJSImm_CeilF : AsmJSImm_FloorF;
+    return f.builtinCall(callee, call, MIRType_Float32, def);
+}
+
+static bool
+EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def)
+{
+    uint32_t lineno, column;
+    ReadCallLineCol(f, &lineno, &column);
+
+    FunctionCompiler::Call call(f, lineno, column);
+    f.startCallArgs(&call);
+
+    MDefinition* firstArg;
+    if (!EmitF64Expr(f, &firstArg) || !f.passArg(firstArg, MIRType_Double, &call))
+        return false;
+
+    if (f64 == F64::Pow || f64 == F64::Atan2) {
+        MDefinition* secondArg;
+        if (!EmitF64Expr(f, &secondArg) || !f.passArg(secondArg, MIRType_Double, &call))
+            return false;
+    }
+
+    AsmJSImmKind callee;
+    switch (f64) {
+      case F64::Ceil:  callee = AsmJSImm_CeilD; break;
+      case F64::Floor: callee = AsmJSImm_FloorD; break;
+      case F64::Sin:   callee = AsmJSImm_SinD; break;
+      case F64::Cos:   callee = AsmJSImm_CosD; break;
+      case F64::Tan:   callee = AsmJSImm_TanD; break;
+      case F64::Asin:  callee = AsmJSImm_ASinD; break;
+      case F64::Acos:  callee = AsmJSImm_ACosD; break;
+      case F64::Atan:  callee = AsmJSImm_ATanD; break;
+      case F64::Exp:   callee = AsmJSImm_ExpD; break;
+      case F64::Log:   callee = AsmJSImm_LogD; break;
+      case F64::Pow:   callee = AsmJSImm_PowD; break;
+      case F64::Atan2: callee = AsmJSImm_ATan2D; break;
+      default: MOZ_CRASH("unexpected double math builtin callee");
+    }
+
+    f.finishCallArgs(&call);
+
+    return f.builtinCall(callee, call, MIRType_Double, def);
+}
+
+static bool
+EmitSimdUnary(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MSimdUnaryArith::Operation op = MSimdUnaryArith::Operation(f.readU8());
+    MDefinition* in;
+    if (!EmitExpr(f, type, &in))
+        return false;
+    *def = f.unarySimd(in, op, MIRTypeFromAsmType(type));
+    return true;
+}
+
+template<class OpKind>
+inline bool
+EmitBinarySimdGuts(FunctionCompiler& f, AsmType type, OpKind op, MDefinition** def)
+{
+    MDefinition* lhs;
+    if (!EmitExpr(f, type, &lhs))
+        return false;
+    MDefinition* rhs;
+    if (!EmitExpr(f, type, &rhs))
+        return false;
+    *def = f.binarySimd(lhs, rhs, op, MIRTypeFromAsmType(type));
+    return true;
+}
+
+static bool
+EmitSimdBinaryArith(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MSimdBinaryArith::Operation op = MSimdBinaryArith::Operation(f.readU8());
+    return EmitBinarySimdGuts(f, type, op, def);
+}
+
+static bool
+EmitSimdBinaryBitwise(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MSimdBinaryBitwise::Operation op = MSimdBinaryBitwise::Operation(f.readU8());
+    return EmitBinarySimdGuts(f, type, op, def);
+}
+
+static bool
+EmitSimdBinaryComp(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MSimdBinaryComp::Operation op = MSimdBinaryComp::Operation(f.readU8());
+    MDefinition* lhs;
+    if (!EmitExpr(f, type, &lhs))
+        return false;
+    MDefinition* rhs;
+    if (!EmitExpr(f, type, &rhs))
+        return false;
+    *def = f.binarySimd<MSimdBinaryComp>(lhs, rhs, op);
+    return true;
+}
+
+static bool
+EmitSimdBinaryShift(FunctionCompiler& f, MDefinition** def)
+{
+    MSimdShift::Operation op = MSimdShift::Operation(f.readU8());
+    MDefinition* lhs;
+    if (!EmitI32X4Expr(f, &lhs))
+        return false;
+    MDefinition* rhs;
+    if (!EmitI32Expr(f, &rhs))
+        return false;
+    *def = f.binarySimd<MSimdShift>(lhs, rhs, op);
+    return true;
+}
+
+static MIRType
+ScalarMIRTypeFromSimdAsmType(AsmType type)
+{
+    switch (type) {
+      case AsmType::Int32:
+      case AsmType::Float32:
+      case AsmType::Float64:   break;
+      case AsmType::Int32x4:   return MIRType_Int32;
+      case AsmType::Float32x4: return MIRType_Float32;
+    }
+    MOZ_CRASH("unexpected simd type");
+}
+
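+// The lane of an extractLane operation is an I32 expression that
+// validation has presumably already checked to be a constant in [0, 3];
+// a null lane definition (dead code) is propagated as a null result.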
+static bool
+EmitExtractLane(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* vec;
+    if (!EmitExpr(f, type, &vec))
+        return false;
+
+    MDefinition* laneDef;
+    if (!EmitI32Expr(f, &laneDef))
+        return false;
+
+    if (!laneDef) {
+        *def = nullptr;
+        return true;
+    }
+
+    MOZ_ASSERT(laneDef->isConstant());
+    int32_t laneLit = laneDef->toConstant()->value().toInt32();
+    MOZ_ASSERT(laneLit >= 0 && laneLit < 4);
+    SimdLane lane = SimdLane(laneLit);
+
+    *def = f.extractSimdElement(lane, vec, ScalarMIRTypeFromSimdAsmType(type));
+    return true;
+}
+
+static AsmType
+AsmSimdTypeToScalarType(AsmType simd)
+{
+    switch (simd) {
+      case AsmType::Int32x4:   return AsmType::Int32;
+      case AsmType::Float32x4: return AsmType::Float32;
+      case AsmType::Int32:
+      case AsmType::Float32:
+      case AsmType::Float64:    break;
+    }
+    MOZ_CRASH("unexpected simd type");
+}
+
+static bool
+EmitSimdReplaceLane(FunctionCompiler& f, AsmType simdType, MDefinition** def)
+{
+    MDefinition* vector;
+    if (!EmitExpr(f, simdType, &vector))
+        return false;
+
+    MDefinition* laneDef;
+    if (!EmitI32Expr(f, &laneDef))
+        return false;
+
+    SimdLane lane;
+    if (laneDef) {
+        MOZ_ASSERT(laneDef->isConstant());
+        int32_t laneLit = laneDef->toConstant()->value().toInt32();
+        MOZ_ASSERT(laneLit >= 0 && laneLit < 4);
+        lane = SimdLane(laneLit);
+    } else {
+        lane = SimdLane(-1);
+    }
+
+    MDefinition* scalar;
+    if (!EmitExpr(f, AsmSimdTypeToScalarType(simdType), &scalar))
+        return false;
+    *def = f.insertElementSimd(vector, scalar, lane, MIRTypeFromAsmType(simdType));
+    return true;
+}
+
+template<class T>
+inline bool
+EmitSimdCast(FunctionCompiler& f, AsmType fromType, AsmType toType, MDefinition** def)
+{
+    MDefinition* in;
+    if (!EmitExpr(f, fromType, &in))
+        return false;
+    *def = f.convertSimd<T>(in, MIRTypeFromAsmType(fromType), MIRTypeFromAsmType(toType));
+    return true;
+}
+
+static bool
+EmitSimdSwizzle(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* in;
+    if (!EmitExpr(f, type, &in))
+        return false;
+
+    uint8_t lanes[4];
+    for (unsigned i = 0; i < 4; i++)
+        lanes[i] = f.readU8();
+
+    *def = f.swizzleSimd(in, lanes[0], lanes[1], lanes[2], lanes[3], MIRTypeFromAsmType(type));
+    return true;
+}
+
+static bool
+EmitSimdShuffle(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* lhs;
+    if (!EmitExpr(f, type, &lhs))
+        return false;
+
+    MDefinition* rhs;
+    if (!EmitExpr(f, type, &rhs))
+        return false;
+
+    uint8_t lanes[4];
+    for (unsigned i = 0; i < 4; i++)
+        lanes[i] = f.readU8();
+
+    *def = f.shuffleSimd(lhs, rhs, lanes[0], lanes[1], lanes[2], lanes[3],
+                         MIRTypeFromAsmType(type));
+    return true;
+}
+
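+// SIMD loads and stores carry [u8 viewType] [u8 needsBoundsCheck]
+// [u8 numElems] before the index expression; note that this order differs
+// from the scalar heap accesses, which read needsBoundsCheck first.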
+static bool
+EmitSimdLoad(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    Scalar::Type viewType = Scalar::Type(f.readU8());
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    uint8_t numElems = f.readU8();
+
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+
+    *def = f.loadSimdHeap(viewType, index, needsBoundsCheck, numElems);
+    return true;
+}
+
+static bool
+EmitSimdStore(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    Scalar::Type viewType = Scalar::Type(f.readU8());
+    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
+    uint8_t numElems = f.readU8();
+
+    MDefinition* index;
+    if (!EmitI32Expr(f, &index))
+        return false;
+
+    MDefinition* vec;
+    if (!EmitExpr(f, type, &vec))
+        return false;
+
+    f.storeSimdHeap(viewType, index, vec, needsBoundsCheck, numElems);
+    *def = vec;
+    return true;
+}
+
+typedef bool IsElementWise;
+
+static bool
+EmitSimdSelect(FunctionCompiler& f, AsmType type, bool isElementWise, MDefinition** def)
+{
+    MDefinition* defs[3];
+    if (!EmitI32X4Expr(f, &defs[0]) || !EmitExpr(f, type, &defs[1]) || !EmitExpr(f, type, &defs[2]))
+        return false;
+    *def = f.selectSimd(defs[0], defs[1], defs[2], MIRTypeFromAsmType(type), isElementWise);
+    return true;
+}
+
+static bool
+EmitSimdSplat(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* in;
+    if (!EmitExpr(f, AsmSimdTypeToScalarType(type), &in))
+        return false;
+    *def = f.splatSimd(in, MIRTypeFromAsmType(type));
+    return true;
+}
+
+static bool
+EmitSimdCtor(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    switch (type) {
+      case AsmType::Int32x4: {
+        MDefinition* args[4];
+        for (unsigned i = 0; i < 4; i++) {
+            if (!EmitI32Expr(f, &args[i]))
+                return false;
+        }
+        *def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType_Int32x4);
+        return true;
+      }
+      case AsmType::Float32x4: {
+        MDefinition* args[4];
+        for (unsigned i = 0; i < 4; i++) {
+            if (!EmitF32Expr(f, &args[i]))
+                return false;
+        }
+        *def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType_Float32x4);
+        return true;
+      }
+      case AsmType::Int32:
+      case AsmType::Float32:
+      case AsmType::Float64:
+        break;
+    }
+    MOZ_CRASH("unexpected SIMD type");
+}
+
+template<class T>
+static bool
+EmitUnary(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* in;
+    if (!EmitExpr(f, type, &in))
+        return false;
+    *def = f.unary<T>(in);
+    return true;
+}
+
+template<class T>
+static bool
+EmitUnaryMir(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* in;
+    if (!EmitExpr(f, type, &in))
+        return false;
+    *def = f.unary<T>(in, MIRTypeFromAsmType(type));
+    return true;
+}
+
+static bool EmitStatement(FunctionCompiler& f, LabelVector* maybeLabels = nullptr);
+
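+// A comma expression of numExpr subexpressions evaluates the first
+// numExpr - 1 for effect only (as statements) and yields the value of the
+// last one, which is emitted with the expected type.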
+static bool
+EmitComma(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    uint32_t numExpr = f.readU32();
+    for (uint32_t i = 1; i < numExpr; i++) {
+        if (!EmitStatement(f))
+            return false;
+    }
+    return EmitExpr(f, type, def);
+}
+
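+// Conditional expressions lower to a diamond: each arm pushes its value
+// as a phi input and the join block pops the phi as the result.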
+static bool
+EmitConditional(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* cond;
+    if (!EmitI32Expr(f, &cond))
+        return false;
+
+    MBasicBlock* thenBlock = nullptr;
+    MBasicBlock* elseBlock = nullptr;
+    if (!f.branchAndStartThen(cond, &thenBlock, &elseBlock))
+        return false;
+
+    MDefinition* ifTrue;
+    if (!EmitExpr(f, type, &ifTrue))
+        return false;
+
+    BlockVector thenBlocks;
+    if (!f.appendThenBlock(&thenBlocks))
+        return false;
+
+    f.pushPhiInput(ifTrue);
+
+    f.switchToElse(elseBlock);
+
+    MDefinition* ifFalse;
+    if (!EmitExpr(f, type, &ifFalse))
+        return false;
+
+    f.pushPhiInput(ifFalse);
+
+    if (!f.joinIfElse(thenBlocks))
+        return false;
+
+    *def = f.popPhiOutput();
+    return true;
+}
+
+static bool
+EmitMultiply(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    MDefinition* lhs;
+    if (!EmitExpr(f, type, &lhs))
+        return false;
+    MDefinition* rhs;
+    if (!EmitExpr(f, type, &rhs))
+        return false;
+    MIRType mirType = MIRTypeFromAsmType(type);
+    *def = f.mul(lhs, rhs, mirType, type == AsmType::Int32 ? MMul::Integer : MMul::Normal);
+    return true;
+}
+
+typedef bool IsAdd;
+
+static bool
+EmitAddOrSub(FunctionCompiler& f, AsmType type, bool isAdd, MDefinition** def)
+{
+    MDefinition* lhs;
+    if (!EmitExpr(f, type, &lhs))
+        return false;
+    MDefinition* rhs;
+    if (!EmitExpr(f, type, &rhs))
+        return false;
+    MIRType mirType = MIRTypeFromAsmType(type);
+    *def = isAdd ? f.binary<MAdd>(lhs, rhs, mirType) : f.binary<MSub>(lhs, rhs, mirType);
+    return true;
+}
+
+typedef bool IsUnsigned;
+typedef bool IsDiv;
+
+static bool
+EmitDivOrMod(FunctionCompiler& f, AsmType type, bool isDiv, bool isUnsigned, MDefinition** def)
+{
+    MDefinition* lhs;
+    if (!EmitExpr(f, type, &lhs))
+        return false;
+    MDefinition* rhs;
+    if (!EmitExpr(f, type, &rhs))
+        return false;
+    *def = isDiv
+           ? f.div(lhs, rhs, MIRTypeFromAsmType(type), isUnsigned)
+           : f.mod(lhs, rhs, MIRTypeFromAsmType(type), isUnsigned);
+    return true;
+}
+
+static bool
+EmitDivOrMod(FunctionCompiler& f, AsmType type, bool isDiv, MDefinition** def)
+{
+    MOZ_ASSERT(type != AsmType::Int32, "int div or mod must specify signedness");
+    return EmitDivOrMod(f, type, isDiv, false, def);
+}
+
+static bool
+EmitComparison(FunctionCompiler& f, I32 stmt, MDefinition** def)
+{
+    MDefinition *lhs, *rhs;
+    MCompare::CompareType compareType;
+    switch (stmt) {
+      case I32::EqI32:
+      case I32::NeI32:
+      case I32::SLeI32:
+      case I32::SLtI32:
+      case I32::ULeI32:
+      case I32::ULtI32:
+      case I32::SGeI32:
+      case I32::SGtI32:
+      case I32::UGeI32:
+      case I32::UGtI32:
+        if (!EmitI32Expr(f, &lhs) || !EmitI32Expr(f, &rhs))
+            return false;
+        // The opcode list is sorted such that all signed comparisons
+        // come before ULtI32, the first unsigned comparison.
+        compareType = stmt < I32::ULtI32
+                      ? MCompare::Compare_Int32
+                      : MCompare::Compare_UInt32;
+        break;
+      case I32::EqF32:
+      case I32::NeF32:
+      case I32::LeF32:
+      case I32::LtF32:
+      case I32::GeF32:
+      case I32::GtF32:
+        if (!EmitF32Expr(f, &lhs) || !EmitF32Expr(f, &rhs))
+            return false;
+        compareType = MCompare::Compare_Float32;
+        break;
+      case I32::EqF64:
+      case I32::NeF64:
+      case I32::LeF64:
+      case I32::LtF64:
+      case I32::GeF64:
+      case I32::GtF64:
+        if (!EmitF64Expr(f, &lhs) || !EmitF64Expr(f, &rhs))
+            return false;
+        compareType = MCompare::Compare_Double;
+        break;
+      default: MOZ_CRASH("unexpected comparison opcode");
+    }
+
+    JSOp compareOp;
+    switch (stmt) {
+      case I32::EqI32:
+      case I32::EqF32:
+      case I32::EqF64:
+        compareOp = JSOP_EQ;
+        break;
+      case I32::NeI32:
+      case I32::NeF32:
+      case I32::NeF64:
+        compareOp = JSOP_NE;
+        break;
+      case I32::SLeI32:
+      case I32::ULeI32:
+      case I32::LeF32:
+      case I32::LeF64:
+        compareOp = JSOP_LE;
+        break;
+      case I32::SLtI32:
+      case I32::ULtI32:
+      case I32::LtF32:
+      case I32::LtF64:
+        compareOp = JSOP_LT;
+        break;
+      case I32::SGeI32:
+      case I32::UGeI32:
+      case I32::GeF32:
+      case I32::GeF64:
+        compareOp = JSOP_GE;
+        break;
+      case I32::SGtI32:
+      case I32::UGtI32:
+      case I32::GtF32:
+      case I32::GtF64:
+        compareOp = JSOP_GT;
+        break;
+      default: MOZ_CRASH("unexpected comparison opcode");
+    }
+
+    *def = f.compare(lhs, rhs, compareOp, compareType);
+    return true;
+}
+
+template<class T>
+static bool
+EmitBitwise(FunctionCompiler& f, MDefinition** def)
+{
+    MDefinition* lhs;
+    if (!EmitI32Expr(f, &lhs))
+        return false;
+    MDefinition* rhs;
+    if (!EmitI32Expr(f, &rhs))
+        return false;
+    *def = f.bitwise<T>(lhs, rhs);
+    return true;
+}
+
+template<>
+bool
+EmitBitwise<MBitNot>(FunctionCompiler& f, MDefinition** def)
+{
+    MDefinition* in;
+    if (!EmitI32Expr(f, &in))
+        return false;
+    *def = f.bitwise<MBitNot>(in);
+    return true;
+}
+
+static bool
+EmitExpr(FunctionCompiler& f, AsmType type, MDefinition** def)
+{
+    switch (type) {
+      case AsmType::Int32:     return EmitI32Expr(f, def);
+      case AsmType::Float32:   return EmitF32Expr(f, def);
+      case AsmType::Float64:   return EmitF64Expr(f, def);
+      case AsmType::Int32x4:   return EmitI32X4Expr(f, def);
+      case AsmType::Float32x4: return EmitF32X4Expr(f, def);
+    }
+    MOZ_CRASH("unexpected asm type");
+}
+
+static bool
+EmitInterruptCheck(FunctionCompiler& f)
+{
+    uint32_t lineno = f.readU32();
+    uint32_t column = f.readU32();
+    f.addInterruptCheck(lineno, column);
+    return true;
+}
+
+static bool
+EmitInterruptCheckLoop(FunctionCompiler& f)
+{
+    if (!EmitInterruptCheck(f))
+        return false;
+    return EmitStatement(f);
+}
+
+static bool
+EmitWhile(FunctionCompiler& f, const LabelVector* maybeLabels)
+{
+    size_t headPc = f.pc();
+
+    MBasicBlock* loopEntry;
+    if (!f.startPendingLoop(headPc, &loopEntry))
+        return false;
+
+    MDefinition* condDef;
+    if (!EmitI32Expr(f, &condDef))
+        return false;
+
+    MBasicBlock* afterLoop;
+    if (!f.branchAndStartLoopBody(condDef, &afterLoop))
+        return false;
+
+    if (!EmitStatement(f))
+        return false;
+
+    if (!f.bindContinues(headPc, maybeLabels))
+        return false;
+
+    return f.closeLoop(loopEntry, afterLoop);
+}
+
+static bool
+EmitFor(FunctionCompiler& f, Stmt stmt, const LabelVector* maybeLabels)
+{
+    MOZ_ASSERT(stmt == Stmt::ForInitInc || stmt == Stmt::ForInitNoInc ||
+               stmt == Stmt::ForNoInitInc || stmt == Stmt::ForNoInitNoInc);
+    size_t headPc = f.pc();
+
+    if (stmt == Stmt::ForInitInc || stmt == Stmt::ForInitNoInc) {
+        if (!EmitStatement(f))
+            return false;
+    }
+
+    MBasicBlock* loopEntry;
+    if (!f.startPendingLoop(headPc, &loopEntry))
+        return false;
+
+    MDefinition* condDef;
+    if (!EmitI32Expr(f, &condDef))
+        return false;
+
+    MBasicBlock* afterLoop;
+    if (!f.branchAndStartLoopBody(condDef, &afterLoop))
+        return false;
+
+    if (!EmitStatement(f))
+        return false;
+
+    if (!f.bindContinues(headPc, maybeLabels))
+        return false;
+
+    if (stmt == Stmt::ForInitInc || stmt == Stmt::ForNoInitInc) {
+        if (!EmitStatement(f))
+            return false;
+    }
+
+    f.assertDebugCheckPoint();
+
+    return f.closeLoop(loopEntry, afterLoop);
+}
+
+static bool
+EmitDoWhile(FunctionCompiler& f, const LabelVector* maybeLabels)
+{
+    size_t headPc = f.pc();
+
+    MBasicBlock* loopEntry;
+    if (!f.startPendingLoop(headPc, &loopEntry))
+        return false;
+
+    if (!EmitStatement(f))
+        return false;
+
+    if (!f.bindContinues(headPc, maybeLabels))
+        return false;
+
+    MDefinition* condDef;
+    if (!EmitI32Expr(f, &condDef))
+        return false;
+
+    return f.branchAndCloseDoWhileLoop(condDef, loopEntry);
+}
+
+static bool
+EmitLabel(FunctionCompiler& f, LabelVector* maybeLabels)
+{
+    uint32_t labelId = f.readU32();
+
+    if (maybeLabels) {
+        if (!maybeLabels->append(labelId))
+            return false;
+        return EmitStatement(f, maybeLabels);
+    }
+
+    LabelVector labels;
+    if (!labels.append(labelId))
+        return false;
+
+    if (!EmitStatement(f, &labels))
+        return false;
+
+    return f.bindLabeledBreaks(&labels);
+}
+
+static bool EmitStatement(FunctionCompiler& f, Stmt stmt, LabelVector* maybeLabels = nullptr);
+
+typedef bool HasElseBlock;
+
+static bool
+EmitIfElse(FunctionCompiler& f, bool hasElse)
+{
+    // Handle if/else-if chains using iteration instead of recursion. This
+    // avoids blowing the C stack quota for long if/else-if chains and also
+    // creates fewer MBasicBlocks at join points (by creating one join block
+    // for the entire if/else-if chain).
+    BlockVector thenBlocks;
+
+  recurse:
+    MDefinition* condition;
+    if (!EmitI32Expr(f, &condition))
+        return false;
+
+    MBasicBlock* thenBlock = nullptr;
+    MBasicBlock* elseOrJoinBlock = nullptr;
+    if (!f.branchAndStartThen(condition, &thenBlock, &elseOrJoinBlock))
+        return false;
+
+    if (!EmitStatement(f))
+        return false;
+
+    if (!f.appendThenBlock(&thenBlocks))
+        return false;
+
+    if (hasElse) {
+        f.switchToElse(elseOrJoinBlock);
+
+        Stmt nextStmt(f.readStmtOp());
+        if (nextStmt == Stmt::IfThen) {
+            hasElse = false;
+            goto recurse;
+        }
+        if (nextStmt == Stmt::IfElse) {
+            hasElse = true;
+            goto recurse;
+        }
+
+        if (!EmitStatement(f, nextStmt))
+            return false;
+
+        return f.joinIfElse(thenBlocks);
+    } else {
+        return f.joinIf(thenBlocks, elseOrJoinBlock);
+    }
+}
+
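+// Switch encoding: [u8 hasDefault] [i32 low] [i32 high] [u32 numCases]
+// [i32 expr: index], then numCases entries of [i32 caseValue] [stmt],
+// then the default statement if hasDefault. The case table is dense over
+// [low, high]; values without a case are routed to the default block.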
+static bool
+EmitSwitch(FunctionCompiler& f)
+{
+    bool hasDefault = f.readU8();
+    int32_t low = f.readI32();
+    int32_t high = f.readI32();
+    uint32_t numCases = f.readU32();
+
+    MDefinition* exprDef;
+    if (!EmitI32Expr(f, &exprDef))
+        return false;
+
+    // A switch with no cases and no default needs nothing beyond the
+    // index expression emitted above.
+    if (!hasDefault && numCases == 0)
+        return true;
+
+    BlockVector cases;
+    if (!cases.resize(high - low + 1))
+        return false;
+
+    MBasicBlock* switchBlock;
+    if (!f.startSwitch(f.pc(), exprDef, low, high, &switchBlock))
+        return false;
+
+    while (numCases--) {
+        int32_t caseValue = f.readI32();
+        MOZ_ASSERT(caseValue >= low && caseValue <= high);
+        unsigned caseIndex = caseValue - low;
+        if (!f.startSwitchCase(switchBlock, &cases[caseIndex]))
+            return false;
+        if (!EmitStatement(f))
+            return false;
+    }
+
+    MBasicBlock* defaultBlock;
+    if (!f.startSwitchDefault(switchBlock, &cases, &defaultBlock))
+        return false;
+
+    if (hasDefault && !EmitStatement(f))
+        return false;
+
+    return f.joinSwitch(switchBlock, cases, defaultBlock);
+}
+
+static AsmType
+RetTypeToAsmType(RetType retType)
+{
+    switch (retType.which()) {
+      case RetType::Void:      break;
+      case RetType::Signed:    return AsmType::Int32;
+      case RetType::Float:     return AsmType::Float32;
+      case RetType::Double:    return AsmType::Float64;
+      case RetType::Int32x4:   return AsmType::Int32x4;
+      case RetType::Float32x4: return AsmType::Float32x4;
+    }
+    MOZ_CRASH("unexpected return type");
+}
+
+static bool
+EmitRet(FunctionCompiler& f)
+{
+    RetType retType = f.returnedType();
+
+    if (retType == RetType::Void) {
+        f.returnVoid();
+        return true;
+    }
+
+    AsmType type = RetTypeToAsmType(retType);
+    MDefinition* def = nullptr;
+    if (!EmitExpr(f, type, &def))
+        return false;
+    f.returnExpr(def);
+    return true;
+}
+
+static bool
+EmitBlock(FunctionCompiler& f)
+{
+    size_t numStmt = f.readU32();
+    for (size_t i = 0; i < numStmt; i++) {
+        if (!EmitStatement(f))
+            return false;
+    }
+    f.assertDebugCheckPoint();
+    return true;
+}
+
+typedef bool HasLabel;
+
+static bool
+EmitContinue(FunctionCompiler& f, bool hasLabel)
+{
+    if (!hasLabel)
+        return f.addContinue(nullptr);
+    uint32_t labelId = f.readU32();
+    return f.addContinue(&labelId);
+}
+
+static bool
+EmitBreak(FunctionCompiler& f, bool hasLabel)
+{
+    if (!hasLabel)
+        return f.addBreak(nullptr);
+    uint32_t labelId = f.readU32();
+    return f.addBreak(&labelId);
+}
+
+static bool
+EmitStatement(FunctionCompiler& f, Stmt stmt, LabelVector* maybeLabels /*= nullptr */)
+{
+    if (!f.mirGen().ensureBallast())
+        return false;
+
+    MDefinition* _;
+    switch (stmt) {
+      case Stmt::Block:              return EmitBlock(f);
+      case Stmt::IfThen:             return EmitIfElse(f, HasElseBlock(false));
+      case Stmt::IfElse:             return EmitIfElse(f, HasElseBlock(true));
+      case Stmt::Switch:             return EmitSwitch(f);
+      case Stmt::While:              return EmitWhile(f, maybeLabels);
+      case Stmt::DoWhile:            return EmitDoWhile(f, maybeLabels);
+      case Stmt::ForInitInc:
+      case Stmt::ForInitNoInc:
+      case Stmt::ForNoInitNoInc:
+      case Stmt::ForNoInitInc:       return EmitFor(f, stmt, maybeLabels);
+      case Stmt::Label:              return EmitLabel(f, maybeLabels);
+      case Stmt::Continue:           return EmitContinue(f, HasLabel(false));
+      case Stmt::ContinueLabel:      return EmitContinue(f, HasLabel(true));
+      case Stmt::Break:              return EmitBreak(f, HasLabel(false));
+      case Stmt::BreakLabel:         return EmitBreak(f, HasLabel(true));
+      case Stmt::Ret:                return EmitRet(f);
+      case Stmt::I32Expr:            return EmitI32Expr(f, &_);
+      case Stmt::F32Expr:            return EmitF32Expr(f, &_);
+      case Stmt::F64Expr:            return EmitF64Expr(f, &_);
+      case Stmt::I32X4Expr:          return EmitI32X4Expr(f, &_);
+      case Stmt::F32X4Expr:          return EmitF32X4Expr(f, &_);
+      case Stmt::CallInternal:       return EmitInternalCall(f, RetType::Void, &_);
+      case Stmt::CallIndirect:       return EmitFuncPtrCall(f, RetType::Void, &_);
+      case Stmt::CallImport:         return EmitFFICall(f, RetType::Void, &_);
+      case Stmt::AtomicsFence:       f.memoryBarrier(MembarFull); return true;
+      case Stmt::Noop:               return true;
+      case Stmt::Id:                 return EmitStatement(f);
+      case Stmt::InterruptCheckHead: return EmitInterruptCheck(f);
+      case Stmt::InterruptCheckLoop: return EmitInterruptCheckLoop(f);
+      case Stmt::DebugCheckPoint:
+      case Stmt::Bad:             break;
+    }
+    MOZ_CRASH("unexpected statement");
+}
+
+static bool
+EmitStatement(FunctionCompiler& f, LabelVector* maybeLabels /* = nullptr */)
+{
+    Stmt stmt(f.readStmtOp());
+    return EmitStatement(f, stmt, maybeLabels);
+}
+
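+// Each typed expression stream dispatches on a one-byte opcode. The Id
+// opcode is a no-op wrapper that simply re-dispatches, and Bad marks an
+// invalid encoding.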
+static bool
+EmitI32Expr(FunctionCompiler& f, MDefinition** def)
+{
+    I32 op = I32(f.readU8());
+    switch (op) {
+      case I32::Id:
+        return EmitI32Expr(f, def);
+      case I32::Literal:
+        return EmitLiteral(f, AsmType::Int32, def);
+      case I32::GetLocal:
+        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Int32), def);
+      case I32::SetLocal:
+        return EmitSetLoc(f, AsmType::Int32, def);
+      case I32::GetGlobal:
+        return EmitGetGlo(f, MIRType_Int32, def);
+      case I32::SetGlobal:
+        return EmitSetGlo(f, AsmType::Int32, def);
+      case I32::CallInternal:
+        return EmitInternalCall(f, RetType::Signed, def);
+      case I32::CallIndirect:
+        return EmitFuncPtrCall(f, RetType::Signed, def);
+      case I32::CallImport:
+        return EmitFFICall(f, RetType::Signed, def);
+      case I32::Conditional:
+        return EmitConditional(f, AsmType::Int32, def);
+      case I32::Comma:
+        return EmitComma(f, AsmType::Int32, def);
+      case I32::Add:
+        return EmitAddOrSub(f, AsmType::Int32, IsAdd(true), def);
+      case I32::Sub:
+        return EmitAddOrSub(f, AsmType::Int32, IsAdd(false), def);
+      case I32::Mul:
+        return EmitMultiply(f, AsmType::Int32, def);
+      case I32::UDiv:
+      case I32::SDiv:
+        return EmitDivOrMod(f, AsmType::Int32, IsDiv(true), IsUnsigned(op == I32::UDiv), def);
+      case I32::UMod:
+      case I32::SMod:
+        return EmitDivOrMod(f, AsmType::Int32, IsDiv(false), IsUnsigned(op == I32::UMod), def);
+      case I32::Min:
+        return EmitMathMinMax(f, AsmType::Int32, IsMax(false), def);
+      case I32::Max:
+        return EmitMathMinMax(f, AsmType::Int32, IsMax(true), def);
+      case I32::Not:
+        return EmitUnary<MNot>(f, AsmType::Int32, def);
+      case I32::FromF32:
+        return EmitUnary<MTruncateToInt32>(f, AsmType::Float32, def);
+      case I32::FromF64:
+        return EmitUnary<MTruncateToInt32>(f, AsmType::Float64, def);
+      case I32::Clz:
+        return EmitUnary<MClz>(f, AsmType::Int32, def);
+      case I32::Abs:
+        return EmitUnaryMir<MAbs>(f, AsmType::Int32, def);
+      case I32::Neg:
+        return EmitUnaryMir<MAsmJSNeg>(f, AsmType::Int32, def);
+      case I32::BitOr:
+        return EmitBitwise<MBitOr>(f, def);
+      case I32::BitAnd:
+        return EmitBitwise<MBitAnd>(f, def);
+      case I32::BitXor:
+        return EmitBitwise<MBitXor>(f, def);
+      case I32::Lsh:
+        return EmitBitwise<MLsh>(f, def);
+      case I32::ArithRsh:
+        return EmitBitwise<MRsh>(f, def);
+      case I32::LogicRsh:
+        return EmitBitwise<MUrsh>(f, def);
+      case I32::BitNot:
+        return EmitBitwise<MBitNot>(f, def);
+      case I32::SLoad8:
+        return EmitLoadArray(f, Scalar::Int8, def);
+      case I32::SLoad16:
+        return EmitLoadArray(f, Scalar::Int16, def);
+      case I32::SLoad32:
+        return EmitLoadArray(f, Scalar::Int32, def);
+      case I32::ULoad8:
+        return EmitLoadArray(f, Scalar::Uint8, def);
+      case I32::ULoad16:
+        return EmitLoadArray(f, Scalar::Uint16, def);
+      case I32::ULoad32:
+        return EmitLoadArray(f, Scalar::Uint32, def);
+      case I32::Store8:
+        return EmitStore(f, Scalar::Int8, def);
+      case I32::Store16:
+        return EmitStore(f, Scalar::Int16, def);
+      case I32::Store32:
+        return EmitStore(f, Scalar::Int32, def);
+      case I32::EqI32:
+      case I32::NeI32:
+      case I32::SLtI32:
+      case I32::SLeI32:
+      case I32::SGtI32:
+      case I32::SGeI32:
+      case I32::ULtI32:
+      case I32::ULeI32:
+      case I32::UGtI32:
+      case I32::UGeI32:
+      case I32::EqF32:
+      case I32::NeF32:
+      case I32::LtF32:
+      case I32::LeF32:
+      case I32::GtF32:
+      case I32::GeF32:
+      case I32::EqF64:
+      case I32::NeF64:
+      case I32::LtF64:
+      case I32::LeF64:
+      case I32::GtF64:
+      case I32::GeF64:
+        return EmitComparison(f, op, def);
+      case I32::AtomicsCompareExchange:
+        return EmitAtomicsCompareExchange(f, def);
+      case I32::AtomicsExchange:
+        return EmitAtomicsExchange(f, def);
+      case I32::AtomicsLoad:
+        return EmitAtomicsLoad(f, def);
+      case I32::AtomicsStore:
+        return EmitAtomicsStore(f, def);
+      case I32::AtomicsBinOp:
+        return EmitAtomicsBinOp(f, def);
+      case I32::I32X4SignMask:
+        return EmitSignMask(f, AsmType::Int32x4, def);
+      case I32::F32X4SignMask:
+        return EmitSignMask(f, AsmType::Float32x4, def);
+      case I32::I32X4ExtractLane:
+        return EmitExtractLane(f, AsmType::Int32x4, def);
+      case I32::Bad:
+        break;
+    }
+    MOZ_CRASH("unexpected i32 expression");
+}
+
+static bool
+EmitF32Expr(FunctionCompiler& f, MDefinition** def)
+{
+    F32 op = F32(f.readU8());
+    switch (op) {
+      case F32::Id:
+        return EmitF32Expr(f, def);
+      case F32::Literal:
+        return EmitLiteral(f, AsmType::Float32, def);
+      case F32::GetLocal:
+        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Float32), def);
+      case F32::SetLocal:
+        return EmitSetLoc(f, AsmType::Float32, def);
+      case F32::GetGlobal:
+        return EmitGetGlo(f, MIRType_Float32, def);
+      case F32::SetGlobal:
+        return EmitSetGlo(f, AsmType::Float32, def);
+      case F32::CallInternal:
+        return EmitInternalCall(f, RetType::Float, def);
+      case F32::CallIndirect:
+        return EmitFuncPtrCall(f, RetType::Float, def);
+      case F32::CallImport:
+        return EmitFFICall(f, RetType::Float, def);
+      case F32::Conditional:
+        return EmitConditional(f, AsmType::Float32, def);
+      case F32::Comma:
+        return EmitComma(f, AsmType::Float32, def);
+      case F32::Add:
+        return EmitAddOrSub(f, AsmType::Float32, IsAdd(true), def);
+      case F32::Sub:
+        return EmitAddOrSub(f, AsmType::Float32, IsAdd(false), def);
+      case F32::Mul:
+        return EmitMultiply(f, AsmType::Float32, def);
+      case F32::Div:
+        return EmitDivOrMod(f, AsmType::Float32, IsDiv(true), def);
+      case F32::Min:
+        return EmitMathMinMax(f, AsmType::Float32, IsMax(false), def);
+      case F32::Max:
+        return EmitMathMinMax(f, AsmType::Float32, IsMax(true), def);
+      case F32::Neg:
+        return EmitUnaryMir<MAsmJSNeg>(f, AsmType::Float32, def);
+      case F32::Abs:
+        return EmitUnaryMir<MAbs>(f, AsmType::Float32, def);
+      case F32::Sqrt:
+        return EmitUnaryMir<MSqrt>(f, AsmType::Float32, def);
+      case F32::Ceil:
+      case F32::Floor:
+        return EmitMathBuiltinCall(f, op, def);
+      case F32::FromF64:
+        return EmitUnary<MToFloat32>(f, AsmType::Float64, def);
+      case F32::FromS32:
+        return EmitUnary<MToFloat32>(f, AsmType::Int32, def);
+      case F32::FromU32:
+        return EmitUnary<MAsmJSUnsignedToFloat32>(f, AsmType::Int32, def);
+      case F32::Load:
+        return EmitLoadArray(f, Scalar::Float32, def);
+      case F32::StoreF32:
+        return EmitStore(f, Scalar::Float32, def);
+      case F32::StoreF64:
+        return EmitStoreWithCoercion(f, Scalar::Float32, Scalar::Float64, def);
+      case F32::F32X4ExtractLane:
+        return EmitExtractLane(f, AsmType::Float32x4, def);
+      case F32::Bad:
+        break;
+    }
+    MOZ_CRASH("unexpected f32 expression");
+}
+
+static bool
+EmitF64Expr(FunctionCompiler& f, MDefinition** def)
+{
+    F64 op = F64(f.readU8());
+    switch (op) {
+      case F64::Id:
+        return EmitF64Expr(f, def);
+      case F64::GetLocal:
+        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Double), def);
+      case F64::SetLocal:
+        return EmitSetLoc(f, AsmType::Float64, def);
+      case F64::GetGlobal:
+        return EmitGetGlo(f, MIRType_Double, def);
+      case F64::SetGlobal:
+        return EmitSetGlo(f, AsmType::Float64, def);
+      case F64::Literal:
+        return EmitLiteral(f, AsmType::Float64, def);
+      case F64::Add:
+        return EmitAddOrSub(f, AsmType::Float64, IsAdd(true), def);
+      case F64::Sub:
+        return EmitAddOrSub(f, AsmType::Float64, IsAdd(false), def);
+      case F64::Mul:
+        return EmitMultiply(f, AsmType::Float64, def);
+      case F64::Div:
+        return EmitDivOrMod(f, AsmType::Float64, IsDiv(true), def);
+      case F64::Mod:
+        return EmitDivOrMod(f, AsmType::Float64, IsDiv(false), def);
+      case F64::Min:
+        return EmitMathMinMax(f, AsmType::Float64, IsMax(false), def);
+      case F64::Max:
+        return EmitMathMinMax(f, AsmType::Float64, IsMax(true), def);
+      case F64::Neg:
+        return EmitUnaryMir<MAsmJSNeg>(f, AsmType::Float64, def);
+      case F64::Abs:
+        return EmitUnaryMir<MAbs>(f, AsmType::Float64, def);
+      case F64::Sqrt:
+        return EmitUnaryMir<MSqrt>(f, AsmType::Float64, def);
+      case F64::Ceil:
+      case F64::Floor:
+      case F64::Sin:
+      case F64::Cos:
+      case F64::Tan:
+      case F64::Asin:
+      case F64::Acos:
+      case F64::Atan:
+      case F64::Exp:
+      case F64::Log:
+      case F64::Pow:
+      case F64::Atan2:
+        return EmitMathBuiltinCall(f, op, def);
+      case F64::FromF32:
+        return EmitUnary<MToDouble>(f, AsmType::Float32, def);
+      case F64::FromS32:
+        return EmitUnary<MToDouble>(f, AsmType::Int32, def);
+      case F64::FromU32:
+        return EmitUnary<MAsmJSUnsignedToDouble>(f, AsmType::Int32, def);
+      case F64::Load:
+        return EmitLoadArray(f, Scalar::Float64, def);
+      case F64::StoreF64:
+        return EmitStore(f, Scalar::Float64, def);
+      case F64::StoreF32:
+        return EmitStoreWithCoercion(f, Scalar::Float64, Scalar::Float32, def);
+      case F64::CallInternal:
+        return EmitInternalCall(f, RetType::Double, def);
+      case F64::CallIndirect:
+        return EmitFuncPtrCall(f, RetType::Double, def);
+      case F64::CallImport:
+        return EmitFFICall(f, RetType::Double, def);
+      case F64::Conditional:
+        return EmitConditional(f, AsmType::Float64, def);
+      case F64::Comma:
+        return EmitComma(f, AsmType::Float64, def);
+      case F64::Bad:
+        break;
+    }
+    MOZ_CRASH("unexpected f64 expression");
+}
+
+static bool
+EmitI32X4Expr(FunctionCompiler& f, MDefinition** def)
+{
+    I32X4 op = I32X4(f.readU8());
+    switch (op) {
+      case I32X4::Id:
+        return EmitI32X4Expr(f, def);
+      case I32X4::GetLocal:
+        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Int32x4), def);
+      case I32X4::SetLocal:
+        return EmitSetLoc(f, AsmType::Int32x4, def);
+      case I32X4::GetGlobal:
+        return EmitGetGlo(f, MIRType_Int32x4, def);
+      case I32X4::SetGlobal:
+        return EmitSetGlo(f, AsmType::Int32x4, def);
+      case I32X4::Comma:
+        return EmitComma(f, AsmType::Int32x4, def);
+      case I32X4::Conditional:
+        return EmitConditional(f, AsmType::Int32x4, def);
+      case I32X4::CallInternal:
+        return EmitInternalCall(f, RetType::Int32x4, def);
+      case I32X4::CallIndirect:
+        return EmitFuncPtrCall(f, RetType::Int32x4, def);
+      case I32X4::CallImport:
+        return EmitFFICall(f, RetType::Int32x4, def);
+      case I32X4::Literal:
+        return EmitLiteral(f, AsmType::Int32x4, def);
+      case I32X4::Ctor:
+        return EmitSimdCtor(f, AsmType::Int32x4, def);
+      case I32X4::Unary:
+        return EmitSimdUnary(f, AsmType::Int32x4, def);
+      case I32X4::Binary:
+        return EmitSimdBinaryArith(f, AsmType::Int32x4, def);
+      case I32X4::BinaryBitwise:
+        return EmitSimdBinaryBitwise(f, AsmType::Int32x4, def);
+      case I32X4::BinaryCompI32X4:
+        return EmitSimdBinaryComp(f, AsmType::Int32x4, def);
+      case I32X4::BinaryCompF32X4:
+        return EmitSimdBinaryComp(f, AsmType::Float32x4, def);
+      case I32X4::BinaryShift:
+        return EmitSimdBinaryShift(f, def);
+      case I32X4::ReplaceLane:
+        return EmitSimdReplaceLane(f, AsmType::Int32x4, def);
+      case I32X4::FromF32X4:
+        return EmitSimdCast<MSimdConvert>(f, AsmType::Float32x4, AsmType::Int32x4, def);
+      case I32X4::FromF32X4Bits:
+        return EmitSimdCast<MSimdReinterpretCast>(f, AsmType::Float32x4, AsmType::Int32x4, def);
+      case I32X4::Swizzle:
+        return EmitSimdSwizzle(f, AsmType::Int32x4, def);
+      case I32X4::Shuffle:
+        return EmitSimdShuffle(f, AsmType::Int32x4, def);
+      case I32X4::Select:
+        return EmitSimdSelect(f, AsmType::Int32x4, IsElementWise(true), def);
+      case I32X4::BitSelect:
+        return EmitSimdSelect(f, AsmType::Int32x4, IsElementWise(false), def);
+      case I32X4::Splat:
+        return EmitSimdSplat(f, AsmType::Int32x4, def);
+      case I32X4::Load:
+        return EmitSimdLoad(f, AsmType::Int32x4, def);
+      case I32X4::Store:
+        return EmitSimdStore(f, AsmType::Int32x4, def);
+      case I32X4::Bad:
+        break;
+    }
+    MOZ_CRASH("unexpected int32x4 expression");
+}
+
+static bool
+EmitF32X4Expr(FunctionCompiler& f, MDefinition** def)
+{
+    F32X4 op = F32X4(f.readU8());
+    switch (op) {
+      case F32X4::Id:
+        return EmitF32X4Expr(f, def);
+      case F32X4::GetLocal:
+        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Float32x4), def);
+      case F32X4::SetLocal:
+        return EmitSetLoc(f, AsmType::Float32x4, def);
+      case F32X4::GetGlobal:
+        return EmitGetGlo(f, MIRType_Float32x4, def);
+      case F32X4::SetGlobal:
+        return EmitSetGlo(f, AsmType::Float32x4, def);
+      case F32X4::Comma:
+        return EmitComma(f, AsmType::Float32x4, def);
+      case F32X4::Conditional:
+        return EmitConditional(f, AsmType::Float32x4, def);
+      case F32X4::CallInternal:
+        return EmitInternalCall(f, RetType::Float32x4, def);
+      case F32X4::CallIndirect:
+        return EmitFuncPtrCall(f, RetType::Float32x4, def);
+      case F32X4::CallImport:
+        return EmitFFICall(f, RetType::Float32x4, def);
+      case F32X4::Literal:
+        return EmitLiteral(f, AsmType::Float32x4, def);
+      case F32X4::Ctor:
+        return EmitSimdCtor(f, AsmType::Float32x4, def);
+      case F32X4::Unary:
+        return EmitSimdUnary(f, AsmType::Float32x4, def);
+      case F32X4::Binary:
+        return EmitSimdBinaryArith(f, AsmType::Float32x4, def);
+      case F32X4::BinaryBitwise:
+        return EmitSimdBinaryBitwise(f, AsmType::Float32x4, def);
+      case F32X4::ReplaceLane:
+        return EmitSimdReplaceLane(f, AsmType::Float32x4, def);
+      case F32X4::FromI32X4:
+        return EmitSimdCast<MSimdConvert>(f, AsmType::Int32x4, AsmType::Float32x4, def);
+      case F32X4::FromI32X4Bits:
+        return EmitSimdCast<MSimdReinterpretCast>(f, AsmType::Int32x4, AsmType::Float32x4, def);
+      case F32X4::Swizzle:
+        return EmitSimdSwizzle(f, AsmType::Float32x4, def);
+      case F32X4::Shuffle:
+        return EmitSimdShuffle(f, AsmType::Float32x4, def);
+      case F32X4::Select:
+        return EmitSimdSelect(f, AsmType::Float32x4, IsElementWise(true), def);
+      case F32X4::BitSelect:
+        return EmitSimdSelect(f, AsmType::Float32x4, IsElementWise(false), def);
+      case F32X4::Splat:
+        return EmitSimdSplat(f, AsmType::Float32x4, def);
+      case F32X4::Load:
+        return EmitSimdLoad(f, AsmType::Float32x4, def);
+      case F32X4::Store:
+        return EmitSimdStore(f, AsmType::Float32x4, def);
+      case F32X4::Bad:
+        break;
+    }
+    MOZ_CRASH("unexpected float32x4 expression");
+}
+
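+// Decode a function's serialized body into MIR: set up a FunctionCompiler
+// over the bytecode, emit one statement at a time until the stream is
+// exhausted, then extract the MIRGenerator for lowering.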
+bool
+js::GenerateAsmFunctionMIR(ModuleCompiler& m, LifoAlloc& lifo, AsmFunction& func, MIRGenerator** mir)
+{
+    int64_t before = PRMJ_Now();
+
+    FunctionCompiler f(m, func, lifo);
+    if (!f.init())
+        return false;
+
+    if (!f.prepareEmitMIR(func.argTypes()))
+        return false;
+
+    while (!f.done()) {
+        if (!EmitStatement(f))
+            return false;
+    }
+
+    *mir = f.extractMIR();
+    if (!*mir)
+        return false;
+
+    jit::SpewBeginFunction(*mir, nullptr);
+
+    f.checkPostconditions();
+
+    func.accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC);
+    return true;
+}
+
+bool
+js::GenerateAsmFunctionCode(ModuleCompiler& m, AsmFunction& func, MIRGenerator& mir, LIRGraph& lir)
+{
+    JitContext jitContext(m.runtime(), /* CompileCompartment = */ nullptr, &mir.alloc());
+
+    int64_t before = PRMJ_Now();
+
+    // A single MacroAssembler is reused for all function compilations so
+    // that there is a single linear code segment for each module. To avoid
+    // spiking memory, a LifoAllocScope in the caller frees all MIR/LIR
+    // after each function is compiled. This method is responsible for cleaning
+    // out any dangling pointers that the MacroAssembler may have kept.
+    m.masm().resetForNewCodeGenerator(mir.alloc());
+
+    ScopedJSDeletePtr<CodeGenerator> codegen(js_new<CodeGenerator>(&mir, &lir, &m.masm()));
+    if (!codegen)
+        return false;
+
+    Label* funcEntry;
+    if (!m.getOrCreateFunctionEntry(func.funcIndex(), &funcEntry))
+        return false;
+
+    AsmJSFunctionLabels labels(*funcEntry, m.stackOverflowLabel());
+    if (!codegen->generateAsmJS(&labels))
+        return false;
+
+    func.accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC);
+
+    if (!m.finishGeneratingFunction(func, *codegen, labels))
+        return false;
+
+    // Unlike regular IonMonkey, which links and generates a new JitCode for
+    // every function, we accumulate all the functions in the module in a
+    // single MacroAssembler and link at end. Linking asm.js doesn't require a
+    // CodeGenerator so we can destroy it now (via ScopedJSDeletePtr).
+    return true;
+}
+
+bool
+js::CreateAsmModuleCompiler(ModuleCompileInputs mci, AsmModuleCompilerScope* scope)
+{
+    auto* mc = js_new<ModuleCompiler>(mci);
+    if (!mc || !mc->init())
+        return false;
+    scope->setModule(mc);
+    return true;
+}
+
+AsmModuleCompilerScope::~AsmModuleCompilerScope()
+{
+    if (m_) {
+        js_delete(m_);
+        m_ = nullptr;
+    }
+}
+
+void
+js::FinishAsmModuleCompilation(ModuleCompiler& m, ScopedJSDeletePtr<ModuleCompileResults>* results)
+{
+    m.finish(results);
+}
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/AsmJSCompile.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef jit_AsmJSCompile_h
+#define jit_AsmJSCompile_h
+
+#include "jit/CompileWrappers.h"
+
+namespace js {
+
+class AsmFunction;
+class LifoAlloc;
+class ModuleCompiler;
+class ModuleCompileResults;
+
+namespace jit {
+    class LIRGraph;
+    class MIRGenerator;
+}
+
+struct ModuleCompileInputs
+{
+    jit::CompileCompartment* compartment;
+    jit::CompileRuntime* runtime;
+    bool usesSignalHandlersForOOB;
+
+    ModuleCompileInputs(jit::CompileCompartment* compartment,
+                        jit::CompileRuntime* runtime,
+                        bool usesSignalHandlersForOOB)
+      : compartment(compartment),
+        runtime(runtime),
+        usesSignalHandlersForOOB(usesSignalHandlersForOOB)
+    {}
+};
+
+class MOZ_RAII AsmModuleCompilerScope
+{
+    ModuleCompiler* m_;
+
+    AsmModuleCompilerScope(const AsmModuleCompilerScope&) = delete;
+    AsmModuleCompilerScope(const AsmModuleCompilerScope&&) = delete;
+    AsmModuleCompilerScope& operator=(const AsmModuleCompilerScope&&) = delete;
+
+  public:
+    AsmModuleCompilerScope()
+      : m_(nullptr)
+    {}
+
+    void setModule(ModuleCompiler* m) {
+        MOZ_ASSERT(m);
+        m_ = m;
+    }
+
+    ModuleCompiler& module() const {
+        MOZ_ASSERT(m_);
+        return *m_;
+    }
+
+    ~AsmModuleCompilerScope();
+};
+
+bool CreateAsmModuleCompiler(ModuleCompileInputs mci, AsmModuleCompilerScope* scope);
+bool GenerateAsmFunctionMIR(ModuleCompiler& m, LifoAlloc& lifo, AsmFunction& func, jit::MIRGenerator** mir);
+bool GenerateAsmFunctionCode(ModuleCompiler& m, AsmFunction& func, jit::MIRGenerator& mir, jit::LIRGraph& lir);
+void FinishAsmModuleCompilation(ModuleCompiler& m, ScopedJSDeletePtr<ModuleCompileResults>* results);
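+
+// A sketch of the intended driver sequence (the loop structure and the
+// lifo/lir variables here are illustrative, not part of this header):
+//
+//   AsmModuleCompilerScope scope;
+//   if (!CreateAsmModuleCompiler(inputs, &scope))
+//       return false;
+//   ModuleCompiler& m = scope.module();
+//   for (/* each validated AsmFunction func */) {
+//       jit::MIRGenerator* mir;
+//       if (!GenerateAsmFunctionMIR(m, lifo, func, &mir))
+//           return false;
+//       // ... optimize the MIR and lower it to a LIRGraph lir ...
+//       if (!GenerateAsmFunctionCode(m, func, *mir, lir))
+//           return false;
+//   }
+//   ScopedJSDeletePtr<ModuleCompileResults> results;
+//   FinishAsmModuleCompilation(m, &results);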
+
+} // namespace js
+
+#endif // jit_AsmJSCompile_h
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/AsmJSGlobals.h
@@ -0,0 +1,1148 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef jit_AsmJSGlobals_h
+#define jit_AsmJSGlobals_h
+
+#include "asmjs/AsmJSModule.h"
+
+namespace js {
+namespace wasm {
+
+enum NeedsBoundsCheck {
+    NO_BOUNDS_CHECK,
+    NEEDS_BOUNDS_CHECK
+};
+
+// Represents the type of a general asm.js expression.
+class Type
+{
+  public:
+    enum Which {
+        Fixnum = AsmJSNumLit::Fixnum,
+        Signed = AsmJSNumLit::NegativeInt,
+        Unsigned = AsmJSNumLit::BigUnsigned,
+        DoubleLit = AsmJSNumLit::Double,
+        Float = AsmJSNumLit::Float,
+        Int32x4 = AsmJSNumLit::Int32x4,
+        Float32x4 = AsmJSNumLit::Float32x4,
+        Double,
+        MaybeDouble,
+        MaybeFloat,
+        Floatish,
+        Int,
+        Intish,
+        Void
+    };
+
+  private:
+    Which which_;
+
+  public:
+    Type() : which_(Which(-1)) {}
+    static Type Of(const AsmJSNumLit& lit) {
+        MOZ_ASSERT(lit.hasType());
+        Which which = Type::Which(lit.which());
+        MOZ_ASSERT(which >= Fixnum && which <= Float32x4);
+        Type t;
+        t.which_ = which;
+        return t;
+    }
+    MOZ_IMPLICIT Type(Which w) : which_(w) {}
+    Which which() const { return which_; }
+    MOZ_IMPLICIT Type(AsmJSSimdType type) {
+        switch (type) {
+          case AsmJSSimdType_int32x4:
+            which_ = Int32x4;
+            return;
+          case AsmJSSimdType_float32x4:
+            which_ = Float32x4;
+            return;
+        }
+        MOZ_CRASH("unexpected AsmJSSimdType");
+    }
+
+    bool operator==(Type rhs) const { return which_ == rhs.which_; }
+    bool operator!=(Type rhs) const { return which_ != rhs.which_; }
+
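+    // Subtyping: "a <= b" means a value of type a can be used wherever a
+    // value of type b is expected.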
+    inline bool operator<=(Type rhs) const {
+        switch (rhs.which_) {
+          case Signed:      return isSigned();
+          case Unsigned:    return isUnsigned();
+          case DoubleLit:   return isDoubleLit();
+          case Double:      return isDouble();
+          case Float:       return isFloat();
+          case Int32x4:     return isInt32x4();
+          case Float32x4:   return isFloat32x4();
+          case MaybeDouble: return isMaybeDouble();
+          case MaybeFloat:  return isMaybeFloat();
+          case Floatish:    return isFloatish();
+          case Int:         return isInt();
+          case Intish:      return isIntish();
+          case Fixnum:      return isFixnum();
+          case Void:        return isVoid();
+        }
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected this type");
+    }
+
+    bool isFixnum() const {
+        return which_ == Fixnum;
+    }
+
+    bool isSigned() const {
+        return which_ == Signed || which_ == Fixnum;
+    }
+
+    bool isUnsigned() const {
+        return which_ == Unsigned || which_ == Fixnum;
+    }
+
+    bool isInt() const {
+        return isSigned() || isUnsigned() || which_ == Int;
+    }
+
+    bool isIntish() const {
+        return isInt() || which_ == Intish;
+    }
+
+    bool isDoubleLit() const {
+        return which_ == DoubleLit;
+    }
+
+    bool isDouble() const {
+        return isDoubleLit() || which_ == Double;
+    }
+
+    bool isMaybeDouble() const {
+        return isDouble() || which_ == MaybeDouble;
+    }
+
+    bool isFloat() const {
+        return which_ == Float;
+    }
+
+    bool isMaybeFloat() const {
+        return isFloat() || which_ == MaybeFloat;
+    }
+
+    bool isFloatish() const {
+        return isMaybeFloat() || which_ == Floatish;
+    }
+
+    bool isVoid() const {
+        return which_ == Void;
+    }
+
+    bool isExtern() const {
+        return isDouble() || isSigned();
+    }
+
+    bool isInt32x4() const {
+        return which_ == Int32x4;
+    }
+
+    bool isFloat32x4() const {
+        return which_ == Float32x4;
+    }
+
+    bool isSimd() const {
+        return isInt32x4() || isFloat32x4();
+    }
+
+    bool isVarType() const {
+        return isInt() || isDouble() || isFloat() || isSimd();
+    }
+
+    jit::MIRType toMIRType() const {
+        switch (which_) {
+          case Double:
+          case DoubleLit:
+          case MaybeDouble:
+            return jit::MIRType_Double;
+          case Float:
+          case Floatish:
+          case MaybeFloat:
+            return jit::MIRType_Float32;
+          case Fixnum:
+          case Int:
+          case Signed:
+          case Unsigned:
+          case Intish:
+            return jit::MIRType_Int32;
+          case Int32x4:
+            return jit::MIRType_Int32x4;
+          case Float32x4:
+            return jit::MIRType_Float32x4;
+          case Void:
+            return jit::MIRType_None;
+        }
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid Type");
+    }
+
+    AsmJSSimdType simdType() const {
+        MOZ_ASSERT(isSimd());
+        switch (which_) {
+          case Int32x4:
+            return AsmJSSimdType_int32x4;
+          case Float32x4:
+            return AsmJSSimdType_float32x4;
+          // Scalar types
+          case Double:
+          case DoubleLit:
+          case MaybeDouble:
+          case Float:
+          case MaybeFloat:
+          case Floatish:
+          case Fixnum:
+          case Int:
+          case Signed:
+          case Unsigned:
+          case Intish:
+          case Void:
+            break;
+        }
+        MOZ_CRASH("not a SIMD Type");
+    }
+
+    const char* toChars() const {
+        switch (which_) {
+          case Double:      return "double";
+          case DoubleLit:   return "doublelit";
+          case MaybeDouble: return "double?";
+          case Float:       return "float";
+          case Floatish:    return "floatish";
+          case MaybeFloat:  return "float?";
+          case Fixnum:      return "fixnum";
+          case Int:         return "int";
+          case Signed:      return "signed";
+          case Unsigned:    return "unsigned";
+          case Intish:      return "intish";
+          case Int32x4:     return "int32x4";
+          case Float32x4:   return "float32x4";
+          case Void:        return "void";
+        }
+        MOZ_CRASH("Invalid Type");
+    }
+};
+
+// Represents the subset of Type that can be used as a variable or
+// argument's type. Note: AsmJSCoercion and VarType are kept separate to
+// make very clear the signed/int distinction: a coercion may explicitly sign
+// an *expression* but, when stored as a variable, this signedness information
+// is explicitly thrown away by the asm.js type system. E.g., in
+//
+//   function f(i) {
+//     i = i | 0;             (1)
+//     if (...)
+//         i = foo() >>> 0;
+//     else
+//         i = bar() | 0;
+//     return i | 0;          (2)
+//   }
+//
+// the AsmJSCoercion of (1) is Signed (since | performs ToInt32) but, when
+// translated to a VarType, the result is a plain Int since, as shown, it
+// is legal to assign both Signed and Unsigned (or some other Int) values to
+// it. For (2), the AsmJSCoercion is also Signed but, when translated to a
+// RetType, the result is Signed since callers (asm.js and non-asm.js) can
+// rely on the return value being Signed.
+class VarType
+{
+  public:
+    enum Which {
+        Int = Type::Int,
+        Double = Type::Double,
+        Float = Type::Float,
+        Int32x4 = Type::Int32x4,
+        Float32x4 = Type::Float32x4
+    };
+
+  private:
+    Which which_;
+
+  public:
+    VarType()
+      : which_(Which(-1)) {}
+    MOZ_IMPLICIT VarType(Which w)
+      : which_(w) {}
+    MOZ_IMPLICIT VarType(AsmJSCoercion coercion) {
+        switch (coercion) {
+          case AsmJS_ToInt32: which_ = Int; break;
+          case AsmJS_ToNumber: which_ = Double; break;
+          case AsmJS_FRound: which_ = Float; break;
+          case AsmJS_ToInt32x4: which_ = Int32x4; break;
+          case AsmJS_ToFloat32x4: which_ = Float32x4; break;
+        }
+    }
+    static VarType Of(const AsmJSNumLit& lit) {
+        MOZ_ASSERT(lit.hasType());
+        switch (lit.which()) {
+          case AsmJSNumLit::Fixnum:
+          case AsmJSNumLit::NegativeInt:
+          case AsmJSNumLit::BigUnsigned:
+            return Int;
+          case AsmJSNumLit::Double:
+            return Double;
+          case AsmJSNumLit::Float:
+            return Float;
+          case AsmJSNumLit::Int32x4:
+            return Int32x4;
+          case AsmJSNumLit::Float32x4:
+            return Float32x4;
+          case AsmJSNumLit::OutOfRangeInt:
+            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("can't be out of range int");
+        }
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected literal type");
+    }
+
+    Which which() const {
+        return which_;
+    }
+    Type toType() const {
+        return Type::Which(which_);
+    }
+    jit::MIRType toMIRType() const {
+        switch(which_) {
+          case Int:       return jit::MIRType_Int32;
+          case Double:    return jit::MIRType_Double;
+          case Float:     return jit::MIRType_Float32;
+          case Int32x4:   return jit::MIRType_Int32x4;
+          case Float32x4: return jit::MIRType_Float32x4;
+        }
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("VarType can only be Int, SIMD, Double or Float");
+    }
+    AsmJSCoercion toCoercion() const {
+        switch(which_) {
+          case Int:       return AsmJS_ToInt32;
+          case Double:    return AsmJS_ToNumber;
+          case Float:     return AsmJS_FRound;
+          case Int32x4:   return AsmJS_ToInt32x4;
+          case Float32x4: return AsmJS_ToFloat32x4;
+        }
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("VarType can only be Int, SIMD, Double or Float");
+    }
+    static VarType FromCheckedType(Type type) {
+        MOZ_ASSERT(type.isInt() || type.isMaybeDouble() || type.isFloatish() || type.isSimd());
+        if (type.isMaybeDouble())
+            return Double;
+        else if (type.isFloatish())
+            return Float;
+        else if (type.isInt())
+            return Int;
+        else if (type.isInt32x4())
+            return Int32x4;
+        else if (type.isFloat32x4())
+            return Float32x4;
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unknown type in FromCheckedType");
+    }
+    bool operator==(VarType rhs) const { return which_ == rhs.which_; }
+    bool operator!=(VarType rhs) const { return which_ != rhs.which_; }
+};
+
+// Represents the subset of Type that can be used as the return type of a
+// function.
+class RetType
+{
+  public:
+    enum Which {
+        Void      = Type::Void,
+        Signed    = Type::Signed,
+        Double    = Type::Double,
+        Float     = Type::Float,
+        Int32x4   = Type::Int32x4,
+        Float32x4 = Type::Float32x4
+    };
+
+  private:
+    Which which_;
+
+  public:
+    RetType() : which_(Which(-1)) {}
+    MOZ_IMPLICIT RetType(Which w) : which_(w) {}
+    MOZ_IMPLICIT RetType(AsmJSCoercion coercion) {
+        which_ = Which(-1);  // initialize to silence GCC warning
+        switch (coercion) {
+          case AsmJS_ToInt32:     which_ = Signed;    break;
+          case AsmJS_ToNumber:    which_ = Double;    break;
+          case AsmJS_FRound:      which_ = Float;     break;
+          case AsmJS_ToInt32x4:   which_ = Int32x4;   break;
+          case AsmJS_ToFloat32x4: which_ = Float32x4; break;
+        }
+    }
+    Which which() const {
+        return which_;
+    }
+    Type toType() const {
+        return Type::Which(which_);
+    }
+    AsmJSModule::ReturnType toModuleReturnType() const {
+        switch (which_) {
+          case Void:      return AsmJSModule::Return_Void;
+          case Signed:    return AsmJSModule::Return_Int32;
+          case Float: // will be converted to a Double
+          case Double:    return AsmJSModule::Return_Double;
+          case Int32x4:   return AsmJSModule::Return_Int32x4;
+          case Float32x4: return AsmJSModule::Return_Float32x4;
+        }
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected return type");
+    }
+    jit::MIRType toMIRType() const {
+        switch (which_) {
+          case Void:      return jit::MIRType_None;
+          case Signed:    return jit::MIRType_Int32;
+          case Double:    return jit::MIRType_Double;
+          case Float:     return jit::MIRType_Float32;
+          case Int32x4:   return jit::MIRType_Int32x4;
+          case Float32x4: return jit::MIRType_Float32x4;
+        }
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected return type");
+    }
+    bool operator==(RetType rhs) const { return which_ == rhs.which_; }
+    bool operator!=(RetType rhs) const { return which_ != rhs.which_; }
+};
+
+inline jit::MIRType ToMIRType(jit::MIRType t) { return t; }
+inline jit::MIRType ToMIRType(VarType t) { return t.toMIRType(); }
+
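+// ABIArgIter maps each type in |types_| to the location where the system ABI
+// passes the corresponding argument: a general-purpose register, a
+// floating-point register, or a stack slot, as decided by
+// jit::ABIArgGenerator.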
+template <class VecT>
+class ABIArgIter
+{
+    jit::ABIArgGenerator gen_;
+    const VecT& types_;
+    unsigned i_;
+
+    void settle() { if (!done()) gen_.next(ToMIRType(types_[i_])); }
+
+  public:
+    explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
+    void operator++(int) { MOZ_ASSERT(!done()); i_++; settle(); }
+    bool done() const { return i_ == types_.length(); }
+
+    jit::ABIArg* operator->() { MOZ_ASSERT(!done()); return &gen_.current(); }
+    jit::ABIArg& operator*() { MOZ_ASSERT(!done()); return gen_.current(); }
+
+    unsigned index() const { MOZ_ASSERT(!done()); return i_; }
+    jit::MIRType mirType() const { MOZ_ASSERT(!done()); return ToMIRType(types_[i_]); }
+    uint32_t stackBytesConsumedSoFar() const { return gen_.stackBytesConsumedSoFar(); }
+};
+
+typedef Vector<jit::MIRType, 8> MIRTypeVector;
+typedef ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
+
+typedef Vector<VarType, 8, LifoAllocPolicy<Fallible>> VarTypeVector;
+typedef ABIArgIter<VarTypeVector> ABIArgTypeIter;
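+
+// Illustrative use (not part of this patch), assuming a populated
+// VarTypeVector |args|: compute where each argument lives and how much stack
+// the call consumes.
+//
+//   uint32_t stackBytes = 0;
+//   for (ABIArgTypeIter i(args); !i.done(); i++) {
+//       if (i->kind() == jit::ABIArg::Stack)
+//           stackBytes = i.stackBytesConsumedSoFar();
+//   }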
+
+class Signature
+{
+    VarTypeVector argTypes_;
+    RetType retType_;
+
+  public:
+    explicit Signature(LifoAlloc& alloc)
+      : argTypes_(alloc) {}
+    Signature(LifoAlloc& alloc, RetType retType)
+      : argTypes_(alloc), retType_(retType) {}
+    Signature(VarTypeVector&& argTypes, RetType retType)
+      : argTypes_(Move(argTypes)), retType_(Move(retType)) {}
+    Signature(Signature&& rhs)
+      : argTypes_(Move(rhs.argTypes_)), retType_(Move(rhs.retType_)) {}
+
+    bool copy(const Signature& rhs) {
+        if (!argTypes_.resize(rhs.argTypes_.length()))
+            return false;
+        for (unsigned i = 0; i < argTypes_.length(); i++)
+            argTypes_[i] = rhs.argTypes_[i];
+        retType_ = rhs.retType_;
+        return true;
+    }
+
+    bool appendArg(VarType type) { return argTypes_.append(type); }
+    VarType arg(unsigned i) const { return argTypes_[i]; }
+    const VarTypeVector& args() const { return argTypes_; }
+    VarTypeVector&& extractArgs() { return Move(argTypes_); }
+
+    RetType retType() const { return retType_; }
+};
+
+// Signature that can only be allocated with a LifoAlloc.
+class LifoSignature : public Signature
+{
+    explicit LifoSignature(Signature&& rhs)
+      : Signature(Move(rhs))
+    {}
+
+    LifoSignature(const LifoSignature&) = delete;
+    LifoSignature(const LifoSignature&&) = delete;
+    LifoSignature& operator=(const LifoSignature&) = delete;
+    LifoSignature& operator=(const LifoSignature&&) = delete;
+
+  public:
+    static LifoSignature* new_(LifoAlloc& lifo, Signature&& sig) {
+        void* mem = lifo.alloc(sizeof(LifoSignature));
+        if (!mem)
+            return nullptr;
+        return new (mem) LifoSignature(Move(sig));
+    }
+};
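+
+// Illustrative use (not part of this patch), assuming a LifoAlloc |lifo|: a
+// Signature is assembled fallibly, then moved into lifo-owned storage so
+// call sites can hold a raw pointer to it.
+//
+//   Signature sig(lifo, RetType::Signed);
+//   if (!sig.appendArg(VarType::Int))
+//       return false;
+//   LifoSignature* lifoSig = LifoSignature::new_(lifo, Move(sig));
+//   if (!lifoSig)
+//       return false;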
+
+enum class Stmt : uint8_t {
+    Ret,
+
+    Block,
+
+    IfThen,
+    IfElse,
+    Switch,
+
+    While,
+    DoWhile,
+
+    ForInitInc,
+    ForInitNoInc,
+    ForNoInitNoInc,
+    ForNoInitInc,
+
+    Label,
+    Continue,
+    ContinueLabel,
+    Break,
+    BreakLabel,
+
+    CallInternal,
+    CallIndirect,
+    CallImport,
+
+    AtomicsFence,
+
+    // asm.js specific
+    // Expression statements (to be removed in the future)
+    I32Expr,
+    F32Expr,
+    F64Expr,
+    I32X4Expr,
+    F32X4Expr,
+
+    Id,
+    Noop,
+    InterruptCheckHead,
+    InterruptCheckLoop,
+
+    DebugCheckPoint,
+
+    Bad
+};
+
+enum class I32 : uint8_t {
+    // Common opcodes
+    GetLocal,
+    SetLocal,
+    GetGlobal,
+    SetGlobal,
+
+    CallInternal,
+    CallIndirect,
+    CallImport,
+
+    Conditional,
+    Comma,
+
+    Literal,
+
+    // Binary arith opcodes
+    Add,
+    Sub,
+    Mul,
+    SDiv,
+    SMod,
+    UDiv,
+    UMod,
+    Min,
+    Max,
+
+    // Unary arith opcodes
+    Not,
+    Neg,
+
+    // Bitwise opcodes
+    BitOr,
+    BitAnd,
+    BitXor,
+    BitNot,
+
+    Lsh,
+    ArithRsh,
+    LogicRsh,
+
+    // Conversion opcodes
+    FromF32,
+    FromF64,
+
+    // Math builtin opcodes
+    Clz,
+    Abs,
+
+    // Comparison opcodes
+    // Ordering matters (EmitComparison expects signed opcodes to be placed
+    // before unsigned opcodes)
+    EqI32,
+    NeI32,
+    SLtI32,
+    SLeI32,
+    SGtI32,
+    SGeI32,
+    ULtI32,
+    ULeI32,
+    UGtI32,
+    UGeI32,
+
+    EqF32,
+    NeF32,
+    LtF32,
+    LeF32,
+    GtF32,
+    GeF32,
+
+    EqF64,
+    NeF64,
+    LtF64,
+    LeF64,
+    GtF64,
+    GeF64,
+
+    // Heap access opcodes
+    SLoad8,
+    SLoad16,
+    SLoad32,
+    ULoad8,
+    ULoad16,
+    ULoad32,
+    Store8,
+    Store16,
+    Store32,
+
+    // Atomics opcodes
+    AtomicsCompareExchange,
+    AtomicsExchange,
+    AtomicsLoad,
+    AtomicsStore,
+    AtomicsBinOp,
+
+    // SIMD opcodes
+    I32X4SignMask,
+    F32X4SignMask,
+
+    I32X4ExtractLane,
+
+    // Specific to AsmJS
+    Id,
+
+    Bad
+};
+
+enum class F32 : uint8_t {
+    // Common opcodes
+    GetLocal,
+    SetLocal,
+    GetGlobal,
+    SetGlobal,
+
+    CallInternal,
+    CallIndirect,
+    CallImport,
+
+    Conditional,
+    Comma,
+
+    Literal,
+
+    // Binary arith opcodes
+    Add,
+    Sub,
+    Mul,
+    Div,
+    Min,
+    Max,
+    Neg,
+
+    // Math builtin opcodes
+    Abs,
+    Sqrt,
+    Ceil,
+    Floor,
+
+    // Conversion opcodes
+    FromF64,
+    FromS32,
+    FromU32,
+
+    // Heap access opcodes
+    Load,
+    StoreF32,
+    StoreF64,
+
+    // SIMD opcodes
+    F32X4ExtractLane,
+
+    // asm.js specific
+    Id,
+    Bad
+};
+
+enum class F64 : uint8_t {
+    // Common opcodes
+    GetLocal,
+    SetLocal,
+    GetGlobal,
+    SetGlobal,
+
+    CallInternal,
+    CallIndirect,
+    CallImport,
+
+    Conditional,
+    Comma,
+
+    Literal,
+
+    // Binary arith opcodes
+    Add,
+    Sub,
+    Mul,
+    Div,
+    Min,
+    Max,
+    Mod,
+    Neg,
+
+    // Math builtin opcodes
+    Abs,
+    Sqrt,
+    Ceil,
+    Floor,
+    Sin,
+    Cos,
+    Tan,
+    Asin,
+    Acos,
+    Atan,
+    Exp,
+    Log,
+    Pow,
+    Atan2,
+
+    // Conversion opcodes
+    FromF32,
+    FromS32,
+    FromU32,
+
+    // Heap access opcodes
+    Load,
+    StoreF32,
+    StoreF64,
+
+    // asm.js specific
+    Id,
+    Bad
+};
+
+enum class I32X4 : uint8_t {
+    // Common opcodes
+    GetLocal,
+    SetLocal,
+
+    GetGlobal,
+    SetGlobal,
+
+    CallInternal,
+    CallIndirect,
+    CallImport,
+
+    Conditional,
+    Comma,
+
+    Literal,
+
+    // Specific opcodes
+    Ctor,
+
+    Unary,
+
+    Binary,
+    BinaryCompI32X4,
+    BinaryCompF32X4,
+    BinaryBitwise,
+    BinaryShift,
+
+    ReplaceLane,
+
+    FromF32X4,
+    FromF32X4Bits,
+
+    Swizzle,
+    Shuffle,
+    Select,
+    BitSelect,
+    Splat,
+
+    Load,
+    Store,
+
+    // asm.js specific
+    Id,
+    Bad
+};
+
+enum class F32X4 : uint8_t {
+    // Common opcodes
+    GetLocal,
+    SetLocal,
+
+    GetGlobal,
+    SetGlobal,
+
+    CallInternal,
+    CallIndirect,
+    CallImport,
+
+    Conditional,
+    Comma,
+
+    Literal,
+
+    // Specific opcodes
+    Ctor,
+
+    Unary,
+
+    Binary,
+    BinaryBitwise,
+
+    ReplaceLane,
+
+    FromI32X4,
+    FromI32X4Bits,
+    Swizzle,
+    Shuffle,
+    Select,
+    BitSelect,
+    Splat,
+
+    Load,
+    Store,
+
+    // asm.js specific
+    Id,
+    Bad
+};
+
+} // namespace wasm
+
+class AsmFunction
+{
+  public:
+    typedef Vector<AsmJSNumLit, 8, LifoAllocPolicy<Fallible>> VarInitializerVector;
+
+  private:
+    typedef Vector<uint8_t, 4096, LifoAllocPolicy<Fallible>> Bytecode;
+
+    VarInitializerVector varInitializers_;
+    Bytecode bytecode_;
+
+    wasm::VarTypeVector argTypes_;
+    wasm::RetType returnedType_;
+
+    PropertyName* name_;
+
+    unsigned funcIndex_;
+    unsigned srcBegin_;
+    unsigned lineno_;
+    unsigned column_;
+    unsigned compileTime_;
+
+  public:
+    explicit AsmFunction(LifoAlloc& alloc)
+      : varInitializers_(alloc),
+        bytecode_(alloc),
+        argTypes_(alloc),
+        returnedType_(wasm::RetType::Which(-1)),
+        name_(nullptr),
+        funcIndex_(-1),
+        srcBegin_(-1),
+        lineno_(-1),
+        column_(-1),
+        compileTime_(-1)
+    {}
+
+    bool init(const wasm::VarTypeVector& args) {
+        if (!argTypes_.initCapacity(args.length()))
+            return false;
+        for (size_t i = 0; i < args.length(); i++)
+            argTypes_.infallibleAppend(args[i]);
+        return true;
+    }
+
+    bool finish(const wasm::VarTypeVector& args, PropertyName* name, unsigned funcIndex,
+                unsigned srcBegin, unsigned lineno, unsigned column, unsigned compileTime)
+    {
+        if (!argTypes_.initCapacity(args.length()))
+            return false;
+        for (size_t i = 0; i < args.length(); i++)
+            argTypes_.infallibleAppend(args[i]);
+
+        MOZ_ASSERT(name_ == nullptr);
+        name_ = name;
+
+        MOZ_ASSERT(funcIndex_ == unsigned(-1));
+        funcIndex_ = funcIndex;
+
+        MOZ_ASSERT(srcBegin_ == unsigned(-1));
+        srcBegin_ = srcBegin;
+
+        MOZ_ASSERT(lineno_ == unsigned(-1));
+        lineno_ = lineno;
+
+        MOZ_ASSERT(column_ == unsigned(-1));
+        column_ = column;
+
+        MOZ_ASSERT(compileTime_ == unsigned(-1));
+        compileTime_ = compileTime;
+        return true;
+    }
+
+  private:
+    AsmFunction(const AsmFunction&) = delete;
+    AsmFunction(AsmFunction&& other) = delete;
+    AsmFunction& operator=(const AsmFunction&) = delete;
+
+    // Helper functions
+    template<class T> size_t writePrimitive(T v) {
+        size_t writeAt = bytecode_.length();
+        if (!bytecode_.append(reinterpret_cast<uint8_t*>(&v), sizeof(T)))
+            return -1;
+        return writeAt;
+    }
+
+    template<class T> T readPrimitive(size_t* pc) const {
+        MOZ_ASSERT(*pc + sizeof(T) <= bytecode_.length());
+        T ret;
+        memcpy(&ret, &bytecode_[*pc], sizeof(T));
+        *pc += sizeof(T);
+        return ret;
+    }
+
+  public:
+    size_t writeU8(uint8_t i)   { return writePrimitive<uint8_t>(i); }
+    size_t writeI32(int32_t i)  { return writePrimitive<int32_t>(i); }
+    size_t writeU32(uint32_t i) { return writePrimitive<uint32_t>(i); }
+    size_t writeF32(float f)    { return writePrimitive<float>(f); }
+    size_t writeF64(double d)   { return writePrimitive<double>(d); }
+
+    size_t writeI32X4(const int32_t* i4) {
+        size_t pos = bytecode_.length();
+        for (size_t i = 0; i < 4; i++)
+            writePrimitive<int32_t>(i4[i]);
+        return pos;
+    }
+    size_t writeF32X4(const float* f4) {
+        size_t pos = bytecode_.length();
+        for (size_t i = 0; i < 4; i++)
+            writePrimitive<float>(f4[i]);
+        return pos;
+    }
+
+    uint8_t  readU8 (size_t* pc) const { return readPrimitive<uint8_t>(pc); }
+    int32_t  readI32(size_t* pc) const { return readPrimitive<int32_t>(pc); }
+    float    readF32(size_t* pc) const { return readPrimitive<float>(pc); }
+    uint32_t readU32(size_t* pc) const { return readPrimitive<uint32_t>(pc); }
+    double   readF64(size_t* pc) const { return readPrimitive<double>(pc); }
+    wasm::LifoSignature* readSignature(size_t* pc) const { return readPrimitive<wasm::LifoSignature*>(pc); }
+
+    jit::SimdConstant readI32X4(size_t* pc) const {
+        int32_t x = readI32(pc);
+        int32_t y = readI32(pc);
+        int32_t z = readI32(pc);
+        int32_t w = readI32(pc);
+        return jit::SimdConstant::CreateX4(x, y, z, w);
+    }
+    jit::SimdConstant readF32X4(size_t* pc) const {
+        float x = readF32(pc);
+        float y = readF32(pc);
+        float z = readF32(pc);
+        float w = readF32(pc);
+        return jit::SimdConstant::CreateX4(x, y, z, w);
+    }
+
+#ifdef DEBUG
+    bool pcIsPatchable(size_t pc, unsigned size) const {
+        bool patchable = true;
+        for (unsigned i = 0; patchable && i < size; i++)
+            patchable &= wasm::Stmt(bytecode_[pc + i]) == wasm::Stmt::Bad;
+        return patchable;
+    }
+#endif // DEBUG
+
+    void patchU8(size_t pc, uint8_t i) {
+        MOZ_ASSERT(pcIsPatchable(pc, sizeof(uint8_t)));
+        bytecode_[pc] = i;
+    }
+
+    template<class T>
+    void patch32(size_t pc, T i) {
+        static_assert(sizeof(T) == sizeof(uint32_t),
+                      "patch32 must be used with 32-bits wide types");
+        MOZ_ASSERT(pcIsPatchable(pc, sizeof(uint32_t)));
+        memcpy(&bytecode_[pc], &i, sizeof(uint32_t));
+    }
+
+    void patchSignature(size_t pc, const wasm::LifoSignature* ptr) {
+        MOZ_ASSERT(pcIsPatchable(pc, sizeof(wasm::LifoSignature*)));
+        memcpy(&bytecode_[pc], &ptr, sizeof(wasm::LifoSignature*));
+    }
+
+    // Setters
+    void accumulateCompileTime(unsigned ms) {
+        compileTime_ += ms;
+    }
+    bool addVariable(const AsmJSNumLit& init) {
+        return varInitializers_.append(init);
+    }
+    void setReturnedType(wasm::RetType retType) {
+        MOZ_ASSERT(returnedType_ == wasm::RetType::Which(-1));
+        returnedType_ = retType;
+    }
+
+    // Read-only interface
+    PropertyName* name() const { return name_; }
+    unsigned funcIndex() const { return funcIndex_; }
+    unsigned srcBegin() const { return srcBegin_; }
+    unsigned lineno() const { return lineno_; }
+    unsigned column() const { return column_; }
+    unsigned compileTime() const { return compileTime_; }
+
+    size_t size() const { return bytecode_.length(); }
+
+    const wasm::VarTypeVector& argTypes() const { return argTypes_; }
+
+    const VarInitializerVector& varInitializers() const { return varInitializers_; }
+    size_t numLocals() const { return argTypes_.length() + varInitializers_.length(); }
+    wasm::RetType returnedType() const {
+        MOZ_ASSERT(returnedType_ != wasm::RetType::Which(-1));
+        return returnedType_;
+    }
+};
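+
+// The bytecode writers above support a two-phase pattern: reserve placeholder
+// bytes now, backpatch them once the value is known. In DEBUG builds,
+// pcIsPatchable requires every placeholder byte to equal wasm::Stmt::Bad. A
+// sketch (not part of this patch), assuming an AsmFunction |f|; |numStmts|
+// stands in for the eventual value:
+//
+//   uint8_t bad = uint8_t(wasm::Stmt::Bad);
+//   size_t patchAt = f.writeU8(bad);               // reserve 4 patchable bytes
+//   f.writeU8(bad); f.writeU8(bad); f.writeU8(bad);
+//   ...
+//   f.patch32(patchAt, numStmts);                  // write the real value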
+
+const size_t LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;
+
+class ModuleCompileResults
+{
+  public:
+    struct SlowFunction
+    {
+        SlowFunction(PropertyName* name, unsigned ms, unsigned line, unsigned column)
+         : name(name), ms(ms), line(line), column(column)
+        {}
+
+        PropertyName* name;
+        unsigned ms;
+        unsigned line;
+        unsigned column;
+    };
+
+    typedef Vector<SlowFunction                  , 0, SystemAllocPolicy> SlowFunctionVector;
+    typedef Vector<jit::Label*                   , 8, SystemAllocPolicy> LabelVector;
+    typedef Vector<AsmJSModule::FunctionCodeRange, 8, SystemAllocPolicy> FunctionCodeRangeVector;
+    typedef Vector<jit::IonScriptCounts*         , 0, SystemAllocPolicy> ScriptCountVector;
+#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+    typedef Vector<AsmJSModule::ProfiledFunction , 0, SystemAllocPolicy> ProfiledFunctionVector;
+#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+
+  private:
+    LifoAlloc               lifo_;
+    jit::MacroAssembler     masm_;
+
+    SlowFunctionVector      slowFunctions_;
+    LabelVector             functionEntries_;
+    FunctionCodeRangeVector codeRanges_;
+    ScriptCountVector       functionCounts_;
+#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+    ProfiledFunctionVector  profiledFunctions_;
+#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+
+    jit::NonAssertingLabel stackOverflowLabel_;
+    jit::NonAssertingLabel asyncInterruptLabel_;
+    jit::NonAssertingLabel syncInterruptLabel_;
+    jit::NonAssertingLabel onDetachedLabel_;
+    jit::NonAssertingLabel onConversionErrorLabel_;
+    jit::NonAssertingLabel onOutOfBoundsLabel_;
+    int64_t                usecBefore_;
+
+  public:
+    ModuleCompileResults()
+      : lifo_(LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
+        masm_(jit::MacroAssembler::AsmJSToken()),
+        usecBefore_(PRMJ_Now())
+    {}
+
+    jit::MacroAssembler& masm()           { return masm_; }
+    jit::Label& stackOverflowLabel()      { return stackOverflowLabel_; }
+    jit::Label& asyncInterruptLabel()     { return asyncInterruptLabel_; }
+    jit::Label& syncInterruptLabel()      { return syncInterruptLabel_; }
+    jit::Label& onOutOfBoundsLabel()      { return onOutOfBoundsLabel_; }
+    jit::Label& onDetachedLabel()         { return onDetachedLabel_; }
+    jit::Label& onConversionErrorLabel()  { return onConversionErrorLabel_; }
+    int64_t usecBefore()                  { return usecBefore_; }
+
+    SlowFunctionVector& slowFunctions()   { return slowFunctions_; }
+
+    size_t numFunctionEntries() const     { return functionEntries_.length(); }
+    jit::Label* functionEntry(unsigned i) { return functionEntries_[i]; }
+
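+    // Function entry labels are created lazily: asking for index |i| grows
+    // the vector up to |i|, allocating each Label from lifo_ so it lives as
+    // long as this ModuleCompileResults.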
+    bool getOrCreateFunctionEntry(unsigned i, jit::Label** label) {
+        if (i == UINT32_MAX)
+            return false;
+        while (functionEntries_.length() <= i) {
+            jit::Label* newEntry = lifo_.new_<jit::Label>();
+            if (!newEntry || !functionEntries_.append(newEntry))
+                return false;
+        }
+        *label = functionEntries_[i];
+        return true;
+    }
+
+    size_t numCodeRanges() const { return codeRanges_.length(); }
+    bool addCodeRange(AsmJSModule::FunctionCodeRange range) { return codeRanges_.append(range); }
+    AsmJSModule::FunctionCodeRange& codeRange(unsigned i) { return codeRanges_[i]; }
+
+    size_t numFunctionCounts() const { return functionCounts_.length(); }
+    bool addFunctionCounts(jit::IonScriptCounts* counts) { return functionCounts_.append(counts); }
+    jit::IonScriptCounts* functionCount(unsigned i) { return functionCounts_[i]; }
+
+#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+    size_t numProfiledFunctions() const { return profiledFunctions_.length(); }
+    bool addProfiledFunction(AsmJSModule::ProfiledFunction func) {
+        return profiledFunctions_.append(func);
+    }
+    AsmJSModule::ProfiledFunction& profiledFunction(unsigned i) {
+        return profiledFunctions_[i];
+    }
+#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
+};
+
+} // namespace js
+
+#endif // jit_AsmJSGlobals_h
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -24,18 +24,19 @@
 #ifdef MOZ_VTUNE
 # include "vtune/VTuneWrapper.h"
 #endif
 
 #include "jsmath.h"
 #include "jsprf.h"
 #include "jsutil.h"
 
+#include "asmjs/AsmJSCompile.h"
+#include "asmjs/AsmJSGlobals.h"
 #include "asmjs/AsmJSLink.h"
-#include "asmjs/AsmJSModule.h"
 #include "asmjs/AsmJSSignalHandlers.h"
 #include "builtin/SIMD.h"
 #include "frontend/Parser.h"
 #include "jit/AtomicOperations.h"
 #include "jit/CodeGenerator.h"
 #include "jit/CompileWrappers.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
@@ -51,32 +52,31 @@
 #include "frontend/ParseNode-inl.h"
 #include "frontend/Parser-inl.h"
 #include "jit/AtomicOperations-inl.h"
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::frontend;
 using namespace js::jit;
+using namespace js::wasm;
 
 using mozilla::AddToHash;
 using mozilla::ArrayLength;
 using mozilla::CountLeadingZeroes32;
 using mozilla::DebugOnly;
 using mozilla::HashGeneric;
 using mozilla::IsNaN;
 using mozilla::IsNegativeZero;
 using mozilla::Maybe;
 using mozilla::Move;
 using mozilla::PositiveInfinity;
 using mozilla::UniquePtr;
 using JS::GenericNaN;
 
-static const size_t LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;
-
 /*****************************************************************************/
 // ParseNode utilities
 
 static inline ParseNode*
 NextNode(ParseNode* pn)
 {
     return pn->pn_next;
 }
@@ -521,516 +521,32 @@ ParseVarOrConstStatement(AsmJSParser& pa
         return false;
 
     MOZ_ASSERT((*var)->isKind(PNK_VAR) || (*var)->isKind(PNK_CONST));
     return true;
 }
 
 /*****************************************************************************/
 
-namespace {
-
-// Respresents the type of a general asm.js expression.
-class Type
-{
-  public:
-    enum Which {
-        Fixnum = AsmJSNumLit::Fixnum,
-        Signed = AsmJSNumLit::NegativeInt,
-        Unsigned = AsmJSNumLit::BigUnsigned,
-        DoubleLit = AsmJSNumLit::Double,
-        Float = AsmJSNumLit::Float,
-        Int32x4 = AsmJSNumLit::Int32x4,
-        Float32x4 = AsmJSNumLit::Float32x4,
-        Double,
-        MaybeDouble,
-        MaybeFloat,
-        Floatish,
-        Int,
-        Intish,
-        Void
-    };
-
-  private:
-    Which which_;
-
-  public:
-    Type() : which_(Which(-1)) {}
-    static Type Of(const AsmJSNumLit& lit) {
-        MOZ_ASSERT(lit.hasType());
-        Which which = Type::Which(lit.which());
-        MOZ_ASSERT(which >= Fixnum && which <= Float32x4);
-        Type t;
-        t.which_ = which;
-        return t;
-    }
-    MOZ_IMPLICIT Type(Which w) : which_(w) {}
-    Which which() const { return which_; }
-    MOZ_IMPLICIT Type(AsmJSSimdType type) {
-        switch (type) {
-          case AsmJSSimdType_int32x4:
-            which_ = Int32x4;
-            return;
-          case AsmJSSimdType_float32x4:
-            which_ = Float32x4;
-            return;
-        }
-        MOZ_CRASH("unexpected AsmJSSimdType");
-    }
-
-    bool operator==(Type rhs) const { return which_ == rhs.which_; }
-    bool operator!=(Type rhs) const { return which_ != rhs.which_; }
-
-    inline bool operator<=(Type rhs) const {
-        switch (rhs.which_) {
-          case Signed:      return isSigned();
-          case Unsigned:    return isUnsigned();
-          case DoubleLit:   return isDoubleLit();
-          case Double:      return isDouble();
-          case Float:       return isFloat();
-          case Int32x4:     return isInt32x4();
-          case Float32x4:   return isFloat32x4();
-          case MaybeDouble: return isMaybeDouble();
-          case MaybeFloat:  return isMaybeFloat();
-          case Floatish:    return isFloatish();
-          case Int:         return isInt();
-          case Intish:      return isIntish();
-          case Fixnum:      return isFixnum();
-          case Void:        return isVoid();
-        }
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected this type");
-    }
-
-    bool isFixnum() const {
-        return which_ == Fixnum;
-    }
-
-    bool isSigned() const {
-        return which_ == Signed || which_ == Fixnum;
-    }
-
-    bool isUnsigned() const {
-        return which_ == Unsigned || which_ == Fixnum;
-    }
-
-    bool isInt() const {
-        return isSigned() || isUnsigned() || which_ == Int;
-    }
-
-    bool isIntish() const {
-        return isInt() || which_ == Intish;
-    }
-
-    bool isDoubleLit() const {
-        return which_ == DoubleLit;
-    }
-
-    bool isDouble() const {
-        return isDoubleLit() || which_ == Double;
-    }
-
-    bool isMaybeDouble() const {
-        return isDouble() || which_ == MaybeDouble;
-    }
-
-    bool isFloat() const {
-        return which_ == Float;
-    }
-
-    bool isMaybeFloat() const {
-        return isFloat() || which_ == MaybeFloat;
-    }
-
-    bool isFloatish() const {
-        return isMaybeFloat() || which_ == Floatish;
-    }
-
-    bool isVoid() const {
-        return which_ == Void;
-    }
-
-    bool isExtern() const {
-        return isDouble() || isSigned();
-    }
-
-    bool isInt32x4() const {
-        return which_ == Int32x4;
-    }
-
-    bool isFloat32x4() const {
-        return which_ == Float32x4;
-    }
-
-    bool isSimd() const {
-        return isInt32x4() || isFloat32x4();
-    }
-
-    bool isVarType() const {
-        return isInt() || isDouble() || isFloat() || isSimd();
-    }
-
-    MIRType toMIRType() const {
-        switch (which_) {
-          case Double:
-          case DoubleLit:
-          case MaybeDouble:
-            return MIRType_Double;
-          case Float:
-          case Floatish:
-          case MaybeFloat:
-            return MIRType_Float32;
-          case Fixnum:
-          case Int:
-          case Signed:
-          case Unsigned:
-          case Intish:
-            return MIRType_Int32;
-          case Int32x4:
-            return MIRType_Int32x4;
-          case Float32x4:
-            return MIRType_Float32x4;
-          case Void:
-            return MIRType_None;
-        }
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid Type");
-    }
-
-    AsmJSSimdType simdType() const {
-        MOZ_ASSERT(isSimd());
-        switch (which_) {
-          case Int32x4:
-            return AsmJSSimdType_int32x4;
-          case Float32x4:
-            return AsmJSSimdType_float32x4;
-          // Scalar types
-          case Double:
-          case DoubleLit:
-          case MaybeDouble:
-          case Float:
-          case MaybeFloat:
-          case Floatish:
-          case Fixnum:
-          case Int:
-          case Signed:
-          case Unsigned:
-          case Intish:
-          case Void:
-            break;
-        }
-        MOZ_CRASH("not a SIMD Type");
-    }
-
-    const char* toChars() const {
-        switch (which_) {
-          case Double:      return "double";
-          case DoubleLit:   return "doublelit";
-          case MaybeDouble: return "double?";
-          case Float:       return "float";
-          case Floatish:    return "floatish";
-          case MaybeFloat:  return "float?";
-          case Fixnum:      return "fixnum";
-          case Int:         return "int";
-          case Signed:      return "signed";
-          case Unsigned:    return "unsigned";
-          case Intish:      return "intish";
-          case Int32x4:     return "int32x4";
-          case Float32x4:   return "float32x4";
-          case Void:        return "void";
-        }
-        MOZ_CRASH("Invalid Type");
-    }
-};
-
-} /* anonymous namespace */
-
-// Represents the subset of Type that can be used as the return type of a
-// function.
-class RetType
-{
-  public:
-    enum Which {
-        Void = Type::Void,
-        Signed = Type::Signed,
-        Double = Type::Double,
-        Float = Type::Float,
-        Int32x4 = Type::Int32x4,
-        Float32x4 = Type::Float32x4
-    };
-
-  private:
-    Which which_;
-
-  public:
-    RetType() : which_(Which(-1)) {}
-    MOZ_IMPLICIT RetType(Which w) : which_(w) {}
-    MOZ_IMPLICIT RetType(AsmJSCoercion coercion) {
-        which_ = Which(-1);  // initialize to silence GCC warning
-        switch (coercion) {
-          case AsmJS_ToInt32: which_ = Signed; break;
-          case AsmJS_ToNumber: which_ = Double; break;
-          case AsmJS_FRound: which_ = Float; break;
-          case AsmJS_ToInt32x4: which_ = Int32x4; break;
-          case AsmJS_ToFloat32x4: which_ = Float32x4; break;
-        }
-    }
-    Which which() const {
-        return which_;
-    }
-    Type toType() const {
-        return Type::Which(which_);
-    }
-    AsmJSModule::ReturnType toModuleReturnType() const {
-        switch (which_) {
-          case Void: return AsmJSModule::Return_Void;
-          case Signed: return AsmJSModule::Return_Int32;
-          case Float: // will be converted to a Double
-          case Double: return AsmJSModule::Return_Double;
-          case Int32x4: return AsmJSModule::Return_Int32x4;
-          case Float32x4: return AsmJSModule::Return_Float32x4;
-        }
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected return type");
-    }
-    MIRType toMIRType() const {
-        switch (which_) {
-          case Void: return MIRType_None;
-          case Signed: return MIRType_Int32;
-          case Double: return MIRType_Double;
-          case Float: return MIRType_Float32;
-          case Int32x4: return MIRType_Int32x4;
-          case Float32x4: return MIRType_Float32x4;
-        }
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected return type");
-    }
-    bool operator==(RetType rhs) const { return which_ == rhs.which_; }
-    bool operator!=(RetType rhs) const { return which_ != rhs.which_; }
-};
-
-namespace {
-
-// Represents the subset of Type that can be used as a variable or
-// argument's type. Note: AsmJSCoercion and VarType are kept separate to
-// make very clear the signed/int distinction: a coercion may explicitly sign
-// an *expression* but, when stored as a variable, this signedness information
-// is explicitly thrown away by the asm.js type system. E.g., in
-//
-//   function f(i) {
-//     i = i | 0;             (1)
-//     if (...)
-//         i = foo() >>> 0;
-//     else
-//         i = bar() | 0;
-//     return i | 0;          (2)
-//   }
-//
-// the AsmJSCoercion of (1) is Signed (since | performs ToInt32) but, when
-// translated to a VarType, the result is a plain Int since, as shown, it
-// is legal to assign both Signed and Unsigned (or some other Int) values to
-// it. For (2), the AsmJSCoercion is also Signed but, when translated to an
-// RetType, the result is Signed since callers (asm.js and non-asm.js) can
-// rely on the return value being Signed.
-class VarType
-{
-  public:
-    enum Which {
-        Int = Type::Int,
-        Double = Type::Double,
-        Float = Type::Float,
-        Int32x4 = Type::Int32x4,
-        Float32x4 = Type::Float32x4
-    };
-
-  private:
-    Which which_;
-
-  public:
-    VarType()
-      : which_(Which(-1)) {}
-    MOZ_IMPLICIT VarType(Which w)
-      : which_(w) {}
-    MOZ_IMPLICIT VarType(AsmJSCoercion coercion) {
-        switch (coercion) {
-          case AsmJS_ToInt32: which_ = Int; break;
-          case AsmJS_ToNumber: which_ = Double; break;
-          case AsmJS_FRound: which_ = Float; break;
-          case AsmJS_ToInt32x4: which_ = Int32x4; break;
-          case AsmJS_ToFloat32x4: which_ = Float32x4; break;
-        }
-    }
-    static VarType Of(const AsmJSNumLit& lit) {
-        MOZ_ASSERT(lit.hasType());
-        switch (lit.which()) {
-          case AsmJSNumLit::Fixnum:
-          case AsmJSNumLit::NegativeInt:
-          case AsmJSNumLit::BigUnsigned:
-            return Int;
-          case AsmJSNumLit::Double:
-            return Double;
-          case AsmJSNumLit::Float:
-            return Float;
-          case AsmJSNumLit::Int32x4:
-            return Int32x4;
-          case AsmJSNumLit::Float32x4:
-            return Float32x4;
-          case AsmJSNumLit::OutOfRangeInt:
-            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("can't be out of range int");
-        }
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected literal type");
-    }
-
-    Which which() const {
-        return which_;
-    }
-    Type toType() const {
-        return Type::Which(which_);
-    }
-    MIRType toMIRType() const {
-        switch(which_) {
-          case Int:       return MIRType_Int32;
-          case Double:    return MIRType_Double;
-          case Float:     return MIRType_Float32;
-          case Int32x4:   return MIRType_Int32x4;
-          case Float32x4: return MIRType_Float32x4;
-        }
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("VarType can only be Int, SIMD, Double or Float");
-    }
-    AsmJSCoercion toCoercion() const {
-        switch(which_) {
-          case Int:       return AsmJS_ToInt32;
-          case Double:    return AsmJS_ToNumber;
-          case Float:     return AsmJS_FRound;
-          case Int32x4:   return AsmJS_ToInt32x4;
-          case Float32x4: return AsmJS_ToFloat32x4;
-        }
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("VarType can only be Int, SIMD, Double or Float");
-    }
-    static VarType FromCheckedType(Type type) {
-        MOZ_ASSERT(type.isInt() || type.isMaybeDouble() || type.isFloatish() || type.isSimd());
-        if (type.isMaybeDouble())
-            return Double;
-        else if (type.isFloatish())
-            return Float;
-        else if (type.isInt())
-            return Int;
-        else if (type.isInt32x4())
-            return Int32x4;
-        else if (type.isFloat32x4())
-            return Float32x4;
-        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unknown type in FromCheckedType");
-    }
-    bool operator==(VarType rhs) const { return which_ == rhs.which_; }
-    bool operator!=(VarType rhs) const { return which_ != rhs.which_; }
-};
-
-} /* anonymous namespace */
-
 // Implements <: (subtype) operator when the rhs is a VarType
 static inline bool
 operator<=(Type lhs, VarType rhs)
 {
     switch (rhs.which()) {
       case VarType::Int:       return lhs.isInt();
       case VarType::Double:    return lhs.isDouble();
       case VarType::Float:     return lhs.isFloat();
       case VarType::Int32x4:   return lhs.isInt32x4();
       case VarType::Float32x4: return lhs.isFloat32x4();
     }
     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected rhs type");
 }
 
 /*****************************************************************************/
 
-static inline MIRType ToMIRType(MIRType t) { return t; }
-static inline MIRType ToMIRType(VarType t) { return t.toMIRType(); }
-
-namespace {
-
-template <class VecT>
-class ABIArgIter
-{
-    ABIArgGenerator gen_;
-    const VecT& types_;
-    unsigned i_;
-
-    void settle() { if (!done()) gen_.next(ToMIRType(types_[i_])); }
-
-  public:
-    explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
-    void operator++(int) { MOZ_ASSERT(!done()); i_++; settle(); }
-    bool done() const { return i_ == types_.length(); }
-
-    ABIArg* operator->() { MOZ_ASSERT(!done()); return &gen_.current(); }
-    ABIArg& operator*() { MOZ_ASSERT(!done()); return gen_.current(); }
-
-    unsigned index() const { MOZ_ASSERT(!done()); return i_; }
-    MIRType mirType() const { MOZ_ASSERT(!done()); return ToMIRType(types_[i_]); }
-    uint32_t stackBytesConsumedSoFar() const { return gen_.stackBytesConsumedSoFar(); }
-};
-
-typedef Vector<MIRType, 8> MIRTypeVector;
-typedef ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
-
-typedef Vector<VarType, 8, LifoAllocPolicy<Fallible>> VarTypeVector;
-typedef ABIArgIter<VarTypeVector> ABIArgTypeIter;
-
-class Signature
-{
-    VarTypeVector argTypes_;
-    RetType retType_;
-
-  public:
-    explicit Signature(LifoAlloc& alloc)
-      : argTypes_(alloc) {}
-    Signature(LifoAlloc& alloc, RetType retType)
-      : argTypes_(alloc), retType_(retType) {}
-    Signature(VarTypeVector&& argTypes, RetType retType)
-      : argTypes_(Move(argTypes)), retType_(Move(retType)) {}
-    Signature(Signature&& rhs)
-      : argTypes_(Move(rhs.argTypes_)), retType_(Move(rhs.retType_)) {}
-
-    bool copy(const Signature& rhs) {
-        if (!argTypes_.resize(rhs.argTypes_.length()))
-            return false;
-        for (unsigned i = 0; i < argTypes_.length(); i++)
-            argTypes_[i] = rhs.argTypes_[i];
-        retType_ = rhs.retType_;
-        return true;
-    }
-
-    bool appendArg(VarType type) { return argTypes_.append(type); }
-    VarType arg(unsigned i) const { return argTypes_[i]; }
-    const VarTypeVector& args() const { return argTypes_; }
-    VarTypeVector&& extractArgs() { return Move(argTypes_); }
-
-    RetType retType() const { return retType_; }
-};
-
-// Signature that can be only allocated with a LifoAlloc.
-class LifoSignature : public Signature
-{
-    explicit LifoSignature(Signature&& rhs)
-      : Signature(Move(rhs))
-    {}
-
-    LifoSignature(const LifoSignature&) = delete;
-    LifoSignature(const LifoSignature&&) = delete;
-    LifoSignature& operator=(const LifoSignature&) = delete;
-    LifoSignature& operator=(const LifoSignature&&) = delete;
-
-  public:
-    static LifoSignature* new_(LifoAlloc& lifo, Signature&& sig) {
-        void* mem = lifo.alloc(sizeof(LifoSignature));
-        if (!mem)
-            return nullptr;
-        return new (mem) LifoSignature(Move(sig));
-    }
-};
-
-} // namespace
-
-
 static
 bool operator==(const Signature& lhs, const Signature& rhs)
 {
     if (lhs.retType() != rhs.retType())
         return false;
     if (lhs.args().length() != rhs.args().length())
         return false;
     for (unsigned i = 0; i < lhs.args().length(); i++) {
@@ -1064,322 +580,18 @@ TypedArrayLoadType(Scalar::Type viewType
         return Type::MaybeFloat;
       case Scalar::Float64:
         return Type::MaybeDouble;
       default:;
     }
     MOZ_CRASH("Unexpected array type");
 }
 
-enum NeedsBoundsCheck {
-    NO_BOUNDS_CHECK,
-    NEEDS_BOUNDS_CHECK
-};
-
 namespace {
 
-class AsmFunction
-{
-  public:
-    typedef Vector<AsmJSNumLit, 8, LifoAllocPolicy<Fallible>> VarInitializerVector;
-
-  private:
-    typedef Vector<uint8_t, 4096, LifoAllocPolicy<Fallible>> Bytecode;
-
-    VarInitializerVector varInitializers_;
-    Bytecode bytecode_;
-
-    VarTypeVector argTypes_;
-    RetType returnedType_;
-
-    PropertyName* name_;
-
-    unsigned funcIndex_;
-    unsigned srcBegin_;
-    unsigned lineno_;
-    unsigned column_;
-    unsigned compileTime_;
-
-  public:
-    explicit AsmFunction(LifoAlloc& alloc)
-      : varInitializers_(alloc),
-        bytecode_(alloc),
-        argTypes_(alloc),
-        returnedType_(RetType::Which(-1)),
-        name_(nullptr),
-        funcIndex_(-1),
-        srcBegin_(-1),
-        lineno_(-1),
-        column_(-1),
-        compileTime_(-1)
-    {}
-
-    bool init(const VarTypeVector& args) {
-        if (!argTypes_.initCapacity(args.length()))
-            return false;
-        for (size_t i = 0; i < args.length(); i++)
-            argTypes_.append(args[i]);
-        return true;
-    }
-
-    bool finish(const VarTypeVector& args, PropertyName* name, unsigned funcIndex,
-                unsigned srcBegin, unsigned lineno, unsigned column, unsigned compileTime)
-    {
-        if (!argTypes_.initCapacity(args.length()))
-            return false;
-        for (size_t i = 0; i < args.length(); i++)
-            argTypes_.infallibleAppend(args[i]);
-
-        MOZ_ASSERT(name_ == nullptr);
-        name_ = name;
-
-        MOZ_ASSERT(funcIndex_ == unsigned(-1));
-        funcIndex_ = funcIndex;
-
-        MOZ_ASSERT(srcBegin_ == unsigned(-1));
-        srcBegin_ = srcBegin;
-
-        MOZ_ASSERT(lineno_ == unsigned(-1));
-        lineno_ = lineno;
-
-        MOZ_ASSERT(column_ == unsigned(-1));
-        column_ = column;
-
-        MOZ_ASSERT(compileTime_ == unsigned(-1));
-        compileTime_ = compileTime;
-        return true;
-    }
-
-  private:
-    AsmFunction(const AsmFunction&) = delete;
-    AsmFunction(AsmFunction&& other) = delete;
-    AsmFunction& operator=(const AsmFunction&) = delete;
-
-    // Helper functions
-    template<class T> size_t writePrimitive(T v) {
-        size_t writeAt = bytecode_.length();
-        if (!bytecode_.append(reinterpret_cast<uint8_t*>(&v), sizeof(T)))
-            return -1;
-        return writeAt;
-    }
-
-    template<class T> T readPrimitive(size_t* pc) const {
-        MOZ_ASSERT(*pc + sizeof(T) <= bytecode_.length());
-        T ret;
-        memcpy(&ret, &bytecode_[*pc], sizeof(T));
-        *pc += sizeof(T);
-        return ret;
-    }
-
-  public:
-    size_t writeU8(uint8_t i)   { return writePrimitive<uint8_t>(i); }
-    size_t writeI32(int32_t i)  { return writePrimitive<int32_t>(i); }
-    size_t writeU32(uint32_t i) { return writePrimitive<uint32_t>(i); }
-    size_t writeF32(float f)    { return writePrimitive<float>(f); }
-    size_t writeF64(double d)   { return writePrimitive<double>(d); }
-
-    size_t writeI32X4(const int32_t* i4) {
-        size_t pos = bytecode_.length();
-        for (size_t i = 0; i < 4; i++)
-            writePrimitive<int32_t>(i4[i]);
-        return pos;
-    }
-    size_t writeF32X4(const float* f4) {
-        size_t pos = bytecode_.length();
-        for (size_t i = 0; i < 4; i++)
-            writePrimitive<float>(f4[i]);
-        return pos;
-    }
-
-    uint8_t  readU8 (size_t* pc) const { return readPrimitive<uint8_t>(pc); }
-    int32_t  readI32(size_t* pc) const { return readPrimitive<int32_t>(pc); }
-    float    readF32(size_t* pc) const { return readPrimitive<float>(pc); }
-    uint32_t readU32(size_t* pc) const { return readPrimitive<uint32_t>(pc); }
-    double   readF64(size_t* pc) const { return readPrimitive<double>(pc); }
-    LifoSignature* readSignature(size_t* pc) const { return readPrimitive<LifoSignature*>(pc); }
-
-    SimdConstant readI32X4(size_t* pc) const {
-        int32_t x = readI32(pc);
-        int32_t y = readI32(pc);
-        int32_t z = readI32(pc);
-        int32_t w = readI32(pc);
-        return SimdConstant::CreateX4(x, y, z, w);
-    }
-    SimdConstant readF32X4(size_t* pc) const {
-        float x = readF32(pc);
-        float y = readF32(pc);
-        float z = readF32(pc);
-        float w = readF32(pc);
-        return SimdConstant::CreateX4(x, y, z, w);
-    }
-
-#ifdef DEBUG
-    inline bool pcIsPatchable(size_t pc, unsigned size) const;
-#endif // DEBUG
-
-    void patchU8(size_t pc, uint8_t i) {
-        MOZ_ASSERT(pcIsPatchable(pc, sizeof(uint8_t)));
-        bytecode_[pc] = i;
-    }
-
-    template<class T>
-    void patch32(size_t pc, T i) {
-        static_assert(sizeof(T) == sizeof(uint32_t),
-                      "patch32 must be used with 32-bits wide types");
-        MOZ_ASSERT(pcIsPatchable(pc, sizeof(uint32_t)));
-        memcpy(&bytecode_[pc], &i, sizeof(uint32_t));
-    }
-
-    void patchSignature(size_t pc, const LifoSignature* ptr) {
-        MOZ_ASSERT(pcIsPatchable(pc, sizeof(LifoSignature*)));
-        memcpy(&bytecode_[pc], &ptr, sizeof(LifoSignature*));
-    }
-
-    // Setters
-    void accumulateCompileTime(unsigned ms) {
-        compileTime_ += ms;
-    }
-    bool addVariable(const AsmJSNumLit& init) {
-        return varInitializers_.append(init);
-    }
-    void setReturnedType(RetType retType) {
-        MOZ_ASSERT(returnedType_ == RetType::Which(-1));
-        returnedType_ = retType;
-    }
-
-    // Read-only interface
-    PropertyName* name() const { return name_; }
-    unsigned funcIndex() const { return funcIndex_; }
-    unsigned srcBegin() const { return srcBegin_; }
-    unsigned lineno() const { return lineno_; }
-    unsigned column() const { return column_; }
-    unsigned compileTime() const { return compileTime_; }
-
-    size_t size() const { return bytecode_.length(); }
-
-    const VarTypeVector& argTypes() const { return argTypes_; }
-
-    const VarInitializerVector& varInitializers() const { return varInitializers_; }
-    size_t numLocals() const { return argTypes_.length() + varInitializers_.length(); }
-    RetType returnedType() const {
-        MOZ_ASSERT(returnedType_ != RetType::Which(-1));
-        return returnedType_;
-    }
-};
-
-struct ModuleCompileInputs
-{
-    CompileCompartment* compartment;
-    CompileRuntime* runtime;
-    bool usesSignalHandlersForOOB;
-
-    ModuleCompileInputs(CompileCompartment* compartment,
-                        CompileRuntime* runtime,
-                        bool usesSignalHandlersForOOB)
-      : compartment(compartment),
-        runtime(runtime),
-        usesSignalHandlersForOOB(usesSignalHandlersForOOB)
-    {}
-};
-
-class ModuleCompileResults
-{
-  public:
-    struct SlowFunction
-    {
-        SlowFunction(PropertyName* name, unsigned ms, unsigned line, unsigned column)
-         : name(name), ms(ms), line(line), column(column)
-        {}
-
-        PropertyName* name;
-        unsigned ms;
-        unsigned line;
-        unsigned column;
-    };
-
-    typedef Vector<SlowFunction                  , 0, SystemAllocPolicy> SlowFunctionVector;
-    typedef Vector<Label*                        , 8, SystemAllocPolicy> LabelVector;
-    typedef Vector<AsmJSModule::FunctionCodeRange, 8, SystemAllocPolicy> FunctionCodeRangeVector;
-    typedef Vector<jit::IonScriptCounts*         , 0, SystemAllocPolicy> ScriptCountVector;
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    typedef Vector<AsmJSModule::ProfiledFunction , 0, SystemAllocPolicy> ProfiledFunctionVector;
-#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-
-  private:
-    LifoAlloc           lifo_;
-    MacroAssembler      masm_;
-
-    SlowFunctionVector      slowFunctions_;
-    LabelVector             functionEntries_;
-    FunctionCodeRangeVector codeRanges_;
-    ScriptCountVector       functionCounts_;
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    ProfiledFunctionVector  profiledFunctions_;
-#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-
-    NonAssertingLabel   stackOverflowLabel_;
-    NonAssertingLabel   asyncInterruptLabel_;
-    NonAssertingLabel   syncInterruptLabel_;
-    NonAssertingLabel   onDetachedLabel_;
-    NonAssertingLabel   onConversionErrorLabel_;
-    NonAssertingLabel   onOutOfBoundsLabel_;
-    int64_t             usecBefore_;
-
-  public:
-    ModuleCompileResults()
-      : lifo_(LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
-        masm_(MacroAssembler::AsmJSToken()),
-        usecBefore_(PRMJ_Now())
-    {}
-
-    MacroAssembler& masm()              { return masm_; }
-    Label& stackOverflowLabel()         { return stackOverflowLabel_; }
-    Label& asyncInterruptLabel()        { return asyncInterruptLabel_; }
-    Label& syncInterruptLabel()         { return syncInterruptLabel_; }
-    Label& onOutOfBoundsLabel()         { return onOutOfBoundsLabel_; }
-    Label& onDetachedLabel()            { return onDetachedLabel_; }
-    Label& onConversionErrorLabel()     { return onConversionErrorLabel_; }
-    int64_t usecBefore()                { return usecBefore_; }
-
-    SlowFunctionVector& slowFunctions() { return slowFunctions_; }
-
-    size_t numFunctionEntries() const   { return functionEntries_.length(); }
-    Label* functionEntry(unsigned i)    { return functionEntries_[i]; }
-
-    bool getOrCreateFunctionEntry(unsigned i, Label** label) {
-        if (i == UINT32_MAX)
-            return false;
-        while (functionEntries_.length() <= i) {
-            Label* newEntry = lifo_.new_<Label>();
-            if (!newEntry || !functionEntries_.append(newEntry))
-                return false;
-        }
-        *label = functionEntries_[i];
-        return true;
-    }
-
-    size_t numCodeRanges() const { return codeRanges_.length(); }
-    bool addCodeRange(AsmJSModule::FunctionCodeRange range) { return codeRanges_.append(range); }
-    AsmJSModule::FunctionCodeRange& codeRange(unsigned i) { return codeRanges_[i]; }
-
-    size_t numFunctionCounts() const { return functionCounts_.length(); }
-    bool addFunctionCounts(jit::IonScriptCounts* counts) { return functionCounts_.append(counts); }
-    jit::IonScriptCounts* functionCount(unsigned i) { return functionCounts_[i]; }
-
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    size_t numProfiledFunctions() const { return profiledFunctions_.length(); }
-    bool addProfiledFunction(AsmJSModule::ProfiledFunction func) {
-        return profiledFunctions_.append(func);
-    }
-    AsmJSModule::ProfiledFunction& profiledFunction(unsigned i) {
-        return profiledFunctions_[i];
-    }
-#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-};
-
 // The ModuleValidator encapsulates the entire validation of an asm.js module.
 // Its lifetime goes from the validation of the top components of an asm.js
 // module (all the globals), the emission of bytecode for all the functions in
 // the module and the validation of function's pointer tables. It also finishes
 // the compilation of all the module's stubs.
 //
 // Rooting note: ModuleValidator is a stack class that contains unrooted
 // PropertyName (JSAtom) pointers.  This is safe because it cannot be
@@ -2397,141 +1609,16 @@ class MOZ_STACK_CLASS ModuleValidator
     ModuleCompileInputs compileInputs() const {
         CompileCompartment* compartment = CompileCompartment::get(cx()->compartment());
         return ModuleCompileInputs(compartment,
                                    compartment->runtime(),
                                    module().usesSignalHandlersForOOB());
     }
 };
 
-// ModuleCompiler encapsulates the compilation of an entire asm.js module. Over
-// the course of an ModuleCompiler object's lifetime, many FunctionCompiler
-// objects will be created and destroyed in sequence, one for each function in
-// the module.
-//
-// *** asm.js FFI calls ***
-//
-// asm.js allows calling out to non-asm.js via "FFI calls". The asm.js type
-// system does not place any constraints on the FFI call. In particular:
-//  - an FFI call's target is not known or speculated at module-compile time;
-//  - a single external function can be called with different signatures.
-//
-// If performance didn't matter, all FFI calls could simply box their arguments
-// and call js::Invoke. However, we'd like to be able to specialize FFI calls
-// to be more efficient in several cases:
-//
-//  - for calls to JS functions which have been jitted, we'd like to call
-//    directly into JIT code without going through C++.
-//
-//  - for calls to certain builtins, we'd like to call directly into the C++
-//    code for the builtin without going through the general call path.
-//
-// All of this requires dynamic specialization techniques which must happen
-// after module compilation. To support this, at module-compilation time, each
-// FFI call generates a call signature according to the system ABI, as if the
-// callee was a C++ function taking/returning the same types as the caller was
-// passing/expecting. The callee is loaded from a fixed offset in the global
-// data array which allows the callee to change at runtime. Initially, the
-// callee is a stub which boxes its arguments and calls js::Invoke.
-//
-// To do this, we need to generate a callee stub for each pairing of FFI callee
-// and signature. We call this pairing an "exit". For example, this code has
-// two external functions and three exits:
-//
-//  function f(global, imports) {
-//    "use asm";
-//    var foo = imports.foo;
-//    var bar = imports.bar;
-//    function g() {
-//      foo(1);      // Exit #1: (int) -> void
-//      foo(1.5);    // Exit #2: (double) -> void
-//      bar(1)|0;    // Exit #3: (int) -> int
-//      bar(2)|0;    // Exit #3: (int) -> int
-//    }
-//  }
-//
-// The ModuleCompiler maintains a hash table (ExitMap) which allows a call site
-// to add a new exit or reuse an existing one. The key is an index into the
-// Vector<Exit> stored in the AsmJSModule and the value is the signature of
-// that exit's variant.
-//
-// The rooting note in the comment above ModuleValidator applies here as
-// well.
-class MOZ_STACK_CLASS ModuleCompiler
-{
-    ModuleCompileInputs                     compileInputs_;
-    ScopedJSDeletePtr<ModuleCompileResults> compileResults_;
-
-  public:
-    explicit ModuleCompiler(const ModuleCompileInputs& inputs)
-      : compileInputs_(inputs),
-        compileResults_(js_new<ModuleCompileResults>())
-    {}
-
-    /*************************************************** Read-only interface */
-
-    MacroAssembler& masm()          { return compileResults_->masm(); }
-    Label& stackOverflowLabel()     { return compileResults_->stackOverflowLabel(); }
-    Label& asyncInterruptLabel()    { return compileResults_->asyncInterruptLabel(); }
-    Label& syncInterruptLabel()     { return compileResults_->syncInterruptLabel(); }
-    Label& onOutOfBoundsLabel()     { return compileResults_->onOutOfBoundsLabel(); }
-    Label& onConversionErrorLabel() { return compileResults_->onConversionErrorLabel(); }
-    int64_t usecBefore()            { return compileResults_->usecBefore(); }
-
-    bool usesSignalHandlersForOOB() const   { return compileInputs_.usesSignalHandlersForOOB; }
-    CompileRuntime* runtime() const         { return compileInputs_.runtime; }
-    CompileCompartment* compartment() const { return compileInputs_.compartment; }
-
-    /***************************************************** Mutable interface */
-
-    bool getOrCreateFunctionEntry(uint32_t funcIndex, Label** label)
-    {
-        return compileResults_->getOrCreateFunctionEntry(funcIndex, label);
-    }
-
-    bool finishGeneratingFunction(AsmFunction& func, CodeGenerator& codegen,
-                                  const AsmJSFunctionLabels& labels)
-    {
-        // Code range
-        unsigned line = func.lineno();
-        unsigned column = func.column();
-        PropertyName* funcName = func.name();
-        if (!compileResults_->addCodeRange(AsmJSModule::FunctionCodeRange(funcName, line, labels)))
-            return false;
-
-        // Script counts
-        jit::IonScriptCounts* counts = codegen.extractScriptCounts();
-        if (counts && !compileResults_->addFunctionCounts(counts)) {
-            js_delete(counts);
-            return false;
-        }
-
-        // Slow functions
-        if (func.compileTime() >= 250) {
-            ModuleCompileResults::SlowFunction sf(funcName, func.compileTime(), line, column);
-            if (!compileResults_->slowFunctions().append(Move(sf)))
-                return false;
-        }
-
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-        // Perf and profiling information
-        unsigned begin = labels.begin.offset();
-        unsigned end = labels.end.offset();
-        AsmJSModule::ProfiledFunction profiledFunc(funcName, begin, end, line, column);
-        if (!compileResults_->addProfiledFunction(profiledFunc))
-            return false;
-#endif // defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-        return true;
-    }
-
-    void finish(ScopedJSDeletePtr<ModuleCompileResults>* results) {
-        *results = compileResults_.forget();
-    }
-};
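
To make the exit pairing described above concrete, here is a minimal
self-contained sketch of deduplicating (FFI callee, signature) pairings into
exit indices (standard C++ only; the flattened string signature and all names
are illustrative stand-ins, not the ExitMap machinery used by this code):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct Exit { uint32_t ffiIndex; std::string sig; };

    struct ExitMapSketch {
        std::vector<Exit> exits;  // lives in the "module"
        std::map<std::pair<uint32_t, std::string>, uint32_t> indices;

        // Return the exit index for this (callee, signature) pairing,
        // creating a new exit only for pairings not seen before.
        uint32_t getOrCreate(uint32_t ffiIndex, const std::string& sig) {
            auto key = std::make_pair(ffiIndex, sig);
            auto it = indices.find(key);
            if (it != indices.end())
                return it->second;  // reuse the existing exit
            exits.push_back(Exit{ffiIndex, sig});
            uint32_t index = uint32_t(exits.size() - 1);
            indices.emplace(key, index);
            return index;
        }
    };

Running the comment's example through this sketch, foo:(int)->void,
foo:(double)->void and bar:(int)->int produce three distinct exits, and the
second bar(1)|0 call site reuses the third one, matching exits #1-#3 above.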
-
 } // namespace
 
 /*****************************************************************************/
 // Numeric literal utilities
 
 static bool
 IsNumericNonFloatLiteral(ParseNode* pn)
 {
@@ -2802,383 +1889,16 @@ IsLiteralInt(ModuleValidator& m, ParseNo
     return IsNumericLiteral(m, pn) &&
            IsLiteralInt(ExtractNumericLiteral(m, pn), u32);
 }
 
 /*****************************************************************************/
 
 namespace {
 
-enum class AsmType : uint8_t {
-    Int32,
-    Float32,
-    Float64,
-    Int32x4,
-    Float32x4
-};
-
-enum class Stmt : uint8_t {
-    Ret,
-
-    Block,
-
-    IfThen,
-    IfElse,
-    Switch,
-
-    While,
-    DoWhile,
-
-    ForInitInc,
-    ForInitNoInc,
-    ForNoInitNoInc,
-    ForNoInitInc,
-
-    Label,
-    Continue,
-    ContinueLabel,
-    Break,
-    BreakLabel,
-
-    CallInternal,
-    CallIndirect,
-    CallImport,
-
-    AtomicsFence,
-
-    // asm.js specific
-    // Expression statements (to be removed in the future)
-    I32Expr,
-    F32Expr,
-    F64Expr,
-    I32X4Expr,
-    F32X4Expr,
-
-    Id,
-    Noop,
-    InterruptCheckHead,
-    InterruptCheckLoop,
-
-    DebugCheckPoint,
-
-    Bad
-};
-
-enum class I32 : uint8_t {
-    // Common opcodes
-    GetLocal,
-    SetLocal,
-    GetGlobal,
-    SetGlobal,
-
-    CallInternal,
-    CallIndirect,
-    CallImport,
-
-    Conditional,
-    Comma,
-
-    Literal,
-
-    // Binary arith opcodes
-    Add,
-    Sub,
-    Mul,
-    SDiv,
-    SMod,
-    UDiv,
-    UMod,
-    Min,
-    Max,
-
-    // Unary arith opcodes
-    Not,
-    Neg,
-
-    // Bitwise opcodes
-    BitOr,
-    BitAnd,
-    BitXor,
-    BitNot,
-
-    Lsh,
-    ArithRsh,
-    LogicRsh,
-
-    // Conversion opcodes
-    FromF32,
-    FromF64,
-
-    // Math builtin opcodes
-    Clz,
-    Abs,
-
-    // Comparison opcodes
-    // Ordering matters (EmitComparison expects signed opcodes to be placed
-    // before unsigned opcodes)
-    EqI32,
-    NeI32,
-    SLtI32,
-    SLeI32,
-    SGtI32,
-    SGeI32,
-    ULtI32,
-    ULeI32,
-    UGtI32,
-    UGeI32,
-
-    EqF32,
-    NeF32,
-    LtF32,
-    LeF32,
-    GtF32,
-    GeF32,
-
-    EqF64,
-    NeF64,
-    LtF64,
-    LeF64,
-    GtF64,
-    GeF64,
-
-    // Heap access opcodes
-    SLoad8,
-    SLoad16,
-    SLoad32,
-    ULoad8,
-    ULoad16,
-    ULoad32,
-    Store8,
-    Store16,
-    Store32,
-
-    // Atomics opcodes
-    AtomicsCompareExchange,
-    AtomicsExchange,
-    AtomicsLoad,
-    AtomicsStore,
-    AtomicsBinOp,
-
-    // SIMD opcodes
-    I32X4SignMask,
-    F32X4SignMask,
-
-    I32X4ExtractLane,
-
-    // Specific to AsmJS
-    Id,
-
-    Bad
-};
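
One plausible reading of the "ordering matters" note above (an assumption,
since EmitComparison itself is not part of this hunk) is that keeping the
signed comparison opcodes contiguous and ahead of the unsigned ones lets
signedness be recovered with a single range check:

    #include <cstdint>

    enum class I32Cmp : uint8_t {  // mirrors the ordering above
        EqI32, NeI32,
        SLtI32, SLeI32, SGtI32, SGeI32,
        ULtI32, ULeI32, UGtI32, UGeI32
    };

    // Signed comparisons form one contiguous block before the unsigned ones,
    // so one range check recovers signedness from the opcode value.
    static bool isUnsignedI32Cmp(I32Cmp op) {
        return op >= I32Cmp::ULtI32 && op <= I32Cmp::UGeI32;
    }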
-
-enum class F32 : uint8_t {
-    // Common opcodes
-    GetLocal,
-    SetLocal,
-    GetGlobal,
-    SetGlobal,
-
-    CallInternal,
-    CallIndirect,
-    CallImport,
-
-    Conditional,
-    Comma,
-
-    Literal,
-
-    // Binary arith opcodes
-    Add,
-    Sub,
-    Mul,
-    Div,
-    Min,
-    Max,
-    Neg,
-
-    // Math builtin opcodes
-    Abs,
-    Sqrt,
-    Ceil,
-    Floor,
-
-    // Conversion opcodes
-    FromF64,
-    FromS32,
-    FromU32,
-
-    // Heap access opcodes
-    Load,
-    StoreF32,
-    StoreF64,
-
-    // SIMD opcodes
-    F32X4ExtractLane,
-
-    // asm.js specific
-    Id,
-    Bad
-};
-
-enum class F64 : uint8_t {
-    // Common opcodes
-    GetLocal,
-    SetLocal,
-    GetGlobal,
-    SetGlobal,
-
-    CallInternal,
-    CallIndirect,
-    CallImport,
-
-    Conditional,
-    Comma,
-
-    Literal,
-
-    // Binary arith opcodes
-    Add,
-    Sub,
-    Mul,
-    Div,
-    Min,
-    Max,
-    Mod,
-    Neg,
-
-    // Math builtin opcodes
-    Abs,
-    Sqrt,
-    Ceil,
-    Floor,
-    Sin,
-    Cos,
-    Tan,
-    Asin,
-    Acos,
-    Atan,
-    Exp,
-    Log,
-    Pow,
-    Atan2,
-
-    // Conversion opcodes
-    FromF32,
-    FromS32,
-    FromU32,
-
-    // Heap access opcodes
-    Load,
-    StoreF32,
-    StoreF64,
-
-    // asm.js specific
-    Id,
-    Bad
-};
-
-enum class I32X4 : uint8_t {
-    // Common opcodes
-    GetLocal,
-    SetLocal,
-
-    GetGlobal,
-    SetGlobal,
-
-    CallInternal,
-    CallIndirect,
-    CallImport,
-
-    Conditional,
-    Comma,
-
-    Literal,
-
-    // Specific opcodes
-    Ctor,
-
-    Unary,
-
-    Binary,
-    BinaryCompI32X4,
-    BinaryCompF32X4,
-    BinaryBitwise,
-    BinaryShift,
-
-    ReplaceLane,
-
-    FromF32X4,
-    FromF32X4Bits,
-
-    Swizzle,
-    Shuffle,
-    Select,
-    BitSelect,
-    Splat,
-
-    Load,
-    Store,
-
-    // asm.js specific
-    Id,
-    Bad
-};
-
-enum class F32X4 : uint8_t {
-    // Common opcodes
-    GetLocal,
-    SetLocal,
-
-    GetGlobal,
-    SetGlobal,
-
-    CallInternal,
-    CallIndirect,
-    CallImport,
-
-    Conditional,
-    Comma,
-
-    Literal,
-
-    // Specific opcodes
-    Ctor,
-
-    Unary,
-
-    Binary,
-    BinaryBitwise,
-
-    ReplaceLane,
-
-    FromI32X4,
-    FromI32X4Bits,
-    Swizzle,
-    Shuffle,
-    Select,
-    BitSelect,
-    Splat,
-
-    Load,
-    Store,
-
-    // asm.js specific
-    Id,
-    Bad
-};
-
-#ifdef DEBUG
-bool AsmFunction::pcIsPatchable(size_t pc, unsigned size) const {
-    bool patchable = true;
-    // Every byte in [pc, pc + size) must still hold the Stmt::Bad filler.
-    for (unsigned i = 0; patchable && i < size; i++)
-        patchable &= Stmt(bytecode_[pc + i]) == Stmt::Bad;
-    return patchable;
-}
-#endif // DEBUG
-
 // Encapsulates the building of an asm bytecode function from an asm.js
 // function's source code, packing the asm.js code into the asm bytecode form
 // that can be decoded and compiled with a FunctionCompiler.
 class FunctionValidator
 {
   public:
     struct Local
     {
@@ -3422,1228 +2142,16 @@ class FunctionValidator
         size_t ret = func_.writeU8(uint8_t(Stmt::Bad));
         for (size_t i = 1; i < sizeof(intptr_t); i++)
             func_.writeU8(uint8_t(Stmt::Bad));
         return ret;
     }
     /************************************************** End of build helpers */
 };
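
The build helpers above reserve Stmt::Bad filler bytes in the byte stream
(e.g. the helper whose tail is shown, writing sizeof(intptr_t) filler bytes
and returning their offset) and patch them once the real opcode or value is
known via the tempOp/tempU8 and patchOp/patchU8 helpers used by the Check*
functions below; pcIsPatchable is the debug check that a patch target still
holds filler. A standalone sketch of this reserve-then-patch pattern
(standard C++ only; the tiny Op enum and all names are hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    enum class Op : uint8_t { Bad = 0, GetLocal, Add };  // hypothetical opcodes

    struct BytecodeSketch {
        std::vector<uint8_t> bytes;

        size_t writeU8(uint8_t b) { bytes.push_back(b); return bytes.size() - 1; }

        // Reserve one opcode slot, to be filled in once validation knows
        // which opcode belongs there.
        size_t tempOp() { return writeU8(uint8_t(Op::Bad)); }

        void patchOp(size_t at, Op op) {
            assert(Op(bytes[at]) == Op::Bad && "patch target must hold filler");
            bytes[at] = uint8_t(op);
        }
    };

    // Usage: reserve, emit operands, patch once the opcode is decided.
    //   BytecodeSketch b;
    //   size_t at = b.tempOp();
    //   b.writeU8(3);               // e.g. a local slot operand
    //   b.patchOp(at, Op::GetLocal);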
 
-static bool
-NoExceptionPending(ExclusiveContext* cx)
-{
-    return !cx->isJSContext() || !cx->asJSContext()->isExceptionPending();
-}
-
-typedef Vector<size_t, 1, SystemAllocPolicy> LabelVector;
-typedef Vector<MBasicBlock*, 8, SystemAllocPolicy> BlockVector;
-
-// Encapsulates the compilation of a single function in an asm.js module. The
-// function compiler handles the creation and final backend compilation of the
-// MIR graph. Also see ModuleCompiler comment.
-class FunctionCompiler
-{
-  private:
-    typedef HashMap<uint32_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> LabeledBlockMap;
-    typedef HashMap<size_t, BlockVector, DefaultHasher<size_t>, SystemAllocPolicy> UnlabeledBlockMap;
-    typedef Vector<size_t, 4, SystemAllocPolicy> PositionStack;
-    typedef Vector<Type, 4, SystemAllocPolicy> LocalVarTypes;
-
-    ModuleCompiler &         m_;
-    LifoAlloc &              lifo_;
-    RetType                  retType_;
-
-    const AsmFunction &      func_;
-    size_t                   pc_;
-
-    TempAllocator *          alloc_;
-    MIRGraph *               graph_;
-    CompileInfo *            info_;
-    MIRGenerator *           mirGen_;
-    Maybe<JitContext>        jitContext_;
-
-    MBasicBlock *            curBlock_;
-
-    PositionStack            loopStack_;
-    PositionStack            breakableStack_;
-    UnlabeledBlockMap        unlabeledBreaks_;
-    UnlabeledBlockMap        unlabeledContinues_;
-    LabeledBlockMap          labeledBreaks_;
-    LabeledBlockMap          labeledContinues_;
-
-    LocalVarTypes            localVarTypes_;
-
-  public:
-    FunctionCompiler(ModuleCompiler& m, const AsmFunction& func, LifoAlloc& lifo)
-      : m_(m),
-        lifo_(lifo),
-        retType_(func.returnedType()),
-        func_(func),
-        pc_(0),
-        alloc_(nullptr),
-        graph_(nullptr),
-        info_(nullptr),
-        mirGen_(nullptr),
-        curBlock_(nullptr)
-    {}
-
-    ModuleCompiler &        m() const            { return m_; }
-    TempAllocator &         alloc() const        { return *alloc_; }
-    LifoAlloc &             lifo() const         { return lifo_; }
-    RetType                 returnedType() const { return retType_; }
-
-    bool init()
-    {
-        return unlabeledBreaks_.init() &&
-               unlabeledContinues_.init() &&
-               labeledBreaks_.init() &&
-               labeledContinues_.init();
-    }
-
-    void checkPostconditions()
-    {
-        MOZ_ASSERT(loopStack_.empty());
-        MOZ_ASSERT(unlabeledBreaks_.empty());
-        MOZ_ASSERT(unlabeledContinues_.empty());
-        MOZ_ASSERT(labeledBreaks_.empty());
-        MOZ_ASSERT(labeledContinues_.empty());
-        MOZ_ASSERT(inDeadCode());
-        MOZ_ASSERT(pc_ == func_.size(), "all bytecode must be consumed");
-    }
-
-    /************************* Read-only interface (after local scope setup) */
-
-    MIRGenerator & mirGen() const     { MOZ_ASSERT(mirGen_); return *mirGen_; }
-    MIRGraph &     mirGraph() const   { MOZ_ASSERT(graph_); return *graph_; }
-    CompileInfo &  info() const       { MOZ_ASSERT(info_); return *info_; }
-
-    MDefinition* getLocalDef(unsigned slot)
-    {
-        if (inDeadCode())
-            return nullptr;
-        return curBlock_->getSlot(info().localSlot(slot));
-    }
-
-    /***************************** Code generation (after local scope setup) */
-
-    MDefinition* constant(const SimdConstant& v, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-        MInstruction* constant;
-        constant = MSimdConstant::New(alloc(), v, type);
-        curBlock_->add(constant);
-        return constant;
-    }
-
-    MDefinition* constant(Value v, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-        MConstant* constant = MConstant::NewAsmJS(alloc(), v, type);
-        curBlock_->add(constant);
-        return constant;
-    }
-
-    template <class T>
-    MDefinition* unary(MDefinition* op)
-    {
-        if (inDeadCode())
-            return nullptr;
-        T* ins = T::NewAsmJS(alloc(), op);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template <class T>
-    MDefinition* unary(MDefinition* op, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-        T* ins = T::NewAsmJS(alloc(), op, type);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template <class T>
-    MDefinition* binary(MDefinition* lhs, MDefinition* rhs)
-    {
-        if (inDeadCode())
-            return nullptr;
-        T* ins = T::New(alloc(), lhs, rhs);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template <class T>
-    MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-        T* ins = T::NewAsmJS(alloc(), lhs, rhs, type);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* unarySimd(MDefinition* input, MSimdUnaryArith::Operation op, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(input->type()) && input->type() == type);
-        MInstruction* ins = MSimdUnaryArith::NewAsmJS(alloc(), input, op, type);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryArith::Operation op,
-                            MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
-        MOZ_ASSERT(lhs->type() == type);
-        MSimdBinaryArith* ins = MSimdBinaryArith::NewAsmJS(alloc(), lhs, rhs, op, type);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryBitwise::Operation op,
-                            MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
-        MOZ_ASSERT(lhs->type() == type);
-        MSimdBinaryBitwise* ins = MSimdBinaryBitwise::NewAsmJS(alloc(), lhs, rhs, op, type);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template<class T>
-    MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, typename T::Operation op)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        T* ins = T::NewAsmJS(alloc(), lhs, rhs, op);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* swizzleSimd(MDefinition* vector, int32_t X, int32_t Y, int32_t Z, int32_t W,
-                             MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MSimdSwizzle* ins = MSimdSwizzle::New(alloc(), vector, type, X, Y, Z, W);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* shuffleSimd(MDefinition* lhs, MDefinition* rhs, int32_t X, int32_t Y,
-                             int32_t Z, int32_t W, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MInstruction* ins = MSimdShuffle::New(alloc(), lhs, rhs, type, X, Y, Z, W);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* insertElementSimd(MDefinition* vec, MDefinition* val, SimdLane lane, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(vec->type()) && vec->type() == type);
-        MOZ_ASSERT(!IsSimdType(val->type()));
-        MSimdInsertElement* ins = MSimdInsertElement::NewAsmJS(alloc(), vec, val, type, lane);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* selectSimd(MDefinition* mask, MDefinition* lhs, MDefinition* rhs, MIRType type,
-                            bool isElementWise)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(mask->type()));
-        MOZ_ASSERT(mask->type() == MIRType_Int32x4);
-        MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
-        MOZ_ASSERT(lhs->type() == type);
-        MSimdSelect* ins = MSimdSelect::NewAsmJS(alloc(), mask, lhs, rhs, type, isElementWise);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template<class T>
-    MDefinition* convertSimd(MDefinition* vec, MIRType from, MIRType to)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
-        T* ins = T::NewAsmJS(alloc(), vec, from, to);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* splatSimd(MDefinition* v, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(type));
-        MSimdSplatX4* ins = MSimdSplatX4::NewAsmJS(alloc(), v, type);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type, bool isMax) {
-        if (inDeadCode())
-            return nullptr;
-        MMinMax* ins = MMinMax::New(alloc(), lhs, rhs, type, isMax);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type, MMul::Mode mode)
-    {
-        if (inDeadCode())
-            return nullptr;
-        MMul* ins = MMul::New(alloc(), lhs, rhs, type, mode);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type, bool unsignd)
-    {
-        if (inDeadCode())
-            return nullptr;
-        MDiv* ins = MDiv::NewAsmJS(alloc(), lhs, rhs, type, unsignd);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type, bool unsignd)
-    {
-        if (inDeadCode())
-            return nullptr;
-        MMod* ins = MMod::NewAsmJS(alloc(), lhs, rhs, type, unsignd);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template <class T>
-    MDefinition* bitwise(MDefinition* lhs, MDefinition* rhs)
-    {
-        if (inDeadCode())
-            return nullptr;
-        T* ins = T::NewAsmJS(alloc(), lhs, rhs);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template <class T>
-    MDefinition* bitwise(MDefinition* op)
-    {
-        if (inDeadCode())
-            return nullptr;
-        T* ins = T::NewAsmJS(alloc(), op);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op, MCompare::CompareType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-        MCompare* ins = MCompare::NewAsmJS(alloc(), lhs, rhs, op, type);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    void assign(unsigned slot, MDefinition* def)
-    {
-        if (inDeadCode())
-            return;
-        curBlock_->setSlot(info().localSlot(slot), def);
-    }
-
-    MDefinition* loadHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD loads should use loadSimdHeap");
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck);
-        curBlock_->add(load);
-        return load;
-    }
-
-    MDefinition* loadSimdHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk,
-                              unsigned numElems)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MOZ_ASSERT(Scalar::isSimdType(accessType), "loadSimdHeap can only load from a SIMD view");
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
-                                                   numElems);
-        curBlock_->add(load);
-        return load;
-    }
-
-    void storeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v, NeedsBoundsCheck chk)
-    {
-        if (inDeadCode())
-            return;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD stores should use loadSimdHeap");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck);
-        curBlock_->add(store);
-    }
-
-    void storeSimdHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v,
-                       NeedsBoundsCheck chk, unsigned numElems)
-    {
-        if (inDeadCode())
-            return;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MOZ_ASSERT(Scalar::isSimdType(accessType), "storeSimdHeap can only load from a SIMD view");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
-                                                      numElems);
-        curBlock_->add(store);
-    }
-
-    void memoryBarrier(MemoryBarrierBits type)
-    {
-        if (inDeadCode())
-            return;
-        MMemoryBarrier* ins = MMemoryBarrier::New(alloc(), type);
-        curBlock_->add(ins);
-    }
-
-    MDefinition* atomicLoadHeap(Scalar::Type accessType, MDefinition* ptr, NeedsBoundsCheck chk)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
-                                                   /* numElems */ 0,
-                                                   MembarBeforeLoad, MembarAfterLoad);
-        curBlock_->add(load);
-        return load;
-    }
-
-    void atomicStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v, NeedsBoundsCheck chk)
-    {
-        if (inDeadCode())
-            return;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
-                                                      /* numElems = */ 0,
-                                                      MembarBeforeStore, MembarAfterStore);
-        curBlock_->add(store);
-    }
-
-    MDefinition* atomicCompareExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* oldv,
-                                           MDefinition* newv, NeedsBoundsCheck chk)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MAsmJSCompareExchangeHeap* cas =
-            MAsmJSCompareExchangeHeap::New(alloc(), accessType, ptr, oldv, newv, needsBoundsCheck);
-        curBlock_->add(cas);
-        return cas;
-    }
-
-    MDefinition* atomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value,
-                                    NeedsBoundsCheck chk)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MAsmJSAtomicExchangeHeap* cas =
-            MAsmJSAtomicExchangeHeap::New(alloc(), accessType, ptr, value, needsBoundsCheck);
-        curBlock_->add(cas);
-        return cas;
-    }
-
-    MDefinition* atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type accessType, MDefinition* ptr,
-                                 MDefinition* v, NeedsBoundsCheck chk)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
-        MAsmJSAtomicBinopHeap* binop =
-            MAsmJSAtomicBinopHeap::New(alloc(), op, accessType, ptr, v, needsBoundsCheck);
-        curBlock_->add(binop);
-        return binop;
-    }
-
-    MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-        MAsmJSLoadGlobalVar* load = MAsmJSLoadGlobalVar::New(alloc(), type, globalDataOffset,
-                                                             isConst);
-        curBlock_->add(load);
-        return load;
-    }
-
-    void storeGlobalVar(uint32_t globalDataOffset, MDefinition* v)
-    {
-        if (inDeadCode())
-            return;
-        curBlock_->add(MAsmJSStoreGlobalVar::New(alloc(), globalDataOffset, v));
-    }
-
-    void addInterruptCheck(unsigned lineno, unsigned column)
-    {
-        if (inDeadCode())
-            return;
-
-        CallSiteDesc callDesc(lineno, column, CallSiteDesc::Relative);
-        curBlock_->add(MAsmJSInterruptCheck::New(alloc(), &m().syncInterruptLabel(), callDesc));
-    }
-
-    MDefinition* extractSimdElement(SimdLane lane, MDefinition* base, MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(base->type()));
-        MOZ_ASSERT(!IsSimdType(type));
-        MSimdExtractElement* ins = MSimdExtractElement::NewAsmJS(alloc(), base, type, lane);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    MDefinition* extractSignMask(MDefinition* base)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(base->type()));
-        MSimdSignMask* ins = MSimdSignMask::NewAsmJS(alloc(), base);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    template<typename T>
-    MDefinition* constructSimd(MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w,
-                               MIRType type)
-    {
-        if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(IsSimdType(type));
-        T* ins = T::NewAsmJS(alloc(), type, x, y, z, w);
-        curBlock_->add(ins);
-        return ins;
-    }
-
-    /***************************************************************** Calls */
-
-    // The IonMonkey backend maintains a single stack offset (from the stack
-    // pointer to the base of the frame) by adding the total amount of spill
-    // space required plus the maximum stack required for argument passing.
-    // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
-    // manually accumulate, for the entire function, the maximum required stack
-    // space for argument passing. (This is passed to the CodeGenerator via
-    // MIRGenerator::maxAsmJSStackArgBytes.) Naively, this would just be the
-    // maximum of the stack space required for each individual call (as
-    // determined by the call ABI). However, as an optimization, arguments are
-    // stored to the stack immediately after evaluation (to decrease live
-    // ranges and reduce spilling). This introduces the complexity that,
-    // between evaluating an argument and making the call, another argument
-    // evaluation could perform a call that also needs to store to the stack.
-    // When this occurs, childClobbers_ is set to true and the parent
-    // expression's arguments are stored above the maximum depth clobbered by
-    // a child expression.
-
-    class Call
-    {
-        uint32_t lineno_;
-        uint32_t column_;
-        ABIArgGenerator abi_;
-        uint32_t prevMaxStackBytes_;
-        uint32_t maxChildStackBytes_;
-        uint32_t spIncrement_;
-        MAsmJSCall::Args regArgs_;
-        Vector<MAsmJSPassStackArg*, 0, SystemAllocPolicy> stackArgs_;
-        bool childClobbers_;
-
-        friend class FunctionCompiler;
-
-      public:
-        Call(FunctionCompiler& f, uint32_t lineno, uint32_t column)
-          : lineno_(lineno),
-            column_(column),
-            prevMaxStackBytes_(0),
-            maxChildStackBytes_(0),
-            spIncrement_(0),
-            childClobbers_(false)
-        { }
-    };
-
-    void startCallArgs(Call* call)
-    {
-        if (inDeadCode())
-            return;
-        call->prevMaxStackBytes_ = mirGen().resetAsmJSMaxStackArgBytes();
-    }
-
-    bool passArg(MDefinition* argDef, MIRType mirType, Call* call)
-    {
-        if (inDeadCode())
-            return true;
-
-        uint32_t childStackBytes = mirGen().resetAsmJSMaxStackArgBytes();
-        call->maxChildStackBytes_ = Max(call->maxChildStackBytes_, childStackBytes);
-        if (childStackBytes > 0 && !call->stackArgs_.empty())
-            call->childClobbers_ = true;
-
-        ABIArg arg = call->abi_.next(mirType);
-        if (arg.kind() == ABIArg::Stack) {
-            MAsmJSPassStackArg* mir = MAsmJSPassStackArg::New(alloc(), arg.offsetFromArgBase(),
-                                                              argDef);
-            curBlock_->add(mir);
-            if (!call->stackArgs_.append(mir))
-                return false;
-        } else {
-            if (!call->regArgs_.append(MAsmJSCall::Arg(arg.reg(), argDef)))
-                return false;
-        }
-        return true;
-    }
-
-    void finishCallArgs(Call* call)
-    {
-        if (inDeadCode())
-            return;
-        uint32_t parentStackBytes = call->abi_.stackBytesConsumedSoFar();
-        uint32_t newStackBytes;
-        if (call->childClobbers_) {
-            call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, AsmJSStackAlignment);
-            for (unsigned i = 0; i < call->stackArgs_.length(); i++)
-                call->stackArgs_[i]->incrementOffset(call->spIncrement_);
-            newStackBytes = Max(call->prevMaxStackBytes_,
-                                call->spIncrement_ + parentStackBytes);
-        } else {
-            call->spIncrement_ = 0;
-            newStackBytes = Max(call->prevMaxStackBytes_,
-                                Max(call->maxChildStackBytes_, parentStackBytes));
-        }
-        mirGen_->setAsmJSMaxStackArgBytes(newStackBytes);
-    }
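
To make this accounting concrete, here is a standalone sketch of
finishCallArgs' two cases with invented numbers (alignBytes and the alignment
value 16 stand in for AlignBytes and AsmJSStackAlignment):

    #include <algorithm>
    #include <cstdint>

    // Round `bytes` up to a multiple of `align` (align must be a power of two).
    static uint32_t alignBytes(uint32_t bytes, uint32_t align) {
        return (bytes + align - 1) & ~(align - 1);
    }

    static uint32_t newMaxStackBytes(uint32_t prevMax, uint32_t parentBytes,
                                     uint32_t maxChildBytes, bool childClobbers) {
        if (childClobbers) {
            // Parent args must live above anything a child call clobbered.
            uint32_t spIncrement = alignBytes(maxChildBytes, 16);
            return std::max(prevMax, spIncrement + parentBytes);
        }
        // Otherwise parent and child argument areas can share stack space.
        return std::max(prevMax, std::max(maxChildBytes, parentBytes));
    }

    // With prevMax = 0, parentBytes = 24 and maxChildBytes = 40:
    //   no clobbering child:  max(0, max(40, 24))         = 40 bytes
    //   clobbering child:     max(0, align(40, 16) + 24)  = 72 bytes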
-
-  private:
-    bool callPrivate(MAsmJSCall::Callee callee, const Call& call, MIRType returnType, MDefinition** def)
-    {
-        if (inDeadCode()) {
-            *def = nullptr;
-            return true;
-        }
-
-        CallSiteDesc::Kind kind = CallSiteDesc::Kind(-1);  // initialize to silence GCC warning
-        switch (callee.which()) {
-          case MAsmJSCall::Callee::Internal: kind = CallSiteDesc::Relative; break;
-          case MAsmJSCall::Callee::Dynamic:  kind = CallSiteDesc::Register; break;
-          case MAsmJSCall::Callee::Builtin:  kind = CallSiteDesc::Register; break;
-        }
-
-        MAsmJSCall* ins = MAsmJSCall::New(alloc(), CallSiteDesc(call.lineno_, call.column_, kind),
-                                          callee, call.regArgs_, returnType, call.spIncrement_);
-        if (!ins)
-            return false;
-
-        curBlock_->add(ins);
-        *def = ins;
-        return true;
-    }
-
-  public:
-    bool internalCall(const Signature& sig, Label* entry, const Call& call, MDefinition** def)
-    {
-        MIRType returnType = sig.retType().toMIRType();
-        return callPrivate(MAsmJSCall::Callee(entry), call, returnType, def);
-    }
-
-    bool funcPtrCall(const Signature& sig, uint32_t maskLit, uint32_t globalDataOffset, MDefinition* index,
-                     const Call& call, MDefinition** def)
-    {
-        if (inDeadCode()) {
-            *def = nullptr;
-            return true;
-        }
-
-        MConstant* mask = MConstant::New(alloc(), Int32Value(maskLit));
-        curBlock_->add(mask);
-        MBitAnd* maskedIndex = MBitAnd::NewAsmJS(alloc(), index, mask);
-        curBlock_->add(maskedIndex);
-        MAsmJSLoadFuncPtr* ptrFun = MAsmJSLoadFuncPtr::New(alloc(), globalDataOffset, maskedIndex);
-        curBlock_->add(ptrFun);
-
-        MIRType returnType = sig.retType().toMIRType();
-        return callPrivate(MAsmJSCall::Callee(ptrFun), call, returnType, def);
-    }
-
-    bool ffiCall(unsigned globalDataOffset, const Call& call, MIRType returnType, MDefinition** def)
-    {
-        if (inDeadCode()) {
-            *def = nullptr;
-            return true;
-        }
-
-        MAsmJSLoadFFIFunc* ptrFun = MAsmJSLoadFFIFunc::New(alloc(), globalDataOffset);
-        curBlock_->add(ptrFun);
-
-        return callPrivate(MAsmJSCall::Callee(ptrFun), call, returnType, def);
-    }
-
-    bool builtinCall(AsmJSImmKind builtin, const Call& call, MIRType returnType, MDefinition** def)
-    {
-        return callPrivate(MAsmJSCall::Callee(builtin), call, returnType, def);
-    }
-
-    /*********************************************** Control flow generation */
-
-    inline bool inDeadCode() const {
-        return curBlock_ == nullptr;
-    }
-
-    void returnExpr(MDefinition* expr)
-    {
-        if (inDeadCode())
-            return;
-        MAsmJSReturn* ins = MAsmJSReturn::New(alloc(), expr);
-        curBlock_->end(ins);
-        curBlock_ = nullptr;
-    }
-
-    void returnVoid()
-    {
-        if (inDeadCode())
-            return;
-        MAsmJSVoidReturn* ins = MAsmJSVoidReturn::New(alloc());
-        curBlock_->end(ins);
-        curBlock_ = nullptr;
-    }
-
-    bool branchAndStartThen(MDefinition* cond, MBasicBlock** thenBlock, MBasicBlock** elseBlock)
-    {
-        if (inDeadCode())
-            return true;
-
-        bool hasThenBlock = *thenBlock != nullptr;
-        bool hasElseBlock = *elseBlock != nullptr;
-
-        if (!hasThenBlock && !newBlock(curBlock_, thenBlock))
-            return false;
-        if (!hasElseBlock && !newBlock(curBlock_, elseBlock))
-            return false;
-
-        curBlock_->end(MTest::New(alloc(), cond, *thenBlock, *elseBlock));
-
-        // Only add a predecessor if newBlock wasn't called above (newBlock adds it for us).
-        if (hasThenBlock && !(*thenBlock)->addPredecessor(alloc(), curBlock_))
-            return false;
-        if (hasElseBlock && !(*elseBlock)->addPredecessor(alloc(), curBlock_))
-            return false;
-
-        curBlock_ = *thenBlock;
-        mirGraph().moveBlockToEnd(curBlock_);
-        return true;
-    }
-
-    void assertCurrentBlockIs(MBasicBlock* block) {
-        if (inDeadCode())
-            return;
-        MOZ_ASSERT(curBlock_ == block);
-    }
-
-    bool appendThenBlock(BlockVector* thenBlocks)
-    {
-        if (inDeadCode())
-            return true;
-        return thenBlocks->append(curBlock_);
-    }
-
-    bool joinIf(const BlockVector& thenBlocks, MBasicBlock* joinBlock)
-    {
-        if (!joinBlock)
-            return true;
-        MOZ_ASSERT_IF(curBlock_, thenBlocks.back() == curBlock_);
-        for (size_t i = 0; i < thenBlocks.length(); i++) {
-            thenBlocks[i]->end(MGoto::New(alloc(), joinBlock));
-            if (!joinBlock->addPredecessor(alloc(), thenBlocks[i]))
-                return false;
-        }
-        curBlock_ = joinBlock;
-        mirGraph().moveBlockToEnd(curBlock_);
-        return true;
-    }
-
-    void switchToElse(MBasicBlock* elseBlock)
-    {
-        if (!elseBlock)
-            return;
-        curBlock_ = elseBlock;
-        mirGraph().moveBlockToEnd(curBlock_);
-    }
-
-    bool joinIfElse(const BlockVector& thenBlocks)
-    {
-        if (inDeadCode() && thenBlocks.empty())
-            return true;
-        MBasicBlock* pred = curBlock_ ? curBlock_ : thenBlocks[0];
-        MBasicBlock* join;
-        if (!newBlock(pred, &join))
-            return false;
-        if (curBlock_)
-            curBlock_->end(MGoto::New(alloc(), join));
-        for (size_t i = 0; i < thenBlocks.length(); i++) {
-            thenBlocks[i]->end(MGoto::New(alloc(), join));
-            if (pred == curBlock_ || i > 0) {
-                if (!join->addPredecessor(alloc(), thenBlocks[i]))
-                    return false;
-            }
-        }
-        curBlock_ = join;
-        return true;
-    }
-
-    void pushPhiInput(MDefinition* def)
-    {
-        if (inDeadCode())
-            return;
-        MOZ_ASSERT(curBlock_->stackDepth() == info().firstStackSlot());
-        curBlock_->push(def);
-    }
-
-    MDefinition* popPhiOutput()
-    {
-        if (inDeadCode())
-            return nullptr;
-        MOZ_ASSERT(curBlock_->stackDepth() == info().firstStackSlot() + 1);
-        return curBlock_->pop();
-    }
-
-    bool startPendingLoop(size_t pos, MBasicBlock** loopEntry)
-    {
-        if (!loopStack_.append(pos) || !breakableStack_.append(pos))
-            return false;
-        if (inDeadCode()) {
-            *loopEntry = nullptr;
-            return true;
-        }
-        MOZ_ASSERT(curBlock_->loopDepth() == loopStack_.length() - 1);
-        *loopEntry = MBasicBlock::NewAsmJS(mirGraph(), info(), curBlock_,
-                                           MBasicBlock::PENDING_LOOP_HEADER);
-        if (!*loopEntry)
-            return false;
-        mirGraph().addBlock(*loopEntry);
-        (*loopEntry)->setLoopDepth(loopStack_.length());
-        curBlock_->end(MGoto::New(alloc(), *loopEntry));
-        curBlock_ = *loopEntry;
-        return true;
-    }
-
-    bool branchAndStartLoopBody(MDefinition* cond, MBasicBlock** afterLoop)
-    {
-        if (inDeadCode()) {
-            *afterLoop = nullptr;
-            return true;
-        }
-        MOZ_ASSERT(curBlock_->loopDepth() > 0);
-        MBasicBlock* body;
-        if (!newBlock(curBlock_, &body))
-            return false;
-        if (cond->isConstant() && cond->toConstant()->valueToBoolean()) {
-            *afterLoop = nullptr;
-            curBlock_->end(MGoto::New(alloc(), body));
-        } else {
-            if (!newBlockWithDepth(curBlock_, curBlock_->loopDepth() - 1, afterLoop))
-                return false;
-            curBlock_->end(MTest::New(alloc(), cond, body, *afterLoop));
-        }
-        curBlock_ = body;
-        return true;
-    }
-
-  private:
-    size_t popLoop()
-    {
-        size_t pos = loopStack_.popCopy();
-        MOZ_ASSERT(!unlabeledContinues_.has(pos));
-        breakableStack_.popBack();
-        return pos;
-    }
-
-  public:
-    bool closeLoop(MBasicBlock* loopEntry, MBasicBlock* afterLoop)
-    {
-        size_t pos = popLoop();
-        if (!loopEntry) {
-            MOZ_ASSERT(!afterLoop);
-            MOZ_ASSERT(inDeadCode());
-            MOZ_ASSERT(!unlabeledBreaks_.has(pos));
-            return true;
-        }
-        MOZ_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1);
-        MOZ_ASSERT_IF(afterLoop, afterLoop->loopDepth() == loopStack_.length());
-        if (curBlock_) {
-            MOZ_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1);
-            curBlock_->end(MGoto::New(alloc(), loopEntry));
-            if (!loopEntry->setBackedgeAsmJS(curBlock_))
-                return false;
-        }
-        curBlock_ = afterLoop;
-        if (curBlock_)
-            mirGraph().moveBlockToEnd(curBlock_);
-        return bindUnlabeledBreaks(pos);
-    }
-
-    bool branchAndCloseDoWhileLoop(MDefinition* cond, MBasicBlock* loopEntry)
-    {
-        size_t pos = popLoop();
-        if (!loopEntry) {
-            MOZ_ASSERT(inDeadCode());
-            MOZ_ASSERT(!unlabeledBreaks_.has(pos));
-            return true;
-        }
-        MOZ_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1);
-        if (curBlock_) {
-            MOZ_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1);
-            if (cond->isConstant()) {
-                if (cond->toConstant()->valueToBoolean()) {
-                    curBlock_->end(MGoto::New(alloc(), loopEntry));
-                    if (!loopEntry->setBackedgeAsmJS(curBlock_))
-                        return false;
-                    curBlock_ = nullptr;
-                } else {
-                    MBasicBlock* afterLoop;
-                    if (!newBlock(curBlock_, &afterLoop))
-                        return false;
-                    curBlock_->end(MGoto::New(alloc(), afterLoop));
-                    curBlock_ = afterLoop;
-                }
-            } else {
-                MBasicBlock* afterLoop;
-                if (!newBlock(curBlock_, &afterLoop))
-                    return false;
-                curBlock_->end(MTest::New(alloc(), cond, loopEntry, afterLoop));
-                if (!loopEntry->setBackedgeAsmJS(curBlock_))
-                    return false;
-                curBlock_ = afterLoop;
-            }
-        }
-        return bindUnlabeledBreaks(pos);
-    }
-
-    bool bindContinues(size_t pos, const LabelVector* maybeLabels)
-    {
-        bool createdJoinBlock = false;
-        if (UnlabeledBlockMap::Ptr p = unlabeledContinues_.lookup(pos)) {
-            if (!bindBreaksOrContinues(&p->value(), &createdJoinBlock))
-                return false;
-            unlabeledContinues_.remove(p);
-        }
-        return bindLabeledBreaksOrContinues(maybeLabels, &labeledContinues_, &createdJoinBlock);
-    }
-
-    bool bindLabeledBreaks(const LabelVector* maybeLabels)
-    {
-        bool createdJoinBlock = false;
-        return bindLabeledBreaksOrContinues(maybeLabels, &labeledBreaks_, &createdJoinBlock);
-    }
-
-    bool addBreak(uint32_t* maybeLabelId) {
-        if (maybeLabelId)
-            return addBreakOrContinue(*maybeLabelId, &labeledBreaks_);
-        return addBreakOrContinue(breakableStack_.back(), &unlabeledBreaks_);
-    }
-
-    bool addContinue(uint32_t* maybeLabelId) {
-        if (maybeLabelId)
-            return addBreakOrContinue(*maybeLabelId, &labeledContinues_);
-        return addBreakOrContinue(loopStack_.back(), &unlabeledContinues_);
-    }
-
-    bool startSwitch(size_t pos, MDefinition* expr, int32_t low, int32_t high,
-                     MBasicBlock** switchBlock)
-    {
-        if (!breakableStack_.append(pos))
-            return false;
-        if (inDeadCode()) {
-            *switchBlock = nullptr;
-            return true;
-        }
-        curBlock_->end(MTableSwitch::New(alloc(), expr, low, high));
-        *switchBlock = curBlock_;
-        curBlock_ = nullptr;
-        return true;
-    }
-
-    bool startSwitchCase(MBasicBlock* switchBlock, MBasicBlock** next)
-    {
-        if (!switchBlock) {
-            *next = nullptr;
-            return true;
-        }
-        if (!newBlock(switchBlock, next))
-            return false;
-        if (curBlock_) {
-            curBlock_->end(MGoto::New(alloc(), *next));
-            if (!(*next)->addPredecessor(alloc(), curBlock_))
-                return false;
-        }
-        curBlock_ = *next;
-        return true;
-    }
-
-    bool startSwitchDefault(MBasicBlock* switchBlock, BlockVector* cases, MBasicBlock** defaultBlock)
-    {
-        if (!startSwitchCase(switchBlock, defaultBlock))
-            return false;
-        if (!*defaultBlock)
-            return true;
-        mirGraph().moveBlockToEnd(*defaultBlock);
-        return true;
-    }
-
-    bool joinSwitch(MBasicBlock* switchBlock, const BlockVector& cases, MBasicBlock* defaultBlock)
-    {
-        size_t pos = breakableStack_.popCopy();
-        if (!switchBlock)
-            return true;
-        MTableSwitch* mir = switchBlock->lastIns()->toTableSwitch();
-        size_t defaultIndex = mir->addDefault(defaultBlock);
-        for (unsigned i = 0; i < cases.length(); i++) {
-            if (!cases[i])
-                mir->addCase(defaultIndex);
-            else
-                mir->addCase(mir->addSuccessor(cases[i]));
-        }
-        if (curBlock_) {
-            MBasicBlock* next;
-            if (!newBlock(curBlock_, &next))
-                return false;
-            curBlock_->end(MGoto::New(alloc(), next));
-            curBlock_ = next;
-        }
-        return bindUnlabeledBreaks(pos);
-    }
-
-    /************************************************************ DECODING ***/
-
-    uint8_t  readU8()              { return func_.readU8(&pc_); }
-    uint32_t readU32()             { return func_.readU32(&pc_); }
-    int32_t  readI32()             { return func_.readI32(&pc_); }
-    float    readF32()             { return func_.readF32(&pc_); }
-    double   readF64()             { return func_.readF64(&pc_); }
-    LifoSignature* readSignature() { return func_.readSignature(&pc_); }
-    SimdConstant readI32X4()       { return func_.readI32X4(&pc_); }
-    SimdConstant readF32X4()       { return func_.readF32X4(&pc_); }
-
-    Stmt readStmtOp()              { return Stmt(readU8()); }
-
-    void assertDebugCheckPoint() {
-#ifdef DEBUG
-        MOZ_ASSERT(Stmt(readU8()) == Stmt::DebugCheckPoint);
-#endif
-    }
-
-    bool done() const { return pc_ == func_.size(); }
-    size_t pc() const { return pc_; }
-
-    bool prepareEmitMIR(const VarTypeVector& argTypes)
-    {
-        const AsmFunction::VarInitializerVector& varInitializers = func_.varInitializers();
-        size_t numLocals = func_.numLocals();
-
-        // Prepare data structures
-        alloc_  = lifo_.new_<TempAllocator>(&lifo_);
-        if (!alloc_)
-            return false;
-        jitContext_.emplace(m().runtime(), /* CompileCompartment = */ nullptr, alloc_);
-        graph_  = lifo_.new_<MIRGraph>(alloc_);
-        if (!graph_)
-            return false;
-        MOZ_ASSERT(numLocals == argTypes.length() + varInitializers.length());
-        info_   = lifo_.new_<CompileInfo>(numLocals);
-        if (!info_)
-            return false;
-        const OptimizationInfo* optimizationInfo = js_IonOptimizations.get(Optimization_AsmJS);
-        const JitCompileOptions options;
-        mirGen_ = lifo_.new_<MIRGenerator>(m().compartment(),
-                                           options, alloc_,
-                                           graph_, info_, optimizationInfo,
-                                           &m().onOutOfBoundsLabel(),
-                                           &m().onConversionErrorLabel(),
-                                           m().usesSignalHandlersForOOB());
-        if (!mirGen_)
-            return false;
-
-        if (!newBlock(/* pred = */ nullptr, &curBlock_))
-            return false;
-
-        // Emit parameters and local variables
-        for (ABIArgTypeIter i(argTypes); !i.done(); i++) {
-            MAsmJSParameter* ins = MAsmJSParameter::New(alloc(), *i, i.mirType());
-            curBlock_->add(ins);
-            curBlock_->initSlot(info().localSlot(i.index()), ins);
-            if (!mirGen_->ensureBallast())
-                return false;
-            localVarTypes_.append(argTypes[i.index()].toType());
-        }
-
-        unsigned firstLocalSlot = argTypes.length();
-        for (unsigned i = 0; i < varInitializers.length(); i++) {
-            const AsmJSNumLit& lit = varInitializers[i];
-            Type type = Type::Of(lit);
-            MIRType mirType = type.toMIRType();
-
-            MInstruction* ins;
-            if (lit.isSimd())
-               ins = MSimdConstant::New(alloc(), lit.simdValue(), mirType);
-            else
-               ins = MConstant::NewAsmJS(alloc(), lit.scalarValue(), mirType);
-
-            curBlock_->add(ins);
-            curBlock_->initSlot(info().localSlot(firstLocalSlot + i), ins);
-            if (!mirGen_->ensureBallast())
-                return false;
-            localVarTypes_.append(type);
-        }
-
-        return true;
-    }
-
-    /*************************************************************************/
-
-    MIRGenerator* extractMIR()
-    {
-        MOZ_ASSERT(mirGen_ != nullptr);
-        MIRGenerator* mirGen = mirGen_;
-        mirGen_ = nullptr;
-        return mirGen;
-    }
-
-    /*************************************************************************/
-  private:
-    bool newBlockWithDepth(MBasicBlock* pred, unsigned loopDepth, MBasicBlock** block)
-    {
-        *block = MBasicBlock::NewAsmJS(mirGraph(), info(), pred, MBasicBlock::NORMAL);
-        if (!*block)
-            return false;
-        mirGraph().addBlock(*block);
-        (*block)->setLoopDepth(loopDepth);
-        return true;
-    }
-
-    bool newBlock(MBasicBlock* pred, MBasicBlock** block)
-    {
-        return newBlockWithDepth(pred, loopStack_.length(), block);
-    }
-
-    bool bindBreaksOrContinues(BlockVector* preds, bool* createdJoinBlock)
-    {
-        for (unsigned i = 0; i < preds->length(); i++) {
-            MBasicBlock* pred = (*preds)[i];
-            if (*createdJoinBlock) {
-                pred->end(MGoto::New(alloc(), curBlock_));
-                if (!curBlock_->addPredecessor(alloc(), pred))
-                    return false;
-            } else {
-                MBasicBlock* next;
-                if (!newBlock(pred, &next))
-                    return false;
-                pred->end(MGoto::New(alloc(), next));
-                if (curBlock_) {
-                    curBlock_->end(MGoto::New(alloc(), next));
-                    if (!next->addPredecessor(alloc(), curBlock_))
-                        return false;
-                }
-                curBlock_ = next;
-                *createdJoinBlock = true;
-            }
-            MOZ_ASSERT(curBlock_->begin() == curBlock_->end());
-            if (!mirGen_->ensureBallast())
-                return false;
-        }
-        preds->clear();
-        return true;
-    }
-
-    bool bindLabeledBreaksOrContinues(const LabelVector* maybeLabels, LabeledBlockMap* map,
-                                      bool* createdJoinBlock)
-    {
-        if (!maybeLabels)
-            return true;
-        const LabelVector& labels = *maybeLabels;
-        for (unsigned i = 0; i < labels.length(); i++) {
-            if (LabeledBlockMap::Ptr p = map->lookup(labels[i])) {
-                if (!bindBreaksOrContinues(&p->value(), createdJoinBlock))
-                    return false;
-                map->remove(p);
-            }
-            if (!mirGen_->ensureBallast())
-                return false;
-        }
-        return true;
-    }
-
-    template <class Key, class Map>
-    bool addBreakOrContinue(Key key, Map* map)
-    {
-        if (inDeadCode())
-            return true;
-        typename Map::AddPtr p = map->lookupForAdd(key);
-        if (!p) {
-            BlockVector empty;
-            if (!map->add(p, key, Move(empty)))
-                return false;
-        }
-        if (!p->value().append(curBlock_))
-            return false;
-        curBlock_ = nullptr;
-        return true;
-    }
-
-    bool bindUnlabeledBreaks(size_t pos)
-    {
-        bool createdJoinBlock = false;
-        if (UnlabeledBlockMap::Ptr p = unlabeledBreaks_.lookup(pos)) {
-            if (!bindBreaksOrContinues(&p->value(), &createdJoinBlock))
-                return false;
-            unlabeledBreaks_.remove(p);
-        }
-        return true;
-    }
-};
-
 } /* anonymous namespace */
 
 /*****************************************************************************/
 // asm.js type-checking and code-generation algorithm
 
 static bool
 CheckIdentifier(ModuleValidator& m, ParseNode* usepn, PropertyName* name)
 {
@@ -5341,49 +2849,16 @@ CheckNumericLiteral(FunctionValidator& f
     if (!literal.hasType())
         return f.fail(num, "numeric literal out of representable integer range");
     f.writeLit(literal);
     *type = Type::Of(literal);
     return true;
 }
 
 static bool
-EmitLiteral(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    switch (type) {
-      case AsmType::Int32: {
-        int32_t val = f.readI32();
-        *def = f.constant(Int32Value(val), MIRType_Int32);
-        return true;
-      }
-      case AsmType::Float32: {
-        float val = f.readF32();
-        *def = f.constant(Float32Value(val), MIRType_Float32);
-        return true;
-      }
-      case AsmType::Float64: {
-        double val = f.readF64();
-        *def = f.constant(DoubleValue(val), MIRType_Double);
-        return true;
-      }
-      case AsmType::Int32x4: {
-        SimdConstant lit(f.readI32X4());
-        *def = f.constant(lit, MIRType_Int32x4);
-        return true;
-      }
-      case AsmType::Float32x4: {
-        SimdConstant lit(f.readF32X4());
-        *def = f.constant(lit, MIRType_Float32x4);
-        return true;
-      }
-    }
-    MOZ_CRASH("unexpected literal type");
-}
-
-static bool
 CheckVarRef(FunctionValidator& f, ParseNode* varRef, Type* type)
 {
     PropertyName* name = varRef->name();
 
     if (const FunctionValidator::Local* local = f.lookupLocal(name)) {
         switch (local->type.which()) {
           case VarType::Int:       f.writeOp(I32::GetLocal);   break;
           case VarType::Double:    f.writeOp(F64::GetLocal);   break;
@@ -5437,34 +2912,16 @@ CheckVarRef(FunctionValidator& f, ParseN
             return f.failName(varRef, "'%s' may not be accessed by ordinary expressions", name);
         }
         return true;
     }
 
     return f.failName(varRef, "'%s' not found in local or asm.js module scope", name);
 }
 
-static bool
-EmitGetLoc(FunctionCompiler& f, const DebugOnly<MIRType>& type, MDefinition** def)
-{
-    uint32_t slot = f.readU32();
-    *def = f.getLocalDef(slot);
-    MOZ_ASSERT_IF(*def, (*def)->type() == type);
-    return true;
-}
-
-static bool
-EmitGetGlo(FunctionCompiler& f, MIRType type, MDefinition** def)
-{
-    uint32_t globalDataOffset = f.readU32();
-    bool isConst = bool(f.readU8());
-    *def = f.loadGlobalVar(globalDataOffset, isConst, type);
-    return true;
-}
-
 static inline bool
 IsLiteralOrConstInt(FunctionValidator& f, ParseNode* pn, uint32_t* u32)
 {
     AsmJSNumLit lit;
     if (!IsLiteralOrConst(f, pn, &lit))
         return false;
 
     return IsLiteralInt(lit, u32);
@@ -5643,34 +3100,16 @@ CheckLoadArray(FunctionValidator& f, Par
     }
 
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
 
     *type = TypedArrayLoadType(viewType);
     return true;
 }
 
-static bool EmitI32Expr(FunctionCompiler& f, MDefinition** def);
-static bool EmitF32Expr(FunctionCompiler& f, MDefinition** def);
-static bool EmitF64Expr(FunctionCompiler& f, MDefinition** def);
-static bool EmitI32X4Expr(FunctionCompiler& f, MDefinition** def);
-static bool EmitF32X4Expr(FunctionCompiler& f, MDefinition** def);
-static bool EmitExpr(FunctionCompiler& f, AsmType type, MDefinition** def);
-
-static bool
-EmitLoadArray(FunctionCompiler& f, Scalar::Type scalarType, MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    MDefinition* ptr;
-    if (!EmitI32Expr(f, &ptr))
-        return false;
-    *def = f.loadHeap(scalarType, ptr, needsBoundsCheck);
-    return true;
-}
-
 static bool
 CheckDotAccess(FunctionValidator& f, ParseNode* elem, Type* type)
 {
     MOZ_ASSERT(elem->isKind(PNK_DOT));
 
     size_t opcodeAt = f.tempOp();
 
     ParseNode* base = DotBase(elem);
@@ -5693,26 +3132,16 @@ CheckDotAccess(FunctionValidator& f, Par
       case AsmJSSimdType_int32x4:   f.patchOp(opcodeAt, I32::I32X4SignMask); break;
       case AsmJSSimdType_float32x4: f.patchOp(opcodeAt, I32::F32X4SignMask); break;
     }
 
     return true;
 }
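
// CheckDotAccess shows the validator's two-pass trick: tempOp reserves an
// opcode slot before the base expression's SIMD type is known, and patchOp
// fills it in once type-checking resolves it. A minimal sketch of that
// backpatching scheme, assuming single-byte opcodes in a std::vector; the
// Encoder and Op names are illustrative, not the engine's own.

#include <cassert>
#include <cstdint>
#include <vector>

enum Op : uint8_t { Unpatched = 0xff, I32X4SignMask = 1, F32X4SignMask = 2 };

struct Encoder {
    std::vector<uint8_t> bytes;
    // Reserve one byte for an opcode that is not known yet; return its index.
    size_t tempOp() { bytes.push_back(Unpatched); return bytes.size() - 1; }
    // Backpatch the reserved slot once the checked type is known.
    void patchOp(size_t at, Op op) { assert(bytes.at(at) == Unpatched); bytes[at] = op; }
};

int main() {
    Encoder e;
    size_t opcodeAt = e.tempOp();    // before: type of the base is unknown
    bool baseIsInt32x4 = true;       // ... CheckVarRef resolves the type ...
    e.patchOp(opcodeAt, baseIsInt32x4 ? I32X4SignMask : F32X4SignMask);
    assert(e.bytes[0] == I32X4SignMask);
    return 0;
}
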
 
 static bool
-EmitSignMask(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* in;
-    if (!EmitExpr(f, type, &in))
-        return false;
-    *def = f.extractSignMask(in);
-    return true;
-}
-
-static bool
 CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
 {
     size_t opcodeAt = f.tempOp();
     size_t needsBoundsCheckAt = f.tempU8();
 
     Scalar::Type viewType;
     NeedsBoundsCheck needsBoundsCheck;
     int32_t mask;
@@ -5779,77 +3208,16 @@ CheckStoreArray(FunctionValidator& f, Pa
 
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
 
     *type = rhsType;
     return true;
 }
 
 static bool
-EmitStore(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-
-    MDefinition* ptr;
-    if (!EmitI32Expr(f, &ptr))
-        return false;
-
-    MDefinition* rhs = nullptr;
-    switch (viewType) {
-      case Scalar::Int8:
-      case Scalar::Int16:
-      case Scalar::Int32:
-        if (!EmitI32Expr(f, &rhs))
-            return false;
-        break;
-      case Scalar::Float32:
-        if (!EmitF32Expr(f, &rhs))
-            return false;
-        break;
-      case Scalar::Float64:
-        if (!EmitF64Expr(f, &rhs))
-            return false;
-        break;
-      default: MOZ_CRASH("unexpected scalar type");
-    }
-
-    f.storeHeap(viewType, ptr, rhs, needsBoundsCheck);
-    *def = rhs;
-    return true;
-}
-
-static bool
-EmitStoreWithCoercion(FunctionCompiler& f, Scalar::Type rhsType, Scalar::Type viewType,
-                      MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    MDefinition* ptr;
-    if (!EmitI32Expr(f, &ptr))
-        return false;
-
-    MDefinition* rhs = nullptr;
-    MDefinition* coerced = nullptr;
-    if (rhsType == Scalar::Float32 && viewType == Scalar::Float64) {
-        if (!EmitF32Expr(f, &rhs))
-            return false;
-        coerced = f.unary<MToDouble>(rhs);
-    } else if (rhsType == Scalar::Float64 && viewType == Scalar::Float32) {
-        if (!EmitF64Expr(f, &rhs))
-            return false;
-        coerced = f.unary<MToFloat32>(rhs);
-    } else {
-        MOZ_CRASH("unexpected coerced store");
-    }
-
-    f.storeHeap(viewType, ptr, coerced, needsBoundsCheck);
-    *def = rhs;
-    return true;
-}
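
// EmitStoreWithCoercion widens (or narrows) the right-hand side only for the
// heap store itself; the value of the whole store expression stays the
// uncoerced rhs, matching JS assignment semantics where `f64[i] = f` yields
// the float f. A sketch of those semantics over a plain array, assuming an
// illustrative helper name in place of the real heap machinery:

#include <cassert>

// Store a float into a double-typed heap cell: the cell gets the converted
// value (the MToDouble above), but the expression's value is the original rhs.
static float storeF32ToF64Heap(double* heap, int index, float rhs) {
    heap[index] = static_cast<double>(rhs);  // coerced store
    return rhs;                              // expression value: uncoerced
}

int main() {
    double heap[4] = {};
    float v = storeF32ToF64Heap(heap, 0, 1.5f);
    assert(heap[0] == 1.5 && v == 1.5f);
    return 0;
}
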
-
-static bool
 CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
 {
     RootedPropertyName name(f.cx(), lhs->name());
 
     size_t opcodeAt = f.tempOp();
     size_t indexAt = f.temp32();
 
     Type rhsType;
@@ -5902,40 +3270,16 @@ CheckAssignName(FunctionValidator& f, Pa
         *type = rhsType;
         return true;
     }
 
     return f.failName(lhs, "'%s' not found in local or asm.js module scope", name);
 }
 
 static bool
-EmitSetLoc(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    uint32_t slot = f.readU32();
-    MDefinition* expr;
-    if (!EmitExpr(f, type, &expr))
-        return false;
-    f.assign(slot, expr);
-    *def = expr;
-    return true;
-}
-
-static bool
-EmitSetGlo(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    uint32_t globalDataOffset = f.readU32();
-    MDefinition* expr;
-    if (!EmitExpr(f, type, &expr))
-        return false;
-    f.storeGlobalVar(globalDataOffset, expr);
-    *def = expr;
-    return true;
-}
-
-static bool
 CheckAssign(FunctionValidator& f, ParseNode* assign, Type* type)
 {
     MOZ_ASSERT(assign->isKind(PNK_ASSIGN));
 
     ParseNode* lhs = BinaryLeft(assign);
     ParseNode* rhs = BinaryRight(assign);
 
     if (lhs->getKind() == PNK_ELEM)
@@ -6101,50 +3445,16 @@ CheckMathMinMax(FunctionValidator& f, Pa
             return false;
         if (!(nextType <= firstType))
             return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(), firstType.toChars());
     }
 
     return true;
 }
 
-static MIRType
-MIRTypeFromAsmType(AsmType type)
-{
-    switch (type) {
-      case AsmType::Int32:     return MIRType_Int32;
-      case AsmType::Float32:   return MIRType_Float32;
-      case AsmType::Float64:   return MIRType_Double;
-      case AsmType::Int32x4:   return MIRType_Int32x4;
-      case AsmType::Float32x4: return MIRType_Float32x4;
-    }
-    MOZ_CRASH("unexpected asm type");
-}
-
-typedef bool IsMax;
-
-static bool
-EmitMathMinMax(FunctionCompiler& f, AsmType type, bool isMax, MDefinition** def)
-{
-    size_t numArgs = f.readU8();
-    MOZ_ASSERT(numArgs >= 2);
-    MDefinition* lastDef;
-    if (!EmitExpr(f, type, &lastDef))
-        return false;
-    MIRType mirType = MIRTypeFromAsmType(type);
-    for (size_t i = 1; i < numArgs; i++) {
-        MDefinition* next;
-        if (!EmitExpr(f, type, &next))
-            return false;
-        lastDef = f.minMax(lastDef, next, mirType, isMax);
-    }
-    *def = lastDef;
-    return true;
-}
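
// EmitMathMinMax lowers the n-ary Math.min/Math.max that CheckMathMinMax
// accepted (two or more arguments) into a left-to-right chain of binary
// min/max nodes. The same fold over plain ints, as a self-contained sketch:

#include <algorithm>
#include <cassert>
#include <vector>

// Mirror of the `lastDef = f.minMax(lastDef, next, ...)` loop above.
static int foldMinMax(const std::vector<int>& args, bool isMax) {
    assert(args.size() >= 2);
    int last = args[0];
    for (size_t i = 1; i < args.size(); i++)
        last = isMax ? std::max(last, args[i]) : std::min(last, args[i]);
    return last;
}

int main() {
    assert(foldMinMax({3, 1, 4, 1, 5}, /* isMax = */ true) == 5);
    assert(foldMinMax({3, 1, 4, 1, 5}, /* isMax = */ false) == 1);
    return 0;
}
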
-
 static bool
 CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
                              Scalar::Type* viewType, NeedsBoundsCheck* needsBoundsCheck,
                              int32_t* mask)
 {
     if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, viewType, needsBoundsCheck, mask))
         return false;
 
@@ -6203,28 +3513,16 @@ CheckAtomicsLoad(FunctionValidator& f, P
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = Type::Int;
     return true;
 }
 
 static bool
-EmitAtomicsLoad(FunctionCompiler& f, MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    Scalar::Type viewType = Scalar::Type(f.readU8());
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-    *def = f.atomicLoadHeap(viewType, index, needsBoundsCheck);
-    return true;
-}
-
-static bool
 CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type)
 {
     if (CallArgListLength(call) != 3)
         return f.fail(call, "Atomics.store must be passed 3 arguments");
 
     ParseNode* arrayArg = CallArgList(call);
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);
@@ -6249,32 +3547,16 @@ CheckAtomicsStore(FunctionValidator& f, 
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = rhsType;
     return true;
 }
 
 static bool
-EmitAtomicsStore(FunctionCompiler& f, MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    Scalar::Type viewType = Scalar::Type(f.readU8());
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-    MDefinition* value;
-    if (!EmitI32Expr(f, &value))
-        return false;
-    f.atomicStoreHeap(viewType, index, value, needsBoundsCheck);
-    *def = value;
-    return true;
-}
-
-static bool
 CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, js::jit::AtomicOp op)
 {
     if (CallArgListLength(call) != 3)
         return f.fail(call, "Atomics binary operator must be passed 3 arguments");
 
     ParseNode* arrayArg = CallArgList(call);
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);
@@ -6300,32 +3582,16 @@ CheckAtomicsBinop(FunctionValidator& f, 
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = Type::Int;
     return true;
 }
 
 static bool
-EmitAtomicsBinOp(FunctionCompiler& f, MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    Scalar::Type viewType = Scalar::Type(f.readU8());
-    js::jit::AtomicOp op = js::jit::AtomicOp(f.readU8());
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-    MDefinition* value;
-    if (!EmitI32Expr(f, &value))
-        return false;
-    *def = f.atomicBinopHeap(op, viewType, index, value, needsBoundsCheck);
-    return true;
-}
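
// The atomicBinopHeap node built above ultimately performs a
// read-modify-write selected by the serialized AtomicOp, returning the
// cell's previous value as Atomics.add/sub/and/or/xor do. A standalone
// sketch with std::atomic, assuming a reduced Op enum (the real
// js::jit::AtomicOp is defined elsewhere in the JIT):

#include <atomic>
#include <cassert>
#include <cstdint>

enum class Op : uint8_t { Add, Sub, And, Or, Xor };

static int32_t atomicBinop(std::atomic<int32_t>& cell, Op op, int32_t v) {
    switch (op) {
      case Op::Add: return cell.fetch_add(v);
      case Op::Sub: return cell.fetch_sub(v);
      case Op::And: return cell.fetch_and(v);
      case Op::Or:  return cell.fetch_or(v);
      case Op::Xor: return cell.fetch_xor(v);
    }
    return 0; // unreachable for valid ops
}

int main() {
    std::atomic<int32_t> cell(5);
    assert(atomicBinop(cell, Op::Add, 3) == 5);  // returns the old value
    assert(cell.load() == 8);
    return 0;
}
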
-
-static bool
 CheckAtomicsIsLockFree(FunctionValidator& f, ParseNode* call, Type* type)
 {
     if (CallArgListLength(call) != 1)
         return f.fail(call, "Atomics.isLockFree must be passed 1 argument");
 
     ParseNode* sizeArg = CallArgList(call);
 
     uint32_t size;
@@ -6375,34 +3641,16 @@ CheckAtomicsCompareExchange(FunctionVali
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = Type::Int;
     return true;
 }
 
 static bool
-EmitAtomicsCompareExchange(FunctionCompiler& f, MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    Scalar::Type viewType = Scalar::Type(f.readU8());
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-    MDefinition* oldValue;
-    if (!EmitI32Expr(f, &oldValue))
-        return false;
-    MDefinition* newValue;
-    if (!EmitI32Expr(f, &newValue))
-        return false;
-    *def = f.atomicCompareExchangeHeap(viewType, index, oldValue, newValue, needsBoundsCheck);
-    return true;
-}
-
-static bool
 CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type)
 {
     if (CallArgListLength(call) != 3)
         return f.fail(call, "Atomics.exchange must be passed 3 arguments");
 
     ParseNode* arrayArg = CallArgList(call);
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);
@@ -6427,31 +3675,16 @@ CheckAtomicsExchange(FunctionValidator& 
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = Type::Int;
     return true;
 }
 
 static bool
-EmitAtomicsExchange(FunctionCompiler& f, MDefinition** def)
-{
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    Scalar::Type viewType = Scalar::Type(f.readU8());
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-    MDefinition* value;
-    if (!EmitI32Expr(f, &value))
-        return false;
-    *def = f.atomicExchangeHeap(viewType, index, value, needsBoundsCheck);
-    return true;
-}
-
-static bool
 CheckAtomicsBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSAtomicsBuiltinFunction func,
                         Type* resultType)
 {
     switch (func) {
       case AsmJSAtomicsBuiltin_compareExchange:
         return CheckAtomicsCompareExchange(f, callNode, resultType);
       case AsmJSAtomicsBuiltin_exchange:
         return CheckAtomicsExchange(f, callNode, resultType);
@@ -6494,37 +3727,16 @@ CheckCallArgs(FunctionValidator& f, Pars
 
         if (!signature.appendArg(VarType::FromCheckedType(type)))
             return false;
     }
     return true;
 }
 
 static bool
-EmitCallArgs(FunctionCompiler& f, const Signature& sig, FunctionCompiler::Call* call)
-{
-    f.startCallArgs(call);
-    for (unsigned i = 0; i < sig.args().length(); i++) {
-        MDefinition* arg = nullptr;
-        switch (sig.arg(i).which()) {
-          case VarType::Int:       if (!EmitI32Expr(f, &arg))   return false; break;
-          case VarType::Float:     if (!EmitF32Expr(f, &arg))   return false; break;
-          case VarType::Double:    if (!EmitF64Expr(f, &arg))   return false; break;
-          case VarType::Int32x4:   if (!EmitI32X4Expr(f, &arg)) return false; break;
-          case VarType::Float32x4: if (!EmitF32X4Expr(f, &arg)) return false; break;
-          default: MOZ_CRASH("unexpected vartype");
-        }
-        if (!f.passArg(arg, sig.arg(i).toMIRType(), call))
-            return false;
-    }
-    f.finishCallArgs(call);
-    return true;
-}
-
-static bool
 CheckSignatureAgainstExisting(ModuleValidator& m, ParseNode* usepn, const Signature& sig,
                               const Signature& existing)
 {
     if (sig.args().length() != existing.args().length()) {
         return m.failf(usepn, "incompatible number of arguments (%u here vs. %u before)",
                        sig.args().length(), existing.args().length());
     }
 
@@ -6574,23 +3786,16 @@ static void
 WriteCallLineCol(FunctionValidator& f, ParseNode* pn)
 {
     uint32_t line, column;
     f.m().tokenStream().srcCoords.lineNumAndColumnIndex(pn->pn_pos.begin, &line, &column);
     f.writeU32(line);
     f.writeU32(column);
 }
 
-static void
-ReadCallLineCol(FunctionCompiler& f, uint32_t* line, uint32_t* column)
-{
-    *line = f.readU32();
-    *column = f.readU32();
-}
-
 static bool
 CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calleeName,
                   RetType retType, Type* type)
 {
     if (!f.canCall()) {
         return f.fail(callNode, "call expressions may not be nested inside heap expressions "
                                 "when the module contains a change-heap function");
     }
@@ -6621,39 +3826,16 @@ CheckInternalCall(FunctionValidator& f, 
 
     f.patch32(funcIndexAt, callee->funcIndex());
     f.patchSignature(signatureAt, &callee->sig());
     *type = retType.toType();
     return true;
 }
 
 static bool
-EmitInternalCall(FunctionCompiler& f, RetType retType, MDefinition** def)
-{
-    uint32_t funcIndex = f.readU32();
-
-    Label* entry;
-    if (!f.m().getOrCreateFunctionEntry(funcIndex, &entry))
-        return false;
-
-    const Signature& sig = *f.readSignature();
-
-    MOZ_ASSERT_IF(sig.retType() != RetType::Void, sig.retType() == retType);
-
-    uint32_t lineno, column;
-    ReadCallLineCol(f, &lineno, &column);
-
-    FunctionCompiler::Call call(f, lineno, column);
-    if (!EmitCallArgs(f, sig, &call))
-        return false;
-
-    return f.internalCall(sig, entry, call, def);
-}
-
-static bool
 CheckFuncPtrTableAgainstExisting(ModuleValidator& m, ParseNode* usepn,
                                  PropertyName* name, Signature&& sig, unsigned mask,
                                  ModuleValidator::FuncPtrTable** tableOut)
 {
     if (const ModuleValidator::Global* existing = m.lookupGlobal(name)) {
         if (existing->which() != ModuleValidator::Global::FuncPtrTable)
             return m.failName(usepn, "'%s' is not a function-pointer table", name);
 
@@ -6742,39 +3924,16 @@ CheckFuncPtrCall(FunctionValidator& f, P
     f.patch32(globalDataOffsetAt, table->globalDataOffset());
     f.patchSignature(signatureAt, &table->sig());
 
     *type = retType.toType();
     return true;
 }
 
 static bool
-EmitFuncPtrCall(FunctionCompiler& f, RetType retType, MDefinition** def)
-{
-    uint32_t mask = f.readU32();
-    uint32_t globalDataOffset = f.readU32();
-
-    const Signature& sig = *f.readSignature();
-    MOZ_ASSERT_IF(sig.retType() != RetType::Void, sig.retType() == retType);
-
-    uint32_t lineno, column;
-    ReadCallLineCol(f, &lineno, &column);
-
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-
-    FunctionCompiler::Call call(f, lineno, column);
-    if (!EmitCallArgs(f, sig, &call))
-        return false;
-
-    return f.funcPtrCall(sig, mask, globalDataOffset, index, call, def);
-}
-
-static bool
 CheckIsExternType(FunctionValidator& f, ParseNode* argNode, Type type)
 {
     if (!type.isExtern())
         return f.failf(argNode, "%s is not a subtype of extern", type.toChars());
     return true;
 }
 
 static bool
@@ -6823,34 +3982,16 @@ CheckFFICall(FunctionValidator& f, Parse
     JS_STATIC_ASSERT(offsetof(AsmJSModule::ExitDatum, exit) == 0);
     f.patch32(offsetAt, f.module().exitIndexToGlobalDataOffset(exitIndex));
     f.patchSignature(sigAt, lifoSig);
     *type = retType.toType();
     return true;
 }
 
 static bool
-EmitFFICall(FunctionCompiler& f, RetType retType, MDefinition** def)
-{
-    uint32_t globalDataOffset = f.readU32();
-
-    const Signature& sig = *f.readSignature();
-    MOZ_ASSERT_IF(sig.retType() != RetType::Void, sig.retType() == retType);
-
-    uint32_t lineno, column;
-    ReadCallLineCol(f, &lineno, &column);
-
-    FunctionCompiler::Call call(f, lineno, column);
-    if (!EmitCallArgs(f, sig, &call))
-        return false;
-
-    return f.ffiCall(globalDataOffset, call, retType.toMIRType(), def);
-}
-
-static bool
 CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType,
                       size_t opcodeAt)
 {
     if (inputType.isMaybeDouble()) {
         f.patchOp(opcodeAt, F32::FromF64);
         return true;
     }
     if (inputType.isSigned()) {
@@ -6992,78 +4133,16 @@ CheckMathBuiltinCall(FunctionValidator& 
         if (firstType.isMaybeFloat() && !secondType.isMaybeFloat())
             return f.fail(argNode, "both arguments to math builtin call should be the same type");
     }
 
     *type = opIsDouble ? Type::Double : Type::Floatish;
     return true;
 }
 
-static bool
-EmitMathBuiltinCall(FunctionCompiler& f, F32 f32, MDefinition** def)
-{
-    MOZ_ASSERT(f32 == F32::Ceil || f32 == F32::Floor);
-
-    uint32_t lineno, column;
-    ReadCallLineCol(f, &lineno, &column);
-
-    FunctionCompiler::Call call(f, lineno, column);
-    f.startCallArgs(&call);
-
-    MDefinition* firstArg;
-    if (!EmitF32Expr(f, &firstArg) || !f.passArg(firstArg, MIRType_Float32, &call))
-        return false;
-
-    f.finishCallArgs(&call);
-
-    AsmJSImmKind callee = f32 == F32::Ceil ? AsmJSImm_CeilF : AsmJSImm_FloorF;
-    return f.builtinCall(callee, call, MIRType_Float32, def);
-}
-
-static bool
-EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def)
-{
-    uint32_t lineno, column;
-    ReadCallLineCol(f, &lineno, &column);
-
-    FunctionCompiler::Call call(f, lineno, column);
-    f.startCallArgs(&call);
-
-    MDefinition* firstArg;
-    if (!EmitF64Expr(f, &firstArg) || !f.passArg(firstArg, MIRType_Double, &call))
-        return false;
-
-    if (f64 == F64::Pow || f64 == F64::Atan2) {
-        MDefinition* secondArg;
-        if (!EmitF64Expr(f, &secondArg) || !f.passArg(secondArg, MIRType_Double, &call))
-            return false;
-    }
-
-    AsmJSImmKind callee;
-    switch (f64) {
-      case F64::Ceil:  callee = AsmJSImm_CeilD; break;
-      case F64::Floor: callee = AsmJSImm_FloorD; break;
-      case F64::Sin:   callee = AsmJSImm_SinD; break;
-      case F64::Cos:   callee = AsmJSImm_CosD; break;
-      case F64::Tan:   callee = AsmJSImm_TanD; break;
-      case F64::Asin:  callee = AsmJSImm_ASinD; break;
-      case F64::Acos:  callee = AsmJSImm_ACosD; break;
-      case F64::Atan:  callee = AsmJSImm_ATanD; break;
-      case F64::Exp:   callee = AsmJSImm_ExpD; break;
-      case F64::Log:   callee = AsmJSImm_LogD; break;
-      case F64::Pow:   callee = AsmJSImm_PowD; break;
-      case F64::Atan2: callee = AsmJSImm_ATan2D; break;
-      default: MOZ_CRASH("unexpected double math builtin callee");
-    }
-
-    f.finishCallArgs(&call);
-
-    return f.builtinCall(callee, call, MIRType_Double, def);
-}
-
 namespace {
 // Include CheckSimdCallArgs in unnamed namespace to avoid MSVC name lookup bug.
 
 template<class CheckArgOp>
 static bool
 CheckSimdCallArgs(FunctionValidator& f, ParseNode* call, unsigned expectedArity,
                   const CheckArgOp& checkArg)
 {
@@ -7323,27 +4402,16 @@ CheckSimdUnary(FunctionValidator& f, Par
     SwitchPackOp(f, opType, I32X4::Unary, F32X4::Unary);
     f.writeU8(uint8_t(op));
     if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
         return false;
     *type = opType;
     return true;
 }
 
-static bool
-EmitSimdUnary(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MSimdUnaryArith::Operation op = MSimdUnaryArith::Operation(f.readU8());
-    MDefinition* in;
-    if (!EmitExpr(f, type, &in))
-        return false;
-    *def = f.unarySimd(in, op, MIRTypeFromAsmType(type));
-    return true;
-}
-
 template<class OpKind>
 inline bool
 CheckSimdBinaryGuts(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, OpKind op,
                     Type* type)
 {
     f.writeU8(uint8_t(op));
     if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
         return false;
@@ -7354,212 +4422,77 @@ CheckSimdBinaryGuts(FunctionValidator& f
 static bool
 CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType,
                 MSimdBinaryArith::Operation op, Type* type)
 {
     SwitchPackOp(f, opType, I32X4::Binary, F32X4::Binary);
     return CheckSimdBinaryGuts(f, call, opType, op, type);
 }
 
-template<class OpKind>
-inline bool
-EmitBinarySimdGuts(FunctionCompiler& f, AsmType type, OpKind op, MDefinition** def)
-{
-    MDefinition* lhs;
-    if (!EmitExpr(f, type, &lhs))
-        return false;
-    MDefinition* rhs;
-    if (!EmitExpr(f, type, &rhs))
-        return false;
-    *def = f.binarySimd(lhs, rhs, op, MIRTypeFromAsmType(type));
-    return true;
-}
-
-static bool
-EmitSimdBinaryArith(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MSimdBinaryArith::Operation op = MSimdBinaryArith::Operation(f.readU8());
-    return EmitBinarySimdGuts(f, type, op, def);
-}
-
 static bool
 CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType,
                 MSimdBinaryBitwise::Operation op, Type* type)
 {
     SwitchPackOp(f, opType, I32X4::BinaryBitwise, F32X4::BinaryBitwise);
     return CheckSimdBinaryGuts(f, call, opType, op, type);
 }
 
 static bool
-EmitSimdBinaryBitwise(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MSimdBinaryBitwise::Operation op = MSimdBinaryBitwise::Operation(f.readU8());
-    return EmitBinarySimdGuts(f, type, op, def);
-}
-
-static bool
 CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType,
                 MSimdBinaryComp::Operation op, Type* type)
 {
     switch (opType) {
       case AsmJSSimdType_int32x4:   f.writeOp(I32X4::BinaryCompI32X4); break;
       case AsmJSSimdType_float32x4: f.writeOp(I32X4::BinaryCompF32X4); break;
     }
     f.writeU8(uint8_t(op));
     if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
         return false;
     *type = Type::Int32x4;
     return true;
 }
 
 static bool
-EmitSimdBinaryComp(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MSimdBinaryComp::Operation op = MSimdBinaryComp::Operation(f.readU8());
-    MDefinition* lhs;
-    if (!EmitExpr(f, type, &lhs))
-        return false;
-    MDefinition* rhs;
-    if (!EmitExpr(f, type, &rhs))
-        return false;
-    *def = f.binarySimd<MSimdBinaryComp>(lhs, rhs, op);
-    return true;
-}
-
-static bool
 CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType,
                 MSimdShift::Operation op, Type* type)
 {
     f.writeOp(I32X4::BinaryShift);
     f.writeU8(uint8_t(op));
     if (!CheckSimdCallArgs(f, call, 2, CheckSimdVectorScalarArgs(opType)))
         return false;
     *type = Type::Int32x4;
     return true;
 }
 
 static bool
-EmitSimdBinaryShift(FunctionCompiler& f, MDefinition** def)
-{
-    MSimdShift::Operation op = MSimdShift::Operation(f.readU8());
-    MDefinition* lhs;
-    if (!EmitI32X4Expr(f, &lhs))
-        return false;
-    MDefinition* rhs;
-    if (!EmitI32Expr(f, &rhs))
-        return false;
-    *def = f.binarySimd<MSimdShift>(lhs, rhs, op);
-    return true;
-}
-
-static bool
 CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type* type)
 {
     switch (opType) {
       case AsmJSSimdType_int32x4:
         f.writeOp(I32::I32X4ExtractLane);
         *type = Type::Signed;
         break;
       case AsmJSSimdType_float32x4:
         f.writeOp(F32::F32X4ExtractLane);
         *type = Type::Float;
         break;
     }
     return CheckSimdCallArgs(f, call, 2, CheckSimdExtractLaneArgs(opType));
 }
 
-static MIRType
-ScalarMIRTypeFromSimdAsmType(AsmType type)
-{
-    switch (type) {
-      case AsmType::Int32:
-      case AsmType::Float32:
-      case AsmType::Float64:   break;
-      case AsmType::Int32x4:   return MIRType_Int32;
-      case AsmType::Float32x4: return MIRType_Float32;
-    }
-    MOZ_CRASH("unexpected simd type");
-}
-
-static bool
-EmitExtractLane(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* vec;
-    if (!EmitExpr(f, type, &vec))
-        return false;
-
-    MDefinition* laneDef;
-    if (!EmitI32Expr(f, &laneDef))
-        return false;
-
-    if (!laneDef) {
-        *def = nullptr;
-        return true;
-    }
-
-    MOZ_ASSERT(laneDef->isConstant());
-    int32_t laneLit = laneDef->toConstant()->value().toInt32();
-    MOZ_ASSERT(laneLit < 4);
-    SimdLane lane = SimdLane(laneLit);
-
-    *def = f.extractSimdElement(lane, vec, ScalarMIRTypeFromSimdAsmType(type));
-    return true;
-}
-
 static bool
 CheckSimdReplaceLane(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type* type)
 {
     SwitchPackOp(f, opType, I32X4::ReplaceLane, F32X4::ReplaceLane);
     if (!CheckSimdCallArgsPatchable(f, call, 3, CheckSimdReplaceLaneArgs(opType)))
         return false;
     *type = opType;
     return true;
 }
 
-static AsmType
-AsmSimdTypeToScalarType(AsmType simd)
-{
-    switch (simd) {
-      case AsmType::Int32x4:   return AsmType::Int32;
-      case AsmType::Float32x4: return AsmType::Float32;
-      case AsmType::Int32:
-      case AsmType::Float32:
-      case AsmType::Float64:    break;
-    }
-    MOZ_CRASH("unexpected simd type");
-}
-
-static bool
-EmitSimdReplaceLane(FunctionCompiler& f, AsmType simdType, MDefinition** def)
-{
-    MDefinition* vector;
-    if (!EmitExpr(f, simdType, &vector))
-        return false;
-
-    MDefinition* laneDef;
-    if (!EmitI32Expr(f, &laneDef))
-        return false;
-
-    SimdLane lane;
-    if (laneDef) {
-        MOZ_ASSERT(laneDef->isConstant());
-        int32_t laneLit = laneDef->toConstant()->value().toInt32();
-        MOZ_ASSERT(laneLit < 4);
-        lane = SimdLane(laneLit);
-    } else {
-        lane = SimdLane(-1);
-    }
-
-    MDefinition* scalar;
-    if (!EmitExpr(f, AsmSimdTypeToScalarType(simdType), &scalar))
-        return false;
-    *def = f.insertElementSimd(vector, scalar, lane, MIRTypeFromAsmType(simdType));
-    return true;
-}
-
 typedef bool IsBitCast;
 
 namespace {
 // Include CheckSimdCast in unnamed namespace to avoid MSVC name lookup bug (due to the use of Type).
 
 static bool
 CheckSimdCast(FunctionValidator& f, ParseNode* call, AsmJSSimdType fromType, AsmJSSimdType toType,
               bool bitcast, Type* type)
@@ -7570,27 +4503,16 @@ CheckSimdCast(FunctionValidator& f, Pars
     if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(fromType)))
         return false;
     *type = toType;
     return true;
 }
 
 } // namespace
 
-template<class T>
-inline bool
-EmitSimdCast(FunctionCompiler& f, AsmType fromType, AsmType toType, MDefinition** def)
-{
-    MDefinition* in;
-    if (!EmitExpr(f, fromType, &in))
-        return false;
-    *def = f.convertSimd<T>(in, MIRTypeFromAsmType(fromType), MIRTypeFromAsmType(toType));
-    return true;
-}
-
 static bool
 CheckSimdShuffleSelectors(FunctionValidator& f, ParseNode* lane, int32_t lanes[4], uint32_t maxLane)
 {
     for (unsigned i = 0; i < 4; i++, lane = NextNode(lane)) {
         uint32_t u32;
         if (!IsLiteralInt(f.m(), lane, &u32))
             return f.failf(lane, "lane selector should be a constant integer literal");
         if (u32 >= maxLane)
@@ -7624,31 +4546,16 @@ CheckSimdSwizzle(FunctionValidator& f, P
     for (unsigned i = 0; i < 4; i++)
         f.writeU8(uint8_t(lanes[i]));
 
     *type = retType;
     return true;
 }
 
 static bool
-EmitSimdSwizzle(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* in;
-    if (!EmitExpr(f, type, &in))
-        return false;
-
-    uint8_t lanes[4];
-    for (unsigned i = 0; i < 4; i++)
-        lanes[i] = f.readU8();
-
-    *def = f.swizzleSimd(in, lanes[0], lanes[1], lanes[2], lanes[3], MIRTypeFromAsmType(type));
-    return true;
-}
-
-static bool
 CheckSimdShuffle(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type* type)
 {
     unsigned numArgs = CallArgListLength(call);
     if (numArgs != 6)
         return f.failf(call, "expected 6 arguments to SIMD shuffle, got %u", numArgs);
 
     SwitchPackOp(f, opType, I32X4::Shuffle, F32X4::Shuffle);
 
@@ -7669,36 +4576,16 @@ CheckSimdShuffle(FunctionValidator& f, P
     for (unsigned i = 0; i < 4; i++)
         f.writeU8(uint8_t(lanes[i]));
 
     *type = retType;
     return true;
 }
 
 static bool
-EmitSimdShuffle(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* lhs;
-    if (!EmitExpr(f, type, &lhs))
-        return false;
-
-    MDefinition* rhs;
-    if (!EmitExpr(f, type, &rhs))
-        return false;
-
-    uint8_t lanes[4];
-    for (unsigned i = 0; i < 4; i++)
-        lanes[i] = f.readU8();
-
-    *def = f.shuffleSimd(lhs, rhs, lanes[0], lanes[1], lanes[2], lanes[3],
-                         MIRTypeFromAsmType(type));
-    return true;
-}
-
-static bool
 CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType,
                        Scalar::Type* viewType, NeedsBoundsCheck* needsBoundsCheck)
 {
     ParseNode* view = CallArgList(call);
     if (!view->isKind(PNK_NAME))
         return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
 
     const ModuleValidator::Global* global = f.lookupGlobal(view->name());
@@ -7767,31 +4654,16 @@ CheckSimdLoad(FunctionValidator& f, Pars
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = opType;
     return true;
 }
 
 static bool
-EmitSimdLoad(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    Scalar::Type viewType = Scalar::Type(f.readU8());
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    uint8_t numElems = f.readU8();
-
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-
-    *def = f.loadSimdHeap(viewType, index, needsBoundsCheck, numElems);
-    return true;
-}
-
-static bool
 CheckSimdStore(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType,
                unsigned numElems, Type* type)
 {
     unsigned numArgs = CallArgListLength(call);
     if (numArgs != 3)
         return f.failf(call, "expected 3 arguments to SIMD store, got %u", numArgs);
 
     SwitchPackOp(f, opType, I32X4::Store, F32X4::Store);
@@ -7815,60 +4687,28 @@ CheckSimdStore(FunctionValidator& f, Par
     f.patchU8(needsBoundsCheckAt, uint8_t(needsBoundsCheck));
     f.patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = vecType;
     return true;
 }
 
 static bool
-EmitSimdStore(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    Scalar::Type viewType = Scalar::Type(f.readU8());
-    NeedsBoundsCheck needsBoundsCheck = NeedsBoundsCheck(f.readU8());
-    uint8_t numElems = f.readU8();
-
-    MDefinition* index;
-    if (!EmitI32Expr(f, &index))
-        return false;
-
-    MDefinition* vec;
-    if (!EmitExpr(f, type, &vec))
-        return false;
-
-    f.storeSimdHeap(viewType, index, vec, needsBoundsCheck, numElems);
-    *def = vec;
-    return true;
-}
-
-static bool
 CheckSimdSelect(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, bool isElementWise,
                 Type* type)
 {
     SwitchPackOp(f, opType,
                  isElementWise ? I32X4::Select : I32X4::BitSelect,
                  isElementWise ? F32X4::Select : F32X4::BitSelect);
     if (!CheckSimdCallArgs(f, call, 3, CheckSimdSelectArgs(opType)))
         return false;
     *type = opType;
     return true;
 }
 
-typedef bool IsElementWise;
-
-static bool
-EmitSimdSelect(FunctionCompiler& f, AsmType type, bool isElementWise, MDefinition** def)
-{
-    MDefinition* defs[3];
-    if (!EmitI32X4Expr(f, &defs[0]) || !EmitExpr(f, type, &defs[1]) || !EmitExpr(f, type, &defs[2]))
-        return false;
-    *def = f.selectSimd(defs[0], defs[1], defs[2], MIRTypeFromAsmType(type), isElementWise);
-    return true;
-}
-
 static bool
 CheckSimdCheck(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type* type)
 {
     AsmJSCoercion coercion;
     ParseNode* argNode;
     if (!IsCoercionCall(f.m(), call, &coercion, &argNode))
         return f.failf(call, "expected 1 argument in call to check");
     return CheckCoercionArg(f, argNode, coercion, type);
@@ -7880,26 +4720,16 @@ CheckSimdSplat(FunctionValidator& f, Par
     SwitchPackOp(f, opType, I32X4::Splat, F32X4::Splat);
     if (!CheckSimdCallArgsPatchable(f, call, 1, CheckSimdScalarArgs(opType)))
         return false;
     *type = opType;
     return true;
 }
 
 static bool
-EmitSimdSplat(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* in;
-    if (!EmitExpr(f, AsmSimdTypeToScalarType(type), &in))
-        return false;
-    *def = f.splatSimd(in, MIRTypeFromAsmType(type));
-    return true;
-}
-
-static bool
 CheckSimdOperationCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
                        Type* type)
 {
     MOZ_ASSERT(global->isSimdOperation());
 
     AsmJSSimdType opType = global->simdOperationType();
 
     switch (global->simdOperation()) {
@@ -8013,46 +4843,16 @@ CheckSimdCtorCall(FunctionValidator& f, 
     if (!CheckSimdCallArgsPatchable(f, call, length, CheckSimdScalarArgs(simdType)))
         return false;
 
     *type = simdType;
     return true;
 }
 
 static bool
-EmitSimdCtor(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    switch (type) {
-      case AsmType::Int32x4: {
-        MDefinition* args[4];
-        for (unsigned i = 0; i < 4; i++) {
-            if (!EmitI32Expr(f, &args[i]))
-                return false;
-        }
-        *def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType_Int32x4);
-        return true;
-      }
-      case AsmType::Float32x4: {
-        MDefinition* args[4];
-        for (unsigned i = 0; i < 4; i++) {
-            if (!EmitF32Expr(f, &args[i]))
-                return false;
-        }
-        *def = f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3], MIRType_Float32x4);
-        return true;
-      }
-      case AsmType::Int32:
-      case AsmType::Float32:
-      case AsmType::Float64:
-        break;
-    }
-    MOZ_CRASH("unexpected SIMD type");
-}
-
-static bool
 CheckUncoercedCall(FunctionValidator& f, ParseNode* expr, Type* type)
 {
     MOZ_ASSERT(expr->isKind(PNK_CALL));
 
     const ModuleValidator::Global* global;
     if (IsCallToGlobal(f.m(), expr, &global)) {
         if (global->isMathFunction())
             return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), type);
@@ -8310,38 +5110,16 @@ CheckNeg(FunctionValidator& f, ParseNode
         f.patchOp(opcodeAt, F32::Neg);
         *type = Type::Floatish;
         return true;
     }
 
     return f.failf(operand, "%s is not a subtype of int, float? or double?", operandType.toChars());
 }
 
-template<class T>
-static bool
-EmitUnary(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* in;
-    if (!EmitExpr(f, type, &in))
-        return false;
-    *def = f.unary<T>(in);
-    return true;
-}
-
-template<class T>
-static bool
-EmitUnaryMir(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* in;
-    if (!EmitExpr(f, type, &in))
-        return false;
-    *def = f.unary<T>(in, MIRTypeFromAsmType(type));
-    return true;
-}
-
 static bool
 CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type)
 {
     MOZ_ASSERT(expr->isKind(PNK_BITNOT));
     ParseNode* operand = UnaryKid(expr);
 
     size_t opcodeAt = f.tempOp();
 
@@ -8417,29 +5195,16 @@ CheckComma(FunctionValidator& f, ParseNo
     else if (type->isFloat32x4())
         f.patchOp(commaAt, F32X4::Comma);
     else
         MOZ_CRASH("unexpected or unimplemented expression statement");
 
     return true;
 }
 
-static bool EmitStatement(FunctionCompiler& f, LabelVector* maybeLabels = nullptr);
-
-static bool
-EmitComma(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    uint32_t numExpr = f.readU32();
-    for (uint32_t i = 1; i < numExpr; i++) {
-        if (!EmitStatement(f))
-            return false;
-    }
-    return EmitExpr(f, type, def);
-}
-
 static bool
 CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type)
 {
     MOZ_ASSERT(ternary->isKind(PNK_CONDITIONAL));
 
     size_t opcodeAt = f.tempOp();
 
     ParseNode* cond = TernaryKid1(ternary);
@@ -8481,53 +5246,16 @@ CheckConditional(FunctionValidator& f, P
                        "double or SIMD types, current types are %s and %s",
                        thenType.toChars(), elseType.toChars());
     }
 
     return true;
 }
 
 static bool
-EmitConditional(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* cond;
-    if (!EmitI32Expr(f, &cond))
-        return false;
-
-    MBasicBlock* thenBlock = nullptr;
-    MBasicBlock* elseBlock = nullptr;
-    if (!f.branchAndStartThen(cond, &thenBlock, &elseBlock))
-        return false;
-
-    MDefinition* ifTrue;
-    if (!EmitExpr(f, type, &ifTrue))
-        return false;
-
-    BlockVector thenBlocks;
-    if (!f.appendThenBlock(&thenBlocks))
-        return false;
-
-    f.pushPhiInput(ifTrue);
-
-    f.switchToElse(elseBlock);
-
-    MDefinition* ifFalse;
-    if (!EmitExpr(f, type, &ifFalse))
-        return false;
-
-    f.pushPhiInput(ifFalse);
-
-    if (!f.joinIfElse(thenBlocks))
-        return false;
-
-    *def = f.popPhiOutput();
-    return true;
-}
-
-static bool
 IsValidIntMultiplyConstant(ModuleValidator& m, ParseNode* expr)
 {
     if (!IsNumericLiteral(m, expr))
         return false;
 
     AsmJSNumLit literal = ExtractNumericLiteral(m, expr);
     switch (literal.which()) {
       case AsmJSNumLit::Fixnum:
@@ -8583,30 +5311,16 @@ CheckMultiply(FunctionValidator& f, Pars
         *type = Type::Floatish;
         return true;
     }
 
     return f.fail(star, "multiply operands must be both int, both double? or both float?");
 }
 
 static bool
-EmitMultiply(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    MDefinition* lhs;
-    if (!EmitExpr(f, type, &lhs))
-        return false;
-    MDefinition* rhs;
-    if (!EmitExpr(f, type, &rhs))
-        return false;
-    MIRType mirType = MIRTypeFromAsmType(type);
-    *def = f.mul(lhs, rhs, mirType, type == AsmType::Int32 ? MMul::Integer : MMul::Normal);
-    return true;
-}
-
-static bool
 CheckAddOrSub(FunctionValidator& f, ParseNode* expr, Type* type, unsigned* numAddOrSubOut = nullptr)
 {
     JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
 
     MOZ_ASSERT(expr->isKind(PNK_ADD) || expr->isKind(PNK_SUB));
     ParseNode* lhs = AddSubLeft(expr);
     ParseNode* rhs = AddSubRight(expr);
 
@@ -8655,32 +5369,16 @@ CheckAddOrSub(FunctionValidator& f, Pars
                        lhsType.toChars(), rhsType.toChars());
     }
 
     if (numAddOrSubOut)
         *numAddOrSubOut = numAddOrSub;
     return true;
 }
 
-typedef bool IsAdd;
-
-static bool
-EmitAddOrSub(FunctionCompiler& f, AsmType type, bool isAdd, MDefinition** def)
-{
-    MDefinition* lhs;
-    if (!EmitExpr(f, type, &lhs))
-        return false;
-    MDefinition* rhs;
-    if (!EmitExpr(f, type, &rhs))
-        return false;
-    MIRType mirType = MIRTypeFromAsmType(type);
-    *def = isAdd ? f.binary<MAdd>(lhs, rhs, mirType) : f.binary<MSub>(lhs, rhs, mirType);
-    return true;
-}
-
 static bool
 CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type)
 {
     MOZ_ASSERT(expr->isKind(PNK_DIV) || expr->isKind(PNK_MOD));
 
     size_t opcodeAt = f.tempOp();
 
     ParseNode* lhs = DivOrModLeft(expr);
@@ -8718,41 +5416,16 @@ CheckDivOrMod(FunctionValidator& f, Pars
         *type = Type::Intish;
         return true;
     }
 
     return f.failf(expr, "arguments to / or %% must both be double?, float?, signed, or unsigned; "
                    "%s and %s are given", lhsType.toChars(), rhsType.toChars());
 }
 
-typedef bool IsUnsigned;
-typedef bool IsDiv;
-
-static bool
-EmitDivOrMod(FunctionCompiler& f, AsmType type, bool isDiv, bool isUnsigned, MDefinition** def)
-{
-    MDefinition* lhs;
-    if (!EmitExpr(f, type, &lhs))
-        return false;
-    MDefinition* rhs;
-    if (!EmitExpr(f, type, &rhs))
-        return false;
-    *def = isDiv
-           ? f.div(lhs, rhs, MIRTypeFromAsmType(type), isUnsigned)
-           : f.mod(lhs, rhs, MIRTypeFromAsmType(type), isUnsigned);
-    return true;
-}
-
-static bool
-EmitDivOrMod(FunctionCompiler& f, AsmType type, bool isDiv, MDefinition** def)
-{
-    MOZ_ASSERT(type != AsmType::Int32, "int div or mod must specify signedness");
-    return EmitDivOrMod(f, type, isDiv, false, def);
-}
-
 static bool
 CheckComparison(FunctionValidator& f, ParseNode* comp, Type* type)
 {
     MOZ_ASSERT(comp->isKind(PNK_LT) || comp->isKind(PNK_LE) || comp->isKind(PNK_GT) ||
                comp->isKind(PNK_GE) || comp->isKind(PNK_EQ) || comp->isKind(PNK_NE));
 
     size_t opcodeAt = f.tempOp();
 
@@ -8820,106 +5493,16 @@ CheckComparison(FunctionValidator& f, Pa
     }
 
     f.patchOp(opcodeAt, stmt);
     *type = Type::Int;
     return true;
 }
 
 static bool
-EmitComparison(FunctionCompiler& f, I32 stmt, MDefinition** def)
-{
-    MDefinition* lhs;
-    MDefinition* rhs;
-    MCompare::CompareType compareType;
-    switch (stmt) {
-      case I32::EqI32:
-      case I32::NeI32:
-      case I32::SLeI32:
-      case I32::SLtI32:
-      case I32::ULeI32:
-      case I32::ULtI32:
-      case I32::SGeI32:
-      case I32::SGtI32:
-      case I32::UGeI32:
-      case I32::UGtI32:
-        if (!EmitI32Expr(f, &lhs) || !EmitI32Expr(f, &rhs))
-            return false;
-        // The list of opcodes is sorted so that all signed comparisons
-        // come before ULtI32.
-        compareType = stmt < I32::ULtI32
-                      ? MCompare::Compare_Int32
-                      : MCompare::Compare_UInt32;
-        break;
-      case I32::EqF32:
-      case I32::NeF32:
-      case I32::LeF32:
-      case I32::LtF32:
-      case I32::GeF32:
-      case I32::GtF32:
-        if (!EmitF32Expr(f, &lhs) || !EmitF32Expr(f, &rhs))
-            return false;
-        compareType = MCompare::Compare_Float32;
-        break;
-      case I32::EqF64:
-      case I32::NeF64:
-      case I32::LeF64:
-      case I32::LtF64:
-      case I32::GeF64:
-      case I32::GtF64:
-        if (!EmitF64Expr(f, &lhs) || !EmitF64Expr(f, &rhs))
-            return false;
-        compareType = MCompare::Compare_Double;
-        break;
-      default: MOZ_CRASH("unexpected comparison opcode");
-    }
-
-    JSOp compareOp;
-    switch (stmt) {
-      case I32::EqI32:
-      case I32::EqF32:
-      case I32::EqF64:
-        compareOp = JSOP_EQ;
-        break;
-      case I32::NeI32:
-      case I32::NeF32:
-      case I32::NeF64:
-        compareOp = JSOP_NE;
-        break;
-      case I32::SLeI32:
-      case I32::ULeI32:
-      case I32::LeF32:
-      case I32::LeF64:
-        compareOp = JSOP_LE;
-        break;
-      case I32::SLtI32:
-      case I32::ULtI32:
-      case I32::LtF32:
-      case I32::LtF64:
-        compareOp = JSOP_LT;
-        break;
-      case I32::SGeI32:
-      case I32::UGeI32:
-      case I32::GeF32:
-      case I32::GeF64:
-        compareOp = JSOP_GE;
-        break;
-      case I32::SGtI32:
-      case I32::UGtI32:
-      case I32::GtF32:
-      case I32::GtF64:
-        compareOp = JSOP_GT;
-        break;
-      default: MOZ_CRASH("unexpected comparison opcode");
-    }
-
-    *def = f.compare(lhs, rhs, compareOp, compareType);
-    return true;
-}
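
// The signed/unsigned split above leans on opcode declaration order: one
// range check (stmt < I32::ULtI32) classifies a comparison instead of a
// second switch. A sketch of the trick with a stand-in enum, assuming a
// declaration order that places every unsigned comparison from ULtI32
// onward (the real opcode list lives in AsmJSGlobals.h):

#include <cassert>
#include <cstdint>

enum class CmpOp : uint8_t {
    // Sign-agnostic and signed comparisons first...
    EqI32, NeI32, SLeI32, SLtI32, SGeI32, SGtI32,
    // ...then all unsigned comparisons, starting at ULtI32.
    ULtI32, ULeI32, UGtI32, UGeI32
};

static bool isUnsignedCompare(CmpOp op) {
    return op >= CmpOp::ULtI32;  // one range check instead of a switch
}

int main() {
    assert(!isUnsignedCompare(CmpOp::SLtI32));
    assert(isUnsignedCompare(CmpOp::UGeI32));
    return 0;
}
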
-
-static bool
 CheckBitwise(FunctionValidator& f, ParseNode* bitwise, Type* type)
 {
     ParseNode* lhs = BitwiseLeft(bitwise);
     ParseNode* rhs = BitwiseRight(bitwise);
 
     int32_t identityElement;
     bool onlyOnRight;
     switch (bitwise->getKind()) {
@@ -8975,41 +5558,16 @@ CheckBitwise(FunctionValidator& f, Parse
     if (!lhsType.isIntish())
         return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
     if (!rhsType.isIntish())
         return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
 
     return true;
 }
 
-template<class T>
-static bool
-EmitBitwise(FunctionCompiler& f, MDefinition** def)
-{
-    MDefinition* lhs;
-    if (!EmitI32Expr(f, &lhs))
-        return false;
-    MDefinition* rhs;
-    if (!EmitI32Expr(f, &rhs))
-        return false;
-    *def = f.bitwise<T>(lhs, rhs);
-    return true;
-}
-
-template<>
-bool
-EmitBitwise<MBitNot>(FunctionCompiler& f, MDefinition** def)
-{
-    MDefinition* in;
-    if (!EmitI32Expr(f, &in))
-        return false;
-    *def = f.bitwise<MBitNot>(in);
-    return true;
-}
-
 static bool
 CheckExpr(FunctionValidator& f, ParseNode* expr, Type* type)
 {
     JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
 
     if (IsNumericLiteral(f.m(), expr))
         return CheckNumericLiteral(f, expr, type);
 
@@ -9049,29 +5607,16 @@ CheckExpr(FunctionValidator& f, ParseNod
 
       default:;
     }
 
     return f.fail(expr, "unsupported expression");
 }
 
 static bool
-EmitExpr(FunctionCompiler& f, AsmType type, MDefinition** def)
-{
-    switch (type) {
-      case AsmType::Int32:     return EmitI32Expr(f, def);
-      case AsmType::Float32:   return EmitF32Expr(f, def);
-      case AsmType::Float64:   return EmitF64Expr(f, def);
-      case AsmType::Int32x4:   return EmitI32X4Expr(f, def);
-      case AsmType::Float32x4: return EmitF32X4Expr(f, def);
-    }
-    MOZ_CRASH("unexpected asm type");
-}
-
-static bool
 CheckStatement(FunctionValidator& f, ParseNode* stmt);
 
 static bool
 CheckAsExprStatement(FunctionValidator& f, ParseNode* expr)
 {
     Type type;
     if (expr->isKind(PNK_CALL))
         return CheckCoercedCall(f, expr, RetType::Void, &type);
@@ -9129,33 +5674,16 @@ MaybeAddInterruptCheck(FunctionValidator
 
     unsigned lineno = 0, column = 0;
     f.m().tokenStream().srcCoords.lineNumAndColumnIndex(pn->pn_pos.begin, &lineno, &column);
     f.writeU32(lineno);
     f.writeU32(column);
 }
 
 static bool
-EmitInterruptCheck(FunctionCompiler& f)
-{
-    unsigned lineno = f.readU32();
-    unsigned column = f.readU32();
-    f.addInterruptCheck(lineno, column);
-    return true;
-}
-
-static bool
-EmitInterruptCheckLoop(FunctionCompiler& f)
-{
-    if (!EmitInterruptCheck(f))
-        return false;
-    return EmitStatement(f);
-}
-
-static bool
 CheckWhile(FunctionValidator& f, ParseNode* whileStmt)
 {
     MOZ_ASSERT(whileStmt->isKind(PNK_WHILE));
     ParseNode* cond = BinaryLeft(whileStmt);
     ParseNode* body = BinaryRight(whileStmt);
 
     f.writeOp(Stmt::While);
 
@@ -9166,42 +5694,16 @@ CheckWhile(FunctionValidator& f, ParseNo
         return f.failf(cond, "%s is not a subtype of int", condType.toChars());
 
     MaybeAddInterruptCheck(f, InterruptCheckPosition::Loop, whileStmt);
 
     return CheckStatement(f, body);
 }
 
 static bool
-EmitWhile(FunctionCompiler& f, const LabelVector* maybeLabels)
-{
-    size_t headPc = f.pc();
-
-    MBasicBlock* loopEntry;
-    if (!f.startPendingLoop(headPc, &loopEntry))
-        return false;
-
-    MDefinition* condDef;
-    if (!EmitI32Expr(f, &condDef))
-        return false;
-
-    MBasicBlock* afterLoop;
-    if (!f.branchAndStartLoopBody(condDef, &afterLoop))
-        return false;
-
-    if (!EmitStatement(f))
-        return false;
-
-    if (!f.bindContinues(headPc, maybeLabels))
-        return false;
-
-    return f.closeLoop(loopEntry, afterLoop);
-}
-
-static bool
 CheckFor(FunctionValidator& f, ParseNode* forStmt)
 {
     MOZ_ASSERT(forStmt->isKind(PNK_FOR));
     ParseNode* forHead = BinaryLeft(forStmt);
     ParseNode* body = BinaryRight(forStmt);
 
     if (!forHead->isKind(PNK_FORHEAD))
         return f.fail(forHead, "unsupported for-loop statement");
@@ -9234,56 +5736,16 @@ CheckFor(FunctionValidator& f, ParseNode
     if (maybeInc && !CheckAsExprStatement(f, maybeInc))
         return false;
 
     f.writeDebugCheckPoint();
     return true;
 }
 
 static bool
-EmitFor(FunctionCompiler& f, Stmt stmt, const LabelVector* maybeLabels)
-{
-    MOZ_ASSERT(stmt == Stmt::ForInitInc || stmt == Stmt::ForInitNoInc ||
-               stmt == Stmt::ForNoInitInc || stmt == Stmt::ForNoInitNoInc);
-    size_t headPc = f.pc();
-
-    if (stmt == Stmt::ForInitInc || stmt == Stmt::ForInitNoInc) {
-        if (!EmitStatement(f))
-            return false;
-    }
-
-    MBasicBlock* loopEntry;
-    if (!f.startPendingLoop(headPc, &loopEntry))
-        return false;
-
-    MDefinition* condDef;
-    if (!EmitI32Expr(f, &condDef))
-        return false;
-
-    MBasicBlock* afterLoop;
-    if (!f.branchAndStartLoopBody(condDef, &afterLoop))
-        return false;
-
-    if (!EmitStatement(f))
-        return false;
-
-    if (!f.bindContinues(headPc, maybeLabels))
-        return false;
-
-    if (stmt == Stmt::ForInitInc || stmt == Stmt::ForNoInitInc) {
-        if (!EmitStatement(f))
-            return false;
-    }
-
-    f.assertDebugCheckPoint();
-
-    return f.closeLoop(loopEntry, afterLoop);
-}
-
-static bool
 CheckDoWhile(FunctionValidator& f, ParseNode* whileStmt)
 {
     MOZ_ASSERT(whileStmt->isKind(PNK_DOWHILE));
     ParseNode* body = BinaryLeft(whileStmt);
     ParseNode* cond = BinaryRight(whileStmt);
 
     f.writeOp(Stmt::DoWhile);
 
@@ -9297,38 +5759,16 @@ CheckDoWhile(FunctionValidator& f, Parse
         return false;
     if (!condType.isInt())
         return f.failf(cond, "%s is not a subtype of int", condType.toChars());
 
     return true;
 }
 
 static bool
-EmitDoWhile(FunctionCompiler& f, const LabelVector* maybeLabels)
-{
-    size_t headPc = f.pc();
-
-    MBasicBlock* loopEntry;
-    if (!f.startPendingLoop(headPc, &loopEntry))
-        return false;
-
-    if (!EmitStatement(f))
-        return false;
-
-    if (!f.bindContinues(headPc, maybeLabels))
-        return false;
-
-    MDefinition* condDef;
-    if (!EmitI32Expr(f, &condDef))
-        return false;
-
-    return f.branchAndCloseDoWhileLoop(condDef, loopEntry);
-}
-
-static bool
 CheckLabel(FunctionValidator& f, ParseNode* labeledStmt)
 {
     MOZ_ASSERT(labeledStmt->isKind(PNK_LABEL));
     PropertyName* label = LabeledStatementLabel(labeledStmt);
     ParseNode* stmt = LabeledStatementStatement(labeledStmt);
 
     f.writeOp(Stmt::Label);
 
@@ -9341,39 +5781,16 @@ CheckLabel(FunctionValidator& f, ParseNo
     if (!CheckStatement(f, stmt))
         return false;
 
     f.removeLabel(label);
     return true;
 }
 
 static bool
-EmitLabel(FunctionCompiler& f, LabelVector* maybeLabels)
-{
-    uint32_t labelId = f.readU32();
-
-    if (maybeLabels) {
-        if (!maybeLabels->append(labelId))
-            return false;
-        return EmitStatement(f, maybeLabels);
-    }
-
-    LabelVector labels;
-    if (!labels.append(labelId))
-        return false;
-
-    if (!EmitStatement(f, &labels))
-        return false;
-
-    return f.bindLabeledBreaks(&labels);
-}
-
-static bool EmitStatement(FunctionCompiler& f, Stmt stmt, LabelVector* maybeLabels = nullptr);
-
-static bool
 CheckIf(FunctionValidator& f, ParseNode* ifStmt)
 {
   recurse:
     size_t opcodeAt = f.tempOp();
 
     MOZ_ASSERT(ifStmt->isKind(PNK_IF));
     ParseNode* cond = TernaryKid1(ifStmt);
     ParseNode* thenStmt = TernaryKid2(ifStmt);
@@ -9400,65 +5817,16 @@ CheckIf(FunctionValidator& f, ParseNode*
 
         if (!CheckStatement(f, elseStmt))
             return false;
     }
 
     return true;
 }
 
-typedef bool HasElseBlock;
-
-static bool
-EmitIfElse(FunctionCompiler& f, bool hasElse)
-{
-    // Handle if/else-if chains using iteration instead of recursion. This
-    // avoids blowing the C stack quota for long if/else-if chains and also
-    // creates fewer MBasicBlocks at join points (by creating one join block
-    // for the entire if/else-if chain).
-    BlockVector thenBlocks;
-
-  recurse:
-    MDefinition* condition;
-    if (!EmitI32Expr(f, &condition))
-        return false;
-
-    MBasicBlock* thenBlock = nullptr;
-    MBasicBlock* elseOrJoinBlock = nullptr;
-    if (!f.branchAndStartThen(condition, &thenBlock, &elseOrJoinBlock))
-        return false;
-
-    if (!EmitStatement(f))
-        return false;
-
-    if (!f.appendThenBlock(&thenBlocks))
-        return false;
-
-    if (hasElse) {
-        f.switchToElse(elseOrJoinBlock);
-
-        Stmt nextStmt(f.readStmtOp());
-        if (nextStmt == Stmt::IfThen) {
-            hasElse = false;
-            goto recurse;
-        }
-        if (nextStmt == Stmt::IfElse) {
-            hasElse = true;
-            goto recurse;
-        }
-
-        if (!EmitStatement(f, nextStmt))
-            return false;
-
-        return f.joinIfElse(thenBlocks);
-    } else {
-        return f.joinIf(thenBlocks, elseOrJoinBlock);
-    }
-}
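
// The goto-based loop above flattens an if/else-if chain so that C-stack
// depth stays constant and the whole chain shares one join block. The same
// iteration-over-recursion idea as a self-contained sketch, assuming a
// simple linked representation of the chain:

#include <cassert>

struct Arm {
    bool cond;
    int value;          // result if cond holds
    const Arm* elseIf;  // next arm, or nullptr if the chain ends here
};

// One loop iteration per arm, no recursion: an arbitrarily long else-if
// chain uses constant C stack, and control rejoins at the single return.
static int evalChain(const Arm* arm, int elseValue) {
    for (; arm; arm = arm->elseIf) {
        if (arm->cond)
            return arm->value;
    }
    return elseValue;  // the trailing else block
}

int main() {
    Arm c = {true, 3, nullptr};
    Arm b = {false, 2, &c};
    Arm a = {false, 1, &b};
    assert(evalChain(&a, /* elseValue = */ 0) == 3);
    return 0;
}
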
-
 static bool
 CheckCaseExpr(FunctionValidator& f, ParseNode* caseExpr, int32_t* value)
 {
     if (!IsNumericLiteral(f.m(), caseExpr))
         return f.fail(caseExpr, "switch case expression must be an integer literal");
 
     AsmJSNumLit literal = ExtractNumericLiteral(f.m(), caseExpr);
     switch (literal.which()) {
@@ -9607,60 +5975,16 @@ CheckSwitch(FunctionValidator& f, ParseN
             return false;
     }
 
     PatchSwitch(f, hasDefaultAt, hasDefault, lowAt, low, highAt, high, numCasesAt, numCases);
     return true;
 }
 
 static bool
-EmitSwitch(FunctionCompiler& f)
-{
-    bool hasDefault = f.readU8();
-    int32_t low = f.readI32();
-    int32_t high = f.readI32();
-    uint32_t numCases = f.readU32();
-
-    MDefinition* exprDef;
-    if (!EmitI32Expr(f, &exprDef))
-        return false;
-
-    // Switch with no cases and no default: nothing more to emit
-    if (!hasDefault && numCases == 0)
-        return true;
-
-    BlockVector cases;
-    if (!cases.resize(high - low + 1))
-        return false;
-
-    MBasicBlock* switchBlock;
-    if (!f.startSwitch(f.pc(), exprDef, low, high, &switchBlock))
-        return false;
-
-    while (numCases--) {
-        int32_t caseValue = f.readI32();
-        MOZ_ASSERT(caseValue >= low && caseValue <= high);
-        unsigned caseIndex = caseValue - low;
-        if (!f.startSwitchCase(switchBlock, &cases[caseIndex]))
-            return false;
-        if (!EmitStatement(f))
-            return false;
-    }
-
-    MBasicBlock* defaultBlock;
-    if (!f.startSwitchDefault(switchBlock, &cases, &defaultBlock))
-        return false;
-
-    if (hasDefault && !EmitStatement(f))
-        return false;
-
-    return f.joinSwitch(switchBlock, cases, defaultBlock);
-}
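
// EmitSwitch materializes the cases as a dense table indexed by
// caseValue - low, with unfilled slots and out-of-range values routed to
// the default block. The same layout as a self-contained sketch over plain
// ints; DenseSwitch is an illustrative name, not an engine type:

#include <cassert>
#include <cstdint>
#include <vector>

struct DenseSwitch {
    int32_t low, high;
    std::vector<int> table;       // one slot per value in [low, high]
    std::vector<bool> present;    // which slots hold a real case
    int defaultResult;

    DenseSwitch(int32_t lo, int32_t hi, int dflt)
      : low(lo), high(hi),
        table(hi - lo + 1), present(hi - lo + 1, false),
        defaultResult(dflt)
    {}

    void addCase(int32_t caseValue, int result) {
        assert(caseValue >= low && caseValue <= high);
        table[caseValue - low] = result;
        present[caseValue - low] = true;
    }

    int dispatch(int32_t v) const {
        if (v < low || v > high || !present[v - low])
            return defaultResult;   // gaps fall through to the default
        return table[v - low];
    }
};

int main() {
    DenseSwitch s(10, 14, /* default = */ -1);
    s.addCase(10, 100);
    s.addCase(13, 130);
    assert(s.dispatch(10) == 100);
    assert(s.dispatch(12) == -1);   // gap in the table
    assert(s.dispatch(99) == -1);   // out of range
    return 0;
}
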
-
-static bool
 CheckReturnType(FunctionValidator& f, ParseNode* usepn, RetType retType)
 {
     if (!f.hasAlreadyReturned()) {
         f.setReturnedType(retType);
         return true;
     }
 
     if (f.returnedType() != retType) {
@@ -9699,48 +6023,16 @@ CheckReturn(FunctionValidator& f, ParseN
     else if (type.isVoid())
         retType = RetType::Void;
     else
         return f.failf(expr, "%s is not a valid return type", type.toChars());
 
     return CheckReturnType(f, expr, retType);
 }
 
-static AsmType
-RetTypeToAsmType(RetType retType)
-{
-    switch (retType.which()) {
-      case RetType::Void:      break;
-      case RetType::Signed:    return AsmType::Int32;
-      case RetType::Float:     return AsmType::Float32;
-      case RetType::Double:    return AsmType::Float64;
-      case RetType::Int32x4:   return AsmType::Int32x4;
-      case RetType::Float32x4: return AsmType::Float32x4;
-    }
-    MOZ_CRASH("unexpected return type");
-}
-
-static bool
-EmitRet(FunctionCompiler& f)
-{
-    RetType retType = f.returnedType();
-
-    if (retType == RetType::Void) {
-        f.returnVoid();
-        return true;
-    }
-
-    AsmType type = RetTypeToAsmType(retType);
-    MDefinition* def = nullptr;
-    if (!EmitExpr(f, type, &def))
-        return false;
-    f.returnExpr(def);
-    return true;
-}
-
 static bool
 CheckStatementList(FunctionValidator& f, ParseNode* stmtList)
 {
     MOZ_ASSERT(stmtList->isKind(PNK_STATEMENTLIST));
 
     f.writeOp(Stmt::Block);
     f.writeU32(ListLength(stmtList));
 
@@ -9749,28 +6041,16 @@ CheckStatementList(FunctionValidator& f,
             return false;
     }
 
     f.writeDebugCheckPoint();
     return true;
 }
 
 static bool
-EmitBlock(FunctionCompiler& f)
-{
-    size_t numStmt = f.readU32();
-    for (size_t i = 0; i < numStmt; i++) {
-        if (!EmitStatement(f))
-            return false;
-    }
-    f.assertDebugCheckPoint();
-    return true;
-}
-
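CheckStatementList and the removed EmitBlock illustrate the contract between the two phases: the validator writes an opcode and a statement count, the compiler reads them back positionally, and the debug checkpoint catches any writer/reader drift. A toy round-trip under those assumptions (not the real encoding):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class Op : uint8_t { Block, Noop, CheckPoint };

    int main()
    {
        // Validator side: block header, count, statements, then a sentinel.
        std::vector<uint8_t> stream;
        stream.push_back(uint8_t(Op::Block));
        stream.push_back(2);                         // numStmt (toy: one byte)
        stream.push_back(uint8_t(Op::Noop));
        stream.push_back(uint8_t(Op::Noop));
        stream.push_back(uint8_t(Op::CheckPoint));   // writeDebugCheckPoint()

        // Compiler side: mirrors the reads one for one.
        size_t pc = 0;
        assert(Op(stream[pc++]) == Op::Block);
        uint8_t numStmt = stream[pc++];
        for (uint8_t i = 0; i < numStmt; i++)
            assert(Op(stream[pc++]) == Op::Noop);
        assert(Op(stream[pc++]) == Op::CheckPoint);  // assertDebugCheckPoint()
        return 0;
    }
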
-static bool
 CheckBreakOrContinue(FunctionValidator& f, PropertyName* maybeLabel,
                      Stmt withoutLabel, Stmt withLabel)
 {
     if (!maybeLabel) {
         f.writeOp(withoutLabel);
         return true;
     }
 
@@ -9778,36 +6058,16 @@ CheckBreakOrContinue(FunctionValidator& 
 
     uint32_t labelId = f.lookupLabel(maybeLabel);
     MOZ_ASSERT(labelId != uint32_t(-1));
 
     f.writeU32(labelId);
     return true;
 }
 
-typedef bool HasLabel;
-
-static bool
-EmitContinue(FunctionCompiler& f, bool hasLabel)
-{
-    if (!hasLabel)
-        return f.addContinue(nullptr);
-    uint32_t labelId = f.readU32();
-    return f.addContinue(&labelId);
-}
-
-static bool
-EmitBreak(FunctionCompiler& f, bool hasLabel)
-{
-    if (!hasLabel)
-        return f.addBreak(nullptr);
-    uint32_t labelId = f.readU32();
-    return f.addBreak(&labelId);
-}
-
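The HasLabel typedef above (like HasElseBlock, IsAdd, and IsMax elsewhere) is a plain bool whose only job is to document call sites such as EmitContinue(f, HasLabel(true)). A minimal sketch of the idiom:

    typedef bool HasLabel;

    static int EmitJump(HasLabel hasLabel)
    {
        return hasLabel ? 2 : 1;  // stand-in for reading an extra label id
    }

    int main()
    {
        // EmitJump(true) would also compile; the named temporary only
        // documents the call site, like EmitContinue(f, HasLabel(true)).
        return EmitJump(HasLabel(true)) == 2 ? 0 : 1;
    }
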
 static bool
 CheckStatement(FunctionValidator& f, ParseNode* stmt)
 {
     JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
 
     switch (stmt->getKind()) {
       case PNK_SEMI:          return CheckExprStatement(f, stmt);
       case PNK_WHILE:         return CheckWhile(f, stmt);
@@ -9824,66 +6084,16 @@ CheckStatement(FunctionValidator& f, Par
                                                           Stmt::Continue, Stmt::ContinueLabel);
       default:;
     }
 
     return f.fail(stmt, "unexpected statement kind");
 }
 
 static bool
-EmitStatement(FunctionCompiler& f, Stmt stmt, LabelVector* maybeLabels /*= nullptr */)
-{
-    if (!f.mirGen().ensureBallast())
-        return false;
-
-    MDefinition* _;
-    switch (stmt) {
-      case Stmt::Block:              return EmitBlock(f);
-      case Stmt::IfThen:             return EmitIfElse(f, HasElseBlock(false));
-      case Stmt::IfElse:             return EmitIfElse(f, HasElseBlock(true));
-      case Stmt::Switch:             return EmitSwitch(f);
-      case Stmt::While:              return EmitWhile(f, maybeLabels);
-      case Stmt::DoWhile:            return EmitDoWhile(f, maybeLabels);
-      case Stmt::ForInitInc:
-      case Stmt::ForInitNoInc:
-      case Stmt::ForNoInitNoInc:
-      case Stmt::ForNoInitInc:       return EmitFor(f, stmt, maybeLabels);
-      case Stmt::Label:              return EmitLabel(f, maybeLabels);
-      case Stmt::Continue:           return EmitContinue(f, HasLabel(false));
-      case Stmt::ContinueLabel:      return EmitContinue(f, HasLabel(true));
-      case Stmt::Break:              return EmitBreak(f, HasLabel(false));
-      case Stmt::BreakLabel:         return EmitBreak(f, HasLabel(true));
-      case Stmt::Ret:                return EmitRet(f);
-      case Stmt::I32Expr:            return EmitI32Expr(f, &_);
-      case Stmt::F32Expr:            return EmitF32Expr(f, &_);
-      case Stmt::F64Expr:            return EmitF64Expr(f, &_);
-      case Stmt::I32X4Expr:          return EmitI32X4Expr(f, &_);
-      case Stmt::F32X4Expr:          return EmitF32X4Expr(f, &_);
-      case Stmt::CallInternal:       return EmitInternalCall(f, RetType::Void, &_);
-      case Stmt::CallIndirect:       return EmitFuncPtrCall(f, RetType::Void, &_);
-      case Stmt::CallImport:         return EmitFFICall(f, RetType::Void, &_);
-      case Stmt::AtomicsFence:       f.memoryBarrier(MembarFull); return true;
-      case Stmt::Noop:               return true;
-      case Stmt::Id:                 return EmitStatement(f);
-      case Stmt::InterruptCheckHead: return EmitInterruptCheck(f);
-      case Stmt::InterruptCheckLoop: return EmitInterruptCheckLoop(f);
-      case Stmt::DebugCheckPoint:
-      case Stmt::Bad:             break;
-    }
-    MOZ_CRASH("unexpected statement");
-}
-
-static bool
-EmitStatement(FunctionCompiler& f, LabelVector* maybeLabels /* = nullptr */)
-{
-    Stmt stmt(f.readStmtOp());
-    return EmitStatement(f, stmt, maybeLabels);
-}
-
-static bool
 CheckByteLengthCall(ModuleValidator& m, ParseNode* pn, PropertyName* newBufferName)
 {
     if (!pn->isKind(PNK_CALL) || !CallCallee(pn)->isKind(PNK_NAME))
         return m.fail(pn, "expecting call to imported byteLength");
 
     const ModuleValidator::Global* global = m.lookupGlobal(CallCallee(pn)->name());
     if (!global || global->which() != ModuleValidator::Global::ByteLength)
         return m.fail(pn, "expecting call to imported byteLength");
@@ -10155,449 +6365,16 @@ ParseFunction(ModuleValidator& m, ParseN
     MOZ_ASSERT(directives == newDirectives);
 
     fn->pn_blockid = outerpc->blockid();
 
     *fnOut = fn;
     return true;
 }
 
-bool
-EmitI32Expr(FunctionCompiler& f, MDefinition** def)
-{
-    I32 op = I32(f.readU8());
-    switch (op) {
-      case I32::Id:
-        return EmitI32Expr(f, def);
-      case I32::Literal:
-        return EmitLiteral(f, AsmType::Int32, def);
-      case I32::GetLocal:
-        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Int32), def);
-      case I32::SetLocal:
-        return EmitSetLoc(f, AsmType::Int32, def);
-      case I32::GetGlobal:
-        return EmitGetGlo(f, MIRType_Int32, def);
-      case I32::SetGlobal:
-        return EmitSetGlo(f, AsmType::Int32, def);
-      case I32::CallInternal:
-        return EmitInternalCall(f, RetType::Signed, def);
-      case I32::CallIndirect:
-        return EmitFuncPtrCall(f, RetType::Signed, def);
-      case I32::CallImport:
-        return EmitFFICall(f, RetType::Signed, def);
-      case I32::Conditional:
-        return EmitConditional(f, AsmType::Int32, def);
-      case I32::Comma:
-        return EmitComma(f, AsmType::Int32, def);
-      case I32::Add:
-        return EmitAddOrSub(f, AsmType::Int32, IsAdd(true), def);
-      case I32::Sub:
-        return EmitAddOrSub(f, AsmType::Int32, IsAdd(false), def);
-      case I32::Mul:
-        return EmitMultiply(f, AsmType::Int32, def);
-      case I32::UDiv:
-      case I32::SDiv:
-        return EmitDivOrMod(f, AsmType::Int32, IsDiv(true), IsUnsigned(op == I32::UDiv), def);
-      case I32::UMod:
-      case I32::SMod:
-        return EmitDivOrMod(f, AsmType::Int32, IsDiv(false), IsUnsigned(op == I32::UMod), def);
-      case I32::Min:
-        return EmitMathMinMax(f, AsmType::Int32, IsMax(false), def);
-      case I32::Max:
-        return EmitMathMinMax(f, AsmType::Int32, IsMax(true), def);
-      case I32::Not:
-        return EmitUnary<MNot>(f, AsmType::Int32, def);
-      case I32::FromF32:
-        return EmitUnary<MTruncateToInt32>(f, AsmType::Float32, def);
-      case I32::FromF64:
-        return EmitUnary<MTruncateToInt32>(f, AsmType::Float64, def);
-      case I32::Clz:
-        return EmitUnary<MClz>(f, AsmType::Int32, def);
-      case I32::Abs:
-        return EmitUnaryMir<MAbs>(f, AsmType::Int32, def);
-      case I32::Neg:
-        return EmitUnaryMir<MAsmJSNeg>(f, AsmType::Int32, def);
-      case I32::BitOr:
-        return EmitBitwise<MBitOr>(f, def);
-      case I32::BitAnd:
-        return EmitBitwise<MBitAnd>(f, def);
-      case I32::BitXor:
-        return EmitBitwise<MBitXor>(f, def);
-      case I32::Lsh:
-        return EmitBitwise<MLsh>(f, def);
-      case I32::ArithRsh:
-        return EmitBitwise<MRsh>(f, def);
-      case I32::LogicRsh:
-        return EmitBitwise<MUrsh>(f, def);
-      case I32::BitNot:
-        return EmitBitwise<MBitNot>(f, def);
-      case I32::SLoad8:
-        return EmitLoadArray(f, Scalar::Int8, def);
-      case I32::SLoad16:
-        return EmitLoadArray(f, Scalar::Int16, def);
-      case I32::SLoad32:
-        return EmitLoadArray(f, Scalar::Int32, def);
-      case I32::ULoad8:
-        return EmitLoadArray(f, Scalar::Uint8, def);
-      case I32::ULoad16:
-        return EmitLoadArray(f, Scalar::Uint16, def);
-      case I32::ULoad32:
-        return EmitLoadArray(f, Scalar::Uint32, def);
-      case I32::Store8:
-        return EmitStore(f, Scalar::Int8, def);
-      case I32::Store16:
-        return EmitStore(f, Scalar::Int16, def);
-      case I32::Store32:
-        return EmitStore(f, Scalar::Int32, def);
-      case I32::EqI32:
-      case I32::NeI32:
-      case I32::SLtI32:
-      case I32::SLeI32:
-      case I32::SGtI32:
-      case I32::SGeI32:
-      case I32::ULtI32:
-      case I32::ULeI32:
-      case I32::UGtI32:
-      case I32::UGeI32:
-      case I32::EqF32:
-      case I32::NeF32:
-      case I32::LtF32:
-      case I32::LeF32:
-      case I32::GtF32:
-      case I32::GeF32:
-      case I32::EqF64:
-      case I32::NeF64:
-      case I32::LtF64:
-      case I32::LeF64:
-      case I32::GtF64:
-      case I32::GeF64:
-        return EmitComparison(f, op, def);
-      case I32::AtomicsCompareExchange:
-        return EmitAtomicsCompareExchange(f, def);
-      case I32::AtomicsExchange:
-        return EmitAtomicsExchange(f, def);
-      case I32::AtomicsLoad:
-        return EmitAtomicsLoad(f, def);
-      case I32::AtomicsStore:
-        return EmitAtomicsStore(f, def);
-      case I32::AtomicsBinOp:
-        return EmitAtomicsBinOp(f, def);
-      case I32::I32X4SignMask:
-        return EmitSignMask(f, AsmType::Int32x4, def);
-      case I32::F32X4SignMask:
-        return EmitSignMask(f, AsmType::Float32x4, def);
-      case I32::I32X4ExtractLane:
-        return EmitExtractLane(f, AsmType::Int32x4, def);
-      case I32::Bad:
-        break;
-    }
-    MOZ_CRASH("unexpected i32 expression");
-}
-
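EmitI32Expr dispatches on one opcode byte and then reads its operands as nested expressions, i.e. a prefix encoding. A hypothetical, self-contained model of that dispatch for an expression like x + 1 (the one-byte operands are an assumption made for brevity, not the real layout):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class I32 : uint8_t { GetLocal, Literal, Add };

    static int32_t EvalI32(const std::vector<uint8_t>& s, size_t& pc,
                           const std::vector<int32_t>& locals)
    {
        switch (I32(s[pc++])) {
          case I32::GetLocal: return locals[s[pc++]];
          case I32::Literal:  return int32_t(s[pc++]);  // toy: one-byte literals
          case I32::Add: {
            int32_t lhs = EvalI32(s, pc, locals);       // operands in prefix order
            int32_t rhs = EvalI32(s, pc, locals);
            return lhs + rhs;
          }
        }
        return 0;  // unreachable for well-formed streams
    }

    int main()
    {
        // Add(GetLocal 0, Literal 1), i.e. x + 1 with x in local slot 0.
        std::vector<uint8_t> s = { uint8_t(I32::Add),
                                   uint8_t(I32::GetLocal), 0,
                                   uint8_t(I32::Literal), 1 };
        size_t pc = 0;
        assert(EvalI32(s, pc, {41}) == 42);
        return 0;
    }
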
-bool
-EmitF32Expr(FunctionCompiler& f, MDefinition** def)
-{
-    F32 op = F32(f.readU8());
-    switch (op) {
-      case F32::Id:
-        return EmitF32Expr(f, def);
-      case F32::Literal:
-        return EmitLiteral(f, AsmType::Float32, def);
-      case F32::GetLocal:
-        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Float32), def);
-      case F32::SetLocal:
-        return EmitSetLoc(f, AsmType::Float32, def);
-      case F32::GetGlobal:
-        return EmitGetGlo(f, MIRType_Float32, def);
-      case F32::SetGlobal:
-        return EmitSetGlo(f, AsmType::Float32, def);
-      case F32::CallInternal:
-        return EmitInternalCall(f, RetType::Float, def);
-      case F32::CallIndirect:
-        return EmitFuncPtrCall(f, RetType::Float, def);
-      case F32::CallImport:
-        return EmitFFICall(f, RetType::Float, def);
-      case F32::Conditional:
-        return EmitConditional(f, AsmType::Float32, def);
-      case F32::Comma:
-        return EmitComma(f, AsmType::Float32, def);
-      case F32::Add:
-        return EmitAddOrSub(f, AsmType::Float32, IsAdd(true), def);
-      case F32::Sub:
-        return EmitAddOrSub(f, AsmType::Float32, IsAdd(false), def);
-      case F32::Mul:
-        return EmitMultiply(f, AsmType::Float32, def);
-      case F32::Div:
-        return EmitDivOrMod(f, AsmType::Float32, IsDiv(true), def);
-      case F32::Min:
-        return EmitMathMinMax(f, AsmType::Float32, IsMax(false), def);
-      case F32::Max:
-        return EmitMathMinMax(f, AsmType::Float32, IsMax(true), def);
-      case F32::Neg:
-        return EmitUnaryMir<MAsmJSNeg>(f, AsmType::Float32, def);
-      case F32::Abs:
-        return EmitUnaryMir<MAbs>(f, AsmType::Float32, def);
-      case F32::Sqrt:
-        return EmitUnaryMir<MSqrt>(f, AsmType::Float32, def);
-      case F32::Ceil:
-      case F32::Floor:
-        return EmitMathBuiltinCall(f, op, def);
-      case F32::FromF64:
-        return EmitUnary<MToFloat32>(f, AsmType::Float64, def);
-      case F32::FromS32:
-        return EmitUnary<MToFloat32>(f, AsmType::Int32, def);
-      case F32::FromU32:
-        return EmitUnary<MAsmJSUnsignedToFloat32>(f, AsmType::Int32, def);
-      case F32::Load:
-        return EmitLoadArray(f, Scalar::Float32, def);
-      case F32::StoreF32:
-        return EmitStore(f, Scalar::Float32, def);
-      case F32::StoreF64:
-        return EmitStoreWithCoercion(f, Scalar::Float32, Scalar::Float64, def);
-      case F32::F32X4ExtractLane:
-        return EmitExtractLane(f, AsmType::Float32x4, def);
-      case F32::Bad:
-        break;
-    }
-    MOZ_CRASH("unexpected f32 expression");
-}
-
-bool
-EmitF64Expr(FunctionCompiler& f, MDefinition** def)
-{
-    F64 op = F64(f.readU8());
-    switch (op) {
-      case F64::Id:
-        return EmitF64Expr(f, def);
-      case F64::GetLocal:
-        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Double), def);
-      case F64::SetLocal:
-        return EmitSetLoc(f, AsmType::Float64, def);
-      case F64::GetGlobal:
-        return EmitGetGlo(f, MIRType_Double, def);
-      case F64::SetGlobal:
-        return EmitSetGlo(f, AsmType::Float64, def);
-      case F64::Literal:
-        return EmitLiteral(f, AsmType::Float64, def);
-      case F64::Add:
-        return EmitAddOrSub(f, AsmType::Float64, IsAdd(true), def);
-      case F64::Sub:
-        return EmitAddOrSub(f, AsmType::Float64, IsAdd(false), def);
-      case F64::Mul:
-        return EmitMultiply(f, AsmType::Float64, def);
-      case F64::Div:
-        return EmitDivOrMod(f, AsmType::Float64, IsDiv(true), def);
-      case F64::Mod:
-        return EmitDivOrMod(f, AsmType::Float64, IsDiv(false), def);
-      case F64::Min:
-        return EmitMathMinMax(f, AsmType::Float64, IsMax(false), def);
-      case F64::Max:
-        return EmitMathMinMax(f, AsmType::Float64, IsMax(true), def);
-      case F64::Neg:
-        return EmitUnaryMir<MAsmJSNeg>(f, AsmType::Float64, def);
-      case F64::Abs:
-        return EmitUnaryMir<MAbs>(f, AsmType::Float64, def);
-      case F64::Sqrt:
-        return EmitUnaryMir<MSqrt>(f, AsmType::Float64, def);
-      case F64::Ceil:
-      case F64::Floor:
-      case F64::Sin:
-      case F64::Cos:
-      case F64::Tan:
-      case F64::Asin:
-      case F64::Acos:
-      case F64::Atan:
-      case F64::Exp:
-      case F64::Log:
-      case F64::Pow:
-      case F64::Atan2:
-        return EmitMathBuiltinCall(f, op, def);
-      case F64::FromF32:
-        return EmitUnary<MToDouble>(f, AsmType::Float32, def);
-      case F64::FromS32:
-        return EmitUnary<MToDouble>(f, AsmType::Int32, def);
-      case F64::FromU32:
-        return EmitUnary<MAsmJSUnsignedToDouble>(f, AsmType::Int32, def);
-      case F64::Load:
-        return EmitLoadArray(f, Scalar::Float64, def);
-      case F64::StoreF64:
-        return EmitStore(f, Scalar::Float64, def);
-      case F64::StoreF32:
-        return EmitStoreWithCoercion(f, Scalar::Float64, Scalar::Float32, def);
-      case F64::CallInternal:
-        return EmitInternalCall(f, RetType::Double, def);
-      case F64::CallIndirect:
-        return EmitFuncPtrCall(f, RetType::Double, def);
-      case F64::CallImport:
-        return EmitFFICall(f, RetType::Double, def);
-      case F64::Conditional:
-        return EmitConditional(f, AsmType::Float64, def);
-      case F64::Comma:
-        return EmitComma(f, AsmType::Float64, def);
-      case F64::Bad:
-        break;
-    }
-    MOZ_CRASH("unexpected f64 expression");
-}
-
-static bool
-EmitI32X4Expr(FunctionCompiler& f, MDefinition** def)
-{
-    I32X4 op = I32X4(f.readU8());
-    switch (op) {
-      case I32X4::Id:
-        return EmitI32X4Expr(f, def);
-      case I32X4::GetLocal:
-        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Int32x4), def);
-      case I32X4::SetLocal:
-        return EmitSetLoc(f, AsmType::Int32x4, def);
-      case I32X4::GetGlobal:
-        return EmitGetGlo(f, MIRType_Int32x4, def);
-      case I32X4::SetGlobal:
-        return EmitSetGlo(f, AsmType::Int32x4, def);
-      case I32X4::Comma:
-        return EmitComma(f, AsmType::Int32x4, def);
-      case I32X4::Conditional:
-        return EmitConditional(f, AsmType::Int32x4, def);
-      case I32X4::CallInternal:
-        return EmitInternalCall(f, RetType::Int32x4, def);
-      case I32X4::CallIndirect:
-        return EmitFuncPtrCall(f, RetType::Int32x4, def);
-      case I32X4::CallImport:
-        return EmitFFICall(f, RetType::Int32x4, def);
-      case I32X4::Literal:
-        return EmitLiteral(f, AsmType::Int32x4, def);
-      case I32X4::Ctor:
-        return EmitSimdCtor(f, AsmType::Int32x4, def);
-      case I32X4::Unary:
-        return EmitSimdUnary(f, AsmType::Int32x4, def);
-      case I32X4::Binary:
-        return EmitSimdBinaryArith(f, AsmType::Int32x4, def);
-      case I32X4::BinaryBitwise:
-        return EmitSimdBinaryBitwise(f, AsmType::Int32x4, def);
-      case I32X4::BinaryCompI32X4:
-        return EmitSimdBinaryComp(f, AsmType::Int32x4, def);
-      case I32X4::BinaryCompF32X4:
-        return EmitSimdBinaryComp(f, AsmType::Float32x4, def);
-      case I32X4::BinaryShift:
-        return EmitSimdBinaryShift(f, def);
-      case I32X4::ReplaceLane:
-        return EmitSimdReplaceLane(f, AsmType::Int32x4, def);
-      case I32X4::FromF32X4:
-        return EmitSimdCast<MSimdConvert>(f, AsmType::Float32x4, AsmType::Int32x4, def);
-      case I32X4::FromF32X4Bits:
-        return EmitSimdCast<MSimdReinterpretCast>(f, AsmType::Float32x4, AsmType::Int32x4, def);
-      case I32X4::Swizzle:
-        return EmitSimdSwizzle(f, AsmType::Int32x4, def);
-      case I32X4::Shuffle:
-        return EmitSimdShuffle(f, AsmType::Int32x4, def);
-      case I32X4::Select:
-        return EmitSimdSelect(f, AsmType::Int32x4, IsElementWise(true), def);
-      case I32X4::BitSelect:
-        return EmitSimdSelect(f, AsmType::Int32x4, IsElementWise(false), def);
-      case I32X4::Splat:
-        return EmitSimdSplat(f, AsmType::Int32x4, def);
-      case I32X4::Load:
-        return EmitSimdLoad(f, AsmType::Int32x4, def);
-      case I32X4::Store:
-        return EmitSimdStore(f, AsmType::Int32x4, def);
-      case I32X4::Bad:
-        break;
-    }
-    MOZ_CRASH("unexpected int32x4 expression");
-}
-
-static bool
-EmitF32X4Expr(FunctionCompiler& f, MDefinition** def)
-{
-    F32X4 op = F32X4(f.readU8());
-    switch (op) {
-      case F32X4::Id:
-        return EmitF32X4Expr(f, def);
-      case F32X4::GetLocal:
-        return EmitGetLoc(f, DebugOnly<MIRType>(MIRType_Float32x4), def);
-      case F32X4::SetLocal:
-        return EmitSetLoc(f, AsmType::Float32x4, def);
-      case F32X4::GetGlobal:
-        return EmitGetGlo(f, MIRType_Float32x4, def);
-      case F32X4::SetGlobal:
-        return EmitSetGlo(f, AsmType::Float32x4, def);
-      case F32X4::Comma:
-        return EmitComma(f, AsmType::Float32x4, def);
-      case F32X4::Conditional:
-        return EmitConditional(f, AsmType::Float32x4, def);
-      case F32X4::CallInternal:
-        return EmitInternalCall(f, RetType::Float32x4, def);
-      case F32X4::CallIndirect:
-        return EmitFuncPtrCall(f, RetType::Float32x4, def);
-      case F32X4::CallImport:
-        return EmitFFICall(f, RetType::Float32x4, def);
-      case F32X4::Literal:
-        return EmitLiteral(f, AsmType::Float32x4, def);
-      case F32X4::Ctor:
-        return EmitSimdCtor(f, AsmType::Float32x4, def);
-      case F32X4::Unary:
-        return EmitSimdUnary(f, AsmType::Float32x4, def);
-      case F32X4::Binary:
-        return EmitSimdBinaryArith(f, AsmType::Float32x4, def);
-      case F32X4::BinaryBitwise:
-        return EmitSimdBinaryBitwise(f, AsmType::Float32x4, def);
-      case F32X4::ReplaceLane:
-        return EmitSimdReplaceLane(f, AsmType::Float32x4, def);
-      case F32X4::FromI32X4:
-        return EmitSimdCast<MSimdConvert>(f, AsmType::Int32x4, AsmType::Float32x4, def);
-      case F32X4::FromI32X4Bits:
-        return EmitSimdCast<MSimdReinterpretCast>(f, AsmType::Int32x4, AsmType::Float32x4, def);
-      case F32X4::Swizzle:
-        return EmitSimdSwizzle(f, AsmType::Float32x4, def);
-      case F32X4::Shuffle:
-        return EmitSimdShuffle(f, AsmType::Float32x4, def);
-      case F32X4::Select:
-        return EmitSimdSelect(f, AsmType::Float32x4, IsElementWise(true), def);
-      case F32X4::BitSelect:
-        return EmitSimdSelect(f, AsmType::Float32x4, IsElementWise(false), def);
-      case F32X4::Splat:
-        return EmitSimdSplat(f, AsmType::Float32x4, def);
-      case F32X4::Load:
-        return EmitSimdLoad(f, AsmType::Float32x4, def);
-      case F32X4::Store:
-        return EmitSimdStore(f, AsmType::Float32x4, def);
-      case F32X4::Bad:
-        break;
-    }
-    MOZ_CRASH("unexpected float32x4 expression");
-}
-
-static bool
-GenerateMIR(ModuleCompiler& m, LifoAlloc& lifo, AsmFunction& func, MIRGenerator** mir)
-{
-    int64_t before = PRMJ_Now();
-
-    FunctionCompiler f(m, func, lifo);
-    if (!f.init())
-        return false;
-
-    if (!f.prepareEmitMIR(func.argTypes()))
-        return false;
-
-    while (!f.done()) {
-        if (!EmitStatement(f))
-            return false;
-    }
-
-    *mir = f.extractMIR();
-    if (!*mir)
-        return false;
-
-    jit::SpewBeginFunction(*mir, nullptr);
-
-    f.checkPostconditions();
-
-    func.accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC);
-    return true;
-}
-
 static bool
 CheckFunction(ModuleValidator& m, LifoAlloc& lifo, AsmFunction** funcOut)
 {
     int64_t before = PRMJ_Now();
 
     // asm.js modules can be quite large when represented as parse trees, so pop
     // the backing LifoAlloc after parsing/compiling each function.
     AsmJSParser::Mark mark = m.parser().mark();
@@ -10672,52 +6449,16 @@ CheckFunction(ModuleValidator& m, LifoAl
 
     m.parser().release(mark);
 
     *funcOut = asmFunc;
     return true;
 }
 
 static bool
-GenerateCode(ModuleCompiler& m, AsmFunction& func, MIRGenerator& mir, LIRGraph& lir)
-{
-    int64_t before = PRMJ_Now();
-
-    // A single MacroAssembler is reused for all function compilations so
-    // that there is a single linear code segment for each module. To avoid
-    // spiking memory, a LifoAllocScope in the caller frees all MIR/LIR
-    // after each function is compiled. This method is responsible for cleaning
-    // out any dangling pointers that the MacroAssembler may have kept.
-    m.masm().resetForNewCodeGenerator(mir.alloc());
-
-    ScopedJSDeletePtr<CodeGenerator> codegen(js_new<CodeGenerator>(&mir, &lir, &m.masm()));
-    if (!codegen)
-        return false;
-
-    Label* funcEntry;
-    if (!m.getOrCreateFunctionEntry(func.funcIndex(), &funcEntry))
-        return false;
-
-    AsmJSFunctionLabels labels(*funcEntry, m.stackOverflowLabel());
-    if (!codegen->generateAsmJS(&labels))
-        return false;
-
-    func.accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC);
-
-    if (!m.finishGeneratingFunction(func, *codegen, labels))
-        return false;
-
-    // Unlike regular IonMonkey, which links and generates a new JitCode for
-    // every function, we accumulate all the functions in the module in a
-    // single MacroAssembler and link at end. Linking asm.js doesn't require a
-    // CodeGenerator so we can destroy it now (via ScopedJSDeletePtr).
-    return true;
-}
-
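As the removed comments note, every function is assembled into one MacroAssembler so the module ends up as a single linear code segment with per-function entry labels, linked once at the end. A generic model of that accumulation (assumed shapes, not the SpiderMonkey API):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ModuleBuffer {
        std::vector<uint8_t> code;      // the single linear code segment
        std::vector<size_t> entries;    // per-function entry offsets ("labels")

        void beginFunction() { entries.push_back(code.size()); }
        void emit(uint8_t b) { code.push_back(b); }
    };

    int main()
    {
        ModuleBuffer m;
        for (int func = 0; func < 3; func++) {
            m.beginFunction();          // cf. getOrCreateFunctionEntry above
            m.emit(0xC3);               // stand-in for the function's code
        }
        // One buffer, three entry points; linking happens once, at the end.
        assert(m.entries.size() == 3 && m.entries[2] == 2);
        return 0;
    }
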
-static bool
 CheckAllFunctionsDefined(ModuleValidator& m)
 {
     for (unsigned i = 0; i < m.numFunctions(); i++) {
         ModuleValidator::Func& f = m.function(i);
         if (!f.defined()) {
             return m.failNameOffset(f.firstUseOffset(),
                                     "missing definition of function %s", f.name());
         }
@@ -10729,17 +6470,20 @@ CheckAllFunctionsDefined(ModuleValidator
 static bool
 CheckFunctionsSequential(ModuleValidator& m, ScopedJSDeletePtr<ModuleCompileResults>* compileResults)
 {
     // Use a single LifoAlloc to allocate all the temporary compiler IR.
     // All allocated LifoAlloc'd memory is released after compiling each
     // function by the LifoAllocScope inside the loop.
     LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
 
-    ModuleCompiler mc(m.compileInputs());
+    AsmModuleCompilerScope scope;
+    if (!CreateAsmModuleCompiler(m.compileInputs(), &scope))
+        return false;
+    ModuleCompiler& mc = scope.module();
 
     while (true) {
         TokenKind tk;
         if (!PeekToken(m.parser(), &tk))
             return false;
         if (tk != TOK_FUNCTION)
             break;
 
@@ -10749,41 +6493,44 @@ CheckFunctionsSequential(ModuleValidator
         if (!CheckFunction(m, lifo, &func))
             return false;
 
         // In the case of the change-heap function, no function is produced.
         if (!func)
             continue;
 
         MIRGenerator* mir;
-        if (!GenerateMIR(mc, lifo, *func, &mir))
+        if (!GenerateAsmFunctionMIR(mc, lifo, *func, &mir))
             return false;
 
         int64_t before = PRMJ_Now();
 
-        JitContext jcx(m.cx(), &mir->alloc());
-        jit::AutoSpewEndFunction spewEndFunction(mir);
-
-        if (!OptimizeMIR(mir))
-            return m.failOffset(func->srcBegin(), "internal compiler failure (probably out of memory)");
-
-        LIRGraph* lir = GenerateLIR(mir);
-        if (!lir)
-            return m.failOffset(func->srcBegin(), "internal compiler failure (probably out of memory)");
-
-        func->accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC);
-
-        if (!GenerateCode(mc, *func, *mir, *lir))
+        LIRGraph* lir;
+        {
+            JitContext jcx(m.cx(), &mir->alloc());
+            jit::AutoSpewEndFunction spewEndFunction(mir);
+
+            if (!OptimizeMIR(mir))
+                return m.failOffset(func->srcBegin(), "internal compiler failure (probably out of memory)");
+
+            lir = GenerateLIR(mir);
+            if (!lir)
+                return m.failOffset(func->srcBegin(), "internal compiler failure (probably out of memory)");
+
+            func->accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC);
+        }
+
+        if (!GenerateAsmFunctionCode(mc, *func, *mir, *lir))
             return false;
     }
 
     if (!CheckAllFunctionsDefined(m))
         return false;
 
-    mc.finish(compileResults);
+    FinishAsmModuleCompilation(mc, compileResults);
     return true;
 }
 
 // Currently, only one asm.js parallel compilation is allowed at a time.
 // This RAII class attempts to claim this parallel compilation using atomic ops
 // on the helper thread state's asmJSCompilationInProgress.
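
A standalone model of such a claim/release guard, with std::atomic standing in for the helper thread state (all names here are hypothetical):

    #include <atomic>

    static std::atomic<bool> asmJSCompilationInProgress(false);

    class Guard {
        bool claimed_;
      public:
        Guard() {
            bool expected = false;
            claimed_ = asmJSCompilationInProgress.compare_exchange_strong(expected, true);
        }
        bool claimed() const { return claimed_; }
        ~Guard() {
            if (claimed_)
                asmJSCompilationInProgress = false;  // release on scope exit
        }
    };

    int main()
    {
        Guard g;
        return g.claimed() ? 0 : 1;  // fall back to sequential if not claimed
    }
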
 class ParallelCompilationGuard
 {
@@ -10857,22 +6604,19 @@ GetUsedTask(ModuleCompiler& m, ParallelG
     // Block until a used LifoAlloc becomes available.
     AsmJSParallelTask* task = GetFinishedCompilation(m, group);
     if (!task)
         return false;
 
     auto& func = *reinterpret_cast<AsmFunction*>(task->func);
     func.accumulateCompileTime(task->compileTime);
 
-    {
-        // Perform code generation on the main thread.
-        JitContext jitContext(m.runtime(), /* CompileCompartment = */ nullptr, &task->mir->alloc());
-        if (!GenerateCode(m, func, *task->mir, *task->lir))
-            return false;
-    }
+    // Perform code generation on the main thread.
+    if (!GenerateAsmFunctionCode(m, func, *task->mir, *task->lir))
+        return false;
 
     group.compiledJobs++;
 
     // Clear the LifoAlloc for use by another helper.
     TempAllocator& tempAlloc = task->mir->alloc();
     tempAlloc.TempAllocator::~TempAllocator();
     task->lifo.releaseAll();
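
The two lines above recycle the task's LifoAlloc: the TempAllocator living inside it gets an explicit, qualified destructor call (arenas never run destructors themselves), and then the arena memory is released wholesale for the next helper. A generic model of the idiom (hypothetical types; alignment bookkeeping omitted):

    #include <cassert>
    #include <cstddef>
    #include <new>

    struct Arena {
        alignas(16) unsigned char storage[256];
        size_t used = 0;
        void* alloc(size_t n) { void* p = storage + used; used += n; return p; }
        void releaseAll() { used = 0; }     // frees memory, runs no destructors
    };

    struct TempAllocator {
        int live = 1;
        ~TempAllocator() { live = 0; }
    };

    int main()
    {
        Arena lifo;
        void* raw = lifo.alloc(sizeof(TempAllocator));
        TempAllocator* t = new (raw) TempAllocator();
        t->TempAllocator::~TempAllocator(); // explicit, qualified destructor call
        lifo.releaseAll();                  // memory now reusable by another task
        assert(lifo.used == 0);
        return 0;
    }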
 
@@ -10900,17 +6644,20 @@ CheckFunctionsParallel(ModuleValidator& 
     {
         AutoLockHelperThreadState lock;
         MOZ_ASSERT(HelperThreadState().asmJSWorklist().empty());
         MOZ_ASSERT(HelperThreadState().asmJSFinishedList().empty());
     }
 #endif
     HelperThreadState().resetAsmJSFailureState();
 
-    ModuleCompiler mc(m.compileInputs());
+    AsmModuleCompilerScope scope;
+    if (!CreateAsmModuleCompiler(m.compileInputs(), &scope))
+        return false;
+    ModuleCompiler& mc = scope.module();
 
     AsmJSParallelTask* task = nullptr;
     for (unsigned i = 0;; i++) {
         TokenKind tk;
         if (!PeekToken(m.parser(), &tk))
             return false;
         if (tk != TOK_FUNCTION)
             break;
@@ -10923,17 +6670,17 @@ CheckFunctionsParallel(ModuleValidator& 
             return false;
 
         // In the case of the change-heap function, no function is produced.
         if (!func)
             continue;
 
         // Generate MIR into the LifoAlloc on the main thread.
         MIRGenerator* mir;
-        if (!GenerateMIR(mc, task->lifo, *func, &mir))
+        if (!GenerateAsmFunctionMIR(mc, task->lifo, *func, &mir))
             return false;
 
         // Perform optimizations and LIR generation on a helper thread.
         task->init(m.cx()->compartment()->runtimeFromAnyThread(), func, mir);
         if (!StartOffThreadAsmJSCompile(m.cx(), task))
             return false;
 
         group.outstandingJobs++;
@@ -10956,17 +6703,17 @@ CheckFunctionsParallel(ModuleValidator& 
     {
         AutoLockHelperThreadState lock;
         MOZ_ASSERT(HelperThreadState().asmJSWorklist().empty());
         MOZ_ASSERT(HelperThreadState().asmJSFinishedList().empty());
     }
 #endif
     MOZ_ASSERT(!HelperThreadState().asmJSFailed());
 
-    mc.finish(compileResults);
+    FinishAsmModuleCompilation(mc, compileResults);
     return true;
 }
 
 static void
 CancelOutstandingJobs(ParallelGroupState& group)
 {
     // This is failure-handling code, so it's not allowed to fail. The problem
     // is that all memory for compilation is stored in LifoAllocs maintained in
@@ -12454,16 +8201,22 @@ EstablishPreconditions(ExclusiveContext*
         return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by generator context");
 
     if (parser.pc->isArrowFunction())
         return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by arrow function context");
 
     return true;
 }
 
+static bool
+NoExceptionPending(ExclusiveContext* cx)
+{
+    return !cx->isJSContext() || !cx->asJSContext()->isExceptionPending();
+}
+
 bool
 js::ValidateAsmJS(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, bool* validated)
 {
     *validated = false;
 
     if (!EstablishPreconditions(cx, parser))
         return NoExceptionPending(cx);
 
--- a/js/src/devtools/rootAnalysis/annotations.js
+++ b/js/src/devtools/rootAnalysis/annotations.js
@@ -33,17 +33,17 @@ function indirectCallCannotGC(fullCaller
         return true;
 
     if (name == "params" && caller == "PR_ExplodeTime")
         return true;
 
     if (name == "op" && /GetWeakmapKeyDelegate/.test(caller))
         return true;
 
-    var CheckCallArgs = "AsmJSValidate.cpp:uint8 CheckCallArgs(FunctionValidator*, js::frontend::ParseNode*, (uint8)(FunctionValidator*,js::frontend::ParseNode*,Type)*, Signature*)";
+    var CheckCallArgs = "AsmJSValidate.cpp:uint8 CheckCallArgs(FunctionValidator*, js::frontend::ParseNode*, (uint8)(FunctionValidator*,js::frontend::ParseNode*,js::wasm::Type)*, js::wasm::Signature*)";
     if (name == "checkArg" && caller == CheckCallArgs)
         return true;
 
     // hook called during script finalization which cannot GC.
     if (/CallDestroyScriptHook/.test(caller))
         return true;
 
     // template method called during marking and hence cannot GC
--- a/js/src/jit/CompileWrappers.h
+++ b/js/src/jit/CompileWrappers.h
@@ -97,16 +97,18 @@ class CompileZone
 
     const void* addressOfNeedsIncrementalBarrier();
 
     // arenas.getFreeList(allocKind)
     const void* addressOfFreeListFirst(gc::AllocKind allocKind);
     const void* addressOfFreeListLast(gc::AllocKind allocKind);
 };
 
+class JitCompartment;
+
 class CompileCompartment
 {
     JSCompartment* compartment();
 
   public:
     static CompileCompartment* get(JSCompartment* comp);
 
     CompileZone* zone();
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -133,16 +133,17 @@ EXPORTS.js += [
     '../public/UbiNodeTraverse.h',
     '../public/Utility.h',
     '../public/Value.h',
     '../public/Vector.h',
     '../public/WeakMapPtr.h',
 ]
 
 UNIFIED_SOURCES += [
+    'asmjs/AsmJSCompile.cpp',
     'asmjs/AsmJSFrameIterator.cpp',
     'asmjs/AsmJSLink.cpp',
     'asmjs/AsmJSModule.cpp',
     'asmjs/AsmJSSignalHandlers.cpp',
     'asmjs/AsmJSValidate.cpp',
     'builtin/AtomicsObject.cpp',
     'builtin/Eval.cpp',
     'builtin/Intl.cpp',