Bug 1313180 - Baldr: switch everything to using function indices (r=bbouvier)
author: Luke Wagner <luke@mozilla.com>
date: Fri, 04 Nov 2016 17:05:56 -0500
changeset: 347925 c3b3c5505c198e1ef743eeb1ce2d6e27d4ad9c89
parent: 347924 b01106827c2ba1a0abcb75c1e28c6a65ff8508b4
child: 347926 700fccbb5044182632fea0a2b0cb7500eaae7097
push id: 10298
push user: raliiev@mozilla.com
push date: Mon, 14 Nov 2016 12:33:03 +0000
treeherder: mozilla-aurora@7e29173b1641
reviewers: bbouvier
bugs: 1313180
milestone: 52.0a1
MozReview-Commit-ID: 8q9PJZI9lHo
js/src/jit-test/tests/wasm/backtrace.js
js/src/jit-test/tests/wasm/basic.js
js/src/jit-test/tests/wasm/binary.js
js/src/jit-test/tests/wasm/extract-code.js
js/src/jit-test/tests/wasm/profiling.js
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jsfun.h
js/src/wasm/AsmJS.cpp
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmBinary.h
js/src/wasm/WasmBinaryIterator.cpp
js/src/wasm/WasmBinaryToAST.cpp
js/src/wasm/WasmCode.cpp
js/src/wasm/WasmCode.h
js/src/wasm/WasmCompile.cpp
js/src/wasm/WasmFrameIterator.cpp
js/src/wasm/WasmGenerator.cpp
js/src/wasm/WasmGenerator.h
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmInstance.h
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmIonCompile.h
js/src/wasm/WasmJS.cpp
js/src/wasm/WasmJS.h
js/src/wasm/WasmModule.cpp
js/src/wasm/WasmModule.h
js/src/wasm/WasmStubs.cpp
js/src/wasm/WasmStubs.h
js/src/wasm/WasmTypes.h
--- a/js/src/jit-test/tests/wasm/backtrace.js
+++ b/js/src/jit-test/tests/wasm/backtrace.js
@@ -8,16 +8,16 @@ var code = `(module
 )`;
 var mod = wasmEvalText(code, {
   env: {
     test: function() {
        // Expecting 3 lines in the backtrace (plus last empty).
        // The middle one is for the wasm function.
        var s = getBacktrace();
        assertEq(s.split('\n').length, 4);
-       assertEq(s.split('\n')[1].startsWith("1 wasm-function[0]("), true);
+       assertEq(s.split('\n')[1].startsWith("1 wasm-function[1]("), true);
 
        // Let's also run DumpBacktrace() to check if we are not crashing.
        backtrace();
     }
   }
 }).exports;
 mod.test();
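
The backtrace change above reflects the core of this patch: imports and definitions now share one function index space, with imports numbered first. A minimal sketch of the new numbering, assuming a module with a single import (the helper names here are illustrative, not from the patch):

    #include <cstdint>

    // Sketch: in the unified index space, imports occupy the low indices and
    // definitions follow. With one import ("env.test" above):
    //   funcIndex 0 -> the import
    //   funcIndex 1 -> the first defined function
    // which is why the frame label moves from wasm-function[0] to [1].
    static bool FuncIsImport(uint32_t funcIndex, uint32_t numFuncImports) {
        return funcIndex < numFuncImports;
    }
    static uint32_t FirstDefFuncIndex(uint32_t numFuncImports) {
        return numFuncImports; // definitions start right after the imports
    }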
--- a/js/src/jit-test/tests/wasm/basic.js
+++ b/js/src/jit-test/tests/wasm/basic.js
@@ -433,18 +433,18 @@ assertErrorMessage(() => i2v(5), Error, 
             (export "run" $bar)
         )`,
         undefined,
         {"":{f:() => { stack = new Error().stack }}}
     );
 
     disableSPSProfiling();
 
-    var inner = stack.indexOf("wasm-function[0]");
-    var outer = stack.indexOf("wasm-function[2]");
+    var inner = stack.indexOf("wasm-function[1]");
+    var outer = stack.indexOf("wasm-function[3]");
     assertEq(inner === -1, false);
     assertEq(outer === -1, false);
     assertEq(inner < outer, true);
 }
 
 for (bad of [6, 7, 100, Math.pow(2,31)-1, Math.pow(2,31), Math.pow(2,31)+1, Math.pow(2,32)-2, Math.pow(2,32)-1]) {
     assertThrowsInstanceOf(() => v2i(bad), WebAssembly.RuntimeError);
     assertThrowsInstanceOf(() => i2i(bad, 0), WebAssembly.RuntimeError);
--- a/js/src/jit-test/tests/wasm/binary.js
+++ b/js/src/jit-test/tests/wasm/binary.js
@@ -363,20 +363,21 @@ function runStackTraceTest(namesContent,
     var callback = () => {
         var prevFrameEntry = new Error().stack.split('\n')[1];
         result = prevFrameEntry.split('@')[0];
     };
     wasmEval(moduleWithSections(sections), {"env": { callback }}).run();
     assertEq(result, expectedName);
 };
 
-runStackTraceTest(null, 'wasm-function[0]');
-runStackTraceTest([{name: 'test'}], 'test');
-runStackTraceTest([{name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
-runStackTraceTest([{name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
-runStackTraceTest([{name: 'test1'}, {name: 'test2'}], 'test1');
-runStackTraceTest([{name: 'test☃'}], 'test☃');
-runStackTraceTest([{name: 'te\xE0\xFF'}], 'te\xE0\xFF');
-runStackTraceTest([], 'wasm-function[0]');
+runStackTraceTest(null, 'wasm-function[1]');
+runStackTraceTest([{name:'blah'}, {name: 'test'}], 'test');
+runStackTraceTest([{name:'blah'}, {name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
+runStackTraceTest([{name:'blah'}, {name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
+runStackTraceTest([{name:'blah'}, {name: 'test1'}, {name: 'test2'}], 'test1');
+runStackTraceTest([{name:'blah'}, {name: 'test☃'}], 'test☃');
+runStackTraceTest([{name:'blah'}, {name: 'te\xE0\xFF'}], 'te\xE0\xFF');
+runStackTraceTest([{name:'blah'}], 'wasm-function[1]');
+runStackTraceTest([], 'wasm-function[1]');
 // Notice that invalid names section content shall not fail the parsing
-runStackTraceTest([{nameLen: 100, name: 'test'}], 'wasm-function[0]'); // invalid name size
-runStackTraceTest([{name: 'test', locals: [{nameLen: 40, name: 'var1'}]}], 'wasm-function[0]'); // invalid variable name size
-runStackTraceTest([{name: ''}], 'wasm-function[0]'); // empty name
+runStackTraceTest([{name:'blah'}, {nameLen: 100, name: 'test'}], 'wasm-function[1]'); // invalid name size
+runStackTraceTest([{name:'blah'}, {name: 'test', locals: [{nameLen: 40, name: 'var1'}]}], 'wasm-function[1]'); // invalid variable name size
+runStackTraceTest([{name:'blah'}, {name: ''}], 'wasm-function[1]'); // empty name
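
The added {name:'blah'} entries follow the same renumbering: the names section is positional over the whole index space, so entry 0 now names the import and entry 1 names the first definition. A hedged sketch of the lookup these tests exercise (stand-in types; the real code decodes names out of the bytecode):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Sketch: positional name lookup over the unified function index space.
    // Missing, empty, or invalid entries yield nullptr and the caller
    // synthesizes a "wasm-function[i]" label instead.
    static const char* LookupFuncName(const std::vector<std::string>& names,
                                      uint32_t funcIndex) {
        if (funcIndex < names.size() && !names[funcIndex].empty())
            return names[funcIndex].c_str();
        return nullptr;
    }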
--- a/js/src/jit-test/tests/wasm/extract-code.js
+++ b/js/src/jit-test/tests/wasm/extract-code.js
@@ -2,14 +2,14 @@
 load(libdir + "wasm.js");
 
 var module = new WebAssembly.Module(wasmTextToBinary(`(module (func (nop)))`));
 var exp = wasmExtractCode(module);
 assertEq(exp.code instanceof Uint8Array, true);
 assertEq(Array.isArray(exp.segments), true);
 var funcs = exp.segments.filter(s => s.kind === 0);
 assertEq(funcs.length, 1);
-assertEq(funcs[0].funcDefIndex, 0);
+assertEq(funcs[0].funcIndex, 0);
 assertEq(funcs[0].begin >= 0, true);
 assertEq(funcs[0].begin <= funcs[0].funcBodyBegin, true);
 assertEq(funcs[0].funcBodyBegin < funcs[0].funcBodyEnd, true);
 assertEq(funcs[0].funcBodyEnd <= funcs[0].end, true);
 assertEq(funcs[0].end <= exp.code.length, true);
--- a/js/src/jit-test/tests/wasm/profiling.js
+++ b/js/src/jit-test/tests/wasm/profiling.js
@@ -213,22 +213,22 @@ WebAssembly.RuntimeError,
     )`));
 
     // Instantiate while not active:
     var e1 = new Instance(m1).exports;
     var e2 = new Instance(m2, {a:e1}).exports;
     enableSPSProfiling();
     enableSingleStepProfiling();
     assertEq(e2.bar(), 42);
-    assertEqStacks(disableSingleStepProfiling(), ["", ">", "0,>", "0,0,>", "0,>", ">", ""]);
+    assertEqStacks(disableSingleStepProfiling(), ["", ">", "1,>", "0,1,>", "1,>", ">", ""]);
     disableSPSProfiling();
     assertEq(e2.bar(), 42);
 
     // Instantiate while active:
     enableSPSProfiling();
     var e3 = new Instance(m1).exports;
     var e4 = new Instance(m2, {a:e3}).exports;
     enableSingleStepProfiling();
     assertEq(e4.bar(), 42);
-    assertEqStacks(disableSingleStepProfiling(), ["", ">", "0,>", "0,0,>", "0,>", ">", ""]);
+    assertEqStacks(disableSingleStepProfiling(), ["", ">", "1,>", "0,1,>", "1,>", ">", ""]);
     disableSPSProfiling();
     assertEq(e4.bar(), 42);
 })();
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1502,18 +1502,18 @@ CodeGeneratorShared::emitWasmCallBase(LW
     // Save the caller's TLS register in a reserved stack slot (below the
     // call's stack arguments) for retrieval after the call.
     if (mir->saveTls())
         masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), mir->tlsStackOffset()));
 
     const wasm::CallSiteDesc& desc = mir->desc();
     const wasm::CalleeDesc& callee = mir->callee();
     switch (callee.which()) {
-      case wasm::CalleeDesc::Definition:
-        masm.call(desc, callee.funcDefIndex());
+      case wasm::CalleeDesc::Func:
+        masm.call(desc, callee.funcIndex());
         break;
       case wasm::CalleeDesc::Import:
         masm.wasmCallImport(desc, callee);
         break;
       case wasm::CalleeDesc::WasmTable:
       case wasm::CalleeDesc::AsmJSTable:
         masm.wasmCallIndirect(desc, callee);
         break;
--- a/js/src/jsfun.h
+++ b/js/src/jsfun.h
@@ -710,17 +710,17 @@ class FunctionExtended : public JSFuncti
      * first slot.
      */
     static const unsigned WASM_INSTANCE_SLOT = 0;
 
     /*
      * wasm/asm.js exported functions store the function index of the exported
      * function in the original module.
      */
-    static const unsigned WASM_FUNC_DEF_INDEX_SLOT = 1;
+    static const unsigned WASM_FUNC_INDEX_SLOT = 1;
 
     /*
      * asm.js module functions store their WasmModuleObject in the first slot.
      */
     static const unsigned ASMJS_MODULE_SLOT = 0;
 
 
     static inline size_t offsetOfExtendedSlot(unsigned which) {
--- a/js/src/wasm/AsmJS.cpp
+++ b/js/src/wasm/AsmJS.cpp
@@ -353,20 +353,23 @@ struct js::AsmJSMetadata : Metadata, Asm
         return scriptSource.get()->mutedErrors();
     }
     const char16_t* displayURL() const override {
         return scriptSource.get()->hasDisplayURL() ? scriptSource.get()->displayURL() : nullptr;
     }
     ScriptSource* maybeScriptSource() const override {
         return scriptSource.get();
     }
-    bool getFuncDefName(JSContext* cx, const Bytes*, uint32_t funcDefIndex,
-                        TwoByteName* name) const override
+    bool getFuncName(JSContext* cx, const Bytes*, uint32_t funcIndex,
+                     TwoByteName* name) const override
     {
-        const char* p = asmJSFuncNames[funcDefIndex].get();
+        // asm.js doesn't allow exporting imports or putting imports in tables
+        MOZ_ASSERT(funcIndex >= AsmJSFirstDefFuncIndex);
+
+        const char* p = asmJSFuncNames[funcIndex - AsmJSFirstDefFuncIndex].get();
         UTF8Chars utf8(p, strlen(p));
 
         size_t twoByteLength;
         UniqueTwoByteChars chars(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &twoByteLength).get());
         if (!chars)
             return false;
 
         if (!name->growByUninitialized(twoByteLength))
@@ -1835,18 +1838,18 @@ class MOZ_STACK_CLASS ModuleValidator
 
         CompileArgs args;
         if (!args.initFromContext(cx_, Move(scriptedCaller)))
             return false;
 
         auto genData = MakeUnique<ModuleGeneratorData>(ModuleKind::AsmJS);
         if (!genData ||
             !genData->sigs.resize(MaxSigs) ||
-            !genData->funcDefSigs.resize(MaxFuncs) ||
-            !genData->funcImports.resize(MaxImports) ||
+            !genData->funcSigs.resize(MaxFuncs) ||
+            !genData->funcImportGlobalDataOffsets.resize(AsmJSMaxImports) ||
             !genData->tables.resize(MaxTables) ||
             !genData->asmJSSigToTableIndex.resize(MaxSigs))
         {
             return false;
         }
 
         genData->minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
 
@@ -2132,34 +2135,34 @@ class MOZ_STACK_CLASS ModuleValidator
         if (maybeField)
             fieldChars = StringToNewUTF8CharsZ(cx_, *maybeField);
         else
             fieldChars = DuplicateString("");
         if (!fieldChars)
             return false;
 
         // Declare which function is exported which gives us an index into the
-        // module FuncDefExportVector.
-        if (!mg_.addFuncDefExport(Move(fieldChars), mg_.numFuncImports() + func.index()))
+        // module FuncExportVector.
+        if (!mg_.addFuncExport(Move(fieldChars), func.index()))
             return false;
 
         // The exported function might have already been exported in which case
         // the index will refer into the range of AsmJSExports.
         return asmJSMetadata_->asmJSExports.emplaceBack(func.index(),
                                                         func.srcBegin() - asmJSMetadata_->srcStart,
                                                         func.srcEnd() - asmJSMetadata_->srcStart);
     }
     bool addFunction(PropertyName* name, uint32_t firstUse, Sig&& sig, Func** func) {
         uint32_t sigIndex;
         if (!declareSig(Move(sig), &sigIndex))
             return false;
-        uint32_t funcIndex = numFunctions();
+        uint32_t funcIndex = AsmJSFirstDefFuncIndex + numFunctions();
         if (funcIndex >= MaxFuncs)
             return failCurrentOffset("too many functions");
-        mg_.initFuncDefSig(funcIndex, sigIndex);
+        mg_.initFuncSig(funcIndex, sigIndex);
         Global* global = validationLifo_.new_<Global>(Global::Function);
         if (!global)
             return false;
         global->u.funcIndex_ = funcIndex;
         if (!globalMap_.putNew(name, global))
             return false;
         *func = validationLifo_.new_<Func>(name, firstUse, funcIndex);
         return *func && functions_.append(*func);
@@ -2185,33 +2188,33 @@ class MOZ_STACK_CLASS ModuleValidator
     }
     bool defineFuncPtrTable(uint32_t funcPtrTableIndex, Uint32Vector&& elems) {
         FuncPtrTable& table = *funcPtrTables_[funcPtrTableIndex];
         if (table.defined())
             return false;
         table.define();
         return mg_.initSigTableElems(table.sigIndex(), Move(elems));
     }
-    bool declareImport(PropertyName* name, Sig&& sig, unsigned ffiIndex, uint32_t* importIndex) {
+    bool declareImport(PropertyName* name, Sig&& sig, unsigned ffiIndex, uint32_t* funcIndex) {
         ImportMap::AddPtr p = importMap_.lookupForAdd(NamedSig::Lookup(name, sig));
         if (p) {
-            *importIndex = p->value();
+            *funcIndex = p->value();
             return true;
         }
-        *importIndex = asmJSMetadata_->asmJSImports.length();
-        if (*importIndex >= MaxImports)
+        *funcIndex = asmJSMetadata_->asmJSImports.length();
+        if (*funcIndex > AsmJSMaxImports)
             return failCurrentOffset("too many imports");
         if (!asmJSMetadata_->asmJSImports.emplaceBack(ffiIndex))
             return false;
         uint32_t sigIndex;
         if (!declareSig(Move(sig), &sigIndex))
             return false;
-        if (!mg_.initImport(*importIndex, sigIndex))
-            return false;
-        return importMap_.add(p, NamedSig(name, mg_.sig(sigIndex)), *importIndex);
+        if (!mg_.initImport(*funcIndex, sigIndex))
+            return false;
+        return importMap_.add(p, NamedSig(name, mg_.sig(sigIndex)), *funcIndex);
     }
 
     bool tryConstantAccess(uint64_t start, uint64_t width) {
         MOZ_ASSERT(UINT64_MAX - start > width);
         uint64_t len = start + width;
         if (len > uint64_t(INT32_MAX) + 1)
             return false;
         len = RoundUpToNextValidAsmJSHeapLength(len);
@@ -2308,18 +2311,20 @@ class MOZ_STACK_CLASS ModuleValidator
         if (GlobalMap::Ptr p = globalMap_.lookup(name))
             return p->value();
         return nullptr;
     }
 
     Func* lookupFunction(PropertyName* name) {
         if (GlobalMap::Ptr p = globalMap_.lookup(name)) {
             Global* value = p->value();
-            if (value->which() == Global::Function)
-                return functions_[value->funcIndex()];
+            if (value->which() == Global::Function) {
+                MOZ_ASSERT(value->funcIndex() >= AsmJSFirstDefFuncIndex);
+                return functions_[value->funcIndex() - AsmJSFirstDefFuncIndex];
+            }
         }
         return nullptr;
     }
 
     bool lookupStandardLibraryMathName(PropertyName* name, MathBuiltin* mathBuiltin) const {
         if (MathNameMap::Ptr p = standardLibraryMathNames_.lookup(name)) {
             *mathBuiltin = p->value();
             return true;
@@ -4748,17 +4753,17 @@ CheckFunctionSignature(ModuleValidator& 
 {
     ModuleValidator::Func* existing = m.lookupFunction(name);
     if (!existing) {
         if (!CheckModuleLevelName(m, usepn, name))
             return false;
         return m.addFunction(name, usepn->pn_pos.begin, Move(sig), func);
     }
 
-    if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcDefSig(existing->index())))
+    if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcSig(existing->index())))
         return false;
 
     *func = existing;
     return true;
 }
 
 static bool
 CheckIsArgType(FunctionValidator& f, ParseNode* argNode, Type type)
@@ -4785,17 +4790,16 @@ CheckInternalCall(FunctionValidator& f, 
 
     ModuleValidator::Func* callee;
     if (!CheckFunctionSignature(f.m(), callNode, Move(sig), calleeName, &callee))
         return false;
 
     if (!f.writeCall(callNode, Expr::Call))
         return false;
 
-    // Function's index, to find out the function's entry
     if (!f.encoder().writeVarU32(callee->index()))
         return false;
 
     *type = Type::ret(ret);
     return true;
 }
 
 static bool
@@ -4903,25 +4907,24 @@ CheckFFICall(FunctionValidator& f, Parse
         return f.fail(callNode, "FFI calls can't return SIMD values");
 
     ValTypeVector args;
     if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args))
         return false;
 
     Sig sig(Move(args), ret.canonicalToExprType());
 
-    uint32_t importIndex;
-    if (!f.m().declareImport(calleeName, Move(sig), ffiIndex, &importIndex))
-        return false;
-
-    if (!f.writeCall(callNode, Expr::OldCallImport))
-        return false;
-
-    // Import index
-    if (!f.encoder().writeVarU32(importIndex))
+    uint32_t funcIndex;
+    if (!f.m().declareImport(calleeName, Move(sig), ffiIndex, &funcIndex))
+        return false;
+
+    if (!f.writeCall(callNode, Expr::Call))
+        return false;
+
+    if (!f.encoder().writeVarU32(funcIndex))
         return false;
 
     *type = Type::ret(ret);
     return true;
 }
 
 static bool
 CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType)
@@ -7203,17 +7206,17 @@ CheckFuncPtrTable(ModuleValidator& m, Pa
         if (!elem->isKind(PNK_NAME))
             return m.fail(elem, "function-pointer table's elements must be names of functions");
 
         PropertyName* funcName = elem->name();
         const ModuleValidator::Func* func = m.lookupFunction(funcName);
         if (!func)
             return m.fail(elem, "function-pointer table's elements must be names of functions");
 
-        const Sig& funcSig = m.mg().funcDefSig(func->index());
+        const Sig& funcSig = m.mg().funcSig(func->index());
         if (sig) {
             if (*sig != funcSig)
                 return m.fail(elem, "all functions in table must have same signature");
         } else {
             sig = &funcSig;
         }
 
         if (!elemFuncIndices.append(func->index()))
@@ -7263,24 +7266,21 @@ CheckFuncPtrTables(ModuleValidator& m)
 
 static bool
 CheckModuleExportFunction(ModuleValidator& m, ParseNode* pn, PropertyName* maybeFieldName = nullptr)
 {
     if (!pn->isKind(PNK_NAME))
         return m.fail(pn, "expected name of exported function");
 
     PropertyName* funcName = pn->name();
-    const ModuleValidator::Global* global = m.lookupGlobal(funcName);
-    if (!global)
-        return m.failName(pn, "exported function name '%s' not found", funcName);
-
-    if (global->which() != ModuleValidator::Global::Function)
-        return m.failName(pn, "'%s' is not a function", funcName);
-
-    return m.addExportField(pn, m.function(global->funcIndex()), maybeFieldName);
+    const ModuleValidator::Func* func = m.lookupFunction(funcName);
+    if (!func)
+        return m.failName(pn, "function '%s' not found", funcName);
+
+    return m.addExportField(pn, *func, maybeFieldName);
 }
 
 static bool
 CheckModuleExportObject(ModuleValidator& m, ParseNode* object)
 {
     MOZ_ASSERT(object->isKind(PNK_OBJECT));
 
     for (ParseNode* pn = ListHead(object); pn; pn = NextNode(pn)) {
@@ -8924,17 +8924,17 @@ js::AsmJSModuleToString(JSContext* cx, H
 }
 
 JSString*
 js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
 {
     MOZ_ASSERT(IsAsmJSFunction(fun));
 
     const AsmJSMetadata& metadata = ExportedFunctionToInstance(fun).metadata().asAsmJS();
-    const AsmJSExport& f = metadata.lookupAsmJSExport(ExportedFunctionToDefinitionIndex(fun));
+    const AsmJSExport& f = metadata.lookupAsmJSExport(ExportedFunctionToFuncIndex(fun));
 
     uint32_t begin = metadata.srcStart + f.startOffsetInModule();
     uint32_t end = metadata.srcStart + f.endOffsetInModule();
 
     ScriptSource* source = metadata.scriptSource.get();
     StringBuffer out(cx);
 
     if (!out.append("function "))
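
asm.js gets its own wrinkle: FFI imports are discovered lazily during parsing, so defined functions are numbered from a fixed AsmJSFirstDefFuncIndex rather than from the actual import count. The offsetting in getFuncName and lookupFunction above boils down to this sketch (names and types simplified; the assert mirrors the comment that asm.js can't export imports or put them in tables):

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Sketch: asm.js function index layout after this patch.
    //   [0, firstDefFuncIndex)   -> FFI imports
    //   [firstDefFuncIndex, ...) -> defined functions
    static const std::string& AsmJSFuncName(const std::vector<std::string>& defNames,
                                            uint32_t funcIndex,
                                            uint32_t firstDefFuncIndex) {
        // Only definition indices should reach name lookup.
        assert(funcIndex >= firstDefFuncIndex);
        return defNames[funcIndex - firstDefFuncIndex];
    }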
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -1969,17 +1969,17 @@ class BaseCompiler
 
     //////////////////////////////////////////////////////////////////////
     //
     // Function prologue and epilogue.
 
     void beginFunction() {
         JitSpew(JitSpew_Codegen, "# Emitting wasm baseline code");
 
-        SigIdDesc sigId = mg_.funcDefSigs[func_.defIndex()]->id;
+        SigIdDesc sigId = mg_.funcSigs[func_.index()]->id;
         GenerateFunctionPrologue(masm, localSize_, sigId, prologueTrapOffset_,
                                  &compileResults_.offsets());
 
         MOZ_ASSERT(masm.framePushed() == uint32_t(localSize_));
 
         maxFramePushed_ = localSize_;
 
         // We won't know until after we've generated code how big the
@@ -2301,20 +2301,20 @@ class BaseCompiler
             }
             break;
           }
           default:
             MOZ_CRASH("Function argument type");
         }
     }
 
-    void callDefinition(uint32_t funcDefIndex, const FunctionCall& call)
+    void callDefinition(uint32_t funcIndex, const FunctionCall& call)
     {
-        CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::FuncDef);
-        masm.call(desc, funcDefIndex);
+        CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
+        masm.call(desc, funcIndex);
     }
 
     void callSymbolic(SymbolicAddress callee, const FunctionCall& call) {
         CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
         masm.call(callee);
     }
 
     // Precondition: sync()
@@ -3587,19 +3587,17 @@ class BaseCompiler
     MOZ_MUST_USE bool emitIf();
     MOZ_MUST_USE bool emitElse();
     MOZ_MUST_USE bool emitEnd();
     MOZ_MUST_USE bool emitBr();
     MOZ_MUST_USE bool emitBrIf();
     MOZ_MUST_USE bool emitBrTable();
     MOZ_MUST_USE bool emitReturn();
     MOZ_MUST_USE bool emitCallArgs(const ValTypeVector& args, FunctionCall& baselineCall);
-    MOZ_MUST_USE bool emitCallImportCommon(uint32_t lineOrBytecode, uint32_t funcImportIndex);
     MOZ_MUST_USE bool emitCall();
-    MOZ_MUST_USE bool emitOldCallImport();
     MOZ_MUST_USE bool emitCallIndirect(bool oldStyle);
     MOZ_MUST_USE bool emitCommonMathCall(uint32_t lineOrBytecode, SymbolicAddress callee,
                                          ValTypeVector& signature, ExprType retType);
     MOZ_MUST_USE bool emitUnaryMathBuiltinCall(SymbolicAddress callee, ValType operandType);
     MOZ_MUST_USE bool emitBinaryMathBuiltinCall(SymbolicAddress callee, ValType operandType);
 #ifdef QUOT_REM_I64_CALLOUT
     MOZ_MUST_USE bool emitDivOrModI64BuiltinCall(SymbolicAddress callee, ValType operandType);
 #endif
@@ -5344,122 +5342,64 @@ BaseCompiler::pushReturned(const Functio
 // lightweight sync.
 //
 // Even some of the pushing may be unnecessary if the registers
 // will be consumed by the call, because then what we want is
 // parallel assignment to the argument registers or onto the stack
 // for outgoing arguments.  A sync() is just simpler.
 
 bool
-BaseCompiler::emitCallImportCommon(uint32_t lineOrBytecode, uint32_t funcImportIndex)
-{
-    const FuncImportGenDesc& funcImport = mg_.funcImports[funcImportIndex];
-    const Sig& sig = *funcImport.sig;
+BaseCompiler::emitCall()
+{
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+    uint32_t funcIndex;
+    if (!iter_.readCall(&funcIndex))
+        return false;
 
     if (deadCode_)
         return true;
 
     sync();
 
+    const Sig& sig = *mg_.funcSigs[funcIndex];
+    bool import = mg_.funcIsImport(funcIndex);
+
     uint32_t numArgs = sig.args().length();
     size_t stackSpace = stackConsumed(numArgs);
 
     FunctionCall baselineCall(lineOrBytecode);
-    beginCall(baselineCall, UseABI::Wasm, InterModule::True);
+    beginCall(baselineCall, UseABI::Wasm, import ? InterModule::True : InterModule::False);
 
     if (!emitCallArgs(sig.args(), baselineCall))
         return false;
 
     if (!iter_.readCallReturn(sig.ret()))
         return false;
 
-    callImport(funcImport.globalDataOffset, baselineCall);
+    if (import)
+        callImport(mg_.funcImportGlobalDataOffsets[funcIndex], baselineCall);
+    else
+        callDefinition(funcIndex, baselineCall);
 
     endCall(baselineCall);
 
     // TODO / OPTIMIZE: It would be better to merge this freeStack()
     // into the one in endCall, if we can.
 
     popValueStackBy(numArgs);
     masm.freeStack(stackSpace);
 
     if (!IsVoid(sig.ret()))
         pushReturned(baselineCall, sig.ret());
 
     return true;
 }
 
 bool
-BaseCompiler::emitCall()
-{
-    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
-
-    uint32_t calleeIndex;
-    if (!iter_.readCall(&calleeIndex))
-        return false;
-
-    // For asm.js, imports are not part of the function index space so in
-    // these cases firstFuncDefIndex is fixed to 0, even if there are
-    // function imports.
-    if (calleeIndex < mg_.firstFuncDefIndex)
-        return emitCallImportCommon(lineOrBytecode, calleeIndex);
-
-    if (deadCode_)
-        return true;
-
-    sync();
-
-    uint32_t funcDefIndex = calleeIndex - mg_.firstFuncDefIndex;
-    const Sig& sig = *mg_.funcDefSigs[funcDefIndex];
-    uint32_t numArgs = sig.args().length();
-    size_t stackSpace = stackConsumed(numArgs);
-
-    FunctionCall baselineCall(lineOrBytecode);
-    beginCall(baselineCall, UseABI::Wasm, InterModule::False);
-
-    if (!emitCallArgs(sig.args(), baselineCall))
-        return false;
-
-    if (!iter_.readCallReturn(sig.ret()))
-        return false;
-
-    callDefinition(funcDefIndex, baselineCall);
-
-    endCall(baselineCall);
-
-    // TODO / OPTIMIZE: It would be better to merge this freeStack()
-    // into the one in endCall, if we can.
-
-    popValueStackBy(numArgs);
-    masm.freeStack(stackSpace);
-
-    if (!IsVoid(sig.ret()))
-        pushReturned(baselineCall, sig.ret());
-
-    return true;
-}
-
-bool
-BaseCompiler::emitOldCallImport()
-{
-    MOZ_ASSERT(!mg_.firstFuncDefIndex);
-
-    uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
-
-    uint32_t funcImportIndex;
-    if (!iter_.readCall(&funcImportIndex))
-        return false;
-
-    if (deadCode_)
-        return true;
-
-    return emitCallImportCommon(lineOrBytecode, funcImportIndex);
-}
-
-bool
 BaseCompiler::emitCallIndirect(bool oldStyle)
 {
     uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
 
     uint32_t sigIndex;
     Nothing callee_;
     if (oldStyle) {
         if (!iter_.readOldCallIndirect(&sigIndex))
@@ -6666,18 +6606,16 @@ BaseCompiler::emitBody()
 
           // Calls
           case Expr::Call:
             CHECK_NEXT(emitCall());
           case Expr::CallIndirect:
             CHECK_NEXT(emitCallIndirect(/* oldStyle = */ false));
           case Expr::OldCallIndirect:
             CHECK_NEXT(emitCallIndirect(/* oldStyle = */ true));
-          case Expr::OldCallImport:
-            CHECK_NEXT(emitOldCallImport());
 
           // Locals and globals
           case Expr::GetLocal:
             CHECK_NEXT(emitGetLocal());
           case Expr::SetLocal:
             CHECK_NEXT(emitSetLocal());
           case Expr::TeeLocal:
             CHECK_NEXT(emitTeeLocal());
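
With imports folded into the index space, the baseline compiler collapses emitCall and the old emitOldCallImport into one path that dispatches on whether the callee index falls in the import range. Reduced to a sketch (types and state heavily simplified relative to the register and stack bookkeeping above):

    #include <cstdint>
    #include <vector>

    // Stubs standing in for the real masm call emitters.
    static void CallImport(uint32_t globalDataOffset) { /* indirect call via TLS */ }
    static void CallDefinition(uint32_t funcIndex) { /* direct, patchable call */ }

    struct ModuleEnv {
        std::vector<uint32_t> funcImportGlobalDataOffsets; // one entry per import
        bool funcIsImport(uint32_t funcIndex) const {
            return funcIndex < funcImportGlobalDataOffsets.size();
        }
    };

    // Sketch: unified call emission keyed on the callee's function index.
    static void EmitCall(const ModuleEnv& mg, uint32_t funcIndex) {
        if (mg.funcIsImport(funcIndex))
            CallImport(mg.funcImportGlobalDataOffsets[funcIndex]);
        else
            CallDefinition(funcIndex);
    }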
--- a/js/src/wasm/WasmBinary.h
+++ b/js/src/wasm/WasmBinary.h
@@ -384,21 +384,16 @@ enum class Expr : uint32_t // fix type s
     F64Exp,
     F64Log,
     F64Pow,
     F64Atan2,
 
     // asm.js-style call_indirect with the callee evaluated first.
     OldCallIndirect,
 
-    // asm.js-style call to an import; asm.js imports are not (and cannot be,
-    // due to streaming compilation and lazy discovery) injected into the
-    // function index space so Expr::Call cannot be used.
-    OldCallImport,
-
     // Atomics
     I32AtomicsCompareExchange,
     I32AtomicsExchange,
     I32AtomicsLoad,
     I32AtomicsStore,
     I32AtomicsBinOp,
 
     // SIMD
--- a/js/src/wasm/WasmBinaryIterator.cpp
+++ b/js/src/wasm/WasmBinaryIterator.cpp
@@ -348,17 +348,16 @@ wasm::Classify(Expr expr)
         return ExprKind::TeeLocal;
       case Expr::GetGlobal:
         return ExprKind::GetGlobal;
       case Expr::SetGlobal:
         return ExprKind::SetGlobal;
       case Expr::TeeGlobal:
         return ExprKind::TeeGlobal;
       case Expr::Call:
-      case Expr::OldCallImport:
         return ExprKind::Call;
       case Expr::CallIndirect:
         return ExprKind::CallIndirect;
       case Expr::OldCallIndirect:
         return ExprKind::OldCallIndirect;
       case Expr::Return:
       case Expr::Limit:
         // Accept Limit, for use in decoding the end of a function after the body.
--- a/js/src/wasm/WasmBinaryToAST.cpp
+++ b/js/src/wasm/WasmBinaryToAST.cpp
@@ -87,17 +87,17 @@ class AstDecodeContext
 
     JSContext* cx;
     LifoAlloc& lifo;
     Decoder& d;
     bool generateNames;
 
   private:
     AstModule& module_;
-    AstIndexVector funcSigs_;
+    AstIndexVector funcDefSigs_;
     AstDecodeExprIter *iter_;
     AstDecodeStack exprs_;
     DepthStack depths_;
     const ValTypeVector* locals_;
     GlobalDescVector globals_;
     AstNameVector blockLabels_;
     uint32_t currentLabelIndex_;
     ExprType retType_;
@@ -105,28 +105,28 @@ class AstDecodeContext
   public:
     AstDecodeContext(JSContext* cx, LifoAlloc& lifo, Decoder& d, AstModule& module,
                      bool generateNames)
      : cx(cx),
        lifo(lifo),
        d(d),
        generateNames(generateNames),
        module_(module),
-       funcSigs_(lifo),
+       funcDefSigs_(lifo),
        iter_(nullptr),
        exprs_(lifo),
        depths_(lifo),
        locals_(nullptr),
        blockLabels_(lifo),
        currentLabelIndex_(0),
        retType_(ExprType::Limit)
     {}
 
     AstModule& module() { return module_; }
-    AstIndexVector& funcSigs() { return funcSigs_; }
+    AstIndexVector& funcDefSigs() { return funcDefSigs_; }
     AstDecodeExprIter& iter() { return *iter_; }
     AstDecodeStack& exprs() { return exprs_; }
     DepthStack& depths() { return depths_; }
 
     AstNameVector& blockLabels() { return blockLabels_; }
 
     ExprType retType() const { return retType_; }
     const ValTypeVector& locals() const { return *locals_; }
@@ -317,37 +317,37 @@ AstDecodeDrop(AstDecodeContext& c)
         return false;
 
     return true;
 }
 
 static bool
 AstDecodeCall(AstDecodeContext& c)
 {
-    uint32_t calleeIndex;
-    if (!c.iter().readCall(&calleeIndex))
+    uint32_t funcIndex;
+    if (!c.iter().readCall(&funcIndex))
         return false;
 
     if (!c.iter().inReachableCode())
         return true;
 
     uint32_t sigIndex;
     AstRef funcRef;
-    if (calleeIndex < c.module().funcImportNames().length()) {
-        AstImport* import = c.module().imports()[calleeIndex];
+    if (funcIndex < c.module().funcImportNames().length()) {
+        AstImport* import = c.module().imports()[funcIndex];
         sigIndex = import->funcSig().index();
         funcRef = AstRef(import->name());
     } else {
-        uint32_t funcDefIndex = calleeIndex - c.module().funcImportNames().length();
-        if (funcDefIndex >= c.funcSigs().length())
+        uint32_t funcDefIndex = funcIndex - c.module().funcImportNames().length();
+        if (funcDefIndex >= c.funcDefSigs().length())
             return c.iter().fail("callee index out of range");
 
-        sigIndex = c.funcSigs()[funcDefIndex];
+        sigIndex = c.funcDefSigs()[funcDefIndex];
 
-        if (!AstDecodeGenerateRef(c, AstName(u"func"), calleeIndex, &funcRef))
+        if (!AstDecodeGenerateRef(c, AstName(u"func"), funcIndex, &funcRef))
             return false;
     }
 
     const AstSig* sig = c.module().sigs()[sigIndex];
 
     AstExprVector args(c.lifo);
     if (!AstDecodeCallArgs(c, *sig, &args))
         return false;
@@ -1524,21 +1524,21 @@ AstDecodeFunctionSection(AstDecodeContex
 
     uint32_t numDecls;
     if (!c.d.readVarU32(&numDecls))
         return c.d.fail("expected number of declarations");
 
     if (numDecls > MaxFuncs)
         return c.d.fail("too many functions");
 
-    if (!c.funcSigs().resize(numDecls))
+    if (!c.funcDefSigs().resize(numDecls))
         return false;
 
     for (uint32_t i = 0; i < numDecls; i++) {
-        if (!AstDecodeSignatureIndex(c, &c.funcSigs()[i]))
+        if (!AstDecodeSignatureIndex(c, &c.funcDefSigs()[i]))
             return false;
     }
 
     if (!c.d.finishSection(sectionStart, sectionSize, "function"))
         return false;
 
     return true;
 }
@@ -1892,32 +1892,32 @@ AstDecodeExportSection(AstDecodeContext&
 
     if (!c.d.finishSection(sectionStart, sectionSize, "export"))
         return false;
 
     return true;
 }
 
 static bool
-AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcIndex, AstFunc** func)
+AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcDefIndex, AstFunc** func)
 {
     uint32_t offset = c.d.currentOffset();
     uint32_t bodySize;
     if (!c.d.readVarU32(&bodySize))
         return c.d.fail("expected number of function body bytes");
 
     if (c.d.bytesRemain() < bodySize)
         return c.d.fail("function body length too big");
 
     const uint8_t* bodyBegin = c.d.currentPosition();
     const uint8_t* bodyEnd = bodyBegin + bodySize;
 
     AstDecodeExprIter iter(c.d);
 
-    uint32_t sigIndex = c.funcSigs()[funcIndex];
+    uint32_t sigIndex = c.funcDefSigs()[funcDefIndex];
     const AstSig* sig = c.module().sigs()[sigIndex];
 
     AstValTypeVector vars(c.lifo);
     AstNameVector localsNames(c.lifo);
     AstExprVector body(c.lifo);
 
     ValTypeVector locals;
     if (!locals.appendAll(sig->args()))
@@ -1925,17 +1925,17 @@ AstDecodeFunctionBody(AstDecodeContext &
 
     if (!DecodeLocalEntries(c.d, &locals))
         return c.d.fail("failed decoding local entries");
 
     c.startFunction(&iter, &locals, sig->ret());
 
     AstName funcName;
     if (!AstDecodeGenerateName(c, AstName(u"func"),
-                               c.module().funcImportNames().length() + funcIndex,
+                               c.module().funcImportNames().length() + funcDefIndex,
                                &funcName))
         return false;
 
     uint32_t numParams = sig->args().length();
     uint32_t numLocals = locals.length();
     for (uint32_t i = numParams; i < numLocals; i++) {
         if (!vars.append(locals[i]))
             return false;
@@ -1995,32 +1995,32 @@ AstDecodeFunctionBody(AstDecodeContext &
 static bool
 AstDecodeCodeSection(AstDecodeContext &c)
 {
     uint32_t sectionStart, sectionSize;
     if (!c.d.startSection(SectionId::Code, &sectionStart, &sectionSize, "code"))
         return false;
 
     if (sectionStart == Decoder::NotStarted) {
-        if (c.funcSigs().length() != 0)
+        if (c.funcDefSigs().length() != 0)
             return c.d.fail("expected function bodies");
 
         return false;
     }
 
     uint32_t numFuncBodies;
     if (!c.d.readVarU32(&numFuncBodies))
         return c.d.fail("expected function body count");
 
-    if (numFuncBodies != c.funcSigs().length())
+    if (numFuncBodies != c.funcDefSigs().length())
         return c.d.fail("function body count does not match function signature count");
 
-    for (uint32_t funcIndex = 0; funcIndex < numFuncBodies; funcIndex++) {
+    for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncBodies; funcDefIndex++) {
         AstFunc* func;
-        if (!AstDecodeFunctionBody(c, funcIndex, &func))
+        if (!AstDecodeFunctionBody(c, funcDefIndex, &func))
             return false;
         if (!c.module().append(func))
             return false;
     }
 
     if (!c.d.finishSection(sectionStart, sectionSize, "code"))
         return false;
 
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -159,17 +159,17 @@ SendCodeRangesToProfiler(JSContext* cx, 
         if (!codeRange.isFunction())
             continue;
 
         uintptr_t start = uintptr_t(cs.base() + codeRange.begin());
         uintptr_t end = uintptr_t(cs.base() + codeRange.end());
         uintptr_t size = end - start;
 
         TwoByteName name(cx);
-        if (!metadata.getFuncDefName(cx, &bytecode, codeRange.funcDefIndex(), &name))
+        if (!metadata.getFuncName(cx, &bytecode, codeRange.funcIndex(), &name))
             return false;
 
         UniqueChars chars(
             (char*)JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, name.begin(), name.length()).get());
         if (!chars)
             return false;
 
         // Avoid "unused" warnings
@@ -275,40 +275,40 @@ CodeSegment::onMovingGrow(uint8_t* prevM
     AutoWritableJitCode awjc(base(), codeLength());
     AutoFlushICache afc("CodeSegment::onMovingGrow");
     AutoFlushICache::setRange(uintptr_t(base()), codeLength());
 
     SpecializeToMemory(prevMemoryBase, *this, metadata, buffer);
 }
 
 size_t
-FuncDefExport::serializedSize() const
+FuncExport::serializedSize() const
 {
     return sig_.serializedSize() +
            sizeof(pod);
 }
 
 uint8_t*
-FuncDefExport::serialize(uint8_t* cursor) const
+FuncExport::serialize(uint8_t* cursor) const
 {
     cursor = sig_.serialize(cursor);
     cursor = WriteBytes(cursor, &pod, sizeof(pod));
     return cursor;
 }
 
 const uint8_t*
-FuncDefExport::deserialize(const uint8_t* cursor)
+FuncExport::deserialize(const uint8_t* cursor)
 {
     (cursor = sig_.deserialize(cursor)) &&
     (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
     return cursor;
 }
 
 size_t
-FuncDefExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+FuncExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return sig_.sizeOfExcludingThis(mallocSizeOf);
 }
 
 size_t
 FuncImport::serializedSize() const
 {
     return sig_.serializedSize() +
@@ -336,52 +336,52 @@ FuncImport::sizeOfExcludingThis(MallocSi
 {
     return sig_.sizeOfExcludingThis(mallocSizeOf);
 }
 
 CodeRange::CodeRange(Kind kind, Offsets offsets)
   : begin_(offsets.begin),
     profilingReturn_(0),
     end_(offsets.end),
-    funcDefIndex_(0),
+    funcIndex_(0),
     funcLineOrBytecode_(0),
     funcBeginToTableEntry_(0),
     funcBeginToTableProfilingJump_(0),
     funcBeginToNonProfilingEntry_(0),
     funcProfilingJumpToProfilingReturn_(0),
     funcProfilingEpilogueToProfilingReturn_(0),
     kind_(kind)
 {
     MOZ_ASSERT(begin_ <= end_);
     MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == FarJumpIsland);
 }
 
 CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
   : begin_(offsets.begin),
     profilingReturn_(offsets.profilingReturn),
     end_(offsets.end),
-    funcDefIndex_(0),
+    funcIndex_(0),
     funcLineOrBytecode_(0),
     funcBeginToTableEntry_(0),
     funcBeginToTableProfilingJump_(0),
     funcBeginToNonProfilingEntry_(0),
     funcProfilingJumpToProfilingReturn_(0),
     funcProfilingEpilogueToProfilingReturn_(0),
     kind_(kind)
 {
     MOZ_ASSERT(begin_ < profilingReturn_);
     MOZ_ASSERT(profilingReturn_ < end_);
     MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit || kind_ == TrapExit);
 }
 
-CodeRange::CodeRange(uint32_t funcDefIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
+CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
   : begin_(offsets.begin),
     profilingReturn_(offsets.profilingReturn),
     end_(offsets.end),
-    funcDefIndex_(funcDefIndex),
+    funcIndex_(funcIndex),
     funcLineOrBytecode_(funcLineOrBytecode),
     funcBeginToTableEntry_(offsets.tableEntry - begin_),
     funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
     funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
     funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
     funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
     kind_(Function)
 {
@@ -440,17 +440,17 @@ CacheableChars::sizeOfExcludingThis(Mall
     return mallocSizeOf(get());
 }
 
 size_t
 Metadata::serializedSize() const
 {
     return sizeof(pod()) +
            SerializedVectorSize(funcImports) +
-           SerializedVectorSize(funcDefExports) +
+           SerializedVectorSize(funcExports) +
            SerializedVectorSize(sigIds) +
            SerializedPodVectorSize(globals) +
            SerializedPodVectorSize(tables) +
            SerializedPodVectorSize(memoryAccesses) +
            SerializedPodVectorSize(memoryPatches) +
            SerializedPodVectorSize(boundsChecks) +
            SerializedPodVectorSize(codeRanges) +
            SerializedPodVectorSize(callSites) +
@@ -459,17 +459,17 @@ Metadata::serializedSize() const
            filename.serializedSize();
 }
 
 uint8_t*
 Metadata::serialize(uint8_t* cursor) const
 {
     cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
     cursor = SerializeVector(cursor, funcImports);
-    cursor = SerializeVector(cursor, funcDefExports);
+    cursor = SerializeVector(cursor, funcExports);
     cursor = SerializeVector(cursor, sigIds);
     cursor = SerializePodVector(cursor, globals);
     cursor = SerializePodVector(cursor, tables);
     cursor = SerializePodVector(cursor, memoryAccesses);
     cursor = SerializePodVector(cursor, memoryPatches);
     cursor = SerializePodVector(cursor, boundsChecks);
     cursor = SerializePodVector(cursor, codeRanges);
     cursor = SerializePodVector(cursor, callSites);
@@ -479,17 +479,17 @@ Metadata::serialize(uint8_t* cursor) con
     return cursor;
 }
 
 /* static */ const uint8_t*
 Metadata::deserialize(const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
     (cursor = DeserializeVector(cursor, &funcImports)) &&
-    (cursor = DeserializeVector(cursor, &funcDefExports)) &&
+    (cursor = DeserializeVector(cursor, &funcExports)) &&
     (cursor = DeserializeVector(cursor, &sigIds)) &&
     (cursor = DeserializePodVector(cursor, &globals)) &&
     (cursor = DeserializePodVector(cursor, &tables)) &&
     (cursor = DeserializePodVector(cursor, &memoryAccesses)) &&
     (cursor = DeserializePodVector(cursor, &memoryPatches)) &&
     (cursor = DeserializePodVector(cursor, &boundsChecks)) &&
     (cursor = DeserializePodVector(cursor, &codeRanges)) &&
     (cursor = DeserializePodVector(cursor, &callSites)) &&
@@ -498,60 +498,60 @@ Metadata::deserialize(const uint8_t* cur
     (cursor = filename.deserialize(cursor));
     return cursor;
 }
 
 size_t
 Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
-           SizeOfVectorExcludingThis(funcDefExports, mallocSizeOf) +
+           SizeOfVectorExcludingThis(funcExports, mallocSizeOf) +
            SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
            globals.sizeOfExcludingThis(mallocSizeOf) +
            tables.sizeOfExcludingThis(mallocSizeOf) +
            memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
            memoryPatches.sizeOfExcludingThis(mallocSizeOf) +
            boundsChecks.sizeOfExcludingThis(mallocSizeOf) +
            codeRanges.sizeOfExcludingThis(mallocSizeOf) +
            callSites.sizeOfExcludingThis(mallocSizeOf) +
            callThunks.sizeOfExcludingThis(mallocSizeOf) +
            funcNames.sizeOfExcludingThis(mallocSizeOf) +
            filename.sizeOfExcludingThis(mallocSizeOf);
 }
 
-struct ProjectIndex
+struct ProjectFuncIndex
 {
-    const FuncDefExportVector& funcDefExports;
+    const FuncExportVector& funcExports;
 
-    explicit ProjectIndex(const FuncDefExportVector& funcDefExports)
-      : funcDefExports(funcDefExports)
+    explicit ProjectFuncIndex(const FuncExportVector& funcExports)
+      : funcExports(funcExports)
     {}
     uint32_t operator[](size_t index) const {
-        return funcDefExports[index].funcDefIndex();
+        return funcExports[index].funcIndex();
     }
 };
 
-const FuncDefExport&
-Metadata::lookupFuncDefExport(uint32_t funcDefIndex) const
+const FuncExport&
+Metadata::lookupFuncExport(uint32_t funcIndex) const
 {
     size_t match;
-    if (!BinarySearch(ProjectIndex(funcDefExports), 0, funcDefExports.length(), funcDefIndex, &match))
+    if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(), funcIndex, &match))
         MOZ_CRASH("missing function export");
 
-    return funcDefExports[match];
+    return funcExports[match];
 }
 
 bool
-Metadata::getFuncDefName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcDefIndex,
-                         TwoByteName* name) const
+Metadata::getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
+                      TwoByteName* name) const
 {
-    if (funcDefIndex < funcNames.length()) {
+    if (funcIndex < funcNames.length()) {
         MOZ_ASSERT(maybeBytecode, "NameInBytecode requires preserved bytecode");
 
-        const NameInBytecode& n = funcNames[funcDefIndex];
+        const NameInBytecode& n = funcNames[funcIndex];
         MOZ_ASSERT(n.offset + n.length < maybeBytecode->length());
 
         if (n.length == 0)
             goto invalid;
 
         UTF8Chars utf8((const char*)maybeBytecode->begin() + n.offset, n.length);
 
         // This code could be optimized by having JS::UTF8CharsToNewTwoByteCharsZ
@@ -567,17 +567,17 @@ Metadata::getFuncDefName(JSContext* cx, 
         PodCopy(name->begin(), chars.get(), twoByteLength);
         return true;
     }
 
   invalid:
 
     // For names that are out of range or invalid, synthesize a name.
 
-    UniqueChars chars(JS_smprintf("wasm-function[%u]", funcDefIndex));
+    UniqueChars chars(JS_smprintf("wasm-function[%u]", funcIndex));
     if (!chars) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     if (!name->growByUninitialized(strlen(chars.get())))
         return false;
 
@@ -652,27 +652,27 @@ Code::lookupMemoryAccess(void* pc) const
     size_t match;
     if (!BinarySearch(MemoryAccessOffset(metadata_->memoryAccesses), lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &metadata_->memoryAccesses[match];
 }
 
 bool
-Code::getFuncDefName(JSContext* cx, uint32_t funcDefIndex, TwoByteName* name) const
+Code::getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const
 {
     const Bytes* maybeBytecode = maybeBytecode_ ? &maybeBytecode_.get()->bytes : nullptr;
-    return metadata_->getFuncDefName(cx, maybeBytecode, funcDefIndex, name);
+    return metadata_->getFuncName(cx, maybeBytecode, funcIndex, name);
 }
 
 JSAtom*
-Code::getFuncDefAtom(JSContext* cx, uint32_t funcDefIndex) const
+Code::getFuncAtom(JSContext* cx, uint32_t funcIndex) const
 {
     TwoByteName name(cx);
-    if (!getFuncDefName(cx, funcDefIndex, &name))
+    if (!getFuncName(cx, funcIndex, &name))
         return nullptr;
 
     return AtomizeChars(cx, name.begin(), name.length());
 }
 
 const char experimentalWarning[] =
     "Temporary\n"
     ".--.      .--.   ____       .-'''-. ,---.    ,---.\n"
@@ -782,37 +782,37 @@ Code::ensureProfilingState(JSContext* cx
     // do it now since, once we start sampling, we'll be in a signal-handing
     // context where we cannot malloc.
     if (newProfilingEnabled) {
         for (const CodeRange& codeRange : metadata_->codeRanges) {
             if (!codeRange.isFunction())
                 continue;
 
             TwoByteName name(cx);
-            if (!getFuncDefName(cx, codeRange.funcDefIndex(), &name))
+            if (!getFuncName(cx, codeRange.funcIndex(), &name))
                 return false;
             if (!name.append('\0'))
                 return false;
 
             TwoByteChars chars(name.begin(), name.length());
             UniqueChars utf8Name(JS::CharsToNewUTF8CharsZ(nullptr, chars).c_str());
             UniqueChars label(JS_smprintf("%s (%s:%u)",
                                           utf8Name.get(),
                                           metadata_->filename.get(),
                                           codeRange.funcLineOrBytecode()));
             if (!label) {
                 ReportOutOfMemory(cx);
                 return false;
             }
 
-            if (codeRange.funcDefIndex() >= funcLabels_.length()) {
-                if (!funcLabels_.resize(codeRange.funcDefIndex() + 1))
+            if (codeRange.funcIndex() >= funcLabels_.length()) {
+                if (!funcLabels_.resize(codeRange.funcIndex() + 1))
                     return false;
             }
-            funcLabels_[codeRange.funcDefIndex()] = Move(label);
+            funcLabels_[codeRange.funcIndex()] = Move(label);
         }
     } else {
         funcLabels_.clear();
     }
 
     // Only mutate the code after the fallible operations are complete to avoid
     // the need to rollback.
     profilingEnabled_ = newProfilingEnabled;
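
lookupFuncExport above depends on the FuncExportVector being kept sorted by function index, so the ProjectFuncIndex adapter lets BinarySearch find an export in O(log n). The same idea as a standalone sketch using std::lower_bound (FuncExport is a stand-in struct here, not the real class):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct FuncExport { uint32_t funcIndex; /* sig, codeRangeIndex, entryOffset */ };

    // Sketch: O(log n) lookup over a vector sorted by funcIndex, mirroring
    // Metadata::lookupFuncExport (which MOZ_CRASHes on a missing export).
    static const FuncExport* LookupFuncExport(const std::vector<FuncExport>& exports,
                                              uint32_t funcIndex) {
        auto it = std::lower_bound(exports.begin(), exports.end(), funcIndex,
                                   [](const FuncExport& fe, uint32_t target) {
                                       return fe.funcIndex < target;
                                   });
        if (it == exports.end() || it->funcIndex != funcIndex)
            return nullptr;
        return &*it;
    }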
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -117,66 +117,66 @@ struct ShareableBytes : ShareableBase<Sh
     const uint8_t* end() const { return bytes.end(); }
     size_t length() const { return bytes.length(); }
     bool append(const uint8_t *p, uint32_t ct) { return bytes.append(p, ct); }
 };
 
 typedef RefPtr<ShareableBytes> MutableBytes;
 typedef RefPtr<const ShareableBytes> SharedBytes;
 
-// A FuncDefExport represents a single function definition inside a wasm Module
-// that has been exported one or more times. A FuncDefExport represents an
+// A FuncExport represents a single function definition inside a wasm Module
+// that has been exported one or more times. A FuncExport represents an
 // internal entry point that can be called via function definition index by
-// Instance::callExport(). To allow O(log(n)) lookup of a FuncDefExport by
-// function definition index, the FuncDefExportVector is stored sorted by
+// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
+// function definition index, the FuncExportVector is stored sorted by
 // function definition index.
 
-class FuncDefExport
+class FuncExport
 {
     Sig sig_;
     MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
-        uint32_t funcDefIndex_;
+        uint32_t funcIndex_;
         uint32_t codeRangeIndex_;
         uint32_t entryOffset_;
     } pod;
 
   public:
-    FuncDefExport() = default;
-    explicit FuncDefExport(Sig&& sig,
-                           uint32_t funcDefIndex,
-                           uint32_t codeRangeIndex)
+    FuncExport() = default;
+    explicit FuncExport(Sig&& sig,
+                        uint32_t funcIndex,
+                        uint32_t codeRangeIndex)
       : sig_(Move(sig))
     {
-        pod.funcDefIndex_ = funcDefIndex;
+        pod.funcIndex_ = funcIndex;
         pod.codeRangeIndex_ = codeRangeIndex;
         pod.entryOffset_ = UINT32_MAX;
     }
     void initEntryOffset(uint32_t entryOffset) {
         MOZ_ASSERT(pod.entryOffset_ == UINT32_MAX);
         pod.entryOffset_ = entryOffset;
     }
 
     const Sig& sig() const {
         return sig_;
     }
-    uint32_t funcDefIndex() const {
-        return pod.funcDefIndex_;
+    uint32_t funcIndex() const {
+        return pod.funcIndex_;
     }
     uint32_t codeRangeIndex() const {
         return pod.codeRangeIndex_;
     }
     uint32_t entryOffset() const {
         MOZ_ASSERT(pod.entryOffset_ != UINT32_MAX);
         return pod.entryOffset_;
     }
 
-    WASM_DECLARE_SERIALIZABLE(FuncDefExport)
+    WASM_DECLARE_SERIALIZABLE(FuncExport)
 };
 
-typedef Vector<FuncDefExport, 0, SystemAllocPolicy> FuncDefExportVector;
+typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
 
 // An FuncImport contains the runtime metadata needed to implement a call to an
 // imported function. Each function import has two call stubs: an optimized path
 // into JIT code and a slow path into the generic C++ js::Invoke and these
 // offsets of these stubs are stored so that function-import callsites can be
 // dynamically patched at runtime.
 
 class FuncImport
@@ -246,30 +246,30 @@ class CodeRange
                            // replaces/loses preceding innermost frame
     };
 
   private:
     // All fields are treated as cacheable POD:
     uint32_t begin_;
     uint32_t profilingReturn_;
     uint32_t end_;
-    uint32_t funcDefIndex_;
+    uint32_t funcIndex_;
     uint32_t funcLineOrBytecode_;
     uint8_t funcBeginToTableEntry_;
     uint8_t funcBeginToTableProfilingJump_;
     uint8_t funcBeginToNonProfilingEntry_;
     uint8_t funcProfilingJumpToProfilingReturn_;
     uint8_t funcProfilingEpilogueToProfilingReturn_;
     Kind kind_ : 8;
 
   public:
     CodeRange() = default;
     CodeRange(Kind kind, Offsets offsets);
     CodeRange(Kind kind, ProfilingOffsets offsets);
-    CodeRange(uint32_t funcDefIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
+    CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
 
     // All CodeRanges have a begin and end.
 
     uint32_t begin() const {
         return begin_;
     }
     uint32_t end() const {
         return end_;
@@ -324,19 +324,19 @@ class CodeRange
     uint32_t funcProfilingJump() const {
         MOZ_ASSERT(isFunction());
         return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
     }
     uint32_t funcProfilingEpilogue() const {
         MOZ_ASSERT(isFunction());
         return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
     }
-    uint32_t funcDefIndex() const {
+    uint32_t funcIndex() const {
         MOZ_ASSERT(isFunction());
-        return funcDefIndex_;
+        return funcIndex_;
     }
     uint32_t funcLineOrBytecode() const {
         MOZ_ASSERT(isFunction());
         return funcLineOrBytecode_;
     }
 
     // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
 
@@ -358,21 +358,21 @@ WASM_DECLARE_POD_VECTOR(CodeRange, CodeR
 // patched at runtime when profiling is toggled. Thunks are emitted to connect
 // callsites that are too far away from callees to fit in a single call
 // instruction's relative offset.
 
 struct CallThunk
 {
     uint32_t offset;
     union {
-        uint32_t funcDefIndex;
+        uint32_t funcIndex;
         uint32_t codeRangeIndex;
     } u;
 
-    CallThunk(uint32_t offset, uint32_t funcDefIndex) : offset(offset) { u.funcDefIndex = funcDefIndex; }
+    CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
     CallThunk() = default;
 };
 
 WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
 
 // CacheableChars is used to cacheably store UniqueChars.
 
 struct CacheableChars : UniqueChars
@@ -442,33 +442,33 @@ struct Metadata : ShareableBase<Metadata
 {
     explicit Metadata(ModuleKind kind = ModuleKind::Wasm) : MetadataCacheablePod(kind) {}
     virtual ~Metadata() {}
 
     MetadataCacheablePod& pod() { return *this; }
     const MetadataCacheablePod& pod() const { return *this; }
 
     FuncImportVector      funcImports;
-    FuncDefExportVector   funcDefExports;
+    FuncExportVector      funcExports;
     SigWithIdVector       sigIds;
     GlobalDescVector      globals;
     TableDescVector       tables;
     MemoryAccessVector    memoryAccesses;
     MemoryPatchVector     memoryPatches;
     BoundsCheckVector     boundsChecks;
     CodeRangeVector       codeRanges;
     CallSiteVector        callSites;
     CallThunkVector       callThunks;
     NameInBytecodeVector  funcNames;
     CacheableChars        filename;
 
     bool usesMemory() const { return UsesMemory(memoryUsage); }
     bool hasSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
 
-    const FuncDefExport& lookupFuncDefExport(uint32_t funcDefIndex) const;
+    const FuncExport& lookupFuncExport(uint32_t funcIndex) const;
 
     // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
     // encapsulated within AsmJS.cpp, but the additional virtual functions allow
     // asm.js to override wasm behavior in the handful of cases that can't be
     // easily encapsulated by AsmJS.cpp.
 
     bool isAsmJS() const {
         return kind == ModuleKind::AsmJS;
@@ -481,18 +481,18 @@ struct Metadata : ShareableBase<Metadata
         return false;
     }
     virtual const char16_t* displayURL() const {
         return nullptr;
     }
     virtual ScriptSource* maybeScriptSource() const {
         return nullptr;
     }
-    virtual bool getFuncDefName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcDefIndex,
-                                TwoByteName* name) const;
+    virtual bool getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
+                             TwoByteName* name) const;
 
     WASM_DECLARE_SERIALIZABLE_VIRTUAL(Metadata);
 };
 
 typedef RefPtr<Metadata> MutableMetadata;
 typedef RefPtr<const Metadata> SharedMetadata;
 
 // Code objects own executable code and the metadata that describes it. At the
@@ -522,35 +522,35 @@ class Code
 
     const CallSite* lookupCallSite(void* returnAddress) const;
     const CodeRange* lookupRange(void* pc) const;
     const MemoryAccess* lookupMemoryAccess(void* pc) const;
 
     // Return the name associated with a given function index, or generate one
     // if none was given by the module.
 
-    bool getFuncDefName(JSContext* cx, uint32_t funcDefIndex, TwoByteName* name) const;
-    JSAtom* getFuncDefAtom(JSContext* cx, uint32_t funcDefIndex) const;
+    bool getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const;
+    JSAtom* getFuncAtom(JSContext* cx, uint32_t funcIndex) const;
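When the module supplies no name, the generated fallback is an indexed label. A minimal sketch of such synthesis, assuming the "wasm-function[N]" format and ignoring that the real code builds a TwoByteName rather than an ASCII string:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical helper, for illustration only.
static bool
SynthesizeDefaultName(uint32_t funcIndex, char* buf, size_t bufLen)
{
    int n = snprintf(buf, bufLen, "wasm-function[%u]", (unsigned)funcIndex);
    return n > 0 && size_t(n) < bufLen;  // false on error or truncation
}

Since the index here is now the full function index, imports shift the labels of the definitions that follow them.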
 
     // If the source bytecode was saved when this Code was constructed, this
     // method will render the binary as text. Otherwise, a diagnostic string
     // will be returned.
 
     JSString* createText(JSContext* cx);
     bool getLineOffsets(size_t lineno, Vector<uint32_t>& offsets) const;
 
     // Each Code has a profiling mode that is updated to match the runtime's
     // profiling mode when there are no other activations of the code live on
     // the stack. Once in profiling mode, ProfilingFrameIterator can be used to
     // asynchronously walk the stack. Otherwise, the ProfilingFrameIterator will
     // skip any activations of this code.
 
     MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
     bool profilingEnabled() const { return profilingEnabled_; }
-    const char* profilingLabel(uint32_t funcDefIndex) const { return funcLabels_[funcDefIndex].get(); }
+    const char* profilingLabel(uint32_t funcIndex) const { return funcLabels_[funcIndex].get(); }
 
     // about:memory reporting:
 
     void addSizeOfMisc(MallocSizeOf mallocSizeOf,
                        Metadata::SeenSet* seenMetadata,
                        ShareableBytes::SeenSet* seenBytes,
                        size_t* code,
                        size_t* data) const;
--- a/js/src/wasm/WasmCompile.cpp
+++ b/js/src/wasm/WasmCompile.cpp
@@ -100,27 +100,27 @@ static bool
 DecodeCallReturn(FunctionDecoder& f, const Sig& sig)
 {
     return f.iter().readCallReturn(sig.ret());
 }
 
 static bool
 DecodeCall(FunctionDecoder& f)
 {
-    uint32_t calleeIndex;
-    if (!f.iter().readCall(&calleeIndex))
+    uint32_t funcIndex;
+    if (!f.iter().readCall(&funcIndex))
         return false;
 
-    if (calleeIndex >= f.mg().numFuncs())
+    if (funcIndex >= f.mg().numFuncs())
         return f.iter().fail("callee index out of range");
 
     if (!f.iter().inReachableCode())
         return true;
 
-    const Sig* sig = &f.mg().funcSig(calleeIndex);
+    const Sig* sig = &f.mg().funcSig(funcIndex);
 
     return DecodeCallArgs(f, *sig) &&
            DecodeCallReturn(f, *sig);
 }
 
 static bool
 DecodeCallIndirect(FunctionDecoder& f)
 {
@@ -522,25 +522,29 @@ DecodeFunctionSection(Decoder& d, Module
         return false;
     if (sectionStart == Decoder::NotStarted)
         return true;
 
     uint32_t numDefs;
     if (!d.readVarU32(&numDefs))
         return d.fail("expected number of function definitions");
 
-    if (numDefs > MaxFuncs)
+    uint32_t numFuncs = init->funcSigs.length() + numDefs;
+    if (numFuncs > MaxFuncs)
         return d.fail("too many functions");
 
-    if (!init->funcDefSigs.resize(numDefs))
+    if (!init->funcSigs.reserve(numFuncs))
         return false;
 
     for (uint32_t i = 0; i < numDefs; i++) {
-        if (!DecodeSignatureIndex(d, *init, &init->funcDefSigs[i]))
+        const SigWithId* sig;
+        if (!DecodeSignatureIndex(d, *init, &sig))
             return false;
+
+        init->funcSigs.infallibleAppend(sig);
     }
 
     if (!d.finishSection(sectionStart, sectionSize, "function"))
         return false;
 
     return true;
 }
 
@@ -635,17 +639,17 @@ DecodeImport(Decoder& d, ModuleGenerator
     if (!d.readVarU32(&importKind))
         return d.fail("failed to read import kind");
 
     switch (DefinitionKind(importKind)) {
       case DefinitionKind::Function: {
         const SigWithId* sig = nullptr;
         if (!DecodeSignatureIndex(d, *init, &sig))
             return false;
-        if (!init->funcImports.emplaceBack(sig))
+        if (!init->funcSigs.emplaceBack(sig))
             return false;
         break;
       }
       case DefinitionKind::Table: {
         if (!DecodeResizableTable(d, init))
             return false;
         break;
       }
@@ -688,16 +692,20 @@ DecodeImportSection(Decoder& d, ModuleGe
     if (numImports > MaxImports)
         return d.fail("too many imports");
 
     for (uint32_t i = 0; i < numImports; i++) {
         if (!DecodeImport(d, init, imports))
             return false;
     }
 
+    // The global data offsets will be filled in by ModuleGenerator::init.
+    if (!init->funcImportGlobalDataOffsets.resize(init->funcSigs.length()))
+        return false;
+
     if (!d.finishSection(sectionStart, sectionSize, "import"))
         return false;
 
     return true;
 }
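Worth noting: when the import section finishes decoding, every entry in funcSigs is an import, so sizing funcImportGlobalDataOffsets to funcSigs.length() at this point is what lets funcIsImport() be a simple prefix test against this vector's length later on.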
 
 static bool
 DecodeTableSection(Decoder& d, ModuleGeneratorData* init, Uint32Vector* oldElems)
@@ -815,17 +823,17 @@ DecodeExport(Decoder& d, ModuleGenerator
       case DefinitionKind::Function: {
         uint32_t funcIndex;
         if (!d.readVarU32(&funcIndex))
             return d.fail("expected export internal index");
 
         if (funcIndex >= mg.numFuncs())
             return d.fail("exported function index out of bounds");
 
-        return mg.addFuncDefExport(Move(fieldName), funcIndex);
+        return mg.addFuncExport(Move(fieldName), funcIndex);
       }
       case DefinitionKind::Table: {
         uint32_t tableIndex;
         if (!d.readVarU32(&tableIndex))
             return d.fail("expected table index");
 
         if (tableIndex >= mg.tables().length())
             return d.fail("exported table index out of bounds");
@@ -890,34 +898,34 @@ DecodeExportSection(Decoder& d, ModuleGe
 
     if (!d.finishSection(sectionStart, sectionSize, "export"))
         return false;
 
     return true;
 }
 
 static bool
-DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcDefIndex)
+DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcIndex)
 {
     uint32_t bodySize;
     if (!d.readVarU32(&bodySize))
         return d.fail("expected number of function body bytes");
 
     if (d.bytesRemain() < bodySize)
         return d.fail("function body length too big");
 
     const uint8_t* bodyBegin = d.currentPosition();
     const size_t offsetInModule = d.currentOffset();
 
     FunctionGenerator fg;
     if (!mg.startFuncDef(offsetInModule, &fg))
         return false;
 
     ValTypeVector locals;
-    const Sig& sig = mg.funcDefSig(funcDefIndex);
+    const Sig& sig = mg.funcSig(funcIndex);
     if (!locals.appendAll(sig.args()))
         return false;
 
     if (!DecodeLocalEntries(d, &locals))
         return d.fail("failed decoding local entries");
 
     for (ValType type : locals) {
         if (!CheckValType(d, type))
@@ -938,17 +946,17 @@ DecodeFunctionBody(Decoder& d, ModuleGen
     if (d.currentPosition() != bodyBegin + bodySize)
         return d.fail("function body length mismatch");
 
     if (!fg.bytes().resize(bodySize))
         return false;
 
     memcpy(fg.bytes().begin(), bodyBegin, bodySize);
 
-    return mg.finishFuncDef(funcDefIndex, &fg);
+    return mg.finishFuncDef(funcIndex, &fg);
 }
 
 static bool
 DecodeStartSection(Decoder& d, ModuleGenerator& mg)
 {
     uint32_t sectionStart, sectionSize;
     if (!d.startSection(SectionId::Start, &sectionStart, &sectionSize, "start"))
         return false;
@@ -997,18 +1005,18 @@ DecodeCodeSection(Decoder& d, ModuleGene
 
     uint32_t numFuncDefs;
     if (!d.readVarU32(&numFuncDefs))
         return d.fail("expected function body count");
 
     if (numFuncDefs != mg.numFuncDefs())
         return d.fail("function body count does not match function signature count");
 
-    for (uint32_t i = 0; i < numFuncDefs; i++) {
-        if (!DecodeFunctionBody(d, mg, i))
+    for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
+        if (!DecodeFunctionBody(d, mg, mg.numFuncImports() + funcDefIndex))
             return false;
     }
 
     if (!d.finishSection(sectionStart, sectionSize, "code"))
         return false;
 
     return mg.finishFuncDefs();
 }
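The biased loop above is the heart of the patch: imports occupy function indices [0, numFuncImports) and definitions occupy [numFuncImports, numFuncs). A self-contained sketch of the two translations (illustrative names, not the ModuleGenerator API):

#include <stdint.h>

// funcDefIndex -> funcIndex: definitions follow all imports.
static inline uint32_t
FuncDefToFuncIndex(uint32_t numFuncImports, uint32_t funcDefIndex)
{
    return numFuncImports + funcDefIndex;
}

// funcIndex -> funcDefIndex: only meaningful when the function is
// not an import, i.e. funcIndex >= numFuncImports.
static inline uint32_t
FuncIndexToFuncDef(uint32_t numFuncImports, uint32_t funcIndex)
{
    return funcIndex - numFuncImports;
}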
--- a/js/src/wasm/WasmFrameIterator.cpp
+++ b/js/src/wasm/WasmFrameIterator.cpp
@@ -186,17 +186,17 @@ FrameIterator::functionDisplayAtom() con
             return cx->names().empty;
         }
 
         return atom;
     }
 
     MOZ_ASSERT(codeRange_);
 
-    JSAtom* atom = code_->getFuncDefAtom(cx, codeRange_->funcDefIndex());
+    JSAtom* atom = code_->getFuncAtom(cx, codeRange_->funcIndex());
     if (!atom) {
         cx->clearPendingException();
         return cx->names().empty;
     }
 
     return atom;
 }
 
@@ -780,17 +780,17 @@ ProfilingFrameIterator::label() const
         return importInterpDescription;
       case ExitReason::Native:
         return nativeDescription;
       case ExitReason::Trap:
         return trapDescription;
     }
 
     switch (codeRange_->kind()) {
-      case CodeRange::Function:         return code_->profilingLabel(codeRange_->funcDefIndex());
+      case CodeRange::Function:         return code_->profilingLabel(codeRange_->funcIndex());
       case CodeRange::Entry:            return "entry trampoline (in asm.js)";
       case CodeRange::ImportJitExit:    return importJitDescription;
       case CodeRange::ImportInterpExit: return importInterpDescription;
       case CodeRange::TrapExit:         return trapDescription;
       case CodeRange::Inline:           return "inline stub (in asm.js)";
       case CodeRange::FarJumpIsland:    return "interstitial (in asm.js)";
     }
 
@@ -798,17 +798,17 @@ ProfilingFrameIterator::label() const
 }
 
 /*****************************************************************************/
 // Runtime patching to enable/disable profiling
 
 void
 wasm::ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled)
 {
-    if (callSite.kind() != CallSite::FuncDef)
+    if (callSite.kind() != CallSite::Func)
         return;
 
     uint8_t* callerRetAddr = code.segment().base() + callSite.returnAddressOffset();
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     void* callee = X86Encoding::GetRel32Target(callerRetAddr);
 #elif defined(JS_CODEGEN_ARM)
     uint8_t* caller = callerRetAddr - 4;
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -36,32 +36,34 @@ using namespace js::wasm;
 using mozilla::CheckedInt;
 using mozilla::MakeEnumeratedRange;
 
 // ****************************************************************************
 // ModuleGenerator
 
 static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
 static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
+static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
 
 ModuleGenerator::ModuleGenerator(ImportVector&& imports)
   : alwaysBaseline_(false),
     imports_(Move(imports)),
     numSigs_(0),
     numTables_(0),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
     masmAlloc_(&lifo_),
     masm_(MacroAssembler::WasmToken(), masmAlloc_),
     lastPatchedCallsite_(0),
     startOfUnpatchedCallsites_(0),
     parallel_(false),
     outstanding_(0),
     activeFuncDef_(nullptr),
     startedFuncDefs_(false),
-    finishedFuncDefs_(false)
+    finishedFuncDefs_(false),
+    numFinishedFuncDefs_(0)
 {
     MOZ_ASSERT(IsCompilingWasm());
 }
 
 ModuleGenerator::~ModuleGenerator()
 {
     if (parallel_) {
         // Wait for any outstanding jobs to fail or complete.
@@ -98,17 +100,20 @@ ModuleGenerator::~ModuleGenerator()
 
 bool
 ModuleGenerator::init(UniqueModuleGeneratorData shared, const CompileArgs& args,
                       Metadata* maybeAsmJSMetadata)
 {
     shared_ = Move(shared);
     alwaysBaseline_ = args.alwaysBaseline;
 
-    if (!exportedFuncDefs_.init())
+    if (!exportedFuncs_.init())
+        return false;
+
+    if (!funcToCodeRange_.appendN(BAD_CODE_RANGE, shared_->funcSigs.length()))
         return false;
 
     linkData_.globalDataLength = AlignBytes(InitialGlobalDataBytes, sizeof(void*));
 
     // asm.js passes in an AsmJSMetadata subclass to use instead.
     if (maybeAsmJSMetadata) {
         metadata_ = maybeAsmJSMetadata;
         MOZ_ASSERT(isAsmJS());
@@ -132,23 +137,20 @@ ModuleGenerator::init(UniqueModuleGenera
     // and will be initialized in a linear order via init* functions as the
     // module is generated. For wasm, the Vectors are correctly-sized and
     // already initialized.
 
     if (!isAsmJS()) {
         numSigs_ = shared_->sigs.length();
         numTables_ = shared_->tables.length();
 
-        shared_->firstFuncDefIndex = shared_->funcImports.length();
-
-        for (FuncImportGenDesc& funcImport : shared_->funcImports) {
-            MOZ_ASSERT(!funcImport.globalDataOffset);
-            funcImport.globalDataOffset = linkData_.globalDataLength;
+        for (size_t i = 0; i < shared_->funcImportGlobalDataOffsets.length(); i++) {
+            shared_->funcImportGlobalDataOffsets[i] = linkData_.globalDataLength;
             linkData_.globalDataLength += sizeof(FuncImportTls);
-            if (!addFuncImport(*funcImport.sig, funcImport.globalDataOffset))
+            if (!addFuncImport(*shared_->funcSigs[i], shared_->funcImportGlobalDataOffsets[i]))
                 return false;
         }
 
         for (const Import& import : imports_) {
             if (import.kind == DefinitionKind::Table) {
                 MOZ_ASSERT(shared_->tables.length() == 1);
                 shared_->tables[0].external = true;
                 break;
@@ -217,44 +219,33 @@ ModuleGenerator::finishOutstandingTask()
 
             HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
         }
     }
 
     return finishTask(task);
 }
 
-static const uint32_t BadCodeRange = UINT32_MAX;
-
 bool
-ModuleGenerator::funcIndexIsDef(uint32_t funcIndex) const
+ModuleGenerator::funcIsImport(uint32_t funcIndex) const
 {
-    MOZ_ASSERT(funcIndex < numFuncImports() + numFuncDefs());
-    return funcIndex >= numFuncImports();
-}
-
-uint32_t
-ModuleGenerator::funcIndexToDef(uint32_t funcIndex) const
-{
-    MOZ_ASSERT(funcIndexIsDef(funcIndex));
-    return funcIndex - numFuncImports();
+    return funcIndex < shared_->funcImportGlobalDataOffsets.length();
 }
 
 bool
-ModuleGenerator::funcIsDefined(uint32_t funcDefIndex) const
+ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const
 {
-    return funcDefIndex < funcDefIndexToCodeRange_.length() &&
-           funcDefIndexToCodeRange_[funcDefIndex] != BadCodeRange;
+    return funcToCodeRange_[funcIndex] != BAD_CODE_RANGE;
 }
 
 const CodeRange&
-ModuleGenerator::funcDefCodeRange(uint32_t funcDefIndex) const
+ModuleGenerator::funcCodeRange(uint32_t funcIndex) const
 {
-    MOZ_ASSERT(funcIsDefined(funcDefIndex));
-    const CodeRange& cr = metadata_->codeRanges[funcDefIndexToCodeRange_[funcDefIndex]];
+    MOZ_ASSERT(funcIsCompiled(funcIndex));
+    const CodeRange& cr = metadata_->codeRanges[funcToCodeRange_[funcIndex]];
     MOZ_ASSERT(cr.isFunction());
     return cr;
 }
 
 static uint32_t
 JumpRange()
 {
     return Min(JitOptions.jumpThreshold, JumpImmediateRange);
@@ -285,44 +276,44 @@ ModuleGenerator::patchCallSites(TrapExit
         const CallSiteAndTarget& cs = masm_.callSites()[lastPatchedCallsite_];
         uint32_t callerOffset = cs.returnAddressOffset();
         MOZ_RELEASE_ASSERT(callerOffset < INT32_MAX);
 
         switch (cs.kind()) {
           case CallSiteDesc::Dynamic:
           case CallSiteDesc::Symbolic:
             break;
-          case CallSiteDesc::FuncDef: {
-            if (funcIsDefined(cs.funcDefIndex())) {
-                uint32_t calleeOffset = funcDefCodeRange(cs.funcDefIndex()).funcNonProfilingEntry();
+          case CallSiteDesc::Func: {
+            if (funcIsCompiled(cs.funcIndex())) {
+                uint32_t calleeOffset = funcCodeRange(cs.funcIndex()).funcNonProfilingEntry();
                 MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
 
                 if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
                     masm_.patchCall(callerOffset, calleeOffset);
                     break;
                 }
             }
 
-            OffsetMap::AddPtr p = existingCallFarJumps.lookupForAdd(cs.funcDefIndex());
+            OffsetMap::AddPtr p = existingCallFarJumps.lookupForAdd(cs.funcIndex());
             if (!p) {
                 Offsets offsets;
                 offsets.begin = masm_.currentOffset();
                 uint32_t jumpOffset = masm_.farJumpWithPatch().offset();
                 offsets.end = masm_.currentOffset();
                 if (masm_.oom())
                     return false;
 
                 if (!metadata_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets))
                     return false;
-                if (!existingCallFarJumps.add(p, cs.funcDefIndex(), offsets.begin))
+                if (!existingCallFarJumps.add(p, cs.funcIndex(), offsets.begin))
                     return false;
 
                 // Record calls' far jumps in metadata since they must be
                 // repatched at runtime when profiling mode is toggled.
-                if (!metadata_->callThunks.emplaceBack(jumpOffset, cs.funcDefIndex()))
+                if (!metadata_->callThunks.emplaceBack(jumpOffset, cs.funcIndex()))
                     return false;
             }
 
             masm_.patchCall(callerOffset, p->value());
             break;
           }
           case CallSiteDesc::TrapExit: {
             if (maybeTrapExits) {
@@ -375,82 +366,75 @@ ModuleGenerator::finishTask(IonCompileTa
 
     // Offset the recorded FuncOffsets by the offset of the function in the
     // whole module's code segment.
     uint32_t offsetInWhole = masm_.size();
     results.offsets().offsetBy(offsetInWhole);
 
     // Add the CodeRange for this function.
     uint32_t funcCodeRangeIndex = metadata_->codeRanges.length();
-    if (!metadata_->codeRanges.emplaceBack(func.defIndex(), func.lineOrBytecode(), results.offsets()))
+    if (!metadata_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
         return false;
 
-    // Maintain a mapping from function index to CodeRange index.
-    if (func.defIndex() >= funcDefIndexToCodeRange_.length()) {
-        uint32_t n = func.defIndex() - funcDefIndexToCodeRange_.length() + 1;
-        if (!funcDefIndexToCodeRange_.appendN(BadCodeRange, n))
-            return false;
-    }
-    MOZ_ASSERT(!funcIsDefined(func.defIndex()));
-    funcDefIndexToCodeRange_[func.defIndex()] = funcCodeRangeIndex;
+    MOZ_ASSERT(!funcIsCompiled(func.index()));
+    funcToCodeRange_[func.index()] = funcCodeRangeIndex;
 
     // Merge the compiled results into the whole-module masm.
     mozilla::DebugOnly<size_t> sizeBefore = masm_.size();
     if (!masm_.asmMergeWith(results.masm()))
         return false;
     MOZ_ASSERT(masm_.size() == offsetInWhole + results.masm().size());
 
     freeTasks_.infallibleAppend(task);
     return true;
 }
 
 bool
-ModuleGenerator::finishFuncDefExports()
+ModuleGenerator::finishFuncExports()
 {
-    // ModuleGenerator::exportedFuncDefs_ is an unordered HashSet. The
-    // FuncDefExportVector stored in Metadata needs to be stored sorted by
+    // ModuleGenerator::exportedFuncs_ is an unordered HashSet. The
+    // FuncExportVector stored in Metadata needs to be stored sorted by
     // function index to allow O(log(n)) lookup at runtime.
 
-    Uint32Vector funcDefIndices;
-    if (!funcDefIndices.reserve(exportedFuncDefs_.count()))
+    Uint32Vector sorted;
+    if (!sorted.reserve(exportedFuncs_.count()))
         return false;
 
-    for (Uint32Set::Range r = exportedFuncDefs_.all(); !r.empty(); r.popFront())
-        funcDefIndices.infallibleAppend(r.front());
+    for (Uint32Set::Range r = exportedFuncs_.all(); !r.empty(); r.popFront())
+        sorted.infallibleAppend(r.front());
 
-    std::sort(funcDefIndices.begin(), funcDefIndices.end());
+    std::sort(sorted.begin(), sorted.end());
 
-    MOZ_ASSERT(metadata_->funcDefExports.empty());
-    if (!metadata_->funcDefExports.reserve(exportedFuncDefs_.count()))
+    MOZ_ASSERT(metadata_->funcExports.empty());
+    if (!metadata_->funcExports.reserve(sorted.length()))
         return false;
 
-    for (uint32_t funcDefIndex : funcDefIndices) {
+    for (uint32_t funcIndex : sorted) {
         Sig sig;
-        if (!sig.clone(funcDefSig(funcDefIndex)))
+        if (!sig.clone(funcSig(funcIndex)))
             return false;
 
-        metadata_->funcDefExports.infallibleEmplaceBack(Move(sig),
-                                                        funcDefIndex,
-                                                        funcDefIndexToCodeRange_[funcDefIndex]);
+        uint32_t codeRangeIndex = funcToCodeRange_[funcIndex];
+        metadata_->funcExports.infallibleEmplaceBack(Move(sig), funcIndex, codeRangeIndex);
     }
 
     return true;
 }
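The sort pays off at runtime: lookupFuncExport() can binary-search the vector by function index. A sketch with simplified stand-in types (the real FuncExport also carries the cloned Sig and, later, its entry offset):

#include <stdint.h>
#include <algorithm>
#include <vector>

struct Export { uint32_t funcIndex; /* Sig, entry offset, ... */ };

static const Export*
LookupFuncExport(const std::vector<Export>& exports, uint32_t funcIndex)
{
    auto it = std::lower_bound(exports.begin(), exports.end(), funcIndex,
                               [](const Export& e, uint32_t target) {
                                   return e.funcIndex < target;
                               });
    if (it == exports.end() || it->funcIndex != funcIndex)
        return nullptr;  // not an exported function
    return &*it;
}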
 
 typedef Vector<Offsets, 0, SystemAllocPolicy> OffsetVector;
 typedef Vector<ProfilingOffsets, 0, SystemAllocPolicy> ProfilingOffsetVector;
 
 bool
 ModuleGenerator::finishCodegen()
 {
     masm_.haltingAlign(CodeAlignment);
     uint32_t offsetInWhole = masm_.size();
 
-    uint32_t numFuncDefExports = metadata_->funcDefExports.length();
-    MOZ_ASSERT(numFuncDefExports == exportedFuncDefs_.count());
+    uint32_t numFuncExports = metadata_->funcExports.length();
+    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());
 
     // Generate stubs in a separate MacroAssembler since, otherwise, for modules
     // larger than the JumpImmediateRange, even local uses of Label will fail
     // due to the large absolute offsets temporarily stored by Label::bind().
 
     OffsetVector entries;
     ProfilingOffsetVector interpExits;
     ProfilingOffsetVector jitExits;
@@ -460,20 +444,20 @@ ModuleGenerator::finishCodegen()
     Offsets interruptExit;
     Offsets throwStub;
 
     {
         TempAllocator alloc(&lifo_);
         MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
         Label throwLabel;
 
-        if (!entries.resize(numFuncDefExports))
+        if (!entries.resize(numFuncExports))
             return false;
-        for (uint32_t i = 0; i < numFuncDefExports; i++)
-            entries[i] = GenerateEntry(masm, metadata_->funcDefExports[i]);
+        for (uint32_t i = 0; i < numFuncExports; i++)
+            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);
 
         if (!interpExits.resize(numFuncImports()))
             return false;
         if (!jitExits.resize(numFuncImports()))
             return false;
         for (uint32_t i = 0; i < numFuncImports(); i++) {
             interpExits[i] = GenerateInterpExit(masm, metadata_->funcImports[i], i, &throwLabel);
             jitExits[i] = GenerateJitExit(masm, metadata_->funcImports[i], &throwLabel);
@@ -489,19 +473,19 @@ ModuleGenerator::finishCodegen()
 
         if (masm.oom() || !masm_.asmMergeWith(masm))
             return false;
     }
 
     // Adjust each of the resulting Offsets (to account for being merged into
     // masm_) and then create code ranges for all the stubs.
 
-    for (uint32_t i = 0; i < numFuncDefExports; i++) {
+    for (uint32_t i = 0; i < numFuncExports; i++) {
         entries[i].offsetBy(offsetInWhole);
-        metadata_->funcDefExports[i].initEntryOffset(entries[i].begin);
+        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
         if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
             return false;
     }
 
     for (uint32_t i = 0; i < numFuncImports(); i++) {
         interpExits[i].offsetBy(offsetInWhole);
         metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
         if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
@@ -543,20 +527,20 @@ ModuleGenerator::finishCodegen()
     // Now that all other code has been emitted, patch all remaining callsites.
 
     if (!patchCallSites(&trapExits))
         return false;
 
     // Now that all code has been generated, patch far jumps to destinations.
 
     for (CallThunk& callThunk : metadata_->callThunks) {
-        uint32_t funcDefIndex = callThunk.u.funcDefIndex;
-        callThunk.u.codeRangeIndex = funcDefIndexToCodeRange_[funcDefIndex];
+        uint32_t funcIndex = callThunk.u.funcIndex;
+        callThunk.u.codeRangeIndex = funcToCodeRange_[funcIndex];
         CodeOffset farJump(callThunk.offset);
-        masm_.patchFarJump(farJump, funcDefCodeRange(funcDefIndex).funcNonProfilingEntry());
+        masm_.patchFarJump(farJump, funcCodeRange(funcIndex).funcNonProfilingEntry());
     }
 
     for (const TrapFarJump& farJump : masm_.trapFarJumps())
         masm_.patchFarJump(farJump.jump, trapExits[farJump.trap].begin);
 
     // Code-generation is complete!
 
     masm_.finish();
@@ -708,22 +692,22 @@ ModuleGenerator::initSig(uint32_t sigInd
 const SigWithId&
 ModuleGenerator::sig(uint32_t index) const
 {
     MOZ_ASSERT(index < numSigs_);
     return shared_->sigs[index];
 }
 
 void
-ModuleGenerator::initFuncDefSig(uint32_t funcDefIndex, uint32_t sigIndex)
+ModuleGenerator::initFuncSig(uint32_t funcIndex, uint32_t sigIndex)
 {
     MOZ_ASSERT(isAsmJS());
-    MOZ_ASSERT(!shared_->funcDefSigs[funcDefIndex]);
+    MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
 
-    shared_->funcDefSigs[funcDefIndex] = &shared_->sigs[sigIndex];
+    shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
 }
 
 void
 ModuleGenerator::initMemoryUsage(MemoryUsage memoryUsage)
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(shared_->memoryUsage == MemoryUsage::None);
 
@@ -734,78 +718,77 @@ void
 ModuleGenerator::bumpMinMemoryLength(uint32_t newMinMemoryLength)
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(newMinMemoryLength >= shared_->minMemoryLength);
 
     shared_->minMemoryLength = newMinMemoryLength;
 }
 
-const SigWithId&
-ModuleGenerator::funcDefSig(uint32_t funcDefIndex) const
-{
-    MOZ_ASSERT(shared_->funcDefSigs[funcDefIndex]);
-    return *shared_->funcDefSigs[funcDefIndex];
-}
-
 bool
-ModuleGenerator::initImport(uint32_t funcImportIndex, uint32_t sigIndex)
+ModuleGenerator::initImport(uint32_t funcIndex, uint32_t sigIndex)
 {
     MOZ_ASSERT(isAsmJS());
 
+    MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
+    shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
+
     uint32_t globalDataOffset;
     if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*), &globalDataOffset))
         return false;
 
-    MOZ_ASSERT(funcImportIndex == metadata_->funcImports.length());
-    if (!addFuncImport(sig(sigIndex), globalDataOffset))
-        return false;
+    MOZ_ASSERT(!shared_->funcImportGlobalDataOffsets[funcIndex]);
+    shared_->funcImportGlobalDataOffsets[funcIndex] = globalDataOffset;
 
-    FuncImportGenDesc& funcImport = shared_->funcImports[funcImportIndex];
-    MOZ_ASSERT(!funcImport.sig);
-    funcImport.sig = &shared_->sigs[sigIndex];
-    funcImport.globalDataOffset = globalDataOffset;
-    return true;
+    MOZ_ASSERT(funcIndex == metadata_->funcImports.length());
+    return addFuncImport(sig(sigIndex), globalDataOffset);
 }
 
 uint32_t
 ModuleGenerator::numFuncImports() const
 {
+    // Until all functions have been validated, asm.js doesn't know the total
+    // number of imports.
+    MOZ_ASSERT_IF(isAsmJS(), finishedFuncDefs_);
     return metadata_->funcImports.length();
 }
 
-const FuncImportGenDesc&
-ModuleGenerator::funcImport(uint32_t funcImportIndex) const
+uint32_t
+ModuleGenerator::numFuncDefs() const
 {
-    MOZ_ASSERT(shared_->funcImports[funcImportIndex].sig);
-    return shared_->funcImports[funcImportIndex];
+    // asm.js over-allocates funcSigs and in general does not know the number
+    // of function definitions until it has finished compiling.
+    MOZ_ASSERT(!isAsmJS());
+    return shared_->funcSigs.length() - numFuncImports();
 }
 
 uint32_t
 ModuleGenerator::numFuncs() const
 {
-    return numFuncImports() + numFuncDefs();
+    // asm.js pre-reserves a range of function index space that is incrementally
+    // filled in during function-body validation. numFuncs() could thus mean
+    // either the total size of the index space or the exact number of imports
+    // and definitions encountered so far; to keep things simple, this quantity
+    // is only defined for wasm.
+    MOZ_ASSERT(!isAsmJS());
+    return shared_->funcSigs.length();
 }
 
 const SigWithId&
 ModuleGenerator::funcSig(uint32_t funcIndex) const
 {
-    MOZ_ASSERT(funcIndex < numFuncs());
-
-    if (funcIndex < numFuncImports())
-        return *funcImport(funcIndex).sig;
-
-    return funcDefSig(funcIndex - numFuncImports());
+    MOZ_ASSERT(shared_->funcSigs[funcIndex]);
+    return *shared_->funcSigs[funcIndex];
 }
 
 bool
-ModuleGenerator::addFuncDefExport(UniqueChars fieldName, uint32_t funcIndex)
+ModuleGenerator::addFuncExport(UniqueChars fieldName, uint32_t funcIndex)
 {
-    if (funcIndexIsDef(funcIndex)) {
-       if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
+    if (!funcIsImport(funcIndex)) {
+       if (!exportedFuncs_.put(funcIndex))
            return false;
     }
 
     return exports_.emplaceBack(Move(fieldName), funcIndex, DefinitionKind::Function);
 }
 
 bool
 ModuleGenerator::addTableExport(UniqueChars fieldName)
@@ -826,34 +809,34 @@ bool
 ModuleGenerator::addGlobalExport(UniqueChars fieldName, uint32_t globalIndex)
 {
     return exports_.emplaceBack(Move(fieldName), globalIndex, DefinitionKind::Global);
 }
 
 bool
 ModuleGenerator::setStartFunction(uint32_t funcIndex)
 {
-    if (funcIndexIsDef(funcIndex)) {
-        if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
+    if (!funcIsImport(funcIndex)) {
+        if (!exportedFuncs_.put(funcIndex))
             return false;
     }
 
     metadata_->startFuncIndex.emplace(funcIndex);
     return true;
 }
 
 bool
 ModuleGenerator::addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices)
 {
     MOZ_ASSERT(!isAsmJS());
     MOZ_ASSERT(!startedFuncDefs_);
     MOZ_ASSERT(shared_->tables.length() == 1);
 
     for (uint32_t funcIndex : elemFuncIndices) {
-        if (!funcIndexIsDef(funcIndex)) {
+        if (funcIsImport(funcIndex)) {
             shared_->tables[0].external = true;
             break;
         }
     }
 
     return elemSegments_.emplaceBack(0, offset, Move(elemFuncIndices));
 }
 
@@ -874,20 +857,20 @@ ModuleGenerator::startFuncDefs()
     // elements of any external table as exported since they may be called from
     // outside the module.
 
     for (ElemSegment& elems : elemSegments_) {
         if (!shared_->tables[elems.tableIndex].external)
             continue;
 
         for (uint32_t funcIndex : elems.elemFuncIndices) {
-            if (!funcIndexIsDef(funcIndex))
+            if (funcIsImport(funcIndex))
                 continue;
 
-            if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
+            if (!exportedFuncs_.put(funcIndex))
                 return false;
         }
     }
 
     // The wasmCompilationInProgress atomic ensures that there is only one
     // parallel compilation in progress at a time. In the special case of
     // asm.js, where the ModuleGenerator itself can be on a helper thread, this
     // avoids the possibility of deadlock since at most 1 helper thread will be
@@ -949,23 +932,23 @@ ModuleGenerator::startFuncDef(uint32_t l
     fg->lineOrBytecode_ = lineOrBytecode;
     fg->m_ = this;
     fg->task_ = task;
     activeFuncDef_ = fg;
     return true;
 }
 
 bool
-ModuleGenerator::finishFuncDef(uint32_t funcDefIndex, FunctionGenerator* fg)
+ModuleGenerator::finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg)
 {
     MOZ_ASSERT(activeFuncDef_ == fg);
 
     auto func = js::MakeUnique<FuncBytes>(Move(fg->bytes_),
-                                          funcDefIndex,
-                                          funcDefSig(funcDefIndex),
+                                          funcIndex,
+                                          funcSig(funcIndex),
                                           fg->lineOrBytecode_,
                                           Move(fg->callSiteLineNums_));
     if (!func)
         return false;
 
     auto mode = alwaysBaseline_ && BaselineCanCompile(fg)
                 ? IonCompileTask::CompileMode::Baseline
                 : IonCompileTask::CompileMode::Ion;
@@ -981,58 +964,73 @@ ModuleGenerator::finishFuncDef(uint32_t 
             return false;
         if (!finishTask(fg->task_))
             return false;
     }
 
     fg->m_ = nullptr;
     fg->task_ = nullptr;
     activeFuncDef_ = nullptr;
+    numFinishedFuncDefs_++;
     return true;
 }
 
 bool
 ModuleGenerator::finishFuncDefs()
 {
     MOZ_ASSERT(startedFuncDefs_);
     MOZ_ASSERT(!activeFuncDef_);
     MOZ_ASSERT(!finishedFuncDefs_);
 
     while (outstanding_ > 0) {
         if (!finishOutstandingTask())
             return false;
     }
 
+    linkData_.functionCodeLength = masm_.size();
+    finishedFuncDefs_ = true;
+
+    // For now, imports never have an associated code range.
+
 #ifdef DEBUG
-    for (uint32_t i = 0; i < funcDefIndexToCodeRange_.length(); i++)
-        MOZ_ASSERT(funcIsDefined(i));
+    if (isAsmJS()) {
+        MOZ_ASSERT(numFuncImports() < AsmJSFirstDefFuncIndex);
+        for (uint32_t i = 0; i < AsmJSFirstDefFuncIndex; i++)
+            MOZ_ASSERT(funcToCodeRange_[i] == BAD_CODE_RANGE);
+        for (uint32_t i = AsmJSFirstDefFuncIndex; i < AsmJSFirstDefFuncIndex + numFinishedFuncDefs_; i++)
+            MOZ_ASSERT(funcCodeRange(i).funcIndex() == i);
+    } else {
+        MOZ_ASSERT(numFinishedFuncDefs_ == numFuncDefs());
+        for (uint32_t i = 0; i < numFuncImports(); i++)
+            MOZ_ASSERT(funcToCodeRange_[i] == BAD_CODE_RANGE);
+        for (uint32_t i = numFuncImports(); i < numFuncs(); i++)
+            MOZ_ASSERT(funcCodeRange(i).funcIndex() == i);
+    }
 #endif
 
     // Complete element segments with the code range index of every element, now
     // that all functions have been compiled.
 
     for (ElemSegment& elems : elemSegments_) {
         Uint32Vector& codeRangeIndices = elems.elemCodeRangeIndices;
 
         MOZ_ASSERT(codeRangeIndices.empty());
         if (!codeRangeIndices.reserve(elems.elemFuncIndices.length()))
             return false;
 
         for (uint32_t funcIndex : elems.elemFuncIndices) {
-            if (!funcIndexIsDef(funcIndex)) {
+            if (funcIsImport(funcIndex)) {
                 codeRangeIndices.infallibleAppend(UINT32_MAX);
                 continue;
             }
 
-            codeRangeIndices.infallibleAppend(funcDefIndexToCodeRange_[funcIndexToDef(funcIndex)]);
+            codeRangeIndices.infallibleAppend(funcToCodeRange_[funcIndex]);
         }
     }
 
-    linkData_.functionCodeLength = masm_.size();
-    finishedFuncDefs_ = true;
     return true;
 }
 
 void
 ModuleGenerator::setFuncNames(NameInBytecodeVector&& funcNames)
 {
     MOZ_ASSERT(metadata_->funcNames.empty());
     metadata_->funcNames = Move(funcNames);
@@ -1051,36 +1049,32 @@ ModuleGenerator::initSigTableLength(uint
     TableDesc& table = shared_->tables[numTables_++];
     table.kind = TableKind::TypedFunction;
     table.limits.initial = length;
     table.limits.maximum = Some(length);
     return allocateGlobalBytes(sizeof(TableTls), sizeof(void*), &table.globalDataOffset);
 }
 
 bool
-ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncDefIndices)
+ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices)
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(finishedFuncDefs_);
 
     uint32_t tableIndex = shared_->asmJSSigToTableIndex[sigIndex];
-    MOZ_ASSERT(shared_->tables[tableIndex].limits.initial == elemFuncDefIndices.length());
+    MOZ_ASSERT(shared_->tables[tableIndex].limits.initial == elemFuncIndices.length());
 
     Uint32Vector codeRangeIndices;
-    if (!codeRangeIndices.resize(elemFuncDefIndices.length()))
+    if (!codeRangeIndices.resize(elemFuncIndices.length()))
         return false;
-    for (size_t i = 0; i < elemFuncDefIndices.length(); i++) {
-        codeRangeIndices[i] = funcDefIndexToCodeRange_[elemFuncDefIndices[i]];
-        elemFuncDefIndices[i] += numFuncImports();
-    }
+    for (size_t i = 0; i < elemFuncIndices.length(); i++)
+        codeRangeIndices[i] = funcToCodeRange_[elemFuncIndices[i]];
 
-    // By adding numFuncImports to each element, elemFuncDefIndices is now a
-    // Vector of func indices.
     InitExpr offset(Val(uint32_t(0)));
-    if (!elemSegments_.emplaceBack(tableIndex, offset, Move(elemFuncDefIndices)))
+    if (!elemSegments_.emplaceBack(tableIndex, offset, Move(elemFuncIndices)))
         return false;
 
     elemSegments_.back().elemCodeRangeIndices = Move(codeRangeIndices);
     return true;
 }
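With a single function index space, initSigTableElems() no longer needs to bias asm.js element indices by numFuncImports() before storing them: the indices it receives already are function indices, so the old fix-up loop and its explanatory comment simply disappear.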
 
 SharedModule
 ModuleGenerator::finish(const ShareableBytes& bytecode)
@@ -1088,17 +1082,17 @@ ModuleGenerator::finish(const ShareableB
     MOZ_ASSERT(!activeFuncDef_);
     MOZ_ASSERT(finishedFuncDefs_);
 
     // Now that all asm.js tables have been created and the compiler threads are
     // done, shrink the (no longer shared) tables vector down to size.
     if (isAsmJS() && !shared_->tables.resize(numTables_))
         return nullptr;
 
-    if (!finishFuncDefExports())
+    if (!finishFuncExports())
         return nullptr;
 
     if (!finishCodegen())
         return nullptr;
 
     // Round up the code size to page size since this is eventually required by
     // the executable-code allocator and for setting memory protection.
     uint32_t bytesNeeded = masm_.bytesNeeded();
--- a/js/src/wasm/WasmGenerator.h
+++ b/js/src/wasm/WasmGenerator.h
@@ -31,52 +31,42 @@ class FunctionGenerator;
 
 // The ModuleGeneratorData holds all the state shared between the
 // ModuleGenerator thread and background compile threads. The background
 // threads are given a read-only view of the ModuleGeneratorData and the
 // ModuleGenerator is careful to initialize, and never subsequently mutate,
 // any given datum before being read by a background thread. In particular,
 // once created, the Vectors are never resized.
 
-struct FuncImportGenDesc
-{
-    const SigWithId* sig;
-    uint32_t globalDataOffset;
-
-    FuncImportGenDesc() : sig(nullptr), globalDataOffset(0) {}
-    explicit FuncImportGenDesc(const SigWithId* sig) : sig(sig), globalDataOffset(0) {}
-};
-
-typedef Vector<FuncImportGenDesc, 0, SystemAllocPolicy> FuncImportGenDescVector;
-
 struct ModuleGeneratorData
 {
     ModuleKind                kind;
     MemoryUsage               memoryUsage;
     mozilla::Atomic<uint32_t> minMemoryLength;
     Maybe<uint32_t>           maxMemoryLength;
-    uint32_t                  firstFuncDefIndex;
 
     SigWithIdVector           sigs;
-    SigWithIdPtrVector        funcDefSigs;
-    FuncImportGenDescVector   funcImports;
+    SigWithIdPtrVector        funcSigs;
+    Uint32Vector              funcImportGlobalDataOffsets;
     GlobalDescVector          globals;
     TableDescVector           tables;
     Uint32Vector              asmJSSigToTableIndex;
 
     explicit ModuleGeneratorData(ModuleKind kind = ModuleKind::Wasm)
       : kind(kind),
         memoryUsage(MemoryUsage::None),
-        minMemoryLength(0),
-        firstFuncDefIndex(0)
+        minMemoryLength(0)
     {}
 
     bool isAsmJS() const {
         return kind == ModuleKind::AsmJS;
     }
+    bool funcIsImport(uint32_t funcIndex) const {
+        return funcIndex < funcImportGlobalDataOffsets.length();
+    }
 };
 
 typedef UniquePtr<ModuleGeneratorData> UniqueModuleGeneratorData;
 
 // A ModuleGenerator encapsulates the creation of a wasm module. During the
 // lifetime of a ModuleGenerator, a sequence of FunctionGenerators are created
 // and destroyed to compile the individual function bodies. After generating all
 // functions, ModuleGenerator::finish() must be called to complete the
@@ -104,40 +94,40 @@ class MOZ_STACK_CLASS ModuleGenerator
     // Data scoped to the ModuleGenerator's lifetime
     UniqueModuleGeneratorData       shared_;
     uint32_t                        numSigs_;
     uint32_t                        numTables_;
     LifoAlloc                       lifo_;
     jit::JitContext                 jcx_;
     jit::TempAllocator              masmAlloc_;
     jit::MacroAssembler             masm_;
-    Uint32Vector                    funcDefIndexToCodeRange_;
-    Uint32Set                       exportedFuncDefs_;
+    Uint32Vector                    funcToCodeRange_;
+    Uint32Set                       exportedFuncs_;
     uint32_t                        lastPatchedCallsite_;
     uint32_t                        startOfUnpatchedCallsites_;
 
     // Parallel compilation
     bool                            parallel_;
     uint32_t                        outstanding_;
     IonCompileTaskVector            tasks_;
     IonCompileTaskPtrVector         freeTasks_;
 
     // Assertions
     DebugOnly<FunctionGenerator*>   activeFuncDef_;
     DebugOnly<bool>                 startedFuncDefs_;
     DebugOnly<bool>                 finishedFuncDefs_;
+    DebugOnly<uint32_t>             numFinishedFuncDefs_;
 
     MOZ_MUST_USE bool finishOutstandingTask();
-    bool funcIndexIsDef(uint32_t funcIndex) const;
-    uint32_t funcIndexToDef(uint32_t funcIndex) const;
-    bool funcIsDefined(uint32_t funcDefIndex) const;
-    const CodeRange& funcDefCodeRange(uint32_t funcDefIndex) const;
+    bool funcIsImport(uint32_t funcIndex) const;
+    bool funcIsCompiled(uint32_t funcIndex) const;
+    const CodeRange& funcCodeRange(uint32_t funcIndex) const;
     MOZ_MUST_USE bool patchCallSites(TrapExitOffsetArray* maybeTrapExits = nullptr);
     MOZ_MUST_USE bool finishTask(IonCompileTask* task);
-    MOZ_MUST_USE bool finishFuncDefExports();
+    MOZ_MUST_USE bool finishFuncExports();
     MOZ_MUST_USE bool finishCodegen();
     MOZ_MUST_USE bool finishLinkData(Bytes& code);
     MOZ_MUST_USE bool addFuncImport(const Sig& sig, uint32_t globalDataOffset);
     MOZ_MUST_USE bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOff);
     MOZ_MUST_USE bool allocateGlobal(GlobalDesc* global);
 
   public:
     explicit ModuleGenerator(ImportVector&& imports);
@@ -155,60 +145,54 @@ class MOZ_STACK_CLASS ModuleGenerator
 
     // Tables:
     uint32_t numTables() const { return numTables_; }
     const TableDescVector& tables() const { return shared_->tables; }
 
     // Signatures:
     uint32_t numSigs() const { return numSigs_; }
     const SigWithId& sig(uint32_t sigIndex) const;
-
-    // Function declarations:
-    uint32_t numFuncDefs() const { return shared_->funcDefSigs.length(); }
-    const SigWithId& funcDefSig(uint32_t funcDefIndex) const;
+    const SigWithId& funcSig(uint32_t funcIndex) const;
 
     // Globals:
     const GlobalDescVector& globals() const { return shared_->globals; }
 
-    // Imports:
+    // Function declarations:
     uint32_t numFuncImports() const;
-    const FuncImportGenDesc& funcImport(uint32_t funcImportIndex) const;
-
-    // Function index space:
+    uint32_t numFuncDefs() const;
     uint32_t numFuncs() const;
-    const SigWithId& funcSig(uint32_t funcIndex) const;
 
     // Exports:
-    MOZ_MUST_USE bool addFuncDefExport(UniqueChars fieldName, uint32_t funcIndex);
+    MOZ_MUST_USE bool addFuncExport(UniqueChars fieldName, uint32_t funcIndex);
     MOZ_MUST_USE bool addTableExport(UniqueChars fieldName);
     MOZ_MUST_USE bool addMemoryExport(UniqueChars fieldName);
     MOZ_MUST_USE bool addGlobalExport(UniqueChars fieldName, uint32_t globalIndex);
 
     // Function definitions:
     MOZ_MUST_USE bool startFuncDefs();
     MOZ_MUST_USE bool startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg);
-    MOZ_MUST_USE bool finishFuncDef(uint32_t funcDefIndex, FunctionGenerator* fg);
+    MOZ_MUST_USE bool finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg);
     MOZ_MUST_USE bool finishFuncDefs();
 
     // Start function:
     bool setStartFunction(uint32_t funcIndex);
 
     // Segments:
     void setDataSegments(DataSegmentVector&& segments);
     MOZ_MUST_USE bool addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices);
 
     // Function names:
     void setFuncNames(NameInBytecodeVector&& funcNames);
 
     // asm.js lazy initialization:
     void initSig(uint32_t sigIndex, Sig&& sig);
-    void initFuncDefSig(uint32_t funcIndex, uint32_t sigIndex);
-    MOZ_MUST_USE bool initImport(uint32_t importIndex, uint32_t sigIndex);
+    void initFuncSig(uint32_t funcIndex, uint32_t sigIndex);
+    MOZ_MUST_USE bool initImport(uint32_t funcIndex, uint32_t sigIndex);
     MOZ_MUST_USE bool initSigTableLength(uint32_t sigIndex, uint32_t length);
-    MOZ_MUST_USE bool initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncDefIndices);
+    MOZ_MUST_USE bool initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
     void initMemoryUsage(MemoryUsage memoryUsage);
     void bumpMinMemoryLength(uint32_t newMinMemoryLength);
     MOZ_MUST_USE bool addGlobal(ValType type, bool isConst, uint32_t* index);
 
     // Finish compilation, given the list of imports and the source bytecode.
     // Both of these Vectors may be empty (e.g., because asm.js handles imports
     // and source bytecode differently).
     SharedModule finish(const ShareableBytes& bytecode);
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -525,25 +525,25 @@ Instance::objectUnbarriered() const
 
 WasmInstanceObject*
 Instance::object() const
 {
     return object_;
 }
 
 bool
-Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
+Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
 {
     // If there has been a moving grow, this Instance should have been notified.
     MOZ_RELEASE_ASSERT(!memory_ || tlsData_.memoryBase == memory_->buffer().dataPointerEither());
 
     if (!cx->compartment()->wasm.ensureProfilingState(cx))
         return false;
 
-    const FuncDefExport& func = metadata().lookupFuncDefExport(funcDefIndex);
+    const FuncExport& func = metadata().lookupFuncExport(funcIndex);
 
     // The calling convention for an external call into wasm is to pass an
     // array of 16-byte values where each value contains either a coerced int32
     // (in the low word), a double value (in the low dword) or a SIMD vector
     // value, with the coercions specified by the wasm signature. The external
     // entry point unpacks this array into the system-ABI-specified registers
     // and stack memory and then calls into the internal entry point. The return
     // value is stored in the first element of the array (which, therefore, must
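The boxed-argument convention this comment describes (it continues past this hunk) can be sketched in isolation: each argument occupies one 16-byte slot and the first slot doubles as the return-value channel. ExportArg and the helpers below are illustrative stand-ins, not the engine's types:

#include <stdint.h>
#include <string.h>

struct alignas(16) ExportArg { uint8_t raw[16]; };

static void
PackI32(ExportArg* slot, int32_t v)   // int32 goes in the low word
{
    memcpy(slot->raw, &v, sizeof(v));
}

static void
PackF64(ExportArg* slot, double v)    // double fills the low 8 bytes
{
    memcpy(slot->raw, &v, sizeof(v));
}

static int32_t
UnpackI32Result(const ExportArg* argv)
{
    int32_t v;
    memcpy(&v, argv[0].raw, sizeof(v));  // entry stub wrote the result into slot 0
    return v;
}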
--- a/js/src/wasm/WasmInstance.h
+++ b/js/src/wasm/WasmInstance.h
@@ -95,17 +95,17 @@ class Instance
     // is explicitly waived.
 
     WasmInstanceObject* object() const;
     WasmInstanceObject* objectUnbarriered() const;
 
     // Execute the given export given the JS call arguments, storing the return
     // value in args.rval.
 
-    MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args);
+    MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcIndex, CallArgs args);
 
     // Initially, calls to imports in wasm code call out through the generic
     // callImport method. If the imported callee gets JIT compiled and the types
     // match up, callImport will patch the code to instead call through a thunk
     // directly into the JIT code. If the JIT code is released, the Instance must
     // be notified so it can go back to the generic callImport.
 
     void deoptimizeImportExit(uint32_t funcImportIndex);
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -973,27 +973,27 @@ class FunctionCompiler
             call->spIncrement_ = 0;
             stackBytes = Max(stackBytes, call->maxChildStackBytes_);
         }
 
         propagateMaxStackArgBytes(stackBytes);
         return true;
     }
 
-    bool callDefinition(const Sig& sig, uint32_t funcDefIndex, const CallCompileState& call,
-                        MDefinition** def)
+    bool callDirect(const Sig& sig, uint32_t funcIndex, const CallCompileState& call,
+                    MDefinition** def)
     {
         if (inDeadCode()) {
             *def = nullptr;
             return true;
         }
 
-        CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::FuncDef);
+        CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Func);
         MIRType ret = ToMIRType(sig.ret());
-        auto callee = CalleeDesc::definition(funcDefIndex);
+        auto callee = CalleeDesc::function(funcIndex);
         auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret,
                                    call.spIncrement_, MWasmCall::DontSaveTls);
         if (!ins)
             return false;
 
         curBlock_->add(ins);
         *def = ins;
         return true;
@@ -1894,96 +1894,55 @@ EmitCallArgs(FunctionCompiler& f, const 
 
     if (!f.iter().readCallArgsEnd(numArgs))
         return false;
 
     return f.finishCall(call, tls);
 }
 
 static bool
-EmitCallImportCommon(FunctionCompiler& f, uint32_t lineOrBytecode, uint32_t funcImportIndex)
+EmitCall(FunctionCompiler& f)
 {
-    const FuncImportGenDesc& funcImport = f.mg().funcImports[funcImportIndex];
-    const Sig& sig = *funcImport.sig;
+    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+    uint32_t funcIndex;
+    if (!f.iter().readCall(&funcIndex))
+        return false;
+
+    if (f.inDeadCode())
+        return true;
+
+    const Sig& sig = *f.mg().funcSigs[funcIndex];
+    bool import = f.mg().funcIsImport(funcIndex);
 
     CallCompileState call(f, lineOrBytecode);
-    if (!EmitCallArgs(f, sig, TlsUsage::CallerSaved, &call))
+    if (!EmitCallArgs(f, sig, import ? TlsUsage::CallerSaved : TlsUsage::Need, &call))
         return false;
 
     if (!f.iter().readCallReturn(sig.ret()))
         return false;
 
     MDefinition* def;
-    if (!f.callImport(funcImport.globalDataOffset, call, sig.ret(), &def))
-        return false;
+    if (import) {
+        uint32_t globalDataOffset = f.mg().funcImportGlobalDataOffsets[funcIndex];
+        if (!f.callImport(globalDataOffset, call, sig.ret(), &def))
+            return false;
+    } else {
+        if (!f.callDirect(sig, funcIndex, call, &def))
+            return false;
+    }
 
     if (IsVoid(sig.ret()))
         return true;
 
     f.iter().setResult(def);
     return true;
 }
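The unified EmitCall() above, together with the split versions removed just below, shows the compiler-side payoff of the shared index space: a single readCall() yields a funcIndex, and funcIsImport() chooses between callImport() (which must treat tls as caller-saved since control leaves the instance) and the renamed callDirect(). Only the dispatch is new; each path behaves as the code it replaces did.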
 
 static bool
-EmitCall(FunctionCompiler& f)
-{
-    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
-
-    uint32_t calleeIndex;
-    if (!f.iter().readCall(&calleeIndex))
-        return false;
-
-    if (f.inDeadCode())
-        return true;
-
-    // For asm.js, imports are not part of the function index space so in
-    // these cases firstFuncDefIndex is fixed to 0, even if there are
-    // function imports.
-    if (calleeIndex < f.mg().firstFuncDefIndex)
-        return EmitCallImportCommon(f, lineOrBytecode, calleeIndex);
-
-    uint32_t funcDefIndex = calleeIndex - f.mg().firstFuncDefIndex;
-    const Sig& sig = *f.mg().funcDefSigs[funcDefIndex];
-
-    CallCompileState call(f, lineOrBytecode);
-    if (!EmitCallArgs(f, sig, TlsUsage::Need, &call))
-        return false;
-
-    if (!f.iter().readCallReturn(sig.ret()))
-        return false;
-
-    MDefinition* def;
-    if (!f.callDefinition(sig, funcDefIndex, call, &def))
-        return false;
-
-    if (IsVoid(sig.ret()))
-        return true;
-
-    f.iter().setResult(def);
-    return true;
-}
-
-static bool
-EmitOldCallImport(FunctionCompiler& f)
-{
-    MOZ_ASSERT(!f.mg().firstFuncDefIndex);
-
-    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
-
-    uint32_t funcImportIndex;
-    if (!f.iter().readCall(&funcImportIndex))
-        return false;
-
-    if (f.inDeadCode())
-        return true;
-
-    return EmitCallImportCommon(f, lineOrBytecode, funcImportIndex);
-}
-
-static bool
 EmitCallIndirect(FunctionCompiler& f, bool oldStyle)
 {
     uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
 
     uint32_t sigIndex;
     MDefinition* callee;
     if (oldStyle) {
         if (!f.iter().readOldCallIndirect(&sigIndex))
@@ -3177,18 +3136,16 @@ EmitExpr(FunctionCompiler& f)
 
       // Calls
       case Expr::Call:
         return EmitCall(f);
       case Expr::CallIndirect:
         return EmitCallIndirect(f, /* oldStyle = */ false);
       case Expr::OldCallIndirect:
         return EmitCallIndirect(f, /* oldStyle = */ true);
-      case Expr::OldCallImport:
-        return EmitOldCallImport(f);
 
       // Locals and globals
       case Expr::GetLocal:
         return EmitGetLocal(f);
       case Expr::SetLocal:
         return EmitSetLocal(f);
       case Expr::TeeLocal:
         return EmitTeeLocal(f);
@@ -3808,17 +3765,17 @@ wasm::IonCompileFunction(IonCompileTask*
 
         if (!OptimizeMIR(&mir))
             return false;
 
         LIRGraph* lir = GenerateLIR(&mir);
         if (!lir)
             return false;
 
-        SigIdDesc sigId = task->mg().funcDefSigs[func.defIndex()]->id;
+        SigIdDesc sigId = task->mg().funcSigs[func.index()]->id;
 
         CodeGenerator codegen(&mir, lir, &results.masm());
         if (!codegen.generateWasm(sigId, prologueTrapOffset, &results.offsets()))
             return false;
     }
 
     return true;
 }
--- a/js/src/wasm/WasmIonCompile.h
+++ b/js/src/wasm/WasmIonCompile.h
@@ -33,37 +33,37 @@ typedef jit::ABIArgIter<ValTypeVector> A
 
 // The FuncBytes class represents a single, concurrently-compilable function.
 // A FuncBytes object is composed of the wasm function body bytes along with the
 // ambient metadata describing the function necessary to compile it.
 
 class FuncBytes
 {
     Bytes            bytes_;
-    uint32_t         defIndex_;
+    uint32_t         index_;
     const SigWithId& sig_;
     uint32_t         lineOrBytecode_;
     Uint32Vector     callSiteLineNums_;
 
   public:
     FuncBytes(Bytes&& bytes,
-              uint32_t defIndex,
+              uint32_t index,
               const SigWithId& sig,
               uint32_t lineOrBytecode,
               Uint32Vector&& callSiteLineNums)
       : bytes_(Move(bytes)),
-        defIndex_(defIndex),
+        index_(index),
         sig_(sig),
         lineOrBytecode_(lineOrBytecode),
         callSiteLineNums_(Move(callSiteLineNums))
     {}
 
     Bytes& bytes() { return bytes_; }
     const Bytes& bytes() const { return bytes_; }
-    uint32_t defIndex() const { return defIndex_; }
+    uint32_t index() const { return index_; }
     const SigWithId& sig() const { return sig_; }
     uint32_t lineOrBytecode() const { return lineOrBytecode_; }
     const Uint32Vector& callSiteLineNums() const { return callSiteLineNums_; }
 };
 
 typedef UniquePtr<FuncBytes> UniqueFuncBytes;
 
 // The FuncCompileResults class contains the results of compiling a single
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -751,58 +751,58 @@ WasmInstanceObject::exports() const
 
 static bool
 WasmCall(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     RootedFunction callee(cx, &args.callee().as<JSFunction>());
 
     Instance& instance = ExportedFunctionToInstance(callee);
-    uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(callee);
-    return instance.callExport(cx, funcDefIndex, args);
+    uint32_t funcIndex = ExportedFunctionToFuncIndex(callee);
+    return instance.callExport(cx, funcIndex, args);
 }
 
 /* static */ bool
 WasmInstanceObject::getExportedFunction(JSContext* cx, HandleWasmInstanceObject instanceObj,
-                                        uint32_t funcDefIndex, MutableHandleFunction fun)
+                                        uint32_t funcIndex, MutableHandleFunction fun)
 {
-    if (ExportMap::Ptr p = instanceObj->exports().lookup(funcDefIndex)) {
+    if (ExportMap::Ptr p = instanceObj->exports().lookup(funcIndex)) {
         fun.set(p->value());
         return true;
     }
 
     const Instance& instance = instanceObj->instance();
-    RootedAtom name(cx, instance.code().getFuncDefAtom(cx, funcDefIndex));
+    RootedAtom name(cx, instance.code().getFuncAtom(cx, funcIndex));
     if (!name)
         return false;
 
-    unsigned numArgs = instance.metadata().lookupFuncDefExport(funcDefIndex).sig().args().length();
+    unsigned numArgs = instance.metadata().lookupFuncExport(funcIndex).sig().args().length();
     fun.set(NewNativeConstructor(cx, WasmCall, numArgs, name, gc::AllocKind::FUNCTION_EXTENDED,
                                  SingletonObject, JSFunction::WASM_CTOR));
     if (!fun)
         return false;
 
     fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT, ObjectValue(*instanceObj));
-    fun->setExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT, Int32Value(funcDefIndex));
+    fun->setExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT, Int32Value(funcIndex));
 
-    if (!instanceObj->exports().putNew(funcDefIndex, fun)) {
+    if (!instanceObj->exports().putNew(funcIndex, fun)) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     return true;
 }
 
 const CodeRange&
 WasmInstanceObject::getExportedFunctionCodeRange(HandleFunction fun)
 {
-    uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(fun);
-    MOZ_ASSERT(exports().lookup(funcDefIndex)->value() == fun);
+    uint32_t funcIndex = ExportedFunctionToFuncIndex(fun);
+    MOZ_ASSERT(exports().lookup(funcIndex)->value() == fun);
     const Metadata& metadata = instance().metadata();
-    return metadata.codeRanges[metadata.lookupFuncDefExport(funcDefIndex).codeRangeIndex()];
+    return metadata.codeRanges[metadata.lookupFuncExport(funcIndex).codeRangeIndex()];
 }
 
 bool
 wasm::IsExportedFunction(JSFunction* fun)
 {
     return fun->maybeNative() == WasmCall;
 }
 
@@ -836,20 +836,20 @@ WasmInstanceObject*
 wasm::ExportedFunctionToInstanceObject(JSFunction* fun)
 {
     MOZ_ASSERT(IsExportedFunction(fun));
     const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT);
     return &v.toObject().as<WasmInstanceObject>();
 }
 
 uint32_t
-wasm::ExportedFunctionToDefinitionIndex(JSFunction* fun)
+wasm::ExportedFunctionToFuncIndex(JSFunction* fun)
 {
     MOZ_ASSERT(IsExportedFunction(fun));
-    const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT);
+    const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT);
     return v.toInt32();
 }
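The slot round-trip above is deliberately symmetric: getExportedFunction stores
the full function index as an Int32Value, and ExportedFunctionToFuncIndex reads
the same value back with no import-count adjustment. A standalone model of the
lazy per-index caching (FakeFunction and FakeExports are hypothetical stand-ins;
the real map holds GC-rooted JSFunctions, so the leak below is toy-only):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct FakeFunction { uint32_t funcIndexSlot; };

    struct FakeExports {
        std::unordered_map<uint32_t, FakeFunction*> map;

        // Mirrors WasmInstanceObject::getExportedFunction: one function
        // object per full function index, created lazily and cached.
        FakeFunction* getOrCreate(uint32_t funcIndex) {
            auto it = map.find(funcIndex);
            if (it != map.end())
                return it->second;
            FakeFunction* fun = new FakeFunction{funcIndex};
            map.emplace(funcIndex, fun);
            return fun;
        }
    };

    int main() {
        FakeExports exports;
        FakeFunction* f = exports.getOrCreate(3);
        // Mirrors ExportedFunctionToFuncIndex: the stored index comes back
        // unchanged, with no rebasing against the import count.
        assert(f->funcIndexSlot == 3);
        assert(exports.getOrCreate(3) == f); // cached on second lookup
    }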
 
 // ============================================================================
 // WebAssembly.Memory class and methods
 
 const ClassOps WasmMemoryObject::classOps_ =
 {
@@ -1274,17 +1274,17 @@ WasmTableObject::getImpl(JSContext* cx, 
     }
 
     Instance& instance = *elem.tls->instance;
     const CodeRange& codeRange = *instance.code().lookupRange(elem.code);
     MOZ_ASSERT(codeRange.isFunction());
 
     RootedWasmInstanceObject instanceObj(cx, instance.object());
     RootedFunction fun(cx);
-    if (!instanceObj->getExportedFunction(cx, instanceObj, codeRange.funcDefIndex(), &fun))
+    if (!instanceObj->getExportedFunction(cx, instanceObj, codeRange.funcIndex(), &fun))
         return false;
 
     args.rval().setObject(*fun);
     return true;
 }
 
 /* static */ bool
 WasmTableObject::get(JSContext* cx, unsigned argc, Value* vp)
@@ -1309,27 +1309,27 @@ WasmTableObject::setImpl(JSContext* cx, 
     RootedFunction value(cx);
     if (!IsExportedFunction(args[1], &value) && !args[1].isNull()) {
         JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_TABLE_VALUE);
         return false;
     }
 
     if (value) {
         RootedWasmInstanceObject instanceObj(cx, ExportedFunctionToInstanceObject(value));
-        uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(value);
+        uint32_t funcIndex = ExportedFunctionToFuncIndex(value);
 
 #ifdef DEBUG
         RootedFunction f(cx);
-        MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcDefIndex, &f));
+        MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcIndex, &f));
         MOZ_ASSERT(value == f);
 #endif
 
         Instance& instance = instanceObj->instance();
-        const FuncDefExport& funcDefExport = instance.metadata().lookupFuncDefExport(funcDefIndex);
-        const CodeRange& codeRange = instance.metadata().codeRanges[funcDefExport.codeRangeIndex()];
+        const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
+        const CodeRange& codeRange = instance.metadata().codeRanges[funcExport.codeRangeIndex()];
         void* code = instance.codeSegment().base() + codeRange.funcTableEntry();
         table.set(index, code, instance);
     } else {
         table.setNull(index);
     }
 
     args.rval().setUndefined();
     return true;
--- a/js/src/wasm/WasmJS.h
+++ b/js/src/wasm/WasmJS.h
@@ -92,17 +92,17 @@ IsExportedFunction(const Value& v, Mutab
 
 extern Instance&
 ExportedFunctionToInstance(JSFunction* fun);
 
 extern WasmInstanceObject*
 ExportedFunctionToInstanceObject(JSFunction* fun);
 
 extern uint32_t
-ExportedFunctionToDefinitionIndex(JSFunction* fun);
+ExportedFunctionToFuncIndex(JSFunction* fun);
 
 } // namespace wasm
 
 // The class of the WebAssembly global namespace object.
 
 extern const Class WebAssemblyClass;
 
 JSObject*
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -533,18 +533,18 @@ Module::extractCode(JSContext* cx, Mutab
             return false;
         value.setNumber((uint32_t)p->end());
         if (!JS_DefineProperty(cx, segment, "end", value, JSPROP_ENUMERATE))
             return false;
         value.setNumber((uint32_t)p->kind());
         if (!JS_DefineProperty(cx, segment, "kind", value, JSPROP_ENUMERATE))
             return false;
         if (p->isFunction()) {
-            value.setNumber((uint32_t)p->funcDefIndex());
-            if (!JS_DefineProperty(cx, segment, "funcDefIndex", value, JSPROP_ENUMERATE))
+            value.setNumber((uint32_t)p->funcIndex());
+            if (!JS_DefineProperty(cx, segment, "funcIndex", value, JSPROP_ENUMERATE))
                 return false;
             value.setNumber((uint32_t)p->funcNonProfilingEntry());
             if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value, JSPROP_ENUMERATE))
                 return false;
             value.setNumber((uint32_t)p->funcProfilingEpilogue());
             if (!JS_DefineProperty(cx, segment, "funcBodyEnd", value, JSPROP_ENUMERATE))
                 return false;
         }
@@ -688,21 +688,21 @@ Module::instantiateFunctions(JSContext* 
     if (metadata().isAsmJS())
         return true;
 
     for (size_t i = 0; i < metadata_->funcImports.length(); i++) {
         HandleFunction f = funcImports[i];
         if (!IsExportedFunction(f) || ExportedFunctionToInstance(f).isAsmJS())
             continue;
 
-        uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(f);
+        uint32_t funcIndex = ExportedFunctionToFuncIndex(f);
         Instance& instance = ExportedFunctionToInstance(f);
-        const FuncDefExport& funcDefExport = instance.metadata().lookupFuncDefExport(funcDefIndex);
+        const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
 
-        if (funcDefExport.sig() != metadata_->funcImports[i].sig()) {
+        if (funcExport.sig() != metadata_->funcImports[i].sig()) {
             JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMPORT_SIG);
             return false;
         }
     }
 
     return true;
 }
 
@@ -829,20 +829,18 @@ GetFunctionExport(JSContext* cx,
                   const Export& exp,
                   MutableHandleValue val)
 {
     if (exp.funcIndex() < funcImports.length()) {
         val.setObject(*funcImports[exp.funcIndex()]);
         return true;
     }
 
-    uint32_t funcDefIndex = exp.funcIndex() - funcImports.length();
-
     RootedFunction fun(cx);
-    if (!instanceObj->getExportedFunction(cx, instanceObj, funcDefIndex, &fun))
+    if (!instanceObj->getExportedFunction(cx, instanceObj, exp.funcIndex(), &fun))
         return false;
 
     val.setObject(*fun);
     return true;
 }
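Note that the pre-patch rebasing (subtracting funcImports.length() to get a
definition index) disappears here; the full export index is passed through. A
small sketch of the arithmetic under the single index space (numImports and
funcIndex are made-up values):

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t numImports = 2;
        const uint32_t funcIndex = 3;   // an exported definition

        // Post-patch: indices [0, numImports) name imports and the full
        // index is used directly everywhere else.
        bool isImport = funcIndex < numImports;
        assert(!isImport);

        // Pre-patch: definition indices had to be rebased first.
        uint32_t oldDefIndex = funcIndex - numImports;
        assert(oldDefIndex == 1);
    }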
 
 static bool
 GetGlobalExport(JSContext* cx, const GlobalDescVector& globals, uint32_t globalIndex,
@@ -1047,18 +1045,17 @@ Module::instantiate(JSContext* cx,
         FixedInvokeArgs<0> args(cx);
         if (startFuncIndex < funcImports.length()) {
             RootedValue fval(cx, ObjectValue(*funcImports[startFuncIndex]));
             RootedValue thisv(cx);
             RootedValue rval(cx);
             if (!Call(cx, fval, thisv, args, &rval))
                 return false;
         } else {
-            uint32_t funcDefIndex = startFuncIndex - funcImports.length();
-            if (!instance->instance().callExport(cx, funcDefIndex, args))
+            if (!instance->instance().callExport(cx, startFuncIndex, args))
                 return false;
         }
     }
 
     uint32_t mode = uint32_t(metadata().isAsmJS() ? Telemetry::ASMJS : Telemetry::WASM);
     cx->runtime()->addTelemetry(JS_TELEMETRY_AOT_USAGE, mode);
 
     return true;
--- a/js/src/wasm/WasmModule.h
+++ b/js/src/wasm/WasmModule.h
@@ -96,17 +96,17 @@ struct Import
 
     WASM_DECLARE_SERIALIZABLE(Import)
 };
 
 typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
 
 // Export describes the export of a definition in a Module to a field in the
 // export object. For functions, Export stores an index into the
-// FuncDefExportVector in Metadata. For memory and table exports, there is
+// FuncExportVector in Metadata. For memory and table exports, there is
 // at most one (default) memory/table so no index is needed. Note: a single
 // definition can be exported by multiple Exports in the ExportVector.
 //
 // ExportVector is built incrementally by ModuleGenerator and then stored
 // immutably by Module.
 
 class Export
 {
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -91,17 +91,17 @@ static const unsigned FramePushedAfterSa
 #endif
 static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
 
 // Generate a stub that enters wasm from a C++ caller via the native ABI. The
 // signature of the entry point is Module::ExportFuncPtr. The exported wasm
 // function has an ABI derived from its specific signature, so this function
 // must map from the ABI of ExportFuncPtr to the export's signature's ABI.
 Offsets
-wasm::GenerateEntry(MacroAssembler& masm, const FuncDefExport& func)
+wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
 {
     masm.haltingAlign(CodeAlignment);
 
     Offsets offsets;
     offsets.begin = masm.currentOffset();
 
     // Save the return address if it wasn't already saved by the call insn.
 #if defined(JS_CODEGEN_ARM)
@@ -155,21 +155,21 @@ wasm::GenerateEntry(MacroAssembler& masm
     masm.storeStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
 
     // Dynamically align the stack since ABIStackAlignment is not necessarily
     // WasmStackAlignment. We'll use entrySP to recover the original stack
     // pointer on return.
     masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
 
     // Bump the stack for the call.
-    masm.reserveStack(AlignBytes(StackArgBytes(func.sig().args()), WasmStackAlignment));
+    masm.reserveStack(AlignBytes(StackArgBytes(fe.sig().args()), WasmStackAlignment));
 
     // Copy parameters out of argv and into the registers/stack-slots specified by
     // the system ABI.
-    for (ABIArgValTypeIter iter(func.sig().args()); !iter.done(); iter++) {
+    for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
         unsigned argOffset = iter.index() * sizeof(ExportArg);
         Address src(argv, argOffset);
         MIRType type = iter.mirType();
         switch (iter->kind()) {
           case ABIArg::GPR:
             if (type == MIRType::Int32)
                 masm.load32(src, iter->gpr());
             else if (type == MIRType::Int64)
@@ -259,28 +259,28 @@ wasm::GenerateEntry(MacroAssembler& masm
                 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
             }
             break;
         }
     }
 
     // Call into the real function.
     masm.assertStackAlignment(WasmStackAlignment);
-    masm.call(CallSiteDesc(CallSiteDesc::FuncDef), func.funcDefIndex());
+    masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
 
     // Recover the stack pointer value before dynamic alignment.
     masm.loadWasmActivationFromTls(scratch);
     masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
     masm.setFramePushed(FramePushedForEntrySP);
 
     // Recover the 'argv' pointer which was saved before aligning the stack.
     masm.Pop(argv);
 
     // Store the return value in argv[0]
-    switch (func.sig().ret()) {
+    switch (fe.sig().ret()) {
       case ExprType::Void:
         break;
       case ExprType::I32:
         masm.store32(ReturnReg, Address(argv, 0));
         break;
       case ExprType::I64:
         masm.store64(ReturnReg64, Address(argv, 0));
         break;
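For reference, the dynamic alignment a few lines up is plain mask arithmetic:
and-ing sp with ~(WasmStackAlignment - 1) rounds it down to an alignment
boundary, which is safe because the stack grows down. A standalone check, with
an assumed alignment of 16:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uintptr_t WasmStackAlignment = 16; // assumed power of two
        uintptr_t sp = 0x7ffc1237;               // arbitrary unaligned sp
        uintptr_t aligned = sp & ~(WasmStackAlignment - 1);
        assert(aligned % WasmStackAlignment == 0);
        assert(aligned <= sp); // rounded down, never past live stack data
    }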
@@ -349,17 +349,17 @@ FillArgumentArray(MacroAssembler& masm, 
                 MOZ_CRASH("unexpected input type?");
             }
             break;
 #ifdef JS_CODEGEN_REGISTER_PAIR
           case ABIArg::GPR_PAIR:
             if (type == MIRType::Int64)
                 masm.store64(i->gpr64(), dstAddr);
             else
-                MOZ_CRASH("AsmJS uses hardfp for function calls.");
+                MOZ_CRASH("wasm uses hardfp for function calls.");
             break;
 #endif
           case ABIArg::FPU: {
             MOZ_ASSERT(IsFloatingPointType(type));
             FloatRegister srcReg = i->fpu();
             if (type == MIRType::Double) {
                 if (toValue) {
                     // Preserve the NaN pattern in the input.
@@ -432,43 +432,41 @@ FillArgumentArray(MacroAssembler& masm, 
 
 // Generate a stub that is called via the internal ABI derived from the
 // signature of the import and calls into an appropriate callImport C++
 // function, having boxed all the ABI arguments into a homogeneous Value array.
 ProfilingOffsets
 wasm::GenerateInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
                          Label* throwLabel)
 {
-    const Sig& sig = fi.sig();
-
     masm.setFramePushed(0);
 
     // Argument types for Module::callImport_*:
     static const MIRType typeArray[] = { MIRType::Pointer,   // Instance*
                                          MIRType::Pointer,   // funcImportIndex
                                          MIRType::Int32,     // argc
                                          MIRType::Pointer }; // argv
     MIRTypeVector invokeArgTypes;
     MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));
 
     // At the point of the call, the stack layout shall be (sp grows to the left):
     //   | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
     // The padding between stack args and argv ensures that argv is aligned. The
     // padding between argv and retaddr ensures that sp is aligned.
     unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
-    unsigned argBytes = Max<size_t>(1, sig.args().length()) * sizeof(Value);
+    unsigned argBytes = Max<size_t>(1, fi.sig().args().length()) * sizeof(Value);
     unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
 
     ProfilingOffsets offsets;
     GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, &offsets);
 
     // Fill the argument array.
     unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
     Register scratch = ABINonArgReturnReg0;
-    FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
+    FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
 
     // Prepare the arguments for the call to Module::callImport_*.
     ABIArgMIRTypeIter i(invokeArgTypes);
 
     // argument 0: Instance*
     Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
     if (i->kind() == ABIArg::GPR) {
         masm.loadPtr(instancePtr, i->gpr());
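The frame arithmetic above can be checked in isolation: argv must start at a
double-aligned offset past the outgoing stack args, and at least one Value slot
is reserved even for nullary signatures. A sketch with assumed sizes (the
StackArgBytes result and sizeof(Value) here are illustrative, not measured):

    #include <cassert>

    // Round up to the next multiple of a power-of-two alignment, the way
    // the AlignBytes helper is used above.
    static unsigned AlignBytes(unsigned bytes, unsigned align) {
        return (bytes + align - 1) & ~(align - 1);
    }

    int main() {
        unsigned stackArgBytes = 20; // assumed StackArgBytes(invokeArgTypes)
        unsigned argOffset = AlignBytes(stackArgBytes, sizeof(double));
        assert(argOffset == 24);     // argv starts double-aligned

        unsigned argc = 3;           // assumed fi.sig().args().length()
        unsigned sizeOfValue = 8;    // assumed sizeof(JS::Value)
        unsigned argBytes = (argc > 1 ? argc : 1) * sizeOfValue; // Max(1, argc)
        assert(argOffset + argBytes == 48);
    }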
@@ -481,17 +479,17 @@ wasm::GenerateInterpExit(MacroAssembler&
     // argument 1: funcImportIndex
     if (i->kind() == ABIArg::GPR)
         masm.mov(ImmWord(funcImportIndex), i->gpr());
     else
         masm.store32(Imm32(funcImportIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
     i++;
 
     // argument 2: argc
-    unsigned argc = sig.args().length();
+    unsigned argc = fi.sig().args().length();
     if (i->kind() == ABIArg::GPR)
         masm.mov(ImmWord(argc), i->gpr());
     else
         masm.store32(Imm32(argc), Address(masm.getStackPointer(), i->offsetFromArgBase()));
     i++;
 
     // argument 3: argv
     Address argv(masm.getStackPointer(), argOffset);
@@ -501,17 +499,17 @@ wasm::GenerateInterpExit(MacroAssembler&
         masm.computeEffectiveAddress(argv, scratch);
         masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
     }
     i++;
     MOZ_ASSERT(i.done());
 
     // Make the call, test whether it succeeded, and extract the return value.
     AssertStackAlignment(masm, ABIStackAlignment);
-    switch (sig.ret()) {
+    switch (fi.sig().ret()) {
       case ExprType::Void:
         masm.call(SymbolicAddress::CallImport_Void);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         break;
       case ExprType::I32:
         masm.call(SymbolicAddress::CallImport_I32);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.load32(argv, ReturnReg);
@@ -566,29 +564,27 @@ wasm::GenerateInterpExit(MacroAssembler&
 static const unsigned SavedTlsReg = sizeof(void*);
 
 // Generate a stub that is called via the internal ABI derived from the
 // signature of the import and calls into a compatible JIT function,
 // having boxed all the ABI arguments into the JIT stack frame layout.
 ProfilingOffsets
 wasm::GenerateJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLabel)
 {
-    const Sig& sig = fi.sig();
-
     masm.setFramePushed(0);
 
     // JIT calls use the following stack layout (sp grows to the left):
     //   | retaddr | descriptor | callee | argc | this | arg1..N |
     // After the JIT frame, the global register (if present) is saved since the
     // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
     // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
     // the return address.
     static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
     unsigned sizeOfRetAddr = sizeof(void*);
-    unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + sig.args().length()) * sizeof(Value);
+    unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + fi.sig().args().length()) * sizeof(Value);
     unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + SavedTlsReg;
     unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
                               sizeOfRetAddr;
 
     ProfilingOffsets offsets;
     GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, &offsets);
 
     // 1. Descriptor
@@ -609,28 +605,28 @@ wasm::GenerateJitExit(MacroAssembler& ma
     masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
     argOffset += sizeof(size_t);
 
     // 2.3. Load callee executable entry point
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
     masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
 
     // 3. Argc
-    unsigned argc = sig.args().length();
+    unsigned argc = fi.sig().args().length();
     masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
     argOffset += sizeof(size_t);
 
     // 4. |this| value
     masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
     argOffset += sizeof(Value);
 
     // 5. Fill the arguments
     unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(Frame);
-    FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
-    argOffset += sig.args().length() * sizeof(Value);
+    FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
+    argOffset += fi.sig().args().length() * sizeof(Value);
     MOZ_ASSERT(argOffset == jitFrameBytes);
 
     // 6. Jit code will clobber all registers, even non-volatiles. WasmTlsReg
     //    must be kept live for the benefit of the epilogue, so push it on the
     //    stack so that it can be restored before the epilogue.
     static_assert(SavedTlsReg == sizeof(void*), "stack frame accounting");
     masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), jitFrameBytes));
 
@@ -699,17 +695,17 @@ wasm::GenerateJitExit(MacroAssembler& ma
     static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
     masm.reserveStack(sizeOfRetAddr);
     unsigned nativeFramePushed = masm.framePushed();
     AssertStackAlignment(masm, ABIStackAlignment);
 
     masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
 
     Label oolConvert;
-    switch (sig.ret()) {
+    switch (fi.sig().ret()) {
       case ExprType::Void:
         break;
       case ExprType::I32:
         masm.convertValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert,
                                  /* -0 check */ false);
         break;
       case ExprType::I64:
         // We don't expect int64 to be returned from Ion yet, because of a
@@ -769,17 +765,17 @@ wasm::GenerateJitExit(MacroAssembler& ma
             masm.computeEffectiveAddress(argv, scratch);
             masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
         }
         i++;
         MOZ_ASSERT(i.done());
 
         // Call coercion function
         AssertStackAlignment(masm, ABIStackAlignment);
-        switch (sig.ret()) {
+        switch (fi.sig().ret()) {
           case ExprType::I32:
             masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
             masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
             masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
             break;
           case ExprType::F64:
             masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
             masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
--- a/js/src/wasm/WasmStubs.h
+++ b/js/src/wasm/WasmStubs.h
@@ -22,21 +22,21 @@
 #include "wasm/WasmTypes.h"
 
 namespace js {
 
 namespace jit { class MacroAssembler; class Label; }
 
 namespace wasm {
 
-class FuncDefExport;
+class FuncExport;
 class FuncImport;
 
 extern Offsets
-GenerateEntry(jit::MacroAssembler& masm, const FuncDefExport& func);
+GenerateEntry(jit::MacroAssembler& masm, const FuncExport& fe);
 
 extern ProfilingOffsets
 GenerateInterpExit(jit::MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
                    jit::Label* throwLabel);
 
 extern ProfilingOffsets
 GenerateJitExit(jit::MacroAssembler& masm, const FuncImport& fi, jit::Label* throwLabel);
 
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -772,17 +772,17 @@ struct TrapOffset
 // adds the function index of the callee.
 
 class CallSiteDesc
 {
     uint32_t lineOrBytecode_ : 30;
     uint32_t kind_ : 2;
   public:
     enum Kind {
-        FuncDef,   // pc-relative call to a specific function
+        Func,      // pc-relative call to a specific function
         Dynamic,   // dynamic callee called via register
         Symbolic,  // call to a single symbolic callee
         TrapExit   // call to a trap exit
     };
     CallSiteDesc() {}
     explicit CallSiteDesc(Kind kind)
       : lineOrBytecode_(0), kind_(kind)
     {
@@ -828,31 +828,31 @@ WASM_DECLARE_POD_VECTOR(CallSite, CallSi
 class CallSiteAndTarget : public CallSite
 {
     uint32_t index_;
 
   public:
     explicit CallSiteAndTarget(CallSite cs)
       : CallSite(cs)
     {
-        MOZ_ASSERT(cs.kind() != FuncDef);
+        MOZ_ASSERT(cs.kind() != Func);
     }
-    CallSiteAndTarget(CallSite cs, uint32_t funcDefIndex)
-      : CallSite(cs), index_(funcDefIndex)
+    CallSiteAndTarget(CallSite cs, uint32_t funcIndex)
+      : CallSite(cs), index_(funcIndex)
     {
-        MOZ_ASSERT(cs.kind() == FuncDef);
+        MOZ_ASSERT(cs.kind() == Func);
     }
     CallSiteAndTarget(CallSite cs, Trap trap)
       : CallSite(cs),
         index_(uint32_t(trap))
     {
         MOZ_ASSERT(cs.kind() == TrapExit);
     }
 
-    uint32_t funcDefIndex() const { MOZ_ASSERT(kind() == FuncDef); return index_; }
+    uint32_t funcIndex() const { MOZ_ASSERT(kind() == Func); return index_; }
     Trap trap() const { MOZ_ASSERT(kind() == TrapExit); return Trap(index_); }
 };
 
 typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
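CallSiteAndTarget reuses one 32-bit payload for two meanings, discriminated by
the call-site kind: a callee function index for Func sites and a trap code for
TrapExit sites. A standalone model of that tagging (enum values here are
hypothetical):

    #include <cassert>
    #include <cstdint>

    enum class Kind { Func, Dynamic, Symbolic, TrapExit };
    enum class Trap : uint32_t { Unreachable = 0, OutOfBounds = 1 };

    class TaggedSite {
        Kind kind_;
        uint32_t index_; // funcIndex or trap code, depending on kind_
      public:
        TaggedSite(Kind kind, uint32_t payload) : kind_(kind), index_(payload) {}
        uint32_t funcIndex() const { assert(kind_ == Kind::Func); return index_; }
        Trap trap() const { assert(kind_ == Kind::TrapExit); return Trap(index_); }
    };

    int main() {
        TaggedSite call(Kind::Func, 7);
        assert(call.funcIndex() == 7);
        TaggedSite trap(Kind::TrapExit, uint32_t(Trap::OutOfBounds));
        assert(trap.trap() == Trap::OutOfBounds);
    }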
 
 // A wasm::SymbolicAddress represents a pointer to a well-known function or
 // object that is embedded in wasm code. Since wasm code is serialized and
 // later deserialized into a different address space, symbolic addresses must be
@@ -1094,17 +1094,17 @@ struct ExternalTableElem
 // CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
 // This is hoisted into WasmTypes.h for sharing between Ion and Baseline.
 
 class CalleeDesc
 {
   public:
     enum Which {
         // Calls a function defined in the same module by its index.
-        Definition,
+        Func,
 
         // Calls the import identified by the offset of its FuncImportTls in
         // thread-local data.
         Import,
 
         // Calls a WebAssembly table (heterogeneous, index must be bounds
         // checked, callee instance depends on TableDesc).
         WasmTable,
@@ -1118,34 +1118,34 @@ class CalleeDesc
         // Like Builtin, but automatically passes Instance* as first argument.
         BuiltinInstanceMethod
     };
 
   private:
     Which which_;
     union U {
         U() {}
-        uint32_t funcDefIndex_;
+        uint32_t funcIndex_;
         struct {
             uint32_t globalDataOffset_;
         } import;
         struct {
             uint32_t globalDataOffset_;
             bool external_;
             SigIdDesc sigId_;
         } table;
         SymbolicAddress builtin_;
     } u;
 
   public:
     CalleeDesc() {}
-    static CalleeDesc definition(uint32_t funcDefIndex) {
+    static CalleeDesc function(uint32_t funcIndex) {
         CalleeDesc c;
-        c.which_ = Definition;
-        c.u.funcDefIndex_ = funcDefIndex;
+        c.which_ = Func;
+        c.u.funcIndex_ = funcIndex;
         return c;
     }
     static CalleeDesc import(uint32_t globalDataOffset) {
         CalleeDesc c;
         c.which_ = Import;
         c.u.import.globalDataOffset_ = globalDataOffset;
         return c;
     }
@@ -1173,19 +1173,19 @@ class CalleeDesc
         CalleeDesc c;
         c.which_ = BuiltinInstanceMethod;
         c.u.builtin_ = callee;
         return c;
     }
     Which which() const {
         return which_;
     }
-    uint32_t funcDefIndex() const {
-        MOZ_ASSERT(which_ == Definition);
-        return u.funcDefIndex_;
+    uint32_t funcIndex() const {
+        MOZ_ASSERT(which_ == Func);
+        return u.funcIndex_;
     }
     uint32_t importGlobalDataOffset() const {
         MOZ_ASSERT(which_ == Import);
         return u.import.globalDataOffset_;
     }
     bool isTable() const {
         return which_ == WasmTable || which_ == AsmJSTable;
     }
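The renamed factory/accessor pair keeps the usual CalleeDesc discipline:
function() sets which_ to Func and funcIndex() asserts it before touching the
union. A trimmed standalone sketch of just that arm (other arms and all engine
details omitted):

    #include <cassert>
    #include <cstdint>

    class FakeCalleeDesc {
      public:
        enum Which { Func, Import };
      private:
        Which which_;
        union {
            uint32_t funcIndex_;
            uint32_t importGlobalDataOffset_;
        } u;
      public:
        static FakeCalleeDesc function(uint32_t funcIndex) {
            FakeCalleeDesc c;
            c.which_ = Func;
            c.u.funcIndex_ = funcIndex;
            return c;
        }
        Which which() const { return which_; }
        // The accessor asserts the discriminant before reading the union.
        uint32_t funcIndex() const { assert(which_ == Func); return u.funcIndex_; }
    };

    int main() {
        FakeCalleeDesc c = FakeCalleeDesc::function(5);
        assert(c.which() == FakeCalleeDesc::Func);
        assert(c.funcIndex() == 5);
    }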
@@ -1384,12 +1384,24 @@ static const unsigned MaxImports        
 static const unsigned MaxExports                  =       64 * 1024;
 static const unsigned MaxTables                   =        4 * 1024;
 static const unsigned MaxTableElems               =     1024 * 1024;
 static const unsigned MaxDataSegments             =       64 * 1024;
 static const unsigned MaxElemSegments             =       64 * 1024;
 static const unsigned MaxArgsPerFunc              =        4 * 1024;
 static const unsigned MaxBrTableElems             = 4 * 1024 * 1024;
 
+// To be able to assign function indices during compilation, while the number
+// of imports is still unknown, asm.js sets a maximum number of imports so it
+// can hand out function indices for definitions starting at the maximum + 1.
+// This means that there is a "hole" between the last import and the first
+// definition, but that's fine.
+
+static const unsigned AsmJSMaxImports             = 4 * 1024;
+static const unsigned AsmJSFirstDefFuncIndex      = AsmJSMaxImports + 1;
+
+static_assert(AsmJSMaxImports <= MaxImports, "conservative");
+static_assert(AsmJSFirstDefFuncIndex < MaxFuncs, "conservative");
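A worked instance of the layout described in the comment, using the constants
from this patch (the import count is made up; MaxFuncs is defined elsewhere in
WasmTypes.h):

    #include <cassert>

    // Constants copied from the patch.
    static const unsigned AsmJSMaxImports        = 4 * 1024;
    static const unsigned AsmJSFirstDefFuncIndex = AsmJSMaxImports + 1;

    int main() {
        unsigned numImports = 3; // actual import count, known only later
        assert(numImports <= AsmJSMaxImports);

        // Imports occupy [0, numImports); the first definition sits at
        // 4097, leaving [numImports, AsmJSFirstDefFuncIndex) as the
        // harmless "hole" in the index space.
        unsigned defIndex = 0;
        unsigned funcIndex = AsmJSFirstDefFuncIndex + defIndex;
        assert(funcIndex == 4097);
        assert(funcIndex > numImports);
    }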
+
 } // namespace wasm
 } // namespace js
 
 #endif // wasm_types_h