Bug 1292724 - Baldr: remove call_import, add imports to function index space (r=bbouvier)
author: Luke Wagner <luke@mozilla.com>
Tue, 06 Sep 2016 16:47:37 -0500
changeset 312919 94befb88aee28153afdd35ef8a46761cd1a07bc6
parent 312918 dd39ceedb7f08460bc4cb8fe95bde63b7b3f388b
child 312920 3972080f18990d102f1c74319dc5b2c89fabb541
push id: 30665
push user: cbook@mozilla.com
push date: Wed, 07 Sep 2016 15:20:43 +0000
treeherder: mozilla-central@95acb9299faf [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bbouvier
bugs: 1292724
milestone: 51.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1292724 - Baldr: remove call_import, add imports to function index space (r=bbouvier) MozReview-Commit-ID: BOqBB6FW2lV
js/src/asmjs/AsmJS.cpp
js/src/asmjs/WasmBaselineCompile.cpp
js/src/asmjs/WasmCode.cpp
js/src/asmjs/WasmCode.h
js/src/asmjs/WasmCompile.cpp
js/src/asmjs/WasmFrameIterator.cpp
js/src/asmjs/WasmGenerator.cpp
js/src/asmjs/WasmGenerator.h
js/src/asmjs/WasmInstance.cpp
js/src/asmjs/WasmInstance.h
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmIonCompile.h
js/src/asmjs/WasmJS.cpp
js/src/asmjs/WasmJS.h
js/src/asmjs/WasmModule.cpp
js/src/asmjs/WasmModule.h
js/src/asmjs/WasmStubs.cpp
js/src/asmjs/WasmStubs.h
js/src/asmjs/WasmTextToBinary.cpp
js/src/asmjs/WasmTypes.h
js/src/jit-test/tests/wasm/basic.js
js/src/jit-test/tests/wasm/import-export.js
js/src/jit-test/tests/wasm/import-gc.js
js/src/jit-test/tests/wasm/jsapi.js
js/src/jit-test/tests/wasm/profiling.js
js/src/jit-test/tests/wasm/spec/func_ptrs.wast
js/src/jit-test/tests/wasm/spec/start.wast
js/src/jit-test/tests/wasm/start.js
js/src/jit-test/tests/wasm/table-gc.js
js/src/jit-test/tests/wasm/tables.js
js/src/jit/MacroAssembler-inl.h
js/src/jit/MacroAssembler.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/js.msg
js/src/jsfun.h
--- a/js/src/asmjs/AsmJS.cpp
+++ b/js/src/asmjs/AsmJS.cpp
@@ -332,18 +332,20 @@ struct js::AsmJSMetadata : Metadata, Asm
         return scriptSource.get()->mutedErrors();
     }
     const char16_t* displayURL() const override {
         return scriptSource.get()->hasDisplayURL() ? scriptSource.get()->displayURL() : nullptr;
     }
     ScriptSource* maybeScriptSource() const override {
         return scriptSource.get();
     }
-    bool getFuncName(JSContext* cx, const Bytes*, uint32_t funcIndex, TwoByteName* name) const override {
-        const char* p = asmJSFuncNames[funcIndex].get();
+    bool getFuncDefName(JSContext* cx, const Bytes*, uint32_t funcDefIndex,
+                        TwoByteName* name) const override
+    {
+        const char* p = asmJSFuncNames[funcDefIndex].get();
         UTF8Chars utf8(p, strlen(p));
 
         size_t twoByteLength;
         UniqueTwoByteChars chars(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &twoByteLength).get());
         if (!chars)
             return false;
 
         if (!name->growByUninitialized(twoByteLength))
@@ -1770,17 +1772,17 @@ class MOZ_STACK_CLASS ModuleValidator
 
         CompileArgs args;
         if (!args.initFromContext(cx_, Move(scriptedCaller)))
             return false;
 
         auto genData = MakeUnique<ModuleGeneratorData>(ModuleKind::AsmJS);
         if (!genData ||
             !genData->sigs.resize(MaxSigs) ||
-            !genData->funcSigs.resize(MaxFuncs) ||
+            !genData->funcDefSigs.resize(MaxFuncs) ||
             !genData->funcImports.resize(MaxImports) ||
             !genData->tables.resize(MaxTables) ||
             !genData->asmJSSigToTableIndex.resize(MaxSigs))
         {
             return false;
         }
 
         genData->minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
@@ -2063,34 +2065,34 @@ class MOZ_STACK_CLASS ModuleValidator
         if (maybeField)
             fieldChars = StringToNewUTF8CharsZ(cx_, *maybeField);
         else
             fieldChars = DuplicateString("");
         if (!fieldChars)
             return false;
 
         // Declare which function is exported which gives us an index into the
-        // module FuncExportVector.
-        if (!mg_.addFuncExport(Move(fieldChars), func.index()))
+        // module FuncDefExportVector.
+        if (!mg_.addFuncDefExport(Move(fieldChars), mg_.numFuncImports() + func.index()))
             return false;
 
         // The exported function might have already been exported in which case
         // the index will refer into the range of AsmJSExports.
         return asmJSMetadata_->asmJSExports.emplaceBack(func.index(),
                                                         func.srcBegin() - asmJSMetadata_->srcStart,
                                                         func.srcEnd() - asmJSMetadata_->srcStart);
     }
     bool addFunction(PropertyName* name, uint32_t firstUse, Sig&& sig, Func** func) {
         uint32_t sigIndex;
         if (!declareSig(Move(sig), &sigIndex))
             return false;
         uint32_t funcIndex = numFunctions();
         if (funcIndex >= MaxFuncs)
             return failCurrentOffset("too many functions");
-        mg_.initFuncSig(funcIndex, sigIndex);
+        mg_.initFuncDefSig(funcIndex, sigIndex);
         Global* global = validationLifo_.new_<Global>(Global::Function);
         if (!global)
             return false;
         global->u.funcIndex_ = funcIndex;
         if (!globalMap_.putNew(name, global))
             return false;
         *func = validationLifo_.new_<Func>(name, firstUse, funcIndex);
         return *func && functions_.append(*func);
@@ -4654,17 +4656,17 @@ CheckFunctionSignature(ModuleValidator& 
 {
     ModuleValidator::Func* existing = m.lookupFunction(name);
     if (!existing) {
         if (!CheckModuleLevelName(m, usepn, name))
             return false;
         return m.addFunction(name, usepn->pn_pos.begin, Move(sig), func);
     }
 
-    if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcSig(existing->index())))
+    if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcDefSig(existing->index())))
         return false;
 
     *func = existing;
     return true;
 }
 
 static bool
 CheckIsArgType(FunctionValidator& f, ParseNode* argNode, Type type)
@@ -7098,17 +7100,17 @@ CheckFuncPtrTable(ModuleValidator& m, Pa
         if (!elem->isKind(PNK_NAME))
             return m.fail(elem, "function-pointer table's elements must be names of functions");
 
         PropertyName* funcName = elem->name();
         const ModuleValidator::Func* func = m.lookupFunction(funcName);
         if (!func)
             return m.fail(elem, "function-pointer table's elements must be names of functions");
 
-        const Sig& funcSig = m.mg().funcSig(func->index());
+        const Sig& funcSig = m.mg().funcDefSig(func->index());
         if (sig) {
             if (*sig != funcSig)
                 return m.fail(elem, "all functions in table must have same signature");
         } else {
             sig = &funcSig;
         }
 
         if (!elemFuncIndices.append(func->index()))
@@ -8783,17 +8785,17 @@ js::AsmJSModuleToString(JSContext* cx, H
 }
 
 JSString*
 js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
 {
     MOZ_ASSERT(IsAsmJSFunction(fun));
 
     const AsmJSMetadata& metadata = ExportedFunctionToInstance(fun).metadata().asAsmJS();
-    const AsmJSExport& f = metadata.lookupAsmJSExport(ExportedFunctionToIndex(fun));
+    const AsmJSExport& f = metadata.lookupAsmJSExport(ExportedFunctionToDefinitionIndex(fun));
 
     uint32_t begin = metadata.srcStart + f.startOffsetInModule();
     uint32_t end = metadata.srcStart + f.endOffsetInModule();
 
     ScriptSource* source = metadata.scriptSource.get();
     StringBuffer out(cx);
 
     if (!out.append("function "))
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -1782,18 +1782,18 @@ class BaseCompiler
 
     //////////////////////////////////////////////////////////////////////
     //
     // Function prologue and epilogue.
 
     void beginFunction() {
         JitSpew(JitSpew_Codegen, "# Emitting wasm baseline code");
 
-        wasm::GenerateFunctionPrologue(masm, localSize_, mg_.funcSigs[func_.index()]->id,
-                                       &compileResults_.offsets());
+        SigIdDesc sigId = mg_.funcDefSigs[func_.defIndex()]->id;
+        wasm::GenerateFunctionPrologue(masm, localSize_, sigId, &compileResults_.offsets());
 
         MOZ_ASSERT(masm.framePushed() == uint32_t(localSize_));
 
         maxFramePushed_ = localSize_;
 
         // We won't know until after we've generated code how big the
         // frame will be (we may need arbitrary spill slots and
         // outgoing param slots) so branch to code emitted after the
@@ -2117,20 +2117,20 @@ class BaseCompiler
             }
             break;
           }
           default:
             MOZ_CRASH("Function argument type");
         }
     }
 
-    void callDirect(uint32_t calleeIndex, const FunctionCall& call)
+    void callDefinition(uint32_t funcDefIndex, const FunctionCall& call)
     {
         CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Relative);
-        masm.call(desc, calleeIndex);
+        masm.call(desc, funcDefIndex);
     }
 
     void callSymbolic(wasm::SymbolicAddress callee, const FunctionCall& call) {
         CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
         masm.call(callee);
     }
 
     // Precondition: sync()
@@ -3367,22 +3367,24 @@ class BaseCompiler
     bool emitBrTable();
     MOZ_MUST_USE
     bool emitReturn();
     MOZ_MUST_USE
     bool emitCallArgs(const ValTypeVector& args, FunctionCall& baselineCall);
     MOZ_MUST_USE
     bool skipCall(const ValTypeVector& args, ExprType maybeReturnType = ExprType::Limit);
     MOZ_MUST_USE
+    bool emitCallImportCommon(uint32_t lineOrBytecode, uint32_t funcImportIndex);
+    MOZ_MUST_USE
     bool emitCall(uint32_t callOffset);
     MOZ_MUST_USE
+    bool emitCallImport(uint32_t callOffset);
+    MOZ_MUST_USE
     bool emitCallIndirect(uint32_t callOffset);
     MOZ_MUST_USE
-    bool emitCallImport(uint32_t callOffset);
-    MOZ_MUST_USE
     bool emitUnaryMathBuiltinCall(uint32_t callOffset, SymbolicAddress callee, ValType operandType);
     MOZ_MUST_USE
     bool emitBinaryMathBuiltinCall(uint32_t callOffset, SymbolicAddress callee, ValType operandType);
     MOZ_MUST_USE
     bool emitGetLocal();
     MOZ_MUST_USE
     bool emitSetLocal();
     MOZ_MUST_USE
@@ -5253,26 +5255,71 @@ BaseCompiler::pushReturned(const Functio
 // lightweight sync.
 //
 // Even some of the pushing may be unnecessary if the registers
 // will be consumed by the call, because then what we want is
 // parallel assignment to the argument registers or onto the stack
 // for outgoing arguments.  A sync() is just simpler.
 
 bool
+BaseCompiler::emitCallImportCommon(uint32_t lineOrBytecode, uint32_t funcImportIndex)
+{
+    const FuncImportGenDesc& funcImport = mg_.funcImports[funcImportIndex];
+    const Sig& sig = *funcImport.sig;
+
+    if (deadCode_)
+        return skipCall(sig.args(), sig.ret());
+
+    sync();
+
+    uint32_t numArgs = sig.args().length();
+    size_t stackSpace = stackConsumed(numArgs);
+
+    FunctionCall baselineCall(lineOrBytecode);
+    beginCall(baselineCall, EscapesSandbox(true), IsBuiltinCall(false));
+
+    if (!emitCallArgs(sig.args(), baselineCall))
+        return false;
+
+    if (!iter_.readCallReturn(sig.ret()))
+        return false;
+
+    callImport(funcImport.globalDataOffset, baselineCall);
+
+    endCall(baselineCall);
+
+    // TODO / OPTIMIZE: It would be better to merge this freeStack()
+    // into the one in endCall, if we can.
+
+    popValueStackBy(numArgs);
+    masm.freeStack(stackSpace);
+
+    pushReturned(baselineCall, sig.ret());
+
+    return true;
+}
+
+bool
 BaseCompiler::emitCall(uint32_t callOffset)
 {
     uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);
 
     uint32_t calleeIndex;
     uint32_t arity;
     if (!iter_.readCall(&calleeIndex, &arity))
         return false;
 
-    const Sig& sig = *mg_.funcSigs[calleeIndex];
+    // For asm.js and old-format wasm code, imports are not part of the function
+    // index space so in these cases firstFuncDefIndex is fixed to 0, even if
+    // there are function imports.
+    if (calleeIndex < mg_.firstFuncDefIndex)
+        return emitCallImportCommon(lineOrBytecode, calleeIndex);
+
+    uint32_t funcDefIndex = calleeIndex - mg_.firstFuncDefIndex;
+    const Sig& sig = *mg_.funcDefSigs[funcDefIndex];
 
     if (deadCode_)
         return skipCall(sig.args(), sig.ret());
 
     sync();
 
     uint32_t numArgs = sig.args().length();
     size_t stackSpace = stackConsumed(numArgs);
@@ -5281,32 +5328,47 @@ BaseCompiler::emitCall(uint32_t callOffs
     beginCall(baselineCall, EscapesSandbox(false), IsBuiltinCall(false));
 
     if (!emitCallArgs(sig.args(), baselineCall))
         return false;
 
     if (!iter_.readCallReturn(sig.ret()))
         return false;
 
-    callDirect(calleeIndex, baselineCall);
+    callDefinition(funcDefIndex, baselineCall);
 
     endCall(baselineCall);
 
     // TODO / OPTIMIZE: It would be better to merge this freeStack()
     // into the one in endCall, if we can.
 
     popValueStackBy(numArgs);
     masm.freeStack(stackSpace);
 
     pushReturned(baselineCall, sig.ret());
 
     return true;
 }
 
 bool
+BaseCompiler::emitCallImport(uint32_t callOffset)
+{
+    MOZ_ASSERT(!mg_.firstFuncDefIndex);
+
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);
+
+    uint32_t funcImportIndex;
+    uint32_t arity;
+    if (!iter_.readCallImport(&funcImportIndex, &arity))
+        return false;
+
+    return emitCallImportCommon(lineOrBytecode, funcImportIndex);
+}
+
+bool
 BaseCompiler::emitCallIndirect(uint32_t callOffset)
 {
     uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);
 
     uint32_t sigIndex;
     uint32_t arity;
     if (!iter_.readCallIndirect(&sigIndex, &arity))
         return false;
@@ -5352,61 +5414,16 @@ BaseCompiler::emitCallIndirect(uint32_t 
     masm.freeStack(stackSpace);
 
     pushReturned(baselineCall, sig.ret());
 
     return true;
 }
 
 bool
-BaseCompiler::emitCallImport(uint32_t callOffset)
-{
-    uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);
-
-    uint32_t funcImportIndex;
-    uint32_t arity;
-    if (!iter_.readCallImport(&funcImportIndex, &arity))
-        return false;
-
-    const FuncImportGenDesc& funcImport = mg_.funcImports[funcImportIndex];
-    const Sig& sig = *funcImport.sig;
-
-    if (deadCode_)
-        return skipCall(sig.args(), sig.ret());
-
-    sync();
-
-    uint32_t numArgs = sig.args().length();
-    size_t stackSpace = stackConsumed(numArgs);
-
-    FunctionCall baselineCall(lineOrBytecode);
-    beginCall(baselineCall, EscapesSandbox(true), IsBuiltinCall(false));
-
-    if (!emitCallArgs(sig.args(), baselineCall))
-        return false;
-
-    if (!iter_.readCallReturn(sig.ret()))
-        return false;
-
-    callImport(funcImport.globalDataOffset, baselineCall);
-
-    endCall(baselineCall);
-
-    // TODO / OPTIMIZE: It would be better to merge this freeStack()
-    // into the one in endCall, if we can.
-
-    popValueStackBy(numArgs);
-    masm.freeStack(stackSpace);
-
-    pushReturned(baselineCall, sig.ret());
-
-    return true;
-}
-
-bool
 BaseCompiler::emitUnaryMathBuiltinCall(uint32_t callOffset, SymbolicAddress callee,
                                        ValType operandType)
 {
     if (deadCode_) {
         switch (operandType) {
           case ValType::F64:
             return skipCall(SigD_, ExprType::F64);
           case ValType::F32:
--- a/js/src/asmjs/WasmCode.cpp
+++ b/js/src/asmjs/WasmCode.cpp
@@ -147,17 +147,17 @@ SendCodeRangesToProfiler(JSContext* cx, 
         if (!codeRange.isFunction())
             continue;
 
         uintptr_t start = uintptr_t(cs.base() + codeRange.begin());
         uintptr_t end = uintptr_t(cs.base() + codeRange.end());
         uintptr_t size = end - start;
 
         TwoByteName name(cx);
-        if (!metadata.getFuncName(cx, &bytecode, codeRange.funcIndex(), &name))
+        if (!metadata.getFuncDefName(cx, &bytecode, codeRange.funcDefIndex(), &name))
             return false;
 
         UniqueChars chars(
             (char*)JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, name.begin(), name.length()).get());
         if (!chars)
             return false;
 
         // Avoid "unused" warnings
@@ -254,40 +254,40 @@ CodeSegment::~CodeSegment()
     MOZ_ASSERT(wasmCodeAllocations > 0);
     wasmCodeAllocations--;
 
     MOZ_ASSERT(totalLength() > 0);
     DeallocateExecutableMemory(bytes_, totalLength(), gc::SystemPageSize());
 }
 
 size_t
-FuncExport::serializedSize() const
+FuncDefExport::serializedSize() const
 {
     return sig_.serializedSize() +
            sizeof(pod);
 }
 
 uint8_t*
-FuncExport::serialize(uint8_t* cursor) const
+FuncDefExport::serialize(uint8_t* cursor) const
 {
     cursor = sig_.serialize(cursor);
     cursor = WriteBytes(cursor, &pod, sizeof(pod));
     return cursor;
 }
 
 const uint8_t*
-FuncExport::deserialize(const uint8_t* cursor)
+FuncDefExport::deserialize(const uint8_t* cursor)
 {
     (cursor = sig_.deserialize(cursor)) &&
     (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
     return cursor;
 }
 
 size_t
-FuncExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+FuncDefExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return sig_.sizeOfExcludingThis(mallocSizeOf);
 }
 
 size_t
 FuncImport::serializedSize() const
 {
     return sig_.serializedSize() +
@@ -315,52 +315,52 @@ FuncImport::sizeOfExcludingThis(MallocSi
 {
     return sig_.sizeOfExcludingThis(mallocSizeOf);
 }
 
 CodeRange::CodeRange(Kind kind, Offsets offsets)
   : begin_(offsets.begin),
     profilingReturn_(0),
     end_(offsets.end),
-    funcIndex_(0),
+    funcDefIndex_(0),
     funcLineOrBytecode_(0),
     funcBeginToTableEntry_(0),
     funcBeginToTableProfilingJump_(0),
     funcBeginToNonProfilingEntry_(0),
     funcProfilingJumpToProfilingReturn_(0),
     funcProfilingEpilogueToProfilingReturn_(0),
     kind_(kind)
 {
     MOZ_ASSERT(begin_ <= end_);
     MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == CallThunk);
 }
 
 CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
   : begin_(offsets.begin),
     profilingReturn_(offsets.profilingReturn),
     end_(offsets.end),
-    funcIndex_(0),
+    funcDefIndex_(0),
     funcLineOrBytecode_(0),
     funcBeginToTableEntry_(0),
     funcBeginToTableProfilingJump_(0),
     funcBeginToNonProfilingEntry_(0),
     funcProfilingJumpToProfilingReturn_(0),
     funcProfilingEpilogueToProfilingReturn_(0),
     kind_(kind)
 {
     MOZ_ASSERT(begin_ < profilingReturn_);
     MOZ_ASSERT(profilingReturn_ < end_);
     MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit);
 }
 
-CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
+CodeRange::CodeRange(uint32_t funcDefIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
   : begin_(offsets.begin),
     profilingReturn_(offsets.profilingReturn),
     end_(offsets.end),
-    funcIndex_(funcIndex),
+    funcDefIndex_(funcDefIndex),
     funcLineOrBytecode_(funcLineOrBytecode),
     funcBeginToTableEntry_(offsets.tableEntry - begin_),
     funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
     funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
     funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
     funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
     kind_(Function)
 {
@@ -419,17 +419,17 @@ CacheableChars::sizeOfExcludingThis(Mall
     return mallocSizeOf(get());
 }
 
 size_t
 Metadata::serializedSize() const
 {
     return sizeof(pod()) +
            SerializedVectorSize(funcImports) +
-           SerializedVectorSize(funcExports) +
+           SerializedVectorSize(funcDefExports) +
            SerializedVectorSize(sigIds) +
            SerializedPodVectorSize(globals) +
            SerializedPodVectorSize(tables) +
            SerializedPodVectorSize(memoryAccesses) +
            SerializedPodVectorSize(boundsChecks) +
            SerializedPodVectorSize(codeRanges) +
            SerializedPodVectorSize(callSites) +
            SerializedPodVectorSize(callThunks) +
@@ -438,17 +438,17 @@ Metadata::serializedSize() const
            assumptions.serializedSize();
 }
 
 uint8_t*
 Metadata::serialize(uint8_t* cursor) const
 {
     cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
     cursor = SerializeVector(cursor, funcImports);
-    cursor = SerializeVector(cursor, funcExports);
+    cursor = SerializeVector(cursor, funcDefExports);
     cursor = SerializeVector(cursor, sigIds);
     cursor = SerializePodVector(cursor, globals);
     cursor = SerializePodVector(cursor, tables);
     cursor = SerializePodVector(cursor, memoryAccesses);
     cursor = SerializePodVector(cursor, boundsChecks);
     cursor = SerializePodVector(cursor, codeRanges);
     cursor = SerializePodVector(cursor, callSites);
     cursor = SerializePodVector(cursor, callThunks);
@@ -458,17 +458,17 @@ Metadata::serialize(uint8_t* cursor) con
     return cursor;
 }
 
 /* static */ const uint8_t*
 Metadata::deserialize(const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
     (cursor = DeserializeVector(cursor, &funcImports)) &&
-    (cursor = DeserializeVector(cursor, &funcExports)) &&
+    (cursor = DeserializeVector(cursor, &funcDefExports)) &&
     (cursor = DeserializeVector(cursor, &sigIds)) &&
     (cursor = DeserializePodVector(cursor, &globals)) &&
     (cursor = DeserializePodVector(cursor, &tables)) &&
     (cursor = DeserializePodVector(cursor, &memoryAccesses)) &&
     (cursor = DeserializePodVector(cursor, &boundsChecks)) &&
     (cursor = DeserializePodVector(cursor, &codeRanges)) &&
     (cursor = DeserializePodVector(cursor, &callSites)) &&
     (cursor = DeserializePodVector(cursor, &callThunks)) &&
@@ -477,57 +477,60 @@ Metadata::deserialize(const uint8_t* cur
     (cursor = assumptions.deserialize(cursor));
     return cursor;
 }
 
 size_t
 Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
-           SizeOfVectorExcludingThis(funcExports, mallocSizeOf) +
+           SizeOfVectorExcludingThis(funcDefExports, mallocSizeOf) +
            SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
            globals.sizeOfExcludingThis(mallocSizeOf) +
            tables.sizeOfExcludingThis(mallocSizeOf) +
            memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
            boundsChecks.sizeOfExcludingThis(mallocSizeOf) +
            codeRanges.sizeOfExcludingThis(mallocSizeOf) +
            callSites.sizeOfExcludingThis(mallocSizeOf) +
            callThunks.sizeOfExcludingThis(mallocSizeOf) +
            funcNames.sizeOfExcludingThis(mallocSizeOf) +
            filename.sizeOfExcludingThis(mallocSizeOf) +
            assumptions.sizeOfExcludingThis(mallocSizeOf);
 }
 
-struct ProjectFuncIndex
+struct ProjectIndex
 {
-    const FuncExportVector& funcExports;
-    explicit ProjectFuncIndex(const FuncExportVector& funcExports) : funcExports(funcExports) {}
+    const FuncDefExportVector& funcDefExports;
+
+    explicit ProjectIndex(const FuncDefExportVector& funcDefExports)
+      : funcDefExports(funcDefExports)
+    {}
     uint32_t operator[](size_t index) const {
-        return funcExports[index].funcIndex();
+        return funcDefExports[index].funcDefIndex();
     }
 };
 
-const FuncExport&
-Metadata::lookupFuncExport(uint32_t funcIndex) const
+const FuncDefExport&
+Metadata::lookupFuncDefExport(uint32_t funcDefIndex) const
 {
     size_t match;
-    if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(), funcIndex, &match))
+    if (!BinarySearch(ProjectIndex(funcDefExports), 0, funcDefExports.length(), funcDefIndex, &match))
         MOZ_CRASH("missing function export");
 
-    return funcExports[match];
+    return funcDefExports[match];
 }
 
 bool
-Metadata::getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
-                      TwoByteName* name) const
+Metadata::getFuncDefName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcDefIndex,
+                         TwoByteName* name) const
 {
-    if (funcIndex < funcNames.length()) {
+    if (funcDefIndex < funcNames.length()) {
         MOZ_ASSERT(maybeBytecode, "NameInBytecode requires preserved bytecode");
 
-        const NameInBytecode& n = funcNames[funcIndex];
+        const NameInBytecode& n = funcNames[funcDefIndex];
         MOZ_ASSERT(n.offset + n.length < maybeBytecode->length());
 
         if (n.length == 0)
             goto invalid;
 
         UTF8Chars utf8((const char*)maybeBytecode->begin() + n.offset, n.length);
 
         // This code could be optimized by having JS::UTF8CharsToNewTwoByteCharsZ
@@ -543,17 +546,17 @@ Metadata::getFuncName(JSContext* cx, con
         PodCopy(name->begin(), chars.get(), twoByteLength);
         return true;
     }
 
   invalid:
 
     // For names that are out of range or invalid, synthesize a name.
 
-    UniqueChars chars(JS_smprintf("wasm-function[%u]", funcIndex));
+    UniqueChars chars(JS_smprintf("wasm-function[%u]", funcDefIndex));
     if (!chars) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     if (!name->growByUninitialized(strlen(chars.get())))
         return false;
 
@@ -630,27 +633,27 @@ Code::lookupMemoryAccess(void* pc) const
     if (!BinarySearch(MemoryAccessOffset(metadata_->memoryAccesses), lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &metadata_->memoryAccesses[match];
 }
 #endif
 
 bool
-Code::getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const
+Code::getFuncDefName(JSContext* cx, uint32_t funcDefIndex, TwoByteName* name) const
 {
     const Bytes* maybeBytecode = maybeBytecode_ ? &maybeBytecode_.get()->bytes : nullptr;
-    return metadata_->getFuncName(cx, maybeBytecode, funcIndex, name);
+    return metadata_->getFuncDefName(cx, maybeBytecode, funcDefIndex, name);
 }
 
 JSAtom*
-Code::getFuncAtom(JSContext* cx, uint32_t funcIndex) const
+Code::getFuncDefAtom(JSContext* cx, uint32_t funcDefIndex) const
 {
     TwoByteName name(cx);
-    if (!getFuncName(cx, funcIndex, &name))
+    if (!getFuncDefName(cx, funcDefIndex, &name))
         return nullptr;
 
     return AtomizeChars(cx, name.begin(), name.length());
 }
 
 const char experimentalWarning[] =
     "Temporary\n"
     ".--.      .--.   ____       .-'''-. ,---.    ,---.\n"
@@ -762,35 +765,35 @@ Code::ensureProfilingState(JSContext* cx
     // do it now since, once we start sampling, we'll be in a signal-handing
     // context where we cannot malloc.
     if (newProfilingEnabled) {
         for (const CodeRange& codeRange : metadata_->codeRanges) {
             if (!codeRange.isFunction())
                 continue;
 
             TwoByteName name(cx);
-            if (!getFuncName(cx, codeRange.funcIndex(), &name))
+            if (!getFuncDefName(cx, codeRange.funcDefIndex(), &name))
                 return false;
             if (!name.append('\0'))
                 return false;
 
             UniqueChars label(JS_smprintf("%hs (%s:%u)",
                                           name.begin(),
                                           metadata_->filename.get(),
                                           codeRange.funcLineOrBytecode()));
             if (!label) {
                 ReportOutOfMemory(cx);
                 return false;
             }
 
-            if (codeRange.funcIndex() >= funcLabels_.length()) {
-                if (!funcLabels_.resize(codeRange.funcIndex() + 1))
+            if (codeRange.funcDefIndex() >= funcLabels_.length()) {
+                if (!funcLabels_.resize(codeRange.funcDefIndex() + 1))
                     return false;
             }
-            funcLabels_[codeRange.funcIndex()] = Move(label);
+            funcLabels_[codeRange.funcDefIndex()] = Move(label);
         }
     } else {
         funcLabels_.clear();
     }
 
     // Only mutate the code after the fallible operations are complete to avoid
     // the need to rollback.
     profilingEnabled_ = newProfilingEnabled;
--- a/js/src/asmjs/WasmCode.h
+++ b/js/src/asmjs/WasmCode.h
@@ -114,65 +114,66 @@ struct ShareableBytes : ShareableBase<Sh
     const uint8_t* end() const { return bytes.end(); }
     size_t length() const { return bytes.length(); }
     bool append(const uint8_t *p, uint32_t ct) { return bytes.append(p, ct); }
 };
 
 typedef RefPtr<ShareableBytes> MutableBytes;
 typedef RefPtr<const ShareableBytes> SharedBytes;
 
-// A FuncExport represents a single function inside a wasm Module that has been
-// exported one or more times. A FuncExport represents an internal entry point
-// that can be called via function-index by Instance::callExport(). To allow
-// O(log(n)) lookup of a FuncExport by function-index, the FuncExportVector
-// is stored sorted by function index.
+// A FuncDefExport represents a single function definition inside a wasm Module
+// that has been exported one or more times. A FuncDefExport represents an
+// internal entry point that can be called via function definition index by
+// Instance::callExport(). To allow O(log(n)) lookup of a FuncDefExport by
+// function definition index, the FuncDefExportVector is stored sorted by
+// function definition index.
 
-class FuncExport
+class FuncDefExport
 {
     Sig sig_;
     MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
-        uint32_t funcIndex_;
+        uint32_t funcDefIndex_;
         uint32_t codeRangeIndex_;
         uint32_t entryOffset_;
     } pod;
 
   public:
-    FuncExport() = default;
-    explicit FuncExport(Sig&& sig,
-                        uint32_t funcIndex,
-                        uint32_t codeRangeIndex)
+    FuncDefExport() = default;
+    explicit FuncDefExport(Sig&& sig,
+                           uint32_t funcDefIndex,
+                           uint32_t codeRangeIndex)
       : sig_(Move(sig))
     {
-        pod.funcIndex_ = funcIndex;
+        pod.funcDefIndex_ = funcDefIndex;
         pod.codeRangeIndex_ = codeRangeIndex;
         pod.entryOffset_ = UINT32_MAX;
     }
     void initEntryOffset(uint32_t entryOffset) {
         MOZ_ASSERT(pod.entryOffset_ == UINT32_MAX);
         pod.entryOffset_ = entryOffset;
     }
 
     const Sig& sig() const {
         return sig_;
     }
-    uint32_t funcIndex() const {
-        return pod.funcIndex_;
+    uint32_t funcDefIndex() const {
+        return pod.funcDefIndex_;
     }
     uint32_t codeRangeIndex() const {
         return pod.codeRangeIndex_;
     }
     uint32_t entryOffset() const {
         MOZ_ASSERT(pod.entryOffset_ != UINT32_MAX);
         return pod.entryOffset_;
     }
 
-    WASM_DECLARE_SERIALIZABLE(FuncExport)
+    WASM_DECLARE_SERIALIZABLE(FuncDefExport)
 };
 
-typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
+typedef Vector<FuncDefExport, 0, SystemAllocPolicy> FuncDefExportVector;
 
 // An FuncImport contains the runtime metadata needed to implement a call to an
 // imported function. Each function import has two call stubs: an optimized path
 // into JIT code and a slow path into the generic C++ js::Invoke and these
 // offsets of these stubs are stored so that function-import callsites can be
 // dynamically patched at runtime.
 
 class FuncImport
@@ -233,30 +234,30 @@ class CodeRange
   public:
     enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Inline, CallThunk };
 
   private:
     // All fields are treated as cacheable POD:
     uint32_t begin_;
     uint32_t profilingReturn_;
     uint32_t end_;
-    uint32_t funcIndex_;
+    uint32_t funcDefIndex_;
     uint32_t funcLineOrBytecode_;
     uint8_t funcBeginToTableEntry_;
     uint8_t funcBeginToTableProfilingJump_;
     uint8_t funcBeginToNonProfilingEntry_;
     uint8_t funcProfilingJumpToProfilingReturn_;
     uint8_t funcProfilingEpilogueToProfilingReturn_;
     Kind kind_ : 8;
 
   public:
     CodeRange() = default;
     CodeRange(Kind kind, Offsets offsets);
     CodeRange(Kind kind, ProfilingOffsets offsets);
-    CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
+    CodeRange(uint32_t funcDefIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
 
     // All CodeRanges have a begin and end.
 
     uint32_t begin() const {
         return begin_;
     }
     uint32_t end() const {
         return end_;
@@ -308,19 +309,19 @@ class CodeRange
     uint32_t funcProfilingJump() const {
         MOZ_ASSERT(isFunction());
         return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
     }
     uint32_t funcProfilingEpilogue() const {
         MOZ_ASSERT(isFunction());
         return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
     }
-    uint32_t funcIndex() const {
+    uint32_t funcDefIndex() const {
         MOZ_ASSERT(isFunction());
-        return funcIndex_;
+        return funcDefIndex_;
     }
     uint32_t funcLineOrBytecode() const {
         MOZ_ASSERT(isFunction());
         return funcLineOrBytecode_;
     }
 
     // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
 
@@ -342,21 +343,21 @@ WASM_DECLARE_POD_VECTOR(CodeRange, CodeR
 // patched at runtime when profiling is toggled. Thunks are emitted to connect
 // callsites that are too far away from callees to fit in a single call
 // instruction's relative offset.
 
 struct CallThunk
 {
     uint32_t offset;
     union {
-        uint32_t funcIndex;
+        uint32_t funcDefIndex;
         uint32_t codeRangeIndex;
     } u;
 
-    CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
+    CallThunk(uint32_t offset, uint32_t funcDefIndex) : offset(offset) { u.funcDefIndex = funcDefIndex; }
     CallThunk() = default;
 };
 
 WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
 
 // CacheableChars is used to cacheably store UniqueChars.
 
 struct CacheableChars : UniqueChars
@@ -444,33 +445,33 @@ struct Metadata : ShareableBase<Metadata
 {
     explicit Metadata(ModuleKind kind = ModuleKind::Wasm) : MetadataCacheablePod(kind) {}
     virtual ~Metadata() {}
 
     MetadataCacheablePod& pod() { return *this; }
     const MetadataCacheablePod& pod() const { return *this; }
 
     FuncImportVector      funcImports;
-    FuncExportVector      funcExports;
+    FuncDefExportVector   funcDefExports;
     SigWithIdVector       sigIds;
     GlobalDescVector      globals;
     TableDescVector       tables;
     MemoryAccessVector    memoryAccesses;
     BoundsCheckVector     boundsChecks;
     CodeRangeVector       codeRanges;
     CallSiteVector        callSites;
     CallThunkVector       callThunks;
     NameInBytecodeVector  funcNames;
     CacheableChars        filename;
     Assumptions           assumptions;
 
     bool usesMemory() const { return UsesMemory(memoryUsage); }
     bool hasSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
 
-    const FuncExport& lookupFuncExport(uint32_t funcIndex) const;
+    const FuncDefExport& lookupFuncDefExport(uint32_t funcDefIndex) const;
 
     // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
     // encapsulated within AsmJS.cpp, but the additional virtual functions allow
     // asm.js to override wasm behavior in the handful of cases that can't be
     // easily encapsulated by AsmJS.cpp.
 
     bool isAsmJS() const {
         return kind == ModuleKind::AsmJS;
@@ -483,18 +484,18 @@ struct Metadata : ShareableBase<Metadata
         return false;
     }
     virtual const char16_t* displayURL() const {
         return nullptr;
     }
     virtual ScriptSource* maybeScriptSource() const {
         return nullptr;
     }
-    virtual bool getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
-                             TwoByteName* name) const;
+    virtual bool getFuncDefName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcDefIndex,
+                                TwoByteName* name) const;
 
     WASM_DECLARE_SERIALIZABLE_VIRTUAL(Metadata);
 };
 
 typedef RefPtr<Metadata> MutableMetadata;
 typedef RefPtr<const Metadata> SharedMetadata;
 
 // Code objects own executable code and the metadata that describes it. At the
@@ -525,35 +526,35 @@ class Code
     const CodeRange* lookupRange(void* pc) const;
 #ifdef WASM_HUGE_MEMORY
     const MemoryAccess* lookupMemoryAccess(void* pc) const;
 #endif
 
     // Return the name associated with a given function index, or generate one
     // if none was given by the module.
 
-    bool getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const;
-    JSAtom* getFuncAtom(JSContext* cx, uint32_t funcIndex) const;
+    bool getFuncDefName(JSContext* cx, uint32_t funcDefIndex, TwoByteName* name) const;
+    JSAtom* getFuncDefAtom(JSContext* cx, uint32_t funcDefIndex) const;
 
     // If the source bytecode was saved when this Code was constructed, this
     // method will render the binary as text. Otherwise, a diagnostic string
     // will be returned.
 
     JSString* createText(JSContext* cx);
     bool getLineOffsets(size_t lineno, Vector<uint32_t>& offsets) const;
 
     // Each Code has a profiling mode that is updated to match the runtime's
     // profiling mode when there are no other activations of the code live on
     // the stack. Once in profiling mode, ProfilingFrameIterator can be used to
     // asynchronously walk the stack. Otherwise, the ProfilingFrameIterator will
     // skip any activations of this code.
 
     MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
     bool profilingEnabled() const { return profilingEnabled_; }
-    const char* profilingLabel(uint32_t funcIndex) const { return funcLabels_[funcIndex].get(); }
+    const char* profilingLabel(uint32_t funcDefIndex) const { return funcLabels_[funcDefIndex].get(); }
 
     // about:memory reporting:
 
     void addSizeOfMisc(MallocSizeOf mallocSizeOf,
                        Metadata::SeenSet* seenMetadata,
                        ShareableBytes::SeenSet* seenBytes,
                        size_t* code,
                        size_t* data) const;
--- a/js/src/asmjs/WasmCompile.cpp
+++ b/js/src/asmjs/WasmCompile.cpp
@@ -51,30 +51,38 @@ struct ValidatingPolicy : ExprIterPolicy
 
 typedef ExprIter<ValidatingPolicy> ValidatingExprIter;
 
 class FunctionDecoder
 {
     const ModuleGenerator& mg_;
     const ValTypeVector& locals_;
     ValidatingExprIter iter_;
+    bool newFormat_;
 
   public:
-    FunctionDecoder(const ModuleGenerator& mg, const ValTypeVector& locals, Decoder& d)
-      : mg_(mg), locals_(locals), iter_(d)
+    FunctionDecoder(const ModuleGenerator& mg, const ValTypeVector& locals, Decoder& d, bool newFormat)
+      : mg_(mg), locals_(locals), iter_(d), newFormat_(newFormat)
     {}
     const ModuleGenerator& mg() const { return mg_; }
     ValidatingExprIter& iter() { return iter_; }
     const ValTypeVector& locals() const { return locals_; }
+    bool newFormat() const { return newFormat_; }
 
     bool checkHasMemory() {
         if (!mg().usesMemory())
             return iter().fail("can't touch memory without memory");
         return true;
     }
+
+    bool checkIsOldFormat() {
+        if (newFormat_)
+            return iter().fail("opcode no longer in new format");
+        return true;
+    }
 };
 
 } // end anonymous namespace
 
 static bool
 CheckValType(Decoder& d, ValType type)
 {
     switch (type) {
@@ -118,22 +126,31 @@ DecodeCallReturn(FunctionDecoder& f, con
 static bool
 DecodeCall(FunctionDecoder& f)
 {
     uint32_t calleeIndex;
     uint32_t arity;
     if (!f.iter().readCall(&calleeIndex, &arity))
         return false;
 
-    if (calleeIndex >= f.mg().numFuncSigs())
-        return f.iter().fail("callee index out of range");
+    const Sig* sig;
+    if (f.newFormat()) {
+        if (calleeIndex >= f.mg().numFuncs())
+            return f.iter().fail("callee index out of range");
 
-    const Sig& sig = f.mg().funcSig(calleeIndex);
-    return DecodeCallArgs(f, arity, sig) &&
-           DecodeCallReturn(f, sig);
+        sig = &f.mg().funcSig(calleeIndex);
+    } else {
+        if (calleeIndex >= f.mg().numFuncDefs())
+            return f.iter().fail("callee index out of range");
+
+        sig = &f.mg().funcDefSig(calleeIndex);
+    }
+
+    return DecodeCallArgs(f, arity, *sig) &&
+           DecodeCallReturn(f, *sig);
 }
 
 static bool
 DecodeCallIndirect(FunctionDecoder& f)
 {
     if (!f.mg().numTables())
         return f.iter().fail("can't call_indirect without a table");
 
@@ -199,17 +216,18 @@ DecodeExpr(FunctionDecoder& f)
     switch (expr) {
       case Expr::Nop:
         return f.iter().readNullary(ExprType::Void);
       case Expr::Call:
         return DecodeCall(f);
       case Expr::CallIndirect:
         return DecodeCallIndirect(f);
       case Expr::CallImport:
-        return DecodeCallImport(f);
+        return f.checkIsOldFormat() &&
+               DecodeCallImport(f);
       case Expr::I32Const:
         return f.iter().readI32Const(nullptr);
       case Expr::I64Const:
         return f.iter().readI64Const(nullptr);
       case Expr::F32Const:
         return f.iter().readF32Const(nullptr);
       case Expr::F64Const:
         return f.iter().readF64Const(nullptr);
@@ -568,28 +586,28 @@ static bool
 DecodeFunctionSection(Decoder& d, ModuleGeneratorData* init)
 {
     uint32_t sectionStart, sectionSize;
     if (!d.startSection(FunctionSectionId, &sectionStart, &sectionSize))
         return Fail(d, "failed to start section");
     if (sectionStart == Decoder::NotStarted)
         return true;
 
-    uint32_t numDecls;
-    if (!d.readVarU32(&numDecls))
-        return Fail(d, "expected number of declarations");
+    uint32_t numDefs;
+    if (!d.readVarU32(&numDefs))
+        return Fail(d, "expected number of function definitions");
 
-    if (numDecls > MaxFuncs)
+    if (numDefs > MaxFuncs)
         return Fail(d, "too many functions");
 
-    if (!init->funcSigs.resize(numDecls))
+    if (!init->funcDefSigs.resize(numDefs))
         return false;
 
-    for (uint32_t i = 0; i < numDecls; i++) {
-        if (!DecodeSignatureIndex(d, *init, &init->funcSigs[i]))
+    for (uint32_t i = 0; i < numDefs; i++) {
+        if (!DecodeSignatureIndex(d, *init, &init->funcDefSigs[i]))
             return false;
     }
 
     if (!d.finishSection(sectionStart, sectionSize))
         return Fail(d, "decls section byte size mismatch");
 
     return true;
 }
@@ -871,24 +889,24 @@ DecodeTableSection(Decoder& d, bool newF
 
         if (table.initial > MaxTableElems)
             return Fail(d, "too many table elements");
 
         if (!oldElems->resize(table.initial))
             return false;
 
         for (uint32_t i = 0; i < table.initial; i++) {
-            uint32_t funcIndex;
-            if (!d.readVarU32(&funcIndex))
+            uint32_t funcDefIndex;
+            if (!d.readVarU32(&funcDefIndex))
                 return Fail(d, "expected table element");
 
-            if (funcIndex >= init->funcSigs.length())
+            if (funcDefIndex >= init->funcDefSigs.length())
                 return Fail(d, "table element out of range");
 
-            (*oldElems)[i] = funcIndex;
+            (*oldElems)[i] = init->funcImports.length() + funcDefIndex;
         }
 
         MOZ_ASSERT(init->tables.empty());
         if (!init->tables.append(table))
             return false;
     }
 
     if (!d.finishSection(sectionStart, sectionSize))
@@ -1073,48 +1091,48 @@ DecodeExportName(Decoder& d, CStringSet*
 
     return Move(exportName);
 }
 
 static bool
 DecodeExport(Decoder& d, bool newFormat, ModuleGenerator& mg, CStringSet* dupSet)
 {
     if (!newFormat) {
-        uint32_t funcIndex;
-        if (!d.readVarU32(&funcIndex))
+        uint32_t funcDefIndex;
+        if (!d.readVarU32(&funcDefIndex))
             return Fail(d, "expected export internal index");
 
-        if (funcIndex >= mg.numFuncSigs())
+        if (funcDefIndex >= mg.numFuncDefs())
             return Fail(d, "exported function index out of bounds");
 
         UniqueChars fieldName = DecodeExportName(d, dupSet);
         if (!fieldName)
             return false;
 
-        return mg.addFuncExport(Move(fieldName), funcIndex);
+        return mg.addFuncDefExport(Move(fieldName), mg.numFuncImports() + funcDefIndex);
     }
 
     UniqueChars fieldName = DecodeExportName(d, dupSet);
     if (!fieldName)
         return false;
 
     uint32_t exportKind;
     if (!d.readVarU32(&exportKind))
         return Fail(d, "failed to read export kind");
 
     switch (DefinitionKind(exportKind)) {
       case DefinitionKind::Function: {
         uint32_t funcIndex;
         if (!d.readVarU32(&funcIndex))
             return Fail(d, "expected export internal index");
 
-        if (funcIndex >= mg.numFuncSigs())
+        if (funcIndex >= mg.numFuncs())
             return Fail(d, "exported function index out of bounds");
 
-        return mg.addFuncExport(Move(fieldName), funcIndex);
+        return mg.addFuncDefExport(Move(fieldName), funcIndex);
       }
       case DefinitionKind::Table: {
         uint32_t tableIndex;
         if (!d.readVarU32(&tableIndex))
             return Fail(d, "expected table index");
 
         if (tableIndex >= mg.tables().length())
             return Fail(d, "exported table index out of bounds");
@@ -1185,46 +1203,46 @@ DecodeExportSection(Decoder& d, bool new
 
     if (!d.finishSection(sectionStart, sectionSize))
         return Fail(d, "export section byte size mismatch");
 
     return true;
 }
 
 static bool
-DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcIndex)
+DecodeFunctionBody(Decoder& d, bool newFormat, ModuleGenerator& mg, uint32_t funcDefIndex)
 {
     uint32_t bodySize;
     if (!d.readVarU32(&bodySize))
         return Fail(d, "expected number of function body bytes");
 
     if (d.bytesRemain() < bodySize)
         return Fail(d, "function body length too big");
 
     const uint8_t* bodyBegin = d.currentPosition();
     const uint8_t* bodyEnd = bodyBegin + bodySize;
 
     FunctionGenerator fg;
     if (!mg.startFuncDef(d.currentOffset(), &fg))
         return false;
 
     ValTypeVector locals;
-    const Sig& sig = mg.funcSig(funcIndex);
+    const Sig& sig = mg.funcDefSig(funcDefIndex);
     if (!locals.appendAll(sig.args()))
         return false;
 
     if (!DecodeLocalEntries(d, &locals))
         return Fail(d, "failed decoding local entries");
 
     for (ValType type : locals) {
         if (!CheckValType(d, type))
             return false;
     }
 
-    FunctionDecoder f(mg, locals, d);
+    FunctionDecoder f(mg, locals, d, newFormat);
 
     if (!f.iter().readFunctionStart())
         return false;
 
     while (d.currentPosition() < bodyEnd) {
         if (!DecodeExpr(f))
             return false;
     }
@@ -1235,77 +1253,77 @@ DecodeFunctionBody(Decoder& d, ModuleGen
     if (d.currentPosition() != bodyEnd)
         return Fail(d, "function body length mismatch");
 
     if (!fg.bytes().resize(bodySize))
         return false;
 
     memcpy(fg.bytes().begin(), bodyBegin, bodySize);
 
-    return mg.finishFuncDef(funcIndex, &fg);
+    return mg.finishFuncDef(funcDefIndex, &fg);
 }
 
 static bool
 DecodeStartSection(Decoder& d, ModuleGenerator& mg)
 {
     uint32_t sectionStart, sectionSize;
     if (!d.startSection(StartSectionId, &sectionStart, &sectionSize))
         return Fail(d, "failed to start section");
     if (sectionStart == Decoder::NotStarted)
         return true;
 
-    uint32_t startFuncIndex;
-    if (!d.readVarU32(&startFuncIndex))
+    uint32_t funcIndex;
+    if (!d.readVarU32(&funcIndex))
         return Fail(d, "failed to read start func index");
 
-    if (startFuncIndex >= mg.numFuncSigs())
+    if (funcIndex >= mg.numFuncs())
         return Fail(d, "unknown start function");
 
-    const Sig& sig = mg.funcSig(startFuncIndex);
+    const Sig& sig = mg.funcSig(funcIndex);
     if (sig.ret() != ExprType::Void)
         return Fail(d, "start function must not return anything");
 
     if (sig.args().length())
         return Fail(d, "start function must be nullary");
 
-    if (!mg.setStartFunction(startFuncIndex))
+    if (!mg.setStartFunction(funcIndex))
         return false;
 
     if (!d.finishSection(sectionStart, sectionSize))
         return Fail(d, "data section byte size mismatch");
 
     return true;
 }
 
 static bool
-DecodeCodeSection(Decoder& d, ModuleGenerator& mg)
+DecodeCodeSection(Decoder& d, bool newFormat, ModuleGenerator& mg)
 {
     if (!mg.startFuncDefs())
         return false;
 
     uint32_t sectionStart, sectionSize;
     if (!d.startSection(CodeSectionId, &sectionStart, &sectionSize))
         return Fail(d, "failed to start section");
 
     if (sectionStart == Decoder::NotStarted) {
-        if (mg.numFuncSigs() != 0)
+        if (mg.numFuncDefs() != 0)
             return Fail(d, "expected function bodies");
 
         return mg.finishFuncDefs();
     }
 
-    uint32_t numFuncBodies;
-    if (!d.readVarU32(&numFuncBodies))
+    uint32_t numFuncDefs;
+    if (!d.readVarU32(&numFuncDefs))
         return Fail(d, "expected function body count");
 
-    if (numFuncBodies != mg.numFuncSigs())
+    if (numFuncDefs != mg.numFuncDefs())
         return Fail(d, "function body count does not match function signature count");
 
-    for (uint32_t funcIndex = 0; funcIndex < numFuncBodies; funcIndex++) {
-        if (!DecodeFunctionBody(d, mg, funcIndex))
+    for (uint32_t i = 0; i < numFuncDefs; i++) {
+        if (!DecodeFunctionBody(d, newFormat, mg, i))
             return false;
     }
 
     if (!d.finishSection(sectionStart, sectionSize))
         return Fail(d, "function section byte size mismatch");
 
     return mg.finishFuncDefs();
 }
@@ -1361,17 +1379,17 @@ DecodeElemSection(Decoder& d, bool newFo
 
         Uint32Vector elemFuncIndices;
         if (!elemFuncIndices.resize(numElems))
             return false;
 
         for (uint32_t i = 0; i < numElems; i++) {
             if (!d.readVarU32(&elemFuncIndices[i]))
                 return Fail(d, "failed to read element function index");
-            if (elemFuncIndices[i] >= mg.numFuncSigs())
+            if (elemFuncIndices[i] >= mg.numFuncs())
                 return Fail(d, "table element out of range");
         }
 
         if (!mg.addElemSegment(offset, Move(elemFuncIndices)))
             return false;
     }
 
     if (!d.finishSection(sectionStart, sectionSize))
@@ -1572,20 +1590,20 @@ wasm::Compile(const ShareableBytes& byte
         return nullptr;
 
     if (!DecodeExportSection(d, newFormat, memoryExported, mg))
         return nullptr;
 
     if (!DecodeStartSection(d, mg))
         return nullptr;
 
-    if (!DecodeCodeSection(d, mg))
+    if (!DecodeElemSection(d, newFormat, Move(oldElems), mg))
         return nullptr;
 
-    if (!DecodeElemSection(d, newFormat, Move(oldElems), mg))
+    if (!DecodeCodeSection(d, newFormat, mg))
         return nullptr;
 
     if (!DecodeDataSection(d, newFormat, mg))
         return nullptr;
 
     if (!DecodeNameSection(d, mg))
         return nullptr;
 
--- a/js/src/asmjs/WasmFrameIterator.cpp
+++ b/js/src/asmjs/WasmFrameIterator.cpp
@@ -185,17 +185,17 @@ FrameIterator::functionDisplayAtom() con
             return cx->names().empty;
         }
 
         return atom;
     }
 
     MOZ_ASSERT(codeRange_);
 
-    JSAtom* atom = code_->getFuncAtom(cx, codeRange_->funcIndex());
+    JSAtom* atom = code_->getFuncDefAtom(cx, codeRange_->funcDefIndex());
     if (!atom) {
         cx->clearPendingException();
         return cx->names().empty;
     }
 
     return atom;
 }
 
@@ -774,17 +774,17 @@ ProfilingFrameIterator::label() const
         return importJitDescription;
       case ExitReason::ImportInterp:
         return importInterpDescription;
       case ExitReason::Native:
         return nativeDescription;
     }
 
     switch (codeRange_->kind()) {
-      case CodeRange::Function:         return code_->profilingLabel(codeRange_->funcIndex());
+      case CodeRange::Function:         return code_->profilingLabel(codeRange_->funcDefIndex());
       case CodeRange::Entry:            return "entry trampoline (in asm.js)";
       case CodeRange::ImportJitExit:    return importJitDescription;
       case CodeRange::ImportInterpExit: return importInterpDescription;
       case CodeRange::Inline:           return "inline stub (in asm.js)";
       case CodeRange::CallThunk:        return "call thunk (in asm.js)";
     }
 
     MOZ_CRASH("bad code range kind");
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -49,17 +49,17 @@ ModuleGenerator::ModuleGenerator(ImportV
     numTables_(0),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
     masmAlloc_(&lifo_),
     masm_(MacroAssembler::AsmJSToken(), masmAlloc_),
     lastPatchedCallsite_(0),
     startOfUnpatchedBranches_(0),
     parallel_(false),
     outstanding_(0),
-    activeFunc_(nullptr),
+    activeFuncDef_(nullptr),
     startedFuncDefs_(false),
     finishedFuncDefs_(false)
 {
     MOZ_ASSERT(IsCompilingAsmJS());
 }
 
 ModuleGenerator::~ModuleGenerator()
 {
@@ -98,17 +98,17 @@ ModuleGenerator::~ModuleGenerator()
 
 bool
 ModuleGenerator::init(UniqueModuleGeneratorData shared, const CompileArgs& args,
                       Metadata* maybeAsmJSMetadata)
 {
     shared_ = Move(shared);
     alwaysBaseline_ = args.alwaysBaseline;
 
-    if (!exportedFuncs_.init())
+    if (!exportedFuncDefs_.init())
         return false;
 
     linkData_.globalDataLength = AlignBytes(InitialGlobalDataBytes, sizeof(void*));;
 
     // asm.js passes in an AsmJSMetadata subclass to use instead.
     if (maybeAsmJSMetadata) {
         metadata_ = maybeAsmJSMetadata;
         MOZ_ASSERT(isAsmJS());
@@ -132,16 +132,19 @@ ModuleGenerator::init(UniqueModuleGenera
     // and will be initialized in a linear order via init* functions as the
     // module is generated. For wasm, the Vectors are correctly-sized and
     // already initialized.
 
     if (!isAsmJS()) {
         numSigs_ = shared_->sigs.length();
         numTables_ = shared_->tables.length();
 
+        if (args.assumptions.newFormat)
+            shared_->firstFuncDefIndex = shared_->funcImports.length();
+
         for (FuncImportGenDesc& funcImport : shared_->funcImports) {
             MOZ_ASSERT(!funcImport.globalDataOffset);
             funcImport.globalDataOffset = linkData_.globalDataLength;
             linkData_.globalDataLength += sizeof(FuncImportTls);
             if (!addFuncImport(*funcImport.sig, funcImport.globalDataOffset))
                 return false;
         }
 
@@ -218,27 +221,41 @@ ModuleGenerator::finishOutstandingTask()
     }
 
     return finishTask(task);
 }
 
 static const uint32_t BadCodeRange = UINT32_MAX;
 
 bool
-ModuleGenerator::funcIsDefined(uint32_t funcIndex) const
+ModuleGenerator::funcIndexIsDef(uint32_t funcIndex) const
+{
+    MOZ_ASSERT(funcIndex < numFuncImports() + numFuncDefs());
+    return funcIndex >= numFuncImports();
+}
+
+uint32_t
+ModuleGenerator::funcIndexToDef(uint32_t funcIndex) const
 {
-    return funcIndex < funcIndexToCodeRange_.length() &&
-           funcIndexToCodeRange_[funcIndex] != BadCodeRange;
+    MOZ_ASSERT(funcIndexIsDef(funcIndex));
+    return funcIndex - numFuncImports();
+}
+
+bool
+ModuleGenerator::funcIsDefined(uint32_t funcDefIndex) const
+{
+    return funcDefIndex < funcDefIndexToCodeRange_.length() &&
+           funcDefIndexToCodeRange_[funcDefIndex] != BadCodeRange;
 }
 
 const CodeRange&
-ModuleGenerator::funcCodeRange(uint32_t funcIndex) const
+ModuleGenerator::funcDefCodeRange(uint32_t funcDefIndex) const
 {
-    MOZ_ASSERT(funcIsDefined(funcIndex));
-    const CodeRange& cr = metadata_->codeRanges[funcIndexToCodeRange_[funcIndex]];
+    MOZ_ASSERT(funcIsDefined(funcDefIndex));
+    const CodeRange& cr = metadata_->codeRanges[funcDefIndexToCodeRange_[funcDefIndex]];
     MOZ_ASSERT(cr.isFunction());
     return cr;
 }
 
 static uint32_t
 JumpRange()
 {
     return Min(JitOptions.jumpThreshold, JumpImmediateRange);
@@ -255,46 +272,46 @@ ModuleGenerator::convertOutOfRangeBranch
     // create one thunk for each callee since there is often high reuse.
 
     OffsetMap alreadyThunked;
     if (!alreadyThunked.init())
         return false;
 
     for (; lastPatchedCallsite_ < masm_.callSites().length(); lastPatchedCallsite_++) {
         const CallSiteAndTarget& cs = masm_.callSites()[lastPatchedCallsite_];
-        if (!cs.isInternal())
+        if (!cs.isDefinition())
             continue;
 
         uint32_t callerOffset = cs.returnAddressOffset();
         MOZ_RELEASE_ASSERT(callerOffset < INT32_MAX);
 
-        if (funcIsDefined(cs.targetIndex())) {
-            uint32_t calleeOffset = funcCodeRange(cs.targetIndex()).funcNonProfilingEntry();
+        if (funcIsDefined(cs.funcDefIndex())) {
+            uint32_t calleeOffset = funcDefCodeRange(cs.funcDefIndex()).funcNonProfilingEntry();
             MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
 
             if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
                 masm_.patchCall(callerOffset, calleeOffset);
                 continue;
             }
         }
 
-        OffsetMap::AddPtr p = alreadyThunked.lookupForAdd(cs.targetIndex());
+        OffsetMap::AddPtr p = alreadyThunked.lookupForAdd(cs.funcDefIndex());
         if (!p) {
             Offsets offsets;
             offsets.begin = masm_.currentOffset();
             uint32_t thunkOffset = masm_.thunkWithPatch().offset();
             if (masm_.oom())
                 return false;
             offsets.end = masm_.currentOffset();
 
             if (!metadata_->codeRanges.emplaceBack(CodeRange::CallThunk, offsets))
                 return false;
-            if (!metadata_->callThunks.emplaceBack(thunkOffset, cs.targetIndex()))
+            if (!metadata_->callThunks.emplaceBack(thunkOffset, cs.funcDefIndex()))
                 return false;
-            if (!alreadyThunked.add(p, cs.targetIndex(), offsets.begin))
+            if (!alreadyThunked.add(p, cs.funcDefIndex(), offsets.begin))
                 return false;
         }
 
         masm_.patchCall(callerOffset, p->value());
     }
 
     // Create thunks for jumps to stubs. Stubs are always generated at the end
     // so unconditionally thunk all existing jump sites.
@@ -346,100 +363,100 @@ ModuleGenerator::finishTask(IonCompileTa
 
     // Offset the recorded FuncOffsets by the offset of the function in the
     // whole module's code segment.
     uint32_t offsetInWhole = masm_.size();
     results.offsets().offsetBy(offsetInWhole);
 
     // Add the CodeRange for this function.
     uint32_t funcCodeRangeIndex = metadata_->codeRanges.length();
-    if (!metadata_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
+    if (!metadata_->codeRanges.emplaceBack(func.defIndex(), func.lineOrBytecode(), results.offsets()))
         return false;
 
     // Maintain a mapping from function index to CodeRange index.
-    if (func.index() >= funcIndexToCodeRange_.length()) {
-        uint32_t n = func.index() - funcIndexToCodeRange_.length() + 1;
-        if (!funcIndexToCodeRange_.appendN(BadCodeRange, n))
+    if (func.defIndex() >= funcDefIndexToCodeRange_.length()) {
+        uint32_t n = func.defIndex() - funcDefIndexToCodeRange_.length() + 1;
+        if (!funcDefIndexToCodeRange_.appendN(BadCodeRange, n))
             return false;
     }
-    MOZ_ASSERT(!funcIsDefined(func.index()));
-    funcIndexToCodeRange_[func.index()] = funcCodeRangeIndex;
+    MOZ_ASSERT(!funcIsDefined(func.defIndex()));
+    funcDefIndexToCodeRange_[func.defIndex()] = funcCodeRangeIndex;
 
     // Merge the compiled results into the whole-module masm.
     mozilla::DebugOnly<size_t> sizeBefore = masm_.size();
     if (!masm_.asmMergeWith(results.masm()))
         return false;
     MOZ_ASSERT(masm_.size() == offsetInWhole + results.masm().size());
 
     freeTasks_.infallibleAppend(task);
     return true;
 }
 
 bool
-ModuleGenerator::finishFuncExports()
+ModuleGenerator::finishFuncDefExports()
 {
-    // ModuleGenerator::exportedFuncs_ is an unordered HashSet. The
-    // FuncExportVector stored in Metadata needs to be stored sorted by
+    // ModuleGenerator::exportedFuncDefs_ is an unordered HashSet. The
+    // FuncDefExportVector stored in Metadata needs to be stored sorted by
     // function index to allow O(log(n)) lookup at runtime.
 
-    Uint32Vector funcIndices;
-    if (!funcIndices.reserve(exportedFuncs_.count()))
+    Uint32Vector funcDefIndices;
+    if (!funcDefIndices.reserve(exportedFuncDefs_.count()))
         return false;
 
-    for (Uint32Set::Range r = exportedFuncs_.all(); !r.empty(); r.popFront())
-        funcIndices.infallibleAppend(r.front());
+    for (Uint32Set::Range r = exportedFuncDefs_.all(); !r.empty(); r.popFront())
+        funcDefIndices.infallibleAppend(r.front());
 
-    std::sort(funcIndices.begin(), funcIndices.end());
+    std::sort(funcDefIndices.begin(), funcDefIndices.end());
 
-    MOZ_ASSERT(metadata_->funcExports.empty());
-    if (!metadata_->funcExports.reserve(exportedFuncs_.count()))
+    MOZ_ASSERT(metadata_->funcDefExports.empty());
+    if (!metadata_->funcDefExports.reserve(exportedFuncDefs_.count()))
         return false;
 
-    for (uint32_t funcIndex : funcIndices) {
+    for (uint32_t funcDefIndex : funcDefIndices) {
         Sig sig;
-        if (!sig.clone(funcSig(funcIndex)))
+        if (!sig.clone(funcDefSig(funcDefIndex)))
             return false;
 
-        metadata_->funcExports.infallibleEmplaceBack(Move(sig),
-                                                     funcIndex,
-                                                     funcIndexToCodeRange_[funcIndex]);
+        metadata_->funcDefExports.infallibleEmplaceBack(Move(sig),
+                                                        funcDefIndex,
+                                                        funcDefIndexToCodeRange_[funcDefIndex]);
     }
 
     return true;
 }
 
 typedef Vector<Offsets, 0, SystemAllocPolicy> OffsetVector;
 typedef Vector<ProfilingOffsets, 0, SystemAllocPolicy> ProfilingOffsetVector;
 
 bool
 ModuleGenerator::finishCodegen()
 {
     uint32_t offsetInWhole = masm_.size();
 
-    uint32_t numFuncExports = metadata_->funcExports.length();
-    MOZ_ASSERT(numFuncExports == exportedFuncs_.count());
+    uint32_t numFuncDefExports = metadata_->funcDefExports.length();
+    MOZ_ASSERT(numFuncDefExports == exportedFuncDefs_.count());
 
     // Generate stubs in a separate MacroAssembler since, otherwise, for modules
     // larger than the JumpImmediateRange, even local uses of Label will fail
     // due to the large absolute offsets temporarily stored by Label::bind().
 
     OffsetVector entries;
     ProfilingOffsetVector interpExits;
     ProfilingOffsetVector jitExits;
     EnumeratedArray<JumpTarget, JumpTarget::Limit, Offsets> jumpTargets;
     Offsets interruptExit;
 
     {
         TempAllocator alloc(&lifo_);
         MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);
 
-        if (!entries.resize(numFuncExports))
+        if (!entries.resize(numFuncDefExports))
             return false;
-        for (uint32_t i = 0; i < numFuncExports; i++)
-            entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);
+        for (uint32_t i = 0; i < numFuncDefExports; i++)
+            entries[i] = GenerateEntry(masm, metadata_->funcDefExports[i]);
 
         if (!interpExits.resize(numFuncImports()))
             return false;
         if (!jitExits.resize(numFuncImports()))
             return false;
         for (uint32_t i = 0; i < numFuncImports(); i++) {
             interpExits[i] = GenerateInterpExit(masm, metadata_->funcImports[i], i);
             jitExits[i] = GenerateJitExit(masm, metadata_->funcImports[i]);
@@ -452,19 +469,19 @@ ModuleGenerator::finishCodegen()
 
         if (masm.oom() || !masm_.asmMergeWith(masm))
             return false;
     }
 
     // Adjust each of the resulting Offsets (to account for being merged into
     // masm_) and then create code ranges for all the stubs.
 
-    for (uint32_t i = 0; i < numFuncExports; i++) {
+    for (uint32_t i = 0; i < numFuncDefExports; i++) {
         entries[i].offsetBy(offsetInWhole);
-        metadata_->funcExports[i].initEntryOffset(entries[i].begin);
+        metadata_->funcDefExports[i].initEntryOffset(entries[i].begin);
         if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
             return false;
     }
 
     for (uint32_t i = 0; i < numFuncImports(); i++) {
         interpExits[i].offsetBy(offsetInWhole);
         metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
         if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
@@ -497,19 +514,19 @@ ModuleGenerator::finishCodegen()
     // emit new jumps to JumpTargets has finished.
 
     if (!convertOutOfRangeBranchesToThunks())
         return false;
 
     // Now that all thunks have been generated, patch all the thunks.
 
     for (CallThunk& callThunk : metadata_->callThunks) {
-        uint32_t funcIndex = callThunk.u.funcIndex;
-        callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
-        masm_.patchThunk(callThunk.offset, funcCodeRange(funcIndex).funcNonProfilingEntry());
+        uint32_t funcDefIndex = callThunk.u.funcDefIndex;
+        callThunk.u.codeRangeIndex = funcDefIndexToCodeRange_[funcDefIndex];
+        masm_.patchThunk(callThunk.offset, funcDefCodeRange(funcDefIndex).funcNonProfilingEntry());
     }
 
     for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
         for (uint32_t thunkOffset : jumpThunks_[target])
             masm_.patchThunk(thunkOffset, jumpTargets[target].begin);
     }
 
     // Code-generation is complete!
@@ -666,22 +683,22 @@ ModuleGenerator::initSig(uint32_t sigInd
 const SigWithId&
 ModuleGenerator::sig(uint32_t index) const
 {
     MOZ_ASSERT(index < numSigs_);
     return shared_->sigs[index];
 }
 
 void
-ModuleGenerator::initFuncSig(uint32_t funcIndex, uint32_t sigIndex)
+ModuleGenerator::initFuncDefSig(uint32_t funcDefIndex, uint32_t sigIndex)
 {
     MOZ_ASSERT(isAsmJS());
-    MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
+    MOZ_ASSERT(!shared_->funcDefSigs[funcDefIndex]);
 
-    shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
+    shared_->funcDefSigs[funcDefIndex] = &shared_->sigs[sigIndex];
 }
 
 void
 ModuleGenerator::initMemoryUsage(MemoryUsage memoryUsage)
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(shared_->memoryUsage == MemoryUsage::None);
 
@@ -693,20 +710,20 @@ ModuleGenerator::bumpMinMemoryLength(uin
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(newMinMemoryLength >= shared_->minMemoryLength);
 
     shared_->minMemoryLength = newMinMemoryLength;
 }
 
 const SigWithId&
-ModuleGenerator::funcSig(uint32_t funcIndex) const
+ModuleGenerator::funcDefSig(uint32_t funcDefIndex) const
 {
-    MOZ_ASSERT(shared_->funcSigs[funcIndex]);
-    return *shared_->funcSigs[funcIndex];
+    MOZ_ASSERT(shared_->funcDefSigs[funcDefIndex]);
+    return *shared_->funcDefSigs[funcDefIndex];
 }
 
 bool
 ModuleGenerator::initImport(uint32_t funcImportIndex, uint32_t sigIndex)
 {
     MOZ_ASSERT(isAsmJS());
 
     uint32_t globalDataOffset;
@@ -732,27 +749,48 @@ ModuleGenerator::numFuncImports() const
 
 const FuncImportGenDesc&
 ModuleGenerator::funcImport(uint32_t funcImportIndex) const
 {
     MOZ_ASSERT(shared_->funcImports[funcImportIndex].sig);
     return shared_->funcImports[funcImportIndex];
 }
 
-bool
-ModuleGenerator::addFuncExport(UniqueChars fieldName, uint32_t funcIndex)
+uint32_t
+ModuleGenerator::numFuncs() const
+{
+    return numFuncImports() + numFuncDefs();
+}
+
+const SigWithId&
+ModuleGenerator::funcSig(uint32_t funcIndex) const
 {
-    return exports_.emplaceBack(Move(fieldName), funcIndex, DefinitionKind::Function) &&
-           exportedFuncs_.put(funcIndex);
+    MOZ_ASSERT(funcIndex < numFuncs());
+
+    if (funcIndex < numFuncImports())
+        return *funcImport(funcIndex).sig;
+
+    return funcDefSig(funcIndex - numFuncImports());
+}
+
+bool
+ModuleGenerator::addFuncDefExport(UniqueChars fieldName, uint32_t funcIndex)
+{
+    if (funcIndexIsDef(funcIndex)) {
+       if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
+           return false;
+    }
+
+    return exports_.emplaceBack(Move(fieldName), funcIndex, DefinitionKind::Function);
 }
 
 bool
 ModuleGenerator::addTableExport(UniqueChars fieldName)
 {
-    MOZ_ASSERT(elemSegments_.empty());
+    MOZ_ASSERT(!startedFuncDefs_);
     MOZ_ASSERT(shared_->tables.length() == 1);
     shared_->tables[0].external = true;
     return exports_.emplaceBack(Move(fieldName), DefinitionKind::Table);
 }
 
 bool
 ModuleGenerator::addMemoryExport(UniqueChars fieldName)
 {
@@ -763,26 +801,65 @@ bool
 ModuleGenerator::addGlobalExport(UniqueChars fieldName, uint32_t globalIndex)
 {
     return exports_.emplaceBack(Move(fieldName), globalIndex, DefinitionKind::Global);
 }
 
 bool
 ModuleGenerator::setStartFunction(uint32_t funcIndex)
 {
+    if (funcIndexIsDef(funcIndex)) {
+        if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
+            return false;
+    }
+
     metadata_->initStartFuncIndex(funcIndex);
-    return exportedFuncs_.put(funcIndex);
+    return true;
+}
+
+bool
+ModuleGenerator::addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices)
+{
+    MOZ_ASSERT(!isAsmJS());
+    MOZ_ASSERT(!startedFuncDefs_);
+    MOZ_ASSERT(shared_->tables.length() == 1);
+
+    for (uint32_t funcIndex : elemFuncIndices) {
+        if (!funcIndexIsDef(funcIndex)) {
+            shared_->tables[0].external = true;
+            break;
+        }
+    }
+
+    return elemSegments_.emplaceBack(0, offset, Move(elemFuncIndices));
 }
 
 bool
 ModuleGenerator::startFuncDefs()
 {
     MOZ_ASSERT(!startedFuncDefs_);
     MOZ_ASSERT(!finishedFuncDefs_);
 
+    // Now that it is known whether tables are internal or external, mark the
+    // elements of any external table as exported since they may be called from
+    // outside the module.
+
+    for (ElemSegment& elems : elemSegments_) {
+        if (!shared_->tables[elems.tableIndex].external)
+            continue;
+
+        for (uint32_t funcIndex : elems.elemFuncIndices) {
+            if (!funcIndexIsDef(funcIndex))
+                continue;
+
+            if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
+                return false;
+        }
+    }
+
     // The wasmCompilationInProgress atomic ensures that there is only one
     // parallel compilation in progress at a time. In the special case of
     // asm.js, where the ModuleGenerator itself can be on a helper thread, this
     // avoids the possibility of deadlock since at most 1 helper thread will be
     // blocking on other helper threads and there are always >1 helper threads.
     // With wasm, this restriction could be relaxed by moving the worklist state
     // out of HelperThreadState since each independent compilation needs its own
     // worklist pair. Alternatively, the deadlock could be avoided by having the
@@ -822,41 +899,41 @@ ModuleGenerator::startFuncDefs()
     MOZ_ASSERT(!finishedFuncDefs_);
     return true;
 }
 
 bool
 ModuleGenerator::startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg)
 {
     MOZ_ASSERT(startedFuncDefs_);
-    MOZ_ASSERT(!activeFunc_);
+    MOZ_ASSERT(!activeFuncDef_);
     MOZ_ASSERT(!finishedFuncDefs_);
 
     if (freeTasks_.empty() && !finishOutstandingTask())
         return false;
 
     IonCompileTask* task = freeTasks_.popCopy();
 
     task->reset(&fg->bytes_);
     fg->bytes_.clear();
     fg->lineOrBytecode_ = lineOrBytecode;
     fg->m_ = this;
     fg->task_ = task;
-    activeFunc_ = fg;
+    activeFuncDef_ = fg;
     return true;
 }
 
 bool
-ModuleGenerator::finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg)
+ModuleGenerator::finishFuncDef(uint32_t funcDefIndex, FunctionGenerator* fg)
 {
-    MOZ_ASSERT(activeFunc_ == fg);
+    MOZ_ASSERT(activeFuncDef_ == fg);
 
     auto func = js::MakeUnique<FuncBytes>(Move(fg->bytes_),
-                                          funcIndex,
-                                          funcSig(funcIndex),
+                                          funcDefIndex,
+                                          funcDefSig(funcDefIndex),
                                           fg->lineOrBytecode_,
                                           Move(fg->callSiteLineNums_));
     if (!func)
         return false;
 
     auto mode = alwaysBaseline_ && BaselineCanCompile(fg)
                 ? IonCompileTask::CompileMode::Baseline
                 : IonCompileTask::CompileMode::Ion;
@@ -871,63 +948,62 @@ ModuleGenerator::finishFuncDef(uint32_t 
         if (!CompileFunction(fg->task_))
             return false;
         if (!finishTask(fg->task_))
             return false;
     }
 
     fg->m_ = nullptr;
     fg->task_ = nullptr;
-    activeFunc_ = nullptr;
+    activeFuncDef_ = nullptr;
     return true;
 }
 
 bool
 ModuleGenerator::finishFuncDefs()
 {
     MOZ_ASSERT(startedFuncDefs_);
-    MOZ_ASSERT(!activeFunc_);
+    MOZ_ASSERT(!activeFuncDef_);
     MOZ_ASSERT(!finishedFuncDefs_);
 
     while (outstanding_ > 0) {
         if (!finishOutstandingTask())
             return false;
     }
 
-    for (uint32_t funcIndex = 0; funcIndex < funcIndexToCodeRange_.length(); funcIndex++)
-        MOZ_ASSERT(funcIsDefined(funcIndex));
+#ifdef DEBUG
+    for (uint32_t i = 0; i < funcDefIndexToCodeRange_.length(); i++)
+        MOZ_ASSERT(funcIsDefined(i));
+#endif
+
+    // Complete element segments with the code range index of every element, now
+    // that all functions have been compiled.
+
+    for (ElemSegment& elems : elemSegments_) {
+        Uint32Vector& codeRangeIndices = elems.elemCodeRangeIndices;
+
+        MOZ_ASSERT(codeRangeIndices.empty());
+        if (!codeRangeIndices.reserve(elems.elemFuncIndices.length()))
+            return false;
+
+        for (uint32_t funcIndex : elems.elemFuncIndices) {
+            if (!funcIndexIsDef(funcIndex)) {
+                codeRangeIndices.infallibleAppend(UINT32_MAX);
+                continue;
+            }
+
+            codeRangeIndices.infallibleAppend(funcDefIndexToCodeRange_[funcIndexToDef(funcIndex)]);
+        }
+    }
 
     linkData_.functionCodeLength = masm_.size();
     finishedFuncDefs_ = true;
     return true;
 }
 
-bool
-ModuleGenerator::addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices)
-{
-    MOZ_ASSERT(!isAsmJS());
-    MOZ_ASSERT(finishedFuncDefs_);
-    MOZ_ASSERT(shared_->tables.length() == 1);
-
-    if (shared_->tables[0].external) {
-        for (uint32_t funcIndex : elemFuncIndices) {
-            if (!exportedFuncs_.put(funcIndex))
-                return false;
-        }
-    }
-
-    Uint32Vector codeRangeIndices;
-    if (!codeRangeIndices.resize(elemFuncIndices.length()))
-        return false;
-    for (size_t i = 0; i < elemFuncIndices.length(); i++)
-        codeRangeIndices[i] = funcIndexToCodeRange_[elemFuncIndices[i]];
-
-    return elemSegments_.emplaceBack(0, offset, Move(elemFuncIndices), Move(codeRangeIndices));
-}
-
 void
 ModuleGenerator::setFuncNames(NameInBytecodeVector&& funcNames)
 {
     MOZ_ASSERT(metadata_->funcNames.empty());
     metadata_->funcNames = Move(funcNames);
 }
 
 bool
@@ -945,46 +1021,54 @@ ModuleGenerator::initSigTableLength(uint
     MOZ_ASSERT(table.initial == 0);
     table.kind = TableKind::TypedFunction;
     table.initial = length;
     table.maximum = UINT32_MAX;
     return allocateGlobalBytes(sizeof(void*), sizeof(void*), &table.globalDataOffset);
 }
 
 bool
-ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices)
+ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncDefIndices)
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(finishedFuncDefs_);
 
     uint32_t tableIndex = shared_->asmJSSigToTableIndex[sigIndex];
-    MOZ_ASSERT(shared_->tables[tableIndex].initial == elemFuncIndices.length());
+    MOZ_ASSERT(shared_->tables[tableIndex].initial == elemFuncDefIndices.length());
 
     Uint32Vector codeRangeIndices;
-    if (!codeRangeIndices.resize(elemFuncIndices.length()))
+    if (!codeRangeIndices.resize(elemFuncDefIndices.length()))
         return false;
-    for (size_t i = 0; i < elemFuncIndices.length(); i++)
-        codeRangeIndices[i] = funcIndexToCodeRange_[elemFuncIndices[i]];
+    for (size_t i = 0; i < elemFuncDefIndices.length(); i++) {
+        codeRangeIndices[i] = funcDefIndexToCodeRange_[elemFuncDefIndices[i]];
+        elemFuncDefIndices[i] += numFuncImports();
+    }
 
+    // By adding numFuncImports to each element, elemFuncDefIndices is now a
+    // Vector of func indices.
     InitExpr offset(Val(uint32_t(0)));
-    return elemSegments_.emplaceBack(tableIndex, offset, Move(elemFuncIndices), Move(codeRangeIndices));
+    if (!elemSegments_.emplaceBack(tableIndex, offset, Move(elemFuncDefIndices)))
+        return false;
+
+    elemSegments_.back().elemCodeRangeIndices = Move(codeRangeIndices);
+    return true;
 }
 
 SharedModule
 ModuleGenerator::finish(const ShareableBytes& bytecode)
 {
-    MOZ_ASSERT(!activeFunc_);
+    MOZ_ASSERT(!activeFuncDef_);
     MOZ_ASSERT(finishedFuncDefs_);
 
     // Now that all asm.js tables have been created and the compiler threads are
     // done, shrink the (no longer shared) tables vector down to size.
     if (isAsmJS() && !shared_->tables.resize(numTables_))
         return nullptr;
 
-    if (!finishFuncExports())
+    if (!finishFuncDefExports())
         return nullptr;
 
     if (!finishCodegen())
         return nullptr;
 
     // Round up the code size to page size since this is eventually required by
     // the executable-code allocator and for setting memory protection.
     uint32_t bytesNeeded = masm_.bytesNeeded();
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -48,28 +48,30 @@ struct FuncImportGenDesc
 typedef Vector<FuncImportGenDesc, 0, SystemAllocPolicy> FuncImportGenDescVector;
 
 struct ModuleGeneratorData
 {
     ModuleKind                kind;
     MemoryUsage               memoryUsage;
     mozilla::Atomic<uint32_t> minMemoryLength;
     Maybe<uint32_t>           maxMemoryLength;
+    uint32_t                  firstFuncDefIndex;
 
     SigWithIdVector           sigs;
-    SigWithIdPtrVector        funcSigs;
+    SigWithIdPtrVector        funcDefSigs;
     FuncImportGenDescVector   funcImports;
     GlobalDescVector          globals;
     TableDescVector           tables;
     Uint32Vector              asmJSSigToTableIndex;
 
     explicit ModuleGeneratorData(ModuleKind kind = ModuleKind::Wasm)
       : kind(kind),
         memoryUsage(MemoryUsage::None),
-        minMemoryLength(0)
+        minMemoryLength(0),
+        firstFuncDefIndex(0)
     {}
 
     bool isAsmJS() const {
         return kind == ModuleKind::AsmJS;
     }
 };
 
 typedef UniquePtr<ModuleGeneratorData> UniqueModuleGeneratorData;
@@ -100,39 +102,41 @@ class MOZ_STACK_CLASS ModuleGenerator
     // Data scoped to the ModuleGenerator's lifetime
     UniqueModuleGeneratorData       shared_;
     uint32_t                        numSigs_;
     uint32_t                        numTables_;
     LifoAlloc                       lifo_;
     jit::JitContext                 jcx_;
     jit::TempAllocator              masmAlloc_;
     jit::MacroAssembler             masm_;
-    Uint32Vector                    funcIndexToCodeRange_;
-    Uint32Set                       exportedFuncs_;
+    Uint32Vector                    funcDefIndexToCodeRange_;
+    Uint32Set                       exportedFuncDefs_;
     uint32_t                        lastPatchedCallsite_;
     uint32_t                        startOfUnpatchedBranches_;
     JumpSiteArray                   jumpThunks_;
 
     // Parallel compilation
     bool                            parallel_;
     uint32_t                        outstanding_;
     IonCompileTaskVector            tasks_;
     IonCompileTaskPtrVector         freeTasks_;
 
     // Assertions
-    DebugOnly<FunctionGenerator*>   activeFunc_;
+    DebugOnly<FunctionGenerator*>   activeFuncDef_;
     DebugOnly<bool>                 startedFuncDefs_;
     DebugOnly<bool>                 finishedFuncDefs_;
 
     MOZ_MUST_USE bool finishOutstandingTask();
-    bool funcIsDefined(uint32_t funcIndex) const;
-    const CodeRange& funcCodeRange(uint32_t funcIndex) const;
+    bool funcIndexIsDef(uint32_t funcIndex) const;
+    uint32_t funcIndexToDef(uint32_t funcIndex) const;
+    bool funcIsDefined(uint32_t funcDefIndex) const;
+    const CodeRange& funcDefCodeRange(uint32_t funcDefIndex) const;
     MOZ_MUST_USE bool convertOutOfRangeBranchesToThunks();
     MOZ_MUST_USE bool finishTask(IonCompileTask* task);
-    MOZ_MUST_USE bool finishFuncExports();
+    MOZ_MUST_USE bool finishFuncDefExports();
     MOZ_MUST_USE bool finishCodegen();
     MOZ_MUST_USE bool finishLinkData(Bytes& code);
     MOZ_MUST_USE bool addFuncImport(const Sig& sig, uint32_t globalDataOffset);
     MOZ_MUST_USE bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOff);
     MOZ_MUST_USE bool allocateGlobal(GlobalDesc* global);
 
   public:
     explicit ModuleGenerator(ImportVector&& imports);
@@ -152,54 +156,58 @@ class MOZ_STACK_CLASS ModuleGenerator
     uint32_t numTables() const { return numTables_; }
     const TableDescVector& tables() const { return shared_->tables; }
 
     // Signatures:
     uint32_t numSigs() const { return numSigs_; }
     const SigWithId& sig(uint32_t sigIndex) const;
 
     // Function declarations:
-    uint32_t numFuncSigs() const { return shared_->funcSigs.length(); }
-    const SigWithId& funcSig(uint32_t funcIndex) const;
+    uint32_t numFuncDefs() const { return shared_->funcDefSigs.length(); }
+    const SigWithId& funcDefSig(uint32_t funcDefIndex) const;
 
     // Globals:
     const GlobalDescVector& globals() const { return shared_->globals; }
 
     // Imports:
     uint32_t numFuncImports() const;
     const FuncImportGenDesc& funcImport(uint32_t funcImportIndex) const;
 
+    // Function index space:
+    uint32_t numFuncs() const;
+    const SigWithId& funcSig(uint32_t funcIndex) const;
+
     // Exports:
-    MOZ_MUST_USE bool addFuncExport(UniqueChars fieldName, uint32_t funcIndex);
+    MOZ_MUST_USE bool addFuncDefExport(UniqueChars fieldName, uint32_t funcIndex);
     MOZ_MUST_USE bool addTableExport(UniqueChars fieldName);
     MOZ_MUST_USE bool addMemoryExport(UniqueChars fieldName);
     MOZ_MUST_USE bool addGlobalExport(UniqueChars fieldName, uint32_t globalIndex);
 
     // Function definitions:
     MOZ_MUST_USE bool startFuncDefs();
     MOZ_MUST_USE bool startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg);
-    MOZ_MUST_USE bool finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg);
+    MOZ_MUST_USE bool finishFuncDef(uint32_t funcDefIndex, FunctionGenerator* fg);
     MOZ_MUST_USE bool finishFuncDefs();
 
     // Start function:
     bool setStartFunction(uint32_t funcIndex);
 
     // Segments:
     MOZ_MUST_USE bool addDataSegment(DataSegment s) { return dataSegments_.append(s); }
     MOZ_MUST_USE bool addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices);
 
     // Function names:
     void setFuncNames(NameInBytecodeVector&& funcNames);
 
     // asm.js lazy initialization:
     void initSig(uint32_t sigIndex, Sig&& sig);
-    void initFuncSig(uint32_t funcIndex, uint32_t sigIndex);
+    void initFuncDefSig(uint32_t funcIndex, uint32_t sigIndex);
     MOZ_MUST_USE bool initImport(uint32_t importIndex, uint32_t sigIndex);
     MOZ_MUST_USE bool initSigTableLength(uint32_t sigIndex, uint32_t length);
-    MOZ_MUST_USE bool initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
+    MOZ_MUST_USE bool initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncDefIndices);
     void initMemoryUsage(MemoryUsage memoryUsage);
     void bumpMinMemoryLength(uint32_t newMinMemoryLength);
     MOZ_MUST_USE bool addGlobal(ValType type, bool isConst, uint32_t* index);
 
     // Finish compilation, provided the list of imports and source bytecode.
     // Both these Vectors may be empty (viz., b/c asm.js does different things
     // for imports and source).
     SharedModule finish(const ShareableBytes& bytecode);
--- a/js/src/asmjs/WasmInstance.cpp
+++ b/js/src/asmjs/WasmInstance.cpp
@@ -349,26 +349,24 @@ Instance::Instance(JSContext* cx,
     tlsData_.globalData = code_->segment().globalData();
     tlsData_.memoryBase = memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
     tlsData_.stackLimit = *(void**)cx->stackLimitAddressForJitCode(StackForUntrustedScript);
 
     for (size_t i = 0; i < metadata().funcImports.length(); i++) {
         HandleFunction f = funcImports[i];
         const FuncImport& fi = metadata().funcImports[i];
         FuncImportTls& import = funcImportTls(fi);
-        if (IsExportedFunction(f) && !isAsmJS() && !ExportedFunctionToInstance(f).isAsmJS()) {
-            Instance& calleeInstance = ExportedFunctionToInstance(f);
-            const Metadata& calleeMetadata = calleeInstance.metadata();
-            uint32_t funcIndex = ExportedFunctionToIndex(f);
-            const FuncExport& funcExport = calleeMetadata.lookupFuncExport(funcIndex);
-            const CodeRange& codeRange = calleeMetadata.codeRanges[funcExport.codeRangeIndex()];
+        if (!isAsmJS() && IsExportedWasmFunction(f)) {
+            WasmInstanceObject* calleeInstanceObj = ExportedFunctionToInstanceObject(f);
+            const CodeRange& codeRange = calleeInstanceObj->getExportedFunctionCodeRange(f);
+            Instance& calleeInstance = calleeInstanceObj->instance();
             import.tls = &calleeInstance.tlsData_;
             import.code = calleeInstance.codeSegment().base() + codeRange.funcNonProfilingEntry();
             import.baselineScript = nullptr;
-            import.obj = ExportedFunctionToInstanceObject(f);
+            import.obj = calleeInstanceObj;
         } else {
             import.tls = &tlsData_;
             import.code = codeBase() + fi.interpExitCodeOffset();
             import.baselineScript = nullptr;
             import.obj = f;
         }
     }
 
@@ -585,22 +583,22 @@ ReadCustomDoubleNaNObject(JSContext* cx,
 
 WasmInstanceObject*
 Instance::object() const
 {
     return object_;
 }
 
 bool
-Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
+Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
 {
     if (!cx->compartment()->wasm.ensureProfilingState(cx))
         return false;
 
-    const FuncExport& func = metadata().lookupFuncExport(funcIndex);
+    const FuncDefExport& func = metadata().lookupFuncDefExport(funcDefIndex);
 
     // The calling convention for an external call into wasm is to pass an
     // array of 16-byte values where each value contains either a coerced int32
     // (in the low word), a double value (in the low dword) or a SIMD vector
     // value, with the coercions specified by the wasm signature. The external
     // entry point unpacks this array into the system-ABI-specified registers
     // and stack memory and then calls into the internal entry point. The return
     // value is stored in the first element of the array (which, therefore, must
--- a/js/src/asmjs/WasmInstance.h
+++ b/js/src/asmjs/WasmInstance.h
@@ -99,17 +99,17 @@ class Instance
     // Instances may be reached via weak edges (e.g., Compartment::instances_)
     // so this perform a read-barrier on the returned object.
 
     WasmInstanceObject* object() const;
 
     // Execute the given export given the JS call arguments, storing the return
     // value in args.rval.
 
-    MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcIndex, CallArgs args);
+    MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args);
 
     // Initially, calls to imports in wasm code call out through the generic
     // callImport method. If the imported callee gets JIT compiled and the types
     // match up, callImport will patch the code to instead call through a thunk
     // directly into the JIT code. If the JIT code is released, the Instance must
     // be notified so it can go back to the generic callImport.
 
     void deoptimizeImportExit(uint32_t funcImportIndex);
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -989,27 +989,27 @@ class FunctionCompiler
             call->spIncrement_ = 0;
             stackBytes = Max(stackBytes, call->maxChildStackBytes_);
         }
 
         propagateMaxStackArgBytes(stackBytes);
         return true;
     }
 
-    bool internalCall(const Sig& sig, uint32_t funcIndex, const CallCompileState& call,
-                      MDefinition** def)
+    bool callDefinition(const Sig& sig, uint32_t funcDefIndex, const CallCompileState& call,
+                        MDefinition** def)
     {
         if (inDeadCode()) {
             *def = nullptr;
             return true;
         }
 
         CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Relative);
         MIRType ret = ToMIRType(sig.ret());
-        auto callee = CalleeDesc::internal(funcIndex);
+        auto callee = CalleeDesc::definition(funcDefIndex);
         auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret,
                                    call.spIncrement_, MWasmCall::DontSaveTls);
         if (!ins)
             return false;
 
         curBlock_->add(ins);
         *def = ins;
         return true;
@@ -1880,46 +1880,92 @@ EmitCallArgs(FunctionCompiler& f, const 
 
     if (!f.iter().readCallArgsEnd(numArgs))
         return false;
 
     return f.finishCall(call, PassTls::True, interModule);
 }
 
 static bool
+EmitCallImportCommon(FunctionCompiler& f, uint32_t lineOrBytecode, uint32_t funcImportIndex)
+{
+    const FuncImportGenDesc& funcImport = f.mg().funcImports[funcImportIndex];
+    const Sig& sig = *funcImport.sig;
+
+    CallCompileState call(f, lineOrBytecode);
+    if (!EmitCallArgs(f, sig, InterModule::True, &call))
+        return false;
+
+    if (!f.iter().readCallReturn(sig.ret()))
+        return false;
+
+    MDefinition* def;
+    if (!f.callImport(funcImport.globalDataOffset, call, sig.ret(), &def))
+        return false;
+
+    if (IsVoid(sig.ret()))
+        return true;
+
+    f.iter().setResult(def);
+    return true;
+}
+
+static bool
 EmitCall(FunctionCompiler& f, uint32_t callOffset)
 {
     uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);
 
     uint32_t calleeIndex;
     uint32_t arity;
     if (!f.iter().readCall(&calleeIndex, &arity))
         return false;
 
-    const Sig& sig = *f.mg().funcSigs[calleeIndex];
+    // For asm.js and old-format wasm code, imports are not part of the function
+    // index space so in these cases firstFuncDefIndex is fixed to 0, even if
+    // there are function imports.
+    if (calleeIndex < f.mg().firstFuncDefIndex)
+        return EmitCallImportCommon(f, lineOrBytecode, calleeIndex);
+
+    uint32_t funcDefIndex = calleeIndex - f.mg().firstFuncDefIndex;
+    const Sig& sig = *f.mg().funcDefSigs[funcDefIndex];
 
     CallCompileState call(f, lineOrBytecode);
     if (!EmitCallArgs(f, sig, InterModule::False, &call))
         return false;
 
     if (!f.iter().readCallReturn(sig.ret()))
         return false;
 
     MDefinition* def;
-    if (!f.internalCall(sig, calleeIndex, call, &def))
+    if (!f.callDefinition(sig, funcDefIndex, call, &def))
         return false;
 
     if (IsVoid(sig.ret()))
         return true;
 
     f.iter().setResult(def);
     return true;
 }
 
 static bool
+EmitCallImport(FunctionCompiler& f, uint32_t callOffset)
+{
+    MOZ_ASSERT(!f.mg().firstFuncDefIndex);
+
+    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);
+
+    uint32_t funcImportIndex;
+    uint32_t arity;
+    if (!f.iter().readCallImport(&funcImportIndex, &arity))
+        return false;
+
+    return EmitCallImportCommon(f, lineOrBytecode, funcImportIndex);
+}
+
+static bool
 EmitCallIndirect(FunctionCompiler& f, uint32_t callOffset)
 {
     uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);
 
     uint32_t sigIndex;
     uint32_t arity;
     if (!f.iter().readCallIndirect(&sigIndex, &arity))
         return false;
@@ -1946,47 +1992,16 @@ EmitCallIndirect(FunctionCompiler& f, ui
     if (IsVoid(sig.ret()))
         return true;
 
     f.iter().setResult(def);
     return true;
 }
 
 static bool
-EmitCallImport(FunctionCompiler& f, uint32_t callOffset)
-{
-    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);
-
-    uint32_t funcImportIndex;
-    uint32_t arity;
-    if (!f.iter().readCallImport(&funcImportIndex, &arity))
-        return false;
-
-    const FuncImportGenDesc& funcImport = f.mg().funcImports[funcImportIndex];
-    const Sig& sig = *funcImport.sig;
-
-    CallCompileState call(f, lineOrBytecode);
-    if (!EmitCallArgs(f, sig, InterModule::True, &call))
-        return false;
-
-    if (!f.iter().readCallReturn(sig.ret()))
-        return false;
-
-    MDefinition* def;
-    if (!f.callImport(funcImport.globalDataOffset, call, sig.ret(), &def))
-        return false;
-
-    if (IsVoid(sig.ret()))
-        return true;
-
-    f.iter().setResult(def);
-    return true;
-}
-
-static bool
 EmitGetLocal(FunctionCompiler& f)
 {
     uint32_t id;
     if (!f.iter().readGetLocal(f.locals(), &id))
         return false;
 
     f.iter().setResult(f.getLocalDef(id));
     return true;
@@ -3689,17 +3704,17 @@ wasm::IonCompileFunction(IonCompileTask*
 
         if (!OptimizeMIR(&mir))
             return false;
 
         LIRGraph* lir = GenerateLIR(&mir);
         if (!lir)
             return false;
 
-        SigIdDesc sigId = task->mg().funcSigs[func.index()]->id;
+        SigIdDesc sigId = task->mg().funcDefSigs[func.defIndex()]->id;
 
         CodeGenerator codegen(&mir, lir, &results.masm());
         if (!codegen.generateWasm(sigId, &results.offsets()))
             return false;
     }
 
     return true;
 }
--- a/js/src/asmjs/WasmIonCompile.h
+++ b/js/src/asmjs/WasmIonCompile.h
@@ -33,37 +33,37 @@ typedef jit::ABIArgIter<ValTypeVector> A
 
 // The FuncBytes class represents a single, concurrently-compilable function.
 // A FuncBytes object is composed of the wasm function body bytes along with the
 // ambient metadata describing the function necessary to compile it.
 
 class FuncBytes
 {
     Bytes            bytes_;
-    uint32_t         index_;
+    uint32_t         defIndex_;
     const SigWithId& sig_;
     uint32_t         lineOrBytecode_;
     Uint32Vector     callSiteLineNums_;
 
   public:
     FuncBytes(Bytes&& bytes,
-              uint32_t index,
+              uint32_t defIndex,
               const SigWithId& sig,
               uint32_t lineOrBytecode,
               Uint32Vector&& callSiteLineNums)
       : bytes_(Move(bytes)),
-        index_(index),
+        defIndex_(defIndex),
         sig_(sig),
         lineOrBytecode_(lineOrBytecode),
         callSiteLineNums_(Move(callSiteLineNums))
     {}
 
     Bytes& bytes() { return bytes_; }
     const Bytes& bytes() const { return bytes_; }
-    uint32_t index() const { return index_; }
+    uint32_t defIndex() const { return defIndex_; }
     const SigWithId& sig() const { return sig_; }
     uint32_t lineOrBytecode() const { return lineOrBytecode_; }
     const Uint32Vector& callSiteLineNums() const { return callSiteLineNums_; }
 };
 
 typedef UniquePtr<FuncBytes> UniqueFuncBytes;
 
 // The FuncCompileResults class contains the results of compiling a single
--- a/js/src/asmjs/WasmJS.cpp
+++ b/js/src/asmjs/WasmJS.cpp
@@ -612,58 +612,73 @@ WasmInstanceObject::exports() const
 
 static bool
 WasmCall(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     RootedFunction callee(cx, &args.callee().as<JSFunction>());
 
     Instance& instance = ExportedFunctionToInstance(callee);
-    uint32_t funcIndex = ExportedFunctionToIndex(callee);
-    return instance.callExport(cx, funcIndex, args);
+    uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(callee);
+    return instance.callExport(cx, funcDefIndex, args);
 }
 
 /* static */ bool
 WasmInstanceObject::getExportedFunction(JSContext* cx, HandleWasmInstanceObject instanceObj,
-                                        uint32_t funcIndex, MutableHandleFunction fun)
+                                        uint32_t funcDefIndex, MutableHandleFunction fun)
 {
-    if (ExportMap::Ptr p = instanceObj->exports().lookup(funcIndex)) {
+    if (ExportMap::Ptr p = instanceObj->exports().lookup(funcDefIndex)) {
         fun.set(p->value());
         return true;
     }
 
     const Instance& instance = instanceObj->instance();
-    RootedAtom name(cx, instance.code().getFuncAtom(cx, funcIndex));
+    RootedAtom name(cx, instance.code().getFuncDefAtom(cx, funcDefIndex));
     if (!name)
         return false;
 
-    unsigned numArgs = instance.metadata().lookupFuncExport(funcIndex).sig().args().length();
+    unsigned numArgs = instance.metadata().lookupFuncDefExport(funcDefIndex).sig().args().length();
     fun.set(NewNativeConstructor(cx, WasmCall, numArgs, name, gc::AllocKind::FUNCTION_EXTENDED,
                                  GenericObject, JSFunction::ASMJS_CTOR));
     if (!fun)
         return false;
 
     fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT, ObjectValue(*instanceObj));
-    fun->setExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT, Int32Value(funcIndex));
+    fun->setExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT, Int32Value(funcDefIndex));
 
-    if (!instanceObj->exports().putNew(funcIndex, fun)) {
+    if (!instanceObj->exports().putNew(funcDefIndex, fun)) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     return true;
 }
 
+const CodeRange&
+WasmInstanceObject::getExportedFunctionCodeRange(HandleFunction fun)
+{
+    uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(fun);
+    MOZ_ASSERT(exports().lookup(funcDefIndex)->value() == fun);
+    const Metadata& metadata = instance().metadata();
+    return metadata.codeRanges[metadata.lookupFuncDefExport(funcDefIndex).codeRangeIndex()];
+}
+
 bool
 wasm::IsExportedFunction(JSFunction* fun)
 {
     return fun->maybeNative() == WasmCall;
 }
 
 bool
+wasm::IsExportedWasmFunction(JSFunction* fun)
+{
+    return IsExportedFunction(fun) && !ExportedFunctionToInstance(fun).isAsmJS();
+}
+
+bool
 wasm::IsExportedFunction(const Value& v, MutableHandleFunction f)
 {
     if (!v.isObject())
         return false;
 
     JSObject& obj = v.toObject();
     if (!obj.is<JSFunction>() || !IsExportedFunction(&obj.as<JSFunction>()))
         return false;
@@ -682,20 +697,20 @@ WasmInstanceObject*
 wasm::ExportedFunctionToInstanceObject(JSFunction* fun)
 {
     MOZ_ASSERT(IsExportedFunction(fun));
     const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT);
     return &v.toObject().as<WasmInstanceObject>();
 }
 
 uint32_t
-wasm::ExportedFunctionToIndex(JSFunction* fun)
+wasm::ExportedFunctionToDefinitionIndex(JSFunction* fun)
 {
     MOZ_ASSERT(IsExportedFunction(fun));
-    const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT);
+    const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT);
     return v.toInt32();
 }
 
 // ============================================================================
 // WebAssembly.Memory class and methods
 
 const Class WasmMemoryObject::class_ =
 {
@@ -1035,17 +1050,17 @@ WasmTableObject::getImpl(JSContext* cx, 
     // A non-function code range means the bad-indirect-call stub, so a null element.
     if (!codeRange || !codeRange->isFunction()) {
         args.rval().setNull();
         return true;
     }
 
     RootedWasmInstanceObject instanceObj(cx, instance.object());
     RootedFunction fun(cx);
-    if (!instanceObj->getExportedFunction(cx, instanceObj, codeRange->funcIndex(), &fun))
+    if (!instanceObj->getExportedFunction(cx, instanceObj, codeRange->funcDefIndex(), &fun))
         return false;
 
     args.rval().setObject(*fun);
     return true;
 }
 
 /* static */ bool
 WasmTableObject::get(JSContext* cx, unsigned argc, Value* vp)
@@ -1072,42 +1087,42 @@ WasmTableObject::setImpl(JSContext* cx, 
         return false;
     }
 
     uint32_t index = uint32_t(indexDbl);
     MOZ_ASSERT(double(index) == indexDbl);
 
     RootedFunction value(cx);
     if (!IsExportedFunction(args[1], &value) && !args[1].isNull()) {
-        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_SET_VALUE);
+        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_TABLE_VALUE);
         return false;
     }
 
     if (!table.initialized()) {
         if (!value) {
             args.rval().setUndefined();
             return true;
         }
 
         table.init(ExportedFunctionToInstance(value));
     }
 
     if (value) {
         RootedWasmInstanceObject instanceObj(cx, ExportedFunctionToInstanceObject(value));
-        uint32_t funcIndex = ExportedFunctionToIndex(value);
+        uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(value);
 
 #ifdef DEBUG
         RootedFunction f(cx);
-        MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcIndex, &f));
+        MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcDefIndex, &f));
         MOZ_ASSERT(value == f);
 #endif
 
         Instance& instance = instanceObj->instance();
-        const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
-        const CodeRange& codeRange = instance.metadata().codeRanges[funcExport.codeRangeIndex()];
+        const FuncDefExport& funcDefExport = instance.metadata().lookupFuncDefExport(funcDefIndex);
+        const CodeRange& codeRange = instance.metadata().codeRanges[funcDefExport.codeRangeIndex()];
         void* code = instance.codeSegment().base() + codeRange.funcTableEntry();
         table.set(index, code, instance);
     } else {
         table.setNull(index);
     }
 
     args.rval().setUndefined();
     return true;
--- a/js/src/asmjs/WasmJS.h
+++ b/js/src/asmjs/WasmJS.h
@@ -49,26 +49,29 @@ extern const char InstanceExportField[];
 
 // These accessors can be used to probe JS values for being an exported wasm
 // function.
 
 extern bool
 IsExportedFunction(JSFunction* fun);
 
 extern bool
+IsExportedWasmFunction(JSFunction* fun);
+
+extern bool
 IsExportedFunction(const Value& v, MutableHandleFunction f);
 
 extern Instance&
 ExportedFunctionToInstance(JSFunction* fun);
 
 extern WasmInstanceObject*
 ExportedFunctionToInstanceObject(JSFunction* fun);
 
 extern uint32_t
-ExportedFunctionToIndex(JSFunction* fun);
+ExportedFunctionToDefinitionIndex(JSFunction* fun);
 
 } // namespace wasm
 
 // 'Wasm' and its one function 'instantiateModule' are transitional APIs and
 // will be removed (replaced by 'WebAssembly') before release.
 
 extern const Class WasmClass;
 
@@ -112,19 +115,19 @@ class WasmInstanceObject : public Native
 {
     static const unsigned INSTANCE_SLOT = 0;
     static const unsigned EXPORTS_SLOT = 1;
     static const ClassOps classOps_;
     bool isNewborn() const;
     static void finalize(FreeOp* fop, JSObject* obj);
     static void trace(JSTracer* trc, JSObject* obj);
 
-    // ExportMap maps from function index to exported function object. This map
-    // is weak to avoid holding objects alive; the point is just to ensure a
-    // unique object identity for any given function object.
+    // ExportMap maps from function definition index to exported function
+    // object. This map is weak to avoid holding objects alive; the point is
+    // just to ensure a unique object identity for any given function object.
     using ExportMap = GCHashMap<uint32_t,
                                 ReadBarrieredFunction,
                                 DefaultHasher<uint32_t>,
                                 SystemAllocPolicy>;
     using WeakExportMap = JS::WeakCache<ExportMap>;
     WeakExportMap& exports() const;
 
   public:
@@ -142,16 +145,18 @@ class WasmInstanceObject : public Native
                                       const wasm::ValVector& globalImports,
                                       HandleObject proto);
     wasm::Instance& instance() const;
 
     static bool getExportedFunction(JSContext* cx,
                                     HandleWasmInstanceObject instanceObj,
                                     uint32_t funcIndex,
                                     MutableHandleFunction fun);
+
+    const wasm::CodeRange& getExportedFunctionCodeRange(HandleFunction fun);
 };
 
 // The class of WebAssembly.Memory. A WasmMemoryObject references an ArrayBuffer
 // or SharedArrayBuffer object which owns the actual memory.
 
 class WasmMemoryObject : public NativeObject
 {
     static const unsigned BUFFER_SLOT = 0;
--- a/js/src/asmjs/WasmModule.cpp
+++ b/js/src/asmjs/WasmModule.cpp
@@ -423,16 +423,17 @@ EvaluateInitExpr(const ValVector& global
     }
 
     MOZ_CRASH("bad initializer expression");
 }
 
 bool
 Module::initSegments(JSContext* cx,
                      HandleWasmInstanceObject instanceObj,
+                     Handle<FunctionVector> funcImports,
                      HandleWasmMemoryObject memoryObj,
                      const ValVector& globalImports) const
 {
     Instance& instance = instanceObj->instance();
     const SharedTableVector& tables = instance.tables();
 
     // Perform all error checks up front so that this function does not perform
     // partial initialization if an error is reported.
@@ -474,23 +475,43 @@ Module::initSegments(JSContext* cx,
     for (const ElemSegment& seg : elemSegments_) {
         Table& table = *tables[seg.tableIndex];
         uint32_t offset = EvaluateInitExpr(globalImports, seg.offset);
         bool profilingEnabled = instance.code().profilingEnabled();
         const CodeRangeVector& codeRanges = metadata().codeRanges;
         uint8_t* codeBase = instance.codeBase();
 
         for (uint32_t i = 0; i < seg.elemCodeRangeIndices.length(); i++) {
-            const CodeRange& cr = codeRanges[seg.elemCodeRangeIndices[i]];
-            uint32_t entryOffset = table.isTypedFunction()
-                                   ? profilingEnabled
-                                     ? cr.funcProfilingEntry()
-                                     : cr.funcNonProfilingEntry()
-                                   : cr.funcTableEntry();
-            table.set(offset + i, codeBase + entryOffset, instance);
+            uint32_t elemFuncIndex = seg.elemFuncIndices[i];
+            if (elemFuncIndex < funcImports.length()) {
+                MOZ_ASSERT(!metadata().isAsmJS());
+                MOZ_ASSERT(!table.isTypedFunction());
+                MOZ_ASSERT(seg.elemCodeRangeIndices[i] == UINT32_MAX);
+
+                HandleFunction f = funcImports[elemFuncIndex];
+                if (!IsExportedWasmFunction(f)) {
+                    JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_TABLE_VALUE);
+                    return false;
+                }
+
+                WasmInstanceObject* exportInstanceObj = ExportedFunctionToInstanceObject(f);
+                const CodeRange& cr = exportInstanceObj->getExportedFunctionCodeRange(f);
+                Instance& exportInstance = exportInstanceObj->instance();
+                table.set(offset + i, exportInstance.codeBase() + cr.funcTableEntry(), exportInstance);
+            } else {
+                MOZ_ASSERT(seg.elemCodeRangeIndices[i] != UINT32_MAX);
+
+                const CodeRange& cr = codeRanges[seg.elemCodeRangeIndices[i]];
+                uint32_t entryOffset = table.isTypedFunction()
+                                       ? profilingEnabled
+                                         ? cr.funcProfilingEntry()
+                                         : cr.funcNonProfilingEntry()
+                                       : cr.funcTableEntry();
+                table.set(offset + i, codeBase + entryOffset, instance);
+            }
         }
     }
 
     if (memoryObj) {
         uint8_t* memoryBase = memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
 
         for (const DataSegment& seg : dataSegments_) {
             MOZ_ASSERT(seg.bytecodeOffset <= bytecode_->length());
@@ -511,21 +532,21 @@ Module::instantiateFunctions(JSContext* 
     if (metadata().isAsmJS())
         return true;
 
     for (size_t i = 0; i < metadata_->funcImports.length(); i++) {
         HandleFunction f = funcImports[i];
         if (!IsExportedFunction(f) || ExportedFunctionToInstance(f).isAsmJS())
             continue;
 
-        uint32_t funcIndex = ExportedFunctionToIndex(f);
+        uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(f);
         Instance& instance = ExportedFunctionToInstance(f);
-        const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
+        const FuncDefExport& funcDefExport = instance.metadata().lookupFuncDefExport(funcDefIndex);
 
-        if (funcExport.sig() != metadata_->funcImports[i].sig()) {
+        if (funcDefExport.sig() != metadata_->funcImports[i].sig()) {
             JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMPORT_SIG);
             return false;
         }
     }
 
     return true;
 }
 
@@ -631,18 +652,40 @@ Module::instantiateTable(JSContext* cx, 
             }
         }
     }
 
     return true;
 }
 
 static bool
-ExportGlobalValue(JSContext* cx, const GlobalDescVector& globals, uint32_t globalIndex,
-                  const ValVector& globalImports, MutableHandleValue jsval)
+GetFunctionExport(JSContext* cx,
+                  HandleWasmInstanceObject instanceObj,
+                  Handle<FunctionVector> funcImports,
+                  const Export& exp,
+                  MutableHandleValue val)
+{
+    if (exp.funcIndex() < funcImports.length()) {
+        val.setObject(*funcImports[exp.funcIndex()]);
+        return true;
+    }
+
+    uint32_t funcDefIndex = exp.funcIndex() - funcImports.length();
+
+    RootedFunction fun(cx);
+    if (!instanceObj->getExportedFunction(cx, instanceObj, funcDefIndex, &fun))
+        return false;
+
+    val.setObject(*fun);
+    return true;
+}
+
+static bool
+GetGlobalExport(JSContext* cx, const GlobalDescVector& globals, uint32_t globalIndex,
+                const ValVector& globalImports, MutableHandleValue jsval)
 {
     const GlobalDesc& global = globals[globalIndex];
 
     // Imports are located upfront in the globals array.
     Val val;
     switch (global.kind()) {
       case GlobalKind::Import:   val = globalImports[globalIndex]; break;
       case GlobalKind::Variable: MOZ_CRASH("mutable variables can't be exported");
@@ -675,68 +718,63 @@ ExportGlobalValue(JSContext* cx, const G
       }
     }
     MOZ_CRASH("unexpected type when creating global exports");
 }
 
 static bool
 CreateExportObject(JSContext* cx,
                    HandleWasmInstanceObject instanceObj,
+                   Handle<FunctionVector> funcImports,
                    HandleWasmTableObject tableObj,
                    HandleWasmMemoryObject memoryObj,
                    const ValVector& globalImports,
                    const ExportVector& exports,
                    MutableHandleObject exportObj)
 {
     const Instance& instance = instanceObj->instance();
     const Metadata& metadata = instance.metadata();
 
     if (metadata.isAsmJS() && exports.length() == 1 && strlen(exports[0].fieldName()) == 0) {
-        RootedFunction fun(cx);
-        if (!instanceObj->getExportedFunction(cx, instanceObj, exports[0].funcIndex(), &fun))
+        RootedValue val(cx);
+        if (!GetFunctionExport(cx, instanceObj, funcImports, exports[0], &val))
             return false;
-        exportObj.set(fun);
+        exportObj.set(&val.toObject());
         return true;
     }
 
     exportObj.set(JS_NewPlainObject(cx));
     if (!exportObj)
         return false;
 
     for (const Export& exp : exports) {
         JSAtom* atom = AtomizeUTF8Chars(cx, exp.fieldName(), strlen(exp.fieldName()));
         if (!atom)
             return false;
 
         RootedId id(cx, AtomToId(atom));
         RootedValue val(cx);
         switch (exp.kind()) {
-          case DefinitionKind::Function: {
-            RootedFunction fun(cx);
-            if (!instanceObj->getExportedFunction(cx, instanceObj, exp.funcIndex(), &fun))
+          case DefinitionKind::Function:
+            if (!GetFunctionExport(cx, instanceObj, funcImports, exp, &val))
                 return false;
-            val = ObjectValue(*fun);
             break;
-          }
-          case DefinitionKind::Table: {
+          case DefinitionKind::Table:
             val = ObjectValue(*tableObj);
             break;
-          }
-          case DefinitionKind::Memory: {
+          case DefinitionKind::Memory:
             if (metadata.assumptions.newFormat)
                 val = ObjectValue(*memoryObj);
             else
                 val = ObjectValue(memoryObj->buffer());
             break;
-          }
-          case DefinitionKind::Global: {
-            if (!ExportGlobalValue(cx, metadata.globals, exp.globalIndex(), globalImports, &val))
+          case DefinitionKind::Global:
+            if (!GetGlobalExport(cx, metadata.globals, exp.globalIndex(), globalImports, &val))
                 return false;
             break;
-          }
         }
 
         if (!JS_DefinePropertyById(cx, exportObj, id, val, JSPROP_ENUMERATE))
             return false;
     }
 
     return true;
 }
@@ -787,17 +825,17 @@ Module::instantiate(JSContext* cx,
                                             Move(tables),
                                             funcImports,
                                             globalImports,
                                             instanceProto));
     if (!instance)
         return false;
 
     RootedObject exportObj(cx);
-    if (!CreateExportObject(cx, instance, table, memory, globalImports, exports_, &exportObj))
+    if (!CreateExportObject(cx, instance, funcImports, table, memory, globalImports, exports_, &exportObj))
         return false;
 
     JSAtom* atom = Atomize(cx, InstanceExportField, strlen(InstanceExportField));
     if (!atom)
         return false;
     RootedId id(cx, AtomToId(atom));
 
     RootedValue val(cx, ObjectValue(*exportObj));
@@ -811,23 +849,33 @@ Module::instantiate(JSContext* cx,
 
     if (!cx->compartment()->wasm.registerInstance(cx, instance))
         return false;
 
     // Perform initialization as the final step after the instance is fully
     // constructed since this can make the instance live to content (even if the
     // start function fails).
 
-    if (!initSegments(cx, instance, memory, globalImports))
+    if (!initSegments(cx, instance, funcImports, memory, globalImports))
         return false;
 
     // Now that the instance is fully live and initialized, the start function.
     // Note that failure may cause instantiation to throw, but the instance may
     // still be live via edges created by initSegments or the start function.
 
     if (metadata_->hasStartFunction()) {
+        uint32_t startFuncIndex = metadata_->startFuncIndex();
         FixedInvokeArgs<0> args(cx);
-        if (!instance->instance().callExport(cx, metadata_->startFuncIndex(), args))
-            return false;
+        if (startFuncIndex < funcImports.length()) {
+            RootedValue fval(cx, ObjectValue(*funcImports[startFuncIndex]));
+            RootedValue thisv(cx);
+            RootedValue rval(cx);
+            if (!Call(cx, fval, thisv, args, &rval))
+                return false;
+        } else {
+            uint32_t funcDefIndex = startFuncIndex - funcImports.length();
+            if (!instance->instance().callExport(cx, funcDefIndex, args))
+                return false;
+        }
     }
 
     return true;
 }
--- a/js/src/asmjs/WasmModule.h
+++ b/js/src/asmjs/WasmModule.h
@@ -108,17 +108,17 @@ struct Import
 
     WASM_DECLARE_SERIALIZABLE(Import)
 };
 
 typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
 
 // Export describes the export of a definition in a Module to a field in the
 // export object. For functions, Export stores an index into the
-// FuncExportVector in Metadata. For memory and table exports, there is
+// FuncDefExportVector in Metadata. For memory and table exports, there is
 // at most one (default) memory/table so no index is needed. Note: a single
 // definition can be exported by multiple Exports in the ExportVector.
 //
 // ExportVector is built incrementally by ModuleGenerator and then stored
 // immutably by Module.
 
 class Export
 {
@@ -162,27 +162,19 @@ typedef Vector<DataSegment, 0, SystemAll
 struct ElemSegment
 {
     uint32_t tableIndex;
     InitExpr offset;
     Uint32Vector elemFuncIndices;
     Uint32Vector elemCodeRangeIndices;
 
     ElemSegment() = default;
-    ElemSegment(uint32_t tableIndex,
-                InitExpr offset,
-                Uint32Vector&& elemFuncIndices,
-                Uint32Vector&& elemCodeRangeIndices)
-      : tableIndex(tableIndex),
-        offset(offset),
-        elemFuncIndices(Move(elemFuncIndices)),
-        elemCodeRangeIndices(Move(elemCodeRangeIndices))
-    {
-        MOZ_ASSERT(elemFuncIndices.length() == elemCodeRangeIndices.length());
-    }
+    ElemSegment(uint32_t tableIndex, InitExpr offset, Uint32Vector&& elemFuncIndices)
+      : tableIndex(tableIndex), offset(offset), elemFuncIndices(Move(elemFuncIndices))
+    {}
 
     WASM_DECLARE_SERIALIZABLE(ElemSegment)
 };
 
 typedef Vector<ElemSegment, 0, SystemAllocPolicy> ElemSegmentVector;
 
 // Module represents a compiled wasm module and primarily provides two
 // operations: instantiation and serialization. A Module can be instantiated any
@@ -209,16 +201,17 @@ class Module : public RefCounted<Module>
 
     bool instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports) const;
     bool instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) const;
     bool instantiateTable(JSContext* cx,
                           MutableHandleWasmTableObject table,
                           SharedTableVector* tables) const;
     bool initSegments(JSContext* cx,
                       HandleWasmInstanceObject instance,
+                      Handle<FunctionVector> funcImports,
                       HandleWasmMemoryObject memory,
                       const ValVector& globalImports) const;
 
   public:
     Module(Bytes&& code,
            LinkData&& linkData,
            ImportVector&& imports,
            ExportVector&& exports,
--- a/js/src/asmjs/WasmStubs.cpp
+++ b/js/src/asmjs/WasmStubs.cpp
@@ -91,17 +91,17 @@ static const unsigned FramePushedAfterSa
 #endif
 static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
 
 // Generate a stub that enters wasm from a C++ caller via the native ABI. The
 // signature of the entry point is Module::ExportFuncPtr. The exported wasm
 // function has an ABI derived from its specific signature, so this function
 // must map from the ABI of ExportFuncPtr to the export's signature's ABI.
 Offsets
-wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
+wasm::GenerateEntry(MacroAssembler& masm, const FuncDefExport& func)
 {
     masm.haltingAlign(CodeAlignment);
 
     Offsets offsets;
     offsets.begin = masm.currentOffset();
 
     // Save the return address if it wasn't already saved by the call insn.
 #if defined(JS_CODEGEN_ARM)
@@ -155,21 +155,21 @@ wasm::GenerateEntry(MacroAssembler& masm
     masm.storeStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
 
     // Dynamically align the stack since ABIStackAlignment is not necessarily
     // AsmJSStackAlignment. We'll use entrySP to recover the original stack
     // pointer on return.
     masm.andToStackPtr(Imm32(~(AsmJSStackAlignment - 1)));
 
     // Bump the stack for the call.
-    masm.reserveStack(AlignBytes(StackArgBytes(fe.sig().args()), AsmJSStackAlignment));
+    masm.reserveStack(AlignBytes(StackArgBytes(func.sig().args()), AsmJSStackAlignment));
 
     // Copy parameters out of argv and into the registers/stack-slots specified by
     // the system ABI.
-    for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
+    for (ABIArgValTypeIter iter(func.sig().args()); !iter.done(); iter++) {
         unsigned argOffset = iter.index() * sizeof(ExportArg);
         Address src(argv, argOffset);
         MIRType type = iter.mirType();
         switch (iter->kind()) {
           case ABIArg::GPR:
             if (type == MIRType::Int32)
                 masm.load32(src, iter->gpr());
             else if (type == MIRType::Int64)
@@ -259,28 +259,28 @@ wasm::GenerateEntry(MacroAssembler& masm
                 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
             }
             break;
         }
     }
 
     // Call into the real function.
     masm.assertStackAlignment(AsmJSStackAlignment);
-    masm.call(CallSiteDesc(CallSiteDesc::Relative), fe.funcIndex());
+    masm.call(CallSiteDesc(CallSiteDesc::Relative), func.funcDefIndex());
 
     // Recover the stack pointer value before dynamic alignment.
     masm.loadWasmActivationFromTls(scratch);
     masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
     masm.setFramePushed(FramePushedForEntrySP);
 
     // Recover the 'argv' pointer which was saved before aligning the stack.
     masm.Pop(argv);
 
     // Store the return value in argv[0]
-    switch (fe.sig().ret()) {
+    switch (func.sig().ret()) {
       case ExprType::Void:
         break;
       case ExprType::I32:
         masm.store32(ReturnReg, Address(argv, 0));
         break;
       case ExprType::I64:
         masm.store64(ReturnReg64, Address(argv, 0));
         break;
--- a/js/src/asmjs/WasmStubs.h
+++ b/js/src/asmjs/WasmStubs.h
@@ -22,21 +22,21 @@
 #include "asmjs/WasmTypes.h"
 
 namespace js {
 
 namespace jit { class MacroAssembler; }
 
 namespace wasm {
 
-class FuncExport;
+class FuncDefExport;
 class FuncImport;
 
 extern Offsets
-GenerateEntry(jit::MacroAssembler& masm, const FuncExport& fe);
+GenerateEntry(jit::MacroAssembler& masm, const FuncDefExport& func);
 
 extern ProfilingOffsets
 GenerateInterpExit(jit::MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex);
 
 extern ProfilingOffsets
 GenerateJitExit(jit::MacroAssembler& masm, const FuncImport& fi);
 
 extern Offsets
--- a/js/src/asmjs/WasmTextToBinary.cpp
+++ b/js/src/asmjs/WasmTextToBinary.cpp
@@ -1368,22 +1368,24 @@ WasmTokenStream::next()
 namespace {
 
 struct WasmParseContext
 {
     WasmTokenStream ts;
     LifoAlloc& lifo;
     UniqueChars* error;
     DtoaState* dtoaState;
-
-    WasmParseContext(const char16_t* text, LifoAlloc& lifo, UniqueChars* error)
+    bool newFormat;
+
+    WasmParseContext(const char16_t* text, LifoAlloc& lifo, UniqueChars* error, bool newFormat)
       : ts(text, error),
         lifo(lifo),
         error(error),
-        dtoaState(NewDtoaState())
+        dtoaState(NewDtoaState()),
+        newFormat(newFormat)
     {}
 
     bool fail(const char* message) {
         error->reset(JS_smprintf(message));
         return false;
     }
     ~WasmParseContext() {
         DestroyDtoaState(dtoaState);
@@ -2203,17 +2205,17 @@ ParseExprInsideParens(WasmParseContext& 
         return ParseBranch(c, Expr::Br);
       case WasmToken::BrIf:
         return ParseBranch(c, Expr::BrIf);
       case WasmToken::BrTable:
         return ParseBranchTable(c, token);
       case WasmToken::Call:
         return ParseCall(c, Expr::Call);
       case WasmToken::CallImport:
-        return ParseCall(c, Expr::CallImport);
+        return ParseCall(c, c.newFormat ? Expr::Call : Expr::CallImport);
       case WasmToken::CallIndirect:
         return ParseCallIndirect(c);
       case WasmToken::ComparisonOpcode:
         return ParseComparisonOperator(c, token.expr());
       case WasmToken::Const:
         return ParseConst(c, token);
       case WasmToken::ConversionOpcode:
         return ParseConversionOperator(c, token.expr());
@@ -2497,32 +2499,32 @@ ParseGlobalType(WasmParseContext& c, Was
     *flags = 0x1;
     if (c.ts.getIf(WasmToken::Immutable))
         *flags = 0x0;
 
     return true;
 }
 
 static AstImport*
-ParseImport(WasmParseContext& c, bool newFormat, AstModule* module)
+ParseImport(WasmParseContext& c, AstModule* module)
 {
     AstName name = c.ts.getIfName();
 
     WasmToken moduleName;
     if (!c.ts.match(WasmToken::Text, &moduleName, c.error))
         return nullptr;
 
     WasmToken fieldName;
     if (!c.ts.match(WasmToken::Text, &fieldName, c.error))
         return nullptr;
 
     AstRef sigRef;
     WasmToken openParen;
     if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
-        if (newFormat) {
+        if (c.newFormat) {
             if (c.ts.getIf(WasmToken::Memory)) {
                 AstResizable memory;
                 if (!ParseResizable(c, &memory))
                     return nullptr;
                 if (!c.ts.match(WasmToken::CloseParen, c.error))
                     return nullptr;
                 return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(),
                                              DefinitionKind::Memory, memory);
@@ -2678,17 +2680,17 @@ ParseGlobal(WasmParseContext& c)
         return nullptr;
 
     return new(c.lifo) AstGlobal(name, typeToken.valueType(), flags, Some(init));
 }
 
 static AstModule*
 ParseModule(const char16_t* text, bool newFormat, LifoAlloc& lifo, UniqueChars* error)
 {
-    WasmParseContext c(text, lifo, error);
+    WasmParseContext c(text, lifo, error, newFormat);
 
     if (!c.ts.match(WasmToken::OpenParen, c.error))
         return nullptr;
     if (!c.ts.match(WasmToken::Module, c.error))
         return nullptr;
 
     auto module = new(c.lifo) AstModule(c.lifo);
     if (!module || !module->init())
@@ -2722,17 +2724,17 @@ ParseModule(const char16_t* text, bool n
           }
           case WasmToken::Data: {
             AstDataSegment* segment = ParseDataSegment(c, newFormat);
             if (!segment || !module->append(segment))
                 return nullptr;
             break;
           }
           case WasmToken::Import: {
-            AstImport* imp = ParseImport(c, newFormat, module);
+            AstImport* imp = ParseImport(c, module);
             if (!imp || !module->append(imp))
                 return nullptr;
             break;
           }
           case WasmToken::Export: {
             AstExport* exp = ParseExport(c);
             if (!exp || !module->append(exp))
                 return nullptr;
@@ -3200,61 +3202,63 @@ ResolveFunc(Resolver& r, AstFunc& func)
     for (AstExpr* expr : func.body()) {
         if (!ResolveExpr(r, *expr))
             return false;
     }
     return true;
 }
 
 static bool
-ResolveModule(LifoAlloc& lifo, AstModule* module, UniqueChars* error)
+ResolveModule(LifoAlloc& lifo, bool newFormat, AstModule* module, UniqueChars* error)
 {
     Resolver r(lifo, error);
 
     if (!r.init())
         return false;
 
     size_t numSigs = module->sigs().length();
     for (size_t i = 0; i < numSigs; i++) {
         AstSig* sig = module->sigs()[i];
         if (!r.registerSigName(sig->name(), i))
             return r.fail("duplicate signature");
     }
 
-    size_t numFuncs = module->funcs().length();
-    for (size_t i = 0; i < numFuncs; i++) {
-        AstFunc* func = module->funcs()[i];
-        if (!r.resolveSignature(func->sig()))
-            return false;
-        if (!r.registerFuncName(func->name(), i))
-            return r.fail("duplicate function");
-    }
-
-    size_t numImports = module->imports().length();
+    size_t lastFuncIndex = 0;
     size_t lastFuncImportIndex = 0;
     size_t lastGlobalIndex = 0;
-    for (size_t i = 0; i < numImports; i++) {
-        AstImport* imp = module->imports()[i];
+    for (AstImport* imp : module->imports()) {
         switch (imp->kind()) {
           case DefinitionKind::Function:
-            if (!r.registerImportName(imp->name(), lastFuncImportIndex++))
-                return r.fail("duplicate import");
+            if (newFormat) {
+                if (!r.registerFuncName(imp->name(), lastFuncIndex++))
+                    return r.fail("duplicate import");
+            } else {
+                if (!r.registerImportName(imp->name(), lastFuncImportIndex++))
+                    return r.fail("duplicate import");
+            }
             if (!r.resolveSignature(imp->funcSig()))
                 return false;
             break;
           case DefinitionKind::Global:
             if (!r.registerGlobalName(imp->name(), lastGlobalIndex++))
                 return r.fail("duplicate import");
             break;
           case DefinitionKind::Memory:
           case DefinitionKind::Table:
             break;
         }
     }
 
+    for (AstFunc* func : module->funcs()) {
+        if (!r.resolveSignature(func->sig()))
+            return false;
+        if (!r.registerFuncName(func->name(), lastFuncIndex++))
+            return r.fail("duplicate function");
+    }
+
     const AstGlobalVector& globals = module->globals();
     for (const AstGlobal* global : globals) {
         if (!r.registerGlobalName(global->name(), lastGlobalIndex++))
             return r.fail("duplicate import");
         if (global->hasInit() && !ResolveExpr(r, global->init()))
             return false;
     }
 
@@ -4198,20 +4202,20 @@ EncodeModule(AstModule& module, bool new
         return false;
 
     if (!EncodeExportSection(e, newFormat, module))
         return false;
 
     if (!EncodeStartSection(e, module))
         return false;
 
-    if (!EncodeCodeSection(e, module))
+    if (!EncodeElemSection(e, newFormat, module))
         return false;
 
-    if (!EncodeElemSection(e, newFormat, module))
+    if (!EncodeCodeSection(e, module))
         return false;
 
     if (!EncodeDataSection(e, newFormat, module))
         return false;
 
     return true;
 }
 
@@ -4220,13 +4224,13 @@ EncodeModule(AstModule& module, bool new
 bool
 wasm::TextToBinary(const char16_t* text, bool newFormat, Bytes* bytes, UniqueChars* error)
 {
     LifoAlloc lifo(AST_LIFO_DEFAULT_CHUNK_SIZE);
     AstModule* module = ParseModule(text, newFormat, lifo, error);
     if (!module)
         return false;
 
-    if (!ResolveModule(lifo, module, error))
+    if (!ResolveModule(lifo, newFormat, module, error))
         return false;
 
     return EncodeModule(*module, newFormat, bytes);
 }
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -79,16 +79,17 @@ using mozilla::PodCopy;
 using mozilla::PodEqual;
 using mozilla::RefCounted;
 using mozilla::Some;
 using mozilla::Unused;
 
 typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
 
 class Code;
+class CodeRange;
 class Memory;
 class Module;
 class Instance;
 class Table;
 
 // To call Vector::podResizeToFit, a type must specialize mozilla::IsPod
 // which is pretty verbose to do within js::wasm, so factor that process out
 // into a macro.
@@ -770,27 +771,27 @@ class CallSite : public CallSiteDesc
     // return address (x86/x64) or the prologue does (ARM/MIPS)).
     uint32_t stackDepth() const { return stackDepth_; }
 };
 
 WASM_DECLARE_POD_VECTOR(CallSite, CallSiteVector)
 
 class CallSiteAndTarget : public CallSite
 {
-    uint32_t targetIndex_;
+    uint32_t funcDefIndex_;
 
   public:
-    CallSiteAndTarget(CallSite cs, uint32_t targetIndex)
-      : CallSite(cs), targetIndex_(targetIndex)
+    CallSiteAndTarget(CallSite cs, uint32_t funcDefIndex)
+      : CallSite(cs), funcDefIndex_(funcDefIndex)
     { }
 
-    static const uint32_t NOT_INTERNAL = UINT32_MAX;
+    static const uint32_t NOT_DEFINITION = UINT32_MAX;
 
-    bool isInternal() const { return targetIndex_ != NOT_INTERNAL; }
-    uint32_t targetIndex() const { MOZ_ASSERT(isInternal()); return targetIndex_; }
+    bool isDefinition() const { return funcDefIndex_ != NOT_DEFINITION; }
+    uint32_t funcDefIndex() const { MOZ_ASSERT(isDefinition()); return funcDefIndex_; }
 };
 
 typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
 
 // Metadata for a bounds check that may need patching later.
 
 class BoundsCheck
 {
@@ -1054,41 +1055,59 @@ struct TableDesc
 WASM_DECLARE_POD_VECTOR(TableDesc, TableDescVector)
 
 // CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
 // This is hoisted into WasmTypes.h for sharing between Ion and Baseline.
 
 class CalleeDesc
 {
   public:
-    // Unlike Builtin, BuilinInstanceMethod expects an implicit Instance*
-    // as its first argument. (e.g. see Instance::growMemory)
-    enum Which { Internal, Import, WasmTable, AsmJSTable, Builtin, BuiltinInstanceMethod };
+    enum Which {
+        // Calls a function defined in the same module by its index.
+        Definition,
+
+        // Calls the import identified by the offset of its FuncImportTls in
+        // thread-local data.
+        Import,
+
+        // Calls a WebAssembly table (heterogeneous, index must be bounds
+        // checked, callee instance depends on TableDesc).
+        WasmTable,
+
+        // Calls an asm.js table (homogeneous, masked index, same-instance).
+        AsmJSTable,
+
+        // Calls a C++ function identified by SymbolicAddress.
+        Builtin,
+
+        // Like Builtin, but automatically passes Instance* as first argument.
+        BuiltinInstanceMethod
+    };
 
   private:
     Which which_;
     union U {
         U() {}
-        uint32_t internalFuncIndex_;
+        uint32_t funcDefIndex_;
         struct {
             uint32_t globalDataOffset_;
         } import;
         struct {
             TableDesc desc_;
             SigIdDesc sigId_;
         } table;
         SymbolicAddress builtin_;
     } u;
 
   public:
     CalleeDesc() {}
-    static CalleeDesc internal(uint32_t callee) {
+    static CalleeDesc definition(uint32_t funcDefIndex) {
         CalleeDesc c;
-        c.which_ = Internal;
-        c.u.internalFuncIndex_ = callee;
+        c.which_ = Definition;
+        c.u.funcDefIndex_ = funcDefIndex;
         return c;
     }
     static CalleeDesc import(uint32_t globalDataOffset) {
         CalleeDesc c;
         c.which_ = Import;
         c.u.import.globalDataOffset_ = globalDataOffset;
         return c;
     }
@@ -1115,19 +1134,19 @@ class CalleeDesc
         CalleeDesc c;
         c.which_ = BuiltinInstanceMethod;
         c.u.builtin_ = callee;
         return c;
     }
     Which which() const {
         return which_;
     }
-    uint32_t internalFuncIndex() const {
-        MOZ_ASSERT(which_ == Internal);
-        return u.internalFuncIndex_;
+    uint32_t funcDefIndex() const {
+        MOZ_ASSERT(which_ == Definition);
+        return u.funcDefIndex_;
     }
     uint32_t importGlobalDataOffset() const {
         MOZ_ASSERT(which_ == Import);
         return u.import.globalDataOffset_;
     }
     bool isTable() const {
         return which_ == WasmTable || which_ == AsmJSTable;
     }
--- a/js/src/jit-test/tests/wasm/basic.js
+++ b/js/src/jit-test/tests/wasm/basic.js
@@ -440,19 +440,19 @@ assertErrorMessage(() => i2v(5), Error, 
 {
     enableSPSProfiling();
 
     var stack;
     assertEq(wasmEvalText(
         `(module
             (type $v2v (func))
             (import $foo "f" "")
-            (func (call_import $foo))
-            (func (result i32) (i32.const 0))
-            (table 0 1)
+            (func $a (call_import $foo))
+            (func $b (result i32) (i32.const 0))
+            (table $a $b)
             (func $bar (call_indirect $v2v (i32.const 0)))
             (export "" $bar)
         )`,
         {f:() => { stack = new Error().stack }}
     )(), undefined);
 
     disableSPSProfiling();
 
--- a/js/src/jit-test/tests/wasm/import-export.js
+++ b/js/src/jit-test/tests/wasm/import-export.js
@@ -298,16 +298,31 @@ assertEq(e1.foo, tbl.get(0));
 tbl.set(1, e1.foo);
 assertEq(e1.foo, tbl.get(1));
 var e2 = new Instance(new Module(code), {a:{b:tbl}}).exports;
 assertEq(e2.foo, tbl.get(0));
 assertEq(e1.foo, tbl.get(1));
 assertEq(tbl.get(0) === e1.foo, false);
 assertEq(e1.foo === e2.foo, false);
 
+var code = textToBinary('(module (table (resizable 2 2)) (import $foo "a" "b" (result i32)) (func $bar (result i32) (i32.const 13)) (elem (i32.const 0) $foo $bar) (export "foo" $foo) (export "bar" $bar) (export "tbl" table))');
+var foo = new Instance(new Module(textToBinary('(module (func (result i32) (i32.const 42)) (export "foo" 0))'))).exports.foo;
+var e1 = new Instance(new Module(code), {a:{b:foo}}).exports;
+assertEq(foo, e1.foo);
+assertEq(foo, e1.tbl.get(0));
+assertEq(e1.bar, e1.tbl.get(1));
+assertEq(e1.tbl.get(0)(), 42);
+assertEq(e1.tbl.get(1)(), 13);
+var e2 = new Instance(new Module(code), {a:{b:foo}}).exports;
+assertEq(e1.foo, e2.foo);
+assertEq(e1.bar === e2.bar, false);
+assertEq(e1.tbl === e2.tbl, false);
+assertEq(e1.tbl.get(0), e2.tbl.get(0));
+assertEq(e1.tbl.get(1) === e2.tbl.get(1), false);
+
 // i64 is fully allowed for imported wasm functions
 
 var code1 = textToBinary('(module (func $exp (param i64) (result i64) (i64.add (get_local 0) (i64.const 10))) (export "exp" $exp))');
 var e1 = new Instance(new Module(code1)).exports;
 var code2 = textToBinary('(module (import $i "a" "b" (param i64) (result i64)) (func $f (result i32) (i32.wrap/i64 (call_import $i (i64.const 42)))) (export "f" $f))');
 var e2 = new Instance(new Module(code2), {a:{b:e1.exp}}).exports;
 assertEq(e2.f(), 52);
 
@@ -426,29 +441,49 @@ assertEq(tbl.get(2), null);
 assertEq(tbl.get(3)(), 3);
 assertEq(tbl.get(4)(), 4);
 for (var i = 5; i < 10; i++)
     assertEq(tbl.get(i), null);
 
 // Cross-instance calls
 
 var i1 = new Instance(new Module(textToBinary(`(module (func) (func (param i32) (result i32) (i32.add (get_local 0) (i32.const 1))) (func) (export "f" 1))`)));
-var i2 = new Instance(new Module(textToBinary(`(module (import "a" "b" (param i32) (result i32)) (func $g (result i32) (call_import 0 (i32.const 13))) (export "g" $g))`)), {a:{b:i1.exports.f}});
+var i2 = new Instance(new Module(textToBinary(`(module (import $imp "a" "b" (param i32) (result i32)) (func $g (result i32) (call $imp (i32.const 13))) (export "g" $g))`)), {a:{b:i1.exports.f}});
 assertEq(i2.exports.g(), 14);
 
+var i1 = new Instance(new Module(textToBinary(`(module
+    (memory 1 1)
+    (data (i32.const 0) "\\42")
+    (func $f (result i32) (i32.load (i32.const 0)))
+    (export "f" $f)
+)`)));
+var i2 = new Instance(new Module(textToBinary(`(module
+    (import $imp "a" "b" (result i32))
+    (memory 1 1)
+    (data (i32.const 0) "\\13")
+    (table (resizable 2 2))
+    (elem (i32.const 0) $imp $def)
+    (func $def (result i32) (i32.load (i32.const 0)))
+    (type $v2i (func (result i32)))
+    (func $call (param i32) (result i32) (call_indirect $v2i (get_local 0)))
+    (export "call" $call)
+)`)), {a:{b:i1.exports.f}});
+assertEq(i2.exports.call(0), 0x42);
+assertEq(i2.exports.call(1), 0x13);
+
 var m = new Module(textToBinary(`(module
     (import $val "a" "val" (global i32 immutable))
     (import $next "a" "next" (result i32))
     (memory 1)
     (func $start (i32.store (i32.const 0) (get_global $val)))
     (start $start)
     (func $call (result i32)
         (i32.add
             (get_global $val)
             (i32.add
                 (i32.load (i32.const 0))
-                (call_import $next))))
+                (call $next))))
     (export "call" $call)
 )`));
 var e = {call:() => 1000};
 for (var i = 0; i < 10; i++)
     e = new Instance(m, {a:{val:i, next:e.call}}).exports;
 assertEq(e.call(), 1090);
--- a/js/src/jit-test/tests/wasm/import-gc.js
+++ b/js/src/jit-test/tests/wasm/import-gc.js
@@ -7,18 +7,18 @@ load(libdir + 'asserts.js');
 
 const Module = WebAssembly.Module;
 const Instance = WebAssembly.Instance;
 
 // Explicitly opt into the new binary format for imports and exports until it
 // is used by default everywhere.
 const textToBinary = str => wasmTextToBinary(str, 'new-format');
 
-const m1 = new Module(textToBinary(`(module (func) (export "f" 0))`));
-const m2 = new Module(textToBinary(`(module (import "a" "f") (func) (export "g" 0))`));
+const m1 = new Module(textToBinary(`(module (func $f) (export "f" $f))`));
+const m2 = new Module(textToBinary(`(module (import "a" "f") (func $f) (export "g" $f))`));
 
 // Imported instance objects should stay alive as long as any importer is alive.
 resetFinalizeCount();
 var i1 = new Instance(m1);
 var i2 = new Instance(m2, {a:i1.exports});
 var f = i1.exports.f;
 var g = i2.exports.g;
 i1.edge = makeFinalizeObserver();
--- a/js/src/jit-test/tests/wasm/jsapi.js
+++ b/js/src/jit-test/tests/wasm/jsapi.js
@@ -249,20 +249,20 @@ assertEq(setDesc.configurable, true);
 const set = setDesc.value;
 assertEq(set.length, 2);
 assertErrorMessage(() => set.call(), TypeError, /called on incompatible undefined/);
 assertErrorMessage(() => set.call({}), TypeError, /called on incompatible Object/);
 assertErrorMessage(() => set.call(tbl1, 0), TypeError, /requires more than 1 argument/);
 assertErrorMessage(() => set.call(tbl1, 2, null), RangeError, /out-of-range index/);
 assertErrorMessage(() => set.call(tbl1, -1, null), RangeError, /out-of-range index/);
 assertErrorMessage(() => set.call(tbl1, Math.pow(2,33), null), RangeError, /out-of-range index/);
-assertErrorMessage(() => set.call(tbl1, 0, undefined), TypeError, /second argument must be null or an exported WebAssembly Function object/);
-assertErrorMessage(() => set.call(tbl1, 0, {}), TypeError, /second argument must be null or an exported WebAssembly Function object/);
-assertErrorMessage(() => set.call(tbl1, 0, function() {}), TypeError, /second argument must be null or an exported WebAssembly Function object/);
-assertErrorMessage(() => set.call(tbl1, 0, Math.sin), TypeError, /second argument must be null or an exported WebAssembly Function object/);
+assertErrorMessage(() => set.call(tbl1, 0, undefined), TypeError, /can only assign WebAssembly exported functions to Table/);
+assertErrorMessage(() => set.call(tbl1, 0, {}), TypeError, /can only assign WebAssembly exported functions to Table/);
+assertErrorMessage(() => set.call(tbl1, 0, function() {}), TypeError, /can only assign WebAssembly exported functions to Table/);
+assertErrorMessage(() => set.call(tbl1, 0, Math.sin), TypeError, /can only assign WebAssembly exported functions to Table/);
 assertErrorMessage(() => set.call(tbl1, {valueOf() { throw Error("hai") }}, null), Error, "hai");
 assertEq(set.call(tbl1, 0, null), undefined);
 assertEq(set.call(tbl1, 1, null), undefined);
 
 // 'WebAssembly.compile' property
 const compileDesc = Object.getOwnPropertyDescriptor(WebAssembly, 'compile');
 assertEq(typeof compileDesc.value, "function");
 assertEq(compileDesc.writable, true);
--- a/js/src/jit-test/tests/wasm/profiling.js
+++ b/js/src/jit-test/tests/wasm/profiling.js
@@ -184,17 +184,17 @@ Error);
 
 (function() {
     var m1 = new Module(textToBinary(`(module
         (func $foo (result i32) (i32.const 42))
         (export "foo" $foo)
     )`));
     var m2 = new Module(textToBinary(`(module
         (import $foo "a" "foo" (result i32))
-        (func $bar (result i32) (call_import $foo))
+        (func $bar (result i32) (call $foo))
         (export "bar" $bar)
     )`));
 
     // Instantiate while not active:
     var e1 = new Instance(m1).exports;
     var e2 = new Instance(m2, {a:e1}).exports;
     enableSPSProfiling();
     enableSingleStepProfiling();
--- a/js/src/jit-test/tests/wasm/spec/func_ptrs.wast
+++ b/js/src/jit-test/tests/wasm/spec/func_ptrs.wast
@@ -73,17 +73,17 @@
 (assert_return (invoke "callu" (i32.const 4)) (i32.const 5))
 (assert_return (invoke "callu" (i32.const 5)) (i32.const 1))
 (assert_return (invoke "callu" (i32.const 6)) (i32.const 3))
 (assert_trap   (invoke "callu" (i32.const 7)) "undefined table index 7")
 (assert_trap   (invoke "callu" (i32.const -1)) "undefined table index -1")
 
 (module
     (type $T (func (result i32)))
-    (table 0 1)
+    (table $t1 $t2)
 
     (import $print_i32 "spectest" "print" (param i32))
 
     (func $t1 (type $T) (i32.const 1))
     (func $t2 (type $T) (i32.const 2))
 
     (func $callt (param $i i32) (result i32)
         (call_indirect $T (get_local $i)))
--- a/js/src/jit-test/tests/wasm/spec/start.wast
+++ b/js/src/jit-test/tests/wasm/spec/start.wast
@@ -59,31 +59,31 @@
   (func $get (result i32)
     (return (i32.load8_u (i32.const 0)))
   )
   (func $main
     (call $inc)
     (call $inc)
     (call $inc)
   )
-  (start 2)
+  (start $main)
   (export "inc" $inc)
   (export "get" $get)
 )
 (assert_return (invoke "get") (i32.const 68))
 (invoke "inc")
 (assert_return (invoke "get") (i32.const 69))
 (invoke "inc")
 (assert_return (invoke "get") (i32.const 70))
 
 (module
  (import $print_i32 "spectest" "print" (param i32))
  (func $main
    (call_import $print_i32 (i32.const 1)))
- (start 0)
+ (start $main)
 )
 
 (module
  (import $print_i32 "spectest" "print" (param i32))
  (func $main
    (call_import $print_i32 (i32.const 2)))
  (start $main)
 )
--- a/js/src/jit-test/tests/wasm/start.js
+++ b/js/src/jit-test/tests/wasm/start.js
@@ -9,38 +9,46 @@ assertErrorMessage(() => wasmEvalText('(
 assertErrorMessage(() => wasmEvalText('(module (func (param i32) (param f32)) (start 0))'), TypeError, /must be nullary/);
 assertErrorMessage(() => wasmEvalText('(module (func (param i32) (param f32) (param f64)) (start 0))'), TypeError, /must be nullary/);
 
 assertErrorMessage(() => wasmEvalText('(module (func (result f32)) (start 0))'), TypeError, /must not return anything/);
 
 // Basic use case.
 var count = 0;
 function inc() { count++; }
-var exports = wasmEvalText(`(module (import "inc" "") (func (param i32)) (func (call_import 0)) (start 1))`, { inc });
+var exports = wasmEvalText(`(module (import $imp "inc" "") (func $f (param i32)) (func (call_import $imp)) (start $f))`, { inc });
 assertEq(count, 1);
 assertEq(Object.keys(exports).length, 0);
 
 count = 0;
 exports = wasmEvalText(`(module (import "inc" "") (func $start (call_import 0)) (start $start) (export "" 0))`, { inc });
 assertEq(count, 1);
 assertEq(typeof exports, 'function');
 assertEq(exports(), undefined);
 assertEq(count, 2);
 
 // New API.
 const Module = WebAssembly.Module;
 const Instance = WebAssembly.Instance;
 const textToBinary = str => wasmTextToBinary(str, 'new-format');
 
 count = 0;
-const m = new Module(textToBinary('(module (import "inc" "") (func) (func (call_import 0)) (start 1) (export "" 1))'));
+const m = new Module(textToBinary('(module (import $imp "inc" "") (func) (func $start (call $imp)) (start $start) (export "" $start))'));
 assertEq(count, 0);
 
 assertErrorMessage(() => new Instance(m), TypeError, /no import object given/);
 assertEq(count, 0);
 
 const i1 = new Instance(m, { inc });
 assertEq(count, 1);
 i1.exports[""]();
 assertEq(count, 2);
 
 const i2 = new Instance(m, { inc });
 assertEq(count, 3);
+
+function fail() { assertEq(true, false); }
+
+count = 0;
+const m2 = new Module(textToBinary('(module (import "fail" "") (import $imp "inc" "") (func) (start $imp))'));
+assertEq(count, 0);
+new Instance(m2, { inc, fail });
+assertEq(count, 1);
--- a/js/src/jit-test/tests/wasm/table-gc.js
+++ b/js/src/jit-test/tests/wasm/table-gc.js
@@ -197,18 +197,18 @@ tbl.edge = makeFinalizeObserver();
 function runTest() {
     tbl = null;
     gc();
     assertEq(finalizeCount(), 0);
     return 100;
 }
 var i = evalText(
     `(module
-        (import "a" "b" (result i32))
-        (func $f (param i32) (result i32) (call_import 0))
+        (import $imp "a" "b" (result i32))
+        (func $f (param i32) (result i32) (call $imp))
         (export "f" $f)
     )`,
     {a:{b:runTest}}
 );
 i.edge = makeFinalizeObserver();
 tbl.set(0, i.exports.f);
 var m = new Module(textToBinary(`(module
     (import "a" "b" (table ${N}))
--- a/js/src/jit-test/tests/wasm/tables.js
+++ b/js/src/jit-test/tests/wasm/tables.js
@@ -123,17 +123,17 @@ assertEq(e4.call(2), 13);
 
 var m = new Module(textToBinary(`(module
     (type $i2i (func (param i32) (result i32)))
     (import "a" "mem" (memory 1))
     (import "a" "tbl" (table 10))
     (import $imp "a" "imp" (result i32))
     (func $call (param $i i32) (result i32)
         (i32.add
-            (call_import $imp)
+            (call $imp)
             (i32.add
                 (i32.load (i32.const 0))
                 (if (i32.eqz (get_local $i))
                     (then (i32.const 0))
                     (else
                         (set_local $i (i32.sub (get_local $i) (i32.const 1)))
                         (call_indirect $i2i (get_local $i) (get_local $i)))))))
     (export "call" $call)
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -81,20 +81,20 @@ MacroAssembler::PushWithPatch(ImmPtr imm
 void
 MacroAssembler::call(const wasm::CallSiteDesc& desc, const Register reg)
 {
     CodeOffset l = call(reg);
     append(desc, l, framePushed());
 }
 
 void
-MacroAssembler::call(const wasm::CallSiteDesc& desc, uint32_t callee)
+MacroAssembler::call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex)
 {
     CodeOffset l = callWithPatch();
-    append(desc, l, framePushed(), callee);
+    append(desc, l, framePushed(), funcDefIndex);
 }
 
 // ===============================================================
 // ABI function calls.
 
 void
 MacroAssembler::passABIArg(Register reg)
 {
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -482,17 +482,17 @@ class MacroAssembler : public MacroAssem
     void call(ImmWord imm) PER_SHARED_ARCH;
     // Call a target native function, which is neither traceable nor movable.
     void call(ImmPtr imm) PER_SHARED_ARCH;
     void call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
     // Call a target JitCode, which must be traceable, and may be movable.
     void call(JitCode* c) PER_SHARED_ARCH;
 
     inline void call(const wasm::CallSiteDesc& desc, const Register reg);
-    inline void call(const wasm::CallSiteDesc& desc, uint32_t callee);
+    inline void call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
 
     CodeOffset callWithPatch() PER_SHARED_ARCH;
     void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
 
     // Thunks provide the ability to jump to any uint32_t offset from any other
     // uint32_t offset without using a constant pool (thus returning a simple
     // CodeOffset instead of a CodeOffsetJump).
     CodeOffset thunkWithPatch() PER_SHARED_ARCH;
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -749,22 +749,22 @@ class AssemblerShared
         return !enoughMemory_;
     }
 
     bool embedsNurseryPointers() const {
         return embedsNurseryPointers_;
     }
 
     void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, size_t framePushed,
-                uint32_t targetIndex = wasm::CallSiteAndTarget::NOT_INTERNAL)
+                uint32_t funcDefIndex = wasm::CallSiteAndTarget::NOT_DEFINITION)
     {
         // framePushed does not include sizeof(AsmJSFrame), so add it in here (see
         // CallSite::stackDepth).
         wasm::CallSite callsite(desc, retAddr.offset(), framePushed + sizeof(AsmJSFrame));
-        enoughMemory_ &= callsites_.append(wasm::CallSiteAndTarget(callsite, targetIndex));
+        enoughMemory_ &= callsites_.append(wasm::CallSiteAndTarget(callsite, funcDefIndex));
     }
     wasm::CallSiteAndTargetVector& callSites() { return callsites_; }
 
     void append(wasm::JumpTarget target, uint32_t offset) {
         enoughMemory_ &= jumpsites_[target].append(offset);
     }
     const wasm::JumpSiteArray& jumpSites() { return jumpsites_; }
     void clearJumpSites() { for (auto& v : jumpsites_) v.clear(); }
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1501,18 +1501,18 @@ CodeGeneratorShared::emitWasmCallBase(LW
     // Save the caller's TLS register in a reserved stack slot (below the
     // call's stack arguments) for retrieval after the call.
     if (mir->saveTls())
         masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), mir->tlsStackOffset()));
 
     const wasm::CallSiteDesc& desc = mir->desc();
     const wasm::CalleeDesc& callee = mir->callee();
     switch (callee.which()) {
-      case wasm::CalleeDesc::Internal:
-        masm.call(desc, callee.internalFuncIndex());
+      case wasm::CalleeDesc::Definition:
+        masm.call(desc, callee.funcDefIndex());
         break;
       case wasm::CalleeDesc::Import:
         masm.wasmCallImport(desc, callee);
         break;
       case wasm::CalleeDesc::WasmTable:
       case wasm::CalleeDesc::AsmJSTable:
         masm.wasmCallIndirect(desc, callee);
         break;
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -396,19 +396,17 @@ CodeGeneratorX86::visitLoadTypedArrayEle
 
 void
 CodeGeneratorX86::emitWasmCall(LWasmCallBase* ins)
 {
     MWasmCall* mir = ins->mir();
 
     emitWasmCallBase(ins);
 
-    if (IsFloatingPointType(mir->type()) &&
-        mir->callee().which() == wasm::CalleeDesc::Builtin)
-    {
+    if (IsFloatingPointType(mir->type()) && mir->callee().which() == wasm::CalleeDesc::Builtin) {
         if (mir->type() == MIRType::Float32) {
             masm.reserveStack(sizeof(float));
             Operand op(esp, 0);
             masm.fstp32(op);
             masm.loadFloat32(op, ReturnFloat32Reg);
             masm.freeStack(sizeof(float));
         } else {
             MOZ_ASSERT(mir->type() == MIRType::Double);
--- a/js/src/js.msg
+++ b/js/src/js.msg
@@ -353,17 +353,17 @@ MSG_DEF(JSMSG_WASM_BAD_BUF_ARG,        0
 MSG_DEF(JSMSG_WASM_BAD_MOD_ARG,        0, JSEXN_TYPEERR,     "first argument must be a WebAssembly.Module")
 MSG_DEF(JSMSG_WASM_BAD_DESC_ARG,       1, JSEXN_TYPEERR,     "first argument must be a {0} descriptor")
 MSG_DEF(JSMSG_WASM_BAD_IMP_SIZE,       1, JSEXN_TYPEERR,     "imported {0} with incompatible size")
 MSG_DEF(JSMSG_WASM_BAD_SIZE,           2, JSEXN_TYPEERR,     "bad {0} {1} size")
 MSG_DEF(JSMSG_WASM_BAD_ELEMENT,        0, JSEXN_TYPEERR,     "\"element\" property of table descriptor must be \"anyfunc\"")
 MSG_DEF(JSMSG_WASM_BAD_IMPORT_ARG,     0, JSEXN_TYPEERR,     "second argument, if present, must be an object")
 MSG_DEF(JSMSG_WASM_BAD_IMPORT_FIELD,   1, JSEXN_TYPEERR,     "import object field is not {0}")
 MSG_DEF(JSMSG_WASM_BAD_IMPORT_SIG,     0, JSEXN_TYPEERR,     "imported function signature mismatch")
-MSG_DEF(JSMSG_WASM_BAD_SET_VALUE,      0, JSEXN_TYPEERR,     "second argument must be null or an exported WebAssembly Function object")
+MSG_DEF(JSMSG_WASM_BAD_TABLE_VALUE,    0, JSEXN_TYPEERR,     "can only assign WebAssembly exported functions to Table")
 MSG_DEF(JSMSG_WASM_BAD_I64,            0, JSEXN_TYPEERR,     "cannot pass i64 to or from JS")
 MSG_DEF(JSMSG_WASM_BAD_FIT,            2, JSEXN_RANGEERR,    "{0} segment does not fit in {1}")
 MSG_DEF(JSMSG_WASM_UNREACHABLE,        0, JSEXN_ERR,         "unreachable executed")
 MSG_DEF(JSMSG_WASM_INTEGER_OVERFLOW,   0, JSEXN_ERR,         "integer overflow")
 MSG_DEF(JSMSG_WASM_INVALID_CONVERSION, 0, JSEXN_ERR,         "invalid conversion to integer")
 MSG_DEF(JSMSG_WASM_INT_DIVIDE_BY_ZERO, 0, JSEXN_ERR,         "integer divide by zero")
 MSG_DEF(JSMSG_WASM_UNALIGNED_ACCESS,   0, JSEXN_ERR,         "unaligned memory access")
 MSG_DEF(JSMSG_WASM_OVERRECURSED,       0, JSEXN_INTERNALERR, "call stack exhausted")
--- a/js/src/jsfun.h
+++ b/js/src/jsfun.h
@@ -680,17 +680,17 @@ class FunctionExtended : public JSFuncti
      * first slot.
      */
     static const unsigned WASM_INSTANCE_SLOT = 0;
 
     /*
      * wasm/asm.js exported functions store the function index of the exported
      * function in the original module.
      */
-    static const unsigned WASM_FUNC_INDEX_SLOT = 1;
+    static const unsigned WASM_FUNC_DEF_INDEX_SLOT = 1;
 
     /*
      * asm.js module functions store their WasmModuleObject in the first slot.
      */
     static const unsigned ASMJS_MODULE_SLOT = 0;
 
 
     static inline size_t offsetOfExtendedSlot(unsigned which) {