Bug 1239177 - Odin: make calls more like wasm (r=bbouvier)
author: Luke Wagner <luke@mozilla.com>
Tue, 12 Jan 2016 22:12:07 -0600
changeset 280402 20619c132abb081f017de02162ffe083601c0085
parent 280401 2f33275036d3c7d43c052680428ec75b491e4c13
child 280403 9e376b08b9787b3012b9fe294425595601f5efbc
push id: 70433
push user: lwagner@mozilla.com
push date: Mon, 18 Jan 2016 20:23:37 +0000
treeherder: mozilla-inbound@20619c132abb [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bbouvier
bugs: 1239177
milestone: 46.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1239177 - Odin: make calls more like wasm (r=bbouvier)
js/src/asmjs/AsmJS.cpp
js/src/asmjs/WasmBinary.h
js/src/asmjs/WasmGenerator.cpp
js/src/asmjs/WasmGenerator.h
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmIonCompile.h
js/src/asmjs/WasmModule.cpp
js/src/asmjs/WasmModule.h
js/src/asmjs/WasmStubs.cpp
js/src/asmjs/WasmTypes.h
--- a/js/src/asmjs/AsmJS.cpp
+++ b/js/src/asmjs/AsmJS.cpp
@@ -392,17 +392,17 @@ class js::AsmJSModule final : public Mod
 
     const UniqueConstStaticLinkData link_;
     const UniqueConstAsmJSModuleData module_;
 
   public:
     AsmJSModule(UniqueModuleData base,
                 UniqueStaticLinkData link,
                 UniqueAsmJSModuleData module)
-      : Module(Move(base), Module::IsAsmJS),
+      : Module(Move(base), AsmJSBool::IsAsmJS),
         link_(Move(link)),
         module_(Move(module))
     {}
 
     virtual void trace(JSTracer* trc) override {
         Module::trace(trc);
         module_->trace(trc);
     }
@@ -1349,27 +1349,26 @@ static const unsigned VALIDATION_LIFO_DE
 //
 // ModuleValidator is marked as rooted in the rooting analysis.  Don't add
 // non-JSAtom pointers, or this will break!
 class MOZ_STACK_CLASS ModuleValidator
 {
   public:
     class Func
     {
-        const LifoSig& sig_;
         PropertyName* name_;
         uint32_t firstUse_;
         uint32_t index_;
         uint32_t srcBegin_;
         uint32_t srcEnd_;
         bool defined_;
 
       public:
-        Func(PropertyName* name, uint32_t firstUse, const LifoSig& sig, uint32_t index)
-          : sig_(sig), name_(name), firstUse_(firstUse), index_(index),
+        Func(PropertyName* name, uint32_t firstUse, uint32_t index)
+          : name_(name), firstUse_(firstUse), index_(index),
             srcBegin_(0), srcEnd_(0), defined_(false)
         {}
 
         PropertyName* name() const { return name_; }
         uint32_t firstUse() const { return firstUse_; }
         bool defined() const { return defined_; }
         uint32_t index() const { return index_; }
 
@@ -1377,39 +1376,37 @@ class MOZ_STACK_CLASS ModuleValidator
             MOZ_ASSERT(!defined_);
             defined_ = true;
             srcBegin_ = fn->pn_pos.begin;
             srcEnd_ = fn->pn_pos.end;
         }
 
         uint32_t srcBegin() const { MOZ_ASSERT(defined_); return srcBegin_; }
         uint32_t srcEnd() const { MOZ_ASSERT(defined_); return srcEnd_; }
-        const LifoSig& sig() const { return sig_; }
     };
 
     typedef Vector<const Func*> ConstFuncVector;
     typedef Vector<Func*> FuncVector;
 
     class FuncPtrTable
     {
-        const LifoSig& sig_;
+        uint32_t sigIndex_;
         PropertyName* name_;
         uint32_t firstUse_;
         uint32_t mask_;
         bool defined_;
 
         FuncPtrTable(FuncPtrTable&& rhs) = delete;
 
       public:
-        FuncPtrTable(ExclusiveContext* cx, PropertyName* name, uint32_t firstUse,
-                     const LifoSig& sig, uint32_t mask)
-          : sig_(sig), name_(name), firstUse_(firstUse), mask_(mask), defined_(false)
+        FuncPtrTable(uint32_t sigIndex, PropertyName* name, uint32_t firstUse, uint32_t mask)
+          : sigIndex_(sigIndex), name_(name), firstUse_(firstUse), mask_(mask), defined_(false)
         {}
 
-        const LifoSig& sig() const { return sig_; }
+        uint32_t sigIndex() const { return sigIndex_; }
         PropertyName* name() const { return name_; }
         uint32_t firstUse() const { return firstUse_; }
         unsigned mask() const { return mask_; }
         bool defined() const { return defined_; }
         void define() { MOZ_ASSERT(!defined_); defined_ = true; }
     };
 
     typedef Vector<FuncPtrTable*> FuncPtrTableVector;
@@ -1556,67 +1553,74 @@ class MOZ_STACK_CLASS ModuleValidator
         ArrayView(PropertyName* name, Scalar::Type type)
           : name(name), type(type)
         {}
 
         PropertyName* name;
         Scalar::Type type;
     };
 
-    class ImportDescriptor
+  private:
+    struct SigHashPolicy
+    {
+        typedef const Sig& Lookup;
+        static HashNumber hash(Lookup sig) { return sig.hash(); }
+        static bool match(const Sig* lhs, Lookup rhs) { return *lhs == rhs; }
+    };
+    typedef HashMap<const DeclaredSig*, uint32_t, SigHashPolicy> SigMap;
+    class NamedSig
     {
         PropertyName* name_;
-        const LifoSig* sig_;
+        const DeclaredSig* sig_;
 
       public:
-        ImportDescriptor(PropertyName* name, const LifoSig& sig)
+        NamedSig(PropertyName* name, const DeclaredSig& sig)
           : name_(name), sig_(&sig)
         {}
-
         PropertyName* name() const {
             return name_;
         }
-        const LifoSig& sig() const {
+        const Sig& sig() const {
             return *sig_;
         }
 
-        struct Lookup {  // implements HashPolicy
-            PropertyName* name_;
-            const MallocSig& sig_;
-            Lookup(PropertyName* name, const MallocSig& sig) : name_(name), sig_(sig) {}
+        // Implement HashPolicy:
+        struct Lookup {
+            PropertyName* name;
+            const Sig& sig;
+            Lookup(PropertyName* name, const Sig& sig) : name(name), sig(sig) {}
         };
-        static HashNumber hash(const Lookup& l) {
-            return HashGeneric(l.name_, l.sig_.hash());
+        static HashNumber hash(Lookup l) {
+            return HashGeneric(l.name, l.sig.hash());
         }
-        static bool match(const ImportDescriptor& lhs, const Lookup& rhs) {
-            return lhs.name_ == rhs.name_ && *lhs.sig_ == rhs.sig_;
+        static bool match(NamedSig lhs, Lookup rhs) {
+            return lhs.name_ == rhs.name && *lhs.sig_ == rhs.sig;
         }
     };
-
-  private:
+    typedef HashMap<NamedSig, uint32_t, NamedSig> ImportMap;
     typedef HashMap<PropertyName*, Global*> GlobalMap;
     typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
     typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
     typedef HashMap<PropertyName*, AsmJSSimdOperation> SimdOperationNameMap;
     typedef Vector<ArrayView> ArrayViewVector;
-    typedef HashMap<ImportDescriptor, unsigned, ImportDescriptor> ImportMap;
 
     ExclusiveContext*     cx_;
     AsmJSParser&          parser_;
     ParseNode*            moduleFunctionNode_;
     PropertyName*         moduleFunctionName_;
     MathNameMap           standardLibraryMathNames_;
     AtomicsNameMap        standardLibraryAtomicsNames_;
     SimdOperationNameMap  standardLibrarySimdOpNames_;
 
     // Validation-internal state:
     LifoAlloc             validationLifo_;
     FuncVector            functions_;
     FuncPtrTableVector    funcPtrTables_;
     GlobalMap             globalMap_;
+    SigMap                sigMap_;
     ImportMap             importMap_;
     ArrayViewVector       arrayViews_;
     bool                  atomicsPresent_;
 
     // State used to build the AsmJSModule in finish():
     ModuleGenerator       mg_;
     UniqueAsmJSModuleData module_;
 
@@ -1647,30 +1651,51 @@ class MOZ_STACK_CLASS ModuleValidator
         return standardLibraryAtomicsNames_.putNew(atom->asPropertyName(), func);
     }
     bool addStandardLibrarySimdOpName(const char* name, AsmJSSimdOperation op) {
         JSAtom* atom = Atomize(cx_, name, strlen(name));
         if (!atom)
             return false;
         return standardLibrarySimdOpNames_.putNew(atom->asPropertyName(), op);
     }
+    bool declareSig(Sig&& sig, uint32_t* sigIndex) {
+        SigMap::AddPtr p = sigMap_.lookupForAdd(sig);
+        if (p) {
+            *sigIndex = p->value();
+            MOZ_ASSERT(mg_.sig(*sigIndex) == sig);
+            return true;
+        }
+
+        *sigIndex = sigMap_.count();
+        if (*sigIndex >= MaxSigs)
+            return failCurrentOffset("too many unique signatures");
+
+        mg_.initSig(*sigIndex, Move(sig));
+        return sigMap_.add(p, &mg_.sig(*sigIndex), *sigIndex);
+    }
+
+    // ModuleGeneratorData limits:
+    static const unsigned MaxSigs    =   4 * 1024;
+    static const unsigned MaxFuncs   = 512 * 1024;
+    static const unsigned MaxImports =   4 * 1024;
 
   public:
     ModuleValidator(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* moduleFunctionNode)
       : cx_(cx),
         parser_(parser),
         moduleFunctionNode_(moduleFunctionNode),
         moduleFunctionName_(FunctionName(moduleFunctionNode)),
         standardLibraryMathNames_(cx),
         standardLibraryAtomicsNames_(cx),
         standardLibrarySimdOpNames_(cx),
         validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
         functions_(cx),
         funcPtrTables_(cx),
         globalMap_(cx),
+        sigMap_(cx),
         importMap_(cx),
         arrayViews_(cx),
         atomicsPresent_(false),
         mg_(cx),
         errorString_(nullptr),
         errorOffset_(UINT32_MAX),
         errorOverRecursed_(false)
     {}
@@ -1692,17 +1717,17 @@ class MOZ_STACK_CLASS ModuleValidator
             return false;
 
         module_->minHeapLength = RoundUpToNextValidAsmJSHeapLength(0);
         module_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
         module_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
         module_->strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
         module_->scriptSource.reset(parser_.ss);
 
-        if (!globalMap_.init() || !importMap_.init())
+        if (!globalMap_.init() || !sigMap_.init() || !importMap_.init())
             return false;
 
         if (!standardLibraryMathNames_.init() ||
             !addStandardLibraryMathName("sin", AsmJSMathBuiltin_sin) ||
             !addStandardLibraryMathName("cos", AsmJSMathBuiltin_cos) ||
             !addStandardLibraryMathName("tan", AsmJSMathBuiltin_tan) ||
             !addStandardLibraryMathName("asin", AsmJSMathBuiltin_asin) ||
             !addStandardLibraryMathName("acos", AsmJSMathBuiltin_acos) ||
@@ -1751,17 +1776,26 @@ class MOZ_STACK_CLASS ModuleValidator
 #define ADDSTDLIBSIMDOPNAME(op) || !addStandardLibrarySimdOpName(#op, AsmJSSimdOperation_##op)
         if (!standardLibrarySimdOpNames_.init()
             FORALL_SIMD_ASMJS_OP(ADDSTDLIBSIMDOPNAME))
         {
             return false;
         }
 #undef ADDSTDLIBSIMDOPNAME
 
-        return mg_.init();
+        UniqueModuleGeneratorData genData = MakeUnique<ModuleGeneratorData>();
+        if (!genData ||
+            !genData->sigs.resize(MaxSigs) ||
+            !genData->funcSigs.resize(MaxFuncs) ||
+            !genData->imports.resize(MaxImports))
+        {
+            return false;
+        }
+
+        return mg_.init(Move(genData));
     }
 
     ExclusiveContext* cx() const             { return cx_; }
     PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
     PropertyName* globalArgumentName() const { return module_->globalArgumentName; }
     PropertyName* importArgumentName() const { return module_->importArgumentName; }
     PropertyName* bufferArgumentName() const { return module_->bufferArgumentName; }
     ModuleGenerator& mg()                    { return mg_; }
@@ -1965,21 +1999,18 @@ class MOZ_STACK_CLASS ModuleValidator
             fieldName = StringToNewUTF8CharsZ(cx_, *maybeFieldName);
         else
             fieldName = DuplicateString("");
         if (!fieldName || !module_->exportMap.fieldNames.append(Move(fieldName)))
             return false;
 
         // Declare which function is exported which gives us an index into the
         // module ExportVector.
-        MallocSig::ArgVector args;
-        if (!args.appendAll(func.sig().args()))
-            return false;
         uint32_t exportIndex;
-        if (!mg_.declareExport(MallocSig(Move(args), func.sig().ret()), func.index(), &exportIndex))
+        if (!mg_.declareExport(func.index(), &exportIndex))
             return false;
 
         // Add a mapping from the given field to the Export's index.
         if (!module_->exportMap.fieldsToExports.append(exportIndex))
             return false;
 
         // The exported function might have already been exported in which case
         // the index will refer into the range of AsmJSExports.
@@ -1989,84 +2020,80 @@ class MOZ_STACK_CLASS ModuleValidator
 
         // If this is a new export, record the src info for later toString.
         CacheableChars exportName = StringToNewUTF8CharsZ(cx_, *func.name());
         return exportName &&
                module_->exportMap.exportNames.emplaceBack(Move(exportName)) &&
                module_->exports.emplaceBack(func.srcBegin() - module_->srcStart,
                                             func.srcEnd() - module_->srcStart);
     }
-  private:
-    const LifoSig* getLifoSig(const LifoSig& sig) {
-        return &sig;
-    }
-    const LifoSig* getLifoSig(const MallocSig& sig) {
-        return mg_.newLifoSig(sig);
-    }
-  public:
-    bool addFunction(PropertyName* name, uint32_t firstUse, const MallocSig& sig, Func** func) {
+    bool addFunction(PropertyName* name, uint32_t firstUse, Sig&& sig, Func** func) {
+        uint32_t sigIndex;
+        if (!declareSig(Move(sig), &sigIndex))
+            return false;
         uint32_t funcIndex = numFunctions();
+        if (funcIndex >= MaxFuncs)
+            return failCurrentOffset("too many functions");
+        if (!mg_.initFuncSig(funcIndex, sigIndex))
+            return false;
         Global* global = validationLifo_.new_<Global>(Global::Function);
         if (!global)
             return false;
         global->u.funcIndex_ = funcIndex;
         if (!globalMap_.putNew(name, global))
             return false;
-        const LifoSig* lifoSig = getLifoSig(sig);
-        if (!lifoSig)
-            return false;
-        *func = validationLifo_.new_<Func>(name, firstUse, *lifoSig, funcIndex);
+        *func = validationLifo_.new_<Func>(name, firstUse, funcIndex);
         return *func && functions_.append(*func);
     }
-    template <class SigT>
-    bool declareFuncPtrTable(PropertyName* name, uint32_t firstUse, SigT& sig, uint32_t mask,
+    bool declareFuncPtrTable(Sig&& sig, PropertyName* name, uint32_t firstUse, uint32_t mask,
                              uint32_t* index)
     {
         if (!mg_.declareFuncPtrTable(/* numElems = */ mask + 1, index))
             return false;
         MOZ_ASSERT(*index == numFuncPtrTables());
         Global* global = validationLifo_.new_<Global>(Global::FuncPtrTable);
         if (!global)
             return false;
         global->u.funcPtrTableIndex_ = *index;
         if (!globalMap_.putNew(name, global))
             return false;
-        const LifoSig* lifoSig = getLifoSig(sig);
-        if (!lifoSig)
-            return false;
-        FuncPtrTable* t = validationLifo_.new_<FuncPtrTable>(cx_, name, firstUse, *lifoSig, mask);
+        uint32_t sigIndex;
+        if (!declareSig(Move(sig), &sigIndex))
+            return false;
+        FuncPtrTable* t = validationLifo_.new_<FuncPtrTable>(sigIndex, name, firstUse, mask);
         return t && funcPtrTables_.append(t);
     }
     bool defineFuncPtrTable(uint32_t funcPtrTableIndex, const Vector<uint32_t>& elems) {
         FuncPtrTable& table = *funcPtrTables_[funcPtrTableIndex];
         if (table.defined())
             return false;
         table.define();
         mg_.defineFuncPtrTable(funcPtrTableIndex, elems);
         return true;
     }
-    bool addImport(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* importIndex,
-                 const LifoSig** lifoSig)
-    {
-        ImportDescriptor::Lookup lookup(name, sig);
-        ImportMap::AddPtr p = importMap_.lookupForAdd(lookup);
+    bool declareImport(PropertyName* name, Sig&& sig, unsigned ffiIndex, uint32_t* importIndex) {
+        ImportMap::AddPtr p = importMap_.lookupForAdd(NamedSig::Lookup(name, sig));
         if (p) {
-            *lifoSig = &p->key().sig();
             *importIndex = p->value();
             return true;
         }
-        *lifoSig = getLifoSig(sig);
-        if (!*lifoSig)
-            return false;
-        if (!mg_.declareImport(Move(sig), importIndex))
-            return false;
-        if (!importMap_.add(p, ImportDescriptor(name, **lifoSig), *importIndex))
-            return false;
-        MOZ_ASSERT(module_->imports.length() == *importIndex);
-        return module_->imports.emplaceBack(ffiIndex);
+        *importIndex = module_->imports.length();
+        if (*importIndex >= MaxImports)
+            return failCurrentOffset("too many imports");
+        if (!module_->imports.emplaceBack(ffiIndex))
+            return false;
+        uint32_t sigIndex;
+        if (!declareSig(Move(sig), &sigIndex))
+            return false;
+        uint32_t globalDataOffset;
+        if (!mg_.allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset))
+            return false;
+        if (!mg_.initImport(*importIndex, sigIndex, globalDataOffset))
+            return false;
+        return importMap_.add(p, NamedSig(name, mg_.sig(sigIndex)), *importIndex);
     }
 
     bool tryConstantAccess(uint64_t start, uint64_t width) {
         MOZ_ASSERT(UINT64_MAX - start > width);
         uint64_t len = start + width;
         if (len > uint64_t(INT32_MAX) + 1)
             return false;
         len = RoundUpToNextValidAsmJSHeapLength(len);
@@ -2084,16 +2111,20 @@ class MOZ_STACK_CLASS ModuleValidator
         MOZ_ASSERT(!hasAlreadyFailed());
         MOZ_ASSERT(errorOffset_ == UINT32_MAX);
         MOZ_ASSERT(str);
         errorOffset_ = offset;
         errorString_ = DuplicateString(str);
         return false;
     }
 
+    bool failCurrentOffset(const char* str) {
+        return failOffset(tokenStream().currentToken().pos.begin, str);
+    }
+
     bool fail(ParseNode* pn, const char* str) {
         return failOffset(pn->pn_pos.begin, str);
     }
 
     bool failfVAOffset(uint32_t offset, const char* fmt, va_list ap) {
         MOZ_ASSERT(!hasAlreadyFailed());
         MOZ_ASSERT(errorOffset_ == UINT32_MAX);
         MOZ_ASSERT(fmt);
@@ -2583,18 +2614,18 @@ class MOZ_STACK_CLASS FunctionValidator
     bool init(PropertyName* name, unsigned line, unsigned column) {
         UniqueBytecode recycled;
         return m_.mg().startFunc(name, line, column, &recycled, &fg_) &&
                encoder_.init(Move(recycled)) &&
                locals_.init() &&
                labels_.init();
     }
 
-    bool finish(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime) {
-        return m_.mg().finishFunc(funcIndex, sig, encoder().finish(), generateTime, &fg_);
+    bool finish(uint32_t funcIndex, unsigned generateTime) {
+        return m_.mg().finishFunc(funcIndex, encoder().finish(), generateTime, &fg_);
     }
 
     bool fail(ParseNode* pn, const char* str) {
         return m_.fail(pn, str);
     }
 
     bool failf(ParseNode* pn, const char* fmt, ...) {
         va_list ap;
@@ -2747,19 +2778,16 @@ class MOZ_STACK_CLASS FunctionValidator
     void patchU8(size_t pos, uint8_t u8) {
         encoder().patchU8(pos, u8);
     }
     template<class T>
     void patch32(size_t pos, T val) {
         static_assert(sizeof(T) == sizeof(uint32_t), "patch32 is used for 4-bytes long ops");
         encoder().patch32(pos, val);
     }
-    void patchSig(size_t pos, const LifoSig* ptr) {
-        encoder().patchSig(pos, ptr);
-    }
 
     MOZ_WARN_UNUSED_RESULT
     bool tempU8(size_t* offset) {
         return encoder().writeU8(uint8_t(Expr::Unreachable), offset);
     }
     MOZ_WARN_UNUSED_RESULT
     bool tempOp(size_t* offset) {
         return tempU8(offset);
@@ -2769,27 +2797,16 @@ class MOZ_STACK_CLASS FunctionValidator
         if (!encoder().writeU8(uint8_t(Expr::Unreachable), offset))
             return false;
         for (size_t i = 1; i < 4; i++) {
             if (!encoder().writeU8(uint8_t(Expr::Unreachable)))
                 return false;
         }
         return true;
     }
-    MOZ_WARN_UNUSED_RESULT
-    bool tempPtr(size_t* offset) {
-        if (!encoder().writeU8(uint8_t(Expr::Unreachable), offset))
-            return false;
-        for (size_t i = 1; i < sizeof(intptr_t); i++) {
-            if (!encoder().writeU8(uint8_t(Expr::Unreachable)))
-                return false;
-        }
-        return true;
-    }
-    /************************************************** End of build helpers */
 };
 
 } /* anonymous namespace */
 
 /*****************************************************************************/
 // asm.js type-checking and code-generation algorithm
 
 static bool
@@ -3260,25 +3277,23 @@ CheckModuleProcessingDirectives(ModuleVa
     while (true) {
         bool matched;
         if (!ts.matchToken(&matched, TOK_STRING, TokenStream::Operand))
             return false;
         if (!matched)
             return true;
 
         if (!IsIgnoredDirectiveName(m.cx(), ts.currentToken().atom()))
-            return m.failOffset(ts.currentToken().pos.begin, "unsupported processing directive");
+            return m.failCurrentOffset("unsupported processing directive");
 
         TokenKind tt;
         if (!ts.getToken(&tt))
             return false;
-        if (tt != TOK_SEMI) {
-            return m.failOffset(ts.currentToken().pos.begin,
-                                "expected semicolon after string literal");
-        }
+        if (tt != TOK_SEMI)
+            return m.failCurrentOffset("expected semicolon after string literal");
     }
 }
 
 static bool
 CheckModuleGlobals(ModuleValidator& m)
 {
     while (true) {
         ParseNode* varStmt;
@@ -3336,17 +3351,17 @@ CheckProcessingDirectives(ModuleValidato
     while (stmt && IsIgnoredDirective(m.cx(), stmt))
         stmt = NextNode(stmt);
 
     *stmtIter = stmt;
     return true;
 }
 
 static bool
-CheckArguments(FunctionValidator& f, ParseNode** stmtIter, MallocSig::ArgVector* argTypes)
+CheckArguments(FunctionValidator& f, ParseNode** stmtIter, ValTypeVector* argTypes)
 {
     ParseNode* stmt = *stmtIter;
 
     unsigned numFormals;
     ParseNode* argpn = FunctionArgsList(f.fn(), &numFormals);
 
     for (unsigned i = 0; i < numFormals; i++, argpn = NextNode(argpn), stmt = NextNode(stmt)) {
         PropertyName* name;
@@ -4313,37 +4328,35 @@ CheckAtomicsBuiltinCall(FunctionValidato
         MOZ_CRASH("unexpected atomicsBuiltin function");
     }
 }
 
 typedef bool (*CheckArgType)(FunctionValidator& f, ParseNode* argNode, Type type);
 
 template <CheckArgType checkArg>
 static bool
-CheckCallArgs(FunctionValidator& f, ParseNode* callNode, MallocSig::ArgVector* args)
+CheckCallArgs(FunctionValidator& f, ParseNode* callNode, ValTypeVector* args)
 {
     ParseNode* argNode = CallArgList(callNode);
     for (unsigned i = 0; i < CallArgListLength(callNode); i++, argNode = NextNode(argNode)) {
         Type type;
         if (!CheckExpr(f, argNode, &type))
             return false;
 
         if (!checkArg(f, argNode, type))
             return false;
 
         if (!args->append(type.checkedValueType()))
             return false;
     }
     return true;
 }
 
-template <class SigT>
-static bool
-CheckSignatureAgainstExisting(ModuleValidator& m, ParseNode* usepn, SigT& sig,
-                              const LifoSig& existing)
+static bool
+CheckSignatureAgainstExisting(ModuleValidator& m, ParseNode* usepn, const Sig& sig, const Sig& existing)
 {
     if (sig.args().length() != existing.args().length()) {
         return m.failf(usepn, "incompatible number of arguments (%u here vs. %u before)",
                        sig.args().length(), existing.args().length());
     }
 
     for (unsigned i = 0; i < sig.args().length(); i++) {
         if (sig.arg(i) != existing.arg(i)) {
@@ -4357,27 +4370,27 @@ CheckSignatureAgainstExisting(ModuleVali
                        Type::ret(sig.ret()).toChars(), Type::ret(existing.ret()).toChars());
     }
 
     MOZ_ASSERT(sig == existing);
     return true;
 }
 
 static bool
-CheckFunctionSignature(ModuleValidator& m, ParseNode* usepn, const MallocSig& sig,
-                       PropertyName* name, ModuleValidator::Func** func)
+CheckFunctionSignature(ModuleValidator& m, ParseNode* usepn, Sig&& sig, PropertyName* name,
+                       ModuleValidator::Func** func)
 {
     ModuleValidator::Func* existing = m.lookupFunction(name);
     if (!existing) {
         if (!CheckModuleLevelName(m, usepn, name))
             return false;
-        return m.addFunction(name, usepn->pn_pos.begin, sig, func);
-    }
-
-    if (!CheckSignatureAgainstExisting(m, usepn, sig, existing->sig()))
+        return m.addFunction(name, usepn->pn_pos.begin, Move(sig), func);
+    }
+
+    if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcSig(existing->index())))
         return false;
 
     *func = existing;
     return true;
 }
 
 static bool
 CheckIsVarType(FunctionValidator& f, ParseNode* argNode, Type type)
@@ -4391,64 +4404,58 @@ static bool
 CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calleeName,
                   ExprType ret, Type* type)
 {
     if (!f.writeOp(Expr::CallInternal))
         return false;
 
     // Function's index, to find out the function's entry
     size_t funcIndexAt;
-    // Function's signature in lifo
-    size_t sigAt;
-    if (!f.temp32(&funcIndexAt) || !f.tempPtr(&sigAt))
+    if (!f.temp32(&funcIndexAt))
         return false;
 
     if (!f.noteLineCol(callNode))
         return false;
 
-    MallocSig::ArgVector args;
+    ValTypeVector args;
     if (!CheckCallArgs<CheckIsVarType>(f, callNode, &args))
         return false;
 
-    MallocSig sig(Move(args), ret);
-
     ModuleValidator::Func* callee;
-    if (!CheckFunctionSignature(f.m(), callNode, sig, calleeName, &callee))
+    if (!CheckFunctionSignature(f.m(), callNode, Sig(Move(args), ret), calleeName, &callee))
         return false;
 
     f.patch32(funcIndexAt, callee->index());
-    f.patchSig(sigAt, &callee->sig());
     *type = Type::ret(ret);
     return true;
 }
 
-template <class SigT>
 static bool
 CheckFuncPtrTableAgainstExisting(ModuleValidator& m, ParseNode* usepn, PropertyName* name,
-                                 SigT& sig, unsigned mask, uint32_t* funcPtrTableIndex)
+                                 Sig&& sig, unsigned mask, uint32_t* funcPtrTableIndex)
 {
     if (const ModuleValidator::Global* existing = m.lookupGlobal(name)) {
         if (existing->which() != ModuleValidator::Global::FuncPtrTable)
             return m.failName(usepn, "'%s' is not a function-pointer table", name);
 
         ModuleValidator::FuncPtrTable& table = m.funcPtrTable(existing->funcPtrTableIndex());
         if (mask != table.mask())
             return m.failf(usepn, "mask does not match previous value (%u)", table.mask());
 
-        if (!CheckSignatureAgainstExisting(m, usepn, sig, table.sig()))
+        if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().sig(table.sigIndex())))
             return false;
 
         *funcPtrTableIndex = existing->funcPtrTableIndex();
         return true;
     }
 
     if (!CheckModuleLevelName(m, usepn, name))
         return false;
 
-    if (!m.declareFuncPtrTable(name, usepn->pn_pos.begin, sig, mask, funcPtrTableIndex))
+    if (!m.declareFuncPtrTable(Move(sig), name, usepn->pn_pos.begin, mask, funcPtrTableIndex))
         return m.fail(usepn, "table too big");
 
     return true;
 }
 
 static bool
 CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, ExprType ret, Type* type)
 {
@@ -4473,50 +4480,53 @@ CheckFuncPtrCall(FunctionValidator& f, P
 
     uint32_t mask;
     if (!IsLiteralInt(f.m(), maskNode, &mask) || mask == UINT32_MAX || !IsPowerOfTwo(mask + 1))
         return f.fail(maskNode, "function-pointer table index mask value must be a power of two minus 1");
 
     // Opcode
     if (!f.writeOp(Expr::CallIndirect))
         return false;
+
     // Table's mask
     if (!f.writeU32(mask))
         return false;
+
     // Global data offset
     size_t globalDataOffsetAt;
     if (!f.temp32(&globalDataOffsetAt))
         return false;
-    // Signature
-    size_t sigAt;
-    if (!f.tempPtr(&sigAt))
+
+    // Call signature
+    size_t sigIndexAt;
+    if (!f.temp32(&sigIndexAt))
         return false;
 
     if (!f.noteLineCol(callNode))
         return false;
 
     Type indexType;
     if (!CheckExpr(f, indexNode, &indexType))
         return false;
 
     if (!indexType.isIntish())
         return f.failf(indexNode, "%s is not a subtype of intish", indexType.toChars());
 
-    MallocSig::ArgVector args;
+    ValTypeVector args;
     if (!CheckCallArgs<CheckIsVarType>(f, callNode, &args))
         return false;
 
-    MallocSig sig(Move(args), ret);
-
-    uint32_t funcPtrTableIndex;
-    if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, sig, mask, &funcPtrTableIndex))
-        return false;
-
-    f.patch32(globalDataOffsetAt, f.m().mg().funcPtrTableGlobalDataOffset(funcPtrTableIndex));
-    f.patchSig(sigAt, &f.m().funcPtrTable(funcPtrTableIndex).sig());
+    Sig sig(Move(args), ret);
+
+    uint32_t tableIndex;
+    if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, Move(sig), mask, &tableIndex))
+        return false;
+
+    f.patch32(globalDataOffsetAt, f.m().mg().funcPtrTableGlobalDataOffset(tableIndex));
+    f.patch32(sigIndexAt, f.m().funcPtrTable(tableIndex).sigIndex());
 
     *type = Type::ret(ret);
     return true;
 }
 
 static bool
 CheckIsExternType(FunctionValidator& f, ParseNode* argNode, Type type)
 {
@@ -4534,41 +4544,35 @@ CheckFFICall(FunctionValidator& f, Parse
     if (ret == ExprType::F32)
         return f.fail(callNode, "FFI calls can't return float");
     if (IsSimdType(ret))
         return f.fail(callNode, "FFI calls can't return SIMD values");
 
     // Opcode
     if (!f.writeOp(Expr::CallImport))
         return false;
-    // Global data offset
-    size_t offsetAt;
-    if (!f.temp32(&offsetAt))
-        return false;
-    // Pointer to the import's signature in the module's lifo
-    size_t sigAt;
-    if (!f.tempPtr(&sigAt))
+
+    // Import index
+    size_t importIndexAt;
+    if (!f.temp32(&importIndexAt))
         return false;
 
     if (!f.noteLineCol(callNode))
         return false;
 
-    MallocSig::ArgVector args;
+    ValTypeVector args;
     if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args))
         return false;
 
-    MallocSig sig(Move(args), ret);
-
-    unsigned importIndex = 0;
-    const LifoSig* lifoSig = nullptr;
-    if (!f.m().addImport(calleeName, Move(sig), ffiIndex, &importIndex, &lifoSig))
-        return false;
-
-    f.patch32(offsetAt, f.m().mg().importExitGlobalDataOffset(importIndex));
-    f.patchSig(sigAt, lifoSig);
+    uint32_t importIndex;
+    if (!f.m().declareImport(calleeName, Sig(Move(args), ret), ffiIndex, &importIndex))
+        return false;
+
+    f.patch32(importIndexAt, importIndex);
+
     *type = Type::ret(ret);
     return true;
 }
 
 static bool
 CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType,
                       size_t opcodeAt)
 {
@@ -6841,17 +6845,17 @@ CheckFunction(ModuleValidator& m)
     if (!f.init(FunctionName(fn), line, column))
         return m.fail(fn, "internal compiler failure (probably out of memory)");
 
     ParseNode* stmtIter = ListHead(FunctionStatementList(fn));
 
     if (!CheckProcessingDirectives(m, &stmtIter))
         return false;
 
-    MallocSig::ArgVector args;
+    ValTypeVector args;
     if (!CheckArguments(f, &stmtIter, &args))
         return false;
 
     if (!MaybeAddInterruptCheck(f, InterruptCheckPosition::Head, fn))
         return false;
 
     if (!CheckVariables(f, &stmtIter))
         return false;
@@ -6862,28 +6866,26 @@ CheckFunction(ModuleValidator& m)
             return false;
         if (!IsEmptyStatement(stmtIter))
             lastNonEmptyStmt = stmtIter;
     }
 
     if (!CheckFinalReturn(f, lastNonEmptyStmt))
         return false;
 
-    MallocSig sig(Move(args), f.returnedType());
-
     ModuleValidator::Func* func = nullptr;
-    if (!CheckFunctionSignature(m, fn, sig, FunctionName(fn), &func))
+    if (!CheckFunctionSignature(m, fn, Sig(Move(args), f.returnedType()), FunctionName(fn), &func))
         return false;
 
     if (func->defined())
         return m.failName(fn, "function '%s' already defined", FunctionName(fn));
 
     func->define(fn);
 
-    if (!f.finish(func->index(), func->sig(), (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC))
+    if (!f.finish(func->index(), (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC))
         return m.fail(fn, "internal compiler failure (probably out of memory)");
 
     // Release the parser's lifo memory only after the last use of a parse node.
     m.parser().release(mark);
     return true;
 }
 
 static bool
@@ -6929,42 +6931,47 @@ CheckFuncPtrTable(ModuleValidator& m, Pa
     unsigned length = ListLength(arrayLiteral);
 
     if (!IsPowerOfTwo(length))
         return m.failf(arrayLiteral, "function-pointer table length must be a power of 2 (is %u)", length);
 
     unsigned mask = length - 1;
 
     Vector<uint32_t> elemFuncIndices(m.cx());
-    const LifoSig* sig = nullptr;
+    const Sig* sig = nullptr;
     for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
         if (!elem->isKind(PNK_NAME))
             return m.fail(elem, "function-pointer table's elements must be names of functions");
 
         PropertyName* funcName = elem->name();
         const ModuleValidator::Func* func = m.lookupFunction(funcName);
         if (!func)
             return m.fail(elem, "function-pointer table's elements must be names of functions");
 
+        const Sig& funcSig = m.mg().funcSig(func->index());
         if (sig) {
-            if (*sig != func->sig())
+            if (*sig != funcSig)
                 return m.fail(elem, "all functions in table must have same signature");
         } else {
-            sig = &func->sig();
+            sig = &funcSig;
         }
 
         if (!elemFuncIndices.append(func->index()))
             return false;
     }
 
-    uint32_t funcPtrTableIndex;
-    if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), *sig, mask, &funcPtrTableIndex))
-        return false;
-
-    if (!m.defineFuncPtrTable(funcPtrTableIndex, elemFuncIndices))
+    Sig copy;
+    if (!copy.clone(*sig))
+        return false;
+
+    uint32_t tableIndex;
+    if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), Move(copy), mask, &tableIndex))
+        return false;
+
+    if (!m.defineFuncPtrTable(tableIndex, elemFuncIndices))
         return m.fail(var, "duplicate function-pointer definition");
 
     return true;
 }
 
 static bool
 CheckFuncPtrTables(ModuleValidator& m)
 {
@@ -7034,20 +7041,19 @@ CheckModuleExportObject(ModuleValidator&
 static bool
 CheckModuleReturn(ModuleValidator& m)
 {
     TokenKind tk;
     if (!GetToken(m.parser(), &tk))
         return false;
     TokenStream& ts = m.parser().tokenStream;
     if (tk != TOK_RETURN) {
-        const char* msg = (tk == TOK_RC || tk == TOK_EOF)
-                          ? "expecting return statement"
-                          : "invalid asm.js. statement";
-        return m.failOffset(ts.currentToken().pos.begin, msg);
+        return m.failCurrentOffset((tk == TOK_RC || tk == TOK_EOF)
+                                   ? "expecting return statement"
+                                   : "invalid asm.js. statement");
     }
     ts.ungetToken();
 
     ParseNode* returnStmt = m.parser().statement(YieldIsName);
     if (!returnStmt)
         return false;
 
     ParseNode* returnExpr = ReturnExpr(returnStmt);
@@ -7072,20 +7078,18 @@ CheckModuleReturn(ModuleValidator& m)
 
 static bool
 CheckModuleEnd(ModuleValidator &m)
 {
     TokenKind tk;
     if (!GetToken(m.parser(), &tk))
         return false;
 
-    if (tk != TOK_EOF && tk != TOK_RC) {
-        return m.failOffset(m.parser().tokenStream.currentToken().pos.begin,
-                            "top-level export (return) must be the last statement");
-    }
+    if (tk != TOK_EOF && tk != TOK_RC)
+        return m.failCurrentOffset("top-level export (return) must be the last statement");
 
     m.parser().tokenStream.ungetToken();
     return true;
 }
 
 static bool
 CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList,
             MutableHandle<WasmModuleObject*> moduleObj, unsigned* time,
--- a/js/src/asmjs/WasmBinary.h
+++ b/js/src/asmjs/WasmBinary.h
@@ -304,17 +304,17 @@ enum NeedsBoundsCheck : uint8_t
 typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytecode;
 typedef UniquePtr<Bytecode> UniqueBytecode;
 
 // The Encoder class recycles (through its constructor) or creates a new Bytecode (through its
 // init() method). Its Bytecode is released when it's done building the wasm IR in finish().
 class Encoder
 {
     UniqueBytecode bytecode_;
-    mozilla::DebugOnly<bool> done_;
+    DebugOnly<bool> done_;
 
     template<class T>
     MOZ_WARN_UNUSED_RESULT
     bool write(T v, size_t* offset) {
         if (offset)
             *offset = bytecode_->length();
         return bytecode_->append(reinterpret_cast<uint8_t*>(&v), sizeof(T));
     }
@@ -322,31 +322,31 @@ class Encoder
   public:
     Encoder()
       : bytecode_(nullptr),
         done_(false)
     {}
 
     bool init(UniqueBytecode bytecode = UniqueBytecode()) {
         if (bytecode) {
-            bytecode_ = mozilla::Move(bytecode);
+            bytecode_ = Move(bytecode);
             bytecode_->clear();
             return true;
         }
         bytecode_ = MakeUnique<Bytecode>();
         return !!bytecode_;
     }
 
     size_t bytecodeOffset() const { return bytecode_->length(); }
     bool empty() const { return bytecodeOffset() == 0; }
 
     UniqueBytecode finish() {
         MOZ_ASSERT(!done_);
         done_ = true;
-        return mozilla::Move(bytecode_);
+        return Move(bytecode_);
     }
 
     MOZ_WARN_UNUSED_RESULT bool
     writeVarU32(uint32_t i) {
         do {
             uint8_t byte = i & 0x7F;
             i >>= 7;
             if (i != 0)
@@ -405,21 +405,16 @@ class Encoder
 
     template<class T>
     void patch32(size_t pc, T i) {
         static_assert(sizeof(T) == sizeof(uint32_t),
                       "patch32 must be used with 32-bits wide types");
         MOZ_ASSERT(pcIsPatchable(pc, sizeof(uint32_t)));
         memcpy(&(*bytecode_)[pc], &i, sizeof(uint32_t));
     }
-
-    void patchSig(size_t pc, const LifoSig* ptr) {
-        MOZ_ASSERT(pcIsPatchable(pc, sizeof(LifoSig*)));
-        memcpy(&(*bytecode_)[pc], &ptr, sizeof(LifoSig*));
-    }
 };
 
 class Decoder
 {
     const uint8_t* const beg_;
     const uint8_t* const end_;
     const uint8_t* cur_;
 
@@ -460,17 +455,16 @@ class Decoder
 
     // The fallible unpacking API should be used when we're not assuming
     // anything about the bytecode, in particular if it is well-formed.
     MOZ_WARN_UNUSED_RESULT bool readU8 (uint8_t* i)         { return read(i); }
     MOZ_WARN_UNUSED_RESULT bool readI32(int32_t* i)         { return read(i); }
     MOZ_WARN_UNUSED_RESULT bool readF32(float* f)           { return read(f); }
     MOZ_WARN_UNUSED_RESULT bool readU32(uint32_t* u)        { return read(u); }
     MOZ_WARN_UNUSED_RESULT bool readF64(double* d)          { return read(d); }
-    MOZ_WARN_UNUSED_RESULT bool readSig(const LifoSig* sig) { return read(sig); }
 
     MOZ_WARN_UNUSED_RESULT bool readI32X4(jit::SimdConstant* c) {
         int32_t v[4] = { 0, 0, 0, 0 };
         for (size_t i = 0; i < 4; i++) {
             if (!readI32(&v[i]))
                 return false;
         }
         *c = jit::SimdConstant::CreateX4(v[0], v[1], v[2], v[3]);
@@ -508,17 +502,16 @@ class Decoder
 
     // The infallible unpacking API should be used when we are sure that the
     // bytecode is well-formed.
     uint8_t        uncheckedReadU8 () { return uncheckedRead<uint8_t>(); }
     int32_t        uncheckedReadI32() { return uncheckedRead<int32_t>(); }
     float          uncheckedReadF32() { return uncheckedRead<float>(); }
     uint32_t       uncheckedReadU32() { return uncheckedRead<uint32_t>(); }
     double         uncheckedReadF64() { return uncheckedRead<double>(); }
-    const LifoSig* uncheckedReadSig() { return uncheckedRead<const LifoSig*>(); }
 
     jit::SimdConstant uncheckedReadI32X4() {
         int32_t v[4] = { 0, 0, 0, 0 };
         for (size_t i = 0; i < 4; i++)
             v[i] = uncheckedReadI32();
         return jit::SimdConstant::CreateX4(v[0], v[1], v[2], v[3]);
     }
     jit::SimdConstant uncheckedReadF32X4() {
@@ -553,67 +546,66 @@ class Decoder
 // check its correctness in debug mode.
 struct SourceCoords {
     DebugOnly<size_t> offset; // after call opcode
     uint32_t line;
     uint32_t column;
 };
 
 typedef Vector<SourceCoords, 0, SystemAllocPolicy> SourceCoordsVector;
-typedef Vector<ValType, 0, SystemAllocPolicy> ValTypeVector;
 
 // The FuncBytecode class contains the intermediate representation of a
 // parsed/decoded and validated asm.js/WebAssembly function. The FuncBytecode
 // lives only until it is fully compiled.
 class FuncBytecode
 {
     // Note: this unrooted field assumes AutoKeepAtoms via TokenStream via
     // asm.js compilation.
     PropertyName* name_;
     unsigned line_;
     unsigned column_;
 
     SourceCoordsVector callSourceCoords_;
 
     uint32_t index_;
-    const LifoSig& sig_;
+    const DeclaredSig& sig_;
     UniqueBytecode bytecode_;
     ValTypeVector localVars_;
     unsigned generateTime_;
 
   public:
     FuncBytecode(PropertyName* name,
                  unsigned line,
                  unsigned column,
                  SourceCoordsVector&& sourceCoords,
                  uint32_t index,
-                 const LifoSig& sig,
+                 const DeclaredSig& sig,
                  UniqueBytecode bytecode,
                  ValTypeVector&& localVars,
                  unsigned generateTime)
       : name_(name),
         line_(line),
         column_(column),
-        callSourceCoords_(mozilla::Move(sourceCoords)),
+        callSourceCoords_(Move(sourceCoords)),
         index_(index),
         sig_(sig),
-        bytecode_(mozilla::Move(bytecode)),
-        localVars_(mozilla::Move(localVars)),
+        bytecode_(Move(bytecode)),
+        localVars_(Move(localVars)),
         generateTime_(generateTime)
     {}
 
-    UniqueBytecode recycleBytecode() { return mozilla::Move(bytecode_); }
+    UniqueBytecode recycleBytecode() { return Move(bytecode_); }
 
     PropertyName* name() const { return name_; }
     unsigned line() const { return line_; }
     unsigned column() const { return column_; }
     const SourceCoords& sourceCoords(size_t i) const { return callSourceCoords_[i]; }
 
     uint32_t index() const { return index_; }
-    const LifoSig& sig() const { return sig_; }
+    const DeclaredSig& sig() const { return sig_; }
     const Bytecode& bytecode() const { return *bytecode_; }
 
     size_t numLocalVars() const { return localVars_.length(); }
     ValType localVarType(size_t i) const { return localVars_[i]; }
     size_t numLocals() const { return sig_.args().length() + numLocalVars(); }
 
     unsigned generateTime() const { return generateTime_; }
 };
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -30,24 +30,21 @@ using namespace js::wasm;
 // ****************************************************************************
 // ModuleGenerator
 
 static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
 static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
 
 ModuleGenerator::ModuleGenerator(ExclusiveContext* cx)
   : cx_(cx),
+    jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
     slowFuncs_(cx),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
-    jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
     alloc_(&lifo_),
     masm_(MacroAssembler::AsmJSToken(), alloc_),
-    sigs_(cx),
-    funcEntryOffsets_(cx),
-    exportFuncIndices_(cx),
     funcIndexToExport_(cx),
     parallel_(false),
     outstanding_(0),
     tasks_(cx),
     freeTasks_(cx),
     activeFunc_(nullptr),
     finishedFuncs_(false)
 {
@@ -100,30 +97,36 @@ ParallelCompilationEnabled(ExclusiveCont
         return false;
 
     // If 'cx' isn't a JSContext, then we are already off the main thread so
     // off-thread compilation must be enabled.
     return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
 }
 
 bool
-ModuleGenerator::init()
+ModuleGenerator::init(UniqueModuleGeneratorData shared)
 {
-    module_ = cx_->make_unique<ModuleData>();
+    module_ = MakeUnique<ModuleData>();
     if (!module_)
         return false;
 
     module_->globalBytes = InitialGlobalDataBytes;
     module_->compileArgs = CompileArgs(cx_);
 
-    link_ = cx_->make_unique<StaticLinkData>();
+    link_ = MakeUnique<StaticLinkData>();
     if (!link_)
         return false;
 
-    if (!sigs_.init() || !funcIndexToExport_.init())
+    shared_ = Move(shared);
+
+    threadView_ = MakeUnique<ModuleGeneratorThreadView>(*shared_);
+    if (!threadView_)
+        return false;
+
+    if (!funcIndexToExport_.init())
         return false;
 
     uint32_t numTasks;
     if (ParallelCompilationEnabled(cx_) &&
         HelperThreadState().wasmCompilationInProgress.compareExchange(false, true))
     {
 #ifdef DEBUG
         {
@@ -137,46 +140,29 @@ ModuleGenerator::init()
         parallel_ = true;
         numTasks = HelperThreadState().maxWasmCompilationThreads();
     } else {
         numTasks = 1;
     }
 
     if (!tasks_.initCapacity(numTasks))
         return false;
-    JSRuntime* runtime = cx_->compartment()->runtimeFromAnyThread();
+    JSRuntime* rt = cx_->compartment()->runtimeFromAnyThread();
     for (size_t i = 0; i < numTasks; i++)
-        tasks_.infallibleEmplaceBack(runtime, args(), COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
+        tasks_.infallibleEmplaceBack(rt, args(), *threadView_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
 
     if (!freeTasks_.reserve(numTasks))
         return false;
     for (size_t i = 0; i < numTasks; i++)
         freeTasks_.infallibleAppend(&tasks_[i]);
 
     return true;
 }
 
 bool
-ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
-{
-    uint32_t globalBytes = module_->globalBytes;
-
-    uint32_t pad = ComputeByteAlignment(globalBytes, align);
-    if (UINT32_MAX - globalBytes < pad + bytes)
-        return false;
-
-    globalBytes += pad;
-    *globalDataOffset = globalBytes;
-    globalBytes += bytes;
-
-    module_->globalBytes = globalBytes;
-    return true;
-}
-
-bool
 ModuleGenerator::finishOutstandingTask()
 {
     MOZ_ASSERT(parallel_);
 
     IonCompileTask* task = nullptr;
     {
         AutoLockHelperThreadState lock;
         while (true) {
@@ -205,16 +191,17 @@ ModuleGenerator::finishTask(IonCompileTa
     FuncCompileResults& results = task->results();
 
     // Offset the recorded FuncOffsets by the offset of the function in the
     // whole module's code segment.
     uint32_t offsetInWhole = masm_.size();
     results.offsets().offsetBy(offsetInWhole);
 
     // Record the non-profiling entry for whole-module linking later.
+    // Cannot simply append because funcIndex order is nonlinear.
     if (func.index() >= funcEntryOffsets_.length()) {
         if (!funcEntryOffsets_.resize(func.index() + 1))
             return false;
     }
     funcEntryOffsets_[func.index()] = results.offsets().nonProfilingEntry;
 
     // Merge the compiled results into the whole-module masm.
     DebugOnly<size_t> sizeBefore = masm_.size();
@@ -238,28 +225,31 @@ ModuleGenerator::finishTask(IonCompileTa
         if (!slowFuncs_.emplaceBack(func.name(), totalTime, func.line(), func.column()))
             return false;
     }
 
     freeTasks_.infallibleAppend(task);
     return true;
 }
 
-const LifoSig*
-ModuleGenerator::newLifoSig(const MallocSig& sig)
+bool
+ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
 {
-    SigSet::AddPtr p = sigs_.lookupForAdd(sig);
-    if (p)
-        return *p;
+    uint32_t globalBytes = module_->globalBytes;
+
+    uint32_t pad = ComputeByteAlignment(globalBytes, align);
+    if (UINT32_MAX - globalBytes < pad + bytes)
+        return false;
 
-    LifoSig* lifoSig = LifoSig::new_(lifo_, sig);
-    if (!lifoSig || !sigs_.add(p, lifoSig))
-        return nullptr;
+    globalBytes += pad;
+    *globalDataOffset = globalBytes;
+    globalBytes += bytes;
 
-    return lifoSig;
+    module_->globalBytes = globalBytes;
+    return true;
 }
 
 bool
 ModuleGenerator::allocateGlobalVar(ValType type, uint32_t* globalDataOffset)
 {
     unsigned width = 0;
     switch (type) {
       case wasm::ValType::I32:
@@ -274,79 +264,96 @@ ModuleGenerator::allocateGlobalVar(ValTy
       case wasm::ValType::F32x4:
       case wasm::ValType::B32x4:
         width = 16;
         break;
     }
     return allocateGlobalBytes(width, width, globalDataOffset);
 }
 
-bool
-ModuleGenerator::declareImport(MallocSig&& sig, unsigned* index)
+void
+ModuleGenerator::initSig(uint32_t sigIndex, Sig&& sig)
+{
+    MOZ_ASSERT(shared_->sigs[sigIndex] == Sig());
+    shared_->sigs[sigIndex] = Move(sig);
+}
+
+const DeclaredSig&
+ModuleGenerator::sig(uint32_t index) const
 {
-    static_assert(Module::SizeOfImportExit % sizeof(void*) == 0, "word aligned");
+    return shared_->sigs[index];
+}
 
-    uint32_t globalDataOffset;
-    if (!allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset))
+bool
+ModuleGenerator::initImport(uint32_t importIndex, uint32_t sigIndex, uint32_t globalDataOffset)
+{
+    MOZ_ASSERT(importIndex == module_->imports.length());
+
+    Sig copy;
+    if (!copy.clone(sig(sigIndex)))
+        return false;
+    if (!module_->imports.emplaceBack(Move(copy), globalDataOffset))
         return false;
 
-    *index = unsigned(module_->imports.length());
-    return module_->imports.emplaceBack(Move(sig), globalDataOffset);
+    ModuleImportGeneratorData& import = shared_->imports[importIndex];
+    MOZ_ASSERT(!import.sig);
+    import.sig = &shared_->sigs[sigIndex];
+    import.globalDataOffset = globalDataOffset;
+    return true;
 }
 
 uint32_t
 ModuleGenerator::numImports() const
 {
     return module_->imports.length();
 }
 
-uint32_t
-ModuleGenerator::importExitGlobalDataOffset(uint32_t index) const
+const ModuleImportGeneratorData&
+ModuleGenerator::import(uint32_t index) const
 {
-    return module_->imports[index].exitGlobalDataOffset();
-}
-
-const MallocSig&
-ModuleGenerator::importSig(uint32_t index) const
-{
-    return module_->imports[index].sig();
+    MOZ_ASSERT(shared_->imports[index].sig);
+    return shared_->imports[index];
 }
 
 bool
 ModuleGenerator::defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit)
 {
     Import& import = module_->imports[index];
     import.initInterpExitOffset(interpExit.begin);
     import.initJitExitOffset(jitExit.begin);
     return module_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExit) &&
            module_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExit);
 }
 
 bool
-ModuleGenerator::declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* exportIndex)
+ModuleGenerator::declareExport(uint32_t funcIndex, uint32_t* exportIndex)
 {
     FuncIndexMap::AddPtr p = funcIndexToExport_.lookupForAdd(funcIndex);
     if (p) {
         *exportIndex = p->value();
         return true;
     }
 
+    Sig copy;
+    if (!copy.clone(funcSig(funcIndex)))
+        return false;
+
     *exportIndex = module_->exports.length();
     return funcIndexToExport_.add(p, funcIndex, *exportIndex) &&
-           module_->exports.append(Move(sig)) &&
+           module_->exports.append(Move(copy)) &&
            exportFuncIndices_.append(funcIndex);
 }
 
 uint32_t
 ModuleGenerator::exportFuncIndex(uint32_t index) const
 {
     return exportFuncIndices_[index];
 }
 
-const MallocSig&
+const Sig&
 ModuleGenerator::exportSig(uint32_t index) const
 {
     return module_->exports[index].sig();
 }
 
 uint32_t
 ModuleGenerator::numExports() const
 {
@@ -356,16 +363,31 @@ ModuleGenerator::numExports() const
 bool
 ModuleGenerator::defineExport(uint32_t index, Offsets offsets)
 {
     module_->exports[index].initStubOffset(offsets.begin);
     return module_->codeRanges.emplaceBack(CodeRange::Entry, offsets);
 }
 
 bool
+ModuleGenerator::initFuncSig(uint32_t funcIndex, uint32_t sigIndex)
+{
+    MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
+    shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
+    return true;
+}
+
+const DeclaredSig&
+ModuleGenerator::funcSig(uint32_t funcIndex) const
+{
+    MOZ_ASSERT(shared_->funcSigs[funcIndex]);
+    return *shared_->funcSigs[funcIndex];
+}
+
+bool
 ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column,
                            UniqueBytecode* recycled, FunctionGenerator* fg)
 {
     MOZ_ASSERT(!activeFunc_);
     MOZ_ASSERT(!finishedFuncs_);
 
     if (freeTasks_.empty() && !finishOutstandingTask())
         return false;
@@ -379,31 +401,31 @@ ModuleGenerator::startFunc(PropertyName*
     fg->column_ = column;
     fg->m_ = this;
     fg->task_ = task;
     activeFunc_ = fg;
     return true;
 }
 
 bool
-ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, UniqueBytecode bytecode,
-                            unsigned generateTime, FunctionGenerator* fg)
+ModuleGenerator::finishFunc(uint32_t funcIndex, UniqueBytecode bytecode, unsigned generateTime,
+                            FunctionGenerator* fg)
 {
     MOZ_ASSERT(activeFunc_ == fg);
 
-    UniqueFuncBytecode func = cx_->make_unique<FuncBytecode>(fg->name_,
-        fg->line_,
-        fg->column_,
-        Move(fg->callSourceCoords_),
-        funcIndex,
-        sig,
-        Move(bytecode),
-        Move(fg->localVars_),
-        generateTime
-    );
+    UniqueFuncBytecode func =
+        js::MakeUnique<FuncBytecode>(fg->name_,
+                                     fg->line_,
+                                     fg->column_,
+                                     Move(fg->callSourceCoords_),
+                                     funcIndex,
+                                     funcSig(funcIndex),
+                                     Move(bytecode),
+                                     Move(fg->localVars_),
+                                     generateTime);
     if (!func)
         return false;
 
     fg->task_->init(Move(func));
 
     if (parallel_) {
         if (!StartOffThreadWasmCompile(cx_, fg->task_))
             return false;
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -23,118 +23,167 @@
 #include "asmjs/WasmIonCompile.h"
 #include "asmjs/WasmModule.h"
 #include "jit/MacroAssembler.h"
 
 namespace js {
 namespace wasm {
 
 class FunctionGenerator;
+typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
 
 // A slow function describes a function that took longer than msThreshold to
 // validate and compile.
+
 struct SlowFunction
 {
     SlowFunction(PropertyName* name, unsigned ms, unsigned line, unsigned column)
      : name(name), ms(ms), line(line), column(column)
     {}
 
     static const unsigned msThreshold = 250;
 
     PropertyName* name;
     unsigned ms;
     unsigned line;
     unsigned column;
 };
 typedef Vector<SlowFunction> SlowFunctionVector;
 
+// The ModuleGeneratorData holds all the state shared between the
+// ModuleGenerator and ModuleGeneratorThreadView. The ModuleGeneratorData is
+// encapsulated by ModuleGenerator/ModuleGeneratorThreadView classes which
+// present a race-free interface to the code in each thread assuming any given
+// element is initialized by the ModuleGenerator thread before an index to that
+// element is written to Bytecode sent to a ModuleGeneratorThreadView thread.
+// Once created, the Vectors are never resized.
+
+struct ModuleImportGeneratorData
+{
+    DeclaredSig* sig;
+    uint32_t globalDataOffset;
+};
+
+typedef Vector<ModuleImportGeneratorData, 0, SystemAllocPolicy> ModuleImportGeneratorDataVector;
+
+struct ModuleGeneratorData
+{
+    DeclaredSigVector               sigs;
+    DeclaredSigPtrVector            funcSigs;
+    ModuleImportGeneratorDataVector imports;
+};
+
+typedef UniquePtr<ModuleGeneratorData> UniqueModuleGeneratorData;
+
+// The ModuleGeneratorThreadView class presents a restricted, read-only view of
+// the shared state needed by helper threads. There is only one
+// ModuleGeneratorThreadView object owned by ModuleGenerator and referenced by
+// all compile tasks.
+
+class ModuleGeneratorThreadView
+{
+    const ModuleGeneratorData& shared_;
+
+  public:
+    explicit ModuleGeneratorThreadView(const ModuleGeneratorData& shared)
+      : shared_(shared)
+    {}
+    const DeclaredSig& sig(uint32_t sigIndex) const {
+        return shared_.sigs[sigIndex];
+    }
+    const DeclaredSig& funcSig(uint32_t funcIndex) const {
+        MOZ_ASSERT(shared_.funcSigs[funcIndex]);
+        return *shared_.funcSigs[funcIndex];
+    }
+    const ModuleImportGeneratorData& import(uint32_t importIndex) const {
+        MOZ_ASSERT(shared_.imports[importIndex].sig);
+        return shared_.imports[importIndex];
+    }
+};
+
 // A ModuleGenerator encapsulates the creation of a wasm module. During the
 // lifetime of a ModuleGenerator, a sequence of FunctionGenerators are created
 // and destroyed to compile the individual function bodies. After generating all
 // functions, ModuleGenerator::finish() must be called to complete the
 // compilation and extract the resulting wasm module.
+
 class MOZ_STACK_CLASS ModuleGenerator
 {
-    typedef Vector<uint32_t> FuncOffsetVector;
-    typedef Vector<uint32_t> FuncIndexVector;
+    typedef UniquePtr<ModuleGeneratorThreadView> UniqueModuleGeneratorThreadView;
     typedef HashMap<uint32_t, uint32_t> FuncIndexMap;
 
-    struct SigHashPolicy
-    {
-        typedef const MallocSig& Lookup;
-        static HashNumber hash(Lookup l) { return l.hash(); }
-        static bool match(const LifoSig* lhs, Lookup rhs) { return *lhs == rhs; }
-    };
-    typedef HashSet<const LifoSig*, SigHashPolicy> SigSet;
-
-    ExclusiveContext*             cx_;
+    ExclusiveContext*               cx_;
+    jit::JitContext                 jcx_;
 
     // Data handed back to the caller in finish()
-    UniqueModuleData              module_;
-    UniqueStaticLinkData          link_;
-    SlowFunctionVector            slowFuncs_;
+    UniqueModuleData                module_;
+    UniqueStaticLinkData            link_;
+    SlowFunctionVector              slowFuncs_;
 
     // Data scoped to the ModuleGenerator's lifetime
-    LifoAlloc                     lifo_;
-    jit::JitContext               jcx_;
-    jit::TempAllocator            alloc_;
-    jit::MacroAssembler           masm_;
-    SigSet                        sigs_;
-    FuncOffsetVector              funcEntryOffsets_;
-    FuncIndexVector               exportFuncIndices_;
-    FuncIndexMap                  funcIndexToExport_;
+    UniqueModuleGeneratorData       shared_;
+    LifoAlloc                       lifo_;
+    jit::TempAllocator              alloc_;
+    jit::MacroAssembler             masm_;
+    Uint32Vector                    funcEntryOffsets_;
+    Uint32Vector                    exportFuncIndices_;
+    FuncIndexMap                    funcIndexToExport_;
 
     // Parallel compilation
-    bool                          parallel_;
-    uint32_t                      outstanding_;
-    Vector<IonCompileTask>        tasks_;
-    Vector<IonCompileTask*>       freeTasks_;
+    bool                            parallel_;
+    uint32_t                        outstanding_;
+    UniqueModuleGeneratorThreadView threadView_;
+    Vector<IonCompileTask>          tasks_;
+    Vector<IonCompileTask*>         freeTasks_;
 
     // Assertions
-    DebugOnly<FunctionGenerator*> activeFunc_;
-    DebugOnly<bool>               finishedFuncs_;
+    DebugOnly<FunctionGenerator*>   activeFunc_;
+    DebugOnly<bool>                 finishedFuncs_;
 
-    bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset);
     bool finishOutstandingTask();
     bool finishTask(IonCompileTask* task);
 
   public:
     explicit ModuleGenerator(ExclusiveContext* cx);
     ~ModuleGenerator();
 
-    bool init();
+    bool init(UniqueModuleGeneratorData shared);
 
     CompileArgs args() const { return module_->compileArgs; }
     jit::MacroAssembler& masm() { return masm_; }
-    const FuncOffsetVector& funcEntryOffsets() const { return funcEntryOffsets_; }
-
-    const LifoSig* newLifoSig(const MallocSig& sig);
+    const Uint32Vector& funcEntryOffsets() const { return funcEntryOffsets_; }
 
     // Global data:
+    bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset);
     bool allocateGlobalVar(ValType type, uint32_t* globalDataOffset);
 
+    // Signatures:
+    void initSig(uint32_t sigIndex, Sig&& sig);
+    const DeclaredSig& sig(uint32_t sigIndex) const;
+
     // Imports:
-    bool declareImport(MallocSig&& sig, uint32_t* index);
+    bool initImport(uint32_t importIndex, uint32_t sigIndex, uint32_t globalDataOffset);
     uint32_t numImports() const;
-    uint32_t importExitGlobalDataOffset(uint32_t index) const;
-    const MallocSig& importSig(uint32_t index) const;
+    const ModuleImportGeneratorData& import(uint32_t index) const;
     bool defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit);
 
     // Exports:
-    bool declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* exportIndex);
+    bool declareExport(uint32_t funcIndex, uint32_t* exportIndex);
     uint32_t numExports() const;
     uint32_t exportFuncIndex(uint32_t index) const;
-    const MallocSig& exportSig(uint32_t index) const;
+    const Sig& exportSig(uint32_t index) const;
     bool defineExport(uint32_t index, Offsets offsets);
 
     // Functions:
+    bool initFuncSig(uint32_t funcIndex, uint32_t sigIndex);
+    const DeclaredSig& funcSig(uint32_t funcIndex) const;
     bool startFunc(PropertyName* name, unsigned line, unsigned column, UniqueBytecode* recycled,
                    FunctionGenerator* fg);
-    bool finishFunc(uint32_t funcIndex, const LifoSig& sig, UniqueBytecode bytecode,
-                    unsigned generateTime, FunctionGenerator* fg);
+    bool finishFunc(uint32_t funcIndex, UniqueBytecode bytecode, unsigned generateTime,
+                    FunctionGenerator* fg);
     bool finishFuncs();
 
     // Function-pointer tables:
     bool declareFuncPtrTable(uint32_t numElems, uint32_t* index);
     uint32_t funcPtrTableGlobalDataOffset(uint32_t index) const;
     void defineFuncPtrTable(uint32_t index, const Vector<uint32_t>& elemFuncIndices);
 
     // Stubs:
@@ -155,16 +204,17 @@ class MOZ_STACK_CLASS ModuleGenerator
                 SlowFunctionVector* slowFuncs);
 };
 
 // A FunctionGenerator encapsulates the generation of a single function body.
 // ModuleGenerator::startFunc must be called after construction and before doing
 // anything else. After the body is complete, ModuleGenerator::finishFunc must
 // be called before the FunctionGenerator is destroyed and the next function is
 // started.
+
 class MOZ_STACK_CLASS FunctionGenerator
 {
     friend class ModuleGenerator;
 
     ModuleGenerator*   m_;
     IonCompileTask*    task_;
 
     // Function metadata created during function generation, then handed over
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -34,76 +34,80 @@ typedef Vector<MBasicBlock*, 8, SystemAl
 // MIR graph.
 class FunctionCompiler
 {
   private:
     typedef HashMap<uint32_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> LabeledBlockMap;
     typedef HashMap<size_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> UnlabeledBlockMap;
     typedef Vector<size_t, 4, SystemAllocPolicy> PositionStack;
 
-    const FuncBytecode& func_;
-    Decoder             decoder_;
-    size_t              nextId_;
-    size_t              lastReadCallSite_;
-
-    TempAllocator&      alloc_;
-    MIRGraph&           graph_;
-    const CompileInfo&  info_;
-    MIRGenerator&       mirGen_;
-
-    MBasicBlock*        curBlock_;
-
-    PositionStack       loopStack_;
-    PositionStack       breakableStack_;
-    UnlabeledBlockMap   unlabeledBreaks_;
-    UnlabeledBlockMap   unlabeledContinues_;
-    LabeledBlockMap     labeledBreaks_;
-    LabeledBlockMap     labeledContinues_;
-
-    FuncCompileResults& compileResults_;
+    ModuleGeneratorThreadView& mg_;
+    const FuncBytecode&        func_;
+    Decoder                    decoder_;
+    size_t                     nextId_;
+    size_t                     lastReadCallSite_;
+
+    TempAllocator&             alloc_;
+    MIRGraph&                  graph_;
+    const CompileInfo&         info_;
+    MIRGenerator&              mirGen_;
+
+    MBasicBlock*               curBlock_;
+
+    PositionStack              loopStack_;
+    PositionStack              breakableStack_;
+    UnlabeledBlockMap          unlabeledBreaks_;
+    UnlabeledBlockMap          unlabeledContinues_;
+    LabeledBlockMap            labeledBreaks_;
+    LabeledBlockMap            labeledContinues_;
+
+    FuncCompileResults&        compileResults_;
 
   public:
-    FunctionCompiler(const FuncBytecode& func, MIRGenerator& mirGen, FuncCompileResults& compileResults)
-      : func_(func),
+    FunctionCompiler(ModuleGeneratorThreadView& mg, const FuncBytecode& func, MIRGenerator& mirGen,
+                     FuncCompileResults& compileResults)
+      : mg_(mg),
+        func_(func),
         decoder_(func.bytecode()),
         nextId_(0),
         lastReadCallSite_(0),
         alloc_(mirGen.alloc()),
         graph_(mirGen.graph()),
         info_(mirGen.info()),
         mirGen_(mirGen),
         curBlock_(nullptr),
         compileResults_(compileResults)
     {}
 
-    TempAllocator&   alloc() const { return alloc_; }
-    MacroAssembler&  masm() const  { return compileResults_.masm(); }
-    const LifoSig&   sig() const   { return func_.sig(); }
+    ModuleGeneratorThreadView& mg() const    { return mg_; }
+    TempAllocator&             alloc() const { return alloc_; }
+    MacroAssembler&            masm() const  { return compileResults_.masm(); }
+    const Sig&                 sig() const   { return func_.sig(); }
 
     bool init()
     {
         if (!unlabeledBreaks_.init() ||
             !unlabeledContinues_.init() ||
             !labeledBreaks_.init() ||
             !labeledContinues_.init())
         {
             return false;
         }
 
         // Prepare the entry block for MIR generation:
 
-        const LifoSig::ArgVector& args = func_.sig().args();
+        const ValTypeVector& args = func_.sig().args();
         unsigned firstVarSlot = args.length();
 
         if (!mirGen_.ensureBallast())
             return false;
         if (!newBlock(/* pred = */ nullptr, &curBlock_))
             return false;
 
-        for (ABIArgIter<LifoSig::ArgVector> i(args); !i.done(); i++) {
+        for (ABIArgValTypeIter i(args); !i.done(); i++) {
             MAsmJSParameter* ins = MAsmJSParameter::New(alloc(), *i, i.mirType());
             curBlock_->add(ins);
             curBlock_->initSlot(info().localSlot(i.index()), ins);
             if (!mirGen_.ensureBallast())
                 return false;
         }
 
         for (size_t i = 0; i < func_.numLocalVars(); i++) {
@@ -724,22 +728,22 @@ class FunctionCompiler
             return false;
 
         curBlock_->add(ins);
         *def = ins;
         return true;
     }
 
   public:
-    bool internalCall(const LifoSig& sig, uint32_t funcIndex, const Call& call, MDefinition** def)
+    bool internalCall(const Sig& sig, uint32_t funcIndex, const Call& call, MDefinition** def)
     {
         return callPrivate(MAsmJSCall::Callee(AsmJSInternalCallee(funcIndex)), call, sig.ret(), def);
     }
 
-    bool funcPtrCall(const LifoSig& sig, uint32_t maskLit, uint32_t globalDataOffset, MDefinition* index,
+    bool funcPtrCall(const Sig& sig, uint32_t maskLit, uint32_t globalDataOffset, MDefinition* index,
                      const Call& call, MDefinition** def)
     {
         if (inDeadCode()) {
             *def = nullptr;
             return true;
         }
 
         MConstant* mask = MConstant::New(alloc(), Int32Value(maskLit));
@@ -1169,17 +1173,16 @@ class FunctionCompiler
     /************************************************************ DECODING ***/
 
     uint8_t        readU8()     { return decoder_.uncheckedReadU8(); }
     uint32_t       readU32()    { return decoder_.uncheckedReadU32(); }
     uint32_t       readVarU32() { return decoder_.uncheckedReadVarU32(); }
     int32_t        readI32()    { return decoder_.uncheckedReadI32(); }
     float          readF32()    { return decoder_.uncheckedReadF32(); }
     double         readF64()    { return decoder_.uncheckedReadF64(); }
-    const LifoSig* readSig()    { return decoder_.uncheckedReadSig(); }
     SimdConstant   readI32X4()  { return decoder_.uncheckedReadI32X4(); }
     SimdConstant   readF32X4()  { return decoder_.uncheckedReadF32X4(); }
 
     Expr           readOpcode() { return Expr(readU8()); }
 
     void readCallLineCol(uint32_t* line, uint32_t* column) {
         const SourceCoords& sc = func_.sourceCoords(lastReadCallSite_++);
         decoder_.assertCurrentIs(sc.offset);
@@ -1543,17 +1546,17 @@ EmitAtomicsExchange(FunctionCompiler& f,
     MDefinition* value;
     if (!EmitExpr(f, ExprType::I32, &value))
         return false;
     *def = f.atomicExchangeHeap(viewType, index, value, needsBoundsCheck);
     return true;
 }
 
 static bool
-EmitCallArgs(FunctionCompiler& f, const LifoSig& sig, FunctionCompiler::Call* call)
+EmitCallArgs(FunctionCompiler& f, const Sig& sig, FunctionCompiler::Call* call)
 {
     f.startCallArgs(call);
     for (unsigned i = 0; i < sig.args().length(); i++) {
         MDefinition *arg = nullptr;
         switch (sig.arg(i)) {
           case ValType::I32:    if (!EmitExpr(f, ExprType::I32, &arg))   return false; break;
           case ValType::I64:    MOZ_CRASH("int64");
           case ValType::F32:    if (!EmitExpr(f, ExprType::F32, &arg))   return false; break;
@@ -1568,17 +1571,18 @@ EmitCallArgs(FunctionCompiler& f, const 
     f.finishCallArgs(call);
     return true;
 }
 
 static bool
 EmitInternalCall(FunctionCompiler& f, ExprType ret, MDefinition** def)
 {
     uint32_t funcIndex = f.readU32();
-    const LifoSig& sig = *f.readSig();
+
+    const Sig& sig = f.mg().funcSig(funcIndex);
     MOZ_ASSERT_IF(!IsVoid(sig.ret()), sig.ret() == ret);
 
     uint32_t lineno, column;
     f.readCallLineCol(&lineno, &column);
 
     FunctionCompiler::Call call(f, lineno, column);
     if (!EmitCallArgs(f, sig, &call))
         return false;
@@ -1586,18 +1590,19 @@ EmitInternalCall(FunctionCompiler& f, Ex
     return f.internalCall(sig, funcIndex, call, def);
 }
 
 static bool
 EmitFuncPtrCall(FunctionCompiler& f, ExprType ret, MDefinition** def)
 {
     uint32_t mask = f.readU32();
     uint32_t globalDataOffset = f.readU32();
-
-    const LifoSig& sig = *f.readSig();
+    uint32_t sigIndex = f.readU32();
+
+    const Sig& sig = f.mg().sig(sigIndex);
     MOZ_ASSERT_IF(!IsVoid(sig.ret()), sig.ret() == ret);
 
     uint32_t lineno, column;
     f.readCallLineCol(&lineno, &column);
 
     MDefinition *index;
     if (!EmitExpr(f, ExprType::I32, &index))
         return false;
@@ -1607,29 +1612,30 @@ EmitFuncPtrCall(FunctionCompiler& f, Exp
         return false;
 
     return f.funcPtrCall(sig, mask, globalDataOffset, index, call, def);
 }
 
 static bool
 EmitFFICall(FunctionCompiler& f, ExprType ret, MDefinition** def)
 {
-    unsigned globalDataOffset = f.readI32();
-
-    const LifoSig& sig = *f.readSig();
-    MOZ_ASSERT_IF(!IsVoid(sig.ret()), sig.ret() == ret);
+    uint32_t importIndex = f.readU32();
 
     uint32_t lineno, column;
     f.readCallLineCol(&lineno, &column);
 
+    const ModuleImportGeneratorData& import = f.mg().import(importIndex);
+    const Sig& sig = *import.sig;
+    MOZ_ASSERT_IF(!IsVoid(sig.ret()), sig.ret() == ret);
+
     FunctionCompiler::Call call(f, lineno, column);
     if (!EmitCallArgs(f, sig, &call))
         return false;
 
-    return f.ffiCall(globalDataOffset, call, ret, def);
+    return f.ffiCall(import.globalDataOffset, call, ret, def);
 }
 
 static bool
 EmitF32MathBuiltinCall(FunctionCompiler& f, Expr f32, MDefinition** def)
 {
     MOZ_ASSERT(f32 == Expr::F32Ceil || f32 == Expr::F32Floor);
 
     uint32_t lineno, column;
@@ -2936,17 +2942,17 @@ wasm::IonCompileFunction(IonCompileTask*
     MIRGraph graph(&results.alloc());
     CompileInfo compileInfo(func.numLocals());
     MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
                      IonOptimizations.get(OptimizationLevel::AsmJS),
                      task->args().useSignalHandlersForOOB);
 
     // Build MIR graph
     {
-        FunctionCompiler f(func, mir, results);
+        FunctionCompiler f(task->mg(), func, mir, results);
         if (!f.init())
             return false;
 
         while (!f.done()) {
             MDefinition* _;
             if (!EmitExpr(f, ExprType::Void, &_))
                 return false;
         }
--- a/js/src/asmjs/WasmIonCompile.h
+++ b/js/src/asmjs/WasmIonCompile.h
@@ -20,16 +20,22 @@
 #define wasm_ion_compile_h
 
 #include "asmjs/WasmBinary.h"
 #include "jit/MacroAssembler.h"
 
 namespace js {
 namespace wasm {
 
+class ModuleGeneratorThreadView;
+
+typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
+typedef jit::ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
+typedef jit::ABIArgIter<ValTypeVector> ABIArgValTypeIter;
+
 // The FuncCompileResults contains the results of compiling a single function
 // body, ready to be merged into the whole-module MacroAssembler.
 class FuncCompileResults
 {
     jit::TempAllocator alloc_;
     jit::MacroAssembler masm_;
     FuncOffsets offsets_;
     unsigned compileTime_;
@@ -57,39 +63,44 @@ class FuncCompileResults
 // validation thread, sent off to an Ion compilation helper thread which creates
 // the FuncCompileResults, and finally sent back to the validation thread. To
 // save time allocating and freeing memory, IonCompileTasks are reset() and
 // reused.
 class IonCompileTask
 {
     JSRuntime* const runtime_;
     const CompileArgs args_;
+    ModuleGeneratorThreadView& mg_;
     LifoAlloc lifo_;
     UniqueFuncBytecode func_;
     mozilla::Maybe<FuncCompileResults> results_;
 
     IonCompileTask(const IonCompileTask&) = delete;
     IonCompileTask& operator=(const IonCompileTask&) = delete;
 
   public:
-    IonCompileTask(JSRuntime* runtime, CompileArgs args, size_t defaultChunkSize)
-      : runtime_(runtime),
+    IonCompileTask(JSRuntime* rt, CompileArgs args, ModuleGeneratorThreadView& mg, size_t defaultChunkSize)
+      : runtime_(rt),
         args_(args),
+        mg_(mg),
         lifo_(defaultChunkSize),
         func_(nullptr)
     {}
     JSRuntime* runtime() const {
         return runtime_;
     }
     LifoAlloc& lifo() {
         return lifo_;
     }
     CompileArgs args() const {
         return args_;
     }
+    ModuleGeneratorThreadView& mg() const {
+        return mg_;
+    }
     void init(UniqueFuncBytecode func) {
         MOZ_ASSERT(!func_);
         func_ = mozilla::Move(func);
         results_.emplace(lifo_);
     }
     const FuncBytecode& func() const {
         MOZ_ASSERT(func_);
         return *func_;
--- a/js/src/asmjs/WasmModule.cpp
+++ b/js/src/asmjs/WasmModule.cpp
@@ -238,58 +238,47 @@ StaticLinkData::sizeOfExcludingThis(Mall
 
     for (const OffsetVector& offsets : symbolicLinks)
         size += offsets.sizeOfExcludingThis(mallocSizeOf);
 
     return size;
 }
 
 static size_t
-SerializedSigSize(const MallocSig& sig)
+SerializedSigSize(const Sig& sig)
 {
     return sizeof(ExprType) +
            SerializedPodVectorSize(sig.args());
 }
 
 static uint8_t*
-SerializeSig(uint8_t* cursor, const MallocSig& sig)
+SerializeSig(uint8_t* cursor, const Sig& sig)
 {
     cursor = WriteScalar<ExprType>(cursor, sig.ret());
     cursor = SerializePodVector(cursor, sig.args());
     return cursor;
 }
 
 static const uint8_t*
-DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, MallocSig* sig)
+DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, Sig* sig)
 {
     ExprType ret;
     cursor = ReadScalar<ExprType>(cursor, &ret);
 
-    MallocSig::ArgVector args;
+    ValTypeVector args;
     cursor = DeserializePodVector(cx, cursor, &args);
     if (!cursor)
         return nullptr;
 
-    sig->init(Move(args), ret);
+    *sig = Sig(Move(args), ret);
     return cursor;
 }
 
-static bool
-CloneSig(JSContext* cx, const MallocSig& sig, MallocSig* out)
-{
-    MallocSig::ArgVector args;
-    if (!ClonePodVector(cx, sig.args(), &args))
-        return false;
-
-    out->init(Move(args), sig.ret());
-    return true;
-}
-
 static size_t
-SizeOfSigExcludingThis(const MallocSig& sig, MallocSizeOf mallocSizeOf)
+SizeOfSigExcludingThis(const Sig& sig, MallocSizeOf mallocSizeOf)
 {
     return sig.args().sizeOfExcludingThis(mallocSizeOf);
 }
 
 size_t
 Export::serializedSize() const
 {
     return SerializedSigSize(sig_) +
@@ -311,17 +300,17 @@ Export::deserialize(ExclusiveContext* cx
     (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
     return cursor;
 }
 
 bool
 Export::clone(JSContext* cx, Export* out) const
 {
     out->pod = pod;
-    return CloneSig(cx, sig_, &out->sig_);
+    return out->sig_.clone(sig_);
 }
 
 size_t
 Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return SizeOfSigExcludingThis(sig_, mallocSizeOf);
 }
 
@@ -347,17 +336,17 @@ Import::deserialize(ExclusiveContext* cx
     (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
     return cursor;
 }
 
 bool
 Import::clone(JSContext* cx, Import* out) const
 {
     out->pod = pod;
-    return CloneSig(cx, sig_, &out->sig_);
+    return out->sig_.clone(sig_);
 }
 
 size_t
 Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return SizeOfSigExcludingThis(sig_, mallocSizeOf);
 }
 
--- a/js/src/asmjs/WasmModule.h
+++ b/js/src/asmjs/WasmModule.h
@@ -98,24 +98,24 @@ struct StaticLinkData
 
 typedef UniquePtr<StaticLinkData> UniqueStaticLinkData;
 
 // An Export represents a single function inside a wasm Module that has been
 // exported one or more times.
 
 class Export
 {
-    MallocSig sig_;
+    Sig sig_;
     struct CacheablePod {
         uint32_t stubOffset_;
     } pod;
 
   public:
     Export() = default;
-    explicit Export(MallocSig&& sig)
+    explicit Export(Sig&& sig)
       : sig_(Move(sig))
     {
         pod.stubOffset_ = UINT32_MAX;
     }
     Export(Export&& rhs)
       : sig_(Move(rhs.sig_)),
         pod(rhs.pod)
     {}
@@ -123,59 +123,59 @@ class Export
     void initStubOffset(uint32_t stubOffset) {
         MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
         pod.stubOffset_ = stubOffset;
     }
 
     uint32_t stubOffset() const {
         return pod.stubOffset_;
     }
-    const MallocSig& sig() const {
+    const Sig& sig() const {
         return sig_;
     }
 
     WASM_DECLARE_SERIALIZABLE(Export)
 };
 
 typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
 
 // An Import describes a wasm module import. Currently, only functions can be
 // imported in wasm. A function import includes the signature used within the
 // module to call it.
 
 class Import
 {
-    MallocSig sig_;
+    Sig sig_;
     struct CacheablePod {
         uint32_t exitGlobalDataOffset_;
         uint32_t interpExitCodeOffset_;
         uint32_t jitExitCodeOffset_;
     } pod;
 
   public:
     Import() {}
     Import(Import&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
-    Import(MallocSig&& sig, uint32_t exitGlobalDataOffset)
+    Import(Sig&& sig, uint32_t exitGlobalDataOffset)
       : sig_(Move(sig))
     {
         pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
         pod.interpExitCodeOffset_ = 0;
         pod.jitExitCodeOffset_ = 0;
     }
 
     void initInterpExitOffset(uint32_t off) {
         MOZ_ASSERT(!pod.interpExitCodeOffset_);
         pod.interpExitCodeOffset_ = off;
     }
     void initJitExitOffset(uint32_t off) {
         MOZ_ASSERT(!pod.jitExitCodeOffset_);
         pod.jitExitCodeOffset_ = off;
     }
 
-    const MallocSig& sig() const {
+    const Sig& sig() const {
         return sig_;
     }
     uint32_t exitGlobalDataOffset() const {
         return pod.exitGlobalDataOffset_;
     }
     uint32_t interpExitCodeOffset() const {
         MOZ_ASSERT(pod.interpExitCodeOffset_);
         return pod.interpExitCodeOffset_;
--- a/js/src/asmjs/WasmStubs.cpp
+++ b/js/src/asmjs/WasmStubs.cpp
@@ -25,20 +25,16 @@
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::ArrayLength;
 using mozilla::MakeEnumeratedRange;
 
-typedef Vector<MIRType, 8, SystemAllocPolicy> MIRTypeVector;
-typedef ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
-typedef ABIArgIter<MallocSig::ArgVector> ABIArgValTypeIter;
-
 static void
 AssertStackAlignment(MacroAssembler& masm, uint32_t alignment, uint32_t addBeforeAssert = 0)
 {
     MOZ_ASSERT((sizeof(AsmJSFrame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
     masm.assertStackAlignment(alignment, addBeforeAssert);
 }
 
 static unsigned
@@ -97,17 +93,17 @@ static const unsigned FramePushedForEntr
 // Generate a stub that enters wasm from a C++ caller via the native ABI.
 // The signature of the entry point is Module::CodePtr. The exported wasm
 // function has an ABI derived from its specific signature, so this function
 // must map from the ABI of CodePtr to the export's signature's ABI.
 static bool
 GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, bool usesHeap)
 {
     MacroAssembler& masm = mg.masm();
-    const MallocSig& sig = mg.exportSig(exportIndex);
+    const Sig& sig = mg.exportSig(exportIndex);
 
     masm.haltingAlign(CodeAlignment);
 
     Offsets offsets;
     offsets.begin = masm.currentOffset();
 
     // Save the return address if it wasn't already saved by the call insn.
 #if defined(JS_CODEGEN_ARM)
@@ -288,17 +284,17 @@ GenerateEntry(ModuleGenerator& mg, unsig
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     return mg.defineExport(exportIndex, offsets);
 }
 
 static void
-FillArgumentArray(MacroAssembler& masm, const MallocSig::ArgVector& args, unsigned argOffset,
+FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argOffset,
                   unsigned offsetToCallerStackArgs, Register scratch)
 {
     for (ABIArgValTypeIter i(args); !i.done(); i++) {
         Address dstAddr(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
         switch (i->kind()) {
           case ABIArg::GPR:
             masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dstAddr);
             break;
@@ -335,17 +331,17 @@ FillArgumentArray(MacroAssembler& masm, 
 // Generate a stub that is called via the internal ABI derived from the
 // signature of the import and calls into an appropriate InvokeImport C++
 // function, having boxed all the ABI arguments into a homogeneous Value array.
 static bool
 GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Label* throwLabel,
                        ProfilingOffsets* offsets)
 {
     MacroAssembler& masm = mg.masm();
-    const MallocSig& sig = mg.importSig(importIndex);
+    const Sig& sig = *mg.import(importIndex).sig;
 
     masm.setFramePushed(0);
 
     // Argument types for InvokeImport_*:
     static const MIRType typeArray[] = { MIRType_Pointer,   // ImportExit
                                          MIRType_Int32,     // argc
                                          MIRType_Pointer }; // argv
     MIRTypeVector invokeArgTypes;
@@ -440,17 +436,17 @@ static const unsigned MaybeSavedGlobalRe
 // Generate a stub that is called via the internal ABI derived from the
 // signature of the import and calls into a compatible JIT function,
 // having boxed all the ABI arguments into the JIT stack frame layout.
 static bool
 GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
                     Label* throwLabel, ProfilingOffsets* offsets)
 {
     MacroAssembler& masm = mg.masm();
-    const MallocSig& sig = mg.importSig(importIndex);
+    const Sig& sig = *mg.import(importIndex).sig;
 
     masm.setFramePushed(0);
 
     // JIT calls use the following stack layout (sp grows to the left):
     //   | retaddr | descriptor | callee | argc | this | arg1..N |
     // After the JIT frame, the global register (if present) is saved since the
     // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
     // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
@@ -470,17 +466,17 @@ GenerateJitExitStub(ModuleGenerator& mg,
     masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
     argOffset += sizeof(size_t);
 
     // 2. Callee
     Register callee = ABIArgGenerator::NonArgReturnReg0;   // live until call
     Register scratch = ABIArgGenerator::NonArgReturnReg1;  // repeatedly clobbered
 
     // 2.1. Get ExitDatum
-    unsigned globalDataOffset = mg.importExitGlobalDataOffset(importIndex);
+    unsigned globalDataOffset = mg.import(importIndex).globalDataOffset;
 #if defined(JS_CODEGEN_X64)
     masm.append(AsmJSGlobalAccess(masm.leaRipRelative(callee), globalDataOffset));
 #elif defined(JS_CODEGEN_X86)
     masm.append(AsmJSGlobalAccess(masm.movlWithPatch(Imm32(0), callee), globalDataOffset));
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
       defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
     masm.computeEffectiveAddress(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), callee);
 #endif
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -52,16 +52,18 @@ enum class ValType
     I64,
     F32,
     F64,
     I32x4,
     F32x4,
     B32x4
 };
 
+typedef Vector<ValType, 8, SystemAllocPolicy> ValTypeVector;
+
 static inline bool
 IsSimdType(ValType vt)
 {
     return vt == ValType::I32x4 || vt == ValType::F32x4 || vt == ValType::B32x4;
 }
 
 static inline jit::MIRType
 ToMIRType(ValType vt)
@@ -190,94 +192,82 @@ ToMIRType(ExprType et)
 // representations of the argument Vector's memory (when elements do not fit
 // inline): normal malloc allocation (via SystemAllocPolicy) and allocation in
 // a LifoAlloc (via LifoAllocPolicy). The former Sig objects can have any
 // lifetime since they own the memory. The latter Sig objects must not outlive
 // the associated LifoAlloc mark/release interval (which is currently the
 // duration of module validation+compilation). Thus, long-lived objects like
 // WasmModule must use malloced allocation.
 
-template <class AllocPolicy>
 class Sig
 {
-  public:
-    typedef Vector<ValType, 4, AllocPolicy> ArgVector;
-
-  private:
-    ArgVector args_;
+    ValTypeVector args_;
     ExprType ret_;
 
-  protected:
-    explicit Sig(AllocPolicy alloc = AllocPolicy()) : args_(alloc) {}
-    Sig(Sig&& rhs) : args_(Move(rhs.args_)), ret_(rhs.ret_) {}
-    Sig(ArgVector&& args, ExprType ret) : args_(Move(args)), ret_(ret) {}
+    Sig(const Sig&) = delete;
+    Sig& operator=(const Sig&) = delete;
 
   public:
-    void init(ArgVector&& args, ExprType ret) {
+    Sig() : args_(), ret_(ExprType::Void) {}
+    Sig(Sig&& rhs) : args_(Move(rhs.args_)), ret_(rhs.ret_) {}
+    Sig(ValTypeVector&& args, ExprType ret) : args_(Move(args)), ret_(ret) {}
+
+    bool clone(const Sig& rhs) {
+        ret_ = rhs.ret_;
         MOZ_ASSERT(args_.empty());
-        args_ = Move(args);
-        ret_ = ret;
+        return args_.appendAll(rhs.args_);
+    }
+    Sig& operator=(Sig&& rhs) {
+        ret_ = rhs.ret_;
+        args_ = Move(rhs.args_);
+        return *this;
     }
 
     ValType arg(unsigned i) const { return args_[i]; }
-    const ArgVector& args() const { return args_; }
+    const ValTypeVector& args() const { return args_; }
     const ExprType& ret() const { return ret_; }
 
     HashNumber hash() const {
         HashNumber hn = HashNumber(ret_);
         for (unsigned i = 0; i < args_.length(); i++)
             hn = mozilla::AddToHash(hn, HashNumber(args_[i]));
         return hn;
     }
-
-    template <class AllocPolicy2>
-    bool operator==(const Sig<AllocPolicy2>& rhs) const {
+    bool operator==(const Sig& rhs) const {
         if (ret() != rhs.ret())
             return false;
         if (args().length() != rhs.args().length())
             return false;
         for (unsigned i = 0; i < args().length(); i++) {
             if (arg(i) != rhs.arg(i))
                 return false;
         }
         return true;
     }
-
-    template <class AllocPolicy2>
-    bool operator!=(const Sig<AllocPolicy2>& rhs) const {
+    bool operator!=(const Sig& rhs) const {
         return !(*this == rhs);
     }
 };
 
-class MallocSig : public Sig<SystemAllocPolicy>
-{
-    typedef Sig<SystemAllocPolicy> BaseSig;
+// A "declared" signature is a Sig object that is created and owned by the
+// ModuleGenerator. These signature objects are read-only and have the same
+// lifetime as the ModuleGenerator. This type is useful since some uses of Sig
+// need this extended lifetime and want to statically distinguish from the
+// common stack-allocated Sig objects that get passed around.
 
-  public:
-    MallocSig() = default;
-    MallocSig(MallocSig&& rhs) : BaseSig(Move(rhs)) {}
-    MallocSig(ArgVector&& args, ExprType ret) : BaseSig(Move(args), ret) {}
+struct DeclaredSig : Sig
+{
+    DeclaredSig() = default;
+    DeclaredSig(DeclaredSig&& rhs) : Sig(Move(rhs)) {}
+    explicit DeclaredSig(Sig&& sig) : Sig(Move(sig)) {}
+    void operator=(Sig&& rhs) { Sig& base = *this; base = Move(rhs); }
 };
 
-class LifoSig : public Sig<LifoAllocPolicy<Fallible>>
-{
-    typedef Sig<LifoAllocPolicy<Fallible>> BaseSig;
-    LifoSig(ArgVector&& args, ExprType ret) : BaseSig(Move(args), ret) {}
-
-  public:
-    static LifoSig* new_(LifoAlloc& lifo, const MallocSig& src) {
-        void* mem = lifo.alloc(sizeof(LifoSig));
-        if (!mem)
-            return nullptr;
-        ArgVector args(lifo);
-        if (!args.appendAll(src.args()))
-            return nullptr;
-        return new (mem) LifoSig(Move(args), src.ret());
-    }
-};
+typedef Vector<DeclaredSig, 0, SystemAllocPolicy> DeclaredSigVector;
+typedef Vector<const DeclaredSig*, 0, SystemAllocPolicy> DeclaredSigPtrVector;
 
 // The (,Profiling,Func)Offsets classes are used to record the offsets of
 // different key points in a CodeRange during compilation.
 
 struct Offsets
 {
     MOZ_IMPLICIT Offsets(uint32_t begin = 0, uint32_t end = 0)
       : begin(begin), end(end)