Bug 998490 - OdinMonkey: make asm.js frames show up in FrameIter (r=dougc)
author: Luke Wagner <luke@mozilla.com>
Wed, 16 Apr 2014 18:46:03 -0500
changeset 180462 f742f98f4d041dc20ff32eebcbb1aa83305dc90d
parent 180461 4ffd7defeba72abfc05705f96c1be11b53eeb55f
child 180463 acbb894db34842044c641097e3b119665384dd17
push id: 272
push user: pvanderbeken@mozilla.com
push date: Mon, 05 May 2014 16:31:18 +0000
reviewers: dougc
bugs: 998490
milestone31.0a1
Bug 998490 - OdinMonkey: make asm.js frames show up in FrameIter (r=dougc)
js/src/jit-test/tests/asm.js/testStackWalking.js
js/src/jit/AsmJS.cpp
js/src/jit/AsmJSLink.cpp
js/src/jit/AsmJSLink.h
js/src/jit/AsmJSModule.cpp
js/src/jit/AsmJSModule.h
js/src/jit/AsmJSSignalHandlers.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/MacroAssembler-x86-shared.h
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x86/MacroAssembler-x86.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
--- a/js/src/jit-test/tests/asm.js/testStackWalking.js
+++ b/js/src/jit-test/tests/asm.js/testStackWalking.js
@@ -1,38 +1,93 @@
 load(libdir + "asm.js");
 load(libdir + "asserts.js");
 
-var callFFI1 = asmCompile('global', 'ffis', USE_ASM + "var ffi=ffis.ffi; function asmfun1() { return ffi(1)|0 } return asmfun1");
-var callFFI2 = asmCompile('global', 'ffis', USE_ASM + "var ffi=ffis.ffi; function asmfun2() { return ffi(2)|0 } return asmfun2");
+function matchStack(stackString, stackArray)
+{
+    var match = 0;
+    for (name of stackArray) {
+        match = stackString.indexOf(name, match);
+        if (match === -1)
+            throw name + " not found in the stack " + stack;
+    }
+}
 
 var stack;
-function dumpStack(i) { stack = new Error().stack; return i+11 }
+function dumpStack()
+{
+    stack = new Error().stack
+}
+
+var callFFI = asmCompile('global', 'ffis', USE_ASM + "var ffi=ffis.ffi; function f() { return ffi()|0 } return f");
 
-var asmfun1 = asmLink(callFFI1, null, {ffi:dumpStack});
-assertEq(asmfun1(), 12);
-assertEq(stack.indexOf("asmfun1") === -1, false);
+var f = asmLink(callFFI, null, {ffi:dumpStack});
+for (var i = 0; i < 5000; i++) {
+    stack = null;
+    f();
+    matchStack(stack, ['dumpStack', 'f']);
+}
 
-var asmfun2 = asmLink(callFFI2, null, {ffi:function ffi(i){return asmfun1()+20}});
-assertEq(asmfun2(), 32);
-assertEq(stack.indexOf("asmfun1") == -1, false);
-assertEq(stack.indexOf("asmfun2") == -1, false);
-assertEq(stack.indexOf("asmfun2") > stack.indexOf("asmfun1"), true);
+if (isAsmJSCompilationAvailable() && isCachingEnabled()) {
+    var callFFI = asmCompile('global', 'ffis', USE_ASM + "var ffi=ffis.ffi; function f() { return ffi()|0 } return f");
+    assertEq(isAsmJSModuleLoadedFromCache(callFFI), true);
+    stack = null;
+    f();
+    matchStack(stack, ['dumpStack', 'f']);
+}
+
+var f1 = asmLink(callFFI, null, {ffi:dumpStack});
+var f2 = asmLink(callFFI, null, {ffi:function middle() { f1() }});
+stack = null;
+(function outer() { f2() })();
+matchStack(stack, ["dumpStack", "f", "middle", "f"]);
+
+function returnStackDumper() { return { valueOf:function() { stack = new Error().stack } } }
+var f = asmLink(callFFI, null, {ffi:returnStackDumper});
+for (var i = 0; i < 5000; i++) {
+    stack = null;
+    f();
+    matchStack(stack, ['valueOf', 'f']);
+}
 
 var caught = false;
 try {
+    stack = null;
     asmLink(asmCompile(USE_ASM + "function asmRec() { asmRec() } return asmRec"))();
 } catch (e) {
     caught = true;
+    matchStack(e.stack, ['asmRec', 'asmRec', 'asmRec', 'asmRec']);
+}
+assertEq(caught, true);
+
+var caught = false;
+try {
+    callFFI(null, {ffi:Object.preventExtensions})();
+} catch (e) {
+    caught = true;
 }
 assertEq(caught, true);
 
-var caught = false;
-try {
-    callFFI1(null, {ffi:Object.preventExtensions})();
-} catch (e) {
-    caught = true;
-}
-assertEq(caught, true);
+asmLink(callFFI, null, {ffi:eval})();
+asmLink(callFFI, null, {ffi:Function})();
+asmLink(callFFI, null, {ffi:Error})();
 
-assertEq(asmLink(callFFI1, null, {ffi:eval})(), 1);
-assertEq(asmLink(callFFI1, null, {ffi:Function})(), 0);
-assertEq(asmLink(callFFI1, null, {ffi:Error})(), 0);
+var manyCalls = asmCompile('global', 'ffis',
+    USE_ASM +
+    "var ffi=ffis.ffi;\
+     function f1(a,b,c,d,e,f,g,h,i,j,k) { \
+       a=a|0;b=b|0;c=c|0;d=d|0;e=e|0;f=f|0;g=g|0;h=h|0;i=i|0;j=j|0;k=k|0; \
+       ffi(); \
+       return (a+b+c+d+e+f+g+h+i+j+k)|0; \
+     } \
+     function f2() { \
+       return f1(1,2,3,4,5,6,7,8,f1(1,2,3,4,5,6,7,8,9,10,11)|0,10,11)|0; \
+     } \
+     function f3() { return 13 } \
+     function f4(i) { \
+       i=i|0; \
+       return TBL[i&3]()|0; \
+     } \
+     var TBL=[f3, f3, f2, f3]; \
+     return f4;");
+stack = null;
+assertEq(asmLink(manyCalls, null, {ffi:dumpStack})(2), 123);
+matchStack(stack, ['dumpStack', 'f1', 'f2', 'f4']);
--- a/js/src/jit/AsmJS.cpp
+++ b/js/src/jit/AsmJS.cpp
@@ -1084,19 +1084,19 @@ class MOZ_STACK_CLASS ModuleCompiler
         finishedFunctionBodies_(false)
     {
         JS_ASSERT(moduleFunctionNode_->pn_funbox == parser.pc->sc->asFunctionBox());
     }
 
     ~ModuleCompiler() {
         if (errorString_) {
             JS_ASSERT(errorOffset_ != UINT32_MAX);
-            parser_.tokenStream.reportAsmJSError(errorOffset_,
-                                                 JSMSG_USE_ASM_TYPE_FAIL,
-                                                 errorString_);
+            tokenStream().reportAsmJSError(errorOffset_,
+                                           JSMSG_USE_ASM_TYPE_FAIL,
+                                           errorString_);
             js_free(errorString_);
         }
         if (errorOverRecursed_)
             js_ReportOverRecursed(cx_);
 
         // Avoid spurious Label assertions on compilation failure.
         if (!stackOverflowLabel_.bound())
             stackOverflowLabel_.bind(0);
@@ -1135,17 +1135,17 @@ class MOZ_STACK_CLASS ModuleCompiler
             !addStandardLibraryMathName("PI", M_PI) ||
             !addStandardLibraryMathName("SQRT1_2", M_SQRT1_2) ||
             !addStandardLibraryMathName("SQRT2", M_SQRT2))
         {
             return false;
         }
 
         uint32_t funcStart = parser_.pc->maybeFunction->pn_body->pn_pos.begin;
-        uint32_t offsetToEndOfUseAsm = parser_.tokenStream.currentToken().pos.end;
+        uint32_t offsetToEndOfUseAsm = tokenStream().currentToken().pos.end;
 
         // "use strict" should be added to the source if we are in an implicit
         // strict context, see also comment above addUseStrict in
         // js::FunctionToString.
         bool strict = parser_.pc->sc->strict && !parser_.pc->sc->hasExplicitUseStrict();
 
         module_ = cx_->new_<AsmJSModule>(parser_.ss, funcStart, offsetToEndOfUseAsm, strict);
         if (!module_)
@@ -1167,24 +1167,24 @@ class MOZ_STACK_CLASS ModuleCompiler
         if (pn)
             return failOffset(pn->pn_pos.begin, str);
 
         // The exact rooting static analysis does not perform dataflow analysis, so it believes
         // that unrooted things on the stack during compilation may still be accessed after this.
         // Since pn is typically only null under OOM, this suppression simply forces any GC to be
         // delayed until the compilation is off the stack and more memory can be freed.
         gc::AutoSuppressGC nogc(cx_);
-        return failOffset(parser_.tokenStream.peekTokenPos().begin, str);
+        return failOffset(tokenStream().peekTokenPos().begin, str);
     }
 
     bool failfVA(ParseNode *pn, const char *fmt, va_list ap) {
         JS_ASSERT(!errorString_);
         JS_ASSERT(errorOffset_ == UINT32_MAX);
         JS_ASSERT(fmt);
-        errorOffset_ = pn ? pn->pn_pos.begin : parser_.tokenStream.currentToken().pos.end;
+        errorOffset_ = pn ? pn->pn_pos.begin : tokenStream().currentToken().pos.end;
         errorString_ = JS_vsmprintf(fmt, ap);
         return false;
     }
 
     bool failf(ParseNode *pn, const char *fmt, ...) {
         va_list ap;
         va_start(ap, fmt);
         failfVA(pn, fmt, ap);
@@ -1209,24 +1209,25 @@ class MOZ_STACK_CLASS ModuleCompiler
     static const unsigned SLOW_FUNCTION_THRESHOLD_MS = 250;
 
     bool maybeReportCompileTime(const Func &func) {
         if (func.compileTime() < SLOW_FUNCTION_THRESHOLD_MS)
             return true;
         SlowFunction sf;
         sf.name = func.name();
         sf.ms = func.compileTime();
-        parser_.tokenStream.srcCoords.lineNumAndColumnIndex(func.srcOffset(), &sf.line, &sf.column);
+        tokenStream().srcCoords.lineNumAndColumnIndex(func.srcOffset(), &sf.line, &sf.column);
         return slowFunctions_.append(sf);
     }
 
     /*************************************************** Read-only interface */
 
     ExclusiveContext *cx() const { return cx_; }
     AsmJSParser &parser() const { return parser_; }
+    TokenStream &tokenStream() const { return parser_.tokenStream; }
     MacroAssembler &masm() { return masm_; }
     Label &stackOverflowLabel() { return stackOverflowLabel_; }
     Label &interruptLabel() { return interruptLabel_; }
     bool hasError() const { return errorString_ != nullptr; }
     const AsmJSModule &module() const { return *module_.get(); }
     uint32_t moduleStart() const { return module_->funcStart(); }
 
     ParseNode *moduleFunctionNode() const { return moduleFunctionNode_; }
@@ -1390,50 +1391,50 @@ class MOZ_STACK_CLASS ModuleCompiler
     bool addExportedFunction(const Func *func, PropertyName *maybeFieldName) {
         AsmJSModule::ArgCoercionVector argCoercions;
         const VarTypeVector &args = func->sig().args();
         if (!argCoercions.resize(args.length()))
             return false;
         for (unsigned i = 0; i < args.length(); i++)
             argCoercions[i] = args[i].toCoercion();
         AsmJSModule::ReturnType retType = func->sig().retType().toModuleReturnType();
-        uint32_t line, column;
-        parser_.tokenStream.srcCoords.lineNumAndColumnIndex(func->srcOffset(), &line, &column);
-        return module_->addExportedFunction(func->name(), line, column,
-                                            func->srcOffset(), func->endOffset(), maybeFieldName,
-                                            Move(argCoercions), retType);
+        return module_->addExportedFunction(func->name(), func->srcOffset(), func->endOffset(),
+                                            maybeFieldName, Move(argCoercions), retType);
     }
     bool addExit(unsigned ffiIndex, PropertyName *name, Signature &&sig, unsigned *exitIndex) {
         ExitDescriptor exitDescriptor(name, Move(sig));
         ExitMap::AddPtr p = exits_.lookupForAdd(exitDescriptor);
         if (p) {
             *exitIndex = p->value();
             return true;
         }
         if (!module_->addExit(ffiIndex, exitIndex))
             return false;
         return exits_.add(p, Move(exitDescriptor), *exitIndex);
     }
+    bool addFunctionName(PropertyName *name, uint32_t *index) {
+        return module_->addFunctionName(name, index);
+    }
 
     // Note a constraint on the minimum size of the heap.  The heap size is
     // constrained when linking to be at least the maximum of all such constraints.
     void requireHeapLengthToBeAtLeast(uint32_t len) {
         module_->requireHeapLengthToBeAtLeast(len);
     }
     uint32_t minHeapLength() const {
         return module_->minHeapLength();
     }
     LifoAlloc &lifo() {
         return moduleLifo_;
     }
 
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     bool trackProfiledFunction(const Func &func, unsigned endCodeOffset) {
         unsigned lineno = 0U, columnIndex = 0U;
-        parser().tokenStream.srcCoords.lineNumAndColumnIndex(func.srcOffset(), &lineno, &columnIndex);
+        tokenStream().srcCoords.lineNumAndColumnIndex(func.srcOffset(), &lineno, &columnIndex);
         unsigned startCodeOffset = func.code()->offset();
         return module_->trackProfiledFunction(func.name(), startCodeOffset, endCodeOffset,
                                               lineno, columnIndex);
     }
 #endif
 
 #ifdef JS_ION_PERF
     bool trackPerfProfiledBlocks(AsmJSPerfSpewer &perfSpewer, const Func &func, unsigned endCodeOffset) {
@@ -1496,31 +1497,36 @@ class MOZ_STACK_CLASS ModuleCompiler
                                msTotal,
                                storedInCache ? "stored in cache" : "not stored in cache",
                                slowFuns ? slowFuns.get() : ""));
 #endif
     }
 
     bool finish(ScopedJSDeletePtr<AsmJSModule> *module)
     {
-        module_->initFuncEnd(parser_.tokenStream.currentToken().pos.end,
-                             parser_.tokenStream.peekTokenPos().end);
+        module_->initFuncEnd(tokenStream().currentToken().pos.end,
+                             tokenStream().peekTokenPos().end);
         masm_.finish();
         if (masm_.oom())
             return false;
 
+        module_->assignCallSites(masm_.extractCallSites());
         module_->assignHeapAccesses(masm_.extractAsmJSHeapAccesses());
 
 #if defined(JS_CODEGEN_ARM)
         // Now that compilation has finished, we need to update offsets to
         // reflect actual offsets (an ARM distinction).
         for (unsigned i = 0; i < module_->numHeapAccesses(); i++) {
             AsmJSHeapAccess &a = module_->heapAccess(i);
             a.setOffset(masm_.actualOffset(a.offset()));
         }
+        for (unsigned i = 0; i < module_->numCallSites(); i++) {
+            CallSite &c = module_->callSite(i);
+            c.setReturnAddressOffset(masm_.actualOffset(c.returnAddressOffset()));
+        }
 #endif
 
         // The returned memory is owned by module_.
         if (!module_->allocateAndCopyCode(cx_, masm_))
             return false;
 
         // c.f. JitCode::copyFrom
         JS_ASSERT(masm_.jumpRelocationTableBytes() == 0);
@@ -1586,17 +1592,17 @@ class MOZ_STACK_CLASS ModuleCompiler
             }
         }
 
 #if defined(JS_CODEGEN_X86)
         // Global data accesses in x86 need to be patched with the absolute
         // address of the global. Globals are allocated sequentially after the
         // code section so we can just use an RelativeLink.
         for (unsigned i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
-            AsmJSGlobalAccess a = masm_.asmJSGlobalAccesses(i);
+            AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
             AsmJSModule::RelativeLink link;
             link.patchAtOffset = masm_.labelOffsetToPatchOffset(a.patchAt.offset());
             link.targetOffset = module_->offsetOfGlobalData() + a.globalDataOffset;
             if (!module_->addRelativeLink(link))
                 return false;
         }
 #endif
 
@@ -1882,16 +1888,17 @@ class FunctionCompiler
     typedef js::Vector<TypedValue> VarInitializerVector;
     typedef HashMap<PropertyName*, BlockVector> LabeledBlockMap;
     typedef HashMap<ParseNode*, BlockVector> UnlabeledBlockMap;
     typedef js::Vector<ParseNode*, 4> NodeStack;
 
     ModuleCompiler &       m_;
     LifoAlloc &            lifo_;
     ParseNode *            fn_;
+    uint32_t               functionNameIndex_;
 
     LocalMap               locals_;
     VarInitializerVector   varInitializers_;
     Maybe<RetType>         alreadyReturned_;
 
     TempAllocator *        alloc_;
     MIRGraph *             graph_;
     CompileInfo *          info_;
@@ -1902,21 +1909,25 @@ class FunctionCompiler
 
     NodeStack              loopStack_;
     NodeStack              breakableStack_;
     UnlabeledBlockMap      unlabeledBreaks_;
     UnlabeledBlockMap      unlabeledContinues_;
     LabeledBlockMap        labeledBreaks_;
     LabeledBlockMap        labeledContinues_;
 
+    static const uint32_t NO_FUNCTION_NAME_INDEX = UINT32_MAX;
+    JS_STATIC_ASSERT(NO_FUNCTION_NAME_INDEX > CallSiteDesc::FUNCTION_NAME_INDEX_MAX);
+
   public:
     FunctionCompiler(ModuleCompiler &m, ParseNode *fn, LifoAlloc &lifo)
       : m_(m),
         lifo_(lifo),
         fn_(fn),
+        functionNameIndex_(NO_FUNCTION_NAME_INDEX),
         locals_(m.cx()),
         varInitializers_(m.cx()),
         alloc_(nullptr),
         graph_(nullptr),
         info_(nullptr),
         mirGen_(nullptr),
         curBlock_(nullptr),
         loopStack_(m.cx()),
@@ -2256,37 +2267,39 @@ class FunctionCompiler
     // between evaluating an argument and making the call, another argument
     // evaluation could perform a call that also needs to store to the stack.
     // When this occurs childClobbers_ = true and the parent expression's
     // arguments are stored above the maximum depth clobbered by a child
     // expression.
 
     class Call
     {
+        ParseNode *node_;
         ABIArgGenerator abi_;
         uint32_t prevMaxStackBytes_;
         uint32_t maxChildStackBytes_;
         uint32_t spIncrement_;
         Signature sig_;
         MAsmJSCall::Args regArgs_;
         js::Vector<MAsmJSPassStackArg*> stackArgs_;
         bool childClobbers_;
 
         friend class FunctionCompiler;
 
       public:
-        Call(FunctionCompiler &f, RetType retType)
-          : prevMaxStackBytes_(0),
+        Call(FunctionCompiler &f, ParseNode *callNode, RetType retType)
+          : node_(callNode),
+            prevMaxStackBytes_(0),
             maxChildStackBytes_(0),
             spIncrement_(0),
             sig_(f.m().lifo(), retType),
             regArgs_(f.cx()),
             stackArgs_(f.cx()),
             childClobbers_(false)
-        {}
+        { }
         Signature &sig() { return sig_; }
         const Signature &sig() const { return sig_; }
     };
 
     void startCallArgs(Call *call)
     {
         if (inDeadCode())
             return;
@@ -2342,20 +2355,31 @@ class FunctionCompiler
 
   private:
     bool callPrivate(MAsmJSCall::Callee callee, const Call &call, MIRType returnType, MDefinition **def)
     {
         if (inDeadCode()) {
             *def = nullptr;
             return true;
         }
-        MAsmJSCall *ins = MAsmJSCall::New(alloc(), callee, call.regArgs_, returnType,
+
+        uint32_t line, column;
+        m_.tokenStream().srcCoords.lineNumAndColumnIndex(call.node_->pn_pos.begin, &line, &column);
+
+        if (functionNameIndex_ == NO_FUNCTION_NAME_INDEX) {
+            if (!m_.addFunctionName(FunctionName(fn_), &functionNameIndex_))
+                return false;
+        }
+
+        CallSiteDesc desc(line, column, functionNameIndex_);
+        MAsmJSCall *ins = MAsmJSCall::New(alloc(), desc, callee, call.regArgs_, returnType,
                                           call.spIncrement_);
         if (!ins)
             return false;
+
         curBlock_->add(ins);
         *def = ins;
         return true;
     }
 
   public:
     bool internalCall(const ModuleCompiler::Func &func, const Call &call, MDefinition **def)
     {
@@ -2746,17 +2770,17 @@ class FunctionCompiler
 
     /*************************************************************************/
   private:
     void noteBasicBlockPosition(MBasicBlock *blk, ParseNode *pn)
     {
 #if defined(JS_ION_PERF)
         if (pn) {
             unsigned line = 0U, column = 0U;
-            m().parser().tokenStream.srcCoords.lineNumAndColumnIndex(pn->pn_pos.begin, &line, &column);
+            m().tokenStream().srcCoords.lineNumAndColumnIndex(pn->pn_pos.begin, &line, &column);
             blk->setLineno(line);
             blk->setColumnIndex(column);
         }
 #endif
     }
 
     bool newBlockWithDepth(MBasicBlock *pred, unsigned loopDepth, MBasicBlock **block, ParseNode *pn)
     {
@@ -3861,17 +3885,17 @@ CheckIsVarType(FunctionCompiler &f, Pars
         return f.failf(argNode, "%s is not a subtype of int, float or double", type.toChars());
     return true;
 }
 
 static bool
 CheckInternalCall(FunctionCompiler &f, ParseNode *callNode, PropertyName *calleeName,
                   RetType retType, MDefinition **def, Type *type)
 {
-    FunctionCompiler::Call call(f, retType);
+    FunctionCompiler::Call call(f, callNode, retType);
 
     if (!CheckCallArgs(f, callNode, CheckIsVarType, &call))
         return false;
 
     ModuleCompiler::Func *callee;
     if (!CheckFunctionSignature(f.m(), callNode, Move(call.sig()), calleeName, &callee))
         return false;
 
@@ -3937,17 +3961,17 @@ CheckFuncPtrCall(FunctionCompiler &f, Pa
     MDefinition *indexDef;
     Type indexType;
     if (!CheckExpr(f, indexNode, &indexDef, &indexType))
         return false;
 
     if (!indexType.isIntish())
         return f.failf(indexNode, "%s is not a subtype of intish", indexType.toChars());
 
-    FunctionCompiler::Call call(f, retType);
+    FunctionCompiler::Call call(f, callNode, retType);
 
     if (!CheckCallArgs(f, callNode, CheckIsVarType, &call))
         return false;
 
     ModuleCompiler::FuncPtrTable *table;
     if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, Move(call.sig()), mask, &table))
         return false;
 
@@ -3970,17 +3994,17 @@ static bool
 CheckFFICall(FunctionCompiler &f, ParseNode *callNode, unsigned ffiIndex, RetType retType,
              MDefinition **def, Type *type)
 {
     PropertyName *calleeName = CallCallee(callNode)->name();
 
     if (retType == RetType::Float)
         return f.fail(callNode, "FFI calls can't return float");
 
-    FunctionCompiler::Call call(f, retType);
+    FunctionCompiler::Call call(f, callNode, retType);
     if (!CheckCallArgs(f, callNode, CheckIsExternType, &call))
         return false;
 
     unsigned exitIndex;
     if (!f.m().addExit(ffiIndex, calleeName, Move(call.sig()), &exitIndex))
         return false;
 
     if (!f.ffiCall(exitIndex, call, retType.toMIRType(), def))
@@ -4095,17 +4119,17 @@ CheckMathBuiltinCall(FunctionCompiler &f
       default: MOZ_ASSUME_UNREACHABLE("unexpected mathBuiltin function");
     }
 
     if (retType == RetType::Float && floatCallee == AsmJSImm_Invalid)
         return f.fail(callNode, "math builtin cannot be used as float");
     if (retType != RetType::Double && retType != RetType::Float)
         return f.failf(callNode, "return type of math function is double or float, used as %s", retType.toType().toChars());
 
-    FunctionCompiler::Call call(f, retType);
+    FunctionCompiler::Call call(f, callNode, retType);
     if (retType == RetType::Float && !CheckCallArgs(f, callNode, CheckIsMaybeFloat, &call))
         return false;
     if (retType == RetType::Double && !CheckCallArgs(f, callNode, CheckIsMaybeDouble, &call))
         return false;
 
     if (call.sig().args().length() != arity)
         return f.failf(callNode, "call passed %u arguments, expected %u", call.sig().args().length(), arity);
 
@@ -5297,17 +5321,17 @@ CheckStatement(FunctionCompiler &f, Pars
     }
 
     return f.fail(stmt, "unexpected statement kind");
 }
 
 static bool
 ParseFunction(ModuleCompiler &m, ParseNode **fnOut)
 {
-    TokenStream &tokenStream = m.parser().tokenStream;
+    TokenStream &tokenStream = m.tokenStream();
 
     DebugOnly<TokenKind> tk = tokenStream.getToken();
     JS_ASSERT(tk == TOK_FUNCTION);
 
     RootedPropertyName name(m.cx());
 
     TokenKind tt = tokenStream.getToken();
     if (tt == TOK_NAME) {
@@ -5337,17 +5361,17 @@ ParseFunction(ModuleCompiler &m, ParseNo
     FunctionBox *funbox = m.parser().newFunctionBox(fn, fun, outerpc, directives, NotGenerator);
     if (!funbox)
         return false;
 
     Directives newDirectives = directives;
     AsmJSParseContext funpc(&m.parser(), outerpc, fn, funbox, &newDirectives,
                             outerpc->staticLevel + 1, outerpc->blockidGen,
                             /* blockScopeDepth = */ 0);
-    if (!funpc.init(m.parser().tokenStream))
+    if (!funpc.init(tokenStream))
         return false;
 
     if (!m.parser().functionArgsAndBodyGeneric(fn, fun, Normal, Statement, &newDirectives))
         return false;
 
     if (tokenStream.hadError() || directives != newDirectives)
         return false;
 
@@ -6017,16 +6041,26 @@ static unsigned
 StackDecrementForCall(MacroAssembler &masm, const VectorT &argTypes, unsigned extraBytes = 0)
 {
     return StackDecrementForCall(masm, StackArgBytes(argTypes) + extraBytes);
 }
 
 static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
                                              NonVolatileRegs.fpus().size() * sizeof(double);
 
+// On arm, we need to include an extra word of space at the top of the stack so
+// we can explicitly store the return address before making the call to C++ or
+// Ion. On x86/x64, this isn't necessary since the call instruction pushes the
+// return address.
+#ifdef JS_CODEGEN_ARM
+static const unsigned MaybeRetAddr = sizeof(void*);
+#else
+static const unsigned MaybeRetAddr = 0;
+#endif
+
 static bool
 GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFunc)
 {
     MacroAssembler &masm = m.masm();
 
     // In constrast to the system ABI, the Ion convention is that all registers
     // are clobbered by calls. Thus, we must save the caller's non-volatile
     // registers.
@@ -6096,17 +6130,17 @@ GenerateEntry(ModuleCompiler &m, const A
                 masm.storeDouble(ScratchFloatReg, Address(StackPointer, iter->offsetFromArgBase()));
             }
             break;
         }
     }
 
     // Call into the real function.
     AssertStackAlignment(masm);
-    masm.call(func.code());
+    masm.call(CallSiteDesc::Entry(), func.code());
 
     // Pop the stack and recover the original 'argv' argument passed to the
     // trampoline (which was pushed on the stack).
     masm.freeStack(stackDec);
     masm.Pop(argv);
 
     // Store the return value in argv[0]
     switch (func.sig().retType().which()) {
@@ -6290,33 +6324,36 @@ GenerateFFIInterpreterExit(ModuleCompile
 
     MIRType typeArray[] = { MIRType_Pointer,   // cx
                             MIRType_Pointer,   // exitDatum
                             MIRType_Int32,     // argc
                             MIRType_Pointer }; // argv
     MIRTypeVector invokeArgTypes(m.cx());
     invokeArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray));
 
-    // Reserve space for a call to InvokeFromAsmJS_* and an array of values
-    // passed to this FFI call.
+    // The stack layout looks like:
+    // | return address | stack arguments | array of values |
     unsigned arraySize = Max<size_t>(1, exit.sig().args().length()) * sizeof(Value);
-    unsigned stackDec = StackDecrementForCall(masm, invokeArgTypes, arraySize);
+    unsigned stackDec = StackDecrementForCall(masm, invokeArgTypes, arraySize + MaybeRetAddr);
     masm.reserveStack(stackDec);
 
     // Fill the argument array.
     unsigned offsetToCallerStackArgs = AlignmentAtPrologue + masm.framePushed();
-    unsigned offsetToArgv = StackArgBytes(invokeArgTypes);
+    unsigned offsetToArgv = StackArgBytes(invokeArgTypes) + MaybeRetAddr;
     Register scratch = ABIArgGenerator::NonArgReturnVolatileReg0;
     FillArgumentArray(m, exit.sig().args(), offsetToArgv, offsetToCallerStackArgs, scratch);
 
     // Prepare the arguments for the call to InvokeFromAsmJS_*.
     ABIArgMIRTypeIter i(invokeArgTypes);
     Register activation = ABIArgGenerator::NonArgReturnVolatileReg1;
     LoadAsmJSActivationIntoRegister(masm, activation);
 
+    // Record sp in the AsmJSActivation for stack-walking.
+    masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfExitSP()));
+
     // argument 0: cx
     if (i->kind() == ABIArg::GPR) {
         LoadJSContextFromActivation(masm, activation, i->gpr());
     } else {
         LoadJSContextFromActivation(masm, activation, scratch);
         masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase()));
     }
     i++;
@@ -6346,26 +6383,26 @@ GenerateFFIInterpreterExit(ModuleCompile
     }
     i++;
     JS_ASSERT(i.done());
 
     // Make the call, test whether it succeeded, and extract the return value.
     AssertStackAlignment(masm);
     switch (exit.sig().retType().which()) {
       case RetType::Void:
-        masm.call(AsmJSImm_InvokeFromAsmJS_Ignore);
+        masm.callExit(AsmJSImm_InvokeFromAsmJS_Ignore, i.stackBytesConsumedSoFar());
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         break;
       case RetType::Signed:
-        masm.call(AsmJSImm_InvokeFromAsmJS_ToInt32);
+        masm.callExit(AsmJSImm_InvokeFromAsmJS_ToInt32, i.stackBytesConsumedSoFar());
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.unboxInt32(argv, ReturnReg);
         break;
       case RetType::Double:
-        masm.call(AsmJSImm_InvokeFromAsmJS_ToNumber);
+        masm.callExit(AsmJSImm_InvokeFromAsmJS_ToNumber, i.stackBytesConsumedSoFar());
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.loadDouble(argv, ReturnFloatReg);
         break;
       case RetType::Float:
         MOZ_ASSUME_UNREACHABLE("Float32 shouldn't be returned from a FFI");
         break;
     }
 
@@ -6384,26 +6421,30 @@ GenerateOOLConvert(ModuleCompiler &m, Re
                             MIRType_Pointer }; // argv
     MIRTypeVector callArgTypes(m.cx());
     callArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray));
 
     // The stack is assumed to be aligned.  The frame is allocated by GenerateFFIIonExit and
     // the stack usage here needs to kept in sync with GenerateFFIIonExit.
 
     // Store value
-    unsigned offsetToArgv = StackArgBytes(callArgTypes);
+    unsigned offsetToArgv = StackArgBytes(callArgTypes) + MaybeRetAddr;
     masm.storeValue(JSReturnOperand, Address(StackPointer, offsetToArgv));
 
+    Register scratch = ABIArgGenerator::NonArgReturnVolatileReg0;
+    Register activation = ABIArgGenerator::NonArgReturnVolatileReg1;
+    LoadAsmJSActivationIntoRegister(masm, activation);
+
+    // Record sp in the AsmJSActivation for stack-walking.
+    masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfExitSP()));
+
     // Store real arguments
     ABIArgMIRTypeIter i(callArgTypes);
-    Register scratch = ABIArgGenerator::NonArgReturnVolatileReg0;
 
     // argument 0: cx
-    Register activation = ABIArgGenerator::NonArgReturnVolatileReg1;
-    LoadAsmJSActivationIntoRegister(masm, activation);
     if (i->kind() == ABIArg::GPR) {
         LoadJSContextFromActivation(masm, activation, i->gpr());
     } else {
         LoadJSContextFromActivation(masm, activation, scratch);
         masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase()));
     }
     i++;
 
@@ -6417,27 +6458,27 @@ GenerateOOLConvert(ModuleCompiler &m, Re
     }
     i++;
     JS_ASSERT(i.done());
 
     // Call
     AssertStackAlignment(masm);
     switch (retType.which()) {
       case RetType::Signed:
-          masm.call(AsmJSImm_CoerceInPlace_ToInt32);
-          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
-          masm.unboxInt32(Address(StackPointer, offsetToArgv), ReturnReg);
-          break;
+        masm.callExit(AsmJSImm_CoerceInPlace_ToInt32, i.stackBytesConsumedSoFar());
+        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+        masm.unboxInt32(Address(StackPointer, offsetToArgv), ReturnReg);
+        break;
       case RetType::Double:
-          masm.call(AsmJSImm_CoerceInPlace_ToNumber);
-          masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
-          masm.loadDouble(Address(StackPointer, offsetToArgv), ReturnFloatReg);
-          break;
+        masm.callExit(AsmJSImm_CoerceInPlace_ToNumber, i.stackBytesConsumedSoFar());
+        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+        masm.loadDouble(Address(StackPointer, offsetToArgv), ReturnFloatReg);
+        break;
       default:
-          MOZ_ASSUME_UNREACHABLE("Unsupported convert type");
+        MOZ_ASSUME_UNREACHABLE("Unsupported convert type");
     }
 }
 
 static void
 GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit,
                          unsigned exitIndex, Label *throwLabel)
 {
     MacroAssembler &masm = m.masm();
@@ -6457,38 +6498,29 @@ GenerateFFIIonExit(ModuleCompiler &m, co
                                                        (1<<lr.code())),
                                     FloatRegisterSet(uint32_t(0))));
 #endif
 
     // The stack frame is used for the call into Ion and also for calls into C for OOL
     // conversion of the result.  A frame large enough for both is allocated.
     //
     // Arguments to the Ion function are in the following order on the stack:
-    // descriptor | callee | argc | this | arg1 | arg2 | ...
+    // | return address | descriptor | callee | argc | this | arg1 | arg2 | ...
     unsigned argBytes = 3 * sizeof(size_t) + (1 + exit.sig().args().length()) * sizeof(Value);
-
-    // On ARM, we call with ma_callIonNoPush which, following the Ion calling convention,
-    // stores the return address into *sp. This means we need to include an extra word of
-    // space before the arguments in the stack allocation. (On x86/x64, the call
-    // instruction does the push itself and the ABI just requires us to be aligned before
-    // the call instruction.)
-    unsigned offsetToArgs = 0;
-#if defined(JS_CODEGEN_ARM)
-    offsetToArgs += sizeof(size_t);
-#endif
-
+    unsigned offsetToArgs = MaybeRetAddr;
     unsigned stackDecForIonCall = StackDecrementForCall(masm, argBytes + offsetToArgs);
 
     // Reserve space for a call to AsmJSImm_CoerceInPlace_* and an array of values used by
     // OOLConvert which reuses the same frame. This code needs to be kept in sync with the
     // stack usage in GenerateOOLConvert.
     MIRType typeArray[] = { MIRType_Pointer, MIRType_Pointer }; // cx, argv
     MIRTypeVector callArgTypes(m.cx());
     callArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray));
-    unsigned stackDecForOOLCall = StackDecrementForCall(masm, callArgTypes, sizeof(Value));
+    unsigned oolExtraBytes = sizeof(Value) + MaybeRetAddr;
+    unsigned stackDecForOOLCall = StackDecrementForCall(masm, callArgTypes, oolExtraBytes);
 
     // Allocate a frame large enough for both of the above calls.
     unsigned stackDec = Max(stackDecForIonCall, stackDecForOOLCall);
 
     masm.reserveStack(stackDec);
     AssertStackAlignment(masm);
 
     // 1. Descriptor
@@ -6561,16 +6593,19 @@ GenerateFFIIonExit(ModuleCompiler &m, co
         JS_ASSERT(callee == AsmJSIonExitRegCallee);
         Register reg0 = AsmJSIonExitRegE0;
         Register reg1 = AsmJSIonExitRegE1;
         Register reg2 = AsmJSIonExitRegE2;
         Register reg3 = AsmJSIonExitRegE3;
 
         LoadAsmJSActivationIntoRegister(masm, reg0);
 
+        // Record sp in the AsmJSActivation for stack-walking.
+        masm.storePtr(StackPointer, Address(reg0, AsmJSActivation::offsetOfExitSP()));
+
         // The following is inlined:
         //   JSContext *cx = activation->cx();
         //   Activation *act = cx->mainThread().activation();
         //   act.active_ = true;
         //   act.prevIonTop_ = cx->mainThread().ionTop;
         //   act.prevJitJSContext_ = cx->mainThread().jitJSContext;
         //   cx->mainThread().jitJSContext = cx;
         // On the ARM store8() uses the secondScratchReg (lr) as a temp.
@@ -6587,24 +6622,17 @@ GenerateFFIIonExit(ModuleCompiler &m, co
         masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevIonTop()));
         masm.loadPtr(Address(reg0, offsetOfJitJSContext), reg2);
         masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevJitJSContext()));
         masm.storePtr(reg3, Address(reg0, offsetOfJitJSContext));
     }
 
     // 2. Call
     AssertStackAlignment(masm);
-#if defined(JS_CODEGEN_ARM)
-    masm.ma_callIonNoPush(callee);
-    // The return address has been popped from the stack, so adjust the stack
-    // without changing the frame-pushed counter to keep the stack aligned.
-    masm.subPtr(Imm32(4), sp);
-#else
-    masm.callIon(callee);
-#endif
+    masm.callIonFromAsmJS(callee);
     AssertStackAlignment(masm);
 
     {
         // Disable Activation.
         //
         // This sequence needs three registers, and must preserve the JSReturnReg_Data and
         // JSReturnReg_Type, so there are five live registers.
         JS_ASSERT(JSReturnReg_Data == AsmJSIonExitRegReturnData);
@@ -6715,37 +6743,40 @@ GenerateStackOverflowExit(ModuleCompiler
 
     // The overflow check always occurs before the initial function-specific
     // stack-size adjustment. See CodeGenerator::generateAsmJSPrologue.
     masm.setFramePushed(AlignmentMidPrologue - AlignmentAtPrologue);
 
     MIRTypeVector argTypes(m.cx());
     argTypes.infallibleAppend(MIRType_Pointer); // cx
 
-    unsigned stackDec = StackDecrementForCall(masm, argTypes);
+    unsigned stackDec = StackDecrementForCall(masm, argTypes, MaybeRetAddr);
     masm.reserveStack(stackDec);
 
+    Register activation = ABIArgGenerator::NonArgReturnVolatileReg0;
+    LoadAsmJSActivationIntoRegister(masm, activation);
+
+    // Record sp in the AsmJSActivation for stack-walking.
+    masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfExitSP()));
+
     ABIArgMIRTypeIter i(argTypes);
 
-    Register scratch = ABIArgGenerator::NonArgReturnVolatileReg0;
-    LoadAsmJSActivationIntoRegister(masm, scratch);
-
     // argument 0: cx
     if (i->kind() == ABIArg::GPR) {
-        LoadJSContextFromActivation(masm, scratch, i->gpr());
+        LoadJSContextFromActivation(masm, activation, i->gpr());
     } else {
-        LoadJSContextFromActivation(masm, scratch, scratch);
-        masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase()));
+        LoadJSContextFromActivation(masm, activation, activation);
+        masm.storePtr(activation, Address(StackPointer, i->offsetFromArgBase()));
     }
     i++;
 
     JS_ASSERT(i.done());
 
     AssertStackAlignment(masm);
-    masm.call(AsmJSImm_ReportOverRecursed);
+    masm.callExit(AsmJSImm_ReportOverRecursed, i.stackBytesConsumedSoFar());
 
     // Don't worry about restoring the stack; throwLabel will pop everything.
     masm.jump(throwLabel);
     return !masm.oom();
 }
 
 // The operation-callback exit is called from arbitrarily-interrupted asm.js
 // code. That means we must first save *all* registers and restore *all*
@@ -6890,16 +6921,18 @@ GenerateThrowExit(ModuleCompiler &m, Lab
 
 static bool
 GenerateStubs(ModuleCompiler &m)
 {
     for (unsigned i = 0; i < m.module().numExportedFunctions(); i++) {
         m.setEntryOffset(i);
         if (!GenerateEntry(m, m.module().exportedFunction(i)))
             return false;
+        if (m.masm().oom())
+            return false;
     }
 
     Label throwLabel;
 
     // The order of the iterations here is non-deterministic, since
     // m.allExits() is a hash keyed by pointer values!
     for (ModuleCompiler::ExitMap::Range r = m.allExits(); !r.empty(); r.popFront()) {
         GenerateFFIExit(m, r.front().key(), r.front().value(), &throwLabel);
--- a/js/src/jit/AsmJSLink.cpp
+++ b/js/src/jit/AsmJSLink.cpp
@@ -1,16 +1,19 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/AsmJSLink.h"
 
+#include "mozilla/BinarySearch.h"
+#include "mozilla/PodOperations.h"
+
 #ifdef MOZ_VTUNE
 # include "vtune/VTuneWrapper.h"
 #endif
 
 #include "jscntxt.h"
 #include "jsmath.h"
 #include "jsprf.h"
 #include "jswrapper.h"
@@ -24,17 +27,112 @@
 #endif
 #include "vm/StringBuffer.h"
 
 #include "jsobjinlines.h"
 
 using namespace js;
 using namespace js::jit;
 
+using mozilla::BinarySearch;
 using mozilla::IsNaN;
+using mozilla::PodZero;
+
+AsmJSFrameIterator::AsmJSFrameIterator(const AsmJSActivation *activation)
+{
+    if (!activation || activation->isInterruptedSP()) {
+        PodZero(this);
+        JS_ASSERT(done());
+        return;
+    }
+
+    module_ = &activation->module();
+    sp_ = activation->exitSP();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+    // For calls to Ion/C++ on x86/x64, the exitSP is the SP right before the call
+    // to C++. Since the call instruction pushes the return address, we know
+    // that the return address is 1 word below exitSP.
+    returnAddress_ = *(uint8_t**)(sp_ - sizeof(void*));
+#else
+    // For calls to Ion/C++ on ARM, the *caller* pushes the return address on
+    // the stack. For Ion, this is just part of the ABI. For C++, the return
+    // address is explicitly pushed before the call since we cannot expect the
+    // callee to immediately push lr. This means that exitSP points to the
+    // return address.
+    returnAddress_ = *(uint8_t**)sp_;
+#endif
+
+    settle();
+}
+
+struct GetCallSite
+{
+    const AsmJSModule &module;
+    GetCallSite(const AsmJSModule &module) : module(module) {}
+    uint32_t operator[](size_t index) const {
+        return module.callSite(index).returnAddressOffset();
+    }
+};
+
+void
+AsmJSFrameIterator::popFrame()
+{
+    // After adding stackDepth, sp points to the word before the return address,
+    // on both ARM and x86/x64.
+    sp_ += callsite_->stackDepth();
+    returnAddress_ = *(uint8_t**)(sp_ - sizeof(void*));
+}
+
+void
+AsmJSFrameIterator::settle()
+{
+    while (true) {
+        uint32_t target = returnAddress_ - module_->codeBase();
+        size_t lowerBound = 0;
+        size_t upperBound = module_->numCallSites();
+
+        size_t match;
+        if (!BinarySearch(GetCallSite(*module_), lowerBound, upperBound, target, &match)) {
+            callsite_ = nullptr;
+            return;
+        }
+
+        callsite_ = &module_->callSite(match);
+
+        if (callsite_->isExit()) {
+            popFrame();
+            continue;
+        }
+
+        if (callsite_->isEntry()) {
+            callsite_ = nullptr;
+            return;
+        }
+
+        JS_ASSERT(callsite_->isNormal());
+        return;
+    }
+}
+
+JSAtom *
+AsmJSFrameIterator::functionDisplayAtom() const
+{
+    JS_ASSERT(!done());
+    return module_->functionName(callsite_->functionNameIndex());
+}
+
+unsigned
+AsmJSFrameIterator::computeLine(uint32_t *column) const
+{
+    JS_ASSERT(!done());
+    if (column)
+        *column = callsite_->column();
+    return callsite_->line();
+}
 
 static bool
 CloneModule(JSContext *cx, MutableHandle<AsmJSModuleObject*> moduleObj)
 {
     ScopedJSDeletePtr<AsmJSModule> module;
     if (!moduleObj->module().clone(cx, &module))
         return false;
 
@@ -403,18 +501,17 @@ CallAsmJS(JSContext *cx, unsigned argc, 
     }
 
     {
         // Push an AsmJSActivation to describe the asm.js frames we're about to
         // push when running this module. Additionally, push a JitActivation so
         // that the optimized asm.js-to-Ion FFI call path (which we want to be
         // very fast) can avoid doing so. The JitActivation is marked as
         // inactive so stack iteration will skip over it.
-        unsigned exportIndex = FunctionToExportedFunctionIndex(callee);
-        AsmJSActivation activation(cx, module, exportIndex);
+        AsmJSActivation activation(cx, module);
         JitActivation jitActivation(cx, /* firstFrameIsConstructing = */ false, /* active */ false);
 
         // Call the per-exported-function trampoline created by GenerateEntry.
         AsmJSModule::CodePtr enter = module.entryTrampoline(func);
         if (!CALL_GENERATED_ASMJS(enter, coercedArgs.begin(), module.globalData()))
             return false;
     }
 
--- a/js/src/jit/AsmJSLink.h
+++ b/js/src/jit/AsmJSLink.h
@@ -4,18 +4,43 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_AsmJSLink_h
 #define jit_AsmJSLink_h
 
 #include "NamespaceImports.h"
 
+class JSAtom;
+
 namespace js {
 
+class AsmJSActivation;
+class AsmJSModule;
+namespace jit { class CallSite; }
+
+// Iterates over the frames of a single AsmJSActivation.
+class AsmJSFrameIterator
+{
+    const AsmJSModule *module_;
+    const jit::CallSite *callsite_;
+    uint8_t *sp_;
+    uint8_t *returnAddress_;
+
+    void popFrame();
+    void settle();
+
+  public:
+    AsmJSFrameIterator(const AsmJSActivation *activation);
+    void operator++() { popFrame(); settle(); }
+    bool done() const { return !callsite_; }
+    JSAtom *functionDisplayAtom() const;
+    unsigned computeLine(uint32_t *column) const;
+};
+
 #ifdef JS_ION
 
 // Create a new JSFunction to replace originalFun as the representation of the
 // function defining the succesfully-validated module 'moduleObj'.
 extern JSFunction *
 NewAsmJSModuleFunction(ExclusiveContext *cx, JSFunction *originalFun, HandleObject moduleObj);
 
 // Return whether this is the js::Native returned by NewAsmJSModuleFunction.
--- a/js/src/jit/AsmJSModule.cpp
+++ b/js/src/jit/AsmJSModule.cpp
@@ -377,16 +377,18 @@ void
 AsmJSModule::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t *asmJSModuleCode,
                            size_t *asmJSModuleData)
 {
     *asmJSModuleCode += pod.totalBytes_;
     *asmJSModuleData += mallocSizeOf(this) +
                         globals_.sizeOfExcludingThis(mallocSizeOf) +
                         exits_.sizeOfExcludingThis(mallocSizeOf) +
                         exports_.sizeOfExcludingThis(mallocSizeOf) +
+                        callSites_.sizeOfExcludingThis(mallocSizeOf) +
+                        functionNames_.sizeOfExcludingThis(mallocSizeOf) +
                         heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
                         profiledFunctions_.sizeOfExcludingThis(mallocSizeOf) +
 #endif
 #if defined(JS_ION_PERF)
                         perfProfiledBlocksFunctions_.sizeOfExcludingThis(mallocSizeOf) +
 #endif
                         staticLinkData_.sizeOfExcludingThis(mallocSizeOf);
@@ -472,29 +474,41 @@ ReadScalar(const uint8_t *src, T *dst)
 
 static size_t
 SerializedNameSize(PropertyName *name)
 {
     return sizeof(uint32_t) +
            (name ? name->length() * sizeof(jschar) : 0);
 }
 
+size_t
+AsmJSModule::Name::serializedSize() const
+{
+    return SerializedNameSize(name_);
+}
+
 static uint8_t *
 SerializeName(uint8_t *cursor, PropertyName *name)
 {
     JS_ASSERT_IF(name, !name->empty());
     if (name) {
         cursor = WriteScalar<uint32_t>(cursor, name->length());
         cursor = WriteBytes(cursor, name->chars(), name->length() * sizeof(jschar));
     } else {
         cursor = WriteScalar<uint32_t>(cursor, 0);
     }
     return cursor;
 }
 
+uint8_t *
+AsmJSModule::Name::serialize(uint8_t *cursor) const
+{
+    return SerializeName(cursor, name_);
+}
+
 static const uint8_t *
 DeserializeName(ExclusiveContext *cx, const uint8_t *cursor, PropertyName **name)
 {
     uint32_t length;
     cursor = ReadScalar<uint32_t>(cursor, &length);
 
     if (length == 0) {
         *name = nullptr;
@@ -516,16 +530,29 @@ DeserializeName(ExclusiveContext *cx, co
     JSAtom *atom = AtomizeChars(cx, src, length);
     if (!atom)
         return nullptr;
 
     *name = atom->asPropertyName();
     return cursor + length * sizeof(jschar);
 }
 
+const uint8_t *
+AsmJSModule::Name::deserialize(ExclusiveContext *cx, const uint8_t *cursor)
+{
+    return DeserializeName(cx, cursor, &name_);
+}
+
+bool
+AsmJSModule::Name::clone(ExclusiveContext *cx, Name *out) const
+{
+    out->name_ = name_;
+    return true;
+}
+
 template <class T>
 size_t
 SerializedVectorSize(const js::Vector<T, 0, SystemAllocPolicy> &vec)
 {
     size_t size = sizeof(uint32_t);
     for (size_t i = 0; i < vec.length(); i++)
         size += vec[i].serializedSize();
     return size;
@@ -783,16 +810,18 @@ AsmJSModule::serializedSize() const
     return sizeof(pod) +
            pod.codeBytes_ +
            SerializedNameSize(globalArgumentName_) +
            SerializedNameSize(importArgumentName_) +
            SerializedNameSize(bufferArgumentName_) +
            SerializedVectorSize(globals_) +
            SerializedVectorSize(exits_) +
            SerializedVectorSize(exports_) +
+           SerializedPodVectorSize(callSites_) +
+           SerializedVectorSize(functionNames_) +
            SerializedPodVectorSize(heapAccesses_) +
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
            SerializedVectorSize(profiledFunctions_) +
 #endif
            staticLinkData_.serializedSize();
 }
 
 uint8_t *
@@ -801,16 +830,18 @@ AsmJSModule::serialize(uint8_t *cursor) 
     cursor = WriteBytes(cursor, &pod, sizeof(pod));
     cursor = WriteBytes(cursor, code_, pod.codeBytes_);
     cursor = SerializeName(cursor, globalArgumentName_);
     cursor = SerializeName(cursor, importArgumentName_);
     cursor = SerializeName(cursor, bufferArgumentName_);
     cursor = SerializeVector(cursor, globals_);
     cursor = SerializeVector(cursor, exits_);
     cursor = SerializeVector(cursor, exports_);
+    cursor = SerializePodVector(cursor, callSites_);
+    cursor = SerializeVector(cursor, functionNames_);
     cursor = SerializePodVector(cursor, heapAccesses_);
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     cursor = SerializeVector(cursor, profiledFunctions_);
 #endif
     cursor = staticLinkData_.serialize(cursor);
     return cursor;
 }
 
@@ -825,16 +856,18 @@ AsmJSModule::deserialize(ExclusiveContex
     (code_ = AllocateExecutableMemory(cx, pod.totalBytes_)) &&
     (cursor = ReadBytes(cursor, code_, pod.codeBytes_)) &&
     (cursor = DeserializeName(cx, cursor, &globalArgumentName_)) &&
     (cursor = DeserializeName(cx, cursor, &importArgumentName_)) &&
     (cursor = DeserializeName(cx, cursor, &bufferArgumentName_)) &&
     (cursor = DeserializeVector(cx, cursor, &globals_)) &&
     (cursor = DeserializeVector(cx, cursor, &exits_)) &&
     (cursor = DeserializeVector(cx, cursor, &exports_)) &&
+    (cursor = DeserializePodVector(cx, cursor, &callSites_)) &&
+    (cursor = DeserializeVector(cx, cursor, &functionNames_)) &&
     (cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) &&
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     (cursor = DeserializeVector(cx, cursor, &profiledFunctions_)) &&
 #endif
     (cursor = staticLinkData_.deserialize(cx, cursor));
 
     loadedFromCache_ = true;
     return cursor;
@@ -892,16 +925,18 @@ AsmJSModule::clone(JSContext *cx, Scoped
 
     out.globalArgumentName_ = globalArgumentName_;
     out.importArgumentName_ = importArgumentName_;
     out.bufferArgumentName_ = bufferArgumentName_;
 
     if (!CloneVector(cx, globals_, &out.globals_) ||
         !CloneVector(cx, exits_, &out.exits_) ||
         !CloneVector(cx, exports_, &out.exports_) ||
+        !ClonePodVector(cx, callSites_, &out.callSites_) ||
+        !CloneVector(cx, functionNames_, &out.functionNames_) ||
         !ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) ||
         !staticLinkData_.clone(cx, &out.staticLinkData_))
     {
         return false;
     }
 
     out.loadedFromCache_ = loadedFromCache_;
 
--- a/js/src/jit/AsmJSModule.h
+++ b/js/src/jit/AsmJSModule.h
@@ -216,41 +216,36 @@ class AsmJSModule
     class ExportedFunction
     {
         PropertyName *name_;
         PropertyName *maybeFieldName_;
         ArgCoercionVector argCoercions_;
         struct Pod {
             ReturnType returnType_;
             uint32_t codeOffset_;
-            uint32_t line_;
-            uint32_t column_;
             // These two fields are offsets to the beginning of the ScriptSource
             // of the module, and thus invariant under serialization (unlike
             // absolute offsets into ScriptSource).
             uint32_t startOffsetInModule_;
             uint32_t endOffsetInModule_;
         } pod;
 
         friend class AsmJSModule;
 
         ExportedFunction(PropertyName *name,
-                         uint32_t line, uint32_t column,
                          uint32_t startOffsetInModule, uint32_t endOffsetInModule,
                          PropertyName *maybeFieldName,
                          ArgCoercionVector &&argCoercions,
                          ReturnType returnType)
         {
             name_ = name;
             maybeFieldName_ = maybeFieldName;
             argCoercions_ = mozilla::Move(argCoercions);
             pod.returnType_ = returnType;
             pod.codeOffset_ = UINT32_MAX;
-            pod.line_ = line;
-            pod.column_ = column;
             pod.startOffsetInModule_ = startOffsetInModule;
             pod.endOffsetInModule_ = endOffsetInModule;
             JS_ASSERT_IF(maybeFieldName_, name_->isTenured());
         }
 
         void trace(JSTracer *trc) {
             MarkStringUnbarriered(trc, &name_, "asm.js export name");
             if (maybeFieldName_)
@@ -269,22 +264,16 @@ class AsmJSModule
         void initCodeOffset(unsigned off) {
             JS_ASSERT(pod.codeOffset_ == UINT32_MAX);
             pod.codeOffset_ = off;
         }
 
         PropertyName *name() const {
             return name_;
         }
-        uint32_t line() const {
-            return pod.line_;
-        }
-        uint32_t column() const {
-            return pod.column_;
-        }
         uint32_t startOffsetInModule() const {
             return pod.startOffsetInModule_;
         }
         uint32_t endOffsetInModule() const {
             return pod.endOffsetInModule_;
         }
         PropertyName *maybeFieldName() const {
             return maybeFieldName_;
@@ -300,16 +289,30 @@ class AsmJSModule
         }
 
         size_t serializedSize() const;
         uint8_t *serialize(uint8_t *cursor) const;
         const uint8_t *deserialize(ExclusiveContext *cx, const uint8_t *cursor);
         bool clone(ExclusiveContext *cx, ExportedFunction *out) const;
     };
 
+    class Name
+    {
+        PropertyName *name_;
+      public:
+        Name() : name_(nullptr) {}
+        Name(PropertyName *name) : name_(name) {}
+        PropertyName *name() const { return name_; }
+        PropertyName *&name() { return name_; }
+        size_t serializedSize() const;
+        uint8_t *serialize(uint8_t *cursor) const;
+        const uint8_t *deserialize(ExclusiveContext *cx, const uint8_t *cursor);
+        bool clone(ExclusiveContext *cx, Name *out) const;
+    };
+
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     // Function information to add to the VTune JIT profiler following linking.
     struct ProfiledFunction
     {
         PropertyName *name;
         struct Pod {
             unsigned startCodeOffset;
             unsigned endCodeOffset;
@@ -395,19 +398,21 @@ class AsmJSModule
         uint8_t *serialize(uint8_t *cursor) const;
         const uint8_t *deserialize(ExclusiveContext *cx, const uint8_t *cursor);
         bool clone(ExclusiveContext *cx, StaticLinkData *out) const;
 
         size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
     };
 
   private:
-    typedef Vector<ExportedFunction, 0, SystemAllocPolicy> ExportedFunctionVector;
     typedef Vector<Global, 0, SystemAllocPolicy> GlobalVector;
     typedef Vector<Exit, 0, SystemAllocPolicy> ExitVector;
+    typedef Vector<ExportedFunction, 0, SystemAllocPolicy> ExportedFunctionVector;
+    typedef Vector<jit::CallSite, 0, SystemAllocPolicy> CallSiteVector;
+    typedef Vector<Name, 0, SystemAllocPolicy> FunctionNameVector;
     typedef Vector<jit::AsmJSHeapAccess, 0, SystemAllocPolicy> HeapAccessVector;
     typedef Vector<jit::IonScriptCounts *, 0, SystemAllocPolicy> FunctionCountsVector;
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     typedef Vector<ProfiledFunction, 0, SystemAllocPolicy> ProfiledFunctionVector;
 #endif
 #if defined(JS_ION_PERF)
     typedef Vector<ProfiledBlocksFunction, 0, SystemAllocPolicy> ProfiledBlocksFunctionVector;
 #endif
@@ -415,16 +420,18 @@ class AsmJSModule
   private:
     PropertyName *                        globalArgumentName_;
     PropertyName *                        importArgumentName_;
     PropertyName *                        bufferArgumentName_;
 
     GlobalVector                          globals_;
     ExitVector                            exits_;
     ExportedFunctionVector                exports_;
+    CallSiteVector                        callSites_;
+    FunctionNameVector                    functionNames_;
     HeapAccessVector                      heapAccesses_;
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     ProfiledFunctionVector                profiledFunctions_;
 #endif
 #if defined(JS_ION_PERF)
     ProfiledBlocksFunctionVector          perfProfiledBlocksFunctions_;
 #endif
 
@@ -471,16 +478,18 @@ class AsmJSModule
         for (unsigned i = 0; i < globals_.length(); i++)
             globals_[i].trace(trc);
         for (unsigned i = 0; i < exports_.length(); i++)
             exports_[i].trace(trc);
         for (unsigned i = 0; i < exits_.length(); i++) {
             if (exitIndexToGlobalDatum(i).fun)
                 MarkObject(trc, &exitIndexToGlobalDatum(i).fun, "asm.js imported function");
         }
+        for (unsigned i = 0; i < functionNames_.length(); i++)
+            MarkStringUnbarriered(trc, &functionNames_[i].name(), "asm.js module function name");
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
         for (unsigned i = 0; i < profiledFunctions_.length(); i++)
             profiledFunctions_[i].trace(trc);
 #endif
 #if defined(JS_ION_PERF)
         for (unsigned i = 0; i < perfProfiledBlocksFunctions_.length(); i++)
             perfProfiledBlocksFunctions_[i].trace(trc);
 #endif
@@ -590,23 +599,22 @@ class AsmJSModule
             return false;
         uint32_t globalDataOffset = globalDataBytes();
         JS_STATIC_ASSERT(sizeof(ExitDatum) % sizeof(void*) == 0);
         pod.funcPtrTableAndExitBytes_ += sizeof(ExitDatum);
         *exitIndex = unsigned(exits_.length());
         return exits_.append(Exit(ffiIndex, globalDataOffset));
     }
 
-    bool addExportedFunction(PropertyName *name, uint32_t line, uint32_t column,
-                             uint32_t srcStart, uint32_t srcEnd,
+    bool addExportedFunction(PropertyName *name, uint32_t srcStart, uint32_t srcEnd,
                              PropertyName *maybeFieldName,
                              ArgCoercionVector &&argCoercions,
                              ReturnType returnType)
     {
-        ExportedFunction func(name, line, column, srcStart, srcEnd, maybeFieldName,
+        ExportedFunction func(name, srcStart, srcEnd, maybeFieldName,
                               mozilla::Move(argCoercions), returnType);
         if (exports_.length() >= UINT32_MAX)
             return false;
         return exports_.append(mozilla::Move(func));
     }
     unsigned numExportedFunctions() const {
         return exports_.length();
     }
@@ -616,16 +624,27 @@ class AsmJSModule
     ExportedFunction &exportedFunction(unsigned i) {
         return exports_[i];
     }
     CodePtr entryTrampoline(const ExportedFunction &func) const {
         JS_ASSERT(func.pod.codeOffset_ != UINT32_MAX);
         return JS_DATA_TO_FUNC_PTR(CodePtr, code_ + func.pod.codeOffset_);
     }
 
+    bool addFunctionName(PropertyName *name, uint32_t *nameIndex) {
+        JS_ASSERT(name->isTenured());
+        if (functionNames_.length() > jit::CallSiteDesc::FUNCTION_NAME_INDEX_MAX)
+            return false;
+        *nameIndex = functionNames_.length();
+        return functionNames_.append(name);
+    }
+    PropertyName *functionName(uint32_t i) const {
+        return functionNames_[i].name();
+    }
+
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     bool trackProfiledFunction(PropertyName *name, unsigned startCodeOffset, unsigned endCodeOffset,
                                unsigned line, unsigned column)
     {
         ProfiledFunction func(name, startCodeOffset, endCodeOffset, line, column);
         return profiledFunctions_.append(func);
     }
     unsigned numProfiledFunctions() const {
@@ -761,16 +780,29 @@ class AsmJSModule
     }
     const jit::AsmJSHeapAccess &heapAccess(unsigned i) const {
         return heapAccesses_[i];
     }
     jit::AsmJSHeapAccess &heapAccess(unsigned i) {
         return heapAccesses_[i];
     }
 
+    void assignCallSites(jit::CallSiteVector &&callsites) {
+        callSites_ = Move(callsites);
+    }
+    unsigned numCallSites() const {
+        return callSites_.length();
+    }
+    const jit::CallSite &callSite(unsigned i) const {
+        return callSites_[i];
+    }
+    jit::CallSite &callSite(unsigned i) {
+        return callSites_[i];
+    }
+
     void initHeap(Handle<ArrayBufferObject*> heap, JSContext *cx);
 
     void requireHeapLengthToBeAtLeast(uint32_t len) {
         if (len > pod.minHeapLength_)
             pod.minHeapLength_ = len;
     }
     uint32_t minHeapLength() const {
         return pod.minHeapLength_;
--- a/js/src/jit/AsmJSSignalHandlers.cpp
+++ b/js/src/jit/AsmJSSignalHandlers.cpp
@@ -337,17 +337,17 @@ HandleSimulatorInterrupt(JSRuntime *rt, 
     // simulator could be in the middle of an instruction. On ARM, the signal
     // handlers are currently only used for Odin code, see bug 964258.
 
 #ifdef JS_ARM_SIMULATOR
     const AsmJSModule &module = activation->module();
     if (module.containsPC((void *)rt->mainThread.simulator()->get_pc()) &&
         module.containsPC(faultingAddress))
     {
-        activation->setResumePC(nullptr);
+        activation->setInterrupted(nullptr);
         int32_t nextpc = int32_t(module.interruptExit());
         rt->mainThread.simulator()->set_resume_pc(nextpc);
         return true;
     }
 #endif
     return false;
 }
 
@@ -447,17 +447,17 @@ HandleException(PEXCEPTION_POINTERS exce
         return false;
 
     // If we faulted trying to execute code in 'module', this must be an
     // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
     // execution to a trampoline which will call js::HandleExecutionInterrupt.
     // The trampoline will jump to activation->resumePC if execution isn't
     // interrupted.
     if (module.containsPC(faultingAddress)) {
-        activation->setResumePC(pc);
+        activation->setInterrupted(pc);
         *ppc = module.interruptExit();
 
         JSRuntime::AutoLockForInterrupt lock(rt);
         module.unprotectCode(rt);
         return true;
     }
 
 # if defined(JS_CODEGEN_X64)
@@ -650,17 +650,17 @@ HandleMachException(JSRuntime *rt, const
         return false;
 
     // If we faulted trying to execute code in 'module', this must be an
     // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
     // execution to a trampoline which will call js::HandleExecutionInterrupt.
     // The trampoline will jump to activation->resumePC if execution isn't
     // interrupted.
     if (module.containsPC(faultingAddress)) {
-        activation->setResumePC(pc);
+        activation->setInterrupted(pc);
         *ppc = module.interruptExit();
 
         JSRuntime::AutoLockForInterrupt lock(rt);
         module.unprotectCode(rt);
 
         // Update the thread state with the new pc.
         kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
         return kret == KERN_SUCCESS;
@@ -900,17 +900,17 @@ HandleSignal(int signum, siginfo_t *info
         return false;
 
     // If we faulted trying to execute code in 'module', this must be an
     // interrupt callback (see RequestInterruptForAsmJSCode). Redirect
     // execution to a trampoline which will call js::HandleExecutionInterrupt.
     // The trampoline will jump to activation->resumePC if execution isn't
     // interrupted.
     if (module.containsPC(faultingAddress)) {
-        activation->setResumePC(pc);
+        activation->setInterrupted(pc);
         *ppc = module.interruptExit();
 
         JSRuntime::AutoLockForInterrupt lock(rt);
         module.unprotectCode(rt);
         return true;
     }
 
 # if defined(JS_CODEGEN_X64)
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -8213,39 +8213,40 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall
             if (a->isFloatReg()) {
                 FloatRegister fr = ToFloatRegister(a);
                 int srcId = fr.code() * 2;
                 masm.ma_vxfer(fr, Register::FromCode(srcId), Register::FromCode(srcId+1));
             }
         }
     }
 #endif
+
    if (mir->spIncrement())
         masm.freeStack(mir->spIncrement());
 
    JS_ASSERT((AlignmentAtPrologue +  masm.framePushed()) % StackAlignment == 0);
 
 #ifdef DEBUG
     Label ok;
     JS_ASSERT(IsPowerOfTwo(StackAlignment));
     masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok);
     masm.assumeUnreachable("Stack should be aligned.");
     masm.bind(&ok);
 #endif
 
     MAsmJSCall::Callee callee = mir->callee();
     switch (callee.which()) {
       case MAsmJSCall::Callee::Internal:
-        masm.call(callee.internal());
+        masm.call(mir->desc(), callee.internal());
         break;
       case MAsmJSCall::Callee::Dynamic:
-        masm.call(ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
+        masm.call(mir->desc(), ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
         break;
       case MAsmJSCall::Callee::Builtin:
-        masm.call(callee.builtin());
+        masm.call(mir->desc(), callee.builtin());
         break;
     }
 
     if (mir->spIncrement())
         masm.reserveStack(mir->spIncrement());
 
     postAsmJSCall(ins);
     return true;
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -2963,22 +2963,20 @@ MAsmJSUnsignedToFloat32::foldsTo(TempAll
                 return MConstant::NewAsmJS(alloc, JS::Float32Value(float(dval)), MIRType_Float32);
         }
     }
 
     return this;
 }
 
 MAsmJSCall *
-MAsmJSCall::New(TempAllocator &alloc, Callee callee, const Args &args, MIRType resultType,
-                size_t spIncrement)
+MAsmJSCall::New(TempAllocator &alloc, const CallSiteDesc &desc, Callee callee,
+                const Args &args, MIRType resultType, size_t spIncrement)
 {
-    MAsmJSCall *call = new(alloc) MAsmJSCall;
-    call->spIncrement_ = spIncrement;
-    call->callee_ = callee;
+    MAsmJSCall *call = new(alloc) MAsmJSCall(desc, callee, spIncrement);
     call->setResultType(resultType);
 
     if (!call->argRegs_.init(alloc, args.length()))
         return nullptr;
     for (size_t i = 0; i < call->argRegs_.length(); i++)
         call->argRegs_[i] = args[i].reg;
 
     if (!call->operands_.init(alloc, call->argRegs_.length() + (callee.which() == Callee::Dynamic ? 1 : 0)))
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -9977,21 +9977,26 @@ class MAsmJSCall MOZ_FINAL : public MIns
     };
 
   private:
     struct Operand {
         AnyRegister reg;
         MUse use;
     };
 
+    CallSiteDesc desc_;
     Callee callee_;
     FixedList<MUse> operands_;
     FixedList<AnyRegister> argRegs_;
     size_t spIncrement_;
 
+    MAsmJSCall(const CallSiteDesc &desc, Callee callee, size_t spIncrement)
+     : desc_(desc), callee_(callee), spIncrement_(spIncrement)
+    { }
+
   protected:
     void setOperand(size_t index, MDefinition *operand) {
         operands_[index].set(operand, this, index);
         operand->addUse(&operands_[index]);
     }
     MUse *getUseFor(size_t index) {
         return &operands_[index];
     }
@@ -10001,33 +10006,36 @@ class MAsmJSCall MOZ_FINAL : public MIns
 
     struct Arg {
         AnyRegister reg;
         MDefinition *def;
         Arg(AnyRegister reg, MDefinition *def) : reg(reg), def(def) {}
     };
     typedef Vector<Arg, 8> Args;
 
-    static MAsmJSCall *New(TempAllocator &alloc, Callee callee, const Args &args,
-                           MIRType resultType, size_t spIncrement);
+    static MAsmJSCall *New(TempAllocator &alloc, const CallSiteDesc &desc, Callee callee,
+                           const Args &args, MIRType resultType, size_t spIncrement);
 
     size_t numOperands() const {
         return operands_.length();
     }
     MDefinition *getOperand(size_t index) const {
         JS_ASSERT(index < numOperands());
         return operands_[index].producer();
     }
     size_t numArgs() const {
         return argRegs_.length();
     }
     AnyRegister registerForArg(size_t index) const {
         JS_ASSERT(index < numArgs());
         return argRegs_[index];
     }
+    const CallSiteDesc &desc() const {
+        return desc_;
+    }
     Callee callee() const {
         return callee_;
     }
     size_t dynamicCalleeOperandIndex() const {
         JS_ASSERT(callee_.which() == Callee::Dynamic);
         JS_ASSERT(numArgs() == numOperands() - 1);
         return numArgs();
     }
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -3564,16 +3564,29 @@ MacroAssemblerARM::ma_call(ImmPtr dest)
     else
         rs = L_LDR;
 
     ma_movPatchable(dest, CallReg, Always, rs);
     as_blx(CallReg);
 }
 
 void
+MacroAssemblerARM::ma_callAndStoreRet(const Register r, uint32_t stackArgBytes)
+{
+    // Note: this function stores the return address to sp[0]. The caller must
+    // anticipate this by pushing additional space on the stack. The ABI does
+    // not provide space for a return address so this function may only be
+    // called if no arguments are passed.
+    JS_ASSERT(stackArgBytes == 0);
+    AutoForbidPools afp(this);
+    as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0)));
+    as_blx(r);
+}
+
+void
 MacroAssemblerARMCompat::breakpoint()
 {
     as_bkpt();
 }
 
 void
 MacroAssemblerARMCompat::ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure)
 {
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -394,16 +394,19 @@ class MacroAssemblerARM : public Assembl
     void ma_callIon(const Register reg);
     // callso an Ion function, assuming that sp has already been decremented
     void ma_callIonNoPush(const Register reg);
     // calls an ion function, assuming that the stack is currently not 8 byte aligned
     void ma_callIonHalfPush(const Register reg);
 
     void ma_call(ImmPtr dest);
 
+    // calls reg, storing the return address into sp[0]
+    void ma_callAndStoreRet(const Register reg, uint32_t stackArgBytes);
+
     // Float registers can only be loaded/stored in continuous runs
     // when using vstm/vldm.
     // This function breaks set into continuous runs and loads/stores
     // them at [rm]. rm will be modified and left in a state logically
     // suitable for the next load/store.
     // Returns the offset from [dm] for the logical next load/store.
     int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
                                    Register rm, DTMMode mode)
@@ -538,17 +541,16 @@ class MacroAssemblerARMCompat : public M
     }
     void mov(Address src, Register dest) {
         MOZ_ASSUME_UNREACHABLE("NYI-IC");
     }
 
     void call(const Register reg) {
         as_blx(reg);
     }
-
     void call(Label *label) {
         // for now, assume that it'll be nearby?
         as_bl(label, Always);
     }
     void call(ImmWord imm) {
         call(ImmPtr((void*)imm.value));
     }
     void call(ImmPtr imm) {
@@ -567,16 +569,48 @@ class MacroAssemblerARMCompat : public M
         if (hasMOVWT())
             rs = L_MOVWT;
         else
             rs = L_LDR;
 
         ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
         ma_callIonHalfPush(ScratchRegister);
     }
+
+    void appendCallSite(const CallSiteDesc &desc) {
+        enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_));
+    }
+
+    void call(const CallSiteDesc &desc, const Register reg) {
+        call(reg);
+        appendCallSite(desc);
+    }
+    void call(const CallSiteDesc &desc, Label *label) {
+        call(label);
+        appendCallSite(desc);
+    }
+    void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
+        call(imm);
+        appendCallSite(desc);
+    }
+    void callExit(AsmJSImmPtr imm, uint32_t stackArgBytes) {
+        movePtr(imm, CallReg);
+        ma_callAndStoreRet(CallReg, stackArgBytes);
+        appendCallSite(CallSiteDesc::Exit());
+    }
+    void callIonFromAsmJS(const Register reg) {
+        ma_callIonNoPush(reg);
+        appendCallSite(CallSiteDesc::Exit());
+
+        // The Ion ABI has the callee pop the return address off the stack.
+        // The asm.js caller assumes that the call leaves sp unchanged, so bump
+        // the stack.
+        subPtr(Imm32(sizeof(void*)), sp);
+    }
+
     void branch(JitCode *c) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
         RelocStyle rs;
         if (hasMOVWT())
             rs = L_MOVWT;
         else
             rs = L_LDR;
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -4066,17 +4066,17 @@ Simulator::execute()
         } else {
             SimInstruction *instr = reinterpret_cast<SimInstruction *>(program_counter);
             instructionDecode(instr);
             icount_++;
 
             int32_t rpc = resume_pc_;
             if (MOZ_UNLIKELY(rpc != 0)) {
                 // AsmJS signal handler ran and we have to adjust the pc.
-                activation->setResumePC((void *)get_pc());
+                activation->setInterrupted((void *)get_pc());
                 set_pc(rpc);
                 resume_pc_ = 0;
             }
         }
         program_counter = get_pc();
     }
 }
 
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -658,16 +658,87 @@ class CodeLocationLabel
         return raw_;
     }
     uint8_t *offset() const {
         JS_ASSERT(state_ == Relative);
         return raw_;
     }
 };
 
+// Describes the user-visible properties of a callsite.
+//
+// A few general notes about the stack-walking supported by CallSite(Desc):
+//  - This information facilitates stack-walking performed by FrameIter which
+//    is used by Error.stack and other user-visible stack-walking functions.
+//  - Ion/asm.js calling conventions do not maintain a frame-pointer so
+//    stack-walking must lookup the stack depth based on the PC.
+//  - Stack-walking only occurs from C++ after synchronous calls (JS-to-JS and
+//    JS-to-C++). Thus, we do not need to map arbitrary PCs to stack-depths,
+//    just the return address at callsites.
+//  - An exception to the above rule is the interrupt callback which can happen
+//    at arbitrary PCs. In such cases, we drop frames from the stack-walk. In
+//    the future when a full PC->stack-depth map is maintained, we will
+//    handle this case.
+class CallSiteDesc
+{
+    uint32_t line_;
+    uint32_t column_;
+    uint32_t functionNameIndex_;
+
+    static const uint32_t sEntryTrampoline = UINT32_MAX;
+    static const uint32_t sExit = UINT32_MAX - 1;
+
+  public:
+    static const uint32_t FUNCTION_NAME_INDEX_MAX = UINT32_MAX - 2;
+
+    CallSiteDesc() {}
+
+    CallSiteDesc(uint32_t line, uint32_t column, uint32_t functionNameIndex)
+     : line_(line), column_(column), functionNameIndex_(functionNameIndex)
+    {}
+
+    static CallSiteDesc Entry() { return CallSiteDesc(0, 0, sEntryTrampoline); }
+    static CallSiteDesc Exit() { return CallSiteDesc(0, 0, sExit); }
+
+    bool isEntry() const { return functionNameIndex_ == sEntryTrampoline; }
+    bool isExit() const { return functionNameIndex_ == sExit; }
+    bool isNormal() const { return !(isEntry() || isExit()); }
+
+    uint32_t line() const { JS_ASSERT(isNormal()); return line_; }
+    uint32_t column() const { JS_ASSERT(isNormal()); return column_; }
+    uint32_t functionNameIndex() const { JS_ASSERT(isNormal()); return functionNameIndex_; }
+};
+
+// Adds to CallSiteDesc the metadata necessary to walk the stack given an
+// initial stack-pointer.
+struct CallSite : public CallSiteDesc
+{
+    uint32_t returnAddressOffset_;
+    uint32_t stackDepth_;
+
+  public:
+    CallSite() {}
+
+    CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
+      : CallSiteDesc(desc),
+        returnAddressOffset_(returnAddressOffset),
+        stackDepth_(stackDepth)
+    { }
+
+    void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
+    uint32_t returnAddressOffset() const { return returnAddressOffset_; }
+
+    // The stackDepth measures the amount of stack space pushed since the
+    // function was called. In particular, this includes the word pushed by the
+    // call instruction on x86/x64.
+    uint32_t stackDepth() const { JS_ASSERT(!isEntry()); return stackDepth_; }
+};
+
+typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
+
 // Summarizes a heap access made by asm.js code that needs to be patched later
 // and/or looked up by the asm.js signal handlers. Different architectures need
 // to know different things (x64: offset and length, ARM: where to patch in
 // heap length, x86: where to patch in heap length and base) hence the massive
 // #ifdefery.
 class AsmJSHeapAccess
 {
     uint32_t offset_;
@@ -809,21 +880,25 @@ struct AsmJSAbsoluteLink
       : patchAt(patchAt), target(target) {}
     CodeOffsetLabel patchAt;
     AsmJSImmKind target;
 };
 
 // The base class of all Assemblers for all archs.
 class AssemblerShared
 {
+    Vector<CallSite, 0, SystemAllocPolicy> callsites_;
     Vector<AsmJSHeapAccess, 0, SystemAllocPolicy> asmJSHeapAccesses_;
     Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
     Vector<AsmJSAbsoluteLink, 0, SystemAllocPolicy> asmJSAbsoluteLinks_;
 
   public:
+    bool append(CallSite callsite) { return callsites_.append(callsite); }
+    CallSiteVector &&extractCallSites() { return Move(callsites_); }
+
     bool append(AsmJSHeapAccess access) { return asmJSHeapAccesses_.append(access); }
     AsmJSHeapAccessVector &&extractAsmJSHeapAccesses() { return Move(asmJSHeapAccesses_); }
 
     bool append(AsmJSGlobalAccess access) { return asmJSGlobalAccesses_.append(access); }
     size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
     AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
 
     bool append(AsmJSAbsoluteLink link) { return asmJSAbsoluteLinks_.append(link); }
--- a/js/src/jit/shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/shared/MacroAssembler-x86-shared.h
@@ -25,16 +25,18 @@ class MacroAssemblerX86Shared : public A
     // Bytes pushed onto the frame by the callee; includes frameDepth_. This is
     // needed to compute offsets to stack slots while temporary space has been
     // reserved for unexpected spills or C++ function calls. It is maintained
     // by functions which track stack alignment, which for clear distinction
     // use StudlyCaps (for example, Push, Pop).
     uint32_t framePushed_;
 
   public:
+    using Assembler::call;
+
     MacroAssemblerX86Shared()
       : framePushed_(0)
     { }
 
     void compareDouble(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs) {
         if (cond & DoubleConditionBitInvert)
             ucomisd(rhs, lhs);
         else
@@ -657,16 +659,34 @@ class MacroAssemblerX86Shared : public A
     // non-function. Returns offset to be passed to markSafepointAt().
     bool buildFakeExitFrame(const Register &scratch, uint32_t *offset);
     void callWithExitFrame(JitCode *target);
 
     void callIon(const Register &callee) {
         call(callee);
     }
 
+    void appendCallSite(const CallSiteDesc &desc) {
+        // Add an extra sizeof(void*) to include the return address that was
+        // pushed by the call instruction (see CallSite::stackDepth).
+        enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + sizeof(void*)));
+    }
+
+    void call(const CallSiteDesc &desc, Label *label) {
+        call(label);
+        appendCallSite(desc);
+    }
+    void call(const CallSiteDesc &desc, const Register &reg) {
+        call(reg);
+        appendCallSite(desc);
+    }
+    void callIonFromAsmJS(const Register &reg) {
+        call(CallSiteDesc::Exit(), reg);
+    }
+
     void checkStackAlignment() {
         // Exists for ARM compatibility.
     }
 
     CodeOffsetLabel labelForPatch() {
         return CodeOffsetLabel(size());
     }
 
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -105,16 +105,24 @@ class MacroAssemblerX64 : public MacroAs
     void call(ImmPtr target) {
         call(ImmWord(uintptr_t(target.value)));
     }
     void call(AsmJSImmPtr target) {
         mov(target, rax);
         call(rax);
     }
 
+    void call(const CallSiteDesc &desc, AsmJSImmPtr target) {
+        call(target);
+        appendCallSite(desc);
+    }
+    void callExit(AsmJSImmPtr target, uint32_t stackArgBytes) {
+        call(CallSiteDesc::Exit(), target);
+    }
+
     // Refers to the upper 32 bits of a 64-bit Value operand.
     // On x86_64, the upper 32 bits do not necessarily only contain the type.
     Operand ToUpper32(Operand base) {
         switch (base.kind()) {
           case Operand::MEM_REG_DISP:
             return Operand(Register::FromCode(base.base()), base.disp() + 4);
 
           case Operand::MEM_SCALE:
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -66,16 +66,17 @@ class MacroAssemblerX86 : public MacroAs
 
   public:
     using MacroAssemblerX86Shared::Push;
     using MacroAssemblerX86Shared::Pop;
     using MacroAssemblerX86Shared::callWithExitFrame;
     using MacroAssemblerX86Shared::branch32;
     using MacroAssemblerX86Shared::load32;
     using MacroAssemblerX86Shared::store32;
+    using MacroAssemblerX86Shared::call;
 
     MacroAssemblerX86()
       : inCall_(false),
         enoughMemory_(true)
     {
     }
 
     // The buffer is about to be linked, make sure any constant pools or excess
@@ -1100,16 +1101,23 @@ class MacroAssemblerX86 : public MacroAs
     }
 
     void callWithExitFrame(JitCode *target, Register dynStack) {
         addPtr(Imm32(framePushed()), dynStack);
         makeFrameDescriptor(dynStack, JitFrame_IonJS);
         Push(dynStack);
         call(target);
     }
+    void call(const CallSiteDesc &desc, AsmJSImmPtr target) {
+        call(target);
+        appendCallSite(desc);
+    }
+    void callExit(AsmJSImmPtr target, uint32_t stackArgBytes) {
+        call(CallSiteDesc::Exit(), target);
+    }
 
     // Save an exit frame to the thread data of the current thread, given a
     // register that holds a PerThreadData *.
     void linkParallelExitFrame(const Register &pt) {
         movl(StackPointer, Operand(pt, offsetof(PerThreadData, ionTop)));
     }
 
 #ifdef JSGC_GENERATIONAL
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -581,28 +581,33 @@ FrameIter::settleOnActivation()
                 continue;
             }
 
             nextJitFrame();
             data_.state_ = JIT;
             return;
         }
 
+        if (activation->isAsmJS()) {
+            data_.asmJSFrames_ = AsmJSFrameIterator(data_.activations_->asAsmJS());
+
+            if (data_.asmJSFrames_.done()) {
+                ++data_.activations_;
+                continue;
+            }
+
+            data_.state_ = ASMJS;
+            return;
+        }
+
         // ForkJoin activations don't contain iterable frames, so skip them.
         if (activation->isForkJoin()) {
             ++data_.activations_;
             continue;
         }
-
-        // Until asm.js has real stack-walking, we have each AsmJSActivation
-        // expose a single function (the entry function).
-        if (activation->isAsmJS()) {
-            data_.state_ = ASMJS;
-            return;
-        }
 #endif
 
         JS_ASSERT(activation->isInterpreter());
 
         InterpreterActivation *interpAct = activation->asInterpreter();
         data_.interpFrames_ = InterpreterFrameIterator(interpAct);
 
         // If we OSR'ed into JIT code, skip the interpreter frame so that
@@ -629,32 +634,34 @@ FrameIter::Data::Data(JSContext *cx, Sav
     contextOption_(contextOption),
     principals_(principals),
     pc_(nullptr),
     interpFrames_(nullptr),
     activations_(cx->runtime())
 #ifdef JS_ION
   , jitFrames_((uint8_t *)nullptr, SequentialExecution)
   , ionInlineFrameNo_(0)
+  , asmJSFrames_(nullptr)
 #endif
 {
 }
 
 FrameIter::Data::Data(const FrameIter::Data &other)
   : cx_(other.cx_),
     savedOption_(other.savedOption_),
     contextOption_(other.contextOption_),
     principals_(other.principals_),
     state_(other.state_),
     pc_(other.pc_),
     interpFrames_(other.interpFrames_),
     activations_(other.activations_)
 #ifdef JS_ION
   , jitFrames_(other.jitFrames_)
   , ionInlineFrameNo_(other.ionInlineFrameNo_)
+  , asmJSFrames_(other.asmJSFrames_)
 #endif
 {
 }
 
 FrameIter::FrameIter(JSContext *cx, SavedOption savedOption)
   : data_(cx, savedOption, CURRENT_CONTEXT, nullptr)
 #ifdef JS_ION
   , ionInlineFrames_(cx, (js::jit::JitFrameIterator*) nullptr)
@@ -726,16 +733,26 @@ FrameIter::popJitFrame()
 
     if (!data_.jitFrames_.done()) {
         nextJitFrame();
         return;
     }
 
     popActivation();
 }
+
+void
+FrameIter::popAsmJSFrame()
+{
+    JS_ASSERT(data_.state_ == ASMJS);
+
+    ++data_.asmJSFrames_;
+    if (data_.asmJSFrames_.done())
+        popActivation();
+}
 #endif
 
 FrameIter &
 FrameIter::operator++()
 {
     switch (data_.state_) {
       case DONE:
         MOZ_ASSUME_UNREACHABLE("Unexpected state");
@@ -775,21 +792,17 @@ FrameIter::operator++()
       case JIT:
 #ifdef JS_ION
         popJitFrame();
         break;
 #else
         MOZ_ASSUME_UNREACHABLE("Unexpected state");
 #endif
       case ASMJS:
-        // As described in settleOnActivation, an AsmJSActivation currently only
-        // represents a single asm.js function, so, if the FrameIter is
-        // currently stopped on an ASMJS frame, then we can pop the entire
-        // AsmJSActivation.
-        popActivation();
+        popAsmJSFrame();
         break;
     }
     return *this;
 }
 
 FrameIter::Data *
 FrameIter::copyData() const
 {
@@ -935,18 +948,17 @@ FrameIter::functionDisplayAtom() const
     switch (data_.state_) {
       case DONE:
         break;
       case INTERP:
       case JIT:
         return callee()->displayAtom();
       case ASMJS: {
 #ifdef JS_ION
-        AsmJSActivation &act = *data_.activations_->asAsmJS();
-        return act.module().exportedFunction(act.exportIndex()).name();
+        return data_.asmJSFrames_.functionDisplayAtom();
 #else
         break;
 #endif
       }
     }
 
     MOZ_ASSUME_UNREACHABLE("Unexpected state");
 }
@@ -995,27 +1007,22 @@ unsigned
 FrameIter::computeLine(uint32_t *column) const
 {
     switch (data_.state_) {
       case DONE:
         break;
       case INTERP:
       case JIT:
         return PCToLineNumber(script(), pc(), column);
-      case ASMJS: {
+      case ASMJS:
 #ifdef JS_ION
-        AsmJSActivation &act = *data_.activations_->asAsmJS();
-        AsmJSModule::ExportedFunction &func = act.module().exportedFunction(act.exportIndex());
-        if (column)
-            *column = func.column();
-        return func.line();
+        return data_.asmJSFrames_.computeLine(column);
 #else
         break;
 #endif
-      }
     }
 
     MOZ_ASSUME_UNREACHABLE("Unexpected state");
 }
 
 JSPrincipals *
 FrameIter::originPrincipals() const
 {
@@ -1649,23 +1656,23 @@ jit::JitActivation::markRematerializedFr
         RematerializedFrameVector &frames = e.front().value();
         for (size_t i = 0; i < frames.length(); i++)
             frames[i]->mark(trc);
     }
 }
 
 #endif // JS_ION
 
-AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module, unsigned exportIndex)
+AsmJSActivation::AsmJSActivation(JSContext *cx, AsmJSModule &module)
   : Activation(cx, AsmJS),
     module_(module),
     errorRejoinSP_(nullptr),
     profiler_(nullptr),
     resumePC_(nullptr),
-    exportIndex_(exportIndex)
+    exitSP_(nullptr)
 {
     if (cx->runtime()->spsProfiler.enabled()) {
         // Use a profiler string that matches jsMatch regex in
         // browser/devtools/profiler/cleopatra/js/parserWorker.js.
         // (For now use a single static string to avoid further slowing down
         // calls into asm.js.)
         profiler_ = &cx->runtime()->spsProfiler;
         profiler_->enterNative("asm.js code :0", this);
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -7,16 +7,17 @@
 #ifndef vm_Stack_h
 #define vm_Stack_h
 
 #include "mozilla/MemoryReporting.h"
 
 #include "jsfun.h"
 #include "jsscript.h"
 
+#include "jit/AsmJSLink.h"
 #include "jit/JitFrameIterator.h"
 #ifdef CHECK_OSIPOINT_REGISTERS
 #include "jit/Registers.h" // for RegisterDump
 #endif
 #include "js/OldDebugAPI.h"
 
 struct JSCompartment;
 struct JSGenerator;
@@ -1497,39 +1498,44 @@ class InterpreterFrameIterator
 // all kinds of jit code.
 class AsmJSActivation : public Activation
 {
     AsmJSModule &module_;
     AsmJSActivation *prevAsmJS_;
     void *errorRejoinSP_;
     SPSProfiler *profiler_;
     void *resumePC_;
+    uint8_t *exitSP_;
 
-    // These bits are temporary and will be replaced when real asm.js
-    // stack-walking support lands:
-    unsigned exportIndex_;
+    static const intptr_t InterruptedSP = -1;
 
   public:
-    AsmJSActivation(JSContext *cx, AsmJSModule &module, unsigned exportIndex);
+    AsmJSActivation(JSContext *cx, AsmJSModule &module);
     ~AsmJSActivation();
 
     JSContext *cx() { return cx_; }
     AsmJSModule &module() const { return module_; }
-    unsigned exportIndex() const { return exportIndex_; }
     AsmJSActivation *prevAsmJS() const { return prevAsmJS_; }
 
     // Read by JIT code:
     static unsigned offsetOfContext() { return offsetof(AsmJSActivation, cx_); }
     static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
 
     // Initialized by JIT code:
     static unsigned offsetOfErrorRejoinSP() { return offsetof(AsmJSActivation, errorRejoinSP_); }
+    static unsigned offsetOfExitSP() { return offsetof(AsmJSActivation, exitSP_); }
 
     // Set from SIGSEGV handler:
-    void setResumePC(void *pc) { resumePC_ = pc; }
+    void setInterrupted(void *pc) { resumePC_ = pc; exitSP_ = (uint8_t*)InterruptedSP; }
+    bool isInterruptedSP() const { return exitSP_ == (uint8_t*)InterruptedSP; }
+
+    // Note: exitSP is the sp right before the call instruction. On x86, this
+    // means before the return address is pushed on the stack, on ARM, this
+    // means after.
+    uint8_t *exitSP() const { JS_ASSERT(!isInterruptedSP()); return exitSP_; }
 };
 
 // A FrameIter walks over the runtime's stack of JS script activations,
 // abstracting over whether the JS scripts were running in the interpreter or
 // different modes of compiled code.
 //
 // FrameIter is parameterized by what it includes in the stack iteration:
 //  - The SavedOption controls whether FrameIter stops when it finds an
@@ -1570,16 +1576,17 @@ class FrameIter
         jsbytecode *    pc_;
 
         InterpreterFrameIterator interpFrames_;
         ActivationIterator activations_;
 
 #ifdef JS_ION
         jit::JitFrameIterator jitFrames_;
         unsigned ionInlineFrameNo_;
+        AsmJSFrameIterator asmJSFrames_;
 #endif
 
         Data(JSContext *cx, SavedOption savedOption, ContextOption contextOption,
              JSPrincipals *principals);
         Data(const Data &other);
     };
 
     FrameIter(JSContext *cx, SavedOption = STOP_AT_SAVED);
@@ -1696,16 +1703,17 @@ class FrameIter
     jit::InlineFrameIterator ionInlineFrames_;
 #endif
 
     void popActivation();
     void popInterpreterFrame();
 #ifdef JS_ION
     void nextJitFrame();
     void popJitFrame();
+    void popAsmJSFrame();
 #endif
     void settleOnActivation();
 
     friend class ::JSBrokenFrameIterator;
 };
 
 class ScriptFrameIter : public FrameIter
 {