Bug 1030446 - Build a list of code ranges and use this instead of CallSite for describing functions (r=dougc)
author Luke Wagner <luke@mozilla.com>
Wed, 25 Jun 2014 17:34:23 -0500
changeset 192260 7abc1d5a4e4c97dbb904e93ba8dae3ec4e828241
parent 192259 d1235dfcbda034c6f4bef24389645f6294cf0895
child 192261 32ad89bbe0759d6ea9c44d3b9593b7e1f29f4e82
push id 27078
push user ryanvm@gmail.com
push date Fri, 04 Jul 2014 03:04:00 +0000
treeherder mozilla-central@39bf1eaa9190
reviewers dougc
bugs 1030446
milestone 33.0a1
js/src/jit/AsmJS.cpp
js/src/jit/AsmJSFrameIterator.cpp
js/src/jit/AsmJSFrameIterator.h
js/src/jit/AsmJSModule.cpp
js/src/jit/AsmJSModule.h
js/src/jit/CodeGenerator.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/mips/Assembler-mips.h
js/src/jit/mips/CodeGenerator-mips.cpp
js/src/jit/mips/MacroAssembler-mips.cpp
js/src/jit/mips/MacroAssembler-mips.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/MacroAssembler-x86-shared.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/MacroAssembler-x86.h
mfbt/BinarySearch.h
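
The heart of the patch is a small, self-contained idea: record a half-open [begin, end) offset range for every function body and entry trampoline, append the ranges in emission order so the vector stays sorted and non-overlapping, and binary-search by PC offset when unwinding. A minimal standalone sketch of that lookup follows; the names (Range, lookup) are illustrative stand-ins, not the real AsmJSModule API, and the real patch uses mfbt::BinarySearch rather than std::upper_bound (see the comparison operators added to AsmJSModule.cpp below).

// Standalone sketch of the code-range lookup this patch introduces. Range
// and lookup are stand-ins for AsmJSModule::CodeRange / lookupCodeRange.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Range {
    enum Kind { Entry, Function } kind;
    uint32_t begin, end;  // half-open [begin, end) offsets into the code
};

// Ranges are appended in emission order, so the vector is already sorted
// and non-overlapping; binary search finds the range containing pcOffset.
const Range *lookup(const std::vector<Range> &ranges, uint32_t pcOffset)
{
    auto it = std::upper_bound(ranges.begin(), ranges.end(), pcOffset,
                               [](uint32_t pc, const Range &r) { return pc < r.begin; });
    if (it == ranges.begin())
        return nullptr;
    --it;
    return pcOffset < it->end ? &*it : nullptr;
}

int main()
{
    std::vector<Range> ranges = {{Range::Entry, 0, 64}, {Range::Function, 64, 200}};
    assert(lookup(ranges, 70)->kind == Range::Function);
    assert(!lookup(ranges, 300));
    return 0;
}
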
--- a/js/src/jit/AsmJS.cpp
+++ b/js/src/jit/AsmJS.cpp
@@ -11,17 +11,16 @@
 #ifdef MOZ_VTUNE
 # include "vtune/VTuneWrapper.h"
 #endif
 
 #include "jsmath.h"
 #include "jsprf.h"
 #include "prmjtime.h"
 
-#include "assembler/assembler/MacroAssembler.h"
 #include "frontend/Parser.h"
 #include "jit/AsmJSLink.h"
 #include "jit/AsmJSModule.h"
 #include "jit/AsmJSSignalHandlers.h"
 #include "jit/CodeGenerator.h"
 #include "jit/CompileWrappers.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
@@ -1404,19 +1403,16 @@ class MOZ_STACK_CLASS ModuleCompiler
         if (p) {
             *exitIndex = p->value();
             return true;
         }
         if (!module_->addExit(ffiIndex, exitIndex))
             return false;
         return exits_.add(p, Move(exitDescriptor), *exitIndex);
     }
-    bool addFunctionName(PropertyName *name, uint32_t *index) {
-        return module_->addFunctionName(name, index);
-    }
 
     // Note a constraint on the minimum size of the heap.  The heap size is
     // constrained when linking to be at least the maximum of all such constraints.
     void requireHeapLengthToBeAtLeast(uint32_t len) {
         module_->requireHeapLengthToBeAtLeast(len);
     }
     uint32_t minHeapLength() const {
         return module_->minHeapLength();
@@ -1438,16 +1434,21 @@ class MOZ_STACK_CLASS ModuleCompiler
         masm_.resetForNewCodeGenerator(mir.alloc());
         masm_.align(CodeAlignment);
         masm_.bind(func.code());
     }
 
     bool finishGeneratingFunction(Func &func, MIRGenerator &mir, CodeGenerator &codegen) {
         JS_ASSERT(func.defined() && func.code()->bound());
 
+        uint32_t beginOffset = func.code()->offset();
+        uint32_t endOffset = masm_.currentOffset();
+        if (!module_->addFunctionCodeRange(func.name(), beginOffset, endOffset))
+            return false;
+
         jit::IonScriptCounts *counts = codegen.extractScriptCounts();
         if (counts && !module_->addFunctionCounts(counts)) {
             js_delete(counts);
             return false;
         }
 
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
         unsigned line = 0, column = 0;
@@ -1475,25 +1476,29 @@ class MOZ_STACK_CLASS ModuleCompiler
 
     void finishFunctionBodies() {
         JS_ASSERT(!finishedFunctionBodies_);
         masm_.align(AsmJSPageSize);
         finishedFunctionBodies_ = true;
         module_->finishFunctionBodies(masm_.currentOffset());
     }
 
+    void startGeneratingEntry(unsigned exportIndex) {
+        module_->exportedFunction(exportIndex).initCodeOffset(masm_.currentOffset());
+    }
+    bool finishGeneratingEntry(unsigned exportIndex) {
+        return module_->addEntryCodeRange(exportIndex, masm_.currentOffset());
+    }
+
     void setInterpExitOffset(unsigned exitIndex) {
         module_->exit(exitIndex).initInterpOffset(masm_.currentOffset());
     }
     void setIonExitOffset(unsigned exitIndex) {
         module_->exit(exitIndex).initIonOffset(masm_.currentOffset());
     }
-    void setEntryOffset(unsigned exportIndex) {
-        module_->exportedFunction(exportIndex).initCodeOffset(masm_.currentOffset());
-    }
 
     void buildCompilationTimeReport(bool storedInCache, ScopedJSFreePtr<char> *out) {
         ScopedJSFreePtr<char> slowFuns;
 #ifndef JS_MORE_DETERMINISTIC
         int64_t usecAfter = PRMJ_Now();
         int msTotal = (usecAfter - usecBefore_) / PRMJ_USEC_PER_MSEC;
         if (!slowFunctions_.empty()) {
             slowFuns.reset(JS_smprintf("; %d functions compiled slowly: ", slowFunctions_.length()));
@@ -1803,17 +1808,16 @@ class FunctionCompiler
     typedef Vector<TypedValue> VarInitializerVector;
     typedef HashMap<PropertyName*, BlockVector> LabeledBlockMap;
     typedef HashMap<ParseNode*, BlockVector> UnlabeledBlockMap;
     typedef Vector<ParseNode*, 4> NodeStack;
 
     ModuleCompiler &       m_;
     LifoAlloc &            lifo_;
     ParseNode *            fn_;
-    uint32_t               functionNameIndex_;
 
     LocalMap               locals_;
     VarInitializerVector   varInitializers_;
     Maybe<RetType>         alreadyReturned_;
 
     TempAllocator *        alloc_;
     MIRGraph *             graph_;
     CompileInfo *          info_;
@@ -1824,25 +1828,21 @@ class FunctionCompiler
 
     NodeStack              loopStack_;
     NodeStack              breakableStack_;
     UnlabeledBlockMap      unlabeledBreaks_;
     UnlabeledBlockMap      unlabeledContinues_;
     LabeledBlockMap        labeledBreaks_;
     LabeledBlockMap        labeledContinues_;
 
-    static const uint32_t NO_FUNCTION_NAME_INDEX = UINT32_MAX;
-    JS_STATIC_ASSERT(NO_FUNCTION_NAME_INDEX > CallSiteDesc::FUNCTION_NAME_INDEX_MAX);
-
   public:
     FunctionCompiler(ModuleCompiler &m, ParseNode *fn, LifoAlloc &lifo)
       : m_(m),
         lifo_(lifo),
         fn_(fn),
-        functionNameIndex_(NO_FUNCTION_NAME_INDEX),
         locals_(m.cx()),
         varInitializers_(m.cx()),
         alloc_(nullptr),
         graph_(nullptr),
         info_(nullptr),
         mirGen_(nullptr),
         curBlock_(nullptr),
         loopStack_(m.cx()),
@@ -2274,22 +2274,17 @@ class FunctionCompiler
         if (inDeadCode()) {
             *def = nullptr;
             return true;
         }
 
         uint32_t line, column;
         m_.tokenStream().srcCoords.lineNumAndColumnIndex(call.node_->pn_pos.begin, &line, &column);
 
-        if (functionNameIndex_ == NO_FUNCTION_NAME_INDEX) {
-            if (!m_.addFunctionName(FunctionName(fn_), &functionNameIndex_))
-                return false;
-        }
-
-        CallSiteDesc desc(line, column, functionNameIndex_);
+        CallSiteDesc desc(line, column);
         MAsmJSCall *ins = MAsmJSCall::New(alloc(), desc, callee, call.regArgs_, returnType,
                                           call.spIncrement_);
         if (!ins)
             return false;
 
         curBlock_->add(ins);
         *def = ins;
         return true;
@@ -5950,17 +5945,17 @@ GenerateEntry(ModuleCompiler &m, const A
     // In contrast to the system ABI, the Ion convention is that all registers
     // are clobbered by calls. Thus, we must save the caller's non-volatile
     // registers.
     //
     // NB: GenerateExits assumes that masm.framePushed() == 0 before
     // PushRegsInMask(NonVolatileRegs).
     masm.setFramePushed(0);
 
-    // See AsmJSFrameSize comment in Assembler-*.h.
+    // See AsmJSFrameSize comment in Assembler-shared.h.
 #if defined(JS_CODEGEN_ARM)
     masm.push(lr);
 #endif // JS_CODEGEN_ARM
 #if defined(JS_CODEGEN_MIPS)
     masm.push(ra);
 #endif
 
     masm.PushRegsInMask(NonVolatileRegs);
@@ -6025,17 +6020,17 @@ GenerateEntry(ModuleCompiler &m, const A
                 masm.storeDouble(ScratchDoubleReg, Address(StackPointer, iter->offsetFromArgBase()));
             }
             break;
         }
     }
 
     // Call into the real function.
     AssertStackAlignment(masm);
-    masm.call(CallSiteDesc::Entry(), func.code());
+    masm.call(func.code());
 
     // Pop the stack and recover the original 'argv' argument passed to the
     // trampoline (which was pushed on the stack).
     masm.freeStack(stackDec);
     masm.Pop(argv);
 
     // Store the return value in argv[0]
     switch (func.sig().retType().which()) {
@@ -6209,17 +6204,17 @@ static void
 GenerateFFIInterpreterExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit,
                            unsigned exitIndex, Label *throwLabel)
 {
     MacroAssembler &masm = m.masm();
     masm.align(CodeAlignment);
     m.setInterpExitOffset(exitIndex);
     masm.setFramePushed(0);
 
-    // See AsmJSFrameSize comment in Assembler-*.h.
+    // See AsmJSFrameSize comment in Assembler-shared.h.
 #if defined(JS_CODEGEN_ARM)
     masm.push(lr);
 #elif defined(JS_CODEGEN_MIPS)
     masm.push(ra);
 #endif
 
     // Store the frame pointer in AsmJSActivation::exitFP for stack unwinding.
     Register activation = ABIArgGenerator::NonArgReturnVolatileReg0;
@@ -6384,17 +6379,17 @@ static void
 GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit,
                          unsigned exitIndex, Label *throwLabel)
 {
     MacroAssembler &masm = m.masm();
     masm.align(CodeAlignment);
     m.setIonExitOffset(exitIndex);
     masm.setFramePushed(0);
 
-    // See AsmJSFrameSize comment in Assembler-*.h.
+    // See AsmJSFrameSize comment in Assembler-shared.h.
 #if defined(JS_CODEGEN_ARM)
     masm.push(lr);
 #elif defined(JS_CODEGEN_MIPS)
     masm.push(ra);
 #endif
 
     // Store the frame pointer in AsmJSActivation::exitFP for stack unwinding.
     Register activation = ABIArgGenerator::NonArgReturnVolatileReg0;
@@ -6866,20 +6861,20 @@ GenerateThrowExit(ModuleCompiler &m, Lab
 
     return !masm.oom();
 }
 
 static bool
 GenerateStubs(ModuleCompiler &m)
 {
     for (unsigned i = 0; i < m.module().numExportedFunctions(); i++) {
-        m.setEntryOffset(i);
+        m.startGeneratingEntry(i);
         if (!GenerateEntry(m, m.module().exportedFunction(i)))
             return false;
-        if (m.masm().oom())
+        if (m.masm().oom() || !m.finishGeneratingEntry(i))
             return false;
     }
 
     Label throwLabel;
 
     // The order of the iterations here is non-deterministic, since
     // m.allExits() is a hash keyed by pointer values!
     for (ModuleCompiler::ExitMap::Range r = m.allExits(); !r.empty(); r.popFront()) {
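
Note the shape of the GenerateStubs change just above: each entry stub is now bracketed by startGeneratingEntry/finishGeneratingEntry so that its [begin, end) offsets land in the code-range table alongside function bodies. A schematic of that bracketing, using toy stand-ins rather than the real ModuleCompiler/MacroAssembler classes:

// Schematic of the start/finish bracketing for entry stubs; Asm and Module
// are toy stand-ins, not the SpiderMonkey classes.
#include <cassert>
#include <cstdint>
#include <vector>

struct Asm {
    uint32_t off = 0;
    uint32_t currentOffset() const { return off; }
    void emitBytes(uint32_t n) { off += n; }  // pretend codegen
};

struct Module {
    std::vector<uint32_t> entryOffsets;       // like ExportedFunction::codeOffset_
    struct Range { uint32_t begin, end; };
    std::vector<Range> codeRanges;

    void startEntry(unsigned i, uint32_t off) { entryOffsets[i] = off; }
    bool finishEntry(unsigned i, uint32_t end) {
        codeRanges.push_back(Range{entryOffsets[i], end});
        return true;  // the real vector append can fail on OOM
    }
};

int main()
{
    Asm masm;
    Module m;
    m.entryOffsets.resize(1);
    m.startEntry(0, masm.currentOffset());           // startGeneratingEntry
    masm.emitBytes(32);                              // GenerateEntry body
    assert(m.finishEntry(0, masm.currentOffset()));  // finishGeneratingEntry
    assert(m.codeRanges[0].begin == 0 && m.codeRanges[0].end == 32);
    return 0;
}
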
--- a/js/src/jit/AsmJSFrameIterator.cpp
+++ b/js/src/jit/AsmJSFrameIterator.cpp
@@ -7,17 +7,17 @@
 #include "jit/AsmJSFrameIterator.h"
 
 #include "jit/AsmJS.h"
 #include "jit/AsmJSModule.h"
 
 using namespace js;
 using namespace js::jit;
 
-static uint8_t *
+static void *
 ReturnAddressFromFP(uint8_t *fp)
 {
     // In asm.js code, the "frame" consists of a single word: the saved
     // return address of the caller.
     static_assert(AsmJSFrameSize == sizeof(void*), "Frame size mismatch");
     return *(uint8_t**)fp;
 }
 
@@ -34,35 +34,39 @@ void
 AsmJSFrameIterator::operator++()
 {
     JS_ASSERT(!done());
     fp_ += callsite_->stackDepth();
     settle(ReturnAddressFromFP(fp_));
 }
 
 void
-AsmJSFrameIterator::settle(uint8_t *returnAddress)
+AsmJSFrameIterator::settle(void *returnAddress)
 {
-    callsite_ = module_->lookupCallSite(returnAddress);
-    JS_ASSERT(callsite_);
+    const AsmJSModule::CodeRange *codeRange = module_->lookupCodeRange(returnAddress);
+    JS_ASSERT(codeRange);
+    codeRange_ = codeRange;
 
-    if (callsite_->isEntry()) {
+    switch (codeRange->kind()) {
+      case AsmJSModule::CodeRange::Entry:
         fp_ = nullptr;
         JS_ASSERT(done());
         return;
+      case AsmJSModule::CodeRange::Function:
+        callsite_ = module_->lookupCallSite(returnAddress);
+        JS_ASSERT(callsite_);
+        break;
     }
-
-    JS_ASSERT(callsite_->isNormal());
 }
 
 JSAtom *
 AsmJSFrameIterator::functionDisplayAtom() const
 {
     JS_ASSERT(!done());
-    return module_->functionName(callsite_->functionNameIndex());
+    return reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_)->functionName(*module_);
 }
 
 unsigned
 AsmJSFrameIterator::computeLine(uint32_t *column) const
 {
     JS_ASSERT(!done());
     if (column)
         *column = callsite_->column();
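
The settle() rewrite above is the consumer of the new table: classify the code range containing the return address, stop the walk on an Entry trampoline, and consult the CallSite table only for Function ranges. A condensed model of that dispatch (stand-in types, not the SpiderMonkey classes):

// Condensed model of AsmJSFrameIterator::settle's dispatch on range kind.
#include <cassert>
#include <cstdint>

enum class Kind { Entry, Function };

struct Walker {
    uint8_t *fp;
    bool done() const { return !fp; }

    void settle(Kind rangeKind) {  // kind of the range holding the return pc
        switch (rangeKind) {
          case Kind::Entry:        // trampoline from C++: walk is finished
            fp = nullptr;
            break;
          case Kind::Function:     // ordinary asm.js frame: keep walking
            // the real code does lookupCallSite(returnAddress) here
            break;
        }
    }
};

int main()
{
    uint8_t frame = 0;
    Walker w{&frame};
    w.settle(Kind::Function);
    assert(!w.done());
    w.settle(Kind::Entry);
    assert(w.done());
    return 0;
}
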
--- a/js/src/jit/AsmJSFrameIterator.h
+++ b/js/src/jit/AsmJSFrameIterator.h
@@ -21,17 +21,21 @@ namespace jit { struct CallSite; }
 
 // Iterates over the frames of a single AsmJSActivation.
 class AsmJSFrameIterator
 {
     const AsmJSModule *module_;
     const jit::CallSite *callsite_;
     uint8_t *fp_;
 
-    void settle(uint8_t *returnAddress);
+    // Really, a const AsmJSModule::CodeRange*, but C++ has no forward
+    // declarations of nested classes, so use void* to avoid pulling in all of
+    // AsmJSModule.h.
+    const void *codeRange_;
+
+    void settle(void *returnAddress);
 
   public:
     explicit AsmJSFrameIterator() : module_(nullptr) {}
     explicit AsmJSFrameIterator(const AsmJSActivation &activation);
     void operator++();
     bool done() const { return !fp_; }
     JSAtom *functionDisplayAtom() const;
     unsigned computeLine(uint32_t *column) const;
--- a/js/src/jit/AsmJSModule.cpp
+++ b/js/src/jit/AsmJSModule.cpp
@@ -162,16 +162,17 @@ AsmJSModule::addSizeOfMisc(mozilla::Mall
                            size_t *asmJSModuleData)
 {
     *asmJSModuleCode += pod.totalBytes_;
     *asmJSModuleData += mallocSizeOf(this) +
                         globals_.sizeOfExcludingThis(mallocSizeOf) +
                         exits_.sizeOfExcludingThis(mallocSizeOf) +
                         exports_.sizeOfExcludingThis(mallocSizeOf) +
                         callSites_.sizeOfExcludingThis(mallocSizeOf) +
+                        codeRanges_.sizeOfExcludingThis(mallocSizeOf) +
                         functionNames_.sizeOfExcludingThis(mallocSizeOf) +
                         heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
                         functionCounts_.sizeOfExcludingThis(mallocSizeOf) +
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
                         profiledFunctions_.sizeOfExcludingThis(mallocSizeOf) +
 #endif
 #if defined(JS_ION_PERF)
                         perfProfiledBlocksFunctions_.sizeOfExcludingThis(mallocSizeOf) +
@@ -184,47 +185,86 @@ struct CallSiteRetAddrOffset
     const CallSiteVector &callSites;
     explicit CallSiteRetAddrOffset(const CallSiteVector &callSites) : callSites(callSites) {}
     uint32_t operator[](size_t index) const {
         return callSites[index].returnAddressOffset();
     }
 };
 
 const CallSite *
-AsmJSModule::lookupCallSite(uint8_t *returnAddress) const
+AsmJSModule::lookupCallSite(void *returnAddress) const
 {
     JS_ASSERT(isFinished());
 
-    uint32_t target = returnAddress - code_;
+    uint32_t target = ((uint8_t*)returnAddress) - code_;
     size_t lowerBound = 0;
     size_t upperBound = callSites_.length();
 
     size_t match;
     if (!BinarySearch(CallSiteRetAddrOffset(callSites_), lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &callSites_[match];
 }
 
+namespace js {
+
+// Create an ordering on CodeRange and pc offsets suitable for BinarySearch.
+// Stick these in the same namespace as AsmJSModule so that argument-dependent
+// lookup will find them.
+bool
+operator==(size_t pcOffset, const AsmJSModule::CodeRange &rhs)
+{
+    return pcOffset >= rhs.begin() && pcOffset < rhs.end();
+}
+bool
+operator<=(const AsmJSModule::CodeRange &lhs, const AsmJSModule::CodeRange &rhs)
+{
+    return lhs.begin() <= rhs.begin();
+}
+bool
+operator<(size_t pcOffset, const AsmJSModule::CodeRange &rhs)
+{
+    return pcOffset < rhs.begin();
+}
+
+} // namespace js
+
+const AsmJSModule::CodeRange *
+AsmJSModule::lookupCodeRange(void *pc) const
+{
+    JS_ASSERT(isFinished());
+
+    uint32_t target = ((uint8_t*)pc) - code_;
+    size_t lowerBound = 0;
+    size_t upperBound = codeRanges_.length();
+
+    size_t match;
+    if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
+        return nullptr;
+
+    return &codeRanges_[match];
+}
+
 struct HeapAccessOffset
 {
     const AsmJSHeapAccessVector &accesses;
     explicit HeapAccessOffset(const AsmJSHeapAccessVector &accesses) : accesses(accesses) {}
     uintptr_t operator[](size_t index) const {
         return accesses[index].offset();
     }
 };
 
 const AsmJSHeapAccess *
-AsmJSModule::lookupHeapAccess(uint8_t *pc) const
+AsmJSModule::lookupHeapAccess(void *pc) const
 {
     JS_ASSERT(isFinished());
     JS_ASSERT(containsPC(pc));
 
-    uint32_t target = pc - code_;
+    uint32_t target = ((uint8_t*)pc) - code_;
     size_t lowerBound = 0;
     size_t upperBound = heapAccesses_.length();
 
     size_t match;
     if (!BinarySearch(HeapAccessOffset(heapAccesses_), lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &heapAccesses_[match];
@@ -288,16 +328,21 @@ AsmJSModule::finish(ExclusiveContext *cx
     for (unsigned i = 0; i < numExportedFunctions(); i++)
         exportedFunction(i).updateCodeOffset(masm);
     for (unsigned i = 0; i < numExits(); i++)
         exit(i).updateOffsets(masm);
     for (size_t i = 0; i < callSites_.length(); i++) {
         CallSite &c = callSites_[i];
         c.setReturnAddressOffset(masm.actualOffset(c.returnAddressOffset()));
     }
+    for (size_t i = 0; i < codeRanges_.length(); i++) {
+        CodeRange &c = codeRanges_[i];
+        c.begin_ = masm.actualOffset(c.begin_);
+        c.end_ = masm.actualOffset(c.end_);
+    }
 #endif
     JS_ASSERT(pod.functionBytes_ % AsmJSPageSize == 0);
 
     // Absolute link metadata: absolute addresses that refer to some fixed
     // address in the address space.
     for (size_t i = 0; i < masm.numAsmJSAbsoluteLinks(); i++) {
         AsmJSAbsoluteLink src = masm.asmJSAbsoluteLink(i);
         AbsoluteLink link;
@@ -1079,16 +1124,17 @@ AsmJSModule::serializedSize() const
            pod.codeBytes_ +
            SerializedNameSize(globalArgumentName_) +
            SerializedNameSize(importArgumentName_) +
            SerializedNameSize(bufferArgumentName_) +
            SerializedVectorSize(globals_) +
            SerializedVectorSize(exits_) +
            SerializedVectorSize(exports_) +
            SerializedPodVectorSize(callSites_) +
+           SerializedPodVectorSize(codeRanges_) +
            SerializedVectorSize(functionNames_) +
            SerializedPodVectorSize(heapAccesses_) +
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
            SerializedVectorSize(profiledFunctions_) +
 #endif
            staticLinkData_.serializedSize();
 }
 
@@ -1099,16 +1145,17 @@ AsmJSModule::serialize(uint8_t *cursor) 
     cursor = WriteBytes(cursor, code_, pod.codeBytes_);
     cursor = SerializeName(cursor, globalArgumentName_);
     cursor = SerializeName(cursor, importArgumentName_);
     cursor = SerializeName(cursor, bufferArgumentName_);
     cursor = SerializeVector(cursor, globals_);
     cursor = SerializeVector(cursor, exits_);
     cursor = SerializeVector(cursor, exports_);
     cursor = SerializePodVector(cursor, callSites_);
+    cursor = SerializePodVector(cursor, codeRanges_);
     cursor = SerializeVector(cursor, functionNames_);
     cursor = SerializePodVector(cursor, heapAccesses_);
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     cursor = SerializeVector(cursor, profiledFunctions_);
 #endif
     cursor = staticLinkData_.serialize(cursor);
     return cursor;
 }
@@ -1125,16 +1172,17 @@ AsmJSModule::deserialize(ExclusiveContex
     (cursor = ReadBytes(cursor, code_, pod.codeBytes_)) &&
     (cursor = DeserializeName(cx, cursor, &globalArgumentName_)) &&
     (cursor = DeserializeName(cx, cursor, &importArgumentName_)) &&
     (cursor = DeserializeName(cx, cursor, &bufferArgumentName_)) &&
     (cursor = DeserializeVector(cx, cursor, &globals_)) &&
     (cursor = DeserializeVector(cx, cursor, &exits_)) &&
     (cursor = DeserializeVector(cx, cursor, &exports_)) &&
     (cursor = DeserializePodVector(cx, cursor, &callSites_)) &&
+    (cursor = DeserializePodVector(cx, cursor, &codeRanges_)) &&
     (cursor = DeserializeVector(cx, cursor, &functionNames_)) &&
     (cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) &&
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     (cursor = DeserializeVector(cx, cursor, &profiledFunctions_)) &&
 #endif
     (cursor = staticLinkData_.deserialize(cx, cursor));
 
     loadedFromCache_ = true;
@@ -1195,16 +1243,17 @@ AsmJSModule::clone(JSContext *cx, Scoped
     out.globalArgumentName_ = globalArgumentName_;
     out.importArgumentName_ = importArgumentName_;
     out.bufferArgumentName_ = bufferArgumentName_;
 
     if (!CloneVector(cx, globals_, &out.globals_) ||
         !CloneVector(cx, exits_, &out.exits_) ||
         !CloneVector(cx, exports_, &out.exports_) ||
         !ClonePodVector(cx, callSites_, &out.callSites_) ||
+        !ClonePodVector(cx, codeRanges_, &out.codeRanges_) ||
         !CloneVector(cx, functionNames_, &out.functionNames_) ||
         !ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) ||
         !staticLinkData_.clone(cx, &out.staticLinkData_))
     {
         return false;
     }
 
     out.loadedFromCache_ = loadedFromCache_;
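
The operator overloads added above are the glue that lets a generic binary search answer a point-in-range query: "pc == range" is defined as containment, "pc < range" as "pc precedes the range's start", and the CodeRange-vs-CodeRange <= exists only to satisfy BinarySearch's debug ordering assertions. A self-contained sketch of the same trick, with BinSearch standing in for mfbt::BinarySearch:

// Sketch of the mixed-type comparator trick used by lookupCodeRange.
// BinSearch mirrors mfbt::BinarySearch's loop (minus the debug asserts).
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct CodeRange {
    uint32_t begin_, end_;
    uint32_t begin() const { return begin_; }
    uint32_t end() const { return end_; }
};

// "Equal" means containment; "less" means the pc precedes the range.
bool operator==(size_t pc, const CodeRange &r) { return pc >= r.begin() && pc < r.end(); }
bool operator<(size_t pc, const CodeRange &r) { return pc < r.begin(); }

template <class Container>
bool BinSearch(const Container &c, size_t lo, size_t hi, size_t target, size_t *match)
{
    while (lo != hi) {
        size_t mid = lo + (hi - lo) / 2;
        const auto &v = c[mid];  // element type != target type; see BinarySearch.h
        if (target == v) { *match = mid; return true; }
        if (target < v)
            hi = mid;
        else
            lo = mid + 1;
    }
    *match = lo;
    return false;
}

int main()
{
    std::vector<CodeRange> rs = {{0, 64}, {64, 200}, {200, 256}};
    size_t m;
    assert(BinSearch(rs, 0, rs.size(), 70, &m) && m == 1);
    assert(!BinSearch(rs, 0, rs.size(), 300, &m));
    return 0;
}
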
--- a/js/src/jit/AsmJSModule.h
+++ b/js/src/jit/AsmJSModule.h
@@ -308,16 +308,43 @@ class AsmJSModule
         }
 
         size_t serializedSize() const;
         uint8_t *serialize(uint8_t *cursor) const;
         const uint8_t *deserialize(ExclusiveContext *cx, const uint8_t *cursor);
         bool clone(ExclusiveContext *cx, ExportedFunction *out) const;
     };
 
+    class CodeRange
+    {
+      public:
+        enum Kind { Entry, Function };
+
+      private:
+        Kind kind_;
+        uint32_t begin_;
+        uint32_t end_;
+        uint32_t functionNameIndex_;
+
+        friend class AsmJSModule;
+        CodeRange(Kind k, uint32_t begin, uint32_t end, uint32_t functionNameIndex)
+          : kind_(k), begin_(begin), end_(end), functionNameIndex_(functionNameIndex)
+        {}
+
+      public:
+        CodeRange() {}
+        Kind kind() const { return kind_; }
+        uint32_t begin() const { return begin_; }
+        uint32_t end() const { return end_; }
+        PropertyName *functionName(const AsmJSModule &module) const {
+            JS_ASSERT(kind_ == Function);
+            return module.functionNames_[functionNameIndex_].name();
+        }
+    };
+
     class Name
     {
         PropertyName *name_;
       public:
         Name() : name_(nullptr) {}
         MOZ_IMPLICIT Name(PropertyName *name) : name_(name) {}
         PropertyName *name() const { return name_; }
         PropertyName *&name() { return name_; }
@@ -474,16 +501,17 @@ class AsmJSModule
     // respect to caching.
     const uint32_t                        funcStart_;
     const uint32_t                        offsetToEndOfUseAsm_;
 
     Vector<Global,                 0, SystemAllocPolicy> globals_;
     Vector<Exit,                   0, SystemAllocPolicy> exits_;
     Vector<ExportedFunction,       0, SystemAllocPolicy> exports_;
     Vector<jit::CallSite,          0, SystemAllocPolicy> callSites_;
+    Vector<CodeRange,              0, SystemAllocPolicy> codeRanges_;
     Vector<Name,                   0, SystemAllocPolicy> functionNames_;
     Vector<jit::AsmJSHeapAccess,   0, SystemAllocPolicy> heapAccesses_;
     Vector<jit::IonScriptCounts*,  0, SystemAllocPolicy> functionCounts_;
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     Vector<ProfiledFunction,       0, SystemAllocPolicy> profiledFunctions_;
 #endif
 #if defined(JS_ION_PERF)
     Vector<ProfiledBlocksFunction, 0, SystemAllocPolicy> perfProfiledBlocksFunctions_;
@@ -661,27 +689,30 @@ class AsmJSModule
     /*************************************************************************/
     // These functions are called while parsing/compiling function bodies:
 
     void requireHeapLengthToBeAtLeast(uint32_t len) {
         JS_ASSERT(isFinishedWithModulePrologue() && !isFinishedWithFunctionBodies());
         if (len > pod.minHeapLength_)
             pod.minHeapLength_ = len;
     }
-    bool addFunctionName(PropertyName *name, uint32_t *nameIndex) {
+    bool addFunctionCodeRange(PropertyName *name, uint32_t begin, uint32_t end) {
         JS_ASSERT(isFinishedWithModulePrologue() && !isFinishedWithFunctionBodies());
         JS_ASSERT(name->isTenured());
-        if (functionNames_.length() > jit::CallSiteDesc::FUNCTION_NAME_INDEX_MAX)
+        JS_ASSERT(begin <= end);
+        JS_ASSERT_IF(!codeRanges_.empty(), codeRanges_.back().end() <= begin);
+        if (functionNames_.length() >= UINT32_MAX)
             return false;
-        *nameIndex = functionNames_.length();
-        return functionNames_.append(name);
+        CodeRange codeRange(CodeRange::Function, begin, end, functionNames_.length());
+        return functionNames_.append(name) && codeRanges_.append(codeRange);
     }
-    PropertyName *functionName(uint32_t i) const {
-        JS_ASSERT(isFinished());
-        return functionNames_[i].name();
+    bool addEntryCodeRange(unsigned exportIndex, uint32_t end) {
+        uint32_t begin = exports_[exportIndex].pod.codeOffset_;
+        CodeRange codeRange(CodeRange::Entry, begin, end, UINT32_MAX);
+        return codeRanges_.append(codeRange);
     }
     bool addExit(unsigned ffiIndex, unsigned *exitIndex) {
         JS_ASSERT(isFinishedWithModulePrologue() && !isFinishedWithFunctionBodies());
         if (SIZE_MAX - pod.funcPtrTableAndExitBytes_ < sizeof(ExitDatum))
             return false;
         uint32_t globalDataOffset = globalDataBytes();
         JS_STATIC_ASSERT(sizeof(ExitDatum) % sizeof(void*) == 0);
         pod.funcPtrTableAndExitBytes_ += sizeof(ExitDatum);
@@ -847,21 +878,25 @@ class AsmJSModule
     uint8_t *ionExitTrampoline(const Exit &exit) const {
         JS_ASSERT(isFinished());
         JS_ASSERT(exit.ionCodeOffset_);
         return code_ + exit.ionCodeOffset_;
     }
 
     // Lookup a callsite by the return pc (from the callee to the caller).
     // Return null if no callsite was found.
-    const jit::CallSite *lookupCallSite(uint8_t *returnAddress) const;
+    const jit::CallSite *lookupCallSite(void *returnAddress) const;
+
+    // Lookup the code range containing the given pc. Return null if no
+    // code range was found.
+    const CodeRange *lookupCodeRange(void *pc) const;
 
     // Lookup a heap access site by the pc which performs the access. Return
     // null if no heap access was found.
-    const jit::AsmJSHeapAccess *lookupHeapAccess(uint8_t *pc) const;
+    const jit::AsmJSHeapAccess *lookupHeapAccess(void *pc) const;
 
     // The global data section is placed after the executable code (i.e., at
     // offset codeBytes_) in the module's linear allocation. The global data
     // are laid out in this order:
     //   0. a pointer (padded up to 8 bytes to ensure double-alignment of
     //      globals) for the heap that was linked to the module.
     //   1. global variable state (elements are sizeof(uint64_t))
     //   2. interleaved function-pointer tables and exits. These are allocated
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -8578,17 +8578,17 @@ CodeGenerator::visitAsmJSCall(LAsmJSCall
     switch (callee.which()) {
       case MAsmJSCall::Callee::Internal:
         masm.call(mir->desc(), callee.internal());
         break;
       case MAsmJSCall::Callee::Dynamic:
         masm.call(mir->desc(), ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
         break;
       case MAsmJSCall::Callee::Builtin:
-        masm.call(mir->desc(), AsmJSImmPtr(callee.builtin()));
+        masm.call(AsmJSImmPtr(callee.builtin()));
         break;
     }
 
     if (mir->spIncrement())
         masm.reserveStack(mir->spIncrement());
 
     postAsmJSCall(ins);
     return true;
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -135,22 +135,16 @@ static MOZ_CONSTEXPR_VAR FloatRegister d
 // ldrd/strd (dual-register load/store) operate in a single cycle
 // when the address they are dealing with is 8 byte aligned.
 // Also, the ARM abi wants the stack to be 8 byte aligned at
 // function boundaries.  I'm trying to make sure this is always true.
 static const uint32_t StackAlignment = 8;
 static const uint32_t CodeAlignment = 8;
 static const bool StackKeptAligned = true;
 
-// As an invariant across architectures, within asm.js code:
-//    $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
-// To achieve this on ARM, the first instruction of the asm.js prologue pushes
-// lr without incrementing masm.framePushed.
-static const uint32_t AsmJSFrameSize = sizeof(void*);
-
 static const Scale ScalePointer = TimesFour;
 
 class Instruction;
 class InstBranchImm;
 uint32_t RM(Register r);
 uint32_t RS(Register r);
 uint32_t RD(Register r);
 uint32_t RT(Register r);
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -48,17 +48,17 @@ CodeGeneratorARM::generatePrologue()
     return true;
 }
 
 bool
 CodeGeneratorARM::generateAsmJSPrologue(Label *stackOverflowLabel)
 {
     JS_ASSERT(gen->compilingAsmJS());
 
-    // See comment in Assembler-arm.h about AsmJSFrameSize.
+    // See comment in Assembler-shared.h about AsmJSFrameSize.
     masm.push(lr);
 
     // The asm.js over-recursed handler wants to be able to assume that SP
     // points to the return address, so perform the check after pushing lr but
     // before pushing frameDepth.
     if (!omitOverRecursedCheck()) {
         masm.branchPtr(Assembler::AboveOrEqual,
                        AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1787,16 +1787,27 @@ MacroAssemblerARMCompat::callIon(Registe
         ma_callIonHalfPush(callee);
     } else {
         adjustFrame(sizeof(void*));
         ma_callIon(callee);
     }
 }
 
 void
+MacroAssemblerARMCompat::callIonFromAsmJS(Register callee)
+{
+    ma_callIonNoPush(callee);
+
+    // The Ion ABI has the callee pop the return address off the stack.
+    // The asm.js caller assumes that the call leaves sp unchanged, so bump
+    // the stack.
+    subPtr(Imm32(sizeof(void*)), sp);
+}
+
+void
 MacroAssemblerARMCompat::reserveStack(uint32_t amount)
 {
     if (amount)
         ma_sub(Imm32(amount), sp);
     adjustFrame(amount);
 }
 void
 MacroAssemblerARMCompat::freeStack(uint32_t amount)
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -559,43 +559,23 @@ class MacroAssemblerARMCompat : public M
         if (HasMOVWT())
             rs = L_MOVWT;
         else
             rs = L_LDR;
 
         ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
         ma_callIonHalfPush(ScratchRegister);
     }
-
-    void appendCallSite(const CallSiteDesc &desc) {
-        // Add an extra sizeof(void*) to include the return address that was
-        // pushed by the call instruction (see CallSite::stackDepth).
-        enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + AsmJSFrameSize));
-    }
-
     void call(const CallSiteDesc &desc, const Register reg) {
         call(reg);
-        appendCallSite(desc);
+        enoughMemory_ &= append(desc, currentOffset(), framePushed_);
     }
     void call(const CallSiteDesc &desc, Label *label) {
         call(label);
-        appendCallSite(desc);
-    }
-    void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
-        call(imm);
-        appendCallSite(desc);
-    }
-    void callIonFromAsmJS(const Register reg) {
-        ma_callIonNoPush(reg);
-        appendCallSite(CallSiteDesc::Exit());
-
-        // The Ion ABI has the callee pop the return address off the stack.
-        // The asm.js caller assumes that the call leaves sp unchanged, so bump
-        // the stack.
-        subPtr(Imm32(sizeof(void*)), sp);
+        enoughMemory_ &= append(desc, currentOffset(), framePushed_);
     }
 
     void branch(JitCode *c) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
         RelocStyle rs;
         if (HasMOVWT())
             rs = L_MOVWT;
@@ -1267,16 +1247,17 @@ class MacroAssemblerARMCompat : public M
     bool buildFakeExitFrame(Register scratch, uint32_t *offset);
 
     void callWithExitFrame(JitCode *target);
     void callWithExitFrame(JitCode *target, Register dynStack);
 
     // Makes an Ion call using the only two methods that it is sane for
     // indep code to make a call
     void callIon(Register callee);
+    void callIonFromAsmJS(Register callee);
 
     void reserveStack(uint32_t amount);
     void freeStack(uint32_t amount);
     void freeStack(Register amount);
 
     void add32(Register src, Register dest);
     void add32(Imm32 imm, Register dest);
     void add32(Imm32 imm, const Address &dest);
--- a/js/src/jit/mips/Assembler-mips.h
+++ b/js/src/jit/mips/Assembler-mips.h
@@ -147,22 +147,16 @@ static MOZ_CONSTEXPR_VAR FloatRegister f
 static MOZ_CONSTEXPR_VAR FloatRegister f30 = {FloatRegisters::f30};
 
 // MIPS CPUs can only load multibyte data that is "naturally"
 // four-byte-aligned, sp register should be eight-byte-aligned.
 static const uint32_t StackAlignment = 8;
 static const uint32_t CodeAlignment = 4;
 static const bool StackKeptAligned = true;
 
-// As an invariant across architectures, within asm.js code:
-//    $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
-// To achieve this on MIPS, the first instruction of the asm.js prologue pushes
-// ra without incrementing masm.framePushed.
-static const uint32_t AsmJSFrameSize = sizeof(void*);
-
 static const Scale ScalePointer = TimesFour;
 
 // MIPS instruction types
 //                +---------------------------------------------------------------+
 //                |    6      |    5    |    5    |    5    |    5    |    6      |
 //                +---------------------------------------------------------------+
 // Register type  |  Opcode   |    Rs   |    Rt   |    Rd   |    Sa   | Function  |
 //                +---------------------------------------------------------------+
--- a/js/src/jit/mips/CodeGenerator-mips.cpp
+++ b/js/src/jit/mips/CodeGenerator-mips.cpp
@@ -47,17 +47,17 @@ CodeGeneratorMIPS::generatePrologue()
     return true;
 }
 
 bool
 CodeGeneratorMIPS::generateAsmJSPrologue(Label *stackOverflowLabel)
 {
     JS_ASSERT(gen->compilingAsmJS());
 
-    // See comment in Assembler-mips.h about AsmJSFrameSize.
+    // See comment in Assembler-shared.h about AsmJSFrameSize.
     masm.push(ra);
 
     // The asm.js over-recursed handler wants to be able to assume that SP
     // points to the return address, so perform the check after pushing ra but
     // before pushing frameDepth.
     if (!omitOverRecursedCheck()) {
         masm.branchPtr(Assembler::AboveOrEqual,
                        AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
--- a/js/src/jit/mips/MacroAssembler-mips.cpp
+++ b/js/src/jit/mips/MacroAssembler-mips.cpp
@@ -1518,16 +1518,26 @@ MacroAssemblerMIPSCompat::callIon(Regist
     MOZ_ASSERT((framePushed() & 3) == 0);
     if ((framePushed() & 7) == 4) {
         ma_callIonHalfPush(callee);
     } else {
         adjustFrame(sizeof(uint32_t));
         ma_callIon(callee);
     }
 }
+void
+MacroAssemblerMIPSCompat::callIonFromAsmJS(Register callee)
+{
+    ma_callIonNoPush(callee);
+
+    // The Ion ABI has the callee pop the return address off the stack.
+    // The asm.js caller assumes that the call leaves sp unchanged, so bump
+    // the stack.
+    subPtr(Imm32(sizeof(void*)), StackPointer);
+}
 
 void
 MacroAssemblerMIPSCompat::reserveStack(uint32_t amount)
 {
     if (amount)
         ma_subu(StackPointer, StackPointer, Imm32(amount));
     adjustFrame(amount);
 }
--- a/js/src/jit/mips/MacroAssembler-mips.h
+++ b/js/src/jit/mips/MacroAssembler-mips.h
@@ -407,43 +407,23 @@ class MacroAssemblerMIPSCompat : public 
         call(CallReg);
     }
     void call(JitCode *c) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
         ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
         ma_callIonHalfPush(ScratchRegister);
     }
-
-    void appendCallSite(const CallSiteDesc &desc) {
-        // Add an extra sizeof(void*) to include the return address that was
-        // pushed by the call instruction (see CallSite::stackDepth).
-        enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + AsmJSFrameSize));
-    }
-
     void call(const CallSiteDesc &desc, const Register reg) {
         call(reg);
-        appendCallSite(desc);
+        enoughMemory_ &= append(desc, currentOffset(), framePushed_);
     }
     void call(const CallSiteDesc &desc, Label *label) {
         call(label);
-        appendCallSite(desc);
-    }
-    void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
-        call(imm);
-        appendCallSite(desc);
-    }
-    void callIonFromAsmJS(const Register reg) {
-        ma_callIonNoPush(reg);
-        appendCallSite(CallSiteDesc::Exit());
-
-        // The Ion ABI has the callee pop the return address off the stack.
-        // The asm.js caller assumes that the call leaves sp unchanged, so bump
-        // the stack.
-        subPtr(Imm32(sizeof(void*)), StackPointer);
+        enoughMemory_ &= append(desc, currentOffset(), framePushed_);
     }
 
     void branch(JitCode *c) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
         ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
         as_jr(ScratchRegister);
         as_nop();
@@ -989,16 +969,17 @@ public:
     bool buildFakeExitFrame(Register scratch, uint32_t *offset);
 
     void callWithExitFrame(JitCode *target);
     void callWithExitFrame(JitCode *target, Register dynStack);
 
     // Makes an Ion call using the only two methods that it is sane for
     // indep code to make a call
     void callIon(Register callee);
+    void callIonFromAsmJS(Register callee);
 
     void reserveStack(uint32_t amount);
     void freeStack(uint32_t amount);
     void freeStack(Register amount);
 
     void add32(Register src, Register dest);
     void add32(Imm32 imm, Register dest);
     void add32(Imm32 imm, const Address &dest);
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -572,58 +572,29 @@ class CodeLocationLabel
         return raw_;
     }
     uint8_t *offset() const {
         JS_ASSERT(state_ == Relative);
         return raw_;
     }
 };
 
-// Describes the user-visible properties of a callsite.
-//
-// A few general notes about the stack-walking supported by CallSite(Desc):
-//  - This information facilitates stack-walking performed by FrameIter which
-//    is used by Error.stack and other user-visible stack-walking functions.
-//  - Ion/asm.js calling conventions do not maintain a frame-pointer so
-//    stack-walking must lookup the stack depth based on the PC.
-//  - Stack-walking only occurs from C++ after a synchronous calls (JS-to-JS and
-//    JS-to-C++). Thus, we do not need to map arbitrary PCs to stack-depths,
-//    just the return address at callsites.
-//  - An exception to the above rule is the interrupt callback which can happen
-//    at arbitrary PCs. In such cases, we drop frames from the stack-walk. In
-//    the future when a full PC->stack-depth map is maintained, we handle this
-//    case.
+// While the frame-pointer chain allows the stack to be unwound without
+// metadata, Error.stack still needs to know the line/column of every call in
+// the chain. A CallSiteDesc describes the line/column of a single callsite.
+// A CallSiteDesc is created by callers of MacroAssembler.
 class CallSiteDesc
 {
     uint32_t line_;
     uint32_t column_;
-    uint32_t functionNameIndex_;
-
-    static const uint32_t sEntryTrampoline = UINT32_MAX;
-    static const uint32_t sExit = UINT32_MAX - 1;
-
   public:
-    static const uint32_t FUNCTION_NAME_INDEX_MAX = UINT32_MAX - 2;
-
     CallSiteDesc() {}
-
-    CallSiteDesc(uint32_t line, uint32_t column, uint32_t functionNameIndex)
-     : line_(line), column_(column), functionNameIndex_(functionNameIndex)
-    {}
-
-    static CallSiteDesc Entry() { return CallSiteDesc(0, 0, sEntryTrampoline); }
-    static CallSiteDesc Exit() { return CallSiteDesc(0, 0, sExit); }
-
-    bool isEntry() const { return functionNameIndex_ == sEntryTrampoline; }
-    bool isExit() const { return functionNameIndex_ == sExit; }
-    bool isNormal() const { return !(isEntry() || isExit()); }
-
-    uint32_t line() const { JS_ASSERT(isNormal()); return line_; }
-    uint32_t column() const { JS_ASSERT(isNormal()); return column_; }
-    uint32_t functionNameIndex() const { JS_ASSERT(isNormal()); return functionNameIndex_; }
+    CallSiteDesc(uint32_t line, uint32_t column) : line_(line), column_(column) {}
+    uint32_t line() const { return line_; }
+    uint32_t column() const { return column_; }
 };
 
 // Adds to CallSiteDesc the metadata necessary to walk the stack given an
 // initial stack-pointer.
 struct CallSite : public CallSiteDesc
 {
     uint32_t returnAddressOffset_;
     uint32_t stackDepth_;
@@ -636,23 +607,31 @@ struct CallSite : public CallSiteDesc
         returnAddressOffset_(returnAddressOffset),
         stackDepth_(stackDepth)
     { }
 
     void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
     uint32_t returnAddressOffset() const { return returnAddressOffset_; }
 
     // The stackDepth measures the amount of stack space pushed since the
-    // function was called. In particular, this includes the word pushed by the
-    // call instruction on x86/x64.
-    uint32_t stackDepth() const { JS_ASSERT(!isEntry()); return stackDepth_; }
+    // function was called. In particular, this includes the pushed return
+    // address on all archs (whether the call instruction pushes the return
+    // address (x86/x64) or the prologue does (ARM/MIPS)).
+    uint32_t stackDepth() const { return stackDepth_; }
 };
 
 typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
 
+// As an invariant across architectures, within asm.js code:
+//    $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
+// AsmJSFrameSize is 1 word, for the return address pushed by the call (or, in
+// the case of ARM/MIPS, by the first instruction of the prologue). This means
+// masm.framePushed never includes the pushed return address.
+static const uint32_t AsmJSFrameSize = sizeof(void*);
+
 // Summarizes a heap access made by asm.js code that needs to be patched later
 // and/or looked up by the asm.js signal handlers. Different architectures need
 // to know different things (x64: offset and length, ARM: where to patch in
 // heap length, x86: where to patch in heap length and base) hence the massive
 // #ifdefery.
 class AsmJSHeapAccess
 {
     uint32_t offset_;
@@ -816,17 +795,21 @@ class AssemblerShared
     void propagateOOM(bool success) {
         enoughMemory_ &= success;
     }
 
     bool oom() const {
         return !enoughMemory_;
     }
 
-    bool append(CallSite callsite) { return callsites_.append(callsite); }
+    bool append(const CallSiteDesc &desc, size_t currentOffset, size_t framePushed) {
+        // framePushed does not include AsmJSFrameSize, so add it in here (see
+        // CallSite::stackDepth).
+        return callsites_.append(CallSite(desc, currentOffset, framePushed + AsmJSFrameSize));
+    }
     CallSiteVector &&extractCallSites() { return Move(callsites_); }
 
     bool append(AsmJSHeapAccess access) { return asmJSHeapAccesses_.append(access); }
     AsmJSHeapAccessVector &&extractAsmJSHeapAccesses() { return Move(asmJSHeapAccesses_); }
 
     bool append(AsmJSGlobalAccess access) { return asmJSGlobalAccesses_.append(access); }
     size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
     AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
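
The relocated AsmJSFrameSize comment states an invariant that is easy to sanity-check with plain arithmetic: after the one-word return-address push, every adjustment to sp is mirrored in masm.framePushed. A toy check, assuming an 8-byte return address (a 64-bit target) purely for illustration:

// Toy check of: sp % StackAlignment == (AsmJSFrameSize + framePushed) % StackAlignment
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t StackAlignment = 16;
    const uint64_t AsmJSFrameSize = 8;  // one pushed return address (assumed 64-bit)
    uint64_t sp = 1 << 20;              // caller's sp, aligned at the call
    uint64_t framePushed = 0;           // masm.framePushed at prologue entry

    sp -= AsmJSFrameSize;               // 'call' (or the ARM/MIPS prologue push)
    assert(sp % StackAlignment == (AsmJSFrameSize + framePushed) % StackAlignment);

    sp -= 24; framePushed += 24;        // function body reserves stack
    assert(sp % StackAlignment == (AsmJSFrameSize + framePushed) % StackAlignment);
    return 0;
}
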
--- a/js/src/jit/shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/shared/MacroAssembler-x86-shared.h
@@ -662,36 +662,33 @@ class MacroAssemblerX86Shared : public A
         lea(Operand(address), dest);
     }
 
     // Builds an exit frame on the stack, with a return address to an internal
     // non-function. Returns offset to be passed to markSafepointAt().
     bool buildFakeExitFrame(Register scratch, uint32_t *offset);
     void callWithExitFrame(JitCode *target);
 
+    void call(const CallSiteDesc &desc, Label *label) {
+        call(label);
+        enoughMemory_ &= append(desc, currentOffset(), framePushed_);
+    }
+    void call(const CallSiteDesc &desc, Register reg) {
+        call(reg);
+        enoughMemory_ &= append(desc, currentOffset(), framePushed_);
+    }
     void callIon(Register callee) {
         call(callee);
     }
-
-    void appendCallSite(const CallSiteDesc &desc) {
-        // Add an extra sizeof(void*) to include the return address that was
-        // pushed by the call instruction (see CallSite::stackDepth).
-        enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + AsmJSFrameSize));
+    void callIonFromAsmJS(Register callee) {
+        call(callee);
     }
-
-    void call(const CallSiteDesc &desc, Label *label) {
-        call(label);
-        appendCallSite(desc);
-    }
-    void call(const CallSiteDesc &desc, Register reg) {
-        call(reg);
-        appendCallSite(desc);
-    }
-    void callIonFromAsmJS(Register reg) {
-        call(CallSiteDesc::Exit(), reg);
+    void call(AsmJSImmPtr target) {
+        mov(target, eax);
+        call(eax);
     }
 
     void checkStackAlignment() {
         // Exists for ARM compatibility.
     }
 
     CodeOffsetLabel labelForPatch() {
         return CodeOffsetLabel(size());
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -180,23 +180,16 @@ static MOZ_CONSTEXPR_VAR Register OsrFra
 static MOZ_CONSTEXPR_VAR Register PreBarrierReg = rdx;
 
 // GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
 // jitted code.
 static const uint32_t StackAlignment = 16;
 static const bool StackKeptAligned = false;
 static const uint32_t CodeAlignment = 8;
 
-// As an invariant across architectures, within asm.js code:
-//   $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
-// On x64, this naturally falls out of the fact that the 'call' instruction
-// pushes the return address on the stack and masm.framePushed = 0 at the first
-// instruction of the prologue.
-static const uint32_t AsmJSFrameSize = sizeof(void*);
-
 static const Scale ScalePointer = TimesEight;
 
 } // namespace jit
 } // namespace js
 
 #include "jit/shared/Assembler-x86-shared.h"
 
 namespace js {
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -95,25 +95,16 @@ class MacroAssemblerX64 : public MacroAs
     /////////////////////////////////////////////////////////////////
     void call(ImmWord target) {
         mov(target, rax);
         call(rax);
     }
     void call(ImmPtr target) {
         call(ImmWord(uintptr_t(target.value)));
     }
-    void call(AsmJSImmPtr target) {
-        mov(target, rax);
-        call(rax);
-    }
-
-    void call(const CallSiteDesc &desc, AsmJSImmPtr target) {
-        call(target);
-        appendCallSite(desc);
-    }
 
     // Refers to the upper 32 bits of a 64-bit Value operand.
     // On x86_64, the upper 32 bits do not necessarily only contain the type.
     Operand ToUpper32(Operand base) {
         switch (base.kind()) {
           case Operand::MEM_REG_DISP:
             return Operand(Register::FromCode(base.base()), base.disp() + 4);
 
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -108,23 +108,16 @@ static MOZ_CONSTEXPR_VAR Register AsmJSI
 #if defined(__GNUC__)
 static const uint32_t StackAlignment = 16;
 #else
 static const uint32_t StackAlignment = 4;
 #endif
 static const bool StackKeptAligned = false;
 static const uint32_t CodeAlignment = 8;
 
-// As an invariant across architectures, within asm.js code:
-//   $sp % StackAlignment = (AsmJSFrameSize + masm.framePushed) % StackAlignment
-// On x86, this naturally falls out of the fact that the 'call' instruction
-// pushes the return address on the stack and masm.framePushed = 0 at the first
-// instruction of the prologue.
-static const uint32_t AsmJSFrameSize = sizeof(void*);
-
 struct ImmTag : public Imm32
 {
     ImmTag(JSValueTag mask)
       : Imm32(int32_t(mask))
     { }
 };
 
 struct ImmType : public ImmTag
@@ -377,23 +370,16 @@ class Assembler : public AssemblerX86Sha
     }
     void call(ImmWord target) {
         call(ImmPtr((void*)target.value));
     }
     void call(ImmPtr target) {
         JmpSrc src = masm.call();
         addPendingJump(src, target, Relocation::HARDCODED);
     }
-    void call(AsmJSImmPtr target) {
-        // Moving to a register is suboptimal. To fix (use a single
-        // call-immediate instruction) we'll need to distinguish a new type of
-        // relative patch to an absolute address in AsmJSAbsoluteLink.
-        mov(target, eax);
-        call(eax);
-    }
 
     // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
     // this instruction.
     CodeOffsetLabel toggledCall(JitCode *target, bool enabled) {
         CodeOffsetLabel offset(size());
         JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
         addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
         JS_ASSERT(size() - offset.offset() == ToggledCallSize(nullptr));
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -1115,20 +1115,16 @@ class MacroAssemblerX86 : public MacroAs
     }
 
     void callWithExitFrame(JitCode *target, Register dynStack) {
         addPtr(Imm32(framePushed()), dynStack);
         makeFrameDescriptor(dynStack, JitFrame_IonJS);
         Push(dynStack);
         call(target);
     }
-    void call(const CallSiteDesc &desc, AsmJSImmPtr target) {
-        call(target);
-        appendCallSite(desc);
-    }
 
 #ifdef JSGC_GENERATIONAL
     void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label *label);
     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label *label);
 #endif
 };
 
 typedef MacroAssemblerX86 MacroAssemblerSpecific;
--- a/mfbt/BinarySearch.h
+++ b/mfbt/BinarySearch.h
@@ -36,17 +36,20 @@ BinarySearch(const Container& aContainer
              T aTarget, size_t* aMatchOrInsertionPoint)
 {
   MOZ_ASSERT(aBegin <= aEnd);
 
   size_t low = aBegin;
   size_t high = aEnd;
   while (low != high) {
     size_t middle = low + (high - low) / 2;
-    const T& middleValue = aContainer[middle];
+
+    // Allow any intermediate type so long as it provides a suitable ordering
+    // relation.
+    const auto& middleValue = aContainer[middle];
 
     MOZ_ASSERT(aContainer[low] <= aContainer[middle]);
     MOZ_ASSERT(aContainer[middle] <= aContainer[high - 1]);
     MOZ_ASSERT(aContainer[low] <= aContainer[high - 1]);
 
     if (aTarget == middleValue) {
       *aMatchOrInsertionPoint = middle;
       return true;
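
The one-line BinarySearch change is what makes the mixed-type comparisons above legal: T is deduced from the target argument (size_t for lookupCodeRange), while aContainer[middle] yields a CodeRange, which has no conversion to size_t. Binding the element with auto keeps the element's own type so the heterogeneous operators apply. A tiny illustration of the deduction difference:

// Why `const T&` had to become `const auto&` for the CodeRange search.
#include <cstddef>

struct CodeRange { unsigned begin_, end_; };

void sketch(const CodeRange *ranges, size_t middle)
{
    // const size_t &v = ranges[middle];  // error: no conversion from CodeRange
    const auto &v = ranges[middle];       // OK: v is a const CodeRange&
    (void)v;
}

int main() { CodeRange r = {0, 1}; sketch(&r, 0); return 0; }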