Bug 937540 part 6 - Pass TempAllocator to IonAllocPolicy. r=luke
author: Jan de Mooij <jdemooij@mozilla.com>
date: Mon, 18 Nov 2013 13:14:08 +0100
changeset 170686 a42ee99942e1be6d423689d1ff9eceb3c4d86b01
parent 170685 67278f2624971c8da0ac8461d3c709570731020d
child 170687 8bbb6bcfcd50749f1f1d7d3d627118e69e53bbda
push id: 3224
push user: lsblakk@mozilla.com
push date: Tue, 04 Feb 2014 01:06:49 +0000
treeherder: mozilla-beta@60c04d0987f1
reviewers: luke
bugs: 937540
milestone: 28.0a1
Bug 937540 part 6 - Pass TempAllocator to IonAllocPolicy. r=luke
js/src/jit/AliasAnalysis.cpp
js/src/jit/AliasAnalysis.h
js/src/jit/BacktrackingAllocator.cpp
js/src/jit/BacktrackingAllocator.h
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineCompiler.h
js/src/jit/BaselineJIT.cpp
js/src/jit/BytecodeAnalysis.cpp
js/src/jit/BytecodeAnalysis.h
js/src/jit/CodeGenerator.cpp
js/src/jit/Ion.cpp
js/src/jit/IonAllocPolicy.h
js/src/jit/IonAnalysis.cpp
js/src/jit/IonAnalysis.h
js/src/jit/IonBuilder.cpp
js/src/jit/IonBuilder.h
js/src/jit/LICM.cpp
js/src/jit/LIR-Common.h
js/src/jit/LIR.cpp
js/src/jit/LIR.h
js/src/jit/LinearScan.cpp
js/src/jit/LinearScan.h
js/src/jit/LiveRangeAllocator.cpp
js/src/jit/LiveRangeAllocator.h
js/src/jit/Lowering.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGraph.cpp
js/src/jit/RangeAnalysis.cpp
js/src/jit/RegisterAllocator.cpp
js/src/jit/StupidAllocator.cpp
js/src/jit/ValueNumbering.cpp
js/src/jit/arm/BaselineCompiler-arm.cpp
js/src/jit/arm/BaselineCompiler-arm.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/shared/BaselineCompiler-shared.cpp
js/src/jit/shared/BaselineCompiler-shared.h
js/src/jit/shared/BaselineCompiler-x86-shared.cpp
js/src/jit/shared/BaselineCompiler-x86-shared.h
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
js/src/jit/shared/Lowering-shared.cpp
js/src/jit/x64/BaselineCompiler-x64.cpp
js/src/jit/x64/BaselineCompiler-x64.h
js/src/jit/x86/BaselineCompiler-x86.cpp
js/src/jit/x86/BaselineCompiler-x86.h
js/src/jsinfer.cpp
js/src/jsinfer.h
--- a/js/src/jit/AliasAnalysis.cpp
+++ b/js/src/jit/AliasAnalysis.cpp
@@ -14,16 +14,51 @@
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::Array;
 
+namespace js {
+namespace jit {
+
+class LoopAliasInfo : public TempObject
+{
+  private:
+    LoopAliasInfo *outer_;
+    MBasicBlock *loopHeader_;
+    MDefinitionVector invariantLoads_;
+
+  public:
+    LoopAliasInfo(TempAllocator &alloc, LoopAliasInfo *outer, MBasicBlock *loopHeader)
+      : outer_(outer), loopHeader_(loopHeader), invariantLoads_(alloc)
+    { }
+
+    MBasicBlock *loopHeader() const {
+        return loopHeader_;
+    }
+    LoopAliasInfo *outer() const {
+        return outer_;
+    }
+    bool addInvariantLoad(MDefinition *ins) {
+        return invariantLoads_.append(ins);
+    }
+    const MDefinitionVector& invariantLoads() const {
+        return invariantLoads_;
+    }
+    MDefinition *firstInstruction() const {
+        return *loopHeader_->begin();
+    }
+};
+
+} // namespace jit
+} // namespace js
+
 namespace {
 
 // Iterates over the flags in an AliasSet.
 class AliasSetIterator
 {
   private:
     uint32_t flags;
     unsigned pos;
@@ -123,37 +158,40 @@ IonSpewAliasInfo(const char *pre, MDefin
 // having an implicit dependency on the last instruction of the loop header, so that
 // it's never moved before the loop header.
 //
 // The algorithm depends on the invariant that both control instructions and effectful
 // instructions (stores) are never hoisted.
 bool
 AliasAnalysis::analyze()
 {
-    Array<MDefinitionVector, AliasSet::NumCategories> stores;
+    Vector<MDefinitionVector, AliasSet::NumCategories, IonAllocPolicy> stores(alloc());
 
     // Initialize to the first instruction.
     MDefinition *firstIns = *graph_.begin()->begin();
-    for (unsigned i=0; i < AliasSet::NumCategories; i++) {
-        if (!stores[i].append(firstIns))
+    for (unsigned i = 0; i < AliasSet::NumCategories; i++) {
+        MDefinitionVector defs(alloc());
+        if (!defs.append(firstIns))
+            return false;
+        if (!stores.append(OldMove(defs)))
             return false;
     }
 
     // Type analysis may have inserted new instructions. Since this pass depends
     // on the instruction number ordering, all instructions are renumbered.
     // We start with 1 because some passes use 0 to denote failure.
     uint32_t newId = 1;
 
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
         if (mir->shouldCancel("Alias Analysis (main loop)"))
             return false;
 
         if (block->isLoopHeader()) {
             IonSpew(IonSpew_Alias, "Processing loop header %d", block->id());
-            loop_ = new(graph_.alloc()) LoopAliasInfo(loop_, *block);
+            loop_ = new(alloc()) LoopAliasInfo(alloc(), loop_, *block);
         }
 
         for (MDefinitionIterator def(*block); def; def++) {
             def->setId(newId++);
 
             AliasSet set = def->getAliasSet();
             if (set.isNone())
                 continue;
@@ -203,17 +241,17 @@ AliasAnalysis::analyze()
 
         if (block->isLoopBackedge()) {
             JS_ASSERT(loop_->loopHeader() == block->loopHeaderOfBackedge());
             IonSpew(IonSpew_Alias, "Processing loop backedge %d (header %d)", block->id(),
                     loop_->loopHeader()->id());
             LoopAliasInfo *outerLoop = loop_->outer();
             MInstruction *firstLoopIns = *loop_->loopHeader()->begin();
 
-            const InstructionVector &invariant = loop_->invariantLoads();
+            const MDefinitionVector &invariant = loop_->invariantLoads();
 
             for (unsigned i = 0; i < invariant.length(); i++) {
                 MDefinition *ins = invariant[i];
                 AliasSet set = ins->getAliasSet();
                 JS_ASSERT(set.isLoad());
 
                 bool hasAlias = false;
                 for (AliasSetIterator iter(set); iter; iter++) {
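
The OldMove dance above is the subtle part of this hunk: once MDefinitionVector carries a stateful IonAllocPolicy, each inner vector must be constructed with the allocator before anything can be appended to it, and appending it to the outer vector must move rather than copy (OldMove is SpiderMonkey's pre-C++11 move emulation). Here is a minimal standalone analogue using C++17 std::pmr; the names are stand-ins, not the real jit classes:

    #include <cstddef>
    #include <memory_resource>
    #include <utility>
    #include <vector>

    struct Def {};                                  // stand-in for MDefinition
    constexpr size_t kNumCategories = 8;            // stand-in for AliasSet::NumCategories
    using DefVector = std::pmr::vector<Def*>;       // stand-in for MDefinitionVector

    int main() {
        std::pmr::monotonic_buffer_resource arena;  // bump arena; like TempAllocator, freed all at once

        // Analogue of: Vector<MDefinitionVector, N, IonAllocPolicy> stores(alloc());
        std::pmr::vector<DefVector> stores(&arena);

        Def first;
        for (size_t i = 0; i < kNumCategories; i++) {
            DefVector defs(&arena);                 // each inner vector borrows the same arena
            defs.push_back(&first);
            stores.push_back(std::move(defs));      // stores.append(OldMove(defs)) in the patch
        }
    }
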
--- a/js/src/jit/AliasAnalysis.h
+++ b/js/src/jit/AliasAnalysis.h
@@ -8,55 +8,29 @@
 #define jit_AliasAnalysis_h
 
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
 
 namespace js {
 namespace jit {
 
+class LoopAliasInfo;
 class MIRGraph;
 
-typedef Vector<MDefinition *, 4, IonAllocPolicy> InstructionVector;
-
-class LoopAliasInfo : public TempObject
-{
-  private:
-    LoopAliasInfo *outer_;
-    MBasicBlock *loopHeader_;
-    InstructionVector invariantLoads_;
-
-  public:
-    LoopAliasInfo(LoopAliasInfo *outer, MBasicBlock *loopHeader)
-      : outer_(outer), loopHeader_(loopHeader)
-    { }
-
-    MBasicBlock *loopHeader() const {
-        return loopHeader_;
-    }
-    LoopAliasInfo *outer() const {
-        return outer_;
-    }
-    bool addInvariantLoad(MDefinition *ins) {
-        return invariantLoads_.append(ins);
-    }
-    const InstructionVector& invariantLoads() const {
-        return invariantLoads_;
-    }
-    MDefinition *firstInstruction() const {
-        return *loopHeader_->begin();
-    }
-};
-
 class AliasAnalysis
 {
     MIRGenerator *mir;
     MIRGraph &graph_;
     LoopAliasInfo *loop_;
 
+    TempAllocator &alloc() const {
+        return graph_.alloc();
+    }
+
   public:
     AliasAnalysis(MIRGenerator *mir, MIRGraph &graph);
     bool analyze();
 };
 
 } // namespace jit
 } // namespace js
 
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -41,17 +41,17 @@ BacktrackingAllocator::init()
 
     hotcode.setAllocator(lifoAlloc);
 
     // Partition the graph into hot and cold sections, for helping to make
     // splitting decisions. Since we don't have any profiling data this is a
     // crapshoot, so just mark the bodies of inner loops as hot and everything
     // else as cold.
 
-    LiveInterval *hotcodeInterval = new(alloc()) LiveInterval(0);
+    LiveInterval *hotcodeInterval = LiveInterval::New(alloc(), 0);
 
     LBlock *backedge = nullptr;
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock *block = graph.getBlock(i);
 
         // If we see a loop header, mark the backedge so we know when we have
         // hit the end of the loop. Don't process the loop immediately, so that
         // if there is an inner loop we will ignore the outer backedge.
@@ -222,17 +222,17 @@ BacktrackingAllocator::tryGroupRegisters
             return false;
         reg1->setGroup(group0);
         return true;
     }
 
     if (LifetimesOverlap(reg0, reg1))
         return true;
 
-    VirtualRegisterGroup *group = new(alloc()) VirtualRegisterGroup();
+    VirtualRegisterGroup *group = new(alloc()) VirtualRegisterGroup(alloc());
     if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
         return false;
 
     reg0->setGroup(group);
     reg1->setGroup(group);
     return true;
 }
 
@@ -294,27 +294,27 @@ BacktrackingAllocator::tryGroupReusedReg
             return true;
         }
         if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
             reg.setMustCopyInput();
             return true;
         }
     }
 
-    LiveInterval *preInterval = new(alloc()) LiveInterval(interval->vreg(), 0);
+    LiveInterval *preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
     for (size_t i = 0; i < interval->numRanges(); i++) {
         const LiveInterval::Range *range = interval->getRange(i);
         JS_ASSERT(range->from <= inputOf(reg.ins()));
 
         CodePosition to = (range->to <= outputOf(reg.ins())) ? range->to : outputOf(reg.ins());
         if (!preInterval->addRange(range->from, to))
             return false;
     }
 
-    LiveInterval *postInterval = new(alloc()) LiveInterval(interval->vreg(), 0);
+    LiveInterval *postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
     if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
         return false;
 
     LiveIntervalVector newIntervals;
     if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
         return false;
 
     distributeUses(interval, newIntervals);
@@ -949,17 +949,17 @@ BacktrackingAllocator::resolveControlFlo
             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                 LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
                 JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
 
                 LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
                 LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
                 JS_ASSERT(from);
 
-                LMoveGroup *moves = predecessor->getExitMoveGroup();
+                LMoveGroup *moves = predecessor->getExitMoveGroup(alloc());
                 if (!addMove(moves, from, to))
                     return false;
             }
         }
 
         // Resolve split intervals with moves
         BitSet *live = liveIn[mSuccessor->id()];
 
@@ -975,21 +975,21 @@ BacktrackingAllocator::resolveControlFlo
                         continue;
                     if (to->covers(outputOf(predecessor->lastId())))
                         continue;
 
                     LiveInterval *from = reg.intervalFor(outputOf(predecessor->lastId()));
 
                     if (mSuccessor->numPredecessors() > 1) {
                         JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
-                        LMoveGroup *moves = predecessor->getExitMoveGroup();
+                        LMoveGroup *moves = predecessor->getExitMoveGroup(alloc());
                         if (!addMove(moves, from, to))
                             return false;
                     } else {
-                        LMoveGroup *moves = successor->getEntryMoveGroup();
+                        LMoveGroup *moves = successor->getEntryMoveGroup(alloc());
                         if (!addMove(moves, from, to))
                             return false;
                     }
                 }
             }
         }
     }
 
@@ -1317,17 +1317,17 @@ BacktrackingAllocator::dumpAllocations()
 #endif // DEBUG
 }
 
 bool
 BacktrackingAllocator::addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
                                        LiveInterval *spillInterval,
                                        CodePosition from, CodePosition to)
 {
-    LiveInterval *interval = new(alloc()) LiveInterval(vreg, 0);
+    LiveInterval *interval = LiveInterval::New(alloc(), vreg, 0);
     interval->setSpillInterval(spillInterval);
     return interval->addRange(from, to) && intervals.append(interval);
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Heuristic Methods
 ///////////////////////////////////////////////////////////////////////////////
 
@@ -1506,40 +1506,40 @@ BacktrackingAllocator::trySplitAcrossHot
         if (!hotRange->contains(interval->getRange(i))) {
             coldCode = true;
             break;
         }
     }
     if (!coldCode)
         return true;
 
-    LiveInterval *hotInterval = new(alloc()) LiveInterval(interval->vreg(), 0);
+    LiveInterval *hotInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
     LiveInterval *preInterval = nullptr, *postInterval = nullptr;
 
     // Accumulate the ranges of hot and cold code in the interval. Note that
     // we are only comparing with the single hot range found, so the cold code
     // may contain separate hot ranges.
     Vector<LiveInterval::Range, 1, SystemAllocPolicy> hotList, coldList;
     for (size_t i = 0; i < interval->numRanges(); i++) {
         LiveInterval::Range hot, coldPre, coldPost;
         interval->getRange(i)->intersect(hotRange, &coldPre, &hot, &coldPost);
 
         if (!hot.empty() && !hotInterval->addRange(hot.from, hot.to))
             return false;
 
         if (!coldPre.empty()) {
             if (!preInterval)
-                preInterval = new(alloc()) LiveInterval(interval->vreg(), 0);
+                preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
             if (!preInterval->addRange(coldPre.from, coldPre.to))
                 return false;
         }
 
         if (!coldPost.empty()) {
             if (!postInterval)
-                postInterval = new(alloc()) LiveInterval(interval->vreg(), 0);
+                postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
             if (!postInterval->addRange(coldPost.from, coldPost.to))
                 return false;
         }
     }
 
     JS_ASSERT(preInterval || postInterval);
     JS_ASSERT(hotInterval->numRanges());
 
@@ -1582,18 +1582,18 @@ BacktrackingAllocator::trySplitAfterLast
         }
     }
 
     if (!lastRegisterFrom.pos() || lastRegisterFrom == lastUse) {
         // Can't trim non-register uses off the end by splitting.
         return true;
     }
 
-    LiveInterval *preInterval = new(alloc()) LiveInterval(interval->vreg(), 0);
-    LiveInterval *postInterval = new(alloc()) LiveInterval(interval->vreg(), 0);
+    LiveInterval *preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
+    LiveInterval *postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
 
     for (size_t i = 0; i < interval->numRanges(); i++) {
         const LiveInterval::Range *range = interval->getRange(i);
 
         if (range->from < lastRegisterTo) {
             CodePosition to = (range->to <= lastRegisterTo) ? range->to : lastRegisterTo;
             if (!preInterval->addRange(range->from, to))
                 return false;
@@ -1626,17 +1626,17 @@ BacktrackingAllocator::splitAtAllRegiste
     uint32_t vreg = interval->vreg();
 
     // If this LiveInterval is the result of an earlier split which created a
     // spill interval, that spill interval covers the whole range, so we don't
     // need to create a new one.
     bool spillIntervalIsNew = false;
     LiveInterval *spillInterval = interval->spillInterval();
     if (!spillInterval) {
-        spillInterval = new(alloc()) LiveInterval(vreg, 0);
+        spillInterval = LiveInterval::New(alloc(), vreg, 0);
         spillIntervalIsNew = true;
     }
 
     CodePosition spillStart = interval->start();
     if (isRegisterDefinition(interval)) {
         // Treat the definition of the interval as a register use so that it
         // can be split and spilled ASAP.
         CodePosition from = interval->start();
@@ -1718,32 +1718,32 @@ BacktrackingAllocator::splitAcrossCalls(
     uint32_t vreg = interval->vreg();
 
     // If this LiveInterval is the result of an earlier split which created a
     // spill interval, that spill interval covers the whole range, so we don't
     // need to create a new one.
     bool spillIntervalIsNew = false;
     LiveInterval *spillInterval = interval->spillInterval();
     if (!spillInterval) {
-        spillInterval = new(alloc()) LiveInterval(vreg, 0);
+        spillInterval = LiveInterval::New(alloc(), vreg, 0);
         spillIntervalIsNew = true;
 
         for (size_t i = 0; i < interval->numRanges(); i++) {
             const LiveInterval::Range *range = interval->getRange(i);
             CodePosition from = range->from < spillStart ? spillStart : range->from;
             if (!spillInterval->addRange(from, range->to))
                 return false;
         }
     }
 
     LiveIntervalVector newIntervals;
 
     CodePosition lastRegisterUse;
     if (spillStart != interval->start()) {
-        LiveInterval *newInterval = new(alloc()) LiveInterval(vreg, 0);
+        LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
         newInterval->setSpillInterval(spillInterval);
         if (!newIntervals.append(newInterval))
             return false;
         lastRegisterUse = interval->start();
     }
 
     int activeCallPosition = callPositions.length() - 1;
     for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
@@ -1774,17 +1774,17 @@ BacktrackingAllocator::splitAcrossCalls(
                         if (range->from <= lastRegisterUse && range->to <= iter->pos) {
                             useNewInterval = true;
                             break;
                         }
                     }
                 }
             }
             if (useNewInterval) {
-                LiveInterval *newInterval = new(alloc()) LiveInterval(vreg, 0);
+                LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
                 newInterval->setSpillInterval(spillInterval);
                 if (!newIntervals.append(newInterval))
                     return false;
             }
             newIntervals.back()->addUse(new(alloc()) UsePosition(iter->use, iter->pos));
             lastRegisterUse = iter->pos;
         } else {
             JS_ASSERT(spillIntervalIsNew);
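
The mechanical change in this file is that `new(alloc()) LiveInterval(...)` becomes `LiveInterval::New(alloc(), ...)`. A static factory makes sense here because the interval's internal containers presumably need the allocator too, and the factory can forward it to the constructor in one place. One plausible shape of that factory, sketched against a hypothetical bump-arena stand-in for TempAllocator:

    #include <cstddef>
    #include <cstdint>
    #include <new>

    class Arena {                                   // hypothetical stand-in for TempAllocator
        alignas(std::max_align_t) char buf_[4096];
        size_t used_ = 0;
      public:
        void *allocate(size_t bytes) {
            bytes = (bytes + alignof(std::max_align_t) - 1) & ~(alignof(std::max_align_t) - 1);
            if (used_ + bytes > sizeof(buf_))
                return nullptr;                     // OOM signalled by nullptr, never thrown
            void *p = buf_ + used_;
            used_ += bytes;
            return p;
        }
    };

    class LiveInterval {
        uint32_t vreg_, index_;
        LiveInterval(uint32_t vreg, uint32_t index) : vreg_(vreg), index_(index) {}
      public:
        // Factory replacing a bare placement-new expression: it can also hand
        // the allocator to members when the class grows allocator-aware vectors.
        static LiveInterval *New(Arena &alloc, uint32_t vreg, uint32_t index) {
            void *p = alloc.allocate(sizeof(LiveInterval));
            return p ? new (p) LiveInterval(vreg, index) : nullptr;
        }
    };

    int main() {
        Arena alloc;
        LiveInterval *interval = LiveInterval::New(alloc, /*vreg=*/1, /*index=*/0);
        return interval ? 0 : 1;                    // callers must check for OOM
    }
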
--- a/js/src/jit/BacktrackingAllocator.h
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -31,18 +31,18 @@ struct VirtualRegisterGroup : public Tem
     Vector<uint32_t, 2, IonAllocPolicy> registers;
 
     // Desired physical register to use for registers in the group.
     LAllocation allocation;
 
     // Spill location to be shared by registers in the group.
     LAllocation spill;
 
-    VirtualRegisterGroup()
-      : allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
+    VirtualRegisterGroup(TempAllocator &alloc)
+      : registers(alloc), allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
     {}
 
     uint32_t canonicalReg() {
         uint32_t minimum = registers[0];
         for (size_t i = 1; i < registers.length(); i++)
             minimum = Min(minimum, registers[i]);
         return minimum;
     }
@@ -62,16 +62,19 @@ class BacktrackingVirtualRegister : publ
     CodePosition canonicalSpillExclude_;
 
     // If this register is associated with a group of other registers,
     // information about the group. This structure is shared between all
     // registers in the group.
     VirtualRegisterGroup *group_;
 
   public:
+    BacktrackingVirtualRegister(TempAllocator &alloc)
+      : VirtualRegister(alloc)
+    {}
     void setMustCopyInput() {
         mustCopyInput_ = true;
     }
     bool mustCopyInput() {
         return mustCopyInput_;
     }
 
     void setCanonicalSpill(LAllocation alloc) {
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -19,26 +19,26 @@
 
 #include "jsscriptinlines.h"
 
 #include "vm/Interpreter-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
-BaselineCompiler::BaselineCompiler(JSContext *cx, HandleScript script)
-  : BaselineCompilerSpecific(cx, script),
+BaselineCompiler::BaselineCompiler(JSContext *cx, TempAllocator &alloc, HandleScript script)
+  : BaselineCompilerSpecific(cx, alloc, script),
     modifiesArguments_(false)
 {
 }
 
 bool
 BaselineCompiler::init()
 {
-    if (!analysis_.init(cx->runtime()->gsnCache))
+    if (!analysis_.init(alloc_, cx->runtime()->gsnCache))
         return false;
 
     if (!labels_.init(script->length))
         return false;
 
     for (size_t i = 0; i < script->length; i++)
         new (&labels_[i]) Label();
 
--- a/js/src/jit/BaselineCompiler.h
+++ b/js/src/jit/BaselineCompiler.h
@@ -189,17 +189,17 @@ class BaselineCompiler : public Baseline
     // If a script has more |nslots| than this, then emit code to do an
     // early stack check.
     static const unsigned EARLY_STACK_CHECK_SLOT_COUNT = 128;
     bool needsEarlyStackCheck() const {
         return script->nslots > EARLY_STACK_CHECK_SLOT_COUNT;
     }
 
   public:
-    BaselineCompiler(JSContext *cx, HandleScript script);
+    BaselineCompiler(JSContext *cx, TempAllocator &alloc, HandleScript script);
     bool init();
 
     MethodStatus compile();
 
   private:
     MethodStatus emitBody();
 
     bool emitPrologue();
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -221,17 +221,17 @@ jit::BaselineCompile(JSContext *cx, Hand
     LifoAlloc alloc(BASELINE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
 
     TempAllocator *temp = alloc.new_<TempAllocator>(&alloc);
     if (!temp)
         return Method_Error;
 
     IonContext ictx(cx, temp);
 
-    BaselineCompiler compiler(cx, script);
+    BaselineCompiler compiler(cx, *temp, script);
     if (!compiler.init())
         return Method_Error;
 
     AutoFlushCache afc("BaselineJIT", cx->runtime()->jitRuntime());
     MethodStatus status = compiler.compile();
 
     JS_ASSERT_IF(status == Method_Compiled, script->hasBaselineScript());
     JS_ASSERT_IF(status != Method_Compiled, !script->hasBaselineScript());
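
This hunk shows the ownership chain the patch establishes: a LifoAlloc arena owns a TempAllocator constructed inside it, the compiler borrows that allocator for the duration of one compilation, and everything is released together when the LifoAlloc dies. A rough analogue using std::pmr, with stand-in names rather than the real API:

    #include <memory_resource>
    #include <vector>

    struct Compiler {                               // stand-in for BaselineCompiler
        std::pmr::vector<int> labels;
        explicit Compiler(std::pmr::memory_resource &alloc) : labels(&alloc) {}
    };

    int main() {
        std::pmr::monotonic_buffer_resource lifo;   // the per-compilation LifoAlloc analogue
        Compiler compiler(lifo);                    // BaselineCompiler(cx, *temp, script)
        compiler.labels.push_back(0);
    }                                               // arena releases everything at once here
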
--- a/js/src/jit/BytecodeAnalysis.cpp
+++ b/js/src/jit/BytecodeAnalysis.cpp
@@ -8,19 +8,19 @@
 
 #include "jsopcode.h"
 #include "jit/IonSpewer.h"
 #include "jsopcodeinlines.h"
 
 using namespace js;
 using namespace js::jit;
 
-BytecodeAnalysis::BytecodeAnalysis(JSScript *script)
+BytecodeAnalysis::BytecodeAnalysis(TempAllocator &alloc, JSScript *script)
   : script_(script),
-    infos_(),
+    infos_(alloc),
     usesScopeChain_(false),
     hasTryFinally_(false),
     hasSetArg_(false)
 {
 }
 
 // Bytecode range containing only catch or finally code.
 struct CatchFinallyRange
@@ -35,28 +35,28 @@ struct CatchFinallyRange
     }
 
     bool contains(uint32_t offset) const {
         return start <= offset && offset < end;
     }
 };
 
 bool
-BytecodeAnalysis::init(GSNCache &gsn)
+BytecodeAnalysis::init(TempAllocator &alloc, GSNCache &gsn)
 {
     if (!infos_.growByUninitialized(script_->length))
         return false;
 
     jsbytecode *end = script_->code + script_->length;
 
     // Clear all BytecodeInfo.
     mozilla::PodZero(infos_.begin(), infos_.length());
     infos_[0].init(/*stackDepth=*/0);
 
-    Vector<CatchFinallyRange, 0, IonAllocPolicy> catchFinallyRanges;
+    Vector<CatchFinallyRange, 0, IonAllocPolicy> catchFinallyRanges(alloc);
 
     for (jsbytecode *pc = script_->code; pc < end; pc += GetBytecodeLength(pc)) {
         JSOp op = JSOp(*pc);
         unsigned offset = pc - script_->code;
 
         IonSpew(IonSpew_BaselineOp, "Analyzing op @ %d (end=%d): %s",
                 int(pc - script_->code), int(end - script_->code), js_CodeName[op]);
 
--- a/js/src/jit/BytecodeAnalysis.h
+++ b/js/src/jit/BytecodeAnalysis.h
@@ -40,19 +40,19 @@ class BytecodeAnalysis
     JSScript *script_;
     Vector<BytecodeInfo, 0, IonAllocPolicy> infos_;
 
     bool usesScopeChain_;
     bool hasTryFinally_;
     bool hasSetArg_;
 
   public:
-    explicit BytecodeAnalysis(JSScript *script);
+    explicit BytecodeAnalysis(TempAllocator &alloc, JSScript *script);
 
-    bool init(GSNCache &gsn);
+    bool init(TempAllocator &alloc, GSNCache &gsn);
 
     BytecodeInfo &info(jsbytecode *pc) {
         JS_ASSERT(infos_[pc - script_->code].initialized);
         return infos_[pc - script_->code];
     }
 
     BytecodeInfo *maybeInfo(jsbytecode *pc) {
         if (infos_[pc - script_->code].initialized)
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -5824,17 +5824,17 @@ CodeGenerator::link(JSContext *cx, types
                            ? frameDepth_
                            : FrameSizeClass::FromDepth(frameDepth_).frameSize();
 
     // We encode safepoints after the OSI-point offsets have been determined.
     encodeSafepoints();
 
     // List of possible scripts that this graph may call. Currently this is
     // only tracked when compiling for parallel execution.
-    CallTargetVector callTargets;
+    CallTargetVector callTargets(alloc());
     if (executionMode == ParallelExecution)
         AddPossibleCallees(cx, graph.mir(), callTargets);
 
     IonScript *ionScript =
       IonScript::New(cx, recompileInfo,
                      graph.totalSlotCount(), scriptFrameSize, snapshots_.size(),
                      bailouts_.length(), graph.numConstants(),
                      safepointIndices_.length(), osiIndices_.length(),
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -1635,17 +1635,17 @@ IonCompile(JSContext *cx, JSScript *scri
     if (!info)
         return AbortReason_Alloc;
 
     BaselineInspector inspector(script);
 
     AutoFlushCache afc("IonCompile", cx->runtime()->jitRuntime());
 
     AutoTempAllocatorRooter root(cx, temp);
-    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList();
+    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(*temp);
     if (!constraints)
         return AbortReason_Alloc;
 
     IonBuilder *builder = alloc->new_<IonBuilder>((JSContext *) nullptr, cx->compartment(), temp, graph, constraints,
                                                   &inspector, info, baselineFrame);
     if (!builder)
         return AbortReason_Alloc;
 
--- a/js/src/jit/IonAllocPolicy.h
+++ b/js/src/jit/IonAllocPolicy.h
@@ -80,17 +80,51 @@ class AutoTempAllocatorRooter : private 
 
   private:
     TempAllocator *temp;
     MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 };
 
 class IonAllocPolicy
 {
+    TempAllocator &alloc_;
+
   public:
+    IonAllocPolicy(TempAllocator &alloc)
+      : alloc_(alloc)
+    {}
+    void *malloc_(size_t bytes) {
+        return alloc_.allocate(bytes);
+    }
+    void *calloc_(size_t bytes) {
+        void *p = alloc_.allocate(bytes);
+        if (p)
+            memset(p, 0, bytes);
+        return p;
+    }
+    void *realloc_(void *p, size_t oldBytes, size_t bytes) {
+        void *n = malloc_(bytes);
+        if (!n)
+            return n;
+        memcpy(n, p, Min(oldBytes, bytes));
+        return n;
+    }
+    void free_(void *p) {
+    }
+    void reportAllocOverflow() const {
+    }
+};
+
+// Deprecated. Don't use this. Will be removed after everything has been
+// converted to IonAllocPolicy.
+class OldIonAllocPolicy
+{
+  public:
+    OldIonAllocPolicy()
+    {}
     void *malloc_(size_t bytes) {
         return GetIonContext()->temp->allocate(bytes);
     }
     void *calloc_(size_t bytes) {
         void *p = GetIonContext()->temp->allocate(bytes);
         if (p)
             memset(p, 0, bytes);
         return p;
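
The contrast between the two policies is the heart of the patch: OldIonAllocPolicy reaches through thread-local state (GetIonContext()->temp) on every allocation, while the new IonAllocPolicy carries a TempAllocator reference and travels inside each container, the way js::Vector stores its allocation policy. A compilable distillation of that pattern, with a fixed-buffer arena standing in for TempAllocator:

    #include <algorithm>
    #include <cstddef>
    #include <cstring>

    class TempAlloc {                               // stand-in for TempAllocator
        char buf_[1 << 16];
        size_t used_ = 0;
      public:
        void *allocate(size_t bytes) {
            bytes = (bytes + 7) & ~size_t(7);
            if (used_ + bytes > sizeof(buf_))
                return nullptr;
            void *p = buf_ + used_;
            used_ += bytes;
            return p;
        }
    };

    class AllocPolicy {                             // stateful, like IonAllocPolicy
        TempAlloc &alloc_;
      public:
        explicit AllocPolicy(TempAlloc &alloc) : alloc_(alloc) {}
        void *malloc_(size_t bytes) { return alloc_.allocate(bytes); }
        void *realloc_(void *p, size_t oldBytes, size_t bytes) {
            // A bump arena cannot grow in place: allocate fresh, copy, abandon the old block.
            void *n = malloc_(bytes);
            if (n && p)
                std::memcpy(n, p, std::min(oldBytes, bytes));
            return n;
        }
        void free_(void *) {}                       // no-op; the arena dies wholesale
    };

    template <typename T>                           // POD elements only, for brevity
    class PolicyVector : AllocPolicy {              // policy stored via inheritance, as js::Vector does
        T *data_ = nullptr;
        size_t len_ = 0, cap_ = 0;
      public:
        explicit PolicyVector(AllocPolicy p) : AllocPolicy(p) {}
        bool append(const T &t) {
            if (len_ == cap_) {
                size_t ncap = cap_ ? cap_ * 2 : 4;
                T *n = static_cast<T*>(realloc_(data_, cap_ * sizeof(T), ncap * sizeof(T)));
                if (!n)
                    return false;                   // OOM reported to the caller, never thrown
                data_ = n;
                cap_ = ncap;
            }
            data_[len_++] = t;
            return true;
        }
        size_t length() const { return len_; }
    };

    int main() {
        TempAlloc alloc;
        PolicyVector<int> v{AllocPolicy(alloc)};    // Vector<int, 0, IonAllocPolicy> v(alloc);
        return v.append(1) && v.append(2) ? 0 : 1;
    }

Holding the allocator by reference does mean a container must never outlive its TempAllocator; that invariant already holds in Ion, where per-compilation data dies with the compilation's LifoAlloc.
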
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -1102,17 +1102,17 @@ jit::BuildDominatorTree(MIRGraph &graph)
     // If compiling with OSR, many blocks will self-dominate.
     // Without OSR, there is only one root block which dominates all.
     if (!graph.osrBlock())
         JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
 #endif
     // Now, iterate through the dominator tree and annotate every
     // block with its index in the pre-order traversal of the
     // dominator tree.
-    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;
+    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());
 
     // The index of the current block in the CFG traversal.
     size_t index = 0;
 
     // Add all self-dominating blocks to the worklist.
     // This includes all roots. Order does not matter.
     for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
         MBasicBlock *block = *i;
@@ -1671,23 +1671,23 @@ TryEliminateTypeBarrier(MTypeBarrier *ba
 // to cover the ranges of both checks.
 //
 // Bounds checks are added to a hash map and since the hash function ignores
 // differences in constant offset, this offers a fast way to find redundant
 // checks.
 bool
 jit::EliminateRedundantChecks(MIRGraph &graph)
 {
-    BoundsCheckMap checks;
+    BoundsCheckMap checks(graph.alloc());
 
     if (!checks.init())
         return false;
 
     // Stack for pre-order CFG traversal.
-    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;
+    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());
 
     // The index of the current block in the CFG traversal.
     size_t index = 0;
 
     // Add all self-dominating blocks to the worklist.
     // This includes all roots. Order does not matter.
     for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
         MBasicBlock *block = *i;
@@ -2102,17 +2102,17 @@ jit::AnalyzeNewScriptProperties(JSContex
 
     MIRGraph graph(&temp);
     CompileInfo info(script, fun,
                      /* osrPc = */ nullptr, /* constructing = */ false,
                      DefinitePropertiesAnalysis);
 
     AutoTempAllocatorRooter root(cx, &temp);
 
-    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList();
+    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(temp);
     BaselineInspector inspector(script);
     IonBuilder builder(cx, cx->compartment(), &temp, &graph, constraints,
                        &inspector, &info, /* baselineFrame = */ nullptr);
 
     if (!builder.build()) {
         if (builder.abortReason() == AbortReason_Alloc)
             return false;
         return true;
--- a/js/src/jit/IonAnalysis.h
+++ b/js/src/jit/IonAnalysis.h
@@ -92,23 +92,25 @@ struct LinearTerm
     {
     }
 };
 
 // General linear sum of the form 'x1*n1 + x2*n2 + ... + n'
 class LinearSum
 {
   public:
-    LinearSum()
-      : constant_(0)
+    LinearSum(TempAllocator &alloc)
+      : terms_(alloc),
+        constant_(0)
     {
     }
 
     LinearSum(const LinearSum &other)
-      : constant_(other.constant_)
+      : terms_(other.terms_.allocPolicy()),
+        constant_(other.constant_)
     {
         terms_.appendAll(other.terms_);
     }
 
     bool multiply(int32_t scale);
     bool add(const LinearSum &other);
     bool add(MDefinition *term, int32_t scale);
     bool add(int32_t constant);
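
The interesting line in the LinearSum copy constructor is `terms_(other.terms_.allocPolicy())`: with no ambient IonContext to fall back on, a copy has to inherit its allocator from the source container. std::pmr has the same requirement and makes for a compact sketch (types simplified):

    #include <cstdint>
    #include <memory_resource>
    #include <vector>

    struct LinearSum {                              // simplified stand-in
        std::pmr::vector<int32_t> terms;
        int32_t constant = 0;

        explicit LinearSum(std::pmr::memory_resource &alloc)
          : terms(&alloc)
        {}
        LinearSum(const LinearSum &other)
          : terms(other.terms, other.terms.get_allocator()),  // the allocPolicy() analogue
            constant(other.constant)
        {}
    };

    int main() {
        std::pmr::monotonic_buffer_resource arena;
        LinearSum a(arena);
        a.terms.push_back(3);
        LinearSum b(a);                             // b.terms lives in the same arena as a.terms
    }

Without the explicit allocator argument, a plain copy would silently fall back to the default allocator, which is exactly the ambient-state dependence this patch is removing.
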
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -43,24 +43,30 @@ IonBuilder::IonBuilder(JSContext *analys
                        size_t inliningDepth, uint32_t loopDepth)
   : MIRGenerator(comp, temp, graph, info),
     backgroundCodegen_(nullptr),
     analysisContext(analysisContext),
     baselineFrame_(baselineFrame),
     abortReason_(AbortReason_Disable),
     reprSetHash_(nullptr),
     constraints_(constraints),
-    analysis_(info->script()),
+    analysis_(*temp, info->script()),
     thisTypes(nullptr),
     argTypes(nullptr),
     typeArray(nullptr),
     typeArrayHint(0),
     loopDepth_(loopDepth),
     callerResumePoint_(nullptr),
     callerBuilder_(nullptr),
+    cfgStack_(*temp),
+    loops_(*temp),
+    switches_(*temp),
+    labels_(*temp),
+    iterators_(*temp),
+    loopHeaders_(*temp),
     inspector(inspector),
     inliningDepth_(inliningDepth),
     numLoopRestarts_(0),
     failedBoundsCheck_(info->script()->failedBoundsCheck),
     failedShapeGuard_(info->script()->failedShapeGuard),
     nonStringIteration_(false),
     lazyArguments_(nullptr),
     inlineCallInfo_(nullptr)
@@ -537,17 +543,17 @@ bool
 IonBuilder::init()
 {
     if (!types::TypeScript::FreezeTypeSets(constraints(), script(),
                                            &thisTypes, &argTypes, &typeArray))
     {
         return false;
     }
 
-    if (!analysis().init(gsn))
+    if (!analysis().init(alloc(), gsn))
         return false;
 
     return true;
 }
 
 bool
 IonBuilder::build()
 {
@@ -1174,18 +1180,18 @@ IonBuilder::traverseBytecode()
         // popped by this opcode either:
         //
         //   (1) Have the Folded flag set on them.
         //   (2) Have more uses than before compiling this op (the value is
         //       used as operand of a new MIR instruction).
         //
         // This is used to catch problems where IonBuilder pops a value without
         // adding any SSA uses and doesn't call setFoldedUnchecked on it.
-        Vector<MDefinition *, 4, IonAllocPolicy> popped;
-        Vector<size_t, 4, IonAllocPolicy> poppedUses;
+        Vector<MDefinition *, 4, IonAllocPolicy> popped(alloc());
+        Vector<size_t, 4, IonAllocPolicy> poppedUses(alloc());
         unsigned nuses = GetUseCount(script_, pc - script_->code);
 
         for (unsigned i = 0; i < nuses; i++) {
             MDefinition *def = current->peek(-int32_t(i + 1));
             if (!popped.append(def) || !poppedUses.append(def->defUseCount()))
                 return false;
         }
 #endif
@@ -3797,17 +3803,17 @@ IonBuilder::inlineScriptedCall(CallInfo 
     // Start inlining.
     LifoAlloc *lifoAlloc = alloc_->lifoAlloc();
     CompileInfo *info = lifoAlloc->new_<CompileInfo>(calleeScript, target,
                                                      (jsbytecode *)nullptr, callInfo.constructing(),
                                                      this->info().executionMode());
     if (!info)
         return false;
 
-    MIRGraphExits saveExits;
+    MIRGraphExits saveExits(alloc());
     AutoAccumulateExits aae(graph(), saveExits);
 
     // Build the graph.
     JS_ASSERT_IF(analysisContext, !analysisContext->isExceptionPending());
     IonBuilder inlineBuilder(analysisContext, compartment,
                              &alloc(), &graph(), constraints(), &inspector, info, nullptr,
                              inliningDepth_ + 1, loopDepth_);
     if (!inlineBuilder.buildInline(this, outerResumePoint, callInfo)) {
@@ -4151,17 +4157,17 @@ IonBuilder::inlineCallsite(ObjectVector 
             current->add(constFun);
             callInfo.setFun(constFun);
         }
 
         return inlineSingleCall(callInfo, target);
     }
 
     // Choose a subset of the targets for polymorphic inlining.
-    BoolVector choiceSet;
+    BoolVector choiceSet(alloc());
     uint32_t numInlined = selectInliningTargets(targets, callInfo, choiceSet);
     if (numInlined == 0)
         return InliningStatus_NotInlined;
 
     // Perform a polymorphic dispatch.
     if (!inlineCalls(callInfo, targets, originals, choiceSet, propCache))
         return InliningStatus_Error;
 
@@ -4173,17 +4179,17 @@ IonBuilder::inlineGenericFallback(JSFunc
                                   bool clonedAtCallsite)
 {
     // Generate a new block with all arguments on-stack.
     MBasicBlock *fallbackBlock = newBlock(dispatchBlock, pc);
     if (!fallbackBlock)
         return false;
 
     // Create a new CallInfo to track modified state within this block.
-    CallInfo fallbackInfo(callInfo.constructing());
+    CallInfo fallbackInfo(alloc(), callInfo.constructing());
     if (!fallbackInfo.init(callInfo))
         return false;
     fallbackInfo.popFormals(fallbackBlock);
     fallbackInfo.wrapArgs(alloc(), fallbackBlock);
 
     // Generate an MCall, which uses stateful |current|.
     setCurrentAndSpecializePhis(fallbackBlock);
     if (!makeCall(target, fallbackInfo, clonedAtCallsite))
@@ -4211,17 +4217,17 @@ IonBuilder::inlineTypeObjectFallback(Cal
     JS_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses());
     JS_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse());
 
     // This means that no resume points yet capture the MGetPropertyCache,
     // so everything from the MGetPropertyCache up until the call is movable.
     // We now move the MGetPropertyCache and friends into a fallback path.
 
     // Create a new CallInfo to track modified state within the fallback path.
-    CallInfo fallbackInfo(callInfo.constructing());
+    CallInfo fallbackInfo(alloc(), callInfo.constructing());
     if (!fallbackInfo.init(callInfo))
         return false;
 
     // Capture stack prior to the call operation. This captures the function.
     MResumePoint *preCallResumePoint =
         MResumePoint::New(alloc(), dispatchBlock, pc, callerResumePoint_, MResumePoint::ResumeAt);
     if (!preCallResumePoint)
         return false;
@@ -4393,17 +4399,17 @@ IonBuilder::inlineCalls(CallInfo &callIn
         dispatchBlock->add(funcDef);
 
         // Use the MConstant in the inline resume point and on stack.
         int funIndex = inlineBlock->entryResumePoint()->numOperands() - callInfo.numFormals();
         inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef);
         inlineBlock->rewriteSlot(funIndex, funcDef);
 
         // Create a new CallInfo to track modified state within the inline block.
-        CallInfo inlineInfo(callInfo.constructing());
+        CallInfo inlineInfo(alloc(), callInfo.constructing());
         if (!inlineInfo.init(callInfo))
             return false;
         inlineInfo.popFormals(inlineBlock);
         inlineInfo.setFun(funcDef);
         inlineInfo.wrapArgs(alloc(), inlineBlock);
 
         if (maybeCache) {
             JS_ASSERT(callInfo.thisArg() == maybeCache->object());
@@ -4725,17 +4731,17 @@ IonBuilder::jsop_funcall(uint32_t argc)
 
     int calleeDepth = -((int)argc + 2);
     int funcDepth = -((int)argc + 1);
 
     // If |Function.prototype.call| may be overridden, don't optimize callsite.
     types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet();
     JSFunction *native = getSingleCallTarget(calleeTypes);
     if (!native || !native->isNative() || native->native() != &js_fun_call) {
-        CallInfo callInfo(false);
+        CallInfo callInfo(alloc(), false);
         if (!callInfo.init(current, argc))
             return false;
         return makeCall(native, callInfo, false);
     }
     current->peek(calleeDepth)->setFoldedUnchecked();
 
     // Extract call target.
     types::TemporaryTypeSet *funTypes = current->peek(funcDepth)->resultTypeSet();
@@ -4760,17 +4766,17 @@ IonBuilder::jsop_funcall(uint32_t argc)
         MPassArg *pass = MPassArg::New(alloc(), undef);
         current->add(pass);
         current->push(pass);
     } else {
         // |this| becomes implicit in the call.
         argc -= 1;
     }
 
-    CallInfo callInfo(false);
+    CallInfo callInfo(alloc(), false);
     if (!callInfo.init(current, argc))
         return false;
 
     // Try inlining call
     if (argc > 0 && makeInliningDecision(target, callInfo) && target->isInterpreted())
         return inlineScriptedCall(callInfo, target);
 
     // Call without inlining.
@@ -4780,17 +4786,17 @@ IonBuilder::jsop_funcall(uint32_t argc)
 bool
 IonBuilder::jsop_funapply(uint32_t argc)
 {
     int calleeDepth = -((int)argc + 2);
 
     types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet();
     JSFunction *native = getSingleCallTarget(calleeTypes);
     if (argc != 2) {
-        CallInfo callInfo(false);
+        CallInfo callInfo(alloc(), false);
         if (!callInfo.init(current, argc))
             return false;
         return makeCall(native, callInfo, false);
     }
 
     // Disable compilation if the second argument to |apply| cannot be guaranteed
     // to be either definitely |arguments| or definitely not |arguments|.
     MDefinition *argument = current->peek(-1);
@@ -4798,17 +4804,17 @@ IonBuilder::jsop_funapply(uint32_t argc)
         argument->mightBeType(MIRType_Magic) &&
         argument->type() != MIRType_Magic)
     {
         return abort("fun.apply with MaybeArguments");
     }
 
     // Fallback to regular call if arg 2 is not definitely |arguments|.
     if (argument->type() != MIRType_Magic) {
-        CallInfo callInfo(false);
+        CallInfo callInfo(alloc(), false);
         if (!callInfo.init(current, argc))
             return false;
         return makeCall(native, callInfo, false);
     }
 
     if (!native ||
         !native->isNative() ||
         native->native() != js_fun_apply)
@@ -4876,26 +4882,26 @@ IonBuilder::jsop_funapplyarguments(uint3
     }
 
     // When inlining we have the arguments the function gets called with
     // and can optimize even more, by just calling the functions with the args.
     // We also try this path when doing the definite properties analysis, as we
     // can inline the apply() target and don't care about the actual arguments
     // that were passed in.
 
-    CallInfo callInfo(false);
+    CallInfo callInfo(alloc(), false);
 
     // Vp
     MPassArg *passVp = current->pop()->toPassArg();
     passVp->getArgument()->setFoldedUnchecked();
     passVp->replaceAllUsesWith(passVp->getArgument());
     passVp->block()->discard(passVp);
 
     // Arguments
-    MDefinitionVector args;
+    MDefinitionVector args(alloc());
     if (inliningDepth_) {
         if (!args.append(inlineCallInfo_->argv().begin(), inlineCallInfo_->argv().end()))
             return false;
     }
     callInfo.setArgs(&args);
 
     // This
     MPassArg *passThis = current->pop()->toPassArg();
@@ -4940,42 +4946,42 @@ IonBuilder::jsop_call(uint32_t argc, boo
             if (!observed->addType(types::Type::DoubleType(), alloc_->lifoAlloc()))
                 return false;
         }
     }
 
     int calleeDepth = -((int)argc + 2);
 
     // Acquire known call target if existent.
-    ObjectVector originals;
+    ObjectVector originals(alloc());
     bool gotLambda = false;
     types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet();
     if (calleeTypes) {
         if (!getPolyCallTargets(calleeTypes, constructing, originals, 4, &gotLambda))
             return false;
     }
     JS_ASSERT_IF(gotLambda, originals.length() <= 1);
 
     // If any call targets need to be cloned, look for existing clones to use.
     // Keep track of the originals as we need to case on them for poly inline.
     bool hasClones = false;
-    ObjectVector targets;
+    ObjectVector targets(alloc());
     for (uint32_t i = 0; i < originals.length(); i++) {
         JSFunction *fun = &originals[i]->as<JSFunction>();
         if (fun->hasScript() && fun->nonLazyScript()->shouldCloneAtCallsite) {
             if (JSFunction *clone = ExistingCloneFunctionAtCallsite(compartment, fun, script(), pc)) {
                 fun = clone;
                 hasClones = true;
             }
         }
         if (!targets.append(fun))
             return false;
     }
 
-    CallInfo callInfo(constructing);
+    CallInfo callInfo(alloc(), constructing);
     if (!callInfo.init(current, argc))
         return false;
 
     // Try inlining
     InliningStatus status = inlineCallsite(targets, originals, gotLambda, callInfo);
     if (status == InliningStatus_Inlined)
         return true;
     if (status == InliningStatus_Error)
@@ -5287,17 +5293,17 @@ IonBuilder::jsop_eval(uint32_t argc)
         // The 'this' value for the outer and eval scripts must be the
         // same. This is not guaranteed if a primitive string/number/etc.
         // is passed through to the eval invoke as the primitive may be
         // boxed into different objects if accessed via 'this'.
         JSValueType type = thisTypes->getKnownTypeTag();
         if (type != JSVAL_TYPE_OBJECT && type != JSVAL_TYPE_NULL && type != JSVAL_TYPE_UNDEFINED)
             return abort("Direct eval from script with maybe-primitive 'this'");
 
-        CallInfo callInfo(/* constructing = */ false);
+        CallInfo callInfo(alloc(), /* constructing = */ false);
         if (!callInfo.init(current, argc))
             return false;
         callInfo.unwrapArgs();
 
         callInfo.fun()->setFoldedUnchecked();
 
         MDefinition *scopeChain = current->scopeChain();
         MDefinition *string = callInfo.getArg(0);
@@ -5320,17 +5326,17 @@ IonBuilder::jsop_eval(uint32_t argc)
                 current->add(dynamicName);
 
                 MInstruction *thisv = MPassArg::New(alloc(), thisValue);
                 current->add(thisv);
 
                 current->push(dynamicName);
                 current->push(thisv);
 
-                CallInfo evalCallInfo(/* constructing = */ false);
+                CallInfo evalCallInfo(alloc(), /* constructing = */ false);
                 if (!evalCallInfo.init(current, /* argc = */ 0))
                     return false;
 
                 return makeCall(nullptr, evalCallInfo, false);
             }
         }
 
         MInstruction *filterArguments = MFilterArgumentsOrEval::New(alloc(), string);
@@ -8344,17 +8350,17 @@ IonBuilder::getPropTryCommonGetter(bool 
 
     // Spoof stack to expected state for call.
     pushConstant(ObjectValue(*commonGetter));
 
     MPassArg *wrapper = MPassArg::New(alloc(), obj);
     current->add(wrapper);
     current->push(wrapper);
 
-    CallInfo callInfo(false);
+    CallInfo callInfo(alloc(), false);
     if (!callInfo.init(current, 0))
         return false;
 
     // Inline if we can, otherwise, forget it and just generate a call.
     if (makeInliningDecision(commonGetter, callInfo) && commonGetter->isInterpreted()) {
         if (!inlineScriptedCall(callInfo, commonGetter))
             return false;
     } else {
@@ -8384,17 +8390,17 @@ CanInlinePropertyOpShapes(const Baseline
 bool
 IonBuilder::getPropTryInlineAccess(bool *emitted, PropertyName *name,
                                    bool barrier, types::TemporaryTypeSet *types)
 {
     JS_ASSERT(*emitted == false);
     if (current->peek(-1)->type() != MIRType_Object)
         return true;
 
-    BaselineInspector::ShapeVector shapes;
+    BaselineInspector::ShapeVector shapes(alloc());
     if (!inspector->maybeShapesForPropertyOp(pc, shapes))
         return false;
 
     if (shapes.empty() || !CanInlinePropertyOpShapes(shapes))
         return true;
 
     MIRType rvalType = MIRTypeFromValueType(types->getKnownTypeTag());
     if (barrier || IsNullOrUndefined(rvalType))
@@ -8631,17 +8637,17 @@ IonBuilder::setPropTryCommonSetter(bool 
     current->add(wrapper);
 
     MPassArg *arg = MPassArg::New(alloc(), value);
     current->push(arg);
     current->add(arg);
 
     // Call the setter. Note that we have to push the original value, not
     // the setter's return value.
-    CallInfo callInfo(false);
+    CallInfo callInfo(alloc(), false);
     if (!callInfo.init(current, 1))
         return false;
 
     // Ensure that we know we are calling a setter in case we inline it.
     callInfo.markAsSetter();
 
     // Inline the setter if we can.
     if (makeInliningDecision(commonSetter, callInfo) && commonSetter->isInterpreted()) {
@@ -8777,17 +8783,17 @@ IonBuilder::setPropTryInlineAccess(bool 
                                    MDefinition *value, bool barrier,
                                    types::TemporaryTypeSet *objTypes)
 {
     JS_ASSERT(*emitted == false);
 
     if (barrier)
         return true;
 
-    BaselineInspector::ShapeVector shapes;
+    BaselineInspector::ShapeVector shapes(alloc());
     if (!inspector->maybeShapesForPropertyOp(pc, shapes))
         return false;
 
     if (shapes.empty())
         return true;
 
     if (!CanInlinePropertyOpShapes(shapes))
         return true;
@@ -9496,17 +9502,18 @@ IonBuilder::bytecodeTypes(jsbytecode *pc
 {
     return types::TypeScript::BytecodeTypes(script(), pc, &typeArrayHint, typeArray);
 }
 
 TypeRepresentationSetHash *
 IonBuilder::getOrCreateReprSetHash()
 {
     if (!reprSetHash_) {
-        TypeRepresentationSetHash *hash = alloc_->lifoAlloc()->new_<TypeRepresentationSetHash>();
+        TypeRepresentationSetHash *hash =
+            alloc_->lifoAlloc()->new_<TypeRepresentationSetHash>(alloc());
         if (!hash || !hash->init())
             return nullptr;
 
         reprSetHash_ = hash;
     }
     return reprSetHash_;
 }
 
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -817,19 +817,20 @@ class CallInfo
     MDefinition *fun_;
     MDefinition *thisArg_;
     MDefinitionVector args_;
 
     bool constructing_;
     bool setter_;
 
   public:
-    CallInfo(bool constructing)
+    CallInfo(TempAllocator &alloc, bool constructing)
       : fun_(nullptr),
         thisArg_(nullptr),
+        args_(alloc),
         constructing_(constructing),
         setter_(false)
     { }
 
     bool init(CallInfo &callInfo) {
         JS_ASSERT(constructing_ == callInfo.constructing());
 
         fun_ = callInfo.fun();
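
Threading TempAllocator through every CallInfo, and through the half-dozen IonBuilder vectors earlier in this patch, means these allocations no longer depend on the thread-local IonContext that OldIonAllocPolicy consults. A sketch of the payoff with hypothetical stand-ins: two compilations, each with a private arena and no shared allocation state:

    #include <memory_resource>
    #include <thread>
    #include <vector>

    struct Builder {                                // stand-in for IonBuilder
        std::pmr::vector<int> cfgStack, loops, switches;
        explicit Builder(std::pmr::memory_resource &alloc)
          : cfgStack(&alloc), loops(&alloc), switches(&alloc)
        {}
    };

    int main() {
        auto compile = [] {
            std::pmr::monotonic_buffer_resource arena;  // per-compilation arena
            Builder builder(arena);
            builder.cfgStack.push_back(0);
        };                                          // arena freed when the compilation ends
        std::thread t1(compile), t2(compile);
        t1.join();
        t2.join();
    }
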
--- a/js/src/jit/LICM.cpp
+++ b/js/src/jit/LICM.cpp
@@ -13,17 +13,16 @@
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
 
 using namespace js;
 using namespace js::jit;
 
 namespace {
 
-typedef Vector<MBasicBlock*, 1, IonAllocPolicy> BlockQueue;
 typedef Vector<MInstruction*, 1, IonAllocPolicy> InstructionQueue;
 
 class Loop
 {
     MIRGenerator *mir;
 
   public:
     // Loop code may return three values:
@@ -52,16 +51,20 @@ class Loop
     MBasicBlock* preLoop_;
 
     // This indicates whether the loop contains calls or other things which
     // clobber most or all floating-point registers. In such loops,
     // floating-point constants should not be hoisted unless it enables further
     // hoisting.
     bool containsPossibleCall_;
 
+    TempAllocator &alloc() const {
+        return mir->alloc();
+    }
+
     bool hoistInstructions(InstructionQueue &toHoist);
 
     // Utility methods for invariance testing and instruction hoisting.
     bool isInLoop(MDefinition *ins);
     bool isBeforeLoop(MDefinition *ins);
     bool isLoopInvariant(MInstruction *ins);
 
     // This method determines if this block is hot within a loop.  That is, if it's
@@ -118,34 +121,35 @@ LICM::analyze()
     }
 
     return true;
 }
 
 Loop::Loop(MIRGenerator *mir, MBasicBlock *header)
   : mir(mir),
     header_(header),
-    containsPossibleCall_(false)
+    containsPossibleCall_(false),
+    worklist_(mir->alloc())
 {
     preLoop_ = header_->getPredecessor(0);
 }
 
 Loop::LoopReturn
 Loop::init()
 {
     IonSpew(IonSpew_LICM, "Loop identified, headed by block %d", header_->id());
     IonSpew(IonSpew_LICM, "footer is block %d", header_->backedge()->id());
 
     // The first predecessor of the loop header must dominate the header.
     JS_ASSERT(header_->id() > header_->getPredecessor(0)->id());
 
     // Loop from the backedge to the header, marking all visited blocks as
     // part of the loop. At the same time, add all hoistable instructions
     // (in RPO order) to the instruction worklist.
-    Vector<MBasicBlock *, 1, IonAllocPolicy> inlooplist;
+    Vector<MBasicBlock *, 1, IonAllocPolicy> inlooplist(alloc());
     if (!inlooplist.append(header_->backedge()))
         return LoopReturn_Error;
     header_->backedge()->mark();
 
     while (!inlooplist.empty()) {
         MBasicBlock *block = inlooplist.back();
 
         // Hoisting requires more finesse if the loop contains a block that
@@ -197,17 +201,17 @@ Loop::init()
     }
 
     return LoopReturn_Success;
 }
 
 bool
 Loop::optimize()
 {
-    InstructionQueue invariantInstructions;
+    InstructionQueue invariantInstructions(alloc());
 
     IonSpew(IonSpew_LICM, "These instructions are in the loop: ");
 
     while (!worklist_.empty()) {
         if (mir->shouldCancel("LICM (worklist)"))
             return false;
 
         MInstruction *ins = popFromWorklist();
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -100,16 +100,20 @@ class LMove
 
 class LMoveGroup : public LInstructionHelper<0, 0, 0>
 {
     js::Vector<LMove, 2, IonAllocPolicy> moves_;
 
   public:
     LIR_HEADER(MoveGroup)
 
+    LMoveGroup(TempAllocator &alloc)
+      : moves_(alloc)
+    { }
+
     void printOperands(FILE *fp);
 
     // Add a move which takes place simultaneously with all others in the group.
     bool add(LAllocation *from, LAllocation *to);
 
     // Add a move which takes place after existing moves in the group.
     bool addAfter(LAllocation *from, LAllocation *to);
 
--- a/js/src/jit/LIR.cpp
+++ b/js/src/jit/LIR.cpp
@@ -13,17 +13,21 @@
 #include "jit/IonSpewer.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 
 using namespace js;
 using namespace js::jit;
 
 LIRGraph::LIRGraph(MIRGraph *mir)
-  : numVirtualRegisters_(0),
+  : blocks_(mir->alloc()),
+    constantPool_(mir->alloc()),
+    safepoints_(mir->alloc()),
+    nonCallSafepoints_(mir->alloc()),
+    numVirtualRegisters_(0),
     numInstructions_(1), // First id is 1.
     localSlotCount_(0),
     argumentSlotCount_(0),
     entrySnapshot_(nullptr),
     osrBlock_(nullptr),
     mir_(*mir)
 {
 }
@@ -76,32 +80,32 @@ LBlock::lastId()
     LInstruction *last = *instructions_.rbegin();
     JS_ASSERT(last->id());
     if (last->numDefs())
         return last->getDef(last->numDefs() - 1)->virtualRegister();
     return last->id();
 }
 
 LMoveGroup *
-LBlock::getEntryMoveGroup()
+LBlock::getEntryMoveGroup(TempAllocator &alloc)
 {
     if (entryMoveGroup_)
         return entryMoveGroup_;
-    entryMoveGroup_ = new LMoveGroup;
+    entryMoveGroup_ = new LMoveGroup(alloc);
     JS_ASSERT(begin()->isLabel());
     insertAfter(*begin(), entryMoveGroup_);
     return entryMoveGroup_;
 }
 
 LMoveGroup *
-LBlock::getExitMoveGroup()
+LBlock::getExitMoveGroup(TempAllocator &alloc)
 {
     if (exitMoveGroup_)
         return exitMoveGroup_;
-    exitMoveGroup_ = new LMoveGroup;
+    exitMoveGroup_ = new LMoveGroup(alloc);
     insertBefore(*rbegin(), exitMoveGroup_);
     return exitMoveGroup_;
 }
 
 static size_t
 TotalOperandCount(MResumePoint *mir)
 {
     size_t accum = mir->numOperands();
@@ -337,20 +341,20 @@ LInstruction::print(FILE *fp)
             if (i != numTemps() - 1)
                 fprintf(fp, ", ");
         }
         fprintf(fp, ")");
     }
 }
 
 void
-LInstruction::initSafepoint()
+LInstruction::initSafepoint(TempAllocator &alloc)
 {
     JS_ASSERT(!safepoint_);
-    safepoint_ = new LSafepoint();
+    safepoint_ = new LSafepoint(alloc);
     JS_ASSERT(safepoint_);
 }
 
 bool
 LMoveGroup::add(LAllocation *from, LAllocation *to)
 {
 #ifdef DEBUG
     JS_ASSERT(*from != *to);
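
getEntryMoveGroup and getExitMoveGroup are lazy accessors, so after this change the allocator must arrive as a parameter at every call site, even when the group already exists; that is why the parameter spreads through both register allocators in this patch. A minimal sketch of the shape (names simplified):

    #include <memory_resource>
    #include <new>

    struct MoveGroup {};                            // stand-in for LMoveGroup

    class Block {                                   // stand-in for LBlock
        MoveGroup *entryMoves_ = nullptr;
      public:
        MoveGroup *getEntryMoveGroup(std::pmr::memory_resource &alloc) {
            if (entryMoves_)
                return entryMoves_;                 // fast path: no allocation needed
            void *p = alloc.allocate(sizeof(MoveGroup), alignof(MoveGroup));
            entryMoves_ = new (p) MoveGroup();
            return entryMoves_;
        }
    };

    int main() {
        std::pmr::monotonic_buffer_resource arena;
        Block block;
        MoveGroup *g = block.getEntryMoveGroup(arena);
        return g == block.getEntryMoveGroup(arena) ? 0 : 1;  // second call reuses the group
    }
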
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -662,17 +662,17 @@ class LInstruction
     void setMir(MDefinition *mir) {
         mir_ = mir;
     }
     MDefinition *mirRaw() const {
         /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
         return mir_;
     }
     void assignSnapshot(LSnapshot *snapshot);
-    void initSafepoint();
+    void initSafepoint(TempAllocator &alloc);
 
     // For an instruction which has a MUST_REUSE_INPUT output, whether that
     // output register will be restored to its original value when bailing out.
     virtual bool recoversInput() const {
         return false;
     }
 
     virtual void print(FILE *fp);
@@ -731,25 +731,26 @@ class LMoveGroup;
 class LBlock : public TempObject
 {
     MBasicBlock *block_;
     Vector<LPhi *, 4, IonAllocPolicy> phis_;
     InlineList<LInstruction> instructions_;
     LMoveGroup *entryMoveGroup_;
     LMoveGroup *exitMoveGroup_;
 
-    LBlock(MBasicBlock *block)
+    LBlock(TempAllocator &alloc, MBasicBlock *block)
       : block_(block),
+        phis_(alloc),
         entryMoveGroup_(nullptr),
         exitMoveGroup_(nullptr)
     { }
 
   public:
-    static LBlock *New(MBasicBlock *from) {
-        return new LBlock(from);
+    static LBlock *New(TempAllocator &alloc, MBasicBlock *from) {
+        return new(alloc) LBlock(alloc, from);
     }
     void add(LInstruction *ins) {
         instructions_.pushBack(ins);
     }
     bool addPhi(LPhi *phi) {
         return phis_.append(phi);
     }
     size_t numPhis() const {
@@ -793,18 +794,18 @@ class LBlock : public TempObject
     }
     void insertBefore(LInstruction *at, LInstruction *ins) {
         JS_ASSERT(!at->isLabel());
         instructions_.insertBefore(at, ins);
     }
     uint32_t firstId();
     uint32_t lastId();
     Label *label();
-    LMoveGroup *getEntryMoveGroup();
-    LMoveGroup *getExitMoveGroup();
+    LMoveGroup *getEntryMoveGroup(TempAllocator &alloc);
+    LMoveGroup *getExitMoveGroup(TempAllocator &alloc);
 };
 
 template <size_t Defs, size_t Operands, size_t Temps>
 class LInstructionHelper : public LInstruction
 {
     mozilla::Array<LDefinition, Defs> defs_;
     mozilla::Array<LAllocation, Operands> operands_;
     mozilla::Array<LDefinition, Temps> temps_;
@@ -1016,22 +1017,26 @@ class LSafepoint : public TempObject
 
     // The subset of liveRegs which contains pointers to slots/elements.
     GeneralRegisterSet slotsOrElementsRegs_;
 
     // List of stack slots which have slots/elements pointers.
     SlotList slotsOrElementsSlots_;
 
   public:
-    LSafepoint()
+    LSafepoint(TempAllocator &alloc)
       : safepointOffset_(INVALID_SAFEPOINT_OFFSET)
       , osiCallPointOffset_(0)
+      , gcSlots_(alloc)
+      , valueSlots_(alloc)
 #ifdef JS_NUNBOX32
+      , nunboxParts_(alloc)
       , partialNunboxes_(0)
 #endif
+      , slotsOrElementsSlots_(alloc)
     { }
     void addLiveRegister(AnyRegister reg) {
         liveRegs_.addUnchecked(reg);
     }
     const RegisterSet &liveRegs() const {
         return liveRegs_;
     }
 #ifdef CHECK_OSIPOINT_REGISTERS
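
The LBlock::New change above shows the recurring shape of this patch: a static factory takes the allocator, placement-news the object into it, and forwards the same reference so member vectors bind to the same arena. That placement form relies on TempObject providing an operator new over TempAllocator; a hedged sketch of that hook, under the same allocate(size_t) assumption:

    class TempObject
    {
      public:
        inline void *operator new(size_t nbytes, TempAllocator &alloc) {
            return alloc.allocate(nbytes);
        }
        // No matching operator delete: TempObjects die with the arena.
    };
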
--- a/js/src/jit/LinearScan.cpp
+++ b/js/src/jit/LinearScan.cpp
@@ -240,24 +240,24 @@ LinearScanAllocator::resolveControlFlow(
             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                 LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
                 JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
 
                 LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
                 LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
                 JS_ASSERT(from);
 
-                LMoveGroup *moves = predecessor->getExitMoveGroup();
+                LMoveGroup *moves = predecessor->getExitMoveGroup(alloc());
                 if (!addMove(moves, from, to))
                     return false;
             }
 
             if (vreg->mustSpillAtDefinition() && !to->isSpill()) {
                 // Make sure this phi is spilled at the loop header.
-                LMoveGroup *moves = successor->getEntryMoveGroup();
+                LMoveGroup *moves = successor->getEntryMoveGroup(alloc());
                 if (!moves->add(to->getAllocation(), vregs[to->vreg()].canonicalSpill()))
                     return false;
             }
         }
 
         // Resolve split intervals with moves
         BitSet *live = liveIn[mSuccessor->id()];
 
@@ -279,21 +279,21 @@ LinearScanAllocator::resolveControlFlow(
                 if (vreg->mustSpillAtDefinition() && to->getAllocation()->isStackSlot()) {
                     JS_ASSERT(vreg->canonicalSpill());
                     JS_ASSERT(*vreg->canonicalSpill() == *to->getAllocation());
                     continue;
                 }
 
                 if (mSuccessor->numPredecessors() > 1) {
                     JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
-                    LMoveGroup *moves = predecessor->getExitMoveGroup();
+                    LMoveGroup *moves = predecessor->getExitMoveGroup(alloc());
                     if (!addMove(moves, from, to))
                         return false;
                 } else {
-                    LMoveGroup *moves = successor->getEntryMoveGroup();
+                    LMoveGroup *moves = successor->getEntryMoveGroup(alloc());
                     if (!addMove(moves, from, to))
                         return false;
                 }
             }
         }
     }
 
     return true;
@@ -645,17 +645,17 @@ LinearScanAllocator::splitInterval(LiveI
     JS_ASSERT(interval->start() < pos && pos < interval->end());
 
     LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
 
     // "Bogus" intervals cannot be split.
     JS_ASSERT(reg);
 
     // Do the split.
-    LiveInterval *newInterval = new(alloc()) LiveInterval(interval->vreg(), interval->index() + 1);
+    LiveInterval *newInterval = LiveInterval::New(alloc(), interval->vreg(), interval->index() + 1);
     if (!interval->splitFrom(pos, newInterval))
         return false;
 
     JS_ASSERT(interval->numRanges() > 0);
     JS_ASSERT(newInterval->numRanges() > 0);
 
     if (!reg->addInterval(newInterval))
         return false;
--- a/js/src/jit/LinearScan.h
+++ b/js/src/jit/LinearScan.h
@@ -21,16 +21,19 @@ class LinearScanVirtualRegister : public
 
     bool spillAtDefinition_ : 1;
 
     // This bit is used to determine whether both halves of a nunbox have been
     // processed by freeAllocation().
     bool finished_ : 1;
 
   public:
+    LinearScanVirtualRegister(TempAllocator &alloc)
+      : VirtualRegister(alloc)
+    {}
     void setCanonicalSpill(LAllocation *alloc) {
         canonicalSpill_ = alloc;
     }
     LAllocation *canonicalSpill() const {
         return canonicalSpill_;
     }
     unsigned canonicalSpillSlot() const {
         return canonicalSpill_->toStackSlot()->slot();
--- a/js/src/jit/LiveRangeAllocator.cpp
+++ b/js/src/jit/LiveRangeAllocator.cpp
@@ -401,22 +401,22 @@ LiveRangeAllocator<VREG>::init()
 
     liveIn = mir->allocate<BitSet*>(graph.numBlockIds());
     if (!liveIn)
         return false;
 
     // Initialize fixed intervals.
     for (size_t i = 0; i < AnyRegister::Total; i++) {
         AnyRegister reg = AnyRegister::FromCode(i);
-        LiveInterval *interval = new(alloc()) LiveInterval(0);
+        LiveInterval *interval = LiveInterval::New(alloc(), 0);
         interval->setAllocation(LAllocation(reg));
         fixedIntervals[i] = interval;
     }
 
-    fixedIntervalsUnion = new(alloc()) LiveInterval(0);
+    fixedIntervalsUnion = LiveInterval::New(alloc(), 0);
 
     if (!vregs.init(mir, graph.numVirtualRegisters()))
         return false;
 
     // Build virtual register objects
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         if (mir->shouldCancel("Create data structures (main loop)"))
             return false;
--- a/js/src/jit/LiveRangeAllocator.h
+++ b/js/src/jit/LiveRangeAllocator.h
@@ -227,32 +227,40 @@ class LiveInterval
     LiveInterval *spillInterval_;
     uint32_t vreg_;
     uint32_t index_;
     Requirement requirement_;
     Requirement hint_;
     InlineForwardList<UsePosition> uses_;
     size_t lastProcessedRange_;
 
-  public:
-
-    LiveInterval(uint32_t vreg, uint32_t index)
-      : spillInterval_(nullptr),
+    LiveInterval(TempAllocator &alloc, uint32_t vreg, uint32_t index)
+      : ranges_(alloc),
+        spillInterval_(nullptr),
         vreg_(vreg),
         index_(index),
         lastProcessedRange_(size_t(-1))
     { }
 
-    LiveInterval(uint32_t index)
-      : spillInterval_(nullptr),
+    LiveInterval(TempAllocator &alloc, uint32_t index)
+      : ranges_(alloc),
+        spillInterval_(nullptr),
         vreg_(UINT32_MAX),
         index_(index),
         lastProcessedRange_(size_t(-1))
     { }
 
+  public:
+    static LiveInterval *New(TempAllocator &alloc, uint32_t vreg, uint32_t index) {
+        return new(alloc) LiveInterval(alloc, vreg, index);
+    }
+    static LiveInterval *New(TempAllocator &alloc, uint32_t index) {
+        return new(alloc) LiveInterval(alloc, index);
+    }
+
     bool addRange(CodePosition from, CodePosition to);
     bool addRangeAtHead(CodePosition from, CodePosition to);
     void setFrom(CodePosition from);
     CodePosition intersect(LiveInterval *other);
     bool covers(CodePosition pos);
     CodePosition nextCoveredAfter(CodePosition pos);
 
     CodePosition start() const {
@@ -382,26 +390,31 @@ class VirtualRegister
     Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;
 
     // Whether def_ is a temp or an output.
     bool isTemp_ : 1;
 
     void operator=(const VirtualRegister &) MOZ_DELETE;
     VirtualRegister(const VirtualRegister &) MOZ_DELETE;
 
+  protected:
+    VirtualRegister(TempAllocator &alloc)
+      : intervals_(alloc)
+    {}
+
   public:
     bool init(TempAllocator &alloc, LBlock *block, LInstruction *ins, LDefinition *def,
               bool isTemp)
     {
         JS_ASSERT(block && !block_);
         block_ = block;
         ins_ = ins;
         def_ = def;
         isTemp_ = isTemp;
-        LiveInterval *initial = new(alloc) LiveInterval(def->virtualRegister(), 0);
+        LiveInterval *initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
         if (!initial)
             return false;
         return intervals_.append(initial);
     }
     LBlock *block() {
         return block_;
     }
     LInstruction *ins() {
@@ -475,16 +488,19 @@ class VirtualRegisterMap
     { }
 
     bool init(MIRGenerator *gen, uint32_t numVregs) {
         vregs_ = gen->allocate<VREG>(numVregs);
         numVregs_ = numVregs;
         if (!vregs_)
             return false;
         memset(vregs_, 0, sizeof(VREG) * numVregs);
+        TempAllocator &alloc = gen->alloc();
+        for (uint32_t i = 0; i < numVregs; i++)
+            new(&vregs_[i]) VREG(alloc);
         return true;
     }
     VREG &operator[](unsigned int index) {
         JS_ASSERT(index < numVregs_);
         return vregs_[index];
     }
     VREG &operator[](const LAllocation *alloc) {
         JS_ASSERT(alloc->isUse());
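
VirtualRegisterMap::init gains a construction loop because VREG now holds a reference to the arena: memset alone can no longer produce a valid object, so each slot's constructor must run via placement new. The VirtualRegister constructor only initializes intervals_, deliberately leaving the POD members to the zero state the preceding memset established. In isolation the pattern is:

    #include <new>  // global placement operator new

    memset(vregs_, 0, sizeof(VREG) * numVregs);  // zero the POD members
    for (uint32_t i = 0; i < numVregs; i++)
        new(&vregs_[i]) VREG(alloc);             // bind intervals_ to the arena
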
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3540,17 +3540,17 @@ LIRGenerator::precreatePhi(LBlock *block
 bool
 LIRGenerator::generate()
 {
     // Create all blocks and prep all phis beforehand.
     for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
         if (gen->shouldCancel("Lowering (preparation loop)"))
             return false;
 
-        current = LBlock::New(*block);
+        current = LBlock::New(alloc(), *block);
         if (!current)
             return false;
         if (!lirGraph_.addBlock(current))
             return false;
         block->assignLir(current);
 
         // For each MIR phi, add LIR phis as appropriate. We'll fill in their
         // operands on each incoming edge, and set their definitions at the
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -708,17 +708,17 @@ MCompare::NewAsmJS(TempAllocator &alloc,
     comp->operandMightEmulateUndefined_ = false;
     comp->setResultType(MIRType_Int32);
     return comp;
 }
 
 MTableSwitch *
 MTableSwitch::New(TempAllocator &alloc, MDefinition *ins, int32_t low, int32_t high)
 {
-    return new(alloc) MTableSwitch(ins, low, high);
+    return new(alloc) MTableSwitch(alloc, ins, low, high);
 }
 
 MGoto *
 MGoto::New(TempAllocator &alloc, MBasicBlock *target)
 {
     JS_ASSERT(target);
     return new(alloc) MGoto(target);
 }
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -1076,21 +1076,21 @@ class MTableSwitch MOZ_FINAL
 
     // Contains the blocks/cases that still need to get built
     Vector<MBasicBlock*, 0, IonAllocPolicy> blocks_;
 
     MUse operand_;
     int32_t low_;
     int32_t high_;
 
-    MTableSwitch(MDefinition *ins,
+    MTableSwitch(TempAllocator &alloc, MDefinition *ins,
                  int32_t low, int32_t high)
-      : successors_(),
-        cases_(),
-        blocks_(),
+      : successors_(alloc),
+        cases_(alloc),
+        blocks_(alloc),
         low_(low),
         high_(high)
     {
         setOperand(0, ins);
     }
 
   protected:
     void setOperand(size_t index, MDefinition *operand) {
@@ -4342,18 +4342,19 @@ class MPhi MOZ_FINAL : public MDefinitio
     bool canProduceFloat32_;
     bool canConsumeFloat32_;
 
 #ifdef DEBUG
     bool specialized_;
     uint32_t capacity_;
 #endif
 
-    MPhi(uint32_t slot, MIRType resultType)
-      : slot_(slot),
+    MPhi(TempAllocator &alloc, uint32_t slot, MIRType resultType)
+      : inputs_(alloc),
+        slot_(slot),
         hasBackedgeType_(false),
         triedToSpecialize_(false),
         isIterator_(false),
         canProduceFloat32_(false),
         canConsumeFloat32_(false)
 #if DEBUG
         , specialized_(false)
         , capacity_(0)
@@ -4365,17 +4366,17 @@ class MPhi MOZ_FINAL : public MDefinitio
   protected:
     MUse *getUseFor(size_t index) {
         return &inputs_[index];
     }
 
   public:
     INSTRUCTION_HEADER(Phi)
     static MPhi *New(TempAllocator &alloc, uint32_t slot, MIRType resultType = MIRType_Value) {
-        return new(alloc) MPhi(slot, resultType);
+        return new(alloc) MPhi(alloc, slot, resultType);
     }
 
     void setOperand(size_t index, MDefinition *operand) {
         // Note: after the initial IonBuilder pass, it is OK to change phi
         // operands such that they do not include the type sets of their
         // operands. This can arise during e.g. value numbering, where
         // definitions producing the same value may have different type sets.
         JS_ASSERT(index < numOperands());
@@ -6342,18 +6343,18 @@ class InlinePropertyTable : public TempO
         { }
     };
 
     jsbytecode *pc_;
     MResumePoint *priorResumePoint_;
     Vector<Entry *, 4, IonAllocPolicy> entries_;
 
   public:
-    InlinePropertyTable(jsbytecode *pc)
-      : pc_(pc), priorResumePoint_(nullptr), entries_()
+    InlinePropertyTable(TempAllocator &alloc, jsbytecode *pc)
+      : pc_(pc), priorResumePoint_(nullptr), entries_(alloc)
     { }
 
     void setPriorResumePoint(MResumePoint *resumePoint) {
         JS_ASSERT(priorResumePoint_ == nullptr);
         priorResumePoint_ = resumePoint;
     }
 
     MResumePoint *priorResumePoint() const {
@@ -6436,17 +6437,17 @@ class MGetPropertyCache
     INSTRUCTION_HEADER(GetPropertyCache)
 
     static MGetPropertyCache *New(TempAllocator &alloc, MDefinition *obj, PropertyName *name) {
         return new(alloc) MGetPropertyCache(obj, name);
     }
 
     InlinePropertyTable *initInlinePropertyTable(TempAllocator &alloc, jsbytecode *pc) {
         JS_ASSERT(inlinePropertyTable_ == nullptr);
-        inlinePropertyTable_ = new(alloc) InlinePropertyTable(pc);
+        inlinePropertyTable_ = new(alloc) InlinePropertyTable(alloc, pc);
         return inlinePropertyTable_;
     }
 
     void clearInlinePropertyTable() {
         inlinePropertyTable_ = nullptr;
     }
 
     InlinePropertyTable *propTable() const {
@@ -6512,33 +6513,34 @@ class MGetPropertyPolymorphic
 
         // The property to load.
         Shape *shape;
     };
 
     Vector<Entry, 4, IonAllocPolicy> shapes_;
     CompilerRootPropertyName name_;
 
-    MGetPropertyPolymorphic(MDefinition *obj, PropertyName *name)
+    MGetPropertyPolymorphic(TempAllocator &alloc, MDefinition *obj, PropertyName *name)
       : MUnaryInstruction(obj),
+        shapes_(alloc),
         name_(name)
     {
         setMovable();
         setResultType(MIRType_Value);
     }
 
     PropertyName *name() const {
         return name_;
     }
 
   public:
     INSTRUCTION_HEADER(GetPropertyPolymorphic)
 
     static MGetPropertyPolymorphic *New(TempAllocator &alloc, MDefinition *obj, PropertyName *name) {
-        return new(alloc) MGetPropertyPolymorphic(obj, name);
+        return new(alloc) MGetPropertyPolymorphic(alloc, obj, name);
     }
 
     bool congruentTo(MDefinition *ins) const {
         if (!ins->isGetPropertyPolymorphic())
             return false;
         if (name() != ins->toGetPropertyPolymorphic()->name())
             return false;
         return congruentIfOperandsEqual(ins);
@@ -6584,27 +6586,28 @@ class MSetPropertyPolymorphic
 
         // The property to load.
         Shape *shape;
     };
 
     Vector<Entry, 4, IonAllocPolicy> shapes_;
     bool needsBarrier_;
 
-    MSetPropertyPolymorphic(MDefinition *obj, MDefinition *value)
+    MSetPropertyPolymorphic(TempAllocator &alloc, MDefinition *obj, MDefinition *value)
       : MBinaryInstruction(obj, value),
+        shapes_(alloc),
         needsBarrier_(false)
     {
     }
 
   public:
     INSTRUCTION_HEADER(SetPropertyPolymorphic)
 
     static MSetPropertyPolymorphic *New(TempAllocator &alloc, MDefinition *obj, MDefinition *value) {
-        return new(alloc) MSetPropertyPolymorphic(obj, value);
+        return new(alloc) MSetPropertyPolymorphic(alloc, obj, value);
     }
 
     TypePolicy *typePolicy() {
         return this;
     }
     bool addShape(Shape *objShape, Shape *shape) {
         Entry entry;
         entry.objShape = objShape;
@@ -6652,18 +6655,18 @@ class MDispatchInstruction
     };
     Vector<Entry, 4, IonAllocPolicy> map_;
 
     // An optional fallback path that uses MCall.
     MBasicBlock *fallback_;
     MUse operand_;
 
   public:
-    MDispatchInstruction(MDefinition *input)
-      : map_(), fallback_(nullptr)
+    MDispatchInstruction(TempAllocator &alloc, MDefinition *input)
+      : map_(alloc), fallback_(nullptr)
     {
         setOperand(0, input);
     }
 
   protected:
     void setOperand(size_t index, MDefinition *operand) MOZ_FINAL MOZ_OVERRIDE {
         JS_ASSERT(index == 0);
         operand_.set(operand, this, 0);
@@ -6738,47 +6741,47 @@ class MDispatchInstruction
 };
 
 // Polymorphic dispatch for inlining, keyed off incoming TypeObject.
 class MTypeObjectDispatch : public MDispatchInstruction
 {
     // Map TypeObject (of CallProp's Target Object) -> JSFunction (yielded by the CallProp).
     InlinePropertyTable *inlinePropertyTable_;
 
-    MTypeObjectDispatch(MDefinition *input, InlinePropertyTable *table)
-      : MDispatchInstruction(input),
+    MTypeObjectDispatch(TempAllocator &alloc, MDefinition *input, InlinePropertyTable *table)
+      : MDispatchInstruction(alloc, input),
         inlinePropertyTable_(table)
     { }
 
   public:
     INSTRUCTION_HEADER(TypeObjectDispatch)
 
     static MTypeObjectDispatch *New(TempAllocator &alloc, MDefinition *ins,
                                     InlinePropertyTable *table)
     {
-        return new(alloc) MTypeObjectDispatch(ins, table);
+        return new(alloc) MTypeObjectDispatch(alloc, ins, table);
     }
 
     InlinePropertyTable *propTable() const {
         return inlinePropertyTable_;
     }
 };
 
 // Polymorphic dispatch for inlining, keyed off incoming JSFunction*.
 class MFunctionDispatch : public MDispatchInstruction
 {
-    MFunctionDispatch(MDefinition *input)
-      : MDispatchInstruction(input)
+    MFunctionDispatch(TempAllocator &alloc, MDefinition *input)
+      : MDispatchInstruction(alloc, input)
     { }
 
   public:
     INSTRUCTION_HEADER(FunctionDispatch)
 
     static MFunctionDispatch *New(TempAllocator &alloc, MDefinition *ins) {
-        return new(alloc) MFunctionDispatch(ins);
+        return new(alloc) MFunctionDispatch(alloc, ins);
     }
 };
 
 class MGetElementCache
   : public MBinaryInstruction
 {
     MixPolicy<ObjectPolicy<0>, BoxPolicy<1> > PolicyV;
     MixPolicy<ObjectPolicy<0>, IntPolicy<1> > PolicyT;
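
The MIR changes all follow one rule: a node that owns an IonAllocPolicy container grows a TempAllocator parameter, and its New factory forwards the allocator both to placement new and to the constructor. A hypothetical call site, using the factories from the hunks above (slot, ins, low, and high stand in for real operands):

    // Inside a pass holding TempAllocator &alloc:
    MPhi *phi = MPhi::New(alloc, slot);           // arena-allocated node
    MTableSwitch *ts = MTableSwitch::New(alloc, ins, low, high);
    // Both nodes' internal vectors now append into that same arena.
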
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -21,16 +21,18 @@ MIRGenerator::MIRGenerator(JSCompartment
   : compartment(compartment),
     info_(info),
     alloc_(alloc),
     graph_(graph),
     error_(false),
     cancelBuild_(0),
     maxAsmJSStackArgBytes_(0),
     performsAsmJSCall_(false),
+    asmJSHeapAccesses_(*alloc),
+    asmJSGlobalAccesses_(*alloc),
     minAsmJSHeapLength_(AsmJSAllocationGranularity)
 { }
 
 bool
 MIRGenerator::abortFmt(const char *message, va_list ap)
 {
     IonSpewVA(IonSpew_Abort, message, ap);
     error_ = true;
@@ -268,30 +270,32 @@ MBasicBlock::NewAsmJS(MIRGraph &graph, C
         if (!block->predecessors_.append(pred))
             return nullptr;
     }
 
     return block;
 }
 
 MBasicBlock::MBasicBlock(MIRGraph &graph, CompileInfo &info, jsbytecode *pc, Kind kind)
-    : earlyAbort_(false),
+  : earlyAbort_(false),
     graph_(graph),
     info_(info),
+    predecessors_(graph.alloc()),
     stackPosition_(info_.firstStackSlot()),
     lastIns_(nullptr),
     pc_(pc),
     lir_(nullptr),
     start_(nullptr),
     entryResumePoint_(nullptr),
     successorWithPhis_(nullptr),
     positionInPhiSuccessor_(0),
     kind_(kind),
     loopDepth_(0),
     mark_(false),
+    immediatelyDominated_(graph.alloc()),
     immediateDominator_(nullptr),
     numDominated_(0),
     loopHeader_(nullptr),
     trackedPc_(pc)
 #if defined (JS_ION_PERF)
     , lineno_(0u),
     columnIndex_(0u)
 #endif
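
MIRGenerator and MBasicBlock illustrate the two routes by which the allocator reaches members: the generator dereferences the TempAllocator* it is constructed with, while blocks pull it from graph.alloc(). A sketch of the accessor this presumes, assuming MIRGraph simply stores the pointer it was given:

    // Hedged sketch of the MIRGraph accessor used by MBasicBlock above.
    TempAllocator &MIRGraph::alloc() const {
        return *alloc_;
    }
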
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -1461,17 +1461,17 @@ MRandom::computeRange()
 
 ///////////////////////////////////////////////////////////////////////////////
 // Range Analysis
 ///////////////////////////////////////////////////////////////////////////////
 
 bool
 RangeAnalysis::markBlocksInLoopBody(MBasicBlock *header, MBasicBlock *backedge)
 {
-    Vector<MBasicBlock *, 16, IonAllocPolicy> worklist;
+    Vector<MBasicBlock *, 16, IonAllocPolicy> worklist(alloc());
 
     // Mark the header as being in the loop. This terminates the walk.
     header->mark();
 
     backedge->mark();
     if (!worklist.append(backedge))
         return false;
 
@@ -1554,17 +1554,17 @@ RangeAnalysis::analyzeLoop(MBasicBlock *
     // loop, expressed in terms of the iteration bound just computed.
 
     for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++)
         analyzeLoopPhi(header, iterationBound, *iter);
 
     if (!mir->compilingAsmJS()) {
         // Try to hoist any bounds checks from the loop using symbolic bounds.
 
-        Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks;
+        Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks(alloc());
 
         for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) {
             MBasicBlock *block = *iter;
             if (!block->isMarked())
                 continue;
 
             for (MDefinitionIterator iter(block); iter; iter++) {
                 MDefinition *def = *iter;
@@ -1657,17 +1657,17 @@ RangeAnalysis::analyzeLoopIterationCount
     // of the iteration, and not that written to lhs in a previous iteration,
     // as such a previous value could not appear directly in the addition:
     // it could not be stored in lhs as the lhs add/sub executes in every
     // iteration, and if it were stored in another variable its use here would
     // be as an operand to a phi node for that variable.
     if (lhsModified.term != lhs.term)
         return nullptr;
 
-    LinearSum bound;
+    LinearSum bound(alloc());
 
     if (lhsModified.constant == 1 && !lessEqual) {
         // The value of lhs is 'initial(lhs) + iterCount' and this will end
         // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
         // on the number of backedges executed is:
         //
         // initial(lhs) + iterCount + lhsN == rhs
         // iterCount == rhsN - initial(lhs) - lhsN
@@ -1731,17 +1731,17 @@ RangeAnalysis::analyzeLoopPhi(MBasicBloc
     SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor()));
 
     if (modified.term != phi || modified.constant == 0)
         return;
 
     if (!phi->range())
         phi->setRange(new Range());
 
-    LinearSum initialSum;
+    LinearSum initialSum(alloc());
     if (!initialSum.add(initial, 1))
         return;
 
     // The phi may change by N each iteration, and is either nondecreasing or
     // nonincreasing. initial(phi) is either a lower or upper bound for the
     // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
     // at all points within the loop, provided that loopBound >= 0.
     //
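
Stack temporaries change the same way as members: a local Vector or LinearSum is now spelled with the pass's alloc(). LinearSum presumably wraps a term vector plus a constant, so its new constructor just forwards; a sketch consistent with the initialSum.add(initial, 1) call above (LinearTerm and the member layout are assumptions, not quotes of IonAnalysis.h):

    class LinearSum
    {
        Vector<LinearTerm, 2, IonAllocPolicy> terms_;
        int32_t constant_;

      public:
        LinearSum(TempAllocator &alloc)
          : terms_(alloc), constant_(0)
        {}
        // Add "term * scale" to the sum; false on OOM.
        bool add(MDefinition *term, int32_t scale);
    };
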
--- a/js/src/jit/RegisterAllocator.cpp
+++ b/js/src/jit/RegisterAllocator.cpp
@@ -482,33 +482,33 @@ RegisterAllocator::getInputMoveGroup(uin
 {
     InstructionData *data = &insData[ins];
     JS_ASSERT(!data->ins()->isPhi());
     JS_ASSERT(!data->ins()->isLabel());
 
     if (data->inputMoves())
         return data->inputMoves();
 
-    LMoveGroup *moves = new LMoveGroup;
+    LMoveGroup *moves = new LMoveGroup(alloc());
     data->setInputMoves(moves);
     data->block()->insertBefore(data->ins(), moves);
 
     return moves;
 }
 
 LMoveGroup *
 RegisterAllocator::getMoveGroupAfter(uint32_t ins)
 {
     InstructionData *data = &insData[ins];
     JS_ASSERT(!data->ins()->isPhi());
 
     if (data->movesAfter())
         return data->movesAfter();
 
-    LMoveGroup *moves = new LMoveGroup;
+    LMoveGroup *moves = new LMoveGroup(alloc());
     data->setMovesAfter(moves);
 
     if (data->ins()->isLabel())
-        data->block()->insertAfter(data->block()->getEntryMoveGroup(), moves);
+        data->block()->insertAfter(data->block()->getEntryMoveGroup(alloc()), moves);
     else
         data->block()->insertAfter(data->ins(), moves);
     return moves;
 }
--- a/js/src/jit/StupidAllocator.cpp
+++ b/js/src/jit/StupidAllocator.cpp
@@ -300,17 +300,17 @@ StupidAllocator::syncForBlockEnd(LBlock 
 
             if (!group) {
                 // The moves we insert here need to happen simultaneously with
                 // each other, yet after any existing moves before the instruction.
                 LMoveGroup *input = getInputMoveGroup(ins->id());
                 if (input->numMoves() == 0) {
                     group = input;
                 } else {
-                    group = new LMoveGroup;
+                    group = new LMoveGroup(alloc());
                     block->insertAfter(input, group);
                 }
             }
 
             group->add(source, dest);
         }
     }
 }
--- a/js/src/jit/ValueNumbering.cpp
+++ b/js/src/jit/ValueNumbering.cpp
@@ -11,16 +11,17 @@
 #include "jit/MIRGraph.h"
 
 using namespace js;
 using namespace js::jit;
 
 ValueNumberer::ValueNumberer(MIRGenerator *mir, MIRGraph &graph, bool optimistic)
   : mir(mir),
     graph_(graph),
+    values(graph.alloc()),
     pessimisticPass_(!optimistic),
     count_(0)
 { }
 
 TempAllocator &
 ValueNumberer::alloc() const
 {
     return graph_.alloc();
@@ -335,25 +336,25 @@ ValueNumberer::eliminateRedundancies()
     // If there is a definition d' in the hashmap, and the current traversal
     // index is within that instruction's dominated range, then we eliminate d,
     // replacing all uses of d with uses of d'.
     //
     // If there is no valid definition in the hashtable (the current definition
     // is not in dominated scope), then we insert the current instruction,
     // since it is the most dominant instruction with the given value number.
 
-    InstructionMap defs;
+    InstructionMap defs(alloc());
 
     if (!defs.init())
         return false;
 
     IonSpew(IonSpew_GVN, "Eliminating redundant instructions");
 
     // Stack for pre-order CFG traversal.
-    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;
+    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(alloc());
 
     // The index of the current block in the CFG traversal.
     size_t index = 0;
 
     // Add all self-dominating blocks to the worklist.
     // This includes all roots. Order does not matter.
     for (MBasicBlockIterator i(graph_.begin()); i != graph_.end(); i++) {
         MBasicBlock *block = *i;
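
Hash maps follow suit: an IonAllocPolicy-backed HashMap now takes the allocator at construction, while the separate, fallible init() still sizes the table. The InstructionMap used above is presumably a typedef along these lines (the key/value choice is an assumption based on how defs is used):

    typedef HashMap<uint32_t, MDefinition *,
                    DefaultHasher<uint32_t>, IonAllocPolicy> InstructionMap;

    InstructionMap defs(alloc());   // storage bound to the arena
    if (!defs.init())               // bucket allocation, OOM-checked
        return false;
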
--- a/js/src/jit/arm/BaselineCompiler-arm.cpp
+++ b/js/src/jit/arm/BaselineCompiler-arm.cpp
@@ -4,12 +4,12 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/arm/BaselineCompiler-arm.h"
 
 using namespace js;
 using namespace js::jit;
 
-BaselineCompilerARM::BaselineCompilerARM(JSContext *cx, HandleScript script)
-  : BaselineCompilerShared(cx, script)
+BaselineCompilerARM::BaselineCompilerARM(JSContext *cx, TempAllocator &alloc, HandleScript script)
+  : BaselineCompilerShared(cx, alloc, script)
 {
 }
--- a/js/src/jit/arm/BaselineCompiler-arm.h
+++ b/js/src/jit/arm/BaselineCompiler-arm.h
@@ -10,17 +10,17 @@
 #include "jit/shared/BaselineCompiler-shared.h"
 
 namespace js {
 namespace jit {
 
 class BaselineCompilerARM : public BaselineCompilerShared
 {
   protected:
-    BaselineCompilerARM(JSContext *cx, HandleScript script);
+    BaselineCompilerARM(JSContext *cx, TempAllocator &alloc, HandleScript script);
 };
 
 typedef BaselineCompilerARM BaselineCompilerSpecific;
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_arm_BaselineCompiler_arm_h */
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1039,18 +1039,19 @@ class js::jit::OutOfLineTableSwitch : pu
     MTableSwitch *mir_;
     Vector<CodeLabel, 8, IonAllocPolicy> codeLabels_;
 
     bool accept(CodeGeneratorARM *codegen) {
         return codegen->visitOutOfLineTableSwitch(this);
     }
 
   public:
-    OutOfLineTableSwitch(MTableSwitch *mir)
-      : mir_(mir)
+    OutOfLineTableSwitch(TempAllocator &alloc, MTableSwitch *mir)
+      : mir_(mir),
+        codeLabels_(alloc)
     {}
 
     MTableSwitch *mir() const {
         return mir_;
     }
 
     bool addCodeLabel(CodeLabel label) {
         return codeLabels_.append(label);
@@ -1120,17 +1121,17 @@ CodeGeneratorARM::emitTableSwitchDispatc
     masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::Unsigned);
     AutoForbidPools afp(&masm);
     masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::Unsigned);
     masm.ma_b(defaultcase);
 
     // To fill in the CodeLabels for the case entries, we need to first
     // generate the case entries (we don't yet know their offsets in the
     // instruction stream).
-    OutOfLineTableSwitch *ool = new OutOfLineTableSwitch(mir);
+    OutOfLineTableSwitch *ool = new OutOfLineTableSwitch(alloc(), mir);
     for (int32_t i = 0; i < cases; i++) {
         CodeLabel cl;
         masm.writeCodePointer(cl.dest());
         if (!ool->addCodeLabel(cl))
             return false;
     }
     if (!addOutOfLineCode(ool))
         return false;
--- a/js/src/jit/shared/BaselineCompiler-shared.cpp
+++ b/js/src/jit/shared/BaselineCompiler-shared.cpp
@@ -7,24 +7,25 @@
 #include "jit/shared/BaselineCompiler-shared.h"
 
 #include "jit/BaselineIC.h"
 #include "jit/VMFunctions.h"
 
 using namespace js;
 using namespace js::jit;
 
-BaselineCompilerShared::BaselineCompilerShared(JSContext *cx, HandleScript script)
+BaselineCompilerShared::BaselineCompilerShared(JSContext *cx, TempAllocator &alloc, HandleScript script)
   : cx(cx),
     script(cx, script),
     pc(script->code),
     ionCompileable_(jit::IsIonEnabled(cx) && CanIonCompileScript(cx, script, false)),
     ionOSRCompileable_(jit::IsIonEnabled(cx) && CanIonCompileScript(cx, script, true)),
     debugMode_(cx->compartment()->debugMode()),
-    analysis_(script),
+    alloc_(alloc),
+    analysis_(alloc, script),
     frame(cx, script, masm),
     stubSpace_(),
     icEntries_(),
     pcMappingEntries_(),
     icLoadLabels_(),
     pushedBeforeCall_(0),
     inCall_(false),
     spsPushToggleOffset_()
--- a/js/src/jit/shared/BaselineCompiler-shared.h
+++ b/js/src/jit/shared/BaselineCompiler-shared.h
@@ -21,16 +21,17 @@ class BaselineCompilerShared
     JSContext *cx;
     RootedScript script;
     jsbytecode *pc;
     MacroAssembler masm;
     bool ionCompileable_;
     bool ionOSRCompileable_;
     bool debugMode_;
 
+    TempAllocator &alloc_;
     BytecodeAnalysis analysis_;
     FrameInfo frame;
 
     FallbackICStubSpace stubSpace_;
     js::Vector<ICEntry, 16, SystemAllocPolicy> icEntries_;
 
     // Stores the native code offset for a bytecode pc.
     struct PCMappingEntry
@@ -63,17 +64,17 @@ class BaselineCompilerShared
     };
     js::Vector<ICLoadLabel, 16, SystemAllocPolicy> icLoadLabels_;
 
     uint32_t pushedBeforeCall_;
     mozilla::DebugOnly<bool> inCall_;
 
     CodeOffsetLabel spsPushToggleOffset_;
 
-    BaselineCompilerShared(JSContext *cx, HandleScript script);
+    BaselineCompilerShared(JSContext *cx, TempAllocator &alloc, HandleScript script);
 
     ICEntry *allocateICEntry(ICStub *stub, bool isForOp) {
         if (!stub)
             return nullptr;
 
         // Create the entry and add it to the vector.
         if (!icEntries_.append(ICEntry((uint32_t) (pc - script->code), isForOp)))
             return nullptr;
--- a/js/src/jit/shared/BaselineCompiler-x86-shared.cpp
+++ b/js/src/jit/shared/BaselineCompiler-x86-shared.cpp
@@ -4,12 +4,12 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/shared/BaselineCompiler-x86-shared.h"
 
 using namespace js;
 using namespace js::jit;
 
-BaselineCompilerX86Shared::BaselineCompilerX86Shared(JSContext *cx, HandleScript script)
-  : BaselineCompilerShared(cx, script)
+BaselineCompilerX86Shared::BaselineCompilerX86Shared(JSContext *cx, TempAllocator &alloc, HandleScript script)
+  : BaselineCompilerShared(cx, alloc, script)
 {
 }
--- a/js/src/jit/shared/BaselineCompiler-x86-shared.h
+++ b/js/src/jit/shared/BaselineCompiler-x86-shared.h
@@ -10,15 +10,15 @@
 #include "jit/shared/BaselineCompiler-shared.h"
 
 namespace js {
 namespace jit {
 
 class BaselineCompilerX86Shared : public BaselineCompilerShared
 {
   protected:
-    BaselineCompilerX86Shared(JSContext *cx, HandleScript script);
+    BaselineCompilerX86Shared(JSContext *cx, TempAllocator &alloc, HandleScript script);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_shared_BaselineCompiler_x86_shared_h */
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -93,16 +93,20 @@ class CodeGeneratorShared : public LInst
     // scripts) and when instrumentation needs to be emitted or skipped.
     IonInstrumentation sps_;
 
   protected:
     // The offset of the first instruction of the OSR entry block from the
     // beginning of the code buffer.
     size_t osrEntryOffset_;
 
+    TempAllocator &alloc() const {
+        return graph.mir().alloc();
+    }
+
     inline void setOsrEntryOffset(size_t offset) {
         JS_ASSERT(osrEntryOffset_ == 0);
         osrEntryOffset_ = offset;
     }
     inline size_t getOsrEntryOffset() const {
         return osrEntryOffset_;
     }
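
The new protected alloc() accessor gives every platform code generator one spelling for the compilation arena, which is what the ARM OutOfLineTableSwitch call site above relies on. It covers local scratch containers as well, e.g.:

    // Hypothetical use inside any CodeGenerator method:
    Vector<CodeLabel, 8, IonAllocPolicy> labels(alloc());
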
 
--- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -10,20 +10,20 @@
 #include "mozilla/DebugOnly.h"
 
 #include "assembler/wtf/SegmentedVector.h"
 #include "jit/IonSpewer.h"
 #include "jit/shared/IonAssemblerBuffer.h"
 
 namespace js {
 namespace jit {
-typedef Vector<BufferOffset, 512, IonAllocPolicy> LoadOffsets;
+typedef Vector<BufferOffset, 512, OldIonAllocPolicy> LoadOffsets;
 
 struct Pool
-  : public IonAllocPolicy
+  : public OldIonAllocPolicy
 {
     const int maxOffset;
     const int immSize;
     const int instSize;
     const int bias;
 
   private:
     const int alignment;
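
The assembler-buffer pools are the one holdout: rather than threading an allocator through SegmentedVector and Pool, they keep a default-constructible policy under the OldIonAllocPolicy name. A policy without an allocator parameter has to find the arena some other way; a hedged sketch of the pre-existing behavior this preserves, assuming the thread-local IonContext carries the TempAllocator (exact body may differ):

    class OldIonAllocPolicy
    {
      public:
        OldIonAllocPolicy() {}
        void *malloc_(size_t bytes) {
            // No allocator in hand: fall back to thread-local lookup.
            return GetIonContext()->temp->allocate(bytes);
        }
        void free_(void *) {}
        void reportAllocOverflow() const {}
    };
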
--- a/js/src/jit/shared/Lowering-shared.cpp
+++ b/js/src/jit/shared/Lowering-shared.cpp
@@ -178,17 +178,17 @@ LIRGeneratorShared::assignSnapshot(LInst
 }
 
 bool
 LIRGeneratorShared::assignSafepoint(LInstruction *ins, MInstruction *mir)
 {
     JS_ASSERT(!osiPoint_);
     JS_ASSERT(!ins->safepoint());
 
-    ins->initSafepoint();
+    ins->initSafepoint(alloc());
 
     MResumePoint *mrp = mir->resumePoint() ? mir->resumePoint() : lastResumePoint_;
     LSnapshot *postSnapshot = buildSnapshot(ins, mrp, Bailout_Normal);
     if (!postSnapshot)
         return false;
 
     osiPoint_ = new LOsiPoint(ins->safepoint(), postSnapshot);
 
--- a/js/src/jit/x64/BaselineCompiler-x64.cpp
+++ b/js/src/jit/x64/BaselineCompiler-x64.cpp
@@ -4,12 +4,12 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/x64/BaselineCompiler-x64.h"
 
 using namespace js;
 using namespace js::jit;
 
-BaselineCompilerX64::BaselineCompilerX64(JSContext *cx, HandleScript script)
-  : BaselineCompilerX86Shared(cx, script)
+BaselineCompilerX64::BaselineCompilerX64(JSContext *cx, TempAllocator &alloc, HandleScript script)
+  : BaselineCompilerX86Shared(cx, alloc, script)
 {
 }
--- a/js/src/jit/x64/BaselineCompiler-x64.h
+++ b/js/src/jit/x64/BaselineCompiler-x64.h
@@ -10,17 +10,17 @@
 #include "jit/shared/BaselineCompiler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 class BaselineCompilerX64 : public BaselineCompilerX86Shared
 {
   protected:
-    BaselineCompilerX64(JSContext *cx, HandleScript script);
+    BaselineCompilerX64(JSContext *cx, TempAllocator &alloc, HandleScript script);
 };
 
 typedef BaselineCompilerX64 BaselineCompilerSpecific;
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_x64_BaselineCompiler_x64_h */
--- a/js/src/jit/x86/BaselineCompiler-x86.cpp
+++ b/js/src/jit/x86/BaselineCompiler-x86.cpp
@@ -4,12 +4,12 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/x86/BaselineCompiler-x86.h"
 
 using namespace js;
 using namespace js::jit;
 
-BaselineCompilerX86::BaselineCompilerX86(JSContext *cx, HandleScript script)
-  : BaselineCompilerX86Shared(cx, script)
+BaselineCompilerX86::BaselineCompilerX86(JSContext *cx, TempAllocator &alloc, HandleScript script)
+  : BaselineCompilerX86Shared(cx, alloc, script)
 {
 }
--- a/js/src/jit/x86/BaselineCompiler-x86.h
+++ b/js/src/jit/x86/BaselineCompiler-x86.h
@@ -10,17 +10,17 @@
 #include "jit/shared/BaselineCompiler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 class BaselineCompilerX86 : public BaselineCompilerX86Shared
 {
   protected:
-    BaselineCompilerX86(JSContext *cx, HandleScript script);
+    BaselineCompilerX86(JSContext *cx, TempAllocator &alloc, HandleScript script);
 };
 
 typedef BaselineCompilerX86 BaselineCompilerSpecific;
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_x86_BaselineCompiler_x86_h */
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -597,18 +597,20 @@ class types::CompilerConstraintList
     // Scripts whose stack type sets were frozen for the compilation.
     Vector<FrozenScript, 1, jit::IonAllocPolicy> frozenScripts;
 #endif
 
     // OOM during generation of some constraint.
     bool failed_;
 
   public:
-    CompilerConstraintList()
-      : failed_(false)
+    CompilerConstraintList(jit::TempAllocator &alloc)
+      : constraints(alloc),
+        frozenScripts(alloc),
+        failed_(false)
     {}
 
     void add(CompilerConstraint *constraint) {
 #ifdef JS_ION
         if (!constraint || !constraints.append(constraint))
             setFailed();
 #else
         MOZ_CRASH();
@@ -669,19 +671,19 @@ class types::CompilerConstraintList
         return failed_;
     }
     void setFailed() {
         failed_ = true;
     }
 };
 
 CompilerConstraintList *
-types::NewCompilerConstraintList()
+types::NewCompilerConstraintList(jit::TempAllocator &alloc)
 {
-    return IonAlloc()->new_<CompilerConstraintList>();
+    return IonAlloc()->new_<CompilerConstraintList>(alloc);
 }
 
 /* static */ bool
 TypeScript::FreezeTypeSets(CompilerConstraintList *constraints, JSScript *script,
                            TemporaryTypeSet **pThisTypes,
                            TemporaryTypeSet **pArgTypes,
                            TemporaryTypeSet **pBytecodeTypes)
 {
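
CompilerConstraintList carries the pattern out of jit/: type inference now takes jit::TempAllocator by reference and IonAlloc()->new_ forwards it to the constructor. A usage sketch from the compiler side (the mir variable and call site are hypothetical):

    // Hypothetical Ion-side caller:
    types::CompilerConstraintList *constraints =
        types::NewCompilerConstraintList(mir->alloc());
    if (!constraints)
        return false;
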
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -163,16 +163,17 @@ template <> struct ExecutionModeTraits<P
     typedef ForkJoinSlice * ExclusiveContextType;
 
     static inline ForkJoinSlice *toContextType(ForkJoinSlice *cx) { return cx; }
 };
 
 namespace jit {
     struct IonScript;
     class IonAllocPolicy;
+    class TempAllocator;
 }
 
 namespace analyze {
     class ScriptAnalysis;
 }
 
 namespace types {
 
@@ -613,17 +614,17 @@ class HeapTypeSet : public ConstraintTyp
   public:
     /* Mark this type set as representing a configured property. */
     inline void setConfiguredProperty(ExclusiveContext *cx);
 };
 
 class CompilerConstraintList;
 
 CompilerConstraintList *
-NewCompilerConstraintList();
+NewCompilerConstraintList(jit::TempAllocator &alloc);
 
 class TemporaryTypeSet : public TypeSet
 {
   public:
     TemporaryTypeSet() {}
     TemporaryTypeSet(Type type);
 
     TemporaryTypeSet(uint32_t flags, TypeObjectKey **objectSet) {