Bug 937540 part 7 - Use placement new syntax for range analysis and some other classes. r=nbp
author: Jan de Mooij <jdemooij@mozilla.com>
date: Thu, 05 Dec 2013 13:32:13 +0100
changeset 158922 efaee7511571ee5a43daf68caa749d5c181f51e1
parent 158921 3d1b55d822b2eb6884927172638391fa8d957b54
child 158923 ea36b327a1ee18c08613d1fe36cc6b8d18c8e47f
push id: 25767
push user: ryanvm@gmail.com
push date: Thu, 05 Dec 2013 22:29:08 +0000
treeherder: mozilla-central@ee425b3ccc29
reviewers: nbp
bugs: 937540
milestone: 28.0a1
js/src/jit/IonAllocPolicy.h
js/src/jit/IonBuilder.cpp
js/src/jit/LIR.cpp
js/src/jit/LIR.h
js/src/jit/LinearScan.cpp
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.h
js/src/jit/MoveResolver.h
js/src/jit/ParallelSafetyAnalysis.cpp
js/src/jit/RangeAnalysis.cpp
js/src/jit/RangeAnalysis.h
js/src/jit/StupidAllocator.cpp
js/src/jit/shared/CodeGenerator-shared.h
--- a/js/src/jit/IonAllocPolicy.h
+++ b/js/src/jit/IonAllocPolicy.h
@@ -160,30 +160,39 @@ class AutoIonContextAlloc
     ~AutoIonContextAlloc() {
         JS_ASSERT(icx_->temp == &tempAlloc_);
         icx_->temp = prevAlloc_;
     }
 };
 
 struct TempObject
 {
-    inline void *operator new(size_t nbytes) {
-        return GetIonContext()->temp->allocateInfallible(nbytes);
-    }
     inline void *operator new(size_t nbytes, TempAllocator &alloc) {
         return alloc.allocateInfallible(nbytes);
     }
     template <class T>
     inline void *operator new(size_t nbytes, T *pos) {
         static_assert(mozilla::IsConvertible<T*, TempObject*>::value,
                       "Placement new argument type must inherit from TempObject");
         return pos;
     }
 };
 
+// Deprecated, don't use for (new) classes. Will be removed when all classes have
+// been converted to placement new/TempObject (bug 937540).
+struct OldTempObject
+  : public TempObject
+{
+    using TempObject::operator new;
+
+    inline void *operator new(size_t nbytes) {
+        return GetIonContext()->temp->allocateInfallible(nbytes);
+    }
+};
+
 template <typename T>
 class TempObjectPool
 {
     InlineForwardList<T> freed_;
 
   public:
     T *allocate() {
         if (freed_.empty())
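
The TempObject change above is the core of this patch: Ion compiler objects live in a per-compilation arena managed by TempAllocator, and callers must now name that allocator explicitly with placement new instead of relying on the no-argument operator new (which looked the allocator up via GetIonContext() and survives only on the deprecated OldTempObject). A minimal, hypothetical sketch of the idiom follows; TempAllocator here is a stand-in that just calls malloc, whereas the real one bump-allocates from a LifoAlloc and is infallible (it crashes on OOM rather than returning null).

    #include <cstddef>
    #include <cstdlib>

    // Simplified stand-ins, not the real js::jit classes.
    struct TempAllocator {
        void *allocateInfallible(size_t nbytes) { return malloc(nbytes); }
    };

    struct TempObject {
        // Allocation must go through an explicit allocator: new(alloc) T(...).
        void *operator new(size_t nbytes, TempAllocator &alloc) {
            return alloc.allocateInfallible(nbytes);
        }
    };

    // Mirrors how e.g. DeferredEdge is now allocated in IonBuilder.cpp.
    struct Node : public TempObject {
        Node(int value, Node *next) : value(value), next(next) {}
        int value;
        Node *next;
    };

    Node *prepend(TempAllocator &alloc, int value, Node *list) {
        return new(alloc) Node(value, list);   // was: new Node(value, list)
    }

Making the call sites pass the allocator removes the hidden GetIonContext() lookup from every allocation, which is presumably why the hunks below mechanically rewrite "new Foo(...)" into either "Foo::New(alloc(), ...)" or "new(alloc()) Foo(...)".
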
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -661,17 +661,17 @@ IonBuilder::build()
     current->makeStart(MStart::New(alloc(), MStart::StartType_Default));
     if (instrumentedProfiling())
         current->add(MFunctionBoundary::New(alloc(), script(), MFunctionBoundary::Enter));
 
     // Guard against over-recursion. Do this before we start unboxing, since
     // this will create an OSI point that will read the incoming argument
     // values, which is nice to do before their last real use, to minimize
     // register/stack pressure.
-    MCheckOverRecursed *check = new MCheckOverRecursed;
+    MCheckOverRecursed *check = MCheckOverRecursed::New(alloc());
     current->add(check);
     check->setResumePoint(current->entryResumePoint());
 
     // Parameters have been checked to correspond to the typeset, now we unbox
     // what we can in an infallible manner.
     rewriteParameters();
 
     // It's safe to start emitting actual IR, so now build the scope chain.
@@ -2440,27 +2440,27 @@ IonBuilder::processBreak(JSOp op, jssrcn
     jsbytecode *target = pc + GetJumpOffset(pc);
     DebugOnly<bool> found = false;
 
     if (SN_TYPE(sn) == SRC_BREAK2LABEL) {
         for (size_t i = labels_.length() - 1; i < labels_.length(); i--) {
             CFGState &cfg = cfgStack_[labels_[i].cfgEntry];
             JS_ASSERT(cfg.state == CFGState::LABEL);
             if (cfg.stopAt == target) {
-                cfg.label.breaks = new DeferredEdge(current, cfg.label.breaks);
+                cfg.label.breaks = new(alloc()) DeferredEdge(current, cfg.label.breaks);
                 found = true;
                 break;
             }
         }
     } else {
         for (size_t i = loops_.length() - 1; i < loops_.length(); i--) {
             CFGState &cfg = cfgStack_[loops_[i].cfgEntry];
             JS_ASSERT(cfg.isLoop());
             if (cfg.loop.exitpc == target) {
-                cfg.loop.breaks = new DeferredEdge(current, cfg.loop.breaks);
+                cfg.loop.breaks = new(alloc()) DeferredEdge(current, cfg.loop.breaks);
                 found = true;
                 break;
             }
         }
     }
 
     JS_ASSERT(found);
 
@@ -2494,17 +2494,17 @@ IonBuilder::processContinue(JSOp op)
         }
     }
 
     // There must always be a valid target loop structure. If not, there's
     // probably an off-by-something error in which pc we track.
     JS_ASSERT(found);
     CFGState &state = *found;
 
-    state.loop.continues = new DeferredEdge(current, state.loop.continues);
+    state.loop.continues = new(alloc()) DeferredEdge(current, state.loop.continues);
 
     setCurrent(nullptr);
     pc += js_CodeSpec[op].length;
     return processControlEnd();
 }
 
 IonBuilder::ControlStatus
 IonBuilder::processSwitchBreak(JSOp op)
@@ -2533,17 +2533,17 @@ IonBuilder::processSwitchBreak(JSOp op)
         break;
       case CFGState::COND_SWITCH_BODY:
         breaks = &state.condswitch.breaks;
         break;
       default:
         MOZ_ASSUME_UNREACHABLE("Unexpected switch state.");
     }
 
-    *breaks = new DeferredEdge(current, *breaks);
+    *breaks = new(alloc()) DeferredEdge(current, *breaks);
 
     setCurrent(nullptr);
     pc += js_CodeSpec[op].length;
     return processControlEnd();
 }
 
 IonBuilder::ControlStatus
 IonBuilder::processSwitchEnd(DeferredEdge *breaks, jsbytecode *exitpc)
@@ -5205,17 +5205,17 @@ IonBuilder::makeCallHelper(JSFunction *t
         JS_ASSERT(callInfo.getArg(i)->isPassArg());
         call->addArg(i + 1, callInfo.getArg(i)->toPassArg());
     }
 
     // Place an MPrepareCall before the first passed argument, before we
     // potentially perform rearrangement.
     JS_ASSERT(callInfo.thisArg()->isPassArg());
     MPassArg *thisArg = callInfo.thisArg()->toPassArg();
-    MPrepareCall *start = new MPrepareCall;
+    MPrepareCall *start = MPrepareCall::New(alloc());
     thisArg->block()->insertBefore(thisArg, start);
     call->initPrepareCall(start);
 
     // Inline the constructor on the caller-side.
     if (callInfo.constructing()) {
         MDefinition *create = createThis(target, callInfo.fun());
         if (!create) {
             abort("Failure inlining constructor for call.");
@@ -6721,20 +6721,21 @@ IonBuilder::getElemTryComplexElemOfTyped
                                         MMul::Integer);
     current->add(indexAsByteOffset);
 
     // Find location within the owner object.
     MDefinition *owner, *ownerOffset;
     loadTypedObjectData(obj, indexAsByteOffset, &owner, &ownerOffset);
 
     // Create the derived type object.
-    MInstruction *derived = new MNewDerivedTypedObject(elemTypeReprs,
-                                                       elemType,
-                                                       owner,
-                                                       ownerOffset);
+    MInstruction *derived = MNewDerivedTypedObject::New(alloc(),
+                                                        elemTypeReprs,
+                                                        elemType,
+                                                        owner,
+                                                        ownerOffset);
 
     types::TemporaryTypeSet *resultTypes = bytecodeTypes(pc);
     derived->setResultTypeSet(resultTypes);
     current->add(derived);
     current->push(derived);
 
     return true;
 }
@@ -7696,17 +7697,17 @@ IonBuilder::jsop_length_fastPath()
             objTypes->getKnownClass() == &ArrayObject::class_ &&
             !objTypes->hasObjectFlags(constraints(), types::OBJECT_FLAG_LENGTH_OVERFLOW))
         {
             current->pop();
             MElements *elements = MElements::New(alloc(), obj);
             current->add(elements);
 
             // Read length.
-            MArrayLength *length = new MArrayLength(elements);
+            MArrayLength *length = MArrayLength::New(alloc(), elements);
             current->add(length);
             current->push(length);
             return true;
         }
 
         if (objTypes && objTypes->getTypedArrayType() != ScalarTypeRepresentation::TYPE_MAX) {
             current->pop();
             MInstruction *length = getTypedArrayLength(obj);
@@ -7844,17 +7845,17 @@ IonBuilder::jsop_runonce()
     return resumeAfter(ins);
 }
 
 bool
 IonBuilder::jsop_not()
 {
     MDefinition *value = current->pop();
 
-    MNot *ins = new MNot(value);
+    MNot *ins = MNot::New(alloc(), value);
     current->add(ins);
     current->push(ins);
     ins->infer();
     return true;
 }
 
 bool
 IonBuilder::objectsHaveCommonPrototype(types::TemporaryTypeSet *types, PropertyName *name,
@@ -8357,20 +8358,21 @@ IonBuilder::getPropTryComplexPropOfTyped
     MDefinition *fieldType = typeObjectForFieldFromStructType(type, fieldIndex);
 
     // Find location within the owner object.
     MDefinition *owner, *ownerOffset;
     loadTypedObjectData(typedObj, constantInt(fieldOffset),
                         &owner, &ownerOffset);
 
     // Create the derived type object.
-    MInstruction *derived = new MNewDerivedTypedObject(fieldTypeReprs,
-                                                       fieldType,
-                                                       owner,
-                                                       ownerOffset);
+    MInstruction *derived = MNewDerivedTypedObject::New(alloc(),
+                                                        fieldTypeReprs,
+                                                        fieldType,
+                                                        owner,
+                                                        ownerOffset);
     derived->setResultTypeSet(resultTypes);
     current->add(derived);
     current->push(derived);
     *emitted = true;
     return true;
 }
 
 bool
@@ -9490,17 +9492,17 @@ IonBuilder::jsop_in()
     if (ElementAccessIsDenseNative(obj, id) &&
         !ElementAccessHasExtraIndexedProperty(constraints(), obj))
     {
         return jsop_in_dense();
     }
 
     current->pop();
     current->pop();
-    MIn *ins = new MIn(id, obj);
+    MIn *ins = MIn::New(alloc(), id, obj);
 
     current->add(ins);
     current->push(ins);
 
     return resumeAfter(ins);
 }
 
 bool
@@ -9553,25 +9555,25 @@ IonBuilder::jsop_instanceof()
         types::HeapTypeSetKey protoProperty =
             rhsType->property(NameToId(names().prototype));
         JSObject *protoObject = protoProperty.singleton(constraints());
         if (!protoObject)
             break;
 
         rhs->setFoldedUnchecked();
 
-        MInstanceOf *ins = new MInstanceOf(obj, protoObject);
+        MInstanceOf *ins = MInstanceOf::New(alloc(), obj, protoObject);
 
         current->add(ins);
         current->push(ins);
 
         return resumeAfter(ins);
     } while (false);
 
-    MCallInstanceOf *ins = new MCallInstanceOf(obj, rhs);
+    MCallInstanceOf *ins = MCallInstanceOf::New(alloc(), obj, rhs);
 
     current->add(ins);
     current->push(ins);
 
     return resumeAfter(ins);
 }
 
 MInstruction *
--- a/js/src/jit/LIR.cpp
+++ b/js/src/jit/LIR.cpp
@@ -124,17 +124,17 @@ LSnapshot::init(MIRGenerator *gen)
 {
     slots_ = gen->allocate<LAllocation>(numSlots_);
     return !!slots_;
 }
 
 LSnapshot *
 LSnapshot::New(MIRGenerator *gen, MResumePoint *mir, BailoutKind kind)
 {
-    LSnapshot *snapshot = new LSnapshot(mir, kind);
+    LSnapshot *snapshot = new(gen->alloc()) LSnapshot(mir, kind);
     if (!snapshot->init(gen))
         return nullptr;
 
     IonSpew(IonSpew_Snapshots, "Generating LIR snapshot %p from MIR (%p)",
             (void *)snapshot, (void *)mir);
 
     return snapshot;
 }
@@ -352,17 +352,17 @@ LInstruction::dump()
 {
     return dump(stderr);
 }
 
 void
 LInstruction::initSafepoint(TempAllocator &alloc)
 {
     JS_ASSERT(!safepoint_);
-    safepoint_ = new LSafepoint(alloc);
+    safepoint_ = new(alloc) LSafepoint(alloc);
     JS_ASSERT(safepoint_);
 }
 
 bool
 LMoveGroup::add(LAllocation *from, LAllocation *to)
 {
 #ifdef DEBUG
     JS_ASSERT(*from != *to);
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -106,22 +106,22 @@ class LAllocation : public TempObject
     explicit LAllocation(Kind kind) {
         setKindAndData(kind, 0);
     }
 
   public:
     LAllocation() : bits_(0)
     { }
 
-    static LAllocation *New() {
-        return new LAllocation();
+    static LAllocation *New(TempAllocator &alloc) {
+        return new(alloc) LAllocation();
     }
     template <typename T>
-    static LAllocation *New(const T &other) {
-        return new LAllocation(other);
+    static LAllocation *New(TempAllocator &alloc, const T &other) {
+        return new(alloc) LAllocation(other);
     }
 
     // The value pointer must be rooted in MIR and have its low bit cleared.
     explicit LAllocation(const Value *vp) {
         bits_ = uintptr_t(vp);
         JS_ASSERT(!isTagged());
         bits_ |= TAG_MASK;
     }
@@ -564,17 +564,17 @@ class LDefinition
     LIR_OPCODE_LIST(LIROP)
 #undef LIROP
 
 class LSnapshot;
 class LSafepoint;
 class LInstructionVisitor;
 
 class LInstruction
-  : public TempObject,
+  : public OldTempObject,
     public InlineListNode<LInstruction>
 {
     uint32_t id_;
 
     // This snapshot could be set after a ResumePoint.  It is used to restart
     // from the resume point pc.
     LSnapshot *snapshot_;
 
--- a/js/src/jit/LinearScan.cpp
+++ b/js/src/jit/LinearScan.cpp
@@ -376,23 +376,23 @@ LinearScanAllocator::reifyAllocations()
                 // it should use the fixed register instead.
                 SetOsiPointUses(interval, defEnd, LAllocation(fixedReg));
 
                 if (!moveAfter(defEnd, from, interval))
                     return false;
                 spillFrom = from->getAllocation();
             } else {
                 if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
-                    LAllocation *alloc = reg->ins()->getOperand(def->getReusedInput());
-                    LAllocation *origAlloc = LAllocation::New(*alloc);
+                    LAllocation *inputAlloc = reg->ins()->getOperand(def->getReusedInput());
+                    LAllocation *origAlloc = LAllocation::New(alloc(), *inputAlloc);
 
-                    JS_ASSERT(!alloc->isUse());
+                    JS_ASSERT(!inputAlloc->isUse());
 
-                    *alloc = *interval->getAllocation();
-                    if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, alloc))
+                    *inputAlloc = *interval->getAllocation();
+                    if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, inputAlloc))
                         return false;
                 }
 
                 JS_ASSERT(DefinitionCompatibleWith(reg->ins(), def, *interval->getAllocation()));
                 def->setOutput(*interval->getAllocation());
 
                 spillFrom = interval->getAllocation();
             }
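
A small but easy-to-miss detail in the LinearScan.cpp hunk above: the local previously named alloc is renamed to inputAlloc. The patch does not say why, but a plausible reading is that LAllocation::New now needs the TempAllocator, which the register allocator reaches through an alloc() accessor, and a local variable named alloc would shadow that accessor. A hypothetical sketch of the shadowing hazard the rename avoids:

    // Hypothetical sketch; the real class is LinearScanAllocator.
    struct TempAllocator {};

    struct AllocatorPassSketch {
        TempAllocator tempAlloc_;
        TempAllocator &alloc() { return tempAlloc_; }   // what LAllocation::New(alloc(), ...) uses

        void reifyAllocations() {
            int *inputAlloc = nullptr;        // renamed from 'alloc' so that...
            (void)inputAlloc;
            TempAllocator &a = alloc();       // ...this still resolves to the accessor
            (void)a;
        }
    };
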
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -568,17 +568,17 @@ IonBuilder::inlineMathFloor(CallInfo &ca
     if (argType == MIRType_Int32 && returnType == MIRType_Int32) {
         callInfo.unwrapArgs();
         current->push(callInfo.getArg(0));
         return InliningStatus_Inlined;
     }
 
     if (IsFloatingPointType(argType) && returnType == MIRType_Int32) {
         callInfo.unwrapArgs();
-        MFloor *ins = new MFloor(callInfo.getArg(0));
+        MFloor *ins = MFloor::New(alloc(), callInfo.getArg(0));
         current->add(ins);
         current->push(ins);
         return InliningStatus_Inlined;
     }
 
     if (IsFloatingPointType(argType) && returnType == MIRType_Double) {
         callInfo.unwrapArgs();
         MMathFunction *ins = MMathFunction::New(alloc(), callInfo.getArg(0), MMathFunction::Floor, nullptr);
@@ -606,17 +606,17 @@ IonBuilder::inlineMathRound(CallInfo &ca
     if (argType == MIRType_Int32 && returnType == MIRType_Int32) {
         callInfo.unwrapArgs();
         current->push(callInfo.getArg(0));
         return InliningStatus_Inlined;
     }
 
     if (argType == MIRType_Double && returnType == MIRType_Int32) {
         callInfo.unwrapArgs();
-        MRound *ins = new MRound(callInfo.getArg(0));
+        MRound *ins = MRound::New(alloc(), callInfo.getArg(0));
         current->add(ins);
         current->push(ins);
         return InliningStatus_Inlined;
     }
 
     if (argType == MIRType_Double && returnType == MIRType_Double) {
         callInfo.unwrapArgs();
         MMathFunction *ins = MMathFunction::New(alloc(), callInfo.getArg(0), MMathFunction::Round, nullptr);
@@ -1362,17 +1362,17 @@ IonBuilder::inlineParallelArrayTail(Call
         MDefinition *arg = callInfo.getArg(i + discards);
         MPassArg *passArg = MPassArg::New(alloc(), arg);
         current->add(passArg);
         call->addArg(i + 1, passArg);
     }
 
     // Place an MPrepareCall before the first passed argument, before we
     // potentially perform rearrangement.
-    MPrepareCall *start = new MPrepareCall;
+    MPrepareCall *start = MPrepareCall::New(alloc());
     oldThis->block()->insertBefore(oldThis, start);
     call->initPrepareCall(start);
 
     // Create the MIR to allocate the new parallel array. The type object
     // is taken from the prediction set.
     MNewParallelArray *newObject = MNewParallelArray::New(alloc(), templateObject);
     current->add(newObject);
     MPassArg *newThis = MPassArg::New(alloc(), newObject);
@@ -1434,19 +1434,20 @@ IonBuilder::inlineNewDenseArrayForParall
     types::TypeObject *typeObject = returnTypes->getTypeObject(0);
 
     JSObject *templateObject = inspector->getTemplateObjectForNative(pc, intrinsic_NewDenseArray);
     if (!templateObject || templateObject->type() != typeObject)
         return InliningStatus_NotInlined;
 
     callInfo.unwrapArgs();
 
-    MNewDenseArrayPar *newObject = new MNewDenseArrayPar(graph().forkJoinSlice(),
-                                                         callInfo.getArg(0),
-                                                         templateObject);
+    MNewDenseArrayPar *newObject = MNewDenseArrayPar::New(alloc(),
+                                                          graph().forkJoinSlice(),
+                                                          callInfo.getArg(0),
+                                                          templateObject);
     current->add(newObject);
     current->push(newObject);
 
     return InliningStatus_Inlined;
 }
 
 IonBuilder::InliningStatus
 IonBuilder::inlineUnsafeSetReservedSlot(CallInfo &callInfo)
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -384,17 +384,17 @@ class MDefinition : public MNode
     virtual void analyzeEdgeCasesBackward();
 
     virtual bool truncate();
     virtual bool isOperandTruncated(size_t index) const;
 
     bool earlyAbortCheck();
 
     // Compute an absolute or symbolic range for the value of this node.
-    virtual void computeRange() {
+    virtual void computeRange(TempAllocator &alloc) {
     }
 
     // Collect information from the pre-truncated ranges.
     virtual void collectRangeInfoPreTrunc() {
     }
 
     MNode::Kind kind() const {
         return MNode::Definition;
@@ -986,17 +986,17 @@ class MConstant : public MNullaryInstruc
         // value by a double value.
         if (type() == MIRType_Float32)
             return c->type() == MIRType_Float32;
         if (type() == MIRType_Double)
             return c->type() != MIRType_Float32;
         return true;
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool truncate();
 
     bool canProduceFloat32() const;
 };
 
 class MParameter : public MNullaryInstruction
 {
     int32_t index_;
@@ -1507,26 +1507,30 @@ class MNewObject : public MNullaryInstru
     }
 };
 
 // Could be allocating either a new array or a new object.
 class MNewPar : public MUnaryInstruction
 {
     CompilerRootObject templateObject_;
 
-  public:
-    INSTRUCTION_HEADER(NewPar);
-
     MNewPar(MDefinition *slice, JSObject *templateObject)
       : MUnaryInstruction(slice),
         templateObject_(templateObject)
     {
         setResultType(MIRType_Object);
     }
 
+  public:
+    INSTRUCTION_HEADER(NewPar);
+
+    static MNewPar *New(TempAllocator &alloc, MDefinition *slice, JSObject *templateObject) {
+        return new(alloc) MNewPar(slice, templateObject);
+    }
+
     MDefinition *forkJoinSlice() const {
         return getOperand(0);
     }
 
     JSObject *templateObject() const {
         return templateObject_;
     }
 };
@@ -1547,30 +1551,36 @@ class MNewDerivedTypedObject
   : public MTernaryInstruction,
     public Mix3Policy<ObjectPolicy<0>,
                       ObjectPolicy<1>,
                       IntPolicy<2> >
 {
   private:
     TypeRepresentationSet set_;
 
-  public:
-    INSTRUCTION_HEADER(NewDerivedTypedObject);
-
     MNewDerivedTypedObject(TypeRepresentationSet set,
                            MDefinition *type,
                            MDefinition *owner,
                            MDefinition *offset)
       : MTernaryInstruction(type, owner, offset),
         set_(set)
     {
         setMovable();
         setResultType(MIRType_Object);
     }
 
+  public:
+    INSTRUCTION_HEADER(NewDerivedTypedObject);
+
+    static MNewDerivedTypedObject *New(TempAllocator &alloc, TypeRepresentationSet set,
+                                       MDefinition *type, MDefinition *owner, MDefinition *offset)
+    {
+        return new(alloc) MNewDerivedTypedObject(set, type, owner, offset);
+    }
+
     TypeRepresentationSet set() const {
         return set_;
     }
 
     MDefinition *type() const {
         return getOperand(0);
     }
 
@@ -1759,18 +1769,19 @@ class MInitElemGetterSetter
 // Designates the start of call frame construction.
 // Generates code to adjust the stack pointer for the argument vector.
 // Argc is inferred by checking the use chain during lowering.
 class MPrepareCall : public MNullaryInstruction
 {
   public:
     INSTRUCTION_HEADER(PrepareCall)
 
-    MPrepareCall()
-    { }
+    static MPrepareCall *New(TempAllocator &alloc) {
+        return new(alloc) MPrepareCall();
+    }
 
     // Get the vector size for the upcoming call by looking at the call.
     uint32_t argc() const;
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 };
@@ -2894,17 +2905,17 @@ class MToDouble
         if (!ins->isToDouble() || ins->toToDouble()->conversion() != conversion())
             return false;
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool truncate();
     bool isOperandTruncated(size_t index) const;
 
 #ifdef DEBUG
     bool isConsistentFloat32Use() const { return true; }
 #endif
 };
 
@@ -2956,17 +2967,17 @@ class MToFloat32
         if (!ins->isToFloat32() || ins->toToFloat32()->conversion() != conversion())
             return false;
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 
     bool canConsumeFloat32() const { return true; }
     bool canProduceFloat32() const { return true; }
 };
 
 // Converts a uint32 to a double (coming from asm.js).
 class MAsmJSUnsignedToDouble
   : public MUnaryInstruction
@@ -3063,17 +3074,17 @@ class MToInt32
 
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 
 #ifdef DEBUG
     bool isConsistentFloat32Use() const { return true; }
 #endif
 };
 
 // Converts a value or typed input to a truncated int32, for use with bitwise
 // operations. This is an infallible ValueToECMAInt32.
@@ -3099,17 +3110,17 @@ class MTruncateToInt32 : public MUnaryIn
 
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool isOperandTruncated(size_t index) const;
 # ifdef DEBUG
     bool isConsistentFloat32Use() const {
         return true;
     }
 #endif
 };
 
@@ -3168,17 +3179,17 @@ class MBitNot
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         if (specialization_ == MIRType_None)
             return AliasSet::Store(AliasSet::Any);
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MTypeOf
   : public MUnaryInstruction,
     public BoxInputsPolicy
 {
     MIRType inputType_;
     bool inputMaybeCallableOrEmulatesUndefined_;
@@ -3295,17 +3306,17 @@ class MBitAnd : public MBinaryBitwiseIns
         return getOperand(operand); // 0 & x => 0;
     }
     MDefinition *foldIfNegOne(size_t operand) {
         return getOperand(1 - operand); // x & -1 => x
     }
     MDefinition *foldIfEqual() {
         return getOperand(0); // x & x => x;
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MBitOr : public MBinaryBitwiseInstruction
 {
     MBitOr(MDefinition *left, MDefinition *right)
       : MBinaryBitwiseInstruction(left, right)
     { }
 
@@ -3318,17 +3329,17 @@ class MBitOr : public MBinaryBitwiseInst
         return getOperand(1 - operand); // 0 | x => x, so if ith is 0, return (1-i)th
     }
     MDefinition *foldIfNegOne(size_t operand) {
         return getOperand(operand); // x | -1 => -1
     }
     MDefinition *foldIfEqual() {
         return getOperand(0); // x | x => x
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MBitXor : public MBinaryBitwiseInstruction
 {
     MBitXor(MDefinition *left, MDefinition *right)
       : MBinaryBitwiseInstruction(left, right)
     { }
 
@@ -3341,17 +3352,17 @@ class MBitXor : public MBinaryBitwiseIns
         return getOperand(1 - operand); // 0 ^ x => x
     }
     MDefinition *foldIfNegOne(size_t operand) {
         return this;
     }
     MDefinition *foldIfEqual() {
         return this;
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MShiftInstruction
   : public MBinaryBitwiseInstruction
 {
   protected:
     MShiftInstruction(MDefinition *left, MDefinition *right)
       : MBinaryBitwiseInstruction(left, right)
@@ -3379,17 +3390,17 @@ class MLsh : public MShiftInstruction
     static MLsh *NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right);
 
     MDefinition *foldIfZero(size_t operand) {
         // 0 << x => 0
         // x << 0 => x
         return getOperand(0);
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MRsh : public MShiftInstruction
 {
     MRsh(MDefinition *left, MDefinition *right)
       : MShiftInstruction(left, right)
     { }
 
@@ -3398,17 +3409,17 @@ class MRsh : public MShiftInstruction
     static MRsh *New(TempAllocator &alloc, MDefinition *left, MDefinition *right);
     static MRsh *NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right);
 
     MDefinition *foldIfZero(size_t operand) {
         // 0 >> x => 0
         // x >> 0 => x
         return getOperand(0);
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MUrsh : public MShiftInstruction
 {
     bool bailoutsDisabled_;
 
     MUrsh(MDefinition *left, MDefinition *right)
       : MShiftInstruction(left, right),
@@ -3431,17 +3442,17 @@ class MUrsh : public MShiftInstruction
     void infer(BaselineInspector *inspector, jsbytecode *pc);
 
     bool bailoutsDisabled() const {
         return bailoutsDisabled_;
     }
 
     bool fallible() const;
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     void collectRangeInfoPreTrunc();
 };
 
 class MBinaryArithInstruction
   : public MBinaryInstruction,
     public ArithPolicy
 {
     // Implicit truncate flag is set by the truncate backward range analysis
@@ -3541,17 +3552,17 @@ class MMinMax
         if (isMax() != ins->toMinMax()->isMax())
             return false;
         return congruentIfOperandsEqual(ins);
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MAbs
   : public MUnaryInstruction,
     public ArithPolicy
 {
     bool implicitTruncate_;
 
@@ -3585,17 +3596,17 @@ class MAbs
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     bool fallible() const;
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool isFloat32Commutative() const { return true; }
     void trySpecializeFloat32(TempAllocator &alloc);
 };
 
 // Inline implementation of Math.sqrt().
 class MSqrt
   : public MUnaryInstruction,
     public FloatingPointPolicy<0>
@@ -3625,17 +3636,17 @@ class MSqrt
     }
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 
     bool isFloat32Commutative() const { return true; }
     void trySpecializeFloat32(TempAllocator &alloc);
 };
 
 // Inline implementation of atan2 (arctangent of y/x).
 class MAtan2
   : public MBinaryInstruction,
@@ -3825,17 +3836,17 @@ class MRandom : public MNullaryInstructi
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 
     bool possiblyCalls() const {
         return true;
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MMathFunction
   : public MUnaryInstruction,
     public FloatingPointPolicy<0>
 {
   public:
     enum Function {
@@ -3915,17 +3926,17 @@ class MMathFunction
     static const char *FunctionName(Function function);
 
     bool isFloat32Commutative() const {
         return function_ == Log || function_ == Sin || function_ == Cos
                || function_ == Exp || function_ == Tan || function_ == ATan
                || function_ == ASin || function_ == ACos || function_ == Floor;
     }
     void trySpecializeFloat32(TempAllocator &alloc);
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MAdd : public MBinaryArithInstruction
 {
     // Is this instruction really an int at heart?
     MAdd(MDefinition *left, MDefinition *right)
       : MBinaryArithInstruction(left, right)
     {
@@ -3953,17 +3964,17 @@ class MAdd : public MBinaryArithInstruct
 
     bool isFloat32Commutative() const { return true; }
 
     double getIdentity() {
         return 0;
     }
 
     bool fallible() const;
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool truncate();
     bool isOperandTruncated(size_t index) const;
 };
 
 class MSub : public MBinaryArithInstruction
 {
     MSub(MDefinition *left, MDefinition *right)
       : MBinaryArithInstruction(left, right)
@@ -3989,17 +4000,17 @@ class MSub : public MBinaryArithInstruct
 
     double getIdentity() {
         return 0;
     }
 
     bool isFloat32Commutative() const { return true; }
 
     bool fallible() const;
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool truncate();
     bool isOperandTruncated(size_t index) const;
 };
 
 class MMul : public MBinaryArithInstruction
 {
   public:
     enum Mode {
@@ -4078,17 +4089,17 @@ class MMul : public MBinaryArithInstruct
     bool updateForReplacement(MDefinition *ins);
 
     bool fallible() const {
         return canBeNegativeZero_ || canOverflow();
     }
 
     bool isFloat32Commutative() const { return true; }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool truncate();
     bool isOperandTruncated(size_t index) const;
 
     Mode mode() { return mode_; }
 };
 
 class MDiv : public MBinaryArithInstruction
 {
@@ -4157,17 +4168,17 @@ class MDiv : public MBinaryArithInstruct
     }
 
     bool isUnsigned() const {
         return unsigned_;
     }
 
     bool isFloat32Commutative() const { return true; }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool fallible() const;
     bool truncate();
     void collectRangeInfoPreTrunc();
 };
 
 class MMod : public MBinaryArithInstruction
 {
     bool unsigned_;
@@ -4212,17 +4223,17 @@ class MMod : public MBinaryArithInstruct
     bool canBePowerOfTwoDivisor() const;
 
     bool isUnsigned() const {
         return unsigned_;
     }
 
     bool fallible() const;
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool truncate();
     void collectRangeInfoPreTrunc();
 };
 
 class MConcat
   : public MBinaryInstruction,
     public BinaryStringPolicy
 {
@@ -4311,17 +4322,17 @@ class MCharCodeAt
         return this;
     }
 
     virtual AliasSet getAliasSet() const {
         // Strings are immutable, so there is no implicit dependency.
         return AliasSet::None();
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MFromCharCode
   : public MUnaryInstruction,
     public IntPolicy<0>
 {
     MFromCharCode(MDefinition *code)
       : MUnaryInstruction(code)
@@ -4522,17 +4533,17 @@ class MPhi MOZ_FINAL : public MDefinitio
     }
     void setIterator() {
         isIterator_ = true;
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 
     MDefinition *operandIfRedundant() {
         // If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)),
         // returns the operand that it will always be equal to (a, in
         // those two cases).
         MDefinition *first = getOperand(0);
         for (size_t i = 1, e = numOperands(); i < e; i++) {
             if (getOperand(i) != first && getOperand(i) != this)
@@ -4584,17 +4595,17 @@ class MBeta : public MUnaryInstruction
     {
         return new(alloc) MBeta(val, comp);
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // MIR representation of a Value on the OSR StackFrame.
 // The Value is indexed off of OsrFrameReg.
 class MOsrValue : public MUnaryInstruction
 {
   private:
     ptrdiff_t frameOffset_;
@@ -4691,52 +4702,64 @@ class MOsrReturnValue : public MUnaryIns
     }
 };
 
 // Check the current frame for over-recursion past the global stack limit.
 class MCheckOverRecursed : public MNullaryInstruction
 {
   public:
     INSTRUCTION_HEADER(CheckOverRecursed)
+
+    static MCheckOverRecursed *New(TempAllocator &alloc) {
+        return new(alloc) MCheckOverRecursed();
+    }
 };
 
 // Check the current frame for over-recursion past the global stack limit.
 // Uses the per-thread recursion limit.
 class MCheckOverRecursedPar : public MUnaryInstruction
 {
-  public:
-    INSTRUCTION_HEADER(CheckOverRecursedPar);
-
     MCheckOverRecursedPar(MDefinition *slice)
       : MUnaryInstruction(slice)
     {
         setResultType(MIRType_None);
         setGuard();
         setMovable();
     }
 
+  public:
+    INSTRUCTION_HEADER(CheckOverRecursedPar);
+
+    static MCheckOverRecursedPar *New(TempAllocator &alloc, MDefinition *slice) {
+        return new(alloc) MCheckOverRecursedPar(slice);
+    }
+
     MDefinition *forkJoinSlice() const {
         return getOperand(0);
     }
 };
 
 // Check for an interrupt (or rendezvous) in parallel mode.
 class MCheckInterruptPar : public MUnaryInstruction
 {
-  public:
-    INSTRUCTION_HEADER(CheckInterruptPar);
-
     MCheckInterruptPar(MDefinition *slice)
       : MUnaryInstruction(slice)
     {
         setResultType(MIRType_None);
         setGuard();
         setMovable();
     }
 
+  public:
+    INSTRUCTION_HEADER(CheckInterruptPar);
+
+    static MCheckInterruptPar *New(TempAllocator &alloc, MDefinition *slice) {
+        return new(alloc) MCheckInterruptPar(slice);
+    }
+
     MDefinition *forkJoinSlice() const {
         return getOperand(0);
     }
 };
 
 // Check whether we need to fire the interrupt handler.
 class MInterruptCheck : public MNullaryInstruction
 {
@@ -5229,17 +5252,17 @@ class MInitializedLength
     }
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::Load(AliasSet::ObjectFields);
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // Store to the initialized length in an elements header. Note the input is an
 // *index*, one less than the desired length.
 class MSetInitializedLength
   : public MAryInstruction<2>
 {
     MSetInitializedLength(MDefinition *elements, MDefinition *index) {
@@ -5264,37 +5287,41 @@ class MSetInitializedLength
         return AliasSet::Store(AliasSet::ObjectFields);
     }
 };
 
 // Load the array length from an elements header.
 class MArrayLength
   : public MUnaryInstruction
 {
-  public:
     MArrayLength(MDefinition *elements)
       : MUnaryInstruction(elements)
     {
         setResultType(MIRType_Int32);
         setMovable();
     }
 
+  public:
     INSTRUCTION_HEADER(ArrayLength)
 
+    static MArrayLength *New(TempAllocator &alloc, MDefinition *elements) {
+        return new(alloc) MArrayLength(elements);
+    }
+
     MDefinition *elements() const {
         return getOperand(0);
     }
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::Load(AliasSet::ObjectFields);
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // Store to the length in an elements header. Note the input is an *index*, one
 // less than the desired length.
 class MSetArrayLength
   : public MAryInstruction<2>
 {
     MSetArrayLength(MDefinition *elements, MDefinition *index) {
@@ -5349,17 +5376,17 @@ class MTypedArrayLength
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         // The typed array |length| property is immutable, so there is no
         // implicit dependency.
         return AliasSet::None();
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // Load a typed array's elements vector.
 class MTypedArrayElements
   : public MUnaryInstruction,
     public SingleObjectPolicy
 {
     MTypedArrayElements(MDefinition *object)
@@ -5541,17 +5568,17 @@ class MBoundsCheck
         MBoundsCheck *other = ins->toBoundsCheck();
         if (minimum() != other->minimum() || maximum() != other->maximum())
             return false;
         return congruentIfOperandsEqual(other);
     }
     virtual AliasSet getAliasSet() const {
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // Bailout if index < minimum.
 class MBoundsCheckLower
   : public MUnaryInstruction
 {
     int32_t minimum_;
     bool fallible_;
@@ -5900,17 +5927,17 @@ class MArrayPush
         return getOperand(1);
     }
     TypePolicy *typePolicy() {
         return this;
     }
     AliasSet getAliasSet() const {
         return AliasSet::Store(AliasSet::Element | AliasSet::ObjectFields);
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // Array.prototype.concat on two dense arrays.
 class MArrayConcat
   : public MBinaryInstruction,
     public MixPolicy<ObjectPolicy<0>, ObjectPolicy<1> >
 {
     CompilerRootObject templateObj_;
@@ -5992,17 +6019,17 @@ class MLoadTypedArrayElement
         return getOperand(1);
     }
     AliasSet getAliasSet() const {
         return AliasSet::Load(AliasSet::TypedArrayElement);
     }
 
     void printOpcode(FILE *fp) const;
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 
     bool canProduceFloat32() const { return arrayType_ == ScalarTypeRepresentation::TYPE_FLOAT32; }
 };
 
 // Load a value from a typed array. Out-of-bounds accesses are handled using
 // a VM call.
 class MLoadTypedArrayElementHole
   : public MBinaryInstruction,
@@ -6100,17 +6127,17 @@ class MLoadTypedArrayElementStatic
     void setInfallible() {
         fallible_ = false;
     }
 
     TypePolicy *typePolicy() {
         return this;
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
     bool truncate();
     bool canProduceFloat32() const { return typedArray_->type() == ScalarTypeRepresentation::TYPE_FLOAT32; }
 };
 
 class MStoreTypedArrayElement
   : public MTernaryInstruction,
     public StoreTypedArrayPolicy
 {
@@ -6350,17 +6377,17 @@ class MClampToUint8
         return this;
     }
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 class MLoadFixedSlot
   : public MUnaryInstruction,
     public SingleObjectPolicy
 {
     size_t slot_;
 
@@ -8010,35 +8037,39 @@ class MStringLength
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         // The string |length| property is immutable, so there is no
         // implicit dependency.
         return AliasSet::None();
     }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // Inlined version of Math.floor().
 class MFloor
   : public MUnaryInstruction,
     public FloatingPointPolicy<0>
 {
-  public:
     MFloor(MDefinition *num)
       : MUnaryInstruction(num)
     {
         setResultType(MIRType_Int32);
         setPolicyType(MIRType_Double);
         setMovable();
     }
 
+  public:
     INSTRUCTION_HEADER(Floor)
 
+    static MFloor *New(TempAllocator &alloc, MDefinition *num) {
+        return new(alloc) MFloor(num);
+    }
+
     MDefinition *num() const {
         return getOperand(0);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
     TypePolicy *typePolicy() {
         return this;
@@ -8054,26 +8085,30 @@ class MFloor
 #endif
 };
 
 // Inlined version of Math.round().
 class MRound
   : public MUnaryInstruction,
     public DoublePolicy<0>
 {
-  public:
     MRound(MDefinition *num)
       : MUnaryInstruction(num)
     {
         setResultType(MIRType_Int32);
         setMovable();
     }
 
+  public:
     INSTRUCTION_HEADER(Round)
 
+    static MRound *New(TempAllocator &alloc, MDefinition *num) {
+        return new(alloc) MRound(num);
+    }
+
     MDefinition *num() const {
         return getOperand(0);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
     TypePolicy *typePolicy() {
         return this;
@@ -8183,25 +8218,29 @@ class MIteratorEnd
     }
 };
 
 // Implementation for 'in' operator.
 class MIn
   : public MBinaryInstruction,
     public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >
 {
-  public:
     MIn(MDefinition *key, MDefinition *obj)
       : MBinaryInstruction(key, obj)
     {
         setResultType(MIRType_Boolean);
     }
 
+  public:
     INSTRUCTION_HEADER(In)
 
+    static MIn *New(TempAllocator &alloc, MDefinition *key, MDefinition *obj) {
+        return new(alloc) MIn(key, obj);
+    }
+
     TypePolicy *typePolicy() {
         return this;
     }
     bool possiblyCalls() const {
         return true;
     }
 };
 
@@ -8268,49 +8307,57 @@ class MInArray
 
 // Implementation for instanceof operator with specific rhs.
 class MInstanceOf
   : public MUnaryInstruction,
     public InstanceOfPolicy
 {
     CompilerRootObject protoObj_;
 
-  public:
     MInstanceOf(MDefinition *obj, JSObject *proto)
       : MUnaryInstruction(obj),
         protoObj_(proto)
     {
         setResultType(MIRType_Boolean);
     }
 
+  public:
     INSTRUCTION_HEADER(InstanceOf)
 
+    static MInstanceOf *New(TempAllocator &alloc, MDefinition *obj, JSObject *proto) {
+        return new(alloc) MInstanceOf(obj, proto);
+    }
+
     TypePolicy *typePolicy() {
         return this;
     }
 
     JSObject *prototypeObject() {
         return protoObj_;
     }
 };
 
 // Implementation for instanceof operator with unknown rhs.
 class MCallInstanceOf
   : public MBinaryInstruction,
     public MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >
 {
-  public:
     MCallInstanceOf(MDefinition *obj, MDefinition *proto)
       : MBinaryInstruction(obj, proto)
     {
         setResultType(MIRType_Boolean);
     }
 
+  public:
     INSTRUCTION_HEADER(CallInstanceOf)
 
+    static MCallInstanceOf *New(TempAllocator &alloc, MDefinition *obj, MDefinition *proto) {
+        return new(alloc) MCallInstanceOf(obj, proto);
+    }
+
     TypePolicy *typePolicy() {
         return this;
     }
 };
 
 class MArgumentsLength : public MNullaryInstruction
 {
     MArgumentsLength()
@@ -8329,17 +8376,17 @@ class MArgumentsLength : public MNullary
     bool congruentTo(MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         // Arguments |length| cannot be mutated by Ion Code.
         return AliasSet::None();
    }
 
-    void computeRange();
+    void computeRange(TempAllocator &alloc);
 };
 
 // This MIR instruction is used to get an argument from the actual arguments.
 class MGetFrameArgument
   : public MUnaryInstruction,
     public IntPolicy<0>
 {
     bool scriptHasSetArg_;
@@ -8910,26 +8957,32 @@ class MEnclosingScope : public MLoadFixe
 
 // Creates a dense array of the given length.
 //
 // Note: the template object should be an *empty* dense array!
 class MNewDenseArrayPar : public MBinaryInstruction
 {
     CompilerRootObject templateObject_;
 
-  public:
-    INSTRUCTION_HEADER(NewDenseArrayPar);
-
     MNewDenseArrayPar(MDefinition *slice, MDefinition *length, JSObject *templateObject)
       : MBinaryInstruction(slice, length),
         templateObject_(templateObject)
     {
         setResultType(MIRType_Object);
     }
 
+  public:
+    INSTRUCTION_HEADER(NewDenseArrayPar);
+
+    static MNewDenseArrayPar *New(TempAllocator &alloc, MDefinition *slice, MDefinition *length,
+                                  JSObject *templateObject)
+    {
+        return new(alloc) MNewDenseArrayPar(slice, length, templateObject);
+    }
+
     MDefinition *forkJoinSlice() const {
         return getOperand(0);
     }
 
     MDefinition *length() const {
         return getOperand(1);
     }
 
--- a/js/src/jit/MoveResolver.h
+++ b/js/src/jit/MoveResolver.h
@@ -152,17 +152,17 @@ class MoveResolver
         Kind kind() const {
             return kind_;
         }
     };
 
   private:
     struct PendingMove
       : public Move,
-        public TempObject,
+        public OldTempObject,
         public InlineListNode<PendingMove>
     {
         PendingMove()
         { }
         PendingMove(const MoveOperand &from, const MoveOperand &to, Kind kind)
           : Move(from, to, kind, false)
         { }
         
--- a/js/src/jit/ParallelSafetyAnalysis.cpp
+++ b/js/src/jit/ParallelSafetyAnalysis.cpp
@@ -591,17 +591,17 @@ ParallelSafetyVisitor::visitToString(MTo
         return markUnsafe();
     return true;
 }
 
 bool
 ParallelSafetyVisitor::replaceWithNewPar(MInstruction *newInstruction,
                                          JSObject *templateObject)
 {
-    replace(newInstruction, new MNewPar(forkJoinSlice(), templateObject));
+    replace(newInstruction, MNewPar::New(alloc(), forkJoinSlice(), templateObject));
     return true;
 }
 
 bool
 ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
                                MInstruction *replacementInstruction)
 {
     MBasicBlock *block = oldInstruction->block();
@@ -734,23 +734,23 @@ ParallelSafetyVisitor::visitCall(MCall *
 // In sequential Ion code, the stack limit is stored in the JSRuntime.
 // We store it in the thread context.  We therefore need a separate
 // instruction to access it, one parameterized by the thread context.
 // Similar considerations apply to checking for interrupts.
 
 bool
 ParallelSafetyVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
 {
-    return replace(ins, new MCheckOverRecursedPar(forkJoinSlice()));
+    return replace(ins, MCheckOverRecursedPar::New(alloc(), forkJoinSlice()));
 }
 
 bool
 ParallelSafetyVisitor::visitInterruptCheck(MInterruptCheck *ins)
 {
-    return replace(ins, new MCheckInterruptPar(forkJoinSlice()));
+    return replace(ins, MCheckInterruptPar::New(alloc(), forkJoinSlice()));
 }
 
 /////////////////////////////////////////////////////////////////////////////
 // Specialized ops
 //
 // Some ops, like +, can be specialized to ints/doubles.  Anything
 // else is terrifying.
 //
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -186,21 +186,23 @@ RangeAnalysis::addBetaNodes()
                 smaller = left;
                 greater = right;
             } else if (jsop == JSOP_GT) {
                 smaller = right;
                 greater = left;
             }
             if (smaller && greater) {
                 MBeta *beta;
-                beta = MBeta::New(alloc(), smaller, Range::NewInt32Range(JSVAL_INT_MIN, JSVAL_INT_MAX-1));
+                beta = MBeta::New(alloc(), smaller,
+                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX-1));
                 block->insertBefore(*block->begin(), beta);
                 replaceDominatedUsesWith(smaller, beta, block);
                 IonSpew(IonSpew_Range, "Adding beta node for smaller %d", smaller->id());
-                beta = MBeta::New(alloc(), greater, Range::NewInt32Range(JSVAL_INT_MIN+1, JSVAL_INT_MAX));
+                beta = MBeta::New(alloc(), greater,
+                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN+1, JSVAL_INT_MAX));
                 block->insertBefore(*block->begin(), beta);
                 replaceDominatedUsesWith(greater, beta, block);
                 IonSpew(IonSpew_Range, "Adding beta node for greater %d", greater->id());
             }
             continue;
         } else {
             continue;
         }
@@ -244,17 +246,17 @@ RangeAnalysis::addBetaNodes()
         }
 
         if (IonSpewEnabled(IonSpew_Range)) {
             IonSpewHeader(IonSpew_Range);
             fprintf(IonSpewFile, "Adding beta node for %d with range ", val->id());
             comp.dump(IonSpewFile);
         }
 
-        MBeta *beta = MBeta::New(alloc(), val, new Range(comp));
+        MBeta *beta = MBeta::New(alloc(), val, new(alloc()) Range(comp));
         block->insertBefore(*block->begin(), beta);
         replaceDominatedUsesWith(val, beta, block);
     }
 
     return true;
 }
 
 bool
@@ -376,27 +378,27 @@ Range::dump(FILE *fp) const
 
 void
 Range::dump() const
 {
     dump(stderr);
 }
 
 Range *
-Range::intersect(const Range *lhs, const Range *rhs, bool *emptyRange)
+Range::intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs, bool *emptyRange)
 {
     *emptyRange = false;
 
     if (!lhs && !rhs)
         return nullptr;
 
     if (!lhs)
-        return new Range(*rhs);
+        return new(alloc) Range(*rhs);
     if (!rhs)
-        return new Range(*lhs);
+        return new(alloc) Range(*lhs);
 
     int32_t newLower = Max(lhs->lower_, rhs->lower_);
     int32_t newUpper = Min(lhs->upper_, rhs->upper_);
 
     // :TODO: This information could be used better. If upper < lower, then we
     // have conflicting constraints. Consider:
     //
     // if (x < 0) {
@@ -456,18 +458,18 @@ Range::intersect(const Range *lhs, const
         // push the bounds past each other, since the actual intersection is
         // the empty set.
         if (newLower > newUpper) {
             *emptyRange = true;
             return nullptr;
         }
     }
 
-    return new Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
-                     newFractional, newExponent);
+    return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
+                            newFractional, newExponent);
 }
 
 void
 Range::unionWith(const Range *other)
 {
     int32_t newLower = Min(lower_, other->lower_);
     int32_t newUpper = Max(upper_, other->upper_);
 
@@ -585,17 +587,17 @@ Range::setDouble(double l, double h)
 static inline bool
 MissingAnyInt32Bounds(const Range *lhs, const Range *rhs)
 {
     return !lhs->hasInt32LowerBound() || !lhs->hasInt32UpperBound() ||
            !rhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound();
 }
 
 Range *
-Range::add(const Range *lhs, const Range *rhs)
+Range::add(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_;
     if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
         l = NoInt32LowerBound;
 
     int64_t h = (int64_t) lhs->upper_ + (int64_t) rhs->upper_;
     if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound())
         h = NoInt32UpperBound;
@@ -605,21 +607,21 @@ Range::add(const Range *lhs, const Range
     uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
     if (e <= Range::MaxFiniteExponent)
         ++e;
 
     // Infinity + -Infinity is NaN.
     if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
         e = Range::IncludesInfinityAndNaN;
 
-    return new Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
+    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
 }
 
 Range *
-Range::sub(const Range *lhs, const Range *rhs)
+Range::sub(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_;
     if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
         l = NoInt32LowerBound;
 
     int64_t h = (int64_t) lhs->upper_ - (int64_t) rhs->lower_;
     if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound())
         h = NoInt32UpperBound;
@@ -629,66 +631,66 @@ Range::sub(const Range *lhs, const Range
     uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
     if (e <= Range::MaxFiniteExponent)
         ++e;
 
     // Infinity - Infinity is NaN.
     if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
         e = Range::IncludesInfinityAndNaN;
 
-    return new Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
+    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
 }
 
 Range *
-Range::and_(const Range *lhs, const Range *rhs)
+Range::and_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     JS_ASSERT(lhs->isInt32());
     JS_ASSERT(rhs->isInt32());
 
     // If both numbers can be negative, result can be negative in the whole range
     if (lhs->lower() < 0 && rhs->lower() < 0)
-        return Range::NewInt32Range(INT32_MIN, Max(lhs->upper(), rhs->upper()));
+        return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper()));
 
     // Only one of both numbers can be negative.
     // - result can't be negative
     // - Upper bound is minimum of both upper range,
     int32_t lower = 0;
     int32_t upper = Min(lhs->upper(), rhs->upper());
 
     // EXCEPT when upper bound of non negative number is max value,
     // because negative value can return the whole max value.
     // -1 & 5 = 5
     if (lhs->lower() < 0)
        upper = rhs->upper();
     if (rhs->lower() < 0)
         upper = lhs->upper();
 
-    return Range::NewInt32Range(lower, upper);
+    return Range::NewInt32Range(alloc, lower, upper);
 }
 
 Range *
-Range::or_(const Range *lhs, const Range *rhs)
+Range::or_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     JS_ASSERT(lhs->isInt32());
     JS_ASSERT(rhs->isInt32());
     // When one operand is always 0 or always -1, it's a special case where we
     // can compute a fully precise result. Handling these up front also
     // protects the code below from calling CountLeadingZeroes32 with a zero
     // operand or from shifting an int32_t by 32.
     if (lhs->lower() == lhs->upper()) {
         if (lhs->lower() == 0)
-            return new Range(*rhs);
+            return new(alloc) Range(*rhs);
         if (lhs->lower() == -1)
-            return new Range(*lhs);;
+            return new(alloc) Range(*lhs);
     }
     if (rhs->lower() == rhs->upper()) {
         if (rhs->lower() == 0)
-            return new Range(*lhs);
+            return new(alloc) Range(*lhs);
         if (rhs->lower() == -1)
-            return new Range(*rhs);;
+            return new(alloc) Range(*rhs);
     }
 
     // The code below uses CountLeadingZeroes32, which has undefined behavior
     // if its operand is 0. We rely on the code above to protect it.
     JS_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
     JS_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
     JS_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
     JS_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);
@@ -713,21 +715,21 @@ Range::or_(const Range *lhs, const Range
         }
         if (rhs->upper() < 0) {
             unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
             lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
             upper = -1;
         }
     }
 
-    return Range::NewInt32Range(lower, upper);
+    return Range::NewInt32Range(alloc, lower, upper);
 }
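
A standalone spot-check (plain C++, with rhs pinned to the single value -4) of the negative-operand bound used above: the result keeps rhs's sign bit, so the upper bound is -1, and since OR only sets bits the result can't drop below rhs's lower bound.

#include <cassert>
#include <cstdint>

int main() {
    const int32_t rhsLower = -4;              // 0xFFFFFFFC, i.e. 30 leading one bits
    for (int32_t x = -64; x <= 64; x++) {
        int32_t r = x | rhsLower;
        assert(r <= -1 && r >= rhsLower);
    }
    return 0;
}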
 
 Range *
-Range::xor_(const Range *lhs, const Range *rhs)
+Range::xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     JS_ASSERT(lhs->isInt32());
     JS_ASSERT(rhs->isInt32());
     int32_t lhsLower = lhs->lower();
     int32_t lhsUpper = lhs->upper();
     int32_t rhsLower = rhs->lower();
     int32_t rhsUpper = rhs->upper();
     bool invertAfter = false;
@@ -777,28 +779,28 @@ Range::xor_(const Range *lhs, const Rang
     // If we bitwise-negated one (but not both) of the operands above, apply the
     // bitwise-negate to the result, completing ~((~x)^y) == x^y.
     if (invertAfter) {
         lower = ~lower;
         upper = ~upper;
         Swap(lower, upper);
     }
 
-    return Range::NewInt32Range(lower, upper);
+    return Range::NewInt32Range(alloc, lower, upper);
 }
 
 Range *
-Range::not_(const Range *op)
+Range::not_(TempAllocator &alloc, const Range *op)
 {
     JS_ASSERT(op->isInt32());
-    return Range::NewInt32Range(~op->upper(), ~op->lower());
+    return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
 }
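
The identity behind not_, checked as a standalone constant expression: for int32 values ~x == -x - 1, so bitwise-not maps [lower, upper] onto exactly [~upper, ~lower].

static_assert(~5 == -(5) - 1 && ~(-6) == -(-6) - 1 && ~5 == -6 && ~(-6) == 5,
              "~x == -x - 1, hence ~[l, u] == [~u, ~l]");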
 
 Range *
-Range::mul(const Range *lhs, const Range *rhs)
+Range::mul(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     bool fractional = lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart();
 
     uint16_t exponent;
     if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
         // Two finite values.
         exponent = lhs->numBits() + rhs->numBits() - 1;
         if (exponent > Range::MaxFiniteExponent)
@@ -811,147 +813,147 @@ Range::mul(const Range *lhs, const Range
         // Two values that multiplied together won't produce a NaN.
         exponent = Range::IncludesInfinity;
     } else {
         // Could be anything.
         exponent = Range::IncludesInfinityAndNaN;
     }
 
     if (MissingAnyInt32Bounds(lhs, rhs))
-        return new Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
+        return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
     int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
     int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
     int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
     int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
-    return new Range(
+    return new(alloc) Range(
         Min( Min(a, b), Min(c, d) ),
         Max( Max(a, b), Max(c, d) ),
         fractional, exponent);
 }
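
Why all four corner products are needed, as a standalone constant check: for lhs in [-3, 2] and rhs in [4, 5], the product range is [-15, 10], and its minimum comes from lower * upper rather than lower * lower.

static_assert(-3 * 4 == -12 && -3 * 5 == -15 && 2 * 4 == 8 && 2 * 5 == 10,
              "the minimum of the four corner products is -15 (lower * upper) and "
              "the maximum is 10 (upper * upper)");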
 
 Range *
-Range::lsh(const Range *lhs, int32_t c)
+Range::lsh(TempAllocator &alloc, const Range *lhs, int32_t c)
 {
     JS_ASSERT(lhs->isInt32());
     int32_t shift = c & 0x1f;
 
     // If the shift doesn't lose bits or shift bits into the sign bit, we
     // can simply compute the correct range by shifting.
     if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
         (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
     {
-        return Range::NewInt32Range(
+        return Range::NewInt32Range(alloc,
             uint32_t(lhs->lower()) << shift,
             uint32_t(lhs->upper()) << shift);
     }
 
-    return Range::NewInt32Range(INT32_MIN, INT32_MAX);
+    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
 }
 
 Range *
-Range::rsh(const Range *lhs, int32_t c)
+Range::rsh(TempAllocator &alloc, const Range *lhs, int32_t c)
 {
     JS_ASSERT(lhs->isInt32());
     int32_t shift = c & 0x1f;
-    return Range::NewInt32Range(
+    return Range::NewInt32Range(alloc,
         lhs->lower() >> shift,
         lhs->upper() >> shift);
 }
 
 Range *
-Range::ursh(const Range *lhs, int32_t c)
+Range::ursh(TempAllocator &alloc, const Range *lhs, int32_t c)
 {
     // ursh's left operand is uint32, not int32, but for range analysis we
     // currently approximate it as int32. We assume here that the range has
     // already been adjusted accordingly by our callers.
     JS_ASSERT(lhs->isInt32());
 
     int32_t shift = c & 0x1f;
 
     // If the value is always non-negative or always negative, we can simply
     // compute the correct range by shifting.
     if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
-        return Range::NewUInt32Range(
+        return Range::NewUInt32Range(alloc,
             uint32_t(lhs->lower()) >> shift,
             uint32_t(lhs->upper()) >> shift);
     }
 
     // Otherwise return the most general range after the shift.
-    return Range::NewUInt32Range(0, UINT32_MAX >> shift);
+    return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
 }
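
A standalone reminder (plain C++, not jit code) of why a sign-mixed range falls back to [0, UINT32_MAX >> shift]: reinterpreting a negative int32 as uint32 yields a very large value, so the int32 bounds say little about the shifted result.

#include <cstdint>

static_assert(uint32_t(-1) == UINT32_MAX,
              "-1 >>> 0 evaluates to 4294967295 in JS");
static_assert((uint32_t(INT32_MIN) >> 1) == 0x40000000u,
              "even the most negative int32 stays large after the reinterpretation");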
 
 Range *
-Range::lsh(const Range *lhs, const Range *rhs)
+Range::lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     JS_ASSERT(lhs->isInt32());
     JS_ASSERT(rhs->isInt32());
-    return Range::NewInt32Range(INT32_MIN, INT32_MAX);
+    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
 }
 
 Range *
-Range::rsh(const Range *lhs, const Range *rhs)
+Range::rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     JS_ASSERT(lhs->isInt32());
     JS_ASSERT(rhs->isInt32());
-    return Range::NewInt32Range(Min(lhs->lower(), 0), Max(lhs->upper(), 0));
+    return Range::NewInt32Range(alloc, Min(lhs->lower(), 0), Max(lhs->upper(), 0));
 }
 
 Range *
-Range::ursh(const Range *lhs, const Range *rhs)
+Range::ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     // ursh's left operand is uint32, not int32, but for range analysis we
     // currently approximate it as int32. We assume here that the range has
     // already been adjusted accordingly by our callers.
     JS_ASSERT(lhs->isInt32());
     JS_ASSERT(rhs->isInt32());
-    return Range::NewUInt32Range(0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
+    return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
 }
 
 Range *
-Range::abs(const Range *op)
+Range::abs(TempAllocator &alloc, const Range *op)
 {
     int32_t l = op->lower_;
     int32_t u = op->upper_;
 
-    return new Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
-                     true,
-                     Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
-                     op->hasInt32LowerBound_ && op->hasInt32UpperBound_ && l != INT32_MIN,
-                     op->canHaveFractionalPart_,
-                     op->max_exponent_);
+    return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
+                            true,
+                            Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
+                            op->hasInt32LowerBound_ && op->hasInt32UpperBound_ && l != INT32_MIN,
+                            op->canHaveFractionalPart_,
+                            op->max_exponent_);
 }
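
The l == INT32_MIN guard above exists because the absolute value of INT32_MIN is not representable as an int32; a standalone constant check:

#include <cstdint>

static_assert(-int64_t(INT32_MIN) == int64_t(INT32_MAX) + 1,
              "abs(INT32_MIN) overflows int32, so the int32 upper bound is dropped");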
 
 Range *
-Range::min(const Range *lhs, const Range *rhs)
+Range::min(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     // If either operand is NaN, the result is NaN.
     if (lhs->canBeNaN() || rhs->canBeNaN())
         return nullptr;
 
-    return new Range(Min(lhs->lower_, rhs->lower_),
-                     lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
-                     Min(lhs->upper_, rhs->upper_),
-                     lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
-                     lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
-                     Max(lhs->max_exponent_, rhs->max_exponent_));
+    return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
+                            lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
+                            Min(lhs->upper_, rhs->upper_),
+                            lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
+                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
+                            Max(lhs->max_exponent_, rhs->max_exponent_));
 }
 
 Range *
-Range::max(const Range *lhs, const Range *rhs)
+Range::max(TempAllocator &alloc, const Range *lhs, const Range *rhs)
 {
     // If either operand is NaN, the result is NaN.
     if (lhs->canBeNaN() || rhs->canBeNaN())
         return nullptr;
 
-    return new Range(Max(lhs->lower_, rhs->lower_),
-                     lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
-                     Max(lhs->upper_, rhs->upper_),
-                     lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
-                     lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
-                     Max(lhs->max_exponent_, rhs->max_exponent_));
+    return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
+                            lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
+                            Max(lhs->upper_, rhs->upper_),
+                            lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
+                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
+                            Max(lhs->max_exponent_, rhs->max_exponent_));
 }
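
A standalone spot-check (plain C++) of the asymmetry in the bound flags above: min(x, y) never exceeds either operand, so one known upper bound is enough, but it can be arbitrarily small if either side is unbounded below; max is the mirror image.

#include <algorithm>
#include <cassert>

int main() {
    // rhs known to be in [0, 10], lhs unknown: min still has an upper bound of 10.
    assert(std::min(-1000000, 10) <= 10);
    assert(std::min(1000000, 10) <= 10);
    return 0;
}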
 
 bool
 Range::negativeZeroMul(const Range *lhs, const Range *rhs)
 {
     // The result can only be negative zero if both sides are finite and they
     // have differing signs.
     return (lhs->canBeFiniteNegative() && rhs->canBeFiniteNonNegative()) ||
@@ -981,17 +983,17 @@ Range::update(const Range *other)
     return changed;
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Range Computation for MIR Nodes
 ///////////////////////////////////////////////////////////////////////////////
 
 void
-MPhi::computeRange()
+MPhi::computeRange(TempAllocator &alloc)
 {
     if (type() != MIRType_Int32 && type() != MIRType_Double)
         return;
 
     Range *range = nullptr;
     JS_ASSERT(getOperand(0)->op() != MDefinition::Op_OsrValue);
     for (size_t i = 0, e = numOperands(); i < e; i++) {
         if (getOperand(i)->block()->earlyAbort()) {
@@ -1007,233 +1009,233 @@ MPhi::computeRange()
         if (!getOperand(i)->range())
             return;
 
         Range input(getOperand(i));
 
         if (range)
             range->unionWith(&input);
         else
-            range = new Range(input);
+            range = new(alloc) Range(input);
     }
 
     setRange(range);
 }
 
 void
-MBeta::computeRange()
+MBeta::computeRange(TempAllocator &alloc)
 {
     bool emptyRange = false;
 
     Range opRange(getOperand(0));
-    Range *range = Range::intersect(&opRange, comparison_, &emptyRange);
+    Range *range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
     if (emptyRange) {
         IonSpew(IonSpew_Range, "Marking block for inst %d unexitable", id());
         block()->setEarlyAbort();
     } else {
         setRange(range);
     }
 }
 
 void
-MConstant::computeRange()
+MConstant::computeRange(TempAllocator &alloc)
 {
     if (value().isNumber()) {
         double d = value().toNumber();
-        setRange(Range::NewDoubleRange(d, d));
+        setRange(Range::NewDoubleRange(alloc, d, d));
     } else if (value().isBoolean()) {
         bool b = value().toBoolean();
-        setRange(Range::NewInt32Range(b, b));
+        setRange(Range::NewInt32Range(alloc, b, b));
     }
 }
 
 void
-MCharCodeAt::computeRange()
+MCharCodeAt::computeRange(TempAllocator &alloc)
 {
     // ECMA 262 says that the integer will be non-negative and at most 65535.
-    setRange(Range::NewInt32Range(0, 65535));
+    setRange(Range::NewInt32Range(alloc, 0, 65535));
 }
 
 void
-MClampToUint8::computeRange()
+MClampToUint8::computeRange(TempAllocator &alloc)
 {
-    setRange(Range::NewUInt32Range(0, 255));
+    setRange(Range::NewUInt32Range(alloc, 0, 255));
 }
 
 void
-MBitAnd::computeRange()
+MBitAnd::computeRange(TempAllocator &alloc)
 {
     Range left(getOperand(0));
     Range right(getOperand(1));
     left.wrapAroundToInt32();
     right.wrapAroundToInt32();
 
-    setRange(Range::and_(&left, &right));
+    setRange(Range::and_(alloc, &left, &right));
 }
 
 void
-MBitOr::computeRange()
+MBitOr::computeRange(TempAllocator &alloc)
 {
     Range left(getOperand(0));
     Range right(getOperand(1));
     left.wrapAroundToInt32();
     right.wrapAroundToInt32();
 
-    setRange(Range::or_(&left, &right));
+    setRange(Range::or_(alloc, &left, &right));
 }
 
 void
-MBitXor::computeRange()
+MBitXor::computeRange(TempAllocator &alloc)
 {
     Range left(getOperand(0));
     Range right(getOperand(1));
     left.wrapAroundToInt32();
     right.wrapAroundToInt32();
 
-    setRange(Range::xor_(&left, &right));
+    setRange(Range::xor_(alloc, &left, &right));
 }
 
 void
-MBitNot::computeRange()
+MBitNot::computeRange(TempAllocator &alloc)
 {
     Range op(getOperand(0));
     op.wrapAroundToInt32();
 
-    setRange(Range::not_(&op));
+    setRange(Range::not_(alloc, &op));
 }
 
 void
-MLsh::computeRange()
+MLsh::computeRange(TempAllocator &alloc)
 {
     Range left(getOperand(0));
     Range right(getOperand(1));
     left.wrapAroundToInt32();
 
     MDefinition *rhs = getOperand(1);
     if (!rhs->isConstant()) {
         right.wrapAroundToShiftCount();
-        setRange(Range::lsh(&left, &right));
+        setRange(Range::lsh(alloc, &left, &right));
         return;
     }
 
     int32_t c = rhs->toConstant()->value().toInt32();
-    setRange(Range::lsh(&left, c));
+    setRange(Range::lsh(alloc, &left, c));
 }
 
 void
-MRsh::computeRange()
+MRsh::computeRange(TempAllocator &alloc)
 {
     Range left(getOperand(0));
     Range right(getOperand(1));
     left.wrapAroundToInt32();
 
     MDefinition *rhs = getOperand(1);
     if (!rhs->isConstant()) {
         right.wrapAroundToShiftCount();
-        setRange(Range::rsh(&left, &right));
+        setRange(Range::rsh(alloc, &left, &right));
         return;
     }
 
     int32_t c = rhs->toConstant()->value().toInt32();
-    setRange(Range::rsh(&left, c));
+    setRange(Range::rsh(alloc, &left, c));
 }
 
 void
-MUrsh::computeRange()
+MUrsh::computeRange(TempAllocator &alloc)
 {
     Range left(getOperand(0));
     Range right(getOperand(1));
 
     // ursh can be thought of as converting its left operand to uint32, or it
     // can be thought of as converting its left operand to int32, and then
     // reinterpreting the int32 bits as a uint32 value. Both approaches yield
     // the same result. Since we lack support for full uint32 ranges, we use
     // the second interpretation, though it does cause us to be conservative.
     left.wrapAroundToInt32();
     right.wrapAroundToShiftCount();
 
     MDefinition *rhs = getOperand(1);
     if (!rhs->isConstant()) {
-        setRange(Range::ursh(&left, &right));
+        setRange(Range::ursh(alloc, &left, &right));
     } else {
         int32_t c = rhs->toConstant()->value().toInt32();
-        setRange(Range::ursh(&left, c));
+        setRange(Range::ursh(alloc, &left, c));
     }
 
     JS_ASSERT(range()->lower() >= 0);
 }
 
 void
-MAbs::computeRange()
+MAbs::computeRange(TempAllocator &alloc)
 {
     if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
         return;
 
     Range other(getOperand(0));
-    Range *next = Range::abs(&other);
+    Range *next = Range::abs(alloc, &other);
     if (implicitTruncate_)
         next->wrapAroundToInt32();
     setRange(next);
 }
 
 void
-MMinMax::computeRange()
+MMinMax::computeRange(TempAllocator &alloc)
 {
     if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
         return;
 
     Range left(getOperand(0));
     Range right(getOperand(1));
-    setRange(isMax() ? Range::max(&left, &right) : Range::min(&left, &right));
+    setRange(isMax() ? Range::max(alloc, &left, &right) : Range::min(alloc, &left, &right));
 }
 
 void
-MAdd::computeRange()
+MAdd::computeRange(TempAllocator &alloc)
 {
     if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
         return;
     Range left(getOperand(0));
     Range right(getOperand(1));
-    Range *next = Range::add(&left, &right);
+    Range *next = Range::add(alloc, &left, &right);
     if (isTruncated())
         next->wrapAroundToInt32();
     setRange(next);
 }
 
 void
-MSub::computeRange()
+MSub::computeRange(TempAllocator &alloc)
 {
     if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
         return;
     Range left(getOperand(0));
     Range right(getOperand(1));
-    Range *next = Range::sub(&left, &right);
+    Range *next = Range::sub(alloc, &left, &right);
     if (isTruncated())
         next->wrapAroundToInt32();
     setRange(next);
 }
 
 void
-MMul::computeRange()
+MMul::computeRange(TempAllocator &alloc)
 {
     if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
         return;
     Range left(getOperand(0));
     Range right(getOperand(1));
     if (canBeNegativeZero())
         canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
-    Range *next = Range::mul(&left, &right);
+    Range *next = Range::mul(alloc, &left, &right);
     // Truncated multiplications could overflow in both directions
     if (isTruncated())
         next->wrapAroundToInt32();
     setRange(next);
 }
 
 void
-MMod::computeRange()
+MMod::computeRange(TempAllocator &alloc)
 {
     if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
         return;
     Range lhs(getOperand(0));
     Range rhs(getOperand(1));
 
     // If either operand is a NaN, the result is NaN. This also conservatively
     // handles Infinity cases.
@@ -1270,17 +1272,17 @@ MMod::computeRange()
             rhsBound = UINT32_MAX;
 
         // The result will never be equal to the rhs, and we shouldn't have
         // any rounding to worry about.
         JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
         --rhsBound;
 
         // This gives us two upper bounds, so we can take the best one.
-        setRange(Range::NewUInt32Range(0, Min(lhsBound, rhsBound)));
+        setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound)));
         return;
     }
 
     // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
     // First, the absolute value of the result will always be less than the
     // absolute value of rhs. (And if rhs is zero, the result is NaN).
     int64_t a = Abs<int64_t>(rhs.lower());
     int64_t b = Abs<int64_t>(rhs.upper());
@@ -1302,230 +1304,230 @@ MMod::computeRange()
     int64_t absBound = Min(lhsAbsBound, rhsAbsBound);
 
     // Now consider the sign of the result.
     // If lhs is non-negative, the result will be non-negative.
     // If lhs is non-positive, the result will be non-positive.
     int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
     int64_t upper = lhs.upper() <= 0 ? 0 : absBound;
 
-    setRange(new Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(),
-                       Min(lhs.exponent(), rhs.exponent())));
+    setRange(new(alloc) Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(),
+                              Min(lhs.exponent(), rhs.exponent())));
 }
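
The bounds above lean on two facts about int32 %, checked standalone here with C++'s truncating %, which matches JS for int32 operands: the magnitude of the result is below both |rhs| and |lhs|, and the sign follows the lhs.

#include <cassert>
#include <cstdlib>

int main() {
    for (int a = -20; a <= 20; a++) {
        for (int b = -7; b <= 7; b++) {
            if (b == 0)
                continue;
            int r = a % b;
            assert(std::abs(r) <= std::abs(b) - 1);
            assert(std::abs(r) <= std::abs(a));
            assert(a >= 0 ? r >= 0 : r <= 0);
        }
    }
    return 0;
}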
 
 void
-MDiv::computeRange()
+MDiv::computeRange(TempAllocator &alloc)
 {
     if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
         return;
     Range lhs(getOperand(0));
     Range rhs(getOperand(1));
 
     // If either operand is a NaN, the result is NaN. This also conservatively
     // handles Infinity cases.
     if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
         return;
 
     // Something simple for now: When dividing by a positive rhs, the result
     // won't be further from zero than lhs.
     if (lhs.lower() >= 0 && rhs.lower() >= 1) {
-        setRange(new Range(0, lhs.upper(), true, lhs.exponent()));
+        setRange(new(alloc) Range(0, lhs.upper(), true, lhs.exponent()));
 
         // Also, we can optimize by converting this to an unsigned div.
         if (specialization() == MIRType_Int32 &&
             !lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
         {
             unsigned_ = true;
         }
     } else if (unsigned_ && rhs.lower() >= 1) {
         // We shouldn't set the unsigned flag if the inputs can have
         // fractional parts.
         JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
         // Unsigned division by a non-zero rhs will return a uint32 value.
-        setRange(Range::NewUInt32Range(0, UINT32_MAX));
+        setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
     }
 }
 
 void
-MSqrt::computeRange()
+MSqrt::computeRange(TempAllocator &alloc)
 {
     Range input(getOperand(0));
 
     // If either operand is a NaN, the result is NaN. This also conservatively
     // handles Infinity cases.
     if (!input.hasInt32Bounds())
         return;
 
     // Sqrt of a negative non-zero value is NaN.
     if (input.lower() < 0)
         return;
 
     // Something simple for now: When taking the sqrt of a positive value, the
     // result won't be further from zero than the input.
-    setRange(new Range(0, input.upper(), true, input.exponent()));
+    setRange(new(alloc) Range(0, input.upper(), true, input.exponent()));
 }
 
 void
-MToDouble::computeRange()
+MToDouble::computeRange(TempAllocator &alloc)
 {
-    setRange(new Range(getOperand(0)));
+    setRange(new(alloc) Range(getOperand(0)));
 }
 
 void
-MToFloat32::computeRange()
+MToFloat32::computeRange(TempAllocator &alloc)
 {
-    setRange(new Range(getOperand(0)));
+    setRange(new(alloc) Range(getOperand(0)));
 }
 
 void
-MTruncateToInt32::computeRange()
+MTruncateToInt32::computeRange(TempAllocator &alloc)
 {
-    Range *output = new Range(getOperand(0));
+    Range *output = new(alloc) Range(getOperand(0));
     output->wrapAroundToInt32();
     setRange(output);
 }
 
 void
-MToInt32::computeRange()
+MToInt32::computeRange(TempAllocator &alloc)
 {
-    Range *output = new Range(getOperand(0));
+    Range *output = new(alloc) Range(getOperand(0));
     output->clampToInt32();
     setRange(output);
 }
 
-static Range *GetTypedArrayRange(int type)
+static Range *GetTypedArrayRange(TempAllocator &alloc, int type)
 {
     switch (type) {
       case ScalarTypeRepresentation::TYPE_UINT8_CLAMPED:
       case ScalarTypeRepresentation::TYPE_UINT8:
-        return Range::NewUInt32Range(0, UINT8_MAX);
+        return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
       case ScalarTypeRepresentation::TYPE_UINT16:
-        return Range::NewUInt32Range(0, UINT16_MAX);
+        return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
       case ScalarTypeRepresentation::TYPE_UINT32:
-        return Range::NewUInt32Range(0, UINT32_MAX);
+        return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
 
       case ScalarTypeRepresentation::TYPE_INT8:
-        return Range::NewInt32Range(INT8_MIN, INT8_MAX);
+        return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
       case ScalarTypeRepresentation::TYPE_INT16:
-        return Range::NewInt32Range(INT16_MIN, INT16_MAX);
+        return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
       case ScalarTypeRepresentation::TYPE_INT32:
-        return Range::NewInt32Range(INT32_MIN, INT32_MAX);
+        return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
 
       case ScalarTypeRepresentation::TYPE_FLOAT32:
       case ScalarTypeRepresentation::TYPE_FLOAT64:
         break;
     }
 
     return nullptr;
 }
 
 void
-MLoadTypedArrayElement::computeRange()
+MLoadTypedArrayElement::computeRange(TempAllocator &alloc)
 {
     // We have an Int32 type, and if this is a UInt32 load it may produce a value
     // outside of our range, but we have a bailout to handle those cases.
-    setRange(GetTypedArrayRange(arrayType()));
+    setRange(GetTypedArrayRange(alloc, arrayType()));
 }
 
 void
-MLoadTypedArrayElementStatic::computeRange()
+MLoadTypedArrayElementStatic::computeRange(TempAllocator &alloc)
 {
     // We don't currently use MLoadTypedArrayElementStatic for uint32, so we
     // don't have to worry about it returning a value outside our type.
     JS_ASSERT(typedArray_->type() != ScalarTypeRepresentation::TYPE_UINT32);
 
-    setRange(GetTypedArrayRange(typedArray_->type()));
+    setRange(GetTypedArrayRange(alloc, typedArray_->type()));
 }
 
 void
-MArrayLength::computeRange()
+MArrayLength::computeRange(TempAllocator &alloc)
 {
     // Array lengths can go up to UINT32_MAX, but we only create MArrayLength
     // nodes when the value is known to be int32 (see the
     // OBJECT_FLAG_LENGTH_OVERFLOW flag).
-    setRange(Range::NewUInt32Range(0, INT32_MAX));
+    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
 }
 
 void
-MInitializedLength::computeRange()
+MInitializedLength::computeRange(TempAllocator &alloc)
 {
-    setRange(Range::NewUInt32Range(0, JSObject::NELEMENTS_LIMIT));
+    setRange(Range::NewUInt32Range(alloc, 0, JSObject::NELEMENTS_LIMIT));
 }
 
 void
-MTypedArrayLength::computeRange()
+MTypedArrayLength::computeRange(TempAllocator &alloc)
 {
-    setRange(Range::NewUInt32Range(0, INT32_MAX));
+    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
 }
 
 void
-MStringLength::computeRange()
+MStringLength::computeRange(TempAllocator &alloc)
 {
     static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
                   "NewUInt32Range requires a uint32 value");
-    setRange(Range::NewUInt32Range(0, JSString::MAX_LENGTH));
+    setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
 }
 
 void
-MArgumentsLength::computeRange()
+MArgumentsLength::computeRange(TempAllocator &alloc)
 {
     // This is a conservative upper bound on what |TooManyArguments| checks.
     // If exceeded, Ion will not be entered in the first place.
     static_assert(SNAPSHOT_MAX_NARGS <= UINT32_MAX,
                   "NewUInt32Range requires a uint32 value");
-    setRange(Range::NewUInt32Range(0, SNAPSHOT_MAX_NARGS));
+    setRange(Range::NewUInt32Range(alloc, 0, SNAPSHOT_MAX_NARGS));
 }
 
 void
-MBoundsCheck::computeRange()
+MBoundsCheck::computeRange(TempAllocator &alloc)
 {
     // Just transfer the incoming index range to the output. The length() is
     // also interesting, but it is handled as a bailout check, and we're
     // computing a pre-bailout range here.
-    setRange(new Range(index()));
+    setRange(new(alloc) Range(index()));
 }
 
 void
-MArrayPush::computeRange()
+MArrayPush::computeRange(TempAllocator &alloc)
 {
     // MArrayPush returns the new array length.
-    setRange(Range::NewUInt32Range(0, UINT32_MAX));
+    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
 }
 
 void
-MMathFunction::computeRange()
+MMathFunction::computeRange(TempAllocator &alloc)
 {
     Range opRange(getOperand(0));
     switch (function()) {
       case Sin:
       case Cos:
         if (!opRange.canBeInfiniteOrNaN())
-            setRange(Range::NewDoubleRange(-1.0, 1.0));
+            setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
         break;
       case Sign:
         if (!opRange.canBeNaN()) {
             // Note that Math.sign(-0) is -0, and we treat -0 as equal to 0.
             int32_t lower = -1;
             int32_t upper = 1;
             if (opRange.hasInt32LowerBound() && opRange.lower() >= 0)
                 lower = 0;
             if (opRange.hasInt32UpperBound() && opRange.upper() <= 0)
                 upper = 0;
-            setRange(Range::NewInt32Range(lower, upper));
+            setRange(Range::NewInt32Range(alloc, lower, upper));
         }
         break;
       default:
         break;
     }
 }
 
 void
-MRandom::computeRange()
+MRandom::computeRange(TempAllocator &alloc)
 {
-    setRange(Range::NewDoubleRange(0.0, 1.0));
+    setRange(Range::NewDoubleRange(alloc, 0.0, 1.0));
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Range Analysis
 ///////////////////////////////////////////////////////////////////////////////
 
 bool
 RangeAnalysis::markBlocksInLoopBody(MBasicBlock *header, MBasicBlock *backedge)
@@ -1762,17 +1764,17 @@ RangeAnalysis::analyzeLoopIterationCount
                 return nullptr;
         }
         if (!bound.add(lhs.constant))
             return nullptr;
     } else {
         return nullptr;
     }
 
-    return new LoopIterationBound(header, test, bound);
+    return new(alloc()) LoopIterationBound(header, test, bound);
 }
 
 void
 RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi)
 {
     // Given a bound on the number of backedges taken, compute an upper and
     // lower bound for a phi node that may change by a constant amount each
     // iteration. Unlike for the case when computing the iteration bound
@@ -1793,17 +1795,17 @@ RangeAnalysis::analyzeLoopPhi(MBasicBloc
         return;
 
     SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor()));
 
     if (modified.term != phi || modified.constant == 0)
         return;
 
     if (!phi->range())
-        phi->setRange(new Range());
+        phi->setRange(new(alloc()) Range());
 
     LinearSum initialSum(alloc());
     if (!initialSum.add(initial, 1))
         return;
 
     // The phi may change by N each iteration, and is either nondecreasing or
     // nonincreasing. initial(phi) is either a lower or upper bound for the
     // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
@@ -1827,23 +1829,23 @@ RangeAnalysis::analyzeLoopPhi(MBasicBloc
     int32_t negativeConstant;
     if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
         return;
 
     Range *initRange = initial->range();
     if (modified.constant > 0) {
         if (initRange && initRange->hasInt32LowerBound())
             phi->range()->refineLower(initRange->lower());
-        phi->range()->setSymbolicLower(new SymbolicBound(nullptr, initialSum));
-        phi->range()->setSymbolicUpper(new SymbolicBound(loopBound, limitSum));
+        phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum));
+        phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum));
     } else {
         if (initRange && initRange->hasInt32UpperBound())
             phi->range()->refineUpper(initRange->upper());
-        phi->range()->setSymbolicUpper(new SymbolicBound(nullptr, initialSum));
-        phi->range()->setSymbolicLower(new SymbolicBound(loopBound, limitSum));
+        phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum));
+        phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum));
     }
 
     IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id());
     SpewRange(phi);
 }
 
 // Whether bound is valid at the specified bounds check instruction in a loop,
 // and may be used to hoist ins.
@@ -1870,53 +1872,53 @@ ConvertLinearSum(TempAllocator &alloc, M
     for (size_t i = 0; i < sum.numTerms(); i++) {
         LinearTerm term = sum.term(i);
         JS_ASSERT(!term.term->isConstant());
         if (term.scale == 1) {
             if (def) {
                 def = MAdd::New(alloc, def, term.term);
                 def->toAdd()->setInt32();
                 block->insertBefore(block->lastIns(), def->toInstruction());
-                def->computeRange();
+                def->computeRange(alloc);
             } else {
                 def = term.term;
             }
         } else if (term.scale == -1) {
             if (!def) {
                 def = MConstant::New(alloc, Int32Value(0));
                 block->insertBefore(block->lastIns(), def->toInstruction());
-                def->computeRange();
+                def->computeRange(alloc);
             }
             def = MSub::New(alloc, def, term.term);
             def->toSub()->setInt32();
             block->insertBefore(block->lastIns(), def->toInstruction());
-            def->computeRange();
+            def->computeRange(alloc);
         } else {
             JS_ASSERT(term.scale != 0);
             MConstant *factor = MConstant::New(alloc, Int32Value(term.scale));
             block->insertBefore(block->lastIns(), factor);
             MMul *mul = MMul::New(alloc, term.term, factor);
             mul->setInt32();
             block->insertBefore(block->lastIns(), mul);
-            mul->computeRange();
+            mul->computeRange(alloc);
             if (def) {
                 def = MAdd::New(alloc, def, mul);
                 def->toAdd()->setInt32();
                 block->insertBefore(block->lastIns(), def->toInstruction());
-                def->computeRange();
+                def->computeRange(alloc);
             } else {
                 def = mul;
             }
         }
     }
 
     if (!def) {
         def = MConstant::New(alloc, Int32Value(0));
         block->insertBefore(block->lastIns(), def->toInstruction());
-        def->computeRange();
+        def->computeRange(alloc);
     }
 
     return def;
 }
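
The MIR built above evaluates the non-constant terms of a linear sum left to right: a scale of 1 folds into an add, a scale of -1 into a subtract (seeded with a constant 0 when it is the first term), and any other scale into a multiply followed by an add. A plain-integer sketch of the same evaluation order (EvalLinearTerms is a hypothetical name, not jit code):

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

static int32_t
EvalLinearTerms(const std::vector<std::pair<int32_t, int32_t>> &terms) // (scale, value)
{
    bool haveDef = false;
    int32_t def = 0;
    for (size_t i = 0; i < terms.size(); i++) {
        int32_t scale = terms[i].first, value = terms[i].second;
        if (scale == 1) {
            def = haveDef ? def + value : value;       // MAdd, or the term itself
        } else if (scale == -1) {
            def = (haveDef ? def : 0) - value;         // MSub, seeded with a constant 0
        } else {
            int32_t mul = value * scale;               // MMul by a constant factor
            def = haveDef ? def + mul : mul;           // then MAdd if needed
        }
        haveDef = true;
    }
    return def;                                        // constant 0 if there are no terms
}

int main() {
    // 1*x + (-1)*y + 3*z with x = 5, y = 2, z = 4  ==>  5 - 2 + 12 == 15
    assert(EvalLinearTerms({{1, 5}, {-1, 2}, {3, 4}}) == 15);
    return 0;
}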
 
 bool
 RangeAnalysis::tryHoistBoundsCheck(MBasicBlock *header, MBoundsCheck *ins)
 {
@@ -1994,17 +1996,17 @@ RangeAnalysis::analyze()
     IonSpew(IonSpew_Range, "Doing range propagation");
 
     for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
         MBasicBlock *block = *iter;
 
         for (MDefinitionIterator iter(block); iter; iter++) {
             MDefinition *def = *iter;
 
-            def->computeRange();
+            def->computeRange(alloc());
             IonSpew(IonSpew_Range, "computing range on %d", def->id());
             SpewRange(def);
         }
 
         if (block->isLoopHeader()) {
             if (!analyzeLoop(block))
                 return false;
         }
@@ -2069,17 +2071,17 @@ RangeAnalysis::addRangeAssertions()
             // Don't insert assertions if there's nothing interesting to assert.
             if (r.isUnknown() || (ins->type() == MIRType_Int32 && r.isUnknownInt32()))
                 continue;
 
             // Range-checking PassArgs breaks stuff.
             if (ins->isPassArg())
                 continue;
 
-            MAssertRange *guard = MAssertRange::New(alloc(), ins, new Range(r));
+            MAssertRange *guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r));
 
             // The code that removes beta nodes assumes that it can find them
             // in a contiguous run at the top of each block. Don't insert
             // range assertions in between beta nodes.
             MInstructionIterator insertIter = iter;
             while (insertIter->isBeta())
                 insertIter++;
 
--- a/js/src/jit/RangeAnalysis.h
+++ b/js/src/jit/RangeAnalysis.h
@@ -41,32 +41,38 @@ struct LoopIterationBound : public TempO
       : header(header), test(test), sum(sum)
     {
     }
 };
 
 // A symbolic upper or lower bound computed for a term.
 struct SymbolicBound : public TempObject
 {
+  private:
+    SymbolicBound(LoopIterationBound *loop, LinearSum sum)
+      : loop(loop), sum(sum)
+    {
+    }
+
+  public:
     // Any loop iteration bound from which this was derived.
     //
     // If non-nullptr, then 'sum' is only valid within the loop body, at
     // points dominated by the loop bound's test (see LoopIterationBound).
     //
     // If nullptr, then 'sum' is always valid.
     LoopIterationBound *loop;
 
+    static SymbolicBound *New(TempAllocator &alloc, LoopIterationBound *loop, LinearSum sum) {
+        return new(alloc) SymbolicBound(loop, sum);
+    }
+
     // Computed symbolic bound, see above.
     LinearSum sum;
 
-    SymbolicBound(LoopIterationBound *loop, LinearSum sum)
-      : loop(loop), sum(sum)
-    {
-    }
-
     void print(Sprinter &sp) const;
     void dump() const;
 };
 
 class RangeAnalysis
 {
   protected:
     bool blockDominates(MBasicBlock *b, MBasicBlock *b2);
@@ -347,62 +353,63 @@ class Range : public TempObject {
         assertInvariants();
     }
 
     // Construct a range from the given MDefinition. This differs from the
     // MDefinition's range() method in that it describes the range of values
     // *after* any bailout checks.
     Range(const MDefinition *def);
 
-    static Range *NewInt32Range(int32_t l, int32_t h) {
-        return new Range(l, h, false, MaxInt32Exponent);
+    static Range *NewInt32Range(TempAllocator &alloc, int32_t l, int32_t h) {
+        return new(alloc) Range(l, h, false, MaxInt32Exponent);
     }
 
-    static Range *NewUInt32Range(uint32_t l, uint32_t h) {
+    static Range *NewUInt32Range(TempAllocator &alloc, uint32_t l, uint32_t h) {
         // For now, just pass them to the constructor as int64_t values.
         // They'll become unbounded if they're not in the int32_t range.
-        return new Range(l, h, false, MaxUInt32Exponent);
+        return new(alloc) Range(l, h, false, MaxUInt32Exponent);
     }
 
-    static Range *NewDoubleRange(double l, double h) {
+    static Range *NewDoubleRange(TempAllocator &alloc, double l, double h) {
         if (mozilla::IsNaN(l) && mozilla::IsNaN(h))
             return nullptr;
 
-        Range *r = new Range();
+        Range *r = new(alloc) Range();
         r->setDouble(l, h);
         return r;
     }
 
     void print(Sprinter &sp) const;
     void dump(FILE *fp) const;
     void dump() const;
     bool update(const Range *other);
 
     // Unlike the other operations, unionWith is an in-place modification,
     // to avoid a lot of needless copying when chaining together unions
     // while handling Phi nodes.
     void unionWith(const Range *other);
-    static Range * intersect(const Range *lhs, const Range *rhs, bool *emptyRange);
-    static Range * add(const Range *lhs, const Range *rhs);
-    static Range * sub(const Range *lhs, const Range *rhs);
-    static Range * mul(const Range *lhs, const Range *rhs);
-    static Range * and_(const Range *lhs, const Range *rhs);
-    static Range * or_(const Range *lhs, const Range *rhs);
-    static Range * xor_(const Range *lhs, const Range *rhs);
-    static Range * not_(const Range *op);
-    static Range * lsh(const Range *lhs, int32_t c);
-    static Range * rsh(const Range *lhs, int32_t c);
-    static Range * ursh(const Range *lhs, int32_t c);
-    static Range * lsh(const Range *lhs, const Range *rhs);
-    static Range * rsh(const Range *lhs, const Range *rhs);
-    static Range * ursh(const Range *lhs, const Range *rhs);
-    static Range * abs(const Range *op);
-    static Range * min(const Range *lhs, const Range *rhs);
-    static Range * max(const Range *lhs, const Range *rhs);
+    static Range *intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs,
+                             bool *emptyRange);
+    static Range *add(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *sub(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *mul(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *and_(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *or_(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *not_(TempAllocator &alloc, const Range *op);
+    static Range *lsh(TempAllocator &alloc, const Range *lhs, int32_t c);
+    static Range *rsh(TempAllocator &alloc, const Range *lhs, int32_t c);
+    static Range *ursh(TempAllocator &alloc, const Range *lhs, int32_t c);
+    static Range *lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *abs(TempAllocator &alloc, const Range *op);
+    static Range *min(TempAllocator &alloc, const Range *lhs, const Range *rhs);
+    static Range *max(TempAllocator &alloc, const Range *lhs, const Range *rhs);
 
     static bool negativeZeroMul(const Range *lhs, const Range *rhs);
 
     bool isUnknownInt32() const {
         return isInt32() && lower() == INT32_MIN && upper() == INT32_MAX;
     }
 
     bool isUnknown() const {
--- a/js/src/jit/StupidAllocator.cpp
+++ b/js/src/jit/StupidAllocator.cpp
@@ -23,17 +23,17 @@ DefaultStackSlot(uint32_t vreg)
 
 LAllocation *
 StupidAllocator::stackLocation(uint32_t vreg)
 {
     LDefinition *def = virtualRegisters[vreg];
     if (def->policy() == LDefinition::PRESET && def->output()->isArgument())
         return def->output();
 
-    return new LStackSlot(DefaultStackSlot(vreg), def->type() == LDefinition::DOUBLE);
+    return new(alloc()) LStackSlot(DefaultStackSlot(vreg), def->type() == LDefinition::DOUBLE);
 }
 
 StupidAllocator::RegisterIndex
 StupidAllocator::registerIndex(AnyRegister reg)
 {
     for (size_t i = 0; i < registerCount; i++) {
         if (reg == registers[i].reg)
             return i;
@@ -181,17 +181,17 @@ StupidAllocator::allocateRegister(LInstr
     return best;
 }
 
 void
 StupidAllocator::syncRegister(LInstruction *ins, RegisterIndex index)
 {
     if (registers[index].dirty) {
         LMoveGroup *input = getInputMoveGroup(ins->id());
-        LAllocation *source = new LAllocation(registers[index].reg);
+        LAllocation *source = new(alloc()) LAllocation(registers[index].reg);
 
         uint32_t existing = registers[index].vreg;
         LAllocation *dest = stackLocation(existing);
         input->addAfter(source, dest);
 
         registers[index].dirty = false;
     }
 }
@@ -204,17 +204,17 @@ StupidAllocator::evictRegister(LInstruct
 }
 
 void
 StupidAllocator::loadRegister(LInstruction *ins, uint32_t vreg, RegisterIndex index)
 {
     // Load a vreg from its stack location to a register.
     LMoveGroup *input = getInputMoveGroup(ins->id());
     LAllocation *source = stackLocation(vreg);
-    LAllocation *dest = new LAllocation(registers[index].reg);
+    LAllocation *dest = new(alloc()) LAllocation(registers[index].reg);
     input->addAfter(source, dest);
     registers[index].set(vreg, ins);
 }
 
 StupidAllocator::RegisterIndex
 StupidAllocator::findExistingRegister(uint32_t vreg)
 {
     for (size_t i = 0; i < registerCount; i++) {
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -435,17 +435,17 @@ class CodeGeneratorShared : public LInst
                                    jsbytecode *bytecode);
     OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, LInstruction *lir);
     OutOfLinePropagateAbortPar *oolPropagateAbortPar(LInstruction *lir);
     virtual bool visitOutOfLineAbortPar(OutOfLineAbortPar *ool) = 0;
     virtual bool visitOutOfLinePropagateAbortPar(OutOfLinePropagateAbortPar *ool) = 0;
 };
 
 // An out-of-line path is generated at the end of the function.
-class OutOfLineCode : public TempObject
+class OutOfLineCode : public OldTempObject
 {
     Label entry_;
     Label rejoin_;
     uint32_t framePushed_;
     jsbytecode *pc_;
     JSScript *script_;
 
   public: