author | Tom Schuster <evilpies@gmail.com>, Tue, 04 Dec 2012 19:10:23 +0100
changeset 114938 | 3b419a7a354c8151ba403f5e68decad1a076c2d7
parent 114937 | b87db9d6055421a76b05e325a630b9296b67fef5
child 114939 | 4a0c471957b73c274d862c24730eb5848534726b
push id | 23949
push user | ryanvm@gmail.com
push date | Wed, 05 Dec 2012 01:17:54 +0000
reviewers | jandem
bugs | 797970
milestone | 20.0a1
first release with | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
last release without | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
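This changeset converts the js/src/ion sources from the old integer typedefs (uint8, uint16, uint32, int32) to the standard <stdint.h> fixed-width types (uint8_t, uint16_t, uint32_t, int32_t). The standard names make the width guarantees explicit. A standalone sanity check of the property the rename relies on (illustrative only, not part of the patch):

    #include <cstdint>
    #include <climits>

    // The <cstdint> exact-width types are guaranteed to be exactly this wide,
    // which the old project-local typedefs only promised by convention.
    static_assert(sizeof(uint32_t) * CHAR_BIT == 32, "uint32_t is exactly 32 bits");
    static_assert(sizeof(int32_t) * CHAR_BIT == 32, "int32_t is exactly 32 bits");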
--- a/js/src/ion/AliasAnalysis.cpp
+++ b/js/src/ion/AliasAnalysis.cpp
@@ -16,17 +16,17 @@
 using namespace js;
 using namespace js::ion;
 
 // Iterates over the flags in an AliasSet.
 class AliasSetIterator
 {
   private:
-    uint32 flags;
+    uint32_t flags;
     unsigned pos;
 
   public:
     AliasSetIterator(AliasSet set)
       : flags(set.flags()), pos(0)
     {
         while (flags && (flags & 1) == 0) {
             flags >>= 1;
@@ -79,17 +79,17 @@ AliasAnalysis::analyze()
     for (unsigned i=0; i < NUM_ALIAS_SETS; i++) {
         if (!stores.append(firstIns))
             return false;
     }
 
     // Type analysis may have inserted new instructions. Since this pass depends
     // on the instruction number ordering, all instructions are renumbered.
     // We start with 1 because some passes use 0 to denote failure.
-    uint32 newId = 1;
+    uint32_t newId = 1;
 
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
         if (mir->shouldCancel("Alias Analysis (main loop)"))
             return false;
 
         if (block->isLoopHeader()) {
             IonSpew(IonSpew_Alias, "Processing loop header %d", block->id());
             loop_ = new LoopAliasInfo(loop_, *block);
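The AliasSetIterator above walks the set bits of the alias-set word by shifting past cleared bits. The same pattern as a self-contained sketch (names hypothetical, not the Ion API):

    #include <cstdint>
    #include <cstdio>

    // Visit the index of each set bit in `flags`, low to high --
    // the same shift-past-zeros loop AliasSetIterator uses.
    static void forEachSetBit(uint32_t flags) {
        unsigned pos = 0;
        while (flags) {
            while ((flags & 1) == 0) {  // skip cleared bits
                flags >>= 1;
                pos++;
            }
            printf("alias set %u\n", pos);
            flags >>= 1;                // consume the bit just reported
            pos++;
        }
    }

    int main() {
        forEachSetBit(0x15);  // bits 0, 2, 4
    }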
--- a/js/src/ion/Bailouts.cpp
+++ b/js/src/ion/Bailouts.cpp
@@ -94,17 +94,17 @@ GetBailedJSScript(JSContext *cx)
         return NULL;
     }
 }
 
 void
 StackFrame::initFromBailout(JSContext *cx, SnapshotIterator &iter)
 {
     AutoAssertNoGC nogc;
-    uint32 exprStackSlots = iter.slots() - script()->nfixed;
+    uint32_t exprStackSlots = iter.slots() - script()->nfixed;
 
 #ifdef TRACK_SNAPSHOTS
     iter.spewBailingFrom();
 #endif
     IonSpew(IonSpew_Bailouts, " expr stack slots %u, is function frame %u",
             exprStackSlots, isFunctionFrame());
 
     if (iter.bailoutKind() == Bailout_ArgumentCheck) {
@@ -139,31 +139,31 @@ StackFrame::initFromBailout(JSContext *c
         // constructor running.
         if (isConstructing())
             JS_ASSERT(!thisv.isPrimitive());
 
         JS_ASSERT(iter.slots() >= CountArgSlots(fun()));
         IonSpew(IonSpew_Bailouts, " frame slots %u, nargs %u, nfixed %u",
                 iter.slots(), fun()->nargs, script()->nfixed);
 
-        for (uint32 i = 0; i < fun()->nargs; i++) {
+        for (uint32_t i = 0; i < fun()->nargs; i++) {
             Value arg = iter.read();
             formals()[i] = arg;
         }
     }
     exprStackSlots -= CountArgSlots(maybeFun());
 
-    for (uint32 i = 0; i < script()->nfixed; i++) {
+    for (uint32_t i = 0; i < script()->nfixed; i++) {
         Value slot = iter.read();
         slots()[i] = slot;
     }
 
     IonSpew(IonSpew_Bailouts, " pushing %u expression stack slots", exprStackSlots);
     FrameRegs &regs = cx->regs();
-    for (uint32 i = 0; i < exprStackSlots; i++) {
+    for (uint32_t i = 0; i < exprStackSlots; i++) {
         Value v;
 
         // If coming from an invalidation bailout, and this is the topmost
         // value, and a value override has been specified, don't read from the
         // iterator. Otherwise, we risk using a garbage value.
         if (!iter.moreFrames() && i == exprStackSlots - 1 &&
             cx->runtime->hasIonReturnOverride())
             v = iter.skip();
         else
@@ -215,17 +215,17 @@ PushInlinedFrame(JSContext *cx, StackFra
     JS_ASSERT(fp == regs.fp());
     JS_ASSERT(fp->prev() == callerFrame);
 
     fp->formals()[-2].setObject(*fun);
 
     return fp;
 }
 
-static uint32
+static uint32_t
 ConvertFrames(JSContext *cx, IonActivation *activation, IonBailoutIterator &it)
 {
     AssertCanGC();
     IonSpew(IonSpew_Bailouts, "Bailing out %s:%u, IonScript %p",
             it.script()->filename, it.script()->lineno, (void *) it.ionScript());
     IonSpew(IonSpew_Bailouts, " reading from snapshot offset %u size %u",
             it.snapshotOffset(), it.ionScript()->snapshotsSize());
 #ifdef DEBUG
@@ -349,40 +349,40 @@ EnsureExitFrame(IonCommonFrameLayout *fr
         frame->changePrevType(IonFrame_Bailed_Rectifier);
         return;
     }
 
     JS_ASSERT(frame->prevType() == IonFrame_OptimizedJS);
     frame->changePrevType(IonFrame_Bailed_JS);
 }
 
-uint32
+uint32_t
 ion::Bailout(BailoutStack *sp)
 {
     AssertCanGC();
     JSContext *cx = GetIonContext()->cx;
     // We don't have an exit frame.
     cx->runtime->ionTop = NULL;
     IonActivationIterator ionActivations(cx);
     IonBailoutIterator iter(ionActivations, sp);
     IonActivation *activation = ionActivations.activation();
 
     // IonCompartment *ioncompartment = cx->compartment->ionCompartment();
     // IonActivation *activation = cx->runtime->ionActivation;
     // FrameRecovery in = FrameRecoveryFromBailout(ioncompartment, sp);
 
     IonSpew(IonSpew_Bailouts, "Took bailout! Snapshot offset: %d", iter.snapshotOffset());
 
-    uint32 retval = ConvertFrames(cx, activation, iter);
+    uint32_t retval = ConvertFrames(cx, activation, iter);
 
     EnsureExitFrame(iter.jsFrame());
     return retval;
 }
 
-uint32
+uint32_t
 ion::InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut)
 {
     AssertCanGC();
     sp->checkInvariants();
 
     JSContext *cx = GetIonContext()->cx;
 
     // We don't have an exit frame.
@@ -391,17 +391,17 @@ ion::InvalidationBailout(InvalidationBai
     IonBailoutIterator iter(ionActivations, sp);
     IonActivation *activation = ionActivations.activation();
 
     IonSpew(IonSpew_Bailouts, "Took invalidation bailout! Snapshot offset: %d", iter.snapshotOffset());
 
     // Note: the frame size must be computed before we return from this function.
     *frameSizeOut = iter.topFrameSize();
 
-    uint32 retval = ConvertFrames(cx, activation, iter);
+    uint32_t retval = ConvertFrames(cx, activation, iter);
 
     {
         IonJSFrameLayout *frame = iter.jsFrame();
         IonSpew(IonSpew_Invalidate, "converting to exit frame");
         IonSpew(IonSpew_Invalidate, " orig calleeToken %p", (void *) frame->calleeToken());
         IonSpew(IonSpew_Invalidate, " orig frameSize %u", unsigned(frame->prevFrameLocalSize()));
         IonSpew(IonSpew_Invalidate, " orig ra %p", (void *) frame->returnAddress());
@@ -459,18 +459,18 @@ ReflowArgTypes(JSContext *cx)
     types::AutoEnterTypeInference enter(cx);
 
     if (!fp->isConstructing())
         types::TypeScript::SetThis(cx, script, fp->thisValue());
     for (unsigned i = 0; i < nargs; ++i)
         types::TypeScript::SetArgument(cx, script, i, fp->unaliasedFormal(i, DONT_CHECK_ALIASING));
 }
 
-uint32
-ion::ReflowTypeInfo(uint32 bailoutResult)
+uint32_t
+ion::ReflowTypeInfo(uint32_t bailoutResult)
 {
     JSContext *cx = GetIonContext()->cx;
     IonActivation *activation = cx->runtime->ionActivation;
 
     IonSpew(IonSpew_Bailouts, "reflowing type info");
 
     if (bailoutResult == BAILOUT_RETURN_ARGUMENT_CHECK) {
         IonSpew(IonSpew_Bailouts, "reflowing type info at argument-checked entry");
@@ -494,17 +494,17 @@ ion::ReflowTypeInfo(uint32 bailoutResult
     // When a type barrier fails, the bad value is at the top of the stack.
     Value &result = cx->regs().sp[-1];
     types::TypeScript::Monitor(cx, script, pc, result);
 
     return true;
 }
 
-uint32
+uint32_t
 ion::RecompileForInlining()
 {
     JSContext *cx = GetIonContext()->cx;
     RawScript script = cx->fp()->script().unsafeGet();
 
     IonSpew(IonSpew_Inlining, "Recompiling script to inline calls %s:%d", script->filename,
             script->lineno);
 
@@ -525,17 +525,17 @@ ion::EnsureHasCallObject(JSContext *cx,
         fp->fun()->isHeavyweight() &&
         !fp->hasCallObj())
     {
         return fp->initCallObject(cx);
     }
     return true;
 }
 
-uint32
+uint32_t
 ion::BoundsCheckFailure()
 {
     JSContext *cx = GetIonContext()->cx;
     JSScript *script = GetBailedJSScript(cx);
 
     IonSpew(IonSpew_Bailouts, "Bounds check failure %s:%d", script->filename,
             script->lineno);
 
@@ -546,33 +546,33 @@ ion::BoundsCheckFailure()
         IonSpew(IonSpew_Invalidate, "Invalidating due to bounds check failure");
 
         return Invalidate(cx, script);
     }
 
     return true;
 }
 
-uint32
+uint32_t
 ion::ShapeGuardFailure()
 {
     JSContext *cx = GetIonContext()->cx;
     JSScript *script = GetBailedJSScript(cx);
 
     JS_ASSERT(script->hasIonScript());
     JS_ASSERT(!script->ion->invalidated());
 
     script->failedShapeGuard = true;
 
     IonSpew(IonSpew_Invalidate, "Invalidating due to shape guard failure");
     return Invalidate(cx, script);
 }
 
-uint32
+uint32_t
 ion::CachedShapeGuardFailure()
 {
     JSContext *cx = GetIonContext()->cx;
     JSScript *script = GetBailedJSScript(cx);
 
     JS_ASSERT(script->hasIonScript());
     JS_ASSERT(!script->ion->invalidated());
 
@@ -583,17 +583,17 @@ ion::CachedShapeGuardFailure()
     for (size_t i = 0; i < script->ion->scriptEntries(); i++)
         mjit::PurgeCaches(script->ion->getScript(i));
 
     IonSpew(IonSpew_Invalidate, "Invalidating due to shape guard failure");
     return Invalidate(cx, script);
 }
 
-uint32
+uint32_t
 ion::ThunkToInterpreter(Value *vp)
 {
     JSContext *cx = GetIonContext()->cx;
     IonActivation *activation = cx->runtime->ionActivation;
     BailoutClosure *br = activation->takeBailout();
 
     if (!EnsureHasCallObject(cx, cx->fp()))
         return Interpret_Error;
--- a/js/src/ion/Bailouts.h
+++ b/js/src/ion/Bailouts.h
@@ -86,34 +86,34 @@ namespace ion {
 // pointers. To account for this we segregate frames into a limited set of
 // "frame sizes", and create a table for each frame size. We also have the
 // option of not using bailout tables, for platforms or situations where the
 // 10 byte cost is more optimal than a bailout table. See IonFrames.h for more
 // detail.
 
 static const BailoutId INVALID_BAILOUT_ID = BailoutId(-1);
 
-static const uint32 BAILOUT_KIND_BITS = 3;
-static const uint32 BAILOUT_RESUME_BITS = 1;
+static const uint32_t BAILOUT_KIND_BITS = 3;
+static const uint32_t BAILOUT_RESUME_BITS = 1;
 
 // Keep this arbitrarily small for now, for testing.
-static const uint32 BAILOUT_TABLE_SIZE = 16;
+static const uint32_t BAILOUT_TABLE_SIZE = 16;
 
 // Bailout return codes.
 // N.B. the relative order of these values is hard-coded into ::GenerateBailoutThunk.
-static const uint32 BAILOUT_RETURN_OK = 0;
-static const uint32 BAILOUT_RETURN_FATAL_ERROR = 1;
-static const uint32 BAILOUT_RETURN_ARGUMENT_CHECK = 2;
-static const uint32 BAILOUT_RETURN_TYPE_BARRIER = 3;
-static const uint32 BAILOUT_RETURN_MONITOR = 4;
-static const uint32 BAILOUT_RETURN_RECOMPILE_CHECK = 5;
-static const uint32 BAILOUT_RETURN_BOUNDS_CHECK = 6;
-static const uint32 BAILOUT_RETURN_SHAPE_GUARD = 7;
-static const uint32 BAILOUT_RETURN_OVERRECURSED = 8;
-static const uint32 BAILOUT_RETURN_CACHED_SHAPE_GUARD = 9;
+static const uint32_t BAILOUT_RETURN_OK = 0;
+static const uint32_t BAILOUT_RETURN_FATAL_ERROR = 1;
+static const uint32_t BAILOUT_RETURN_ARGUMENT_CHECK = 2;
+static const uint32_t BAILOUT_RETURN_TYPE_BARRIER = 3;
+static const uint32_t BAILOUT_RETURN_MONITOR = 4;
+static const uint32_t BAILOUT_RETURN_RECOMPILE_CHECK = 5;
+static const uint32_t BAILOUT_RETURN_BOUNDS_CHECK = 6;
+static const uint32_t BAILOUT_RETURN_SHAPE_GUARD = 7;
+static const uint32_t BAILOUT_RETURN_OVERRECURSED = 8;
+static const uint32_t BAILOUT_RETURN_CACHED_SHAPE_GUARD = 9;
 
 // Attached to the compartment for easy passing through from ::Bailout to
 // ::ThunkToInterpreter.
 class BailoutClosure
 {
     // These class are used to control the stack usage and the order of
     // declaration is used by the destructor to restore the stack in the
     // expected order when classes are created. This class is only created
@@ -171,17 +171,17 @@ class InvalidationBailoutStack;
 // Must be implemented by each architecture.
 
 // This iterator is constructed at a time where there is no exit frame at the
 // moment. They must be initialized to the first JS frame instead of the exit
 // frame as usually done with IonFrameIterator.
 class IonBailoutIterator : public IonFrameIterator
 {
     MachineState machine_;
-    uint32 snapshotOffset_;
+    uint32_t snapshotOffset_;
     size_t topFrameSize_;
     IonScript *topIonScript_;
 
   public:
     IonBailoutIterator(const IonActivationIterator &activations, BailoutStack *sp);
     IonBailoutIterator(const IonActivationIterator &activations, InvalidationBailoutStack *sp);
 
     SnapshotOffset snapshotOffset() const {
@@ -202,32 +202,32 @@ class IonBailoutIterator : public IonFra
     }
 
     void dump() const;
 };
 
 bool EnsureHasCallObject(JSContext *cx, StackFrame *fp);
 
 // Called from a bailout thunk. Returns a BAILOUT_* error code.
-uint32 Bailout(BailoutStack *sp);
+uint32_t Bailout(BailoutStack *sp);
 
 // Called from the invalidation thunk. Returns a BAILOUT_* error code.
-uint32 InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut);
+uint32_t InvalidationBailout(InvalidationBailoutStack *sp, size_t *frameSizeOut);
 
 // Called from a bailout thunk. Interprets the frame(s) that have been bailed
 // out.
-uint32 ThunkToInterpreter(Value *vp);
+uint32_t ThunkToInterpreter(Value *vp);
 
-uint32 ReflowTypeInfo(uint32 bailoutResult);
+uint32_t ReflowTypeInfo(uint32_t bailoutResult);
 
-uint32 RecompileForInlining();
+uint32_t RecompileForInlining();
 
-uint32 BoundsCheckFailure();
+uint32_t BoundsCheckFailure();
 
-uint32 ShapeGuardFailure();
+uint32_t ShapeGuardFailure();
 
-uint32 CachedShapeGuardFailure();
+uint32_t CachedShapeGuardFailure();
 
 } // namespace ion
 } // namespace js
 
 #endif // jsion_bailouts_h__
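The BAILOUT_RETURN_* codes are what the thunks declared above hand back to their callers, which then pick a recovery action. A simplified sketch of that dispatch shape, with the recovery actions reduced to comments (illustrative only, not the real thunk logic):

    #include <cstdint>

    static const uint32_t BAILOUT_RETURN_OK = 0;
    static const uint32_t BAILOUT_RETURN_FATAL_ERROR = 1;
    static const uint32_t BAILOUT_RETURN_ARGUMENT_CHECK = 2;

    // Illustrative dispatch on a bailout result code.
    static bool HandleBailoutResult(uint32_t result) {
        switch (result) {
          case BAILOUT_RETURN_OK:
            return true;   // frames converted; resume in the interpreter
          case BAILOUT_RETURN_ARGUMENT_CHECK:
            return true;   // reflow argument type info, then resume
          case BAILOUT_RETURN_FATAL_ERROR:
          default:
            return false;  // propagate the error
        }
    }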
--- a/js/src/ion/BitSet.cpp
+++ b/js/src/ion/BitSet.cpp
@@ -21,17 +21,17 @@ BitSet::New(unsigned int max)
 }
 
 bool
 BitSet::init()
 {
     size_t sizeRequired = numWords() * sizeof(*bits_);
 
     TempAllocator *alloc = GetIonContext()->temp;
-    bits_ = (uint32 *)alloc->allocate(sizeRequired);
+    bits_ = (uint32_t *)alloc->allocate(sizeRequired);
     if (!bits_)
         return false;
 
     memset(bits_, 0, sizeRequired);
 
     return true;
 }
 
@@ -85,17 +85,17 @@ BitSet::fixedPointIntersect(const BitSet
 {
     JS_ASSERT(bits_);
     JS_ASSERT(other->max_ == max_);
     JS_ASSERT(other->bits_);
 
     bool changed = false;
 
     for (unsigned int i = 0; i < numWords(); i++) {
-        uint32 old = bits_[i];
+        uint32_t old = bits_[i];
         bits_[i] &= other->bits_[i];
 
         if (!changed && old != bits_[i])
             changed = true;
     }
     return changed;
 }
--- a/js/src/ion/BitSet.h
+++ b/js/src/ion/BitSet.h
@@ -15,33 +15,33 @@ namespace ion {
 // Provides constant time set insertion and removal, and fast linear
 // set operations such as intersection, difference, and union.
 // N.B. All set operations must be performed on sets with the same maximum.
 class BitSet : private TempObject
 {
   public:
     static size_t RawLengthForBits(size_t bits) {
-        return 1 + bits / (8 * sizeof(uint32));
+        return 1 + bits / (8 * sizeof(uint32_t));
     }
 
   private:
     BitSet(unsigned int max)
       : max_(max),
         bits_(NULL) {};
 
     unsigned int max_;
-    uint32 *bits_;
+    uint32_t *bits_;
 
-    static inline uint32 bitForValue(unsigned int value) {
-        return 1l << (uint32)(value % (8 * sizeof(uint32)));
+    static inline uint32_t bitForValue(unsigned int value) {
+        return 1l << (uint32_t)(value % (8 * sizeof(uint32_t)));
     }
 
     static inline unsigned int wordForValue(unsigned int value) {
-        return value / (8 * sizeof(uint32));
+        return value / (8 * sizeof(uint32_t));
     }
 
     inline unsigned int numWords() const {
         return RawLengthForBits(max_);
     }
 
     bool init();
 
@@ -95,31 +95,31 @@ class BitSet : private TempObject
     bool fixedPointIntersect(const BitSet *other);
 
     // O(max): Does inplace complement of the set.
     void complement();
 
     // O(max): Clear this set.
     void clear();
 
-    uint32 *raw() const {
+    uint32_t *raw() const {
         return bits_;
     }
     size_t rawLength() const {
         return numWords();
     }
 };
 
 class BitSet::Iterator
 {
   private:
     BitSet &set_;
     unsigned index_;
     unsigned word_;
-    uint32 value_;
+    uint32_t value_;
 
   public:
     Iterator(BitSet &set) :
       set_(set),
       index_(0),
       word_(0),
       value_(set.bits_[0])
     {
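To make the word/bit arithmetic in bitForValue and wordForValue concrete: with 32-bit words, value v lives in word v / 32 at bit v % 32. A small standalone check mirroring (not using) the BitSet internals:

    #include <cstdint>
    #include <cassert>

    static unsigned wordForValue(unsigned v) { return v / (8 * sizeof(uint32_t)); }
    static uint32_t bitForValue(unsigned v)  { return uint32_t(1) << (v % (8 * sizeof(uint32_t))); }

    int main() {
        // value 37 = 32 + 5: word 1, bit 5.
        assert(wordForValue(37) == 1);
        assert(bitForValue(37) == (uint32_t(1) << 5));
    }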
--- a/js/src/ion/C1Spewer.cpp
+++ b/js/src/ion/C1Spewer.cpp
@@ -112,17 +112,17 @@ void
 C1Spewer::spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LInstruction *ins, size_t &nextId)
 {
     for (size_t k = 0; k < ins->numDefs(); k++) {
         VirtualRegister *vreg = &regalloc->vregs[ins->getDef(k)->virtualRegister()];
 
         for (size_t i = 0; i < vreg->numIntervals(); i++) {
             LiveInterval *live = vreg->getInterval(i);
             if (live->numRanges()) {
-                fprintf(fp, "%d object \"", (i == 0) ? vreg->id() : int32(nextId++));
+                fprintf(fp, "%d object \"", (i == 0) ? vreg->id() : int32_t(nextId++));
                 LAllocation::PrintAllocation(fp, live->getAllocation());
                 fprintf(fp, "\" %d -1", vreg->id());
 
                 for (size_t j = 0; j < live->numRanges(); j++) {
                     fprintf(fp, " [%d, %d[", live->getRange(j)->from.pos(),
                             live->getRange(j)->to.pos());
                 }
 
                 for (UsePositionIterator usePos(live->usesBegin()); usePos != live->usesEnd(); usePos++)
                     fprintf(fp, " %d M", usePos->pos.pos());
 
@@ -150,24 +150,24 @@ void
 C1Spewer::spewPass(FILE *fp, MBasicBlock *block)
 {
     fprintf(fp, " begin_block\n");
     fprintf(fp, " name \"B%d\"\n", block->id());
     fprintf(fp, " from_bci -1\n");
     fprintf(fp, " to_bci -1\n");
 
     fprintf(fp, " predecessors");
-    for (uint32 i = 0; i < block->numPredecessors(); i++) {
+    for (uint32_t i = 0; i < block->numPredecessors(); i++) {
         MBasicBlock *pred = block->getPredecessor(i);
         fprintf(fp, " \"B%d\"", pred->id());
     }
     fprintf(fp, "\n");
 
     fprintf(fp, " successors");
-    for (uint32 i = 0; i < block->numSuccessors(); i++) {
+    for (uint32_t i = 0; i < block->numSuccessors(); i++) {
         MBasicBlock *successor = block->getSuccessor(i);
         fprintf(fp, " \"B%d\"", successor->id());
     }
     fprintf(fp, "\n");
 
     fprintf(fp, " xhandlers\n");
     fprintf(fp, " flags\n");
 
@@ -176,17 +176,17 @@ C1Spewer::spewPass(FILE *fp, MBasicBlock
         fprintf(fp, " last_lir_id %d\n", block->lir()->lastId());
     }
 
     fprintf(fp, " begin_states\n");
     fprintf(fp, " begin_locals\n");
     fprintf(fp, " size %d\n", (int)block->numEntrySlots());
     fprintf(fp, " method \"None\"\n");
 
-    for (uint32 i = 0; i < block->numEntrySlots(); i++) {
+    for (uint32_t i = 0; i < block->numEntrySlots(); i++) {
         MDefinition *ins = block->getEntrySlot(i);
         fprintf(fp, " ");
         fprintf(fp, "%d ", i);
         if (ins->isUnused())
             fprintf(fp, "unused");
         else
             ins->printName(fp);
         fprintf(fp, "\n");
--- a/js/src/ion/CodeGenerator.cpp
+++ b/js/src/ion/CodeGenerator.cpp
@@ -336,17 +336,17 @@ CodeGenerator::visitNop(LNop *lir)
 
 bool
 CodeGenerator::visitOsiPoint(LOsiPoint *lir)
 {
     // Note: markOsiPoint ensures enough space exists between the last
     // LOsiPoint and this one to patch adjacent call instructions.
 
     JS_ASSERT(masm.framePushed() == frameSize());
 
-    uint32 osiCallPointOffset;
+    uint32_t osiCallPointOffset;
     if (!markOsiPoint(lir, &osiCallPointOffset))
         return false;
 
     LSafepoint *safepoint = lir->associatedSafepoint();
     JS_ASSERT(!safepoint->osiCallPointOffset());
     safepoint->setOsiCallPointOffset(osiCallPointOffset);
     return true;
 }
 
@@ -472,37 +472,37 @@ CodeGenerator::visitOsrScopeChain(LOsrSc
     return true;
 }
 
 bool
 CodeGenerator::visitStackArgT(LStackArgT *lir)
 {
     const LAllocation *arg = lir->getArgument();
     MIRType argType = lir->mir()->getArgument()->type();
-    uint32 argslot = lir->argslot();
-
-    int32 stack_offset = StackOffsetOfPassedArg(argslot);
+    uint32_t argslot = lir->argslot();
+
+    int32_t stack_offset = StackOffsetOfPassedArg(argslot);
     Address dest(StackPointer, stack_offset);
 
     if (arg->isFloatReg())
         masm.storeDouble(ToFloatRegister(arg), dest);
     else if (arg->isRegister())
         masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
     else
         masm.storeValue(*(arg->toConstant()), dest);
 
     return pushedArgumentSlots_.append(StackOffsetToSlot(stack_offset));
 }
 
 bool
 CodeGenerator::visitStackArgV(LStackArgV *lir)
 {
     ValueOperand val = ToValue(lir, 0);
-    uint32 argslot = lir->argslot();
-    int32 stack_offset = StackOffsetOfPassedArg(argslot);
+    uint32_t argslot = lir->argslot();
+    int32_t stack_offset = StackOffsetOfPassedArg(argslot);
 
     masm.storeValue(val, Address(StackPointer, stack_offset));
     return pushedArgumentSlots_.append(StackOffsetToSlot(stack_offset));
 }
 
 bool
 CodeGenerator::visitInteger(LInteger *lir)
 {
@@ -527,17 +527,17 @@ CodeGenerator::visitSlots(LSlots *lir)
     masm.loadPtr(slots, ToRegister(lir->output()));
     return true;
 }
 
 bool
 CodeGenerator::visitStoreSlotV(LStoreSlotV *store)
 {
     Register base = ToRegister(store->slots());
-    int32 offset = store->mir()->slot() * sizeof(Value);
+    int32_t offset = store->mir()->slot() * sizeof(Value);
 
     const ValueOperand value = ToValue(store, LStoreSlotV::Value);
 
     if (store->mir()->needsBarrier())
         emitPreBarrier(Address(base, offset), MIRType_Value);
 
     masm.storeValue(value, Address(base, offset));
     return true;
 
@@ -598,17 +598,17 @@ CodeGenerator::visitCallNative(LCallNati
     // Registers used for callWithABI() argument-passing.
     const Register argJSContextReg = ToRegister(call->getArgJSContextReg());
     const Register argUintNReg = ToRegister(call->getArgUintNReg());
     const Register argVpReg = ToRegister(call->getArgVpReg());
 
     // Misc. temporary registers.
     const Register tempReg = ToRegister(call->getTempReg());
 
-    DebugOnly<uint32> initialStack = masm.framePushed();
+    DebugOnly<uint32_t> initialStack = masm.framePushed();
 
     masm.checkStackAlignment();
 
     // Native functions have the signature:
     //  bool (*)(JSContext *, unsigned, Value *vp)
     // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
     // are the function arguments.
 
@@ -622,17 +622,17 @@ CodeGenerator::visitCallNative(LCallNati
     // Preload arguments into registers.
     masm.loadJSContext(argJSContextReg);
     masm.move32(Imm32(call->numStackArgs()), argUintNReg);
     masm.movePtr(StackPointer, argVpReg);
 
     masm.Push(argUintNReg);
 
     // Construct native exit frame.
-    uint32 safepointOffset;
+    uint32_t safepointOffset;
     if (!masm.buildFakeExitFrame(tempReg, &safepointOffset))
         return false;
     masm.enterFakeExitFrame();
 
     if (!markSafepointAt(safepointOffset, call))
         return false;
 
     // Construct and execute call.
@@ -682,17 +682,17 @@ CodeGenerator::visitCallDOMNative(LCallD
     // Registers used for callWithABI() argument-passing.
     const Register argJSContext = ToRegister(call->getArgJSContext());
     const Register argObj = ToRegister(call->getArgObj());
     const Register argPrivate = ToRegister(call->getArgPrivate());
     const Register argArgc = ToRegister(call->getArgArgc());
     const Register argVp = ToRegister(call->getArgVp());
 
-    DebugOnly<uint32> initialStack = masm.framePushed();
+    DebugOnly<uint32_t> initialStack = masm.framePushed();
 
     masm.checkStackAlignment();
 
     // DOM methods have the signature:
     //  bool (*)(JSContext *, HandleObject, void *private, unsigned argc, Value *vp)
     // Where vp[0] is space for an outparam and the callee, vp[1] is |this|, and vp[2] onward
     // are the function arguments.
 
@@ -718,17 +718,17 @@ CodeGenerator::visitCallDOMNative(LCallD
     // Push |this| object for passing HandleObject. We push after argc to
     // maintain the same sp-relative location of the object pointer with other
     // DOMExitFrames.
     masm.Push(argObj);
     masm.movePtr(StackPointer, argObj);
 
     // Construct native exit frame.
-    uint32 safepointOffset;
+    uint32_t safepointOffset;
     if (!masm.buildFakeExitFrame(argJSContext, &safepointOffset))
         return false;
     masm.enterFakeExitFrame(ION_FRAME_DOMMETHOD);
 
     if (!markSafepointAt(safepointOffset, call))
         return false;
 
     // Construct and execute call.
@@ -781,22 +781,22 @@ static const VMFunction GetIntrinsicValu
 bool
 CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue *lir)
 {
     pushArg(ImmGCPtr(lir->mir()->name()));
     return callVM(GetIntrinsicValueInfo, lir);
 }
 
-typedef bool (*InvokeFunctionFn)(JSContext *, JSFunction *, uint32, Value *, Value *);
+typedef bool (*InvokeFunctionFn)(JSContext *, JSFunction *, uint32_t, Value *, Value *);
 static const VMFunction InvokeFunctionInfo = FunctionInfo<InvokeFunctionFn>(InvokeFunction);
 
 bool
 CodeGenerator::emitCallInvokeFunction(LInstruction *call, Register calleereg,
-                                      uint32 argc, uint32 unusedStack)
+                                      uint32_t argc, uint32_t unusedStack)
 {
     // Nestle %esp up to the argument vector.
     // Each path must account for framePushed_ separately, for callVM to be valid.
     masm.freeStack(unusedStack);
 
     pushArg(StackPointer); // argv.
     pushArg(Imm32(argc));  // argc.
     pushArg(calleereg);    // JSFunction *.
 
@@ -821,17 +821,17 @@ static inline int32_t ionOffset(Executio
 }
 
 bool
 CodeGenerator::visitCallGeneric(LCallGeneric *call)
 {
     Register calleereg = ToRegister(call->getFunction());
     Register objreg    = ToRegister(call->getTempObject());
     Register nargsreg  = ToRegister(call->getNargsReg());
 
-    uint32 unusedStack = StackOffsetOfPassedArg(call->argslot());
+    uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
     Label invoke, thunk, makeCall, end;
 
     // Known-target case is handled by LCallKnown.
     JS_ASSERT(!call->hasSingleTarget());
     // Unknown constructor case is handled by LCallConstructor.
     JS_ASSERT(!call->mir()->isConstructing());
 
     // Generate an ArgumentsRectifier.
@@ -856,17 +856,17 @@ CodeGenerator::visitCallGeneric(LCallGen
     // Guard that the IonScript has been compiled.
     masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &invoke);
 
     // Nestle the StackPointer up to the argument vector.
     masm.freeStack(unusedStack);
 
     // Construct the IonFramePrefix.
-    uint32 descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
+    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
     masm.Push(Imm32(call->numActualArgs()));
     masm.Push(calleereg);
     masm.Push(Imm32(descriptor));
 
     // Check whether the provided arguments satisfy target argc.
     masm.load16ZeroExtend(Address(calleereg, offsetof(JSFunction, nargs)), nargsreg);
     masm.cmp32(nargsreg, Imm32(call->numStackArgs()));
     masm.j(Assembler::Above, &thunk);
 
@@ -882,17 +882,17 @@ CodeGenerator::visitCallGeneric(LCallGen
         JS_ASSERT(ArgumentsRectifierReg != objreg);
         masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking.
         masm.loadPtr(Address(objreg, IonCode::offsetOfCode()), objreg);
         masm.move32(Imm32(call->numStackArgs()), ArgumentsRectifierReg);
     }
 
     // Finally call the function in objreg.
     masm.bind(&makeCall);
-    uint32 callOffset = masm.callIon(objreg);
+    uint32_t callOffset = masm.callIon(objreg);
     if (!markSafepointAt(callOffset, call))
         return false;
 
     // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
     // The return address has already been removed from the Ion frame.
     int prefixGarbage = sizeof(IonJSFrameLayout) - sizeof(void *);
     masm.adjustStack(prefixGarbage - unusedStack);
     masm.jump(&end);
 
@@ -908,17 +908,17 @@ CodeGenerator::visitCallGeneric(LCallGen
 }
 
 bool
 CodeGenerator::visitCallKnown(LCallKnown *call)
 {
     JSContext *cx = GetIonContext()->cx;
     Register calleereg = ToRegister(call->getFunction());
     Register objreg    = ToRegister(call->getTempObject());
-    uint32 unusedStack = StackOffsetOfPassedArg(call->argslot());
+    uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
     JSFunction *target = call->getSingleTarget();
     Label end, invoke;
 
     // Native single targets are handled by LCallNative.
     JS_ASSERT(!target->isNative());
     // Missing arguments must have been explicitly appended by the IonBuilder.
     JS_ASSERT(target->nargs <= call->numStackArgs());
 
@@ -952,23 +952,23 @@ CodeGenerator::visitCallKnown(LCallKnown
     // Load the start of the target IonCode.
     masm.loadPtr(Address(objreg, IonScript::offsetOfMethod()), objreg);
     masm.loadPtr(Address(objreg, IonCode::offsetOfCode()), objreg);
 
     // Nestle the StackPointer up to the argument vector.
     masm.freeStack(unusedStack);
 
     // Construct the IonFramePrefix.
-    uint32 descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
+    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
     masm.Push(Imm32(call->numActualArgs()));
     masm.Push(calleereg);
     masm.Push(Imm32(descriptor));
 
     // Finally call the function in objreg.
-    uint32 callOffset = masm.callIon(objreg);
+    uint32_t callOffset = masm.callIon(objreg);
     if (!markSafepointAt(callOffset, call))
         return false;
 
     // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
     // The return address has already been removed from the Ion frame.
     int prefixGarbage = sizeof(IonJSFrameLayout) - sizeof(void *);
     masm.adjustStack(prefixGarbage - unusedStack);
     masm.jump(&end);
 
@@ -988,31 +988,31 @@ CodeGenerator::visitCallKnown(LCallKnown
         masm.loadValue(Address(StackPointer, unusedStack), JSReturnOperand);
         masm.bind(&notPrimitive);
     }
 
     dropArguments(call->numStackArgs() + 1);
     return true;
 }
 
-typedef bool (*InvokeConstructorFn)(JSContext *, JSObject *, uint32, Value *, Value *);
+typedef bool (*InvokeConstructorFn)(JSContext *, JSObject *, uint32_t, Value *, Value *);
 static const VMFunction InvokeConstructorInfo =
     FunctionInfo<InvokeConstructorFn>(ion::InvokeConstructor);
 
 bool
 CodeGenerator::visitCallConstructor(LCallConstructor *call)
 {
     JS_ASSERT(call->mir()->isConstructing());
 
     // Holds the function object.
     const LAllocation *callee = call->getFunction();
     Register calleereg = ToRegister(callee);
 
-    uint32 callargslot = call->argslot();
-    uint32 unusedStack = StackOffsetOfPassedArg(callargslot);
+    uint32_t callargslot = call->argslot();
+    uint32_t unusedStack = StackOffsetOfPassedArg(callargslot);
 
     // Nestle %esp up to the argument vector.
     masm.freeStack(unusedStack);
 
     pushArg(StackPointer);                 // argv.
     pushArg(Imm32(call->numActualArgs())); // argc.
     pushArg(calleereg);                    // JSFunction *.
 
@@ -1209,17 +1209,17 @@ CodeGenerator::visitApplyArgsGeneric(LAp
         masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking.
         masm.loadPtr(Address(objreg, IonCode::offsetOfCode()), objreg);
         masm.movePtr(argcreg, ArgumentsRectifierReg);
     }
 
     masm.bind(&rejoin);
 
     // Finally call the function in objreg, as assigned by one of the paths above.
-    uint32 callOffset = masm.callIon(objreg);
+    uint32_t callOffset = masm.callIon(objreg);
     if (!markSafepointAt(callOffset, apply))
         return false;
 
     // Recover the number of arguments from the frame descriptor.
     masm.loadPtr(Address(StackPointer, 0), copyreg);
     masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), copyreg);
     masm.subPtr(Imm32(pushed), copyreg);
 
@@ -1240,17 +1240,17 @@ CodeGenerator::visitApplyArgsGeneric(LAp
     // Pop arguments and continue.
     masm.bind(&end);
     emitPopArguments(apply, copyreg);
 
     return true;
 }
 
 // Registers safe for use before generatePrologue().
-static const uint32 EntryTempMask = Registers::TempMask & ~(1 << OsrFrameReg.code());
+static const uint32_t EntryTempMask = Registers::TempMask & ~(1 << OsrFrameReg.code());
 
 bool
 CodeGenerator::generateArgumentsChecks()
 {
     MIRGraph &mir = gen->graph();
     MResumePoint *rp = mir.entryResumePoint();
 
     // Reserve the amount of stack the actual frame will use. We have to undo
@@ -1260,29 +1260,29 @@ CodeGenerator::generateArgumentsChecks()
     // No registers are allocated yet, so it's safe to grab anything.
     Register temp = GeneralRegisterSet(EntryTempMask).getAny();
 
     CompileInfo &info = gen->info();
 
     // Indexes need to be shifted by one, to skip the scope chain slot.
     JS_ASSERT(info.scopeChainSlot() == 0);
-    static const uint32 START_SLOT = 1;
+    static const uint32_t START_SLOT = 1;
 
     Label mismatched;
-    for (uint32 i = START_SLOT; i < CountArgSlots(info.fun()); i++) {
+    for (uint32_t i = START_SLOT; i < CountArgSlots(info.fun()); i++) {
         // All initial parameters are guaranteed to be MParameters.
         MParameter *param = rp->getOperand(i)->toParameter();
         const types::TypeSet *types = param->typeSet();
         if (!types || types->unknown())
             continue;
 
         // Use ReturnReg as a scratch register here, since not all platforms
         // have an actual ScratchReg.
-        int32 offset = ArgToStackOffset((i - START_SLOT) * sizeof(Value));
+        int32_t offset = ArgToStackOffset((i - START_SLOT) * sizeof(Value));
         masm.guardTypeSet(Address(StackPointer, offset), types, temp, &mismatched);
     }
 
     if (mismatched.used() && !bailoutFrom(&mismatched, graph.entrySnapshot()))
         return false;
 
     masm.freeStack(frameSize());
 
@@ -1418,17 +1418,17 @@ CodeGenerator::maybeCreateScriptCounts()
         MBasicBlock *block = graph.getBlock(i)->mir();
 
         // Find a PC offset in the outermost script to use. If this block is
         // from an inlined script, find a location in the outer script to
         // associate information about the inling with.
         MResumePoint *resume = block->entryResumePoint();
         while (resume->caller())
             resume = resume->caller();
-        uint32 offset = resume->pc() - script->code;
+        uint32_t offset = resume->pc() - script->code;
         JS_ASSERT(offset < script->length);
 
         if (!counts->block(i).init(block->id(), offset, block->numSuccessors()))
             return NULL;
         for (size_t j = 0; j < block->numSuccessors(); j++)
             counts->block(i).setSuccessor(j, block->getSuccessor(j)->id());
     }
 
@@ -1438,23 +1438,23 @@ CodeGenerator::maybeCreateScriptCounts()
 
 // Structure for managing the state tracked for a block by script counters.
 struct ScriptCountBlockState
 {
     IonBlockCounts &block;
     MacroAssembler &masm;
 
     Sprinter printer;
 
-    uint32 instructionBytes;
-    uint32 spillBytes;
+    uint32_t instructionBytes;
+    uint32_t spillBytes;
 
     // Pointer to instructionBytes, spillBytes, or NULL, depending on the last
     // instruction processed.
-    uint32 *last;
-    uint32 lastLength;
+    uint32_t *last;
+    uint32_t lastLength;
 
   public:
     ScriptCountBlockState(IonBlockCounts *block, MacroAssembler *masm)
       : block(*block), masm(*masm),
        printer(GetIonContext()->cx),
        instructionBytes(0), spillBytes(0), last(NULL), lastLength(0)
    {
    }
 
@@ -1558,17 +1558,17 @@ class OutOfLineNewArray : public OutOfLi
         return codegen->visitOutOfLineNewArray(this);
     }
 
     LNewArray *lir() const {
         return lir_;
     }
 };
 
-typedef JSObject *(*NewInitArrayFn)(JSContext *, uint32, types::TypeObject *);
+typedef JSObject *(*NewInitArrayFn)(JSContext *, uint32_t, types::TypeObject *);
 static const VMFunction NewInitArrayInfo =
     FunctionInfo<NewInitArrayFn>(NewInitArray);
 
 bool
 CodeGenerator::visitNewArrayCallVM(LNewArray *lir)
 {
     Register objReg = ToRegister(lir->output());
 
@@ -1615,17 +1615,17 @@ CodeGenerator::visitNewSlots(LNewSlots *
     return true;
 }
 
 bool
 CodeGenerator::visitNewArray(LNewArray *lir)
 {
     Register objReg = ToRegister(lir->output());
     JSObject *templateObject = lir->mir()->templateObject();
-    uint32 count = lir->mir()->count();
+    uint32_t count = lir->mir()->count();
 
     JS_ASSERT(count < JSObject::NELEMENTS_LIMIT);
 
     size_t maxArraySlots =
         gc::GetGCKindSlots(gc::FINALIZE_OBJECT_LAST) - ObjectElements::VALUES_PER_HEADER;
 
     // Allocate space using the VMCall
     // when mir hints it needs to get allocated immediatly,
@@ -2415,17 +2415,17 @@ CodeGenerator::visitNotV(LNotV *lir)
     masm.bind(&join);
     return true;
 }
 
 bool
 CodeGenerator::visitBoundsCheck(LBoundsCheck *lir)
 {
     if (lir->index()->isConstant()) {
-        // Use uint32 so that the comparison is unsigned.
+        // Use uint32_t so that the comparison is unsigned.
         uint32_t index = ToInt32(lir->index());
         if (lir->length()->isConstant()) {
             uint32_t length = ToInt32(lir->length());
             if (index < length)
                 return true;
             return bailout(lir->snapshot());
         }
         masm.cmp32(ToOperand(lir->length()), Imm32(index));
@@ -2437,24 +2437,24 @@ CodeGenerator::visitBoundsCheck(LBoundsC
     }
     masm.cmp32(ToOperand(lir->length()), ToRegister(lir->index()));
     return bailoutIf(Assembler::BelowOrEqual, lir->snapshot());
 }
 
 bool
 CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange *lir)
 {
-    int32 min = lir->mir()->minimum();
-    int32 max = lir->mir()->maximum();
+    int32_t min = lir->mir()->minimum();
+    int32_t max = lir->mir()->maximum();
     JS_ASSERT(max >= min);
 
     Register temp = ToRegister(lir->getTemp(0));
     if (lir->index()->isConstant()) {
-        int32 nmin, nmax;
-        int32 index = ToInt32(lir->index());
+        int32_t nmin, nmax;
+        int32_t index = ToInt32(lir->index());
         if (SafeAdd(index, min, &nmin) && SafeAdd(index, max, &nmax) && nmin >= 0) {
             masm.cmp32(ToOperand(lir->length()), Imm32(nmax));
             return bailoutIf(Assembler::BelowOrEqual, lir->snapshot());
         }
         masm.mov(Imm32(index), temp);
     } else {
         masm.mov(ToRegister(lir->index()), temp);
     }
 
@@ -2462,17 +2462,17 @@ CodeGenerator::visitBoundsCheckRange(LBo
     // If the minimum and maximum differ then do an underflow check first.
     // If the two are the same then doing an unsigned comparison on the
     // length will also catch a negative index.
     if (min != max) {
        if (min != 0) {
            masm.add32(Imm32(min), temp);
            if (!bailoutIf(Assembler::Overflow, lir->snapshot()))
                return false;
-            int32 diff;
+            int32_t diff;
            if (SafeSub(max, min, &diff))
                max = diff;
            else
                masm.sub32(Imm32(min), temp);
        }
 
        masm.cmp32(temp, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, lir->snapshot()))
@@ -2492,17 +2492,17 @@ CodeGenerator::visitBoundsCheckRange(LBo
     masm.cmp32(ToOperand(lir->length()), temp);
     return bailoutIf(Assembler::BelowOrEqual, lir->snapshot());
 }
 
 bool
 CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower *lir)
 {
-    int32 min = lir->mir()->minimum();
+    int32_t min = lir->mir()->minimum();
     masm.cmp32(ToRegister(lir->index()), Imm32(min));
     return bailoutIf(Assembler::LessThan, lir->snapshot());
 }
 
 class OutOfLineStoreElementHole : public OutOfLineCodeBase<CodeGenerator>
 {
     LInstruction *ins_;
     Label rejoinStore_;
 
@@ -3101,17 +3101,17 @@ CodeGenerator::visitArgumentsLength(LArg
 bool
 CodeGenerator::visitGetArgument(LGetArgument *lir)
 {
     ValueOperand result = GetValueOutput(lir);
     const LAllocation *index = lir->index();
     size_t argvOffset = frameSize() + IonJSFrameLayout::offsetOfActualArgs();
 
     if (index->isConstant()) {
-        int32 i = index->toConstant()->toInt32();
+        int32_t i = index->toConstant()->toInt32();
         Address argPtr(StackPointer, sizeof(Value) * i + argvOffset);
         masm.loadValue(argPtr, result);
     } else {
         Register i = ToRegister(index);
         BaseIndex argPtr(StackPointer, i, ScaleFromShift(sizeof(Value)), argvOffset);
         masm.loadValue(argPtr, result);
     }
     return true;
 
@@ -3162,17 +3162,17 @@ CodeGenerator::link()
     // We encode safepoints after the OSI-point offsets have been determined.
     encodeSafepoints();
 
     RootedScript script(cx, gen->info().script());
     ExecutionMode executionMode = gen->info().executionMode();
     JS_ASSERT(!HasIonScript(script, executionMode));
 
-    uint32 scriptFrameSize = frameClass_ == FrameSizeClass::None()
+    uint32_t scriptFrameSize = frameClass_ == FrameSizeClass::None()
                            ? frameDepth_
                            : FrameSizeClass::FromDepth(frameDepth_).frameSize();
 
     // Check to make sure we didn't have a mid-build invalidation. If so, we
     // will trickle to ion::Compile() and return Method_Skipped.
     if (cx->compartment->types.compiledInfo.compilerOutput(cx)->isInvalidated())
         return true;
 
@@ -4339,33 +4339,33 @@ CodeGenerator::visitCallInstanceOf(LCall
 bool
 CodeGenerator::visitGetDOMProperty(LGetDOMProperty *ins)
 {
     const Register JSContextReg = ToRegister(ins->getJSContextReg());
     const Register ObjectReg = ToRegister(ins->getObjectReg());
     const Register PrivateReg = ToRegister(ins->getPrivReg());
     const Register ValueReg = ToRegister(ins->getValueReg());
 
-    DebugOnly<uint32> initialStack = masm.framePushed();
+    DebugOnly<uint32_t> initialStack = masm.framePushed();
 
     masm.checkStackAlignment();
 
     /* Make Space for the outparam */
     masm.adjustStack(-int32_t(sizeof(Value)));
     masm.movePtr(StackPointer, ValueReg);
 
     masm.Push(ObjectReg);
 
     // GetReservedSlot(obj, DOM_PROTO_INSTANCE_CLASS_SLOT).toPrivate()
     masm.loadPrivate(Address(ObjectReg, JSObject::getFixedSlotOffset(0)), PrivateReg);
 
     // Rooting will happen at GC time.
     masm.movePtr(StackPointer, ObjectReg);
 
-    uint32 safepointOffset;
+    uint32_t safepointOffset;
     if (!masm.buildFakeExitFrame(JSContextReg, &safepointOffset))
         return false;
     masm.enterFakeExitFrame(ION_FRAME_DOMGETTER);
 
     if (!markSafepointAt(safepointOffset, ins))
         return false;
 
     masm.setupUnalignedABICall(4, JSContextReg);
 
@@ -4404,34 +4404,34 @@ CodeGenerator::visitGetDOMProperty(LGetD
 bool
 CodeGenerator::visitSetDOMProperty(LSetDOMProperty *ins)
 {
     const Register JSContextReg = ToRegister(ins->getJSContextReg());
     const Register ObjectReg = ToRegister(ins->getObjectReg());
     const Register PrivateReg = ToRegister(ins->getPrivReg());
     const Register ValueReg = ToRegister(ins->getValueReg());
 
-    DebugOnly<uint32> initialStack = masm.framePushed();
+    DebugOnly<uint32_t> initialStack = masm.framePushed();
 
     masm.checkStackAlignment();
 
     // Push thei argument. Rooting will happen at GC time.
     ValueOperand argVal = ToValue(ins, LSetDOMProperty::Value);
     masm.Push(argVal);
     masm.movePtr(StackPointer, ValueReg);
 
     masm.Push(ObjectReg);
 
     // GetReservedSlot(obj, DOM_PROTO_INSTANCE_CLASS_SLOT).toPrivate()
     masm.loadPrivate(Address(ObjectReg, JSObject::getFixedSlotOffset(0)), PrivateReg);
 
     // Rooting will happen at GC time.
     masm.movePtr(StackPointer, ObjectReg);
 
-    uint32 safepointOffset;
+    uint32_t safepointOffset;
     if (!masm.buildFakeExitFrame(JSContextReg, &safepointOffset))
         return false;
     masm.enterFakeExitFrame(ION_FRAME_DOMSETTER);
 
     if (!markSafepointAt(safepointOffset, ins))
         return false;
 
     masm.setupUnalignedABICall(4, JSContextReg);
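The call paths above push a frame descriptor built by MakeFrameDescriptor and later recover the frame size by shifting FRAMESIZE_SHIFT bits off it, so the descriptor is the frame size and frame type packed into one word. A sketch of that packing; the field width and tag value here are assumed for illustration, not taken from IonFrames.h:

    #include <cstdint>
    #include <cassert>

    // Illustrative packing: low bits hold the frame type, the rest the size.
    static const uint32_t FRAMESIZE_SHIFT = 4;    // assumed width of the type field

    static uint32_t MakeFrameDescriptor(uint32_t frameSize, uint32_t type) {
        return (frameSize << FRAMESIZE_SHIFT) | type;
    }

    int main() {
        const uint32_t IonFrame_OptimizedJS = 1;  // illustrative tag value
        uint32_t d = MakeFrameDescriptor(64, IonFrame_OptimizedJS);
        assert((d >> FRAMESIZE_SHIFT) == 64);     // how callers recover the size
        assert((d & ((1 << FRAMESIZE_SHIFT) - 1)) == IonFrame_OptimizedJS);
    }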
--- a/js/src/ion/CodeGenerator.h
+++ b/js/src/ion/CodeGenerator.h
@@ -71,17 +71,17 @@ class CodeGenerator : public CodeGenerat
     bool visitPointer(LPointer *lir);
     bool visitSlots(LSlots *lir);
     bool visitStoreSlotV(LStoreSlotV *store);
     bool visitElements(LElements *lir);
     bool visitTypeBarrier(LTypeBarrier *lir);
     bool visitMonitorTypes(LMonitorTypes *lir);
     bool visitCallNative(LCallNative *call);
     bool emitCallInvokeFunction(LInstruction *call, Register callereg,
-                                uint32 argc, uint32 unusedStack);
+                                uint32_t argc, uint32_t unusedStack);
     bool visitCallGeneric(LCallGeneric *call);
     bool visitCallKnown(LCallKnown *call);
     bool visitCallConstructor(LCallConstructor *call);
     bool emitCallInvokeFunction(LApplyArgsGeneric *apply, Register extraStackSize);
     void emitPushArguments(LApplyArgsGeneric *apply, Register extraStackSpace);
     void emitPopArguments(LApplyArgsGeneric *apply, Register extraStackSize);
     bool visitApplyArgsGeneric(LApplyArgsGeneric *apply);
     bool visitDoubleToInt32(LDoubleToInt32 *lir);
--- a/js/src/ion/CompactBuffer.h
+++ b/js/src/ion/CompactBuffer.h
@@ -23,130 +23,130 @@ class CompactBufferWriter;
 // containing 7 bits of the integer and a bit which specifies whether the next
 // byte is also part of the integer.
 //
 // Fixed-width integers are also available, in case the actual value will not
 // be known until later.
 
 class CompactBufferReader
 {
-    const uint8 *buffer_;
-    const uint8 *end_;
+    const uint8_t *buffer_;
+    const uint8_t *end_;
 
-    uint32 readVariableLength() {
-        uint32 val = 0;
-        uint32 shift = 0;
-        uint8 byte;
+    uint32_t readVariableLength() {
+        uint32_t val = 0;
+        uint32_t shift = 0;
+        uint8_t byte;
         while (true) {
             JS_ASSERT(shift < 32);
             byte = readByte();
-            val |= (uint32(byte) >> 1) << shift;
+            val |= (uint32_t(byte) >> 1) << shift;
             shift += 7;
             if (!(byte & 1))
                 return val;
         }
         JS_NOT_REACHED("unreachable");
         return 0;
     }
 
   public:
-    CompactBufferReader(const uint8 *start, const uint8 *end)
+    CompactBufferReader(const uint8_t *start, const uint8_t *end)
       : buffer_(start), end_(end)
     { }
     inline CompactBufferReader(const CompactBufferWriter &writer);
 
-    uint8 readByte() {
+    uint8_t readByte() {
         JS_ASSERT(buffer_ < end_);
         return *buffer_++;
     }
-    uint32 readFixedUint32() {
-        uint32 b0 = readByte();
-        uint32 b1 = readByte();
-        uint32 b2 = readByte();
-        uint32 b3 = readByte();
+    uint32_t readFixedUint32_t() {
+        uint32_t b0 = readByte();
+        uint32_t b1 = readByte();
+        uint32_t b2 = readByte();
+        uint32_t b3 = readByte();
         return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
     }
-    uint16 readFixedUint16() {
-        uint32 b0 = readByte();
-        uint32 b1 = readByte();
+    uint16_t readFixedUint16_t() {
+        uint32_t b0 = readByte();
+        uint32_t b1 = readByte();
         return b0 | (b1 << 8);
     }
-    uint32 readUnsigned() {
+    uint32_t readUnsigned() {
         return readVariableLength();
     }
-    int32 readSigned() {
-        uint8 b = readByte();
+    int32_t readSigned() {
+        uint8_t b = readByte();
         bool isNegative = !!(b & (1 << 0));
         bool more = !!(b & (1 << 1));
-        int32 result = b >> 2;
+        int32_t result = b >> 2;
         if (more)
             result |= readUnsigned() << 6;
         if (isNegative)
             return -result;
         return result;
     }
 
     bool more() const {
         JS_ASSERT(buffer_ <= end_);
         return buffer_ < end_;
     }
 };
 
 class CompactBufferWriter
 {
-    js::Vector<uint8, 32, SystemAllocPolicy> buffer_;
+    js::Vector<uint8_t, 32, SystemAllocPolicy> buffer_;
     bool enoughMemory_;
 
   public:
     CompactBufferWriter()
       : enoughMemory_(true)
     { }
 
     // Note: writeByte() takes uint32 to catch implicit casts with a runtime
     // assert.
-    void writeByte(uint32 byte) {
+    void writeByte(uint32_t byte) {
         JS_ASSERT(byte <= 0xFF);
         enoughMemory_ &= buffer_.append(byte);
     }
-    void writeUnsigned(uint32 value) {
+    void writeUnsigned(uint32_t value) {
         do {
-            uint8 byte = ((value & 0x7F) << 1) | (value > 0x7F);
+            uint8_t byte = ((value & 0x7F) << 1) | (value > 0x7F);
             writeByte(byte);
             value >>= 7;
         } while (value);
     }
-    void writeSigned(int32 v) {
+    void writeSigned(int32_t v) {
         bool isNegative = v < 0;
-        uint32 value = isNegative ? -v : v;
-        uint8 byte = ((value & 0x3F) << 2) | ((value > 0x3F) << 1) | uint32(isNegative);
+        uint32_t value = isNegative ? -v : v;
+        uint8_t byte = ((value & 0x3F) << 2) | ((value > 0x3F) << 1) | uint32_t(isNegative);
         writeByte(byte);
 
         // Write out the rest of the bytes, if needed.
         value >>= 6;
         if (value == 0)
            return;
        writeUnsigned(value);
    }
-    void writeFixedUint32(uint32 value) {
+    void writeFixedUint32_t(uint32_t value) {
        writeByte(value & 0xFF);
        writeByte((value >> 8) & 0xFF);
        writeByte((value >> 16) & 0xFF);
        writeByte((value >> 24) & 0xFF);
    }
-    void writeFixedUint16(uint16 value) {
+    void writeFixedUint16_t(uint16_t value) {
        writeByte(value & 0xFF);
        writeByte(value >> 8);
    }
    size_t length() const {
        return buffer_.length();
    }
-    uint8 *buffer() {
+    uint8_t *buffer() {
        return &buffer_[0];
    }
-    const uint8 *buffer() const {
+    const uint8_t *buffer() const {
        return &buffer_[0];
    }
    bool oom() const {
        return !enoughMemory_;
    }
 };
 
 CompactBufferReader::CompactBufferReader(const CompactBufferWriter &writer)
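The reader/writer pair above round-trips unsigned values in the format the header comment describes: seven payload bits per byte, shifted left once, with bit 0 flagging that another byte follows. A self-contained round-trip of that exact encoding (standalone sketch, not the Ion classes):

    #include <cstdint>
    #include <vector>
    #include <cassert>

    // Encode: 7 payload bits per byte in bits 1..7; bit 0 says "more bytes follow".
    static void writeUnsigned(std::vector<uint8_t> &buf, uint32_t value) {
        do {
            buf.push_back(uint8_t(((value & 0x7F) << 1) | (value > 0x7F)));
            value >>= 7;
        } while (value);
    }

    // Decode: accumulate 7 bits at a time until the continuation bit is clear.
    static uint32_t readUnsigned(const uint8_t *&p) {
        uint32_t val = 0, shift = 0;
        while (true) {
            uint8_t byte = *p++;
            val |= (uint32_t(byte) >> 1) << shift;
            shift += 7;
            if (!(byte & 1))
                return val;
        }
    }

    int main() {
        std::vector<uint8_t> buf;
        writeUnsigned(buf, 300);          // 300 > 127, so this takes two bytes
        const uint8_t *p = buf.data();
        assert(readUnsigned(p) == 300);
    }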
--- a/js/src/ion/CompileInfo.h
+++ b/js/src/ion/CompileInfo.h
@@ -108,40 +108,40 @@ class CompileInfo
     }
 
     unsigned nlocals() const {
         return script()->nfixed;
     }
     unsigned ninvoke() const {
         return nlocals() + CountArgSlots(fun());
     }
 
-    uint32 scopeChainSlot() const {
+    uint32_t scopeChainSlot() const {
         return 0;
     }
-    uint32 thisSlot() const {
+    uint32_t thisSlot() const {
         JS_ASSERT(fun());
         return 1;
     }
-    uint32 firstArgSlot() const {
+    uint32_t firstArgSlot() const {
         JS_ASSERT(fun());
         return 2;
     }
-    uint32 argSlot(uint32 i) const {
+    uint32_t argSlot(uint32_t i) const {
         return firstArgSlot() + i;
     }
-    uint32 firstLocalSlot() const {
+    uint32_t firstLocalSlot() const {
         return CountArgSlots(fun());
     }
-    uint32 localSlot(uint32 i) const {
+    uint32_t localSlot(uint32_t i) const {
         return firstLocalSlot() + i;
     }
-    uint32 firstStackSlot() const {
+    uint32_t firstStackSlot() const {
         return firstLocalSlot() + nlocals();
     }
-    uint32 stackSlot(uint32 i) const {
+    uint32_t stackSlot(uint32_t i) const {
         return firstStackSlot() + i;
     }
 
     bool hasArguments() {
         return script()->argumentsHasVarBinding();
     }
 
     ExecutionMode executionMode() const {
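The accessors above encode a fixed frame-slot layout: slot 0 is the scope chain, slot 1 is |this| (for function frames), formals start at slot 2, locals follow the argument block, and expression-stack slots follow the locals. A worked example under assumed counts (2 args, 3 locals), assuming CountArgSlots(fun) == 2 + nargs, i.e. the scope chain and |this| slots precede the formals:

    #include <cstdint>
    #include <cassert>

    // Hypothetical mirror of the CompileInfo layout for a function frame.
    int main() {
        const uint32_t scopeChainSlot = 0;
        const uint32_t thisSlot = 1;
        const uint32_t firstArgSlot = 2;
        const uint32_t nargs = 2, nlocals = 3;

        const uint32_t firstLocalSlot = 2 + nargs;            // assumed CountArgSlots
        const uint32_t firstStackSlot = firstLocalSlot + nlocals;

        assert(scopeChainSlot == 0 && thisSlot == 1);
        assert(firstArgSlot + 1 == 3);   // argSlot(1)
        assert(firstLocalSlot == 4);     // localSlot(0)
        assert(firstStackSlot == 7);     // stackSlot(0)
    }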
--- a/js/src/ion/EdgeCaseAnalysis.cpp
+++ b/js/src/ion/EdgeCaseAnalysis.cpp
@@ -21,17 +21,17 @@ EdgeCaseAnalysis::EdgeCaseAnalysis(MIRGe
   : mir(mir), graph(graph)
 {
 }
 
 bool
 EdgeCaseAnalysis::analyzeLate()
 {
     // Renumber definitions for NeedNegativeZeroCheck under analyzeEdgeCasesBackward.
-    uint32 nextId = 1;
+    uint32_t nextId = 1;
 
     for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
         if (mir->shouldCancel("Analyze Late (first loop)"))
             return false;
         for (MDefinitionIterator iter(*block); iter; iter++) {
             iter->setId(nextId++);
             iter->analyzeEdgeCasesForward();
         }
--- a/js/src/ion/Ion.cpp
+++ b/js/src/ion/Ion.cpp
@@ -156,17 +156,17 @@ IonRuntime::initialize(JSContext *cx)
     functionWrappers_ = cx->new_<VMWrapperMap>(cx);
     if (!functionWrappers_ || !functionWrappers_->init())
         return false;
 
     if (!bailoutTables_.reserve(FrameSizeClass::ClassLimit().classId()))
         return false;
 
-    for (uint32 id = 0;; id++) {
+    for (uint32_t id = 0;; id++) {
         FrameSizeClass class_ = FrameSizeClass::FromClass(id);
         if (class_ == FrameSizeClass::ClassLimit())
             break;
         bailoutTables_.infallibleAppend(NULL);
         bailoutTables_[id] = generateBailoutTable(cx, id);
         if (!bailoutTables_[id])
             return false;
     }
 
@@ -312,17 +312,17 @@ IonActivation::~IonActivation()
     if (entryfp_)
         entryfp_->clearRunningInIon();
     cx_->runtime->ionActivation = prev();
     cx_->runtime->ionTop = prevIonTop_;
     cx_->runtime->ionJSContext = prevIonJSContext_;
 }
 
 IonCode *
-IonCode::New(JSContext *cx, uint8 *code, uint32 bufferSize, JSC::ExecutablePool *pool)
+IonCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool)
 {
     AssertCanGC();
     IonCode *codeObj = gc::NewGCThing<IonCode>(cx, gc::FINALIZE_IONCODE, sizeof(IonCode));
     if (!codeObj) {
         pool->release();
         return NULL;
     }
 
@@ -355,22 +355,22 @@ void
 IonCode::trace(JSTracer *trc)
 {
     // Note that we cannot mark invalidated scripts, since we've basically
     // corrupted the code stream by injecting bailouts.
     if (invalidated())
         return;
 
     if (jumpRelocTableBytes_) {
-        uint8 *start = code_ + jumpRelocTableOffset();
+        uint8_t *start = code_ + jumpRelocTableOffset();
         CompactBufferReader reader(start, start + jumpRelocTableBytes_);
         MacroAssembler::TraceJumpRelocations(trc, this, reader);
     }
     if (dataRelocTableBytes_) {
-        uint8 *start = code_ + dataRelocTableOffset();
+        uint8_t *start = code_ + dataRelocTableOffset();
         CompactBufferReader reader(start, start + dataRelocTableBytes_);
         MacroAssembler::TraceDataRelocations(trc, this, reader);
     }
 }
 
 void
 IonCode::finalize(FreeOp *fop)
 {
@@ -449,33 +449,33 @@ IonScript::IonScript()
     recompileInfo_(),
     slowCallCount(0)
 {
 }
 
 static const int DataAlignment = 4;
 
 IonScript *
-IonScript::New(JSContext *cx, uint32 frameSlots, uint32 frameSize, size_t snapshotsSize,
+IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t snapshotsSize,
                size_t bailoutEntries, size_t constants, size_t safepointIndices,
                size_t osiIndices, size_t cacheEntries, size_t prebarrierEntries,
                size_t safepointsSize, size_t scriptEntries)
 {
     if (snapshotsSize >= MAX_BUFFER_SIZE ||
-        (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32)))
+        (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
     {
         js_ReportOutOfMemory(cx);
         return NULL;
     }
 
     // This should not overflow on x86, because the memory is already allocated
     // *somewhere* and if their total overflowed there would be no memory left
     // at all.
     size_t paddedSnapshotsSize = AlignBytes(snapshotsSize, DataAlignment);
-    size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32), DataAlignment);
+    size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32_t), DataAlignment);
     size_t paddedConstantsSize = AlignBytes(constants * sizeof(Value), DataAlignment);
     size_t paddedSafepointIndicesSize = AlignBytes(safepointIndices * sizeof(SafepointIndex), DataAlignment);
     size_t paddedOsiIndicesSize = AlignBytes(osiIndices * sizeof(OsiIndex), DataAlignment);
     size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(IonCache), DataAlignment);
     size_t paddedPrebarrierEntriesSize =
         AlignBytes(prebarrierEntries * sizeof(CodeOffsetLabel), DataAlignment);
     size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
     size_t paddedScriptSize = AlignBytes(scriptEntries * sizeof(JSScript *), DataAlignment);
@@ -483,24 +483,24 @@ IonScript::New(JSContext *cx, uint32 fra
                    paddedBailoutSize +
                    paddedConstantsSize +
                    paddedSafepointIndicesSize+
                    paddedOsiIndicesSize +
                    paddedCacheEntriesSize +
                    paddedPrebarrierEntriesSize +
                    paddedSafepointSize +
                    paddedScriptSize;
-    uint8 *buffer = (uint8 *)cx->malloc_(sizeof(IonScript) + bytes);
+    uint8_t *buffer = (uint8_t *)cx->malloc_(sizeof(IonScript) + bytes);
     if (!buffer)
         return NULL;
 
     IonScript *script = reinterpret_cast<IonScript *>(buffer);
     new (script) IonScript();
 
-    uint32 offsetCursor = sizeof(IonScript);
+    uint32_t offsetCursor = sizeof(IonScript);
 
     script->snapshots_ = offsetCursor;
     script->snapshotsSize_ = snapshotsSize;
     offsetCursor += paddedSnapshotsSize;
 
     script->bailoutTable_ = offsetCursor;
     script->bailoutEntries_ = bailoutEntries;
     offsetCursor += paddedBailoutSize;
 
@@ -553,30 +553,30 @@ IonScript::trace(JSTracer *trc)
     for (size_t i = 0; i < numConstants(); i++)
         gc::MarkValue(trc, &getConstant(i), "constant");
 }
 
 void
 IonScript::copySnapshots(const SnapshotWriter *writer)
 {
     JS_ASSERT(writer->size() == snapshotsSize_);
-    memcpy((uint8 *)this + snapshots_, writer->buffer(), snapshotsSize_);
+    memcpy((uint8_t *)this + snapshots_, writer->buffer(), snapshotsSize_);
 }
 
 void
 IonScript::copySafepoints(const SafepointWriter *writer)
 {
     JS_ASSERT(writer->size() == safepointsSize_);
-    memcpy((uint8 *)this + safepointsStart_, writer->buffer(), safepointsSize_);
+    memcpy((uint8_t *)this + safepointsStart_, writer->buffer(), safepointsSize_);
 }
 
 void
 IonScript::copyBailoutTable(const SnapshotOffset *table)
 {
-    memcpy(bailoutTable(), table, bailoutEntries_ * sizeof(uint32));
+    memcpy(bailoutTable(), table, bailoutEntries_ * sizeof(uint32_t));
 }
 
 void
 IonScript::copyConstants(const HeapValue *vp)
 {
     for (size_t i = 0; i < constantEntries_; i++)
         constants()[i].init(vp[i]);
 }
 
@@ -637,37 +637,37 @@ IonScript::copyPrebarrierEntries(const C
     memcpy(prebarrierList(), barriers, numPrebarriers() * sizeof(CodeOffsetLabel));
 
     // On ARM, the saved offset may be wrong due to shuffling code buffers. Correct it.
     for (size_t i = 0; i < numPrebarriers(); i++)
         getPrebarrier(i).fixup(&masm);
 }
 
 const SafepointIndex *
-IonScript::getSafepointIndex(uint32 disp) const
+IonScript::getSafepointIndex(uint32_t disp) const
 {
     JS_ASSERT(safepointIndexEntries_ > 0);
 
     const SafepointIndex *table = safepointIndices();
     if (safepointIndexEntries_ == 1) {
         JS_ASSERT(disp == table[0].displacement());
         return &table[0];
     }
 
     size_t minEntry = 0;
     size_t maxEntry = safepointIndexEntries_ - 1;
-    uint32 min = table[minEntry].displacement();
-    uint32 max = table[maxEntry].displacement();
+    uint32_t min = table[minEntry].displacement();
+    uint32_t max = table[maxEntry].displacement();
 
     // Raise if the element is not in the list.
     JS_ASSERT(min <= disp && disp <= max);
 
     // Approximate the location of the FrameInfo.
     size_t guess = (disp - min) * (maxEntry - minEntry) / (max - min) + minEntry;
-    uint32 guessDisp = table[guess].displacement();
+    uint32_t guessDisp = table[guess].displacement();
 
     if (table[guess].displacement() == disp)
         return &table[guess];
 
     // Doing a linear scan from the guess should be more efficient in case of
     // small group which are equally distributed on the code.
     //
     // such as:  <...      ...    ...  ...  .   ...    ...>
@@ -687,38 +687,38 @@ IonScript::getSafepointIndex(uint32 disp
     }
 
     JS_NOT_REACHED("displacement not found.");
     return NULL;
 }
 
 const OsiIndex *
-IonScript::getOsiIndex(uint32 disp) const
+IonScript::getOsiIndex(uint32_t disp) const
 {
     for (const OsiIndex *it = osiIndices(), *end = osiIndices() + osiIndexEntries_;
          it != end;
         ++it)
    {
        if (it->returnPointDisplacement() == disp)
            return it;
    }
 
    JS_NOT_REACHED("Failed to find OSI point return address");
    return NULL;
 }
 
 const OsiIndex *
-IonScript::getOsiIndex(uint8 *retAddr) const
+IonScript::getOsiIndex(uint8_t *retAddr) const
 {
     IonSpew(IonSpew_Invalidate, "IonScript %p has method %p raw %p", (void *) this, (void *)
            method(), method()->raw());
 
    JS_ASSERT(containsCodeAddress(retAddr));
-    uint32 disp = retAddr - method()->raw();
+    uint32_t disp = retAddr - method()->raw();
    return getOsiIndex(disp);
 }
 
 void
 IonScript::Trace(JSTracer *trc, IonScript *script)
 {
    if (script != ION_DISABLED_SCRIPT)
        script->trace(trc);
 
@@ -1668,17 +1668,17 @@ ion::FastInvoke(JSContext *cx, HandleFun
    args.rval().set(result);
 
    JS_ASSERT_IF(result.isMagic(), result.isMagic(JS_ION_ERROR));
    return result.isMagic() ? IonExec_Error : IonExec_Ok;
 }
 
 static void
-InvalidateActivation(FreeOp *fop, uint8 *ionTop, bool invalidateAll)
+InvalidateActivation(FreeOp *fop, uint8_t *ionTop, bool invalidateAll)
 {
    AutoAssertNoGC nogc;
    IonSpew(IonSpew_Invalidate, "BEGIN invalidating activation");
 
    size_t frameno = 1;
 
    for (IonFrameIterator it(ionTop); !it.done(); ++it, ++frameno) {
        JS_ASSERT_IF(frameno == 1, it.type() == IonFrame_Exit);
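getSafepointIndex above guesses a table position by linearly interpolating over the displacement range, then scans from the guess. The same idea on a plain sorted array (standalone sketch, not the Ion data structures; assumes the key is present, as the JS_ASSERTs above do):

    #include <cstdint>
    #include <cstddef>
    #include <cassert>

    // Find `key` in a sorted array by interpolating a starting guess,
    // then walking linearly in the right direction.
    static size_t interpolationFind(const uint32_t *table, size_t n, uint32_t key) {
        size_t lo = 0, hi = n - 1;
        assert(table[lo] <= key && key <= table[hi]);
        size_t guess = lo;
        if (table[hi] != table[lo])
            guess = (size_t)((uint64_t)(key - table[lo]) * (hi - lo) / (table[hi] - table[lo]));
        while (table[guess] < key) guess++;   // scan up toward the key...
        while (table[guess] > key) guess--;   // ...or down, if the guess overshot
        return guess;
    }

    int main() {
        const uint32_t disps[] = { 4, 16, 20, 36, 52, 60 };
        assert(interpolationFind(disps, 6, 36) == 3);
    }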
--- a/js/src/ion/Ion.h
+++ b/js/src/ion/Ion.h
@@ -77,90 +77,90 @@ struct IonOptions
     //
     // Default: true iff there are at least two CPUs available
     bool parallelCompilation;
 
     // How many invocations or loop iterations are needed before functions
     // are compiled.
     //
     // Default: 10,240
-    uint32 usesBeforeCompile;
+    uint32_t usesBeforeCompile;
 
     // How many invocations or loop iterations are needed before functions
     // are compiled when JM is disabled.
     //
     // Default: 40
-    uint32 usesBeforeCompileNoJaeger;
+    uint32_t usesBeforeCompileNoJaeger;
 
     // How many invocations or loop iterations are needed before calls
     // are inlined.
     //
     // Default: 10,240
-    uint32 usesBeforeInlining;
+    uint32_t usesBeforeInlining;
 
     // How many actual arguments are accepted on the C stack.
     //
     // Default: 4,096
-    uint32 maxStackArgs;
+    uint32_t maxStackArgs;
 
     // The maximum inlining depth.
     //
     // Default: 3
-    uint32 maxInlineDepth;
+    uint32_t maxInlineDepth;
 
     // The bytecode length limit for small function.
    //
    // The default for this was arrived at empirically via benchmarking.
    // We may want to tune it further after other optimizations have gone
    // in.
    //
    // Default: 100
-    uint32 smallFunctionMaxBytecodeLength;
+    uint32_t smallFunctionMaxBytecodeLength;
 
    // The inlining limit for small functions.
    //
    // This value has been arrived at empirically via benchmarking.
    // We may want to revisit this tuning after other optimizations have
    // gone in.
    //
    // Default: usesBeforeInlining / 4
-    uint32 smallFunctionUsesBeforeInlining;
+    uint32_t smallFunctionUsesBeforeInlining;
 
    // The maximum number of functions to polymorphically inline at a call site.
    //
    // Default: 4
-    uint32 polyInlineMax;
+    uint32_t polyInlineMax;
 
    // The maximum total bytecode size of an inline call site.
    //
    // Default: 800
-    uint32 inlineMaxTotalBytecodeLength;
+    uint32_t inlineMaxTotalBytecodeLength;
 
    // Minimal ratio between the use counts of the caller and the callee to
    // enable inlining of functions.
    //
    // Default: 128
-    uint32 inlineUseCountRatio;
+    uint32_t inlineUseCountRatio;
 
    // Whether functions are compiled immediately.
    //
    // Default: false
    bool eagerCompilation;
 
    // If a function has attempted to make this many calls to
    // functions that are marked "uncompileable", then
    // stop running this function in IonMonkey. (default 512)
-    uint32 slowCallLimit;
+    uint32_t slowCallLimit;
 
    // When caller runs in IM, but callee not, we take a slow path to the interpreter.
    // This has a significant overhead. In order to decrease the number of times this happens,
    // the useCount gets incremented faster to compile this function in IM and use the fastpath.
    //
    // Default: 5
-    uint32 slowCallIncUseCount;
+    uint32_t slowCallIncUseCount;
 
    void setEagerCompilation() {
        eagerCompilation = true;
        usesBeforeCompile = usesBeforeCompileNoJaeger = 0;
 
        // Eagerly inline calls to improve test coverage.
        usesBeforeInlining = 0;
        smallFunctionUsesBeforeInlining = 0;
--- a/js/src/ion/IonAnalysis.cpp +++ b/js/src/ion/IonAnalysis.cpp @@ -74,17 +74,17 @@ ion::EliminateDeadResumePointOperands(MI if (ins->isUnbox() || ins->isParameter()) continue; // Check if this instruction's result is only used within the // current block, and keep track of its last use in a definition // (not resume point). This requires the instructions in the block // to be numbered, ensured by running this immediately after alias // analysis. - uint32 maxDefinition = 0; + uint32_t maxDefinition = 0; for (MUseDefIterator uses(*ins); uses; uses++) { if (uses.def()->block() != *block || uses.def()->isBox() || uses.def()->isPassArg()) { maxDefinition = UINT32_MAX; break; } maxDefinition = Max(maxDefinition, uses.def()->id()); } if (maxDefinition == UINT32_MAX) @@ -168,25 +168,25 @@ IsPhiObservable(MPhi *phi) // which we don't count as actual uses. If the only uses are resume points, // then the SSA name is never consumed by the program. for (MUseDefIterator iter(phi); iter; iter++) { if (!iter.def()->isPhi()) return true; } // If the Phi is of the |this| value, it must always be observable. - uint32 slot = phi->slot(); + uint32_t slot = phi->slot(); if (slot == 1) return true; CompileInfo &info = phi->block()->info(); if (info.fun() && info.hasArguments()) { // We do not support arguments object inside inline frames yet. JS_ASSERT(!phi->block()->callerResumePoint()); - uint32 first = info.firstArgSlot(); + uint32_t first = info.firstArgSlot(); if (first <= slot && slot - first < info.nargs()) return true; } return false; } // Handles cases like: // x is phi(a, x) --> a @@ -872,35 +872,35 @@ ion::AssertGraphCoherency(MIRGraph &grap for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) { for (size_t i = 0; i < block->numSuccessors(); i++) JS_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i))); for (size_t i = 0; i < block->numPredecessors(); i++) JS_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i))); for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) { - for (uint32 i = 0; i < ins->numOperands(); i++) + for (uint32_t i = 0; i < ins->numOperands(); i++) JS_ASSERT(CheckMarkedAsUse(*ins, ins->getOperand(i))); } } AssertReversePostOrder(graph); #endif } struct BoundsCheckInfo { MBoundsCheck *check; - uint32 validUntil; + uint32_t validUntil; }; -typedef HashMap<uint32, +typedef HashMap<uint32_t, BoundsCheckInfo, - DefaultHasher<uint32>, + DefaultHasher<uint32_t>, IonAllocPolicy> BoundsCheckMap; // Compute a hash for bounds checks which ignores constant offsets in the index. static HashNumber BoundsCheckHashIgnoreOffset(MBoundsCheck *check) { SimpleLinearSum indexSum = ExtractLinearSum(check->index()); uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0; @@ -950,22 +950,22 @@ ion::ExtractLinearSum(MDefinition *ins) SimpleLinearSum lsum = ExtractLinearSum(lhs); SimpleLinearSum rsum = ExtractLinearSum(rhs); if (lsum.term && rsum.term) return SimpleLinearSum(ins, 0); // Check if this is of the form <SUM> + n, n + <SUM> or <SUM> - n. if (ins->isAdd()) { - int32 constant; + int32_t constant; if (!SafeAdd(lsum.constant, rsum.constant, &constant)) return SimpleLinearSum(ins, 0); return SimpleLinearSum(lsum.term ? 
lsum.term : rsum.term, constant); } else if (lsum.term) { - int32 constant; + int32_t constant; if (!SafeSub(lsum.constant, rsum.constant, &constant)) return SimpleLinearSum(ins, 0); return SimpleLinearSum(lsum.term, constant); } } } return SimpleLinearSum(ins, 0); @@ -1047,28 +1047,28 @@ TryEliminateBoundsCheck(MBoundsCheck *do // Both terms should be NULL or the same definition. if (sumA.term != sumB.term) return true; // This bounds check is redundant. *eliminated = true; // Normalize the ranges according to the constant offsets in the two indexes. - int32 minimumA, maximumA, minimumB, maximumB; + int32_t minimumA, maximumA, minimumB, maximumB; if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) || !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) || !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) || !SafeAdd(sumB.constant, dominated->maximum(), &maximumB)) { return false; } // Update the dominating check to cover both ranges, denormalizing the // result per the constant offset in the index. - int32 newMinimum, newMaximum; + int32_t newMinimum, newMaximum; if (!SafeSub(Min(minimumA, minimumB), sumA.constant, &newMinimum) || !SafeSub(Max(maximumA, maximumB), sumA.constant, &newMaximum)) { return false; } dominating->setMinimum(newMinimum); dominating->setMaximum(newMaximum); @@ -1159,17 +1159,17 @@ ion::EliminateRedundantBoundsChecks(MIRG index++; } JS_ASSERT(index == graph.numBlocks()); return true; } bool -LinearSum::multiply(int32 scale) +LinearSum::multiply(int32_t scale) { for (size_t i = 0; i < terms_.length(); i++) { if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale)) return false; } return SafeMul(scale, constant_, &constant_); } @@ -1179,25 +1179,25 @@ LinearSum::add(const LinearSum &other) for (size_t i = 0; i < other.terms_.length(); i++) { if (!add(other.terms_[i].term, other.terms_[i].scale)) return false; } return add(other.constant_); } bool -LinearSum::add(MDefinition *term, int32 scale) +LinearSum::add(MDefinition *term, int32_t scale) { JS_ASSERT(term); if (scale == 0) return true; if (term->isConstant()) { - int32 constant = term->toConstant()->value().toInt32(); + int32_t constant = term->toConstant()->value().toInt32(); if (!SafeMul(constant, scale, &constant)) return false; return add(constant); } for (size_t i = 0; i < terms_.length(); i++) { if (term == terms_[i].term) { if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale)) @@ -1210,27 +1210,27 @@ LinearSum::add(MDefinition *term, int32 } } terms_.append(LinearTerm(term, scale)); return true; } bool -LinearSum::add(int32 constant) +LinearSum::add(int32_t constant) { return SafeAdd(constant, constant_, &constant_); } void LinearSum::print(Sprinter &sp) const { for (size_t i = 0; i < terms_.length(); i++) { - int32 scale = terms_[i].scale; - int32 id = terms_[i].term->id(); + int32_t scale = terms_[i].scale; + int32_t id = terms_[i].term->id(); JS_ASSERT(scale); if (scale > 0) { if (i) sp.printf("+"); if (scale == 1) sp.printf("#%d", id); else sp.printf("%d*#%d", scale, id);
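ExtractLinearSum, TryEliminateBoundsCheck, and LinearSum above all funnel their arithmetic through SafeAdd/SafeSub/SafeMul, which report failure instead of silently wrapping. A minimal sketch of the semantics those calls are assumed to have (CheckedAdd is an illustrative stand-in, not the actual helper):

    #include <stdint.h>

    // Overflow-checked int32_t addition: on success the sum is written
    // through *res; on overflow the caller keeps the value as an opaque
    // term instead of folding it into a wrong constant.
    static bool
    CheckedAdd(int32_t lhs, int32_t rhs, int32_t *res)
    {
        int64_t wide = (int64_t)lhs + (int64_t)rhs;
        if (wide < INT32_MIN || wide > INT32_MAX)
            return false;
        *res = (int32_t)wide;
        return true;
    }

Every call site above follows the same pattern: if the checked operation fails, the analysis conservatively gives up on the sum rather than miscompiling.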
--- a/js/src/ion/IonAnalysis.h +++ b/js/src/ion/IonAnalysis.h @@ -50,36 +50,36 @@ bool EliminateRedundantBoundsChecks(MIRGraph &graph); class MDefinition; // Simple linear sum of the form 'n' or 'x + n'. struct SimpleLinearSum { MDefinition *term; - int32 constant; + int32_t constant; - SimpleLinearSum(MDefinition *term, int32 constant) + SimpleLinearSum(MDefinition *term, int32_t constant) : term(term), constant(constant) {} }; SimpleLinearSum ExtractLinearSum(MDefinition *ins); bool ExtractLinearInequality(MTest *test, BranchDirection direction, SimpleLinearSum *plhs, MDefinition **prhs, bool *plessEqual); struct LinearTerm { MDefinition *term; - int32 scale; + int32_t scale; - LinearTerm(MDefinition *term, int32 scale) + LinearTerm(MDefinition *term, int32_t scale) : term(term), scale(scale) { } }; // General linear sum of the form 'x1*n1 + x2*n2 + ... + n' class LinearSum { @@ -91,29 +91,29 @@ class LinearSum LinearSum(const LinearSum &other) : constant_(other.constant_) { for (size_t i = 0; i < other.terms_.length(); i++) terms_.append(other.terms_[i]); } - bool multiply(int32 scale); + bool multiply(int32_t scale); bool add(const LinearSum &other); - bool add(MDefinition *term, int32 scale); - bool add(int32 constant); + bool add(MDefinition *term, int32_t scale); + bool add(int32_t constant); - int32 constant() const { return constant_; } + int32_t constant() const { return constant_; } size_t numTerms() const { return terms_.length(); } LinearTerm term(size_t i) const { return terms_[i]; } void print(Sprinter &sp) const; private: Vector<LinearTerm, 2, IonAllocPolicy> terms_; - int32 constant_; + int32_t constant_; }; } // namespace ion } // namespace js #endif // jsion_ion_analysis_h__
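Given the interface declared above, a hypothetical caller accumulates terms and constants while propagating overflow failure; for example, building 2*x + y + 5:

    // Illustrative helper (not in the tree): build the sum 2*x + y + 5.
    static bool
    BuildExampleSum(LinearSum &sum, MDefinition *x, MDefinition *y)
    {
        return sum.add(x, 2)   // 2*x
            && sum.add(y, 1)   // + y
            && sum.add(5);     // + 5
    }

Because add() folds constant MDefinitions into constant_ and merges repeated terms by adding their scales, the result stays in the canonical 'x1*n1 + x2*n2 + ... + n' form.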
--- a/js/src/ion/IonBuilder.cpp +++ b/js/src/ion/IonBuilder.cpp @@ -22,17 +22,17 @@ #endif using namespace js; using namespace js::ion; using mozilla::DebugOnly; IonBuilder::IonBuilder(JSContext *cx, TempAllocator *temp, MIRGraph *graph, - TypeOracle *oracle, CompileInfo *info, size_t inliningDepth, uint32 loopDepth) + TypeOracle *oracle, CompileInfo *info, size_t inliningDepth, uint32_t loopDepth) : MIRGenerator(cx->compartment, temp, graph, info), backgroundCodegen_(NULL), recompileInfo(cx->compartment->types.compiledInfo), cx(cx), loopDepth_(loopDepth), callerResumePoint_(NULL), callerBuilder_(NULL), oracle(oracle), @@ -70,17 +70,17 @@ void IonBuilder::spew(const char *message) { // Don't call PCToLineNumber in release builds. #ifdef DEBUG IonSpew(IonSpew_MIR, "%s @ %s:%d", message, script_->filename, PCToLineNumber(script_, pc)); #endif } -static inline int32 +static inline int32_t GetJumpOffset(jsbytecode *pc) { JS_ASSERT(js_CodeSpec[JSOp(*pc)].type() == JOF_JUMP); return GET_JUMP_OFFSET(pc); } IonBuilder::CFGState IonBuilder::CFGState::If(jsbytecode *join, MBasicBlock *ifFalse) @@ -143,33 +143,33 @@ IonBuilder::CFGState::LookupSwitch(jsbyt state.lookupswitch.breaks = NULL; state.lookupswitch.bodies = (FixedList<MBasicBlock *> *)GetIonContext()->temp->allocate(sizeof(FixedList<MBasicBlock *>)); state.lookupswitch.currentBlock = 0; return state; } JSFunction * -IonBuilder::getSingleCallTarget(uint32 argc, jsbytecode *pc) +IonBuilder::getSingleCallTarget(uint32_t argc, jsbytecode *pc) { AutoAssertNoGC nogc; types::StackTypeSet *calleeTypes = oracle->getCallTarget(script().get(nogc), argc, pc); if (!calleeTypes) return NULL; RawObject obj = calleeTypes->getSingleton(); if (!obj || !obj->isFunction()) return NULL; return obj->toFunction(); } uint32_t -IonBuilder::getPolyCallTargets(uint32 argc, jsbytecode *pc, +IonBuilder::getPolyCallTargets(uint32_t argc, jsbytecode *pc, AutoObjectVector &targets, uint32_t maxTargets) { types::TypeSet *calleeTypes = oracle->getCallTarget(script_, argc, pc); if (!calleeTypes) return 0; if (calleeTypes->baseFlags() != 0) return 0; @@ -278,17 +278,17 @@ IonBuilder::build() if (!graph().addScript(script_)) return false; if (!initParameters()) return false; // Initialize local variables. - for (uint32 i = 0; i < info().nlocals(); i++) { + for (uint32_t i = 0; i < info().nlocals(); i++) { MConstant *undef = MConstant::New(UndefinedValue()); current->add(undef); current->initSlot(info().localSlot(i), undef); } // Initialize something for the scope chain. We can bail out before the // start instruction, but the snapshot is encoded *at* the start // instruction, which means generating any code that could load into @@ -331,17 +331,17 @@ IonBuilder::build() // v1 = MParameter(1) // -- ResumePoint(v2, v3) // v2 = Unbox(v0, INT32) // v3 = Unbox(v1, INT32) // // So we attach the initial resume point to each parameter, which the type // analysis explicitly checks (this is the same mechanism used for // effectful operations). - for (uint32 i = 0; i < CountArgSlots(info().fun()); i++) { + for (uint32_t i = 0; i < CountArgSlots(info().fun()); i++) { MInstruction *ins = current->getEntrySlot(i)->toInstruction(); if (ins->type() == MIRType_Value) ins->setResumePoint(current->entryResumePoint()); } // Recompile to inline calls if this function is hot. 
insertRecompileCheck(); @@ -469,17 +469,17 @@ IonBuilder::buildInline(IonBuilder *call for (size_t i = 0; i < nargs; ++i) { MDefinition *arg = args.popCopyFront(); current->initSlot(info().argSlot(i), arg); } IonSpew(IonSpew_Inlining, "Initializing %u local slots", info().nlocals()); // Initialize local variables. - for (uint32 i = 0; i < info().nlocals(); i++) { + for (uint32_t i = 0; i < info().nlocals(); i++) { MConstant *undef = MConstant::New(UndefinedValue()); current->add(undef); current->initSlot(info().localSlot(i), undef); } IonSpew(IonSpew_Inlining, "Inline entry block MResumePoint %p, %u operands", (void *) current->entryResumePoint(), current->entryResumePoint()->numOperands()); @@ -491,19 +491,19 @@ IonBuilder::buildInline(IonBuilder *call // Apply Type Inference information to parameters early on, unboxing them if // they have a definitive type. The actual guards will be emitted by the code // generator, explicitly, as part of the function prologue. void IonBuilder::rewriteParameters() { JS_ASSERT(info().scopeChainSlot() == 0); - static const uint32 START_SLOT = 1; - - for (uint32 i = START_SLOT; i < CountArgSlots(info().fun()); i++) { + static const uint32_t START_SLOT = 1; + + for (uint32_t i = START_SLOT; i < CountArgSlots(info().fun()); i++) { MParameter *param = current->getSlot(i)->toParameter(); // Find the original (not cloned) type set for the MParameter, as we // will be adding constraints to it. types::StackTypeSet *types; if (param->index() == MParameter::THIS_SLOT) types = oracle->thisTypeSet(script_); else @@ -550,17 +550,17 @@ IonBuilder::initParameters() if (!info().fun()) return true; MParameter *param = MParameter::New(MParameter::THIS_SLOT, cloneTypeSet(oracle->thisTypeSet(script_))); current->add(param); current->initSlot(info().thisSlot(), param); - for (uint32 i = 0; i < info().nargs(); i++) { + for (uint32_t i = 0; i < info().nargs(); i++) { param = MParameter::New(i, oracle->parameterTypeSet(script_, i)); current->add(param); current->initSlot(info().argSlot(i), param); } return true; } @@ -2208,17 +2208,17 @@ IonBuilder::tableSwitch(JSOp op, jssrcno tableswitch->addBlock(caseblock); pc2 += JUMP_OFFSET_LEN; } // Move defaultcase to the end, to maintain RPO. 
graph().moveBlockToEnd(defaultcase); - JS_ASSERT(tableswitch->numCases() == (uint32)(high - low + 1)); + JS_ASSERT(tableswitch->numCases() == (uint32_t)(high - low + 1)); JS_ASSERT(tableswitch->numSuccessors() > 0); // Sort the list of blocks that still needs to get processed by pc qsort(tableswitch->blocks(), tableswitch->numBlocks(), sizeof(MBasicBlock*), CmpSuccessors); // Create info ControlFlowInfo switchinfo(cfgStack_.length(), exitpc); @@ -2470,18 +2470,18 @@ IonBuilder::jsop_andor(JSOp op) current = evalRhs; return true; } bool IonBuilder::jsop_dup2() { - uint32 lhsSlot = current->stackDepth() - 2; - uint32 rhsSlot = current->stackDepth() - 1; + uint32_t lhsSlot = current->stackDepth() - 2; + uint32_t rhsSlot = current->stackDepth() - 1; current->pushSlot(lhsSlot); current->pushSlot(rhsSlot); return true; } bool IonBuilder::jsop_loophead(jsbytecode *pc) { @@ -2813,17 +2813,17 @@ class AutoAccumulateExits } ~AutoAccumulateExits() { graph_.setExitAccumulator(prev_); } }; bool -IonBuilder::jsop_call_inline(HandleFunction callee, uint32 argc, bool constructing, +IonBuilder::jsop_call_inline(HandleFunction callee, uint32_t argc, bool constructing, MConstant *constFun, MBasicBlock *bottom, Vector<MDefinition *, 8, IonAllocPolicy> &retvalDefns) { AssertCanGC(); // Rewrite the stack position containing the function with the constant // function definition, before we take the inlineResumePoint current->rewriteAtDepth(-((int) argc + 2), constFun); @@ -2839,17 +2839,17 @@ IonBuilder::jsop_call_inline(HandleFunct JS_ASSERT(argc == GET_ARGC(inlineResumePoint->pc())); // Gather up the arguments and |this| to the inline function. // Note that we leave the callee on the simulated stack for the // duration of the call. MDefinitionVector argv; if (!argv.resizeUninitialized(argc + 1)) return false; - for (int32 i = argc; i >= 0; i--) + for (int32_t i = argc; i >= 0; i--) argv[i] = current->pop(); // Compilation information is allocated for the duration of the current tempLifoAlloc // lifetime. RootedScript calleeScript(cx, callee->nonLazyScript()); CompileInfo *info = cx->tempLifoAlloc().new_<CompileInfo>(calleeScript.get(), callee, (jsbytecode *)NULL, constructing, SequentialExecution); @@ -2909,17 +2909,17 @@ IonBuilder::jsop_call_inline(HandleFunct if (!bottom->addPredecessorWithoutPhis(exitBlock)) return false; } JS_ASSERT(!retvalDefns.empty()); return true; } bool -IonBuilder::makeInliningDecision(AutoObjectVector &targets, uint32 argc) +IonBuilder::makeInliningDecision(AutoObjectVector &targets, uint32_t argc) { AssertCanGC(); if (inliningDepth >= js_IonOptions.maxInlineDepth) return false; // For "small" functions, we should be more aggressive about inlining. // This is based on the following intuition: @@ -3172,18 +3172,18 @@ IonBuilder::makePolyInlineDispatch(JSCon if (!call) return NULL; // Set up the MPrepCall MPrepareCall *prepCall = new MPrepareCall; fallbackEndBlock->add(prepCall); // Grab the arguments for the call directly from the current block's stack. - for (int32 i = 0; i <= argc; i++) { - int32 argno = argc - i; + for (int32_t i = 0; i <= argc; i++) { + int32_t argno = argc - i; MDefinition *argDefn = fallbackEndBlock->pop(); JS_ASSERT(!argDefn->isPassArg()); MPassArg *passArg = MPassArg::New(argDefn); fallbackEndBlock->add(passArg); call->addArg(argno, passArg); } // Insert an MPrepareCall before the first argument. 
@@ -3205,32 +3205,32 @@ IonBuilder::makePolyInlineDispatch(JSCon // Create a new MPolyInlineDispatch containing the getprop and the fallback block return MPolyInlineDispatch::New(targetObject, inlinePropTable, fallbackPrepBlock, fallbackBlock, fallbackEndBlock); } bool -IonBuilder::inlineScriptedCall(AutoObjectVector &targets, uint32 argc, bool constructing, +IonBuilder::inlineScriptedCall(AutoObjectVector &targets, uint32_t argc, bool constructing, types::StackTypeSet *types, types::StackTypeSet *barrier) { #ifdef DEBUG - uint32 origStackDepth = current->stackDepth(); + uint32_t origStackDepth = current->stackDepth(); #endif IonSpew(IonSpew_Inlining, "Inlining %d targets", (int) targets.length()); JS_ASSERT(targets.length() > 0); // |top| jumps into the callee subgraph -- save it for later use. MBasicBlock *top = current; // Unwrap all the MPassArgs and replace them with their inputs, and discard the // MPassArgs. - for (int32 i = argc; i >= 0; i--) { + for (int32_t i = argc; i >= 0; i--) { // Unwrap each MPassArg, replacing it with its contents. int argSlotDepth = -((int) i + 1); MPassArg *passArg = top->peek(argSlotDepth)->toPassArg(); MBasicBlock *block = passArg->block(); MDefinition *wrapped = passArg->getArgument(); passArg->replaceAllUsesWith(wrapped); top->rewriteAtDepth(argSlotDepth, wrapped); block->discard(passArg); @@ -3616,17 +3616,17 @@ IonBuilder::createThis(HandleFunction ta // If the prototype could not be hardcoded, emit a GETPROP. if (!createThis) createThis = createThisScripted(callee); return createThis; } bool -IonBuilder::jsop_funcall(uint32 argc) +IonBuilder::jsop_funcall(uint32_t argc) { // Stack for JSOP_FUNCALL: // 1: MPassArg(arg0) // ... // argc: MPassArg(argN) // argc+1: MPassArg(JSFunction *), the 'f' in |f.call()|, in |this| position. // argc+2: The native 'call' function. @@ -3665,17 +3665,17 @@ IonBuilder::jsop_funcall(uint32 argc) argc -= 1; } // Call without inlining. return makeCall(target, argc, false); } bool -IonBuilder::jsop_funapply(uint32 argc) +IonBuilder::jsop_funapply(uint32_t argc) { RootedFunction native(cx, getSingleCallTarget(argc, pc)); if (argc != 2) return makeCall(native, argc, false); // Disable compilation if the second argument to |apply| cannot be guaranteed // to be either definitely |arguments| or definitely not |arguments|. types::StackTypeSet *argObjTypes = oracle->getCallArg(script_, argc, 2, pc); @@ -3735,17 +3735,17 @@ IonBuilder::jsop_funapply(uint32 argc) return false; types::StackTypeSet *barrier; types::StackTypeSet *types = oracle->returnTypeSet(script_, pc, &barrier); return pushTypeBarrier(apply, types, barrier); } bool -IonBuilder::jsop_call(uint32 argc, bool constructing) +IonBuilder::jsop_call(uint32_t argc, bool constructing) { AssertCanGC(); // Acquire known call target if existent. AutoObjectVector targets(cx); uint32_t numTargets = getPolyCallTargets(argc, pc, targets, 4); types::StackTypeSet *barrier; types::StackTypeSet *types = oracle->returnTypeSet(script_, pc, &barrier); @@ -3772,27 +3772,27 @@ IonBuilder::jsop_call(uint32 argc, bool RootedFunction target(cx, NULL); if (numTargets == 1) target = targets[0]->toFunction(); return makeCallBarrier(target, argc, constructing, types, barrier); } MCall * -IonBuilder::makeCallHelper(HandleFunction target, uint32 argc, bool constructing) +IonBuilder::makeCallHelper(HandleFunction target, uint32_t argc, bool constructing) { // This function may be called with mutated stack. // Querying TI for popped types is invalid. 
- uint32 targetArgs = argc; + uint32_t targetArgs = argc; // Collect number of missing arguments provided that the target is // scripted. Native functions are passed an explicit 'argc' parameter. if (target && !target->isNative()) - targetArgs = Max<uint32>(target->nargs, argc); + targetArgs = Max<uint32_t>(target->nargs, argc); MCall *call = MCall::New(target, targetArgs + 1, argc, constructing); if (!call) return NULL; // Explicitly pad any missing arguments with |undefined|. // This permits skipping the argumentsRectifier. for (int i = targetArgs; i > (int)argc; i--) { @@ -3801,17 +3801,17 @@ IonBuilder::makeCallHelper(HandleFunctio current->add(undef); MPassArg *pass = MPassArg::New(undef); current->add(pass); call->addArg(i, pass); } // Add explicit arguments. // Bytecode order: Function, This, Arg0, Arg1, ..., ArgN, Call. - for (int32 i = argc; i > 0; i--) + for (int32_t i = argc; i > 0; i--) call->addArg(i, current->pop()->toPassArg()); // Place an MPrepareCall before the first passed argument, before we // potentially perform rearrangement. MPrepareCall *start = new MPrepareCall; MPassArg *firstArg = current->peek(-1)->toPassArg(); firstArg->block()->insertBefore(firstArg, start); call->initPrepareCall(start); @@ -3842,34 +3842,34 @@ IonBuilder::makeCallHelper(HandleFunctio call->setDOMFunction(); call->initFunction(fun); current->add(call); return call; } bool -IonBuilder::makeCallBarrier(HandleFunction target, uint32 argc, +IonBuilder::makeCallBarrier(HandleFunction target, uint32_t argc, bool constructing, types::StackTypeSet *types, types::StackTypeSet *barrier) { MCall *call = makeCallHelper(target, argc, constructing); if (!call) return false; current->push(call); if (!resumeAfter(call)) return false; return pushTypeBarrier(call, types, barrier); } bool -IonBuilder::makeCall(HandleFunction target, uint32 argc, bool constructing) +IonBuilder::makeCall(HandleFunction target, uint32_t argc, bool constructing) { types::StackTypeSet *barrier; types::StackTypeSet *types = oracle->returnTypeSet(script_, pc, &barrier); return makeCallBarrier(target, argc, constructing, types, barrier); } bool IonBuilder::jsop_compare(JSOp op) @@ -3884,17 +3884,17 @@ IonBuilder::jsop_compare(JSOp op) ins->infer(cx, oracle->binaryTypes(script_, pc)); if (ins->isEffectful() && !resumeAfter(ins)) return false; return true; } JSObject * -IonBuilder::getNewArrayTemplateObject(uint32 count) +IonBuilder::getNewArrayTemplateObject(uint32_t count) { RootedObject templateObject(cx, NewDenseUnallocatedArray(cx, count)); if (!templateObject) return NULL; RootedScript script(cx, script_); if (types::UseNewTypeForInitializer(cx, script, pc, JSProto_Array)) { if (!JSObject::setSingletonType(cx, templateObject)) @@ -3905,17 +3905,17 @@ IonBuilder::getNewArrayTemplateObject(ui return NULL; templateObject->setType(type); } return templateObject; } bool -IonBuilder::jsop_newarray(uint32 count) +IonBuilder::jsop_newarray(uint32_t count) { JS_ASSERT(script_->compileAndGo); JSObject *templateObject = getNewArrayTemplateObject(count); if (!templateObject) return false; MNewArray *ins = new MNewArray(count, templateObject, MNewArray::NewArray_Allocating); @@ -4059,27 +4059,27 @@ IonBuilder::jsop_initprop(HandleProperty current->add(store); return resumeAfter(store); } MSlots *slots = MSlots::New(obj); current->add(slots); - uint32 slot = templateObject->dynamicSlotIndex(shape->slot()); + uint32_t slot = templateObject->dynamicSlotIndex(shape->slot()); MStoreSlot *store = MStoreSlot::New(slots, slot, value); if (needsBarrier) 
store->setNeedsBarrier(); current->add(store); return resumeAfter(store); } MBasicBlock * -IonBuilder::addBlock(MBasicBlock *block, uint32 loopDepth) +IonBuilder::addBlock(MBasicBlock *block, uint32_t loopDepth) { if (!block) return NULL; graph().addBlock(block); block->setLoopDepth(loopDepth); return block; } @@ -4104,17 +4104,17 @@ IonBuilder::newBlockAfter(MBasicBlock *a MBasicBlock *block = MBasicBlock::New(graph(), info(), predecessor, pc, MBasicBlock::NORMAL); if (!block) return NULL; graph().insertBlockAfter(at, block); return block; } MBasicBlock * -IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, uint32 loopDepth) +IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, uint32_t loopDepth) { MBasicBlock *block = MBasicBlock::New(graph(), info(), predecessor, pc, MBasicBlock::NORMAL); return addBlock(block, loopDepth); } MBasicBlock * IonBuilder::newOsrPreheader(MBasicBlock *predecessor, jsbytecode *loopEntry) { @@ -4129,57 +4129,57 @@ IonBuilder::newOsrPreheader(MBasicBlock if (!osrBlock || !preheader) return NULL; MOsrEntry *entry = MOsrEntry::New(); osrBlock->add(entry); // Initialize |scopeChain|. { - uint32 slot = info().scopeChainSlot(); + uint32_t slot = info().scopeChainSlot(); MOsrScopeChain *scopev = MOsrScopeChain::New(entry); osrBlock->add(scopev); osrBlock->initSlot(slot, scopev); } if (info().fun()) { // Initialize |this| parameter. - uint32 slot = info().thisSlot(); + uint32_t slot = info().thisSlot(); ptrdiff_t offset = StackFrame::offsetOfThis(info().fun()); MOsrValue *thisv = MOsrValue::New(entry, offset); osrBlock->add(thisv); osrBlock->initSlot(slot, thisv); // Initialize arguments. - for (uint32 i = 0; i < info().nargs(); i++) { - uint32 slot = info().argSlot(i); + for (uint32_t i = 0; i < info().nargs(); i++) { + uint32_t slot = info().argSlot(i); ptrdiff_t offset = StackFrame::offsetOfFormalArg(info().fun(), i); MOsrValue *osrv = MOsrValue::New(entry, offset); osrBlock->add(osrv); osrBlock->initSlot(slot, osrv); } } // Initialize locals. - for (uint32 i = 0; i < info().nlocals(); i++) { - uint32 slot = info().localSlot(i); + for (uint32_t i = 0; i < info().nlocals(); i++) { + uint32_t slot = info().localSlot(i); ptrdiff_t offset = StackFrame::offsetOfFixed(i); MOsrValue *osrv = MOsrValue::New(entry, offset); osrBlock->add(osrv); osrBlock->initSlot(slot, osrv); } // Initialize stack. - uint32 numSlots = preheader->stackDepth() - CountArgSlots(info().fun()) - info().nlocals(); - for (uint32 i = 0; i < numSlots; i++) { - uint32 slot = info().stackSlot(i); + uint32_t numSlots = preheader->stackDepth() - CountArgSlots(info().fun()) - info().nlocals(); + for (uint32_t i = 0; i < numSlots; i++) { + uint32_t slot = info().stackSlot(i); ptrdiff_t offset = StackFrame::offsetOfFixed(info().nlocals() + i); MOsrValue *osrv = MOsrValue::New(entry, offset); osrBlock->add(osrv); osrBlock->initSlot(slot, osrv); } // Create an MStart to hold the first valid MResumePoint. @@ -4204,24 +4204,24 @@ IonBuilder::newOsrPreheader(MBasicBlock JS_ASSERT(info().scopeChainSlot() == 0); JS_ASSERT(osrBlock->scopeChain()->type() == MIRType_Object); Vector<MIRType> slotTypes(cx); if (!slotTypes.growByUninitialized(osrBlock->stackDepth())) return NULL; // Fill slotTypes with the types of the predecessor block. - for (uint32 i = 0; i < osrBlock->stackDepth(); i++) + for (uint32_t i = 0; i < osrBlock->stackDepth(); i++) slotTypes[i] = MIRType_Value; // Update slotTypes for slots that may have a different type at this join point. 
if (!oracle->getOsrTypes(loopEntry, slotTypes)) return NULL; - for (uint32 i = 1; i < osrBlock->stackDepth(); i++) { + for (uint32_t i = 1; i < osrBlock->stackDepth(); i++) { // Unbox the MOsrValue if it is known to be unboxable. switch (slotTypes[i]) { case MIRType_Boolean: case MIRType_Int32: case MIRType_Double: case MIRType_String: case MIRType_Object: { @@ -6290,32 +6290,32 @@ IonBuilder::jsop_lambda(JSFunction *fun) MLambda *ins = MLambda::New(current->scopeChain(), fun); current->add(ins); current->push(ins); return resumeAfter(ins); } bool -IonBuilder::jsop_deflocalfun(uint32 local, JSFunction *fun) +IonBuilder::jsop_deflocalfun(uint32_t local, JSFunction *fun) { JS_ASSERT(script_->analysis()->usesScopeChain()); MLambda *ins = MLambda::New(current->scopeChain(), fun); current->add(ins); current->push(ins); current->setLocal(local); current->pop(); return resumeAfter(ins); } bool -IonBuilder::jsop_defvar(uint32 index) +IonBuilder::jsop_defvar(uint32_t index) { JS_ASSERT(JSOp(*pc) == JSOP_DEFVAR || JSOp(*pc) == JSOP_DEFCONST); PropertyName *name = script_->getName(index); // Bake in attrs. unsigned attrs = JSPROP_ENUMERATE | JSPROP_PERMANENT; if (JSOp(*pc) == JSOP_DEFCONST) @@ -6383,17 +6383,17 @@ IonBuilder::jsop_toid() current->add(ins); current->push(ins); return resumeAfter(ins); } bool -IonBuilder::jsop_iter(uint8 flags) +IonBuilder::jsop_iter(uint8_t flags) { MDefinition *obj = current->pop(); MInstruction *ins = MIteratorStart::New(obj, flags); if (!iterators_.append(ins)) return false; current->add(ins);
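One detail worth calling out from makeCallHelper() above: when a scripted callee declares more formals than were actually passed, the missing arguments are explicitly padded with |undefined| so the call can skip the arguments rectifier. A sketch of that rule (illustrative helper, not SpiderMonkey API):

    // Natives receive an explicit argc and need no padding; scripted
    // targets are padded up to their declared formal count.
    static uint32_t
    TargetArgCount(bool calleeIsNative, uint32_t nargs, uint32_t argc)
    {
        if (calleeIsNative)
            return argc;
        return nargs > argc ? nargs : argc;   // Max<uint32_t>(nargs, argc)
    }

The padding loop then pushes one MConstant(UndefinedValue()) wrapped in an MPassArg for each slot between argc and the padded count.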
--- a/js/src/ion/IonBuilder.h
+++ b/js/src/ion/IonBuilder.h
@@ -36,22 +36,22 @@ class IonBuilder : public MIRGenerator

        DeferredEdge(MBasicBlock *block, DeferredEdge *next)
          : block(block), next(next)
        { }
    };

    struct ControlFlowInfo {
        // Entry in the cfgStack.
-        uint32 cfgEntry;
+        uint32_t cfgEntry;

        // Label that continue statements jump to.
        jsbytecode *continuepc;

-        ControlFlowInfo(uint32 cfgEntry, jsbytecode *continuepc)
+        ControlFlowInfo(uint32_t cfgEntry, jsbytecode *continuepc)
          : cfgEntry(cfgEntry),
            continuepc(continuepc)
        { }
    };

    // To avoid recursion, the bytecode analyzer uses a stack where each entry
    // is a small state machine. As we encounter branches or jumps in the
    // bytecode, we push information about the edges on the stack so that the
@@ -113,31 +113,31 @@ class IonBuilder : public MIRGenerator
            // Deferred break and continue targets.
            DeferredEdge *breaks;

            // MIR instruction
            MTableSwitch *ins;

            // The number of successors that have been mapped to blocks so far.
-            uint32 currentBlock;
+            uint32_t currentBlock;
        } tableswitch;

        struct {
            // pc immediately after the switch.
            jsbytecode *exitpc;

            // Deferred break and continue targets.
            DeferredEdge *breaks;

            // Vector of body blocks to process.
            FixedList<MBasicBlock *> *bodies;

            // The number of successors that have been mapped to blocks so far.
-            uint32 currentBlock;
+            uint32_t currentBlock;
        } lookupswitch;
    };

    inline bool isLoop() const {
        switch (state) {
          case DO_WHILE_LOOP_COND:
          case DO_WHILE_LOOP_BODY:
          case WHILE_LOOP_COND:
@@ -157,39 +157,39 @@ class IonBuilder : public MIRGenerator
        static CFGState TableSwitch(jsbytecode *exitpc, MTableSwitch *ins);
        static CFGState LookupSwitch(jsbytecode *exitpc);
    };

    static int CmpSuccessors(const void *a, const void *b);

  public:
    IonBuilder(JSContext *cx, TempAllocator *temp, MIRGraph *graph,
-               TypeOracle *oracle, CompileInfo *info, size_t inliningDepth = 0, uint32 loopDepth = 0);
+               TypeOracle *oracle, CompileInfo *info, size_t inliningDepth = 0, uint32_t loopDepth = 0);

    bool build();
    bool buildInline(IonBuilder *callerBuilder, MResumePoint *callerResumePoint,
                     MDefinition *thisDefn, MDefinitionVector &args);

  private:
    bool traverseBytecode();
    ControlStatus snoopControlFlow(JSOp op);
    void markPhiBytecodeUses(jsbytecode *pc);
    bool processIterators();
    bool inspectOpcode(JSOp op);
-    uint32 readIndex(jsbytecode *pc);
+    uint32_t readIndex(jsbytecode *pc);
    JSAtom *readAtom(jsbytecode *pc);
    bool abort(const char *message, ...);
    void spew(const char *message);

    static bool inliningEnabled() {
        return js_IonOptions.inlining;
    }

-    JSFunction *getSingleCallTarget(uint32 argc, jsbytecode *pc);
-    unsigned getPolyCallTargets(uint32 argc, jsbytecode *pc,
+    JSFunction *getSingleCallTarget(uint32_t argc, jsbytecode *pc);
+    unsigned getPolyCallTargets(uint32_t argc, jsbytecode *pc,
                                AutoObjectVector &targets, uint32_t maxTargets);
    bool canInlineTarget(JSFunction *target);

    void popCfgStack();
    bool processDeferredContinues(CFGState &state);
    ControlStatus processControlEnd();
    ControlStatus processCfgStack();
    ControlStatus processCfgEntry(CFGState &state);
@@ -213,19 +213,19 @@ class IonBuilder : public MIRGenerator
    ControlStatus processThrow();
    ControlStatus processContinue(JSOp op, jssrcnote *sn);
    ControlStatus processBreak(JSOp op, jssrcnote *sn);
    ControlStatus maybeLoop(JSOp op, jssrcnote *sn);
    bool pushLoop(CFGState::State state, jsbytecode *stopAt, MBasicBlock *entry,
                  jsbytecode *bodyStart, jsbytecode *bodyEnd, jsbytecode *exitpc,
                  jsbytecode *continuepc = NULL);

-    MBasicBlock *addBlock(MBasicBlock *block, uint32 loopDepth);
+    MBasicBlock
*addBlock(MBasicBlock *block, uint32_t loopDepth); MBasicBlock *newBlock(MBasicBlock *predecessor, jsbytecode *pc); - MBasicBlock *newBlock(MBasicBlock *predecessor, jsbytecode *pc, uint32 loopDepth); + MBasicBlock *newBlock(MBasicBlock *predecessor, jsbytecode *pc, uint32_t loopDepth); MBasicBlock *newBlock(MBasicBlock *predecessor, jsbytecode *pc, MResumePoint *priorResumePoint); MBasicBlock *newBlockAfter(MBasicBlock *at, MBasicBlock *predecessor, jsbytecode *pc); MBasicBlock *newOsrPreheader(MBasicBlock *header, jsbytecode *loopEntry); MBasicBlock *newPendingLoopHeader(MBasicBlock *predecessor, jsbytecode *pc); MBasicBlock *newBlock(jsbytecode *pc) { return newBlock(NULL, pc); } MBasicBlock *newBlockAfter(MBasicBlock *at, jsbytecode *pc) { @@ -271,24 +271,24 @@ class IonBuilder : public MIRGenerator JSObject *getSingletonPrototype(JSFunction *target); MDefinition *createThisNative(); MDefinition *createThisScripted(MDefinition *callee); MDefinition *createThisScriptedSingleton(HandleFunction target, HandleObject proto, MDefinition *callee); MDefinition *createThis(HandleFunction target, MDefinition *callee); MInstruction *createCallObject(MDefinition *callee, MDefinition *scopeObj); - bool makeCall(HandleFunction target, uint32 argc, bool constructing); + bool makeCall(HandleFunction target, uint32_t argc, bool constructing); MDefinition *walkScopeChain(unsigned hops); MInstruction *addBoundsCheck(MDefinition *index, MDefinition *length); MInstruction *addShapeGuard(MDefinition *obj, const Shape *shape, BailoutKind bailoutKind); - JSObject *getNewArrayTemplateObject(uint32 count); + JSObject *getNewArrayTemplateObject(uint32_t count); bool invalidatedIdempotentCache(); bool loadSlot(MDefinition *obj, Shape *shape, MIRType rvalType); bool storeSlot(MDefinition *obj, Shape *shape, MDefinition *value, bool needsBarrier); // jsop_getprop() helpers. 
bool getPropTryArgumentsLength(bool *emitted); @@ -307,21 +307,21 @@ class IonBuilder : public MIRGenerator bool jsop_add(MDefinition *left, MDefinition *right); bool jsop_bitnot(); bool jsop_bitop(JSOp op); bool jsop_binary(JSOp op); bool jsop_binary(JSOp op, MDefinition *left, MDefinition *right); bool jsop_pos(); bool jsop_neg(); - bool jsop_defvar(uint32 index); + bool jsop_defvar(uint32_t index); bool jsop_notearg(); - bool jsop_funcall(uint32 argc); - bool jsop_funapply(uint32 argc); - bool jsop_call(uint32 argc, bool constructing); + bool jsop_funcall(uint32_t argc); + bool jsop_funapply(uint32_t argc); + bool jsop_call(uint32_t argc, bool constructing); bool jsop_ifeq(JSOp op); bool jsop_andor(JSOp op); bool jsop_dup2(); bool jsop_loophead(jsbytecode *pc); bool jsop_compare(JSOp op); bool jsop_getgname(HandlePropertyName name); bool jsop_setgname(HandlePropertyName name); bool jsop_getname(HandlePropertyName name); @@ -339,29 +339,29 @@ class IonBuilder : public MIRGenerator bool jsop_arguments(); bool jsop_arguments_length(); bool jsop_arguments_getelem(); bool jsop_arguments_setelem(); bool jsop_not(); bool jsop_getprop(HandlePropertyName name); bool jsop_setprop(HandlePropertyName name); bool jsop_delprop(HandlePropertyName name); - bool jsop_newarray(uint32 count); + bool jsop_newarray(uint32_t count); bool jsop_newobject(HandleObject baseObj); bool jsop_initelem(); bool jsop_initelem_dense(); bool jsop_initprop(HandlePropertyName name); bool jsop_regexp(RegExpObject *reobj); bool jsop_object(JSObject *obj); bool jsop_lambda(JSFunction *fun); - bool jsop_deflocalfun(uint32 local, JSFunction *fun); + bool jsop_deflocalfun(uint32_t local, JSFunction *fun); bool jsop_this(); bool jsop_typeof(); bool jsop_toid(); - bool jsop_iter(uint8 flags); + bool jsop_iter(uint8_t flags); bool jsop_iternext(); bool jsop_itermore(); bool jsop_iterend(); bool jsop_in(); bool jsop_in_dense(); bool jsop_instanceof(); bool jsop_getaliasedvar(ScopeCoordinate sc); bool jsop_setaliasedvar(ScopeCoordinate sc); @@ -371,60 +371,60 @@ class IonBuilder : public MIRGenerator enum InliningStatus { InliningStatus_Error, InliningStatus_NotInlined, InliningStatus_Inlined }; // Inlining helpers. - bool discardCallArgs(uint32 argc, MDefinitionVector &argv, MBasicBlock *bb); - bool discardCall(uint32 argc, MDefinitionVector &argv, MBasicBlock *bb); + bool discardCallArgs(uint32_t argc, MDefinitionVector &argv, MBasicBlock *bb); + bool discardCall(uint32_t argc, MDefinitionVector &argv, MBasicBlock *bb); types::StackTypeSet *getInlineReturnTypeSet(); MIRType getInlineReturnType(); - types::StackTypeSet *getInlineArgTypeSet(uint32 argc, uint32 arg); - MIRType getInlineArgType(uint32 argc, uint32 arg); + types::StackTypeSet *getInlineArgTypeSet(uint32_t argc, uint32_t arg); + MIRType getInlineArgType(uint32_t argc, uint32_t arg); // Array natives. - InliningStatus inlineArray(uint32 argc, bool constructing); - InliningStatus inlineArrayPopShift(MArrayPopShift::Mode mode, uint32 argc, bool constructing); - InliningStatus inlineArrayPush(uint32 argc, bool constructing); - InliningStatus inlineArrayConcat(uint32 argc, bool constructing); + InliningStatus inlineArray(uint32_t argc, bool constructing); + InliningStatus inlineArrayPopShift(MArrayPopShift::Mode mode, uint32_t argc, bool constructing); + InliningStatus inlineArrayPush(uint32_t argc, bool constructing); + InliningStatus inlineArrayConcat(uint32_t argc, bool constructing); // Math natives. 
- InliningStatus inlineMathAbs(uint32 argc, bool constructing); - InliningStatus inlineMathFloor(uint32 argc, bool constructing); - InliningStatus inlineMathRound(uint32 argc, bool constructing); - InliningStatus inlineMathSqrt(uint32 argc, bool constructing); - InliningStatus inlineMathMinMax(bool max, uint32 argc, bool constructing); - InliningStatus inlineMathPow(uint32 argc, bool constructing); - InliningStatus inlineMathRandom(uint32 argc, bool constructing); - InliningStatus inlineMathFunction(MMathFunction::Function function, uint32 argc, + InliningStatus inlineMathAbs(uint32_t argc, bool constructing); + InliningStatus inlineMathFloor(uint32_t argc, bool constructing); + InliningStatus inlineMathRound(uint32_t argc, bool constructing); + InliningStatus inlineMathSqrt(uint32_t argc, bool constructing); + InliningStatus inlineMathMinMax(bool max, uint32_t argc, bool constructing); + InliningStatus inlineMathPow(uint32_t argc, bool constructing); + InliningStatus inlineMathRandom(uint32_t argc, bool constructing); + InliningStatus inlineMathFunction(MMathFunction::Function function, uint32_t argc, bool constructing); // String natives. - InliningStatus inlineStringObject(uint32 argc, bool constructing); - InliningStatus inlineStrCharCodeAt(uint32 argc, bool constructing); - InliningStatus inlineStrFromCharCode(uint32 argc, bool constructing); - InliningStatus inlineStrCharAt(uint32 argc, bool constructing); + InliningStatus inlineStringObject(uint32_t argc, bool constructing); + InliningStatus inlineStrCharCodeAt(uint32_t argc, bool constructing); + InliningStatus inlineStrFromCharCode(uint32_t argc, bool constructing); + InliningStatus inlineStrCharAt(uint32_t argc, bool constructing); // RegExp natives. - InliningStatus inlineRegExpTest(uint32 argc, bool constructing); + InliningStatus inlineRegExpTest(uint32_t argc, bool constructing); - InliningStatus inlineNativeCall(JSNative native, uint32 argc, bool constructing); + InliningStatus inlineNativeCall(JSNative native, uint32_t argc, bool constructing); - bool jsop_call_inline(HandleFunction callee, uint32 argc, bool constructing, + bool jsop_call_inline(HandleFunction callee, uint32_t argc, bool constructing, MConstant *constFun, MBasicBlock *bottom, Vector<MDefinition *, 8, IonAllocPolicy> &retvalDefns); - bool inlineScriptedCall(AutoObjectVector &targets, uint32 argc, bool constructing, + bool inlineScriptedCall(AutoObjectVector &targets, uint32_t argc, bool constructing, types::StackTypeSet *types, types::StackTypeSet *barrier); - bool makeInliningDecision(AutoObjectVector &targets, uint32 argc); + bool makeInliningDecision(AutoObjectVector &targets, uint32_t argc); - MCall *makeCallHelper(HandleFunction target, uint32 argc, bool constructing); - bool makeCallBarrier(HandleFunction target, uint32 argc, bool constructing, + MCall *makeCallHelper(HandleFunction target, uint32_t argc, bool constructing); + bool makeCallBarrier(HandleFunction target, uint32_t argc, bool constructing, types::StackTypeSet *types, types::StackTypeSet *barrier); inline bool TestCommonPropFunc(JSContext *cx, types::StackTypeSet *types, HandleId id, JSFunction **funcp, bool isGetter, bool *isDOM, MDefinition **guardOut); bool annotateGetPropertyCache(JSContext *cx, MDefinition *obj, MGetPropertyCache *getPropCache, @@ -460,17 +460,17 @@ class IonBuilder : public MIRGenerator CodeGenerator *backgroundCodegen() const { return backgroundCodegen_; } void setBackgroundCodegen(CodeGenerator *codegen) { backgroundCodegen_ = codegen; } private: JSContext *cx; 
jsbytecode *pc; MBasicBlock *current; - uint32 loopDepth_; + uint32_t loopDepth_; /* Information used for inline-call builders. */ MResumePoint *callerResumePoint_; jsbytecode *callerPC() { return callerResumePoint_ ? callerResumePoint_->pc() : NULL; } IonBuilder *callerBuilder_;
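The three-valued InliningStatus above separates hard failures from a mere decision not to inline. A hypothetical caller (names taken from the header, access control simplified for illustration) would dispatch on it like this:

    // Sketch: consume an InliningStatus; only Error aborts compilation.
    switch (inlineNativeCall(native, argc, constructing)) {
      case InliningStatus_Error:
        return false;                                  // e.g. OOM.
      case InliningStatus_Inlined:
        return true;                                   // MIR emitted in place.
      case InliningStatus_NotInlined:
        return makeCall(target, argc, constructing);   // Fall back to a real call.
    }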
--- a/js/src/ion/IonCaches.cpp +++ b/js/src/ion/IonCaches.cpp @@ -465,17 +465,17 @@ struct GetNativePropertyStub Register argUintNReg = regSet.takeGeneral(); Register argVpReg = regSet.takeGeneral(); // Shape has a getter function. bool callNative = IsCacheableGetPropCallNative(obj, holder, shape); JS_ASSERT_IF(!callNative, IsCacheableGetPropCallPropertyOp(obj, holder, shape)); // TODO: ensure stack is aligned? - DebugOnly<uint32> initialStack = masm.framePushed(); + DebugOnly<uint32_t> initialStack = masm.framePushed(); Label success, exception; // Push the IonCode pointer for the stub we're generating. // WARNING: // WARNING: If IonCode ever becomes relocatable, the following code is incorrect. // WARNING: Note that we're not marking the pointer being pushed as an ImmGCPtr. // WARNING: This is not a marking issue since the stub IonCode won't be collected @@ -1014,17 +1014,17 @@ IonCacheSetProperty::attachSetterCall(JS Register scratchReg = regSet.takeGeneral(); Register argJSContextReg = regSet.takeGeneral(); Register argObjReg = regSet.takeGeneral(); Register argIdReg = regSet.takeGeneral(); Register argStrictReg = regSet.takeGeneral(); Register argVpReg = regSet.takeGeneral(); // Ensure stack is aligned. - DebugOnly<uint32> initialStack = masm.framePushed(); + DebugOnly<uint32_t> initialStack = masm.framePushed(); Label success, exception; // Push the IonCode pointer for the stub we're generating. // WARNING: // WARNING: If IonCode ever becomes relocatable, the following code is incorrect. // WARNING: Note that we're not marking the pointer being pushed as an ImmGCPtr. // WARNING: This is not a marking issue since the stub IonCode won't be collected
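Both stub generators above stash masm.framePushed() in a DebugOnly<uint32_t>, which occupies storage only in debug builds; in release builds the value (used to assert stack alignment) compiles away entirely. A reduced sketch of the idea behind mozilla::DebugOnly:

    #ifdef DEBUG
    template <typename T>
    struct DebugOnlySketch
    {
        T value;
        DebugOnlySketch(const T &v) : value(v) {}
    };
    #else
    template <typename T>
    struct DebugOnlySketch
    {
        DebugOnlySketch(const T &) {}   // Value discarded in release builds.
    };
    #endif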
--- a/js/src/ion/IonCaches.h +++ b/js/src/ion/IonCaches.h @@ -167,19 +167,19 @@ class IonCache // Reset the cache around garbage collection. void reset(); CodeLocationJump lastJump() const { return lastJump_; } CodeLocationLabel cacheLabel() const { return cacheLabel_; } CodeLocationLabel rejoinLabel() const { - uint8 *ptr = initialJump_.raw(); + uint8_t *ptr = initialJump_.raw(); #ifdef JS_CPU_ARM - uint32 i = 0; + uint32_t i = 0; while (i < REJOIN_LABEL_OFFSET) ptr = Assembler::nextInstruction(ptr, &i); #endif return CodeLocationLabel(ptr); } bool pure() { return pure_;
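rejoinLabel() above needs per-architecture logic because ARM code streams may contain assembler-inserted constant pools: the rejoin point lies REJOIN_LABEL_OFFSET bytes of actual instructions past the initial jump, not at a raw pointer offset. The walk, extracted as a sketch (Assembler::nextInstruction advances one instruction and accumulates the byte count, as in the code above):

    static uint8_t *
    SkipToRejoin(uint8_t *ptr)
    {
    #ifdef JS_CPU_ARM
        uint32_t covered = 0;
        while (covered < REJOIN_LABEL_OFFSET)
            ptr = Assembler::nextInstruction(ptr, &covered);
    #endif
        // On other CPUs the initial jump address is the rejoin label itself.
        return ptr;
    }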
--- a/js/src/ion/IonCode.h +++ b/js/src/ion/IonCode.h @@ -21,65 +21,65 @@ namespace JSC { struct JSScript; namespace js { namespace ion { // The maximum size of any buffer associated with an assembler or code object. // This is chosen to not overflow a signed integer, leaving room for an extra // bit on offsets. -static const uint32 MAX_BUFFER_SIZE = (1 << 30) - 1; +static const uint32_t MAX_BUFFER_SIZE = (1 << 30) - 1; // Maximum number of scripted arg and stack slots. -static const uint32 SNAPSHOT_MAX_NARGS = 127; -static const uint32 SNAPSHOT_MAX_STACK = 127; +static const uint32_t SNAPSHOT_MAX_NARGS = 127; +static const uint32_t SNAPSHOT_MAX_STACK = 127; class MacroAssembler; class CodeOffsetLabel; class IonCode : public gc::Cell { protected: - uint8 *code_; + uint8_t *code_; JSC::ExecutablePool *pool_; - uint32 bufferSize_; // Total buffer size. - uint32 insnSize_; // Instruction stream size. - uint32 dataSize_; // Size of the read-only data area. - uint32 jumpRelocTableBytes_; // Size of the jump relocation table. - uint32 dataRelocTableBytes_; // Size of the data relocation table. + uint32_t bufferSize_; // Total buffer size. + uint32_t insnSize_; // Instruction stream size. + uint32_t dataSize_; // Size of the read-only data area. + uint32_t jumpRelocTableBytes_; // Size of the jump relocation table. + uint32_t dataRelocTableBytes_; // Size of the data relocation table. JSBool invalidated_; // Whether the code object has been invalidated. // This is necessary to prevent GC tracing. IonCode() : code_(NULL), pool_(NULL) { } - IonCode(uint8 *code, uint32 bufferSize, JSC::ExecutablePool *pool) + IonCode(uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool) : code_(code), pool_(pool), bufferSize_(bufferSize), insnSize_(0), dataSize_(0), jumpRelocTableBytes_(0), dataRelocTableBytes_(0), invalidated_(false) { } - uint32 dataOffset() const { + uint32_t dataOffset() const { return insnSize_; } - uint32 jumpRelocTableOffset() const { + uint32_t jumpRelocTableOffset() const { return dataOffset() + dataSize_; } - uint32 dataRelocTableOffset() const { + uint32_t dataRelocTableOffset() const { return jumpRelocTableOffset() + jumpRelocTableBytes_; } public: - uint8 *raw() const { + uint8_t *raw() const { return code_; } size_t instructionsSize() const { return insnSize_; } void trace(JSTracer *trc); void finalize(FreeOp *fop); void setInvalidated() { @@ -94,34 +94,34 @@ class IonCode : public gc::Cell } template <typename T> T as() const { return JS_DATA_TO_FUNC_PTR(T, raw()); } void copyFrom(MacroAssembler &masm); - static IonCode *FromExecutable(uint8 *buffer) { + static IonCode *FromExecutable(uint8_t *buffer) { IonCode *code = *(IonCode **)(buffer - sizeof(IonCode *)); JS_ASSERT(code->raw() == buffer); return code; } static size_t offsetOfCode() { return offsetof(IonCode, code_); } - uint8 *jumpRelocTable() { + uint8_t *jumpRelocTable() { return code_ + jumpRelocTableOffset(); } // Allocates a new IonCode object which will be managed by the GC. If no // object can be allocated, NULL is returned. On failure, |pool| is // automatically released, so the code may be freed. 
- static IonCode *New(JSContext *cx, uint8 *code, uint32 bufferSize, JSC::ExecutablePool *pool); + static IonCode *New(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool); public: static void readBarrier(IonCode *code); static void writeBarrierPre(IonCode *code); static void writeBarrierPost(IonCode *code, void *addr); }; class SnapshotWriter; @@ -139,120 +139,120 @@ struct IonScript // Deoptimization table used by this method. HeapPtr<IonCode> deoptTable_; // Entrypoint for OSR, or NULL. jsbytecode *osrPc_; // Offset to OSR entrypoint from method_->raw(), or 0. - uint32 osrEntryOffset_; + uint32_t osrEntryOffset_; // Offset of the invalidation epilogue (which pushes this IonScript // and calls the invalidation thunk). - uint32 invalidateEpilogueOffset_; + uint32_t invalidateEpilogueOffset_; // The offset immediately after the IonScript immediate. // NOTE: technically a constant delta from // |invalidateEpilogueOffset_|, so we could hard-code this // per-platform if we want. - uint32 invalidateEpilogueDataOffset_; + uint32_t invalidateEpilogueDataOffset_; // Flag set when we bailout, to avoid frequent bailouts. bool bailoutExpected_; // Offset from the start of the code buffer to its snapshot buffer. - uint32 snapshots_; - uint32 snapshotsSize_; + uint32_t snapshots_; + uint32_t snapshotsSize_; // Table mapping bailout IDs to snapshot offsets. - uint32 bailoutTable_; - uint32 bailoutEntries_; + uint32_t bailoutTable_; + uint32_t bailoutEntries_; // Constant table for constants stored in snapshots. - uint32 constantTable_; - uint32 constantEntries_; + uint32_t constantTable_; + uint32_t constantEntries_; // Map code displacement to safepoint / OSI-patch-delta. - uint32 safepointIndexOffset_; - uint32 safepointIndexEntries_; + uint32_t safepointIndexOffset_; + uint32_t safepointIndexEntries_; // Number of STACK_SLOT_SIZE-length slots this function reserves on the // stack. - uint32 frameSlots_; + uint32_t frameSlots_; // Frame size is the value that can be added to the StackPointer along // with the frame prefix to get a valid IonJSFrameLayout. - uint32 frameSize_; + uint32_t frameSize_; // Map OSI-point displacement to snapshot. - uint32 osiIndexOffset_; - uint32 osiIndexEntries_; + uint32_t osiIndexOffset_; + uint32_t osiIndexEntries_; // State for polymorphic caches in the compiled code. - uint32 cacheList_; - uint32 cacheEntries_; + uint32_t cacheList_; + uint32_t cacheEntries_; // Offset list for patchable pre-barriers. - uint32 prebarrierList_; - uint32 prebarrierEntries_; + uint32_t prebarrierList_; + uint32_t prebarrierEntries_; // Offset to and length of the safepoint table in bytes. - uint32 safepointsStart_; - uint32 safepointsSize_; + uint32_t safepointsStart_; + uint32_t safepointsSize_; // List of compiled/inlined JSScript's. - uint32 scriptList_; - uint32 scriptEntries_; + uint32_t scriptList_; + uint32_t scriptEntries_; // Number of references from invalidation records. 
size_t refcount_; types::RecompileInfo recompileInfo_; public: // Number of times this function has tried to call a non-IM compileable function - uint32 slowCallCount; + uint32_t slowCallCount; SnapshotOffset *bailoutTable() { - return (SnapshotOffset *)(reinterpret_cast<uint8 *>(this) + bailoutTable_); + return (SnapshotOffset *)(reinterpret_cast<uint8_t *>(this) + bailoutTable_); } HeapValue *constants() { - return (HeapValue *)(reinterpret_cast<uint8 *>(this) + constantTable_); + return (HeapValue *)(reinterpret_cast<uint8_t *>(this) + constantTable_); } const SafepointIndex *safepointIndices() const { return const_cast<IonScript *>(this)->safepointIndices(); } SafepointIndex *safepointIndices() { - return (SafepointIndex *)(reinterpret_cast<uint8 *>(this) + safepointIndexOffset_); + return (SafepointIndex *)(reinterpret_cast<uint8_t *>(this) + safepointIndexOffset_); } const OsiIndex *osiIndices() const { return const_cast<IonScript *>(this)->osiIndices(); } OsiIndex *osiIndices() { - return (OsiIndex *)(reinterpret_cast<uint8 *>(this) + osiIndexOffset_); + return (OsiIndex *)(reinterpret_cast<uint8_t *>(this) + osiIndexOffset_); } IonCache *cacheList() { - return (IonCache *)(reinterpret_cast<uint8 *>(this) + cacheList_); + return (IonCache *)(reinterpret_cast<uint8_t *>(this) + cacheList_); } CodeOffsetLabel *prebarrierList() { - return (CodeOffsetLabel *)(reinterpret_cast<uint8 *>(this) + prebarrierList_); + return (CodeOffsetLabel *)(reinterpret_cast<uint8_t *>(this) + prebarrierList_); } JSScript **scriptList() const { - return (JSScript **)(reinterpret_cast<const uint8 *>(this) + scriptList_); + return (JSScript **)(reinterpret_cast<const uint8_t *>(this) + scriptList_); } private: void trace(JSTracer *trc); public: // Do not call directly, use IonScript::New. This is public for cx->new_. IonScript(); - static IonScript *New(JSContext *cx, uint32 frameLocals, uint32 frameSize, + static IonScript *New(JSContext *cx, uint32_t frameLocals, uint32_t frameSize, size_t snapshotsSize, size_t snapshotEntries, size_t constants, size_t safepointIndexEntries, size_t osiIndexEntries, size_t cacheEntries, size_t prebarrierEntries, size_t safepointsSize, size_t scriptEntries); static void Trace(JSTracer *trc, IonScript *script); static void Destroy(FreeOp *fop, IonScript *script); static inline size_t offsetOfMethod() { @@ -274,61 +274,61 @@ struct IonScript deoptTable_ = code; } void setOsrPc(jsbytecode *osrPc) { osrPc_ = osrPc; } jsbytecode *osrPc() const { return osrPc_; } - void setOsrEntryOffset(uint32 offset) { + void setOsrEntryOffset(uint32_t offset) { JS_ASSERT(!osrEntryOffset_); osrEntryOffset_ = offset; } - uint32 osrEntryOffset() const { + uint32_t osrEntryOffset() const { return osrEntryOffset_; } - bool containsCodeAddress(uint8 *addr) const { + bool containsCodeAddress(uint8_t *addr) const { return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize(); } - bool containsReturnAddress(uint8 *addr) const { + bool containsReturnAddress(uint8_t *addr) const { // This accounts for an off by one error caused by the return address of a // bailout sitting outside the range of the containing function. 
return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize(); } - void setInvalidationEpilogueOffset(uint32 offset) { + void setInvalidationEpilogueOffset(uint32_t offset) { JS_ASSERT(!invalidateEpilogueOffset_); invalidateEpilogueOffset_ = offset; } - uint32 invalidateEpilogueOffset() const { + uint32_t invalidateEpilogueOffset() const { JS_ASSERT(invalidateEpilogueOffset_); return invalidateEpilogueOffset_; } - void setInvalidationEpilogueDataOffset(uint32 offset) { + void setInvalidationEpilogueDataOffset(uint32_t offset) { JS_ASSERT(!invalidateEpilogueDataOffset_); invalidateEpilogueDataOffset_ = offset; } - uint32 invalidateEpilogueDataOffset() const { + uint32_t invalidateEpilogueDataOffset() const { JS_ASSERT(invalidateEpilogueDataOffset_); return invalidateEpilogueDataOffset_; } void setBailoutExpected() { bailoutExpected_ = true; } bool bailoutExpected() const { return bailoutExpected_; } - const uint8 *snapshots() const { - return reinterpret_cast<const uint8 *>(this) + snapshots_; + const uint8_t *snapshots() const { + return reinterpret_cast<const uint8_t *>(this) + snapshots_; } size_t snapshotsSize() const { return snapshotsSize_; } - const uint8 *safepoints() const { - return reinterpret_cast<const uint8 *>(this) + safepointsStart_; + const uint8_t *safepoints() const { + return reinterpret_cast<const uint8_t *>(this) + safepointsStart_; } size_t safepointsSize() const { return safepointsSize_; } JSScript *getScript(size_t i) const { JS_ASSERT(i < scriptEntries_); return scriptList()[i]; } @@ -340,33 +340,33 @@ struct IonScript } HeapValue &getConstant(size_t index) { JS_ASSERT(index < numConstants()); return constants()[index]; } size_t numConstants() const { return constantEntries_; } - uint32 frameSlots() const { + uint32_t frameSlots() const { return frameSlots_; } - uint32 frameSize() const { + uint32_t frameSize() const { return frameSize_; } - SnapshotOffset bailoutToSnapshot(uint32 bailoutId) { + SnapshotOffset bailoutToSnapshot(uint32_t bailoutId) { JS_ASSERT(bailoutId < bailoutEntries_); return bailoutTable()[bailoutId]; } - const SafepointIndex *getSafepointIndex(uint32 disp) const; - const SafepointIndex *getSafepointIndex(uint8 *retAddr) const { + const SafepointIndex *getSafepointIndex(uint32_t disp) const; + const SafepointIndex *getSafepointIndex(uint8_t *retAddr) const { JS_ASSERT(containsCodeAddress(retAddr)); return getSafepointIndex(retAddr - method()->raw()); } - const OsiIndex *getOsiIndex(uint32 disp) const; - const OsiIndex *getOsiIndex(uint8 *retAddr) const; + const OsiIndex *getOsiIndex(uint32_t disp) const; + const OsiIndex *getOsiIndex(uint8_t *retAddr) const; inline IonCache &getCache(size_t index); size_t numCaches() const { return cacheEntries_; } inline CodeOffsetLabel &getPrebarrier(size_t index); size_t numPrebarriers() const { return prebarrierEntries_; } @@ -402,113 +402,113 @@ struct IonScript } }; // Execution information for a basic block which may persist after the // accompanying IonScript is destroyed, for use during profiling. struct IonBlockCounts { private: - uint32 id_; + uint32_t id_; // Approximate bytecode in the outer (not inlined) script this block // was generated from. - uint32 offset_; + uint32_t offset_; // ids for successors of this block. - uint32 numSuccessors_; - uint32 *successors_; + uint32_t numSuccessors_; + uint32_t *successors_; // Hit count for this block. - uint64 hitCount_; + uint64_t hitCount_; // Text information about the code generated for this block. 
char *code_; // Number of bytes of code generated in this block. Spill code is counted // separately from other, instruction implementing code. - uint32 instructionBytes_; - uint32 spillBytes_; + uint32_t instructionBytes_; + uint32_t spillBytes_; public: - bool init(uint32 id, uint32 offset, uint32 numSuccessors) { + bool init(uint32_t id, uint32_t offset, uint32_t numSuccessors) { id_ = id; offset_ = offset; numSuccessors_ = numSuccessors; if (numSuccessors) { - successors_ = (uint32 *) js_calloc(numSuccessors * sizeof(uint32)); + successors_ = (uint32_t *) js_calloc(numSuccessors * sizeof(uint32_t)); if (!successors_) return false; } return true; } void destroy() { if (successors_) js_free(successors_); if (code_) js_free(code_); } - uint32 id() const { + uint32_t id() const { return id_; } - uint32 offset() const { + uint32_t offset() const { return offset_; } size_t numSuccessors() const { return numSuccessors_; } - void setSuccessor(size_t i, uint32 id) { + void setSuccessor(size_t i, uint32_t id) { JS_ASSERT(i < numSuccessors_); successors_[i] = id; } - uint32 successor(size_t i) const { + uint32_t successor(size_t i) const { JS_ASSERT(i < numSuccessors_); return successors_[i]; } - uint64 *addressOfHitCount() { + uint64_t *addressOfHitCount() { return &hitCount_; } - uint64 hitCount() const { + uint64_t hitCount() const { return hitCount_; } void setCode(const char *code) { char *ncode = (char *) js_malloc(strlen(code) + 1); if (ncode) { strcpy(ncode, code); code_ = ncode; } } const char *code() const { return code_; } - void setInstructionBytes(uint32 bytes) { + void setInstructionBytes(uint32_t bytes) { instructionBytes_ = bytes; } - uint32 instructionBytes() const { + uint32_t instructionBytes() const { return instructionBytes_; } - void setSpillBytes(uint32 bytes) { + void setSpillBytes(uint32_t bytes) { spillBytes_ = bytes; } - uint32 spillBytes() const { + uint32_t spillBytes() const { return spillBytes_; } }; // Execution information for a compiled script which may persist after the // IonScript is destroyed, for use during profiling. struct IonScriptCounts {
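All of the accessors above share one layout trick: an IonScript is a single allocation with the header first and its tables (snapshots, constants, safepoint indices, caches, and so on) appended after it, each located by a byte offset from |this|, which is exactly why the casts go through uint8_t *. A minimal standalone sketch of that header-plus-trailing-table pattern (Blob and its fields are illustrative, not the real IonScript layout):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

// Sketch: a header followed, in the same allocation, by a table of
// uint32_t entries located via a byte offset from |this|.
struct Blob
{
    uint32_t tableOffset_;   // byte offset from |this| to the table
    uint32_t tableEntries_;

    uint32_t *table() {
        return (uint32_t *)(reinterpret_cast<uint8_t *>(this) + tableOffset_);
    }

    static Blob *New(uint32_t entries) {
        size_t bytes = sizeof(Blob) + entries * sizeof(uint32_t);
        Blob *b = (Blob *)malloc(bytes);
        if (!b)
            return NULL;
        b->tableOffset_ = sizeof(Blob);  // table starts right after the header
        b->tableEntries_ = entries;
        memset(b->table(), 0, entries * sizeof(uint32_t));
        return b;
    }
};

int main() {
    Blob *b = Blob::New(4);
    if (!b)
        return 1;
    b->table()[2] = 42;
    printf("%u\n", (unsigned)b->table()[2]);  // prints 42
    free(b);
    return 0;
}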
--- a/js/src/ion/IonCompartment.h +++ b/js/src/ion/IonCompartment.h @@ -56,17 +56,17 @@ class IonRuntime // Map VMFunction addresses to the IonCode of the wrapper. typedef WeakCache<const VMFunction *, IonCode *> VMWrapperMap; VMWrapperMap *functionWrappers_; private: IonCode *generateEnterJIT(JSContext *cx); IonCode *generateArgumentsRectifier(JSContext *cx); - IonCode *generateBailoutTable(JSContext *cx, uint32 frameClass); + IonCode *generateBailoutTable(JSContext *cx, uint32_t frameClass); IonCode *generateBailoutHandler(JSContext *cx); IonCode *generateInvalidator(JSContext *cx); IonCode *generatePreBarrier(JSContext *cx, MIRType type); IonCode *generateVMWrapper(JSContext *cx, const VMFunction &f); public: IonRuntime(); ~IonRuntime(); @@ -151,34 +151,34 @@ class BailoutClosure; class IonActivation { private: JSContext *cx_; JSCompartment *compartment_; IonActivation *prev_; StackFrame *entryfp_; BailoutClosure *bailout_; - uint8 *prevIonTop_; + uint8_t *prevIonTop_; JSContext *prevIonJSContext_; // When creating an activation without a StackFrame, this field is used // to communicate the calling pc for StackIter. jsbytecode *prevpc_; public: IonActivation(JSContext *cx, StackFrame *fp); ~IonActivation(); StackFrame *entryfp() const { return entryfp_; } IonActivation *prev() const { return prev_; } - uint8 *prevIonTop() const { + uint8_t *prevIonTop() const { return prevIonTop_; } jsbytecode *prevpc() const { JS_ASSERT_IF(entryfp_, entryfp_->callingIntoIon()); return prevpc_; } void setEntryFp(StackFrame *fp) { JS_ASSERT_IF(fp, !entryfp_);
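IonActivation records, per entry into Ion code, the previous activation and the previous ionTop as a raw byte pointer (hence uint8_t *), forming a linked list that the iterators below walk. A small sketch of that walk under the same representation (Activation here is illustrative, not the SpiderMonkey type):

#include <stdint.h>
#include <stdio.h>

// Sketch of a linked list of activations, each remembering the stack top
// (a raw byte pointer) that was current when it was pushed.
struct Activation
{
    Activation *prev;
    uint8_t *prevStackTop;  // byte-addressed, so pointer arithmetic is in bytes
};

static void
WalkActivations(Activation *top)
{
    for (Activation *act = top; act; act = act->prev)
        printf("activation %p, saved top %p\n", (void *)act, (void *)act->prevStackTop);
}

int main() {
    uint8_t stack[64];
    Activation outer = { NULL, stack + 64 };
    Activation inner = { &outer, stack + 32 };
    WalkActivations(&inner);
    return 0;
}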
--- a/js/src/ion/IonFrameIterator.h +++ b/js/src/ion/IonFrameIterator.h @@ -56,48 +56,48 @@ class IonJSFrameLayout; class IonExitFrameLayout; class IonActivation; class IonActivationIterator; class IonFrameIterator { protected: - uint8 *current_; + uint8_t *current_; FrameType type_; - uint8 *returnAddressToFp_; + uint8_t *returnAddressToFp_; size_t frameSize_; private: mutable const SafepointIndex *cachedSafepointIndex_; const IonActivation *activation_; public: - IonFrameIterator(uint8 *top) + IonFrameIterator(uint8_t *top) : current_(top), type_(IonFrame_Exit), returnAddressToFp_(NULL), frameSize_(0), cachedSafepointIndex_(NULL), activation_(NULL) { } IonFrameIterator(const IonActivationIterator &activations); IonFrameIterator(IonJSFrameLayout *fp); // Current frame information. FrameType type() const { return type_; } - uint8 *fp() const { + uint8_t *fp() const { return current_; } inline IonCommonFrameLayout *current() const; - inline uint8 *returnAddress() const; + inline uint8_t *returnAddress() const; IonJSFrameLayout *jsFrame() const { JS_ASSERT(type() == IonFrame_OptimizedJS); return (IonJSFrameLayout *) fp(); } IonExitFrameLayout *exitFrame() const { JS_ASSERT(type() == IonFrame_Exit); @@ -130,24 +130,24 @@ class IonFrameIterator JSFunction *maybeCallee() const; unsigned numActualArgs() const; JSScript *script() const; Value *nativeVp() const; Value *actualArgs() const; // Returns the return address of the frame above this one (that is, the // return address that returns back to the current frame). - uint8 *returnAddressToFp() const { + uint8_t *returnAddressToFp() const { return returnAddressToFp_; } // Previous frame information extracted from the current frame. inline size_t prevFrameLocalSize() const; inline FrameType prevType() const; - uint8 *prevFp() const; + uint8_t *prevFp() const; // Returns the stack space used by the current frame, in bytes. This does // not include the size of its fixed header. inline size_t frameSize() const; // Functions used to iterate on frames. When prevType is IonFrame_Entry, // the current frame is the last frame. inline bool done() const { @@ -169,32 +169,32 @@ class IonFrameIterator uintptr_t *spillBase() const; MachineState machineState() const; void dump() const; }; class IonActivationIterator { - uint8 *top_; + uint8_t *top_; IonActivation *activation_; private: void settle(); public: IonActivationIterator(JSContext *cx); IonActivationIterator(JSRuntime *rt); IonActivationIterator &operator++(); IonActivation *activation() const { return activation_; } - uint8 *top() const { + uint8_t *top() const { return top_; } bool more() const; // Returns the bottom and top addresses of the current activation. void ionStackRange(uintptr_t *&min, uintptr_t *&end); }; @@ -262,17 +262,17 @@ class InlineFrameIterator { const IonFrameIterator *frame_; SnapshotIterator start_; SnapshotIterator si_; unsigned framesRead_; HeapPtr<JSFunction> callee_; HeapPtr<JSScript> script_; jsbytecode *pc_; - uint32 numActualArgs_; + uint32_t numActualArgs_; private: void findNextFrame(); public: InlineFrameIterator(const IonFrameIterator *iter); InlineFrameIterator(const IonBailoutIterator *iter);
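The iterator state above is just a byte pointer plus the current frame type; stepping to the caller adds the fixed prefix size and the previous frame's local size (what prevFp() computes), until the entry frame is reached. A simplified, self-contained sketch of that stepping arithmetic (the frame layout here is assumed for illustration, not the real per-type prefixes from SizeOfFramePrefix):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

// Sketch: each frame header records the size of the caller's locals and the
// caller's type; stepping to the previous frame is pure byte arithmetic.
enum FrameType { Frame_Entry, Frame_JS, Frame_Exit };

struct FrameHeader
{
    uint32_t prevLocalSize;  // bytes of caller locals above this header
    FrameType prevType;
};

struct FrameIter
{
    uint8_t *current;
    FrameType type;

    FrameHeader *header() const { return (FrameHeader *)current; }
    bool done() const { return type == Frame_Entry; }

    void operator++() {
        // The previous frame sits above: this header, then the caller's locals.
        uint8_t *prev = current + sizeof(FrameHeader) + header()->prevLocalSize;
        type = header()->prevType;
        current = prev;
    }
};

int main() {
    FrameHeader buf[8] = {};                 // backing store, suitably aligned
    uint8_t *top = (uint8_t *)buf;

    // One JS frame at the top; its caller is the entry frame, with 16 bytes
    // of locals between the two headers.
    FrameHeader *jsHeader = (FrameHeader *)top;
    jsHeader->prevLocalSize = 16;
    jsHeader->prevType = Frame_Entry;

    FrameIter it = { top, Frame_JS };
    while (!it.done()) {
        printf("frame at %p, type %d\n", (void *)it.current, (int)it.type);
        ++it;
    }
    return 0;
}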
--- a/js/src/ion/IonFrames-inl.h
+++ b/js/src/ion/IonFrames-inl.h
@@ -47,17 +47,17 @@ SizeOfFramePrefix(FrameType type)
 }
 
 inline IonCommonFrameLayout *
 IonFrameIterator::current() const
 {
     return (IonCommonFrameLayout *)current_;
 }
 
-inline uint8 *
+inline uint8_t *
 IonFrameIterator::returnAddress() const
 {
     IonCommonFrameLayout *current = (IonCommonFrameLayout *) current_;
     return current->returnAddress();
 }
 
 inline size_t
 IonFrameIterator::prevFrameLocalSize() const
--- a/js/src/ion/IonFrames.cpp +++ b/js/src/ion/IonFrames.cpp @@ -32,17 +32,17 @@ IonFrameIterator::IonFrameIterator(const returnAddressToFp_(NULL), frameSize_(0), cachedSafepointIndex_(NULL), activation_(activations.activation()) { } IonFrameIterator::IonFrameIterator(IonJSFrameLayout *fp) - : current_((uint8 *)fp), + : current_((uint8_t *)fp), type_(IonFrame_OptimizedJS), returnAddressToFp_(fp->returnAddress()), frameSize_(fp->prevFrameLocalSize()) { } bool IonFrameIterator::checkInvalidation() const @@ -50,28 +50,28 @@ IonFrameIterator::checkInvalidation() co IonScript *dummy; return checkInvalidation(&dummy); } bool IonFrameIterator::checkInvalidation(IonScript **ionScriptOut) const { AutoAssertNoGC nogc; - uint8 *returnAddr = returnAddressToFp(); + uint8_t *returnAddr = returnAddressToFp(); RawScript script = this->script(); // N.B. the current IonScript is not the same as the frame's // IonScript if the frame has since been invalidated. IonScript *currentIonScript = script->ion; bool invalidated = !script->hasIonScript() || !currentIonScript->containsReturnAddress(returnAddr); if (!invalidated) return false; - int32 invalidationDataOffset = ((int32 *) returnAddr)[-1]; - uint8 *ionScriptDataOffset = returnAddr + invalidationDataOffset; + int32_t invalidationDataOffset = ((int32_t *) returnAddr)[-1]; + uint8_t *ionScriptDataOffset = returnAddr + invalidationDataOffset; IonScript *ionScript = (IonScript *) Assembler::getPointer(ionScriptDataOffset); JS_ASSERT(ionScript->containsReturnAddress(returnAddr)); *ionScriptOut = ionScript; return true; } CalleeToken IonFrameIterator::calleeToken() const @@ -173,17 +173,17 @@ IonFrameIterator::nativeVp() const } Value * IonFrameIterator::actualArgs() const { return jsFrame()->argv() + 1; } -uint8 * +uint8_t * IonFrameIterator::prevFp() const { size_t currentSize = SizeOfFramePrefix(type_); // This quick fix must be removed as soon as bug 717297 land. This is // needed because the descriptor size of JS-to-JS frame which is just after // a Rectifier frame should not change. (cf EnsureExitFrame function) if (prevType() == IonFrame_Bailed_Rectifier || prevType() == IonFrame_Bailed_JS) { JS_ASSERT(type_ == IonFrame_Exit); @@ -205,17 +205,17 @@ IonFrameIterator::operator++() // since the entry and first frames overlap. if (current()->prevType() == IonFrame_Entry) { type_ = IonFrame_Entry; return *this; } // Note: prevFp() needs the current type, so set it after computing the // next frame. - uint8 *prev = prevFp(); + uint8_t *prev = prevFp(); type_ = current()->prevType(); if (type_ == IonFrame_Bailed_JS) type_ = IonFrame_OptimizedJS; returnAddressToFp_ = current()->returnAddress(); current_ = prev; return *this; } @@ -242,24 +242,24 @@ IonFrameIterator::machineState() const MachineState machine; for (GeneralRegisterIterator iter(reader.allSpills()); iter.more(); iter++) machine.setRegisterLocation(*iter, --spill); return machine; } static void -CloseLiveIterator(JSContext *cx, const InlineFrameIterator &frame, uint32 localSlot) +CloseLiveIterator(JSContext *cx, const InlineFrameIterator &frame, uint32_t localSlot) { AssertCanGC(); SnapshotIterator si = frame.snapshotIterator(); // Skip stack slots until we reach the iterator object. 
- uint32 base = CountArgSlots(frame.maybeCallee()) + frame.script()->nfixed; - uint32 skipSlots = base + localSlot - 1; + uint32_t base = CountArgSlots(frame.maybeCallee()) + frame.script()->nfixed; + uint32_t skipSlots = base + localSlot - 1; for (unsigned i = 0; i < skipSlots; i++) si.skip(); Value v = si.read(); RootedObject obj(cx, &v.toObject()); if (cx->isExceptionPending()) @@ -276,30 +276,30 @@ CloseLiveIterators(JSContext *cx, const jsbytecode *pc = frame.pc(); if (!script->hasTrynotes()) return; JSTryNote *tn = script->trynotes()->vector; JSTryNote *tnEnd = tn + script->trynotes()->length; - uint32 pcOffset = uint32(pc - script->main()); + uint32_t pcOffset = uint32_t(pc - script->main()); for (; tn != tnEnd; ++tn) { if (pcOffset < tn->start) continue; if (pcOffset >= tn->start + tn->length) continue; if (tn->kind != JSTRY_ITER) continue; JS_ASSERT(JSOp(*(script->main() + tn->start + tn->length)) == JSOP_ENDITER); JS_ASSERT(tn->stackDepth > 0); - uint32 localSlot = tn->stackDepth; + uint32_t localSlot = tn->stackDepth; CloseLiveIterator(cx, frame, localSlot); } } void ion::HandleException(ResumeFromException *rfe) { AssertCanGC(); @@ -414,21 +414,21 @@ MarkCalleeToken(JSTracer *trc, CalleeTok static inline uintptr_t ReadAllocation(const IonFrameIterator &frame, const LAllocation *a) { if (a->isGeneralReg()) { Register reg = a->toGeneralReg()->reg(); return frame.machineState().read(reg); } if (a->isStackSlot()) { - uint32 slot = a->toStackSlot()->slot(); + uint32_t slot = a->toStackSlot()->slot(); return *frame.jsFrame()->slotRef(slot); } - uint32 index = a->toArgument()->index(); - uint8 *argv = reinterpret_cast<uint8 *>(frame.jsFrame()->argv()); + uint32_t index = a->toArgument()->index(); + uint8_t *argv = reinterpret_cast<uint8_t *>(frame.jsFrame()->argv()); return *reinterpret_cast<uintptr_t *>(argv + index); } static void MarkIonJSFrame(JSTracer *trc, const IonFrameIterator &frame) { IonJSFrameLayout *layout = (IonJSFrameLayout *)frame.fp(); @@ -458,17 +458,17 @@ MarkIonJSFrame(JSTracer *trc, const IonF } const SafepointIndex *si = ionScript->getSafepointIndex(frame.returnAddressToFp()); SafepointReader safepoint(ionScript, si); // Scan through slots which contain pointers (or on punboxing systems, // actual values). - uint32 slot; + uint32_t slot; while (safepoint.getGcSlot(&slot)) { uintptr_t *ref = layout->slotRef(slot); gc::MarkGCThingRoot(trc, reinterpret_cast<void **>(ref), "ion-gc-slot"); } while (safepoint.getValueSlot(&slot)) { Value *v = (Value *)layout->slotRef(slot); gc::MarkValueRoot(trc, v, "ion-gc-slot"); @@ -575,18 +575,18 @@ MarkIonExitFrame(JSTracer *trc, const Io MarkIonCodeRoot(trc, footer->addressOfIonCode(), "ion-exit-code"); const VMFunction *f = footer->function(); if (f == NULL || f->explicitArgs == 0) return; // Mark arguments of the VM wrapper. - uint8 *argBase = frame.exitFrame()->argBase(); - for (uint32 explicitArg = 0; explicitArg < f->explicitArgs; explicitArg++) { + uint8_t *argBase = frame.exitFrame()->argBase(); + for (uint32_t explicitArg = 0; explicitArg < f->explicitArgs; explicitArg++) { switch (f->argRootType(explicitArg)) { case VMFunction::RootNone: break; case VMFunction::RootObject: { // Sometimes we can bake in HandleObjects to NULL. 
JSObject **pobj = reinterpret_cast<JSObject **>(argBase); if (*pobj) gc::MarkObjectRoot(trc, pobj, "ion-vm-args"); @@ -706,17 +706,17 @@ ion::GetPcScript(JSContext *cx, MutableH } void OsiIndex::fixUpOffset(MacroAssembler &masm) { callPointDisplacement_ = masm.actualOffset(callPointDisplacement_); } -uint32 +uint32_t OsiIndex::returnPointDisplacement() const { // In general, pointer arithmetic on code is bad, but in this case, // getting the return address from a call instruction, stepping over pools // would be wrong. return callPointDisplacement_ + Assembler::patchWrite_NearCallSize(); }
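checkInvalidation() above relies on the invalidation epilogue baking an int32_t displacement into the code stream immediately before the return address; adding that displacement to the return address lands on a patched-in IonScript pointer. A hedged sketch of the same "data word just before the return point" trick, simulated in a plain byte buffer (memcpy is used instead of the [-1] indexing to sidestep alignment concerns; the payload is a stand-in for the IonScript *):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main() {
    uint8_t code[64] = {0};
    uint8_t *returnAddr = code + 16;       // pretend a call returns here

    // Writer side (the invalidation epilogue): store the displacement in the
    // 4 bytes just before the return point, and the pointer at the target.
    const char *ionScript = "IonScript";   // stands in for the IonScript *
    int32_t disp = 32;                     // pointer lives at returnAddr + 32
    memcpy(returnAddr - sizeof(int32_t), &disp, sizeof(disp));
    memcpy(returnAddr + disp, &ionScript, sizeof(ionScript));

    // Reader side (what checkInvalidation's ((int32_t *) returnAddr)[-1]
    // does), written with memcpy to avoid unaligned access.
    int32_t readDisp;
    memcpy(&readDisp, returnAddr - sizeof(int32_t), sizeof(readDisp));
    const char *found;
    memcpy(&found, returnAddr + readDisp, sizeof(found));
    printf("%s\n", found);                 // prints IonScript
    return 0;
}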
--- a/js/src/ion/IonFrames.h +++ b/js/src/ion/IonFrames.h @@ -93,75 +93,75 @@ ScriptFromCalleeToken(CalleeToken token) class LSafepoint; // Two-tuple that lets you look up the safepoint entry given the // displacement of a call instruction within the JIT code. class SafepointIndex { // The displacement is the distance from the first byte of the JIT'd code // to the return address (of the call that the safepoint was generated for). - uint32 displacement_; + uint32_t displacement_; union { LSafepoint *safepoint_; // Offset to the start of the encoded safepoint in the safepoint stream. - uint32 safepointOffset_; + uint32_t safepointOffset_; }; mozilla::DebugOnly<bool> resolved; public: - SafepointIndex(uint32 displacement, LSafepoint *safepoint) + SafepointIndex(uint32_t displacement, LSafepoint *safepoint) : displacement_(displacement), safepoint_(safepoint), resolved(false) { } void resolve(); LSafepoint *safepoint() { JS_ASSERT(!resolved); return safepoint_; } - uint32 displacement() const { + uint32_t displacement() const { return displacement_; } - uint32 safepointOffset() const { + uint32_t safepointOffset() const { return safepointOffset_; } - void adjustDisplacement(uint32 offset) { + void adjustDisplacement(uint32_t offset) { JS_ASSERT(offset >= displacement_); displacement_ = offset; } inline SnapshotOffset snapshotOffset() const; inline bool hasSnapshotOffset() const; }; class MacroAssembler; // The OSI point is patched to a call instruction. Therefore, the // returnPoint for an OSI call is the address immediately following that // call instruction. The displacement of that point within the assembly // buffer is the |returnPointDisplacement|. class OsiIndex { - uint32 callPointDisplacement_; - uint32 snapshotOffset_; + uint32_t callPointDisplacement_; + uint32_t snapshotOffset_; public: - OsiIndex(uint32 callPointDisplacement, uint32 snapshotOffset) + OsiIndex(uint32_t callPointDisplacement, uint32_t snapshotOffset) : callPointDisplacement_(callPointDisplacement), snapshotOffset_(snapshotOffset) { } - uint32 returnPointDisplacement() const; - uint32 callPointDisplacement() const { + uint32_t returnPointDisplacement() const; + uint32_t callPointDisplacement() const { return callPointDisplacement_; } - uint32 snapshotOffset() const { + uint32_t snapshotOffset() const { return snapshotOffset_; } void fixUpOffset(MacroAssembler &masm); }; // The layout of an Ion frame on the C stack is roughly: // argN _ // ... \ - These are jsvals @@ -195,42 +195,42 @@ static const uintptr_t FRAMETYPE_MASK = // // Jump tables are big. To control the amount of jump tables we generate, each // platform chooses how to segregate stack size classes based on its // architecture. // // On some architectures, these jump tables are not used at all, or frame // size segregation is not needed. Thus, there is an option for a frame to not // have any frame size class, and to be totally dynamic. -static const uint32 NO_FRAME_SIZE_CLASS_ID = uint32(-1); +static const uint32_t NO_FRAME_SIZE_CLASS_ID = uint32_t(-1); class FrameSizeClass { - uint32 class_; + uint32_t class_; - explicit FrameSizeClass(uint32 class_) : class_(class_) + explicit FrameSizeClass(uint32_t class_) : class_(class_) { } public: FrameSizeClass() { } static FrameSizeClass None() { return FrameSizeClass(NO_FRAME_SIZE_CLASS_ID); } - static FrameSizeClass FromClass(uint32 class_) { + static FrameSizeClass FromClass(uint32_t class_) { return FrameSizeClass(class_); } // These functions are implemented in specific CodeGenerator-* files. 
- static FrameSizeClass FromDepth(uint32 frameDepth); + static FrameSizeClass FromDepth(uint32_t frameDepth); static FrameSizeClass ClassLimit(); - uint32 frameSize() const; + uint32_t frameSize() const; - uint32 classId() const { + uint32_t classId() const { JS_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID); return class_; } bool operator ==(const FrameSizeClass &other) const { return class_ == other.class_; } bool operator !=(const FrameSizeClass &other) const { @@ -244,18 +244,18 @@ struct ResumeFromException void *stackPointer; }; void HandleException(ResumeFromException *rfe); void MarkIonActivations(JSRuntime *rt, JSTracer *trc); void MarkIonCompilerRoots(JSTracer *trc); -static inline uint32 -MakeFrameDescriptor(uint32 frameSize, FrameType type) +static inline uint32_t +MakeFrameDescriptor(uint32_t frameSize, FrameType type) { return (frameSize << FRAMESIZE_SHIFT) | type; } } // namespace ion } // namespace js #if defined(JS_CPU_X86) || defined (JS_CPU_X64) @@ -275,32 +275,32 @@ GetTopIonJSScript(JSContext *cx, void **returnAddrOut = NULL); void GetPcScript(JSContext *cx, MutableHandleScript scriptRes, jsbytecode **pcRes); // Given a slot index, returns the offset, in bytes, of that slot from an // IonJSFrameLayout. Slot distances are uniform across architectures, however, // the distance does depend on the size of the frame header. -static inline int32 -OffsetOfFrameSlot(int32 slot) +static inline int32_t +OffsetOfFrameSlot(int32_t slot) { if (slot <= 0) return sizeof(IonJSFrameLayout) + -slot; return -(slot * STACK_SLOT_SIZE); } static inline uintptr_t -ReadFrameSlot(IonJSFrameLayout *fp, int32 slot) +ReadFrameSlot(IonJSFrameLayout *fp, int32_t slot) { return *(uintptr_t *)((char *)fp + OffsetOfFrameSlot(slot)); } static inline double -ReadFrameDoubleSlot(IonJSFrameLayout *fp, int32 slot) +ReadFrameDoubleSlot(IonJSFrameLayout *fp, int32_t slot) { return *(double *)((char *)fp + OffsetOfFrameSlot(slot)); } } /* namespace ion */ } /* namespace js */ #endif // jsion_frames_h__
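MakeFrameDescriptor() above packs a frame's size and its FrameType into a single word, and the frame-walking code recovers both with a shift and a mask. A tiny sketch of the round trip; the shift value here is an assumption for illustration (the real FRAMESIZE_SHIFT is defined elsewhere in IonFrames.h):

#include <stdint.h>
#include <stdio.h>

// Assumed layout: low 3 bits = frame type, remaining bits = frame size.
static const uintptr_t FRAMESIZE_SHIFT = 3;
static const uintptr_t FRAMETYPE_MASK = (1 << FRAMESIZE_SHIFT) - 1;

static inline uint32_t
MakeDescriptor(uint32_t frameSize, uint32_t type)
{
    return (frameSize << FRAMESIZE_SHIFT) | type;
}

int main() {
    uint32_t d = MakeDescriptor(0x40, 2);
    printf("size=%u type=%u\n",
           (unsigned)(d >> FRAMESIZE_SHIFT),   // recover the frame size
           (unsigned)(d & FRAMETYPE_MASK));    // recover the frame type
    return 0;
}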
--- a/js/src/ion/IonLinker.h
+++ b/js/src/ion/IonLinker.h
@@ -36,26 +36,26 @@ class Linker
         if (masm.oom())
             return fail(cx);
 
         JSC::ExecutablePool *pool;
         size_t bytesNeeded = masm.bytesNeeded() + sizeof(IonCode *) + CodeAlignment;
         if (bytesNeeded >= MAX_BUFFER_SIZE)
             return fail(cx);
 
-        uint8 *result = (uint8 *)comp->execAlloc()->alloc(bytesNeeded, &pool, JSC::ION_CODE);
+        uint8_t *result = (uint8_t *)comp->execAlloc()->alloc(bytesNeeded, &pool, JSC::ION_CODE);
         if (!result)
             return fail(cx);
 
         // The IonCode pointer will be stored right before the code buffer.
-        uint8 *codeStart = result + sizeof(IonCode *);
+        uint8_t *codeStart = result + sizeof(IonCode *);
 
         // Bump the code up to a nice alignment.
-        codeStart = (uint8 *)AlignBytes((uintptr_t)codeStart, CodeAlignment);
-        uint32 headerSize = codeStart - result;
+        codeStart = (uint8_t *)AlignBytes((uintptr_t)codeStart, CodeAlignment);
+        uint32_t headerSize = codeStart - result;
         IonCode *code = IonCode::New(cx, codeStart, bytesNeeded - headerSize, pool);
         if (!code)
             return NULL;
 
         code->copyFrom(masm);
         masm.link(code);
         return code;
     }
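The interesting line above is the alignment bump: AlignBytes() rounds the code start up to CodeAlignment, and headerSize absorbs both the IonCode * slot and the padding. A sketch of the usual round-up-to-power-of-two arithmetic such a helper performs (AlignUp is a stand-in name; the real AlignBytes lives in SpiderMonkey's utility headers):

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

// Round |v| up to the next multiple of |alignment| (a power of two).
static inline uintptr_t
AlignUp(uintptr_t v, uintptr_t alignment)
{
    assert((alignment & (alignment - 1)) == 0);  // power of two
    return (v + alignment - 1) & ~(alignment - 1);
}

int main() {
    uintptr_t buffer = 0x1000 + sizeof(void *);  // just past the IonCode * slot
    uintptr_t codeStart = AlignUp(buffer, 16);   // bump to 16-byte alignment
    printf("header size: %u bytes\n", (unsigned)(codeStart - 0x1000));  // 16
    return 0;
}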
--- a/js/src/ion/IonMacroAssembler.h +++ b/js/src/ion/IonMacroAssembler.h @@ -546,38 +546,38 @@ class MacroAssembler : public MacroAssem leaveSPSFrame(); MacroAssemblerSpecific::handleException(); // Doesn't actually emit code, but balances the leave() if (sps_) sps_->reenter(*this, InvalidReg); } // see above comment for what is returned - uint32 callIon(const Register &callee) { + uint32_t callIon(const Register &callee) { leaveSPSFrame(); MacroAssemblerSpecific::callIon(callee); - uint32 ret = currentOffset(); + uint32_t ret = currentOffset(); reenterSPSFrame(); return ret; } // see above comment for what is returned - uint32 callWithExitFrame(IonCode *target) { + uint32_t callWithExitFrame(IonCode *target) { leaveSPSFrame(); MacroAssemblerSpecific::callWithExitFrame(target); - uint32 ret = currentOffset(); + uint32_t ret = currentOffset(); reenterSPSFrame(); return ret; } // see above comment for what is returned - uint32 callWithExitFrame(IonCode *target, Register dynStack) { + uint32_t callWithExitFrame(IonCode *target, Register dynStack) { leaveSPSFrame(); MacroAssemblerSpecific::callWithExitFrame(target, dynStack); - uint32 ret = currentOffset(); + uint32_t ret = currentOffset(); reenterSPSFrame(); return ret; } private: // These two functions are helpers used around call sites throughout the // assembler. They are called from the above call wrappers to emit the // necessary instrumentation.
--- a/js/src/ion/IonSpewer.cpp +++ b/js/src/ion/IonSpewer.cpp @@ -20,17 +20,17 @@ using namespace js; using namespace js::ion; // IonSpewer singleton. static IonSpewer ionspewer; static bool LoggingChecked = false; -static uint32 LoggingBits = 0; +static uint32_t LoggingBits = 0; static const char *ChannelNames[] = { #define IONSPEW_CHANNEL(name) #name, IONSPEW_CHANNEL_LIST(IONSPEW_CHANNEL) #undef IONSPEW_CHANNEL }; @@ -230,17 +230,17 @@ ion::CheckLogging() EnableChannel(IonSpew_Safepoints); if (ContainsFlag(env, "pools")) EnableChannel(IonSpew_Pools); if (ContainsFlag(env, "cacheflush")) EnableChannel(IonSpew_CacheFlush); if (ContainsFlag(env, "logs")) EnableIonDebugLogging(); if (ContainsFlag(env, "all")) - LoggingBits = uint32(-1); + LoggingBits = uint32_t(-1); if (LoggingBits != 0) EnableIonDebugLogging(); IonSpewFile = stderr; } void @@ -313,27 +313,27 @@ ion::IonSpewHeader(IonSpewChannel channe fprintf(stderr, "[%s] ", ChannelNames[channel]); } bool ion::IonSpewEnabled(IonSpewChannel channel) { JS_ASSERT(LoggingChecked); - return LoggingBits & (1 << uint32(channel)); + return LoggingBits & (1 << uint32_t(channel)); } void ion::EnableChannel(IonSpewChannel channel) { JS_ASSERT(LoggingChecked); - LoggingBits |= (1 << uint32(channel)); + LoggingBits |= (1 << uint32_t(channel)); } void ion::DisableChannel(IonSpewChannel channel) { JS_ASSERT(LoggingChecked); - LoggingBits &= ~(1 << uint32(channel)); + LoggingBits &= ~(1 << uint32_t(channel)); } #endif /* DEBUG */
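The spewer above is a single uint32_t bitmask: channel n is enabled iff bit n is set, and "all" turns on every channel at once via uint32_t(-1). A self-contained sketch of the same scheme (channel names are illustrative):

#include <stdint.h>
#include <stdio.h>

enum Channel { Chan_Alias, Chan_Bailouts, Chan_RegAlloc, Chan_Count };

static uint32_t LoggingBits = 0;

static void EnableChannel(Channel c)  { LoggingBits |= (1 << uint32_t(c)); }
static void DisableChannel(Channel c) { LoggingBits &= ~(1 << uint32_t(c)); }
static bool Enabled(Channel c)        { return LoggingBits & (1 << uint32_t(c)); }

int main() {
    EnableChannel(Chan_Bailouts);
    LoggingBits = uint32_t(-1);          // "all": every bit set
    DisableChannel(Chan_RegAlloc);
    printf("%d %d\n", (int)Enabled(Chan_Bailouts), (int)Enabled(Chan_RegAlloc));  // 1 0
    return 0;
}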
--- a/js/src/ion/IonTypes.h
+++ b/js/src/ion/IonTypes.h
@@ -8,29 +8,20 @@
 #ifndef jsion_types_h_
 #define jsion_types_h_
 
 #include <jstypes.h>
 
 namespace js {
 namespace ion {
 
-typedef uint64_t uint64;
-typedef int64_t int64;
-typedef uint32_t uint32;
-typedef int32_t int32;
-typedef uint16_t uint16;
-typedef int16_t int16;
-typedef uint8_t uint8;
-typedef int8_t int8;
+typedef uint32_t SnapshotOffset;
+typedef uint32_t BailoutId;
 
-typedef uint32 SnapshotOffset;
-typedef uint32 BailoutId;
-
-static const SnapshotOffset INVALID_SNAPSHOT_OFFSET = uint32(-1);
+static const SnapshotOffset INVALID_SNAPSHOT_OFFSET = uint32_t(-1);
 
 // Different kinds of bailouts. When extending this enum, make sure to check
 // the bits reserved for bailout kinds in Bailouts.h
 enum BailoutKind
 {
     // A normal bailout triggered from type, shape, and assorted overflow
     // guards in the compiler.
     Bailout_Normal,
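This hunk is the heart of the patch: the ion-local shorthand typedefs (uint32, int8, and friends) are deleted outright, and the code uses the <stdint.h> names directly. Nothing changes semantically; in particular uint32_t(-1) remains a portable "invalid" sentinel, since unsigned conversion is defined to wrap modulo 2^32. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t SnapshotOffset;

// Unsigned conversion wraps modulo 2^32, so uint32_t(-1) is exactly
// 0xFFFFFFFF on every conforming platform.
static const SnapshotOffset INVALID_SNAPSHOT_OFFSET = uint32_t(-1);

int main() {
    printf("%u %d\n", (unsigned)INVALID_SNAPSHOT_OFFSET,
           INVALID_SNAPSHOT_OFFSET == 0xFFFFFFFFu);  // 4294967295 1
    return 0;
}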
--- a/js/src/ion/LIR-Common.h +++ b/js/src/ion/LIR-Common.h @@ -118,26 +118,26 @@ class LMoveGroup : public LInstructionHe const LMove &getMove(size_t i) const { return moves_[i]; } }; // Constant 32-bit integer. class LInteger : public LInstructionHelper<1, 0, 0> { - int32 i32_; + int32_t i32_; public: LIR_HEADER(Integer); - LInteger(int32 i32) + LInteger(int32_t i32) : i32_(i32) { } - int32 getValue() const { + int32_t getValue() const { return i32_; } }; // Constant pointer. class LPointer : public LInstructionHelper<1, 0, 0> { public: @@ -460,69 +460,69 @@ class LReturnFromCtor : public LInstruct static const size_t ValueIndex = 0; static const size_t ObjectIndex = BOX_PIECES; }; // Writes a typed argument for a function call to the frame's argument vector. class LStackArgT : public LInstructionHelper<0, 1, 0> { - uint32 argslot_; // Index into frame-scope argument vector. + uint32_t argslot_; // Index into frame-scope argument vector. public: LIR_HEADER(StackArgT); - LStackArgT(uint32 argslot, const LAllocation &arg) + LStackArgT(uint32_t argslot, const LAllocation &arg) : argslot_(argslot) { setOperand(0, arg); } MPassArg *mir() const { return this->mir_->toPassArg(); } - uint32 argslot() const { + uint32_t argslot() const { return argslot_; } const LAllocation *getArgument() { return getOperand(0); } }; // Writes an untyped argument for a function call to the frame's argument vector. class LStackArgV : public LInstructionHelper<0, BOX_PIECES, 0> { - uint32 argslot_; // Index into frame-scope argument vector. + uint32_t argslot_; // Index into frame-scope argument vector. public: LIR_HEADER(StackArgV); - LStackArgV(uint32 argslot) + LStackArgV(uint32_t argslot) : argslot_(argslot) { } - uint32 argslot() const { + uint32_t argslot() const { return argslot_; } }; // Common code for LIR descended from MCall. template <size_t Defs, size_t Operands, size_t Temps> class LJSCallInstructionHelper : public LCallInstructionHelper<Defs, Operands, Temps> { // Slot below which %esp should be adjusted to make the call. // Zero for a function without arguments. - uint32 argslot_; + uint32_t argslot_; public: - LJSCallInstructionHelper(uint32 argslot) + LJSCallInstructionHelper(uint32_t argslot) : argslot_(argslot) { } - uint32 argslot() const { + uint32_t argslot() const { return argslot_; } MCall *mir() const { return this->mir_->toCall(); } bool hasSingleTarget() const { return getSingleTarget() != NULL; @@ -530,36 +530,36 @@ class LJSCallInstructionHelper : public JSFunction *getSingleTarget() const { return mir()->getSingleTarget(); } // The number of stack arguments is the max between the number of formal // arguments and the number of actual arguments. The number of stack // argument includes the |undefined| padding added in case of underflow. // Does not include |this|. - uint32 numStackArgs() const { + uint32_t numStackArgs() const { JS_ASSERT(mir()->numStackArgs() >= 1); return mir()->numStackArgs() - 1; // |this| is not a formal argument. } // Does not include |this|. - uint32 numActualArgs() const { + uint32_t numActualArgs() const { return mir()->numActualArgs(); } typedef LJSCallInstructionHelper<Defs, Operands, Temps> JSCallHelper; }; // Generates a polymorphic callsite, wherein the function being called is // unknown and anticipated to vary. 
class LCallGeneric : public LJSCallInstructionHelper<BOX_PIECES, 1, 2> { public: LIR_HEADER(CallGeneric); - LCallGeneric(const LAllocation &func, uint32 argslot, + LCallGeneric(const LAllocation &func, uint32_t argslot, const LDefinition &nargsreg, const LDefinition &tmpobjreg) : JSCallHelper(argslot) { setOperand(0, func); setTemp(0, nargsreg); setTemp(1, tmpobjreg); } @@ -575,17 +575,17 @@ class LCallGeneric : public LJSCallInstr }; // Generates a hardcoded callsite for a known, non-native target. class LCallKnown : public LJSCallInstructionHelper<BOX_PIECES, 1, 1> { public: LIR_HEADER(CallKnown); - LCallKnown(const LAllocation &func, uint32 argslot, const LDefinition &tmpobjreg) + LCallKnown(const LAllocation &func, uint32_t argslot, const LDefinition &tmpobjreg) : JSCallHelper(argslot) { setOperand(0, func); setTemp(0, tmpobjreg); } const LAllocation *getFunction() { return getOperand(0); @@ -596,17 +596,17 @@ class LCallKnown : public LJSCallInstruc }; // Generates a hardcoded callsite for a known, native target. class LCallNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4> { public: LIR_HEADER(CallNative); - LCallNative(uint32 argslot, + LCallNative(uint32_t argslot, const LDefinition &argJSContext, const LDefinition &argUintN, const LDefinition &argVp, const LDefinition &tmpreg) : JSCallHelper(argslot) { // Registers used for callWithABI(). setTemp(0, argJSContext); setTemp(1, argUintN); setTemp(2, argVp); @@ -630,17 +630,17 @@ class LCallNative : public LJSCallInstru }; // Generates a hardcoded callsite for a known, DOM-native target. class LCallDOMNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 5> { public: LIR_HEADER(CallDOMNative); - LCallDOMNative(uint32 argslot, + LCallDOMNative(uint32_t argslot, const LDefinition &argJSContext, const LDefinition &argObj, const LDefinition &argPrivate, const LDefinition &argArgc, const LDefinition &argVp) : JSCallHelper(argslot) { setTemp(0, argJSContext); setTemp(1, argObj); setTemp(2, argPrivate); @@ -667,17 +667,17 @@ class LCallDOMNative : public LJSCallIns // Generates a polymorphic callsite for |new|, where |this| has not been // pre-allocated by the caller. class LCallConstructor : public LJSCallInstructionHelper<BOX_PIECES, 1, 0> { public: LIR_HEADER(CallConstructor); - LCallConstructor(const LAllocation &func, uint32 argslot) + LCallConstructor(const LAllocation &func, uint32_t argslot) : JSCallHelper(argslot) { setOperand(0, func); } const LAllocation *getFunction() { return getOperand(0); } @@ -1719,29 +1719,29 @@ class LStart : public LInstructionHelper }; // Passed the StackFrame address in the OsrFrameReg by SideCannon(). // Forwards this object to the LOsrValues for Value materialization. class LOsrEntry : public LInstructionHelper<1, 0, 0> { protected: Label label_; - uint32 frameDepth_; + uint32_t frameDepth_; public: LIR_HEADER(OsrEntry); LOsrEntry() : frameDepth_(0) { } - void setFrameDepth(uint32 depth) { + void setFrameDepth(uint32_t depth) { frameDepth_ = depth; } - uint32 getFrameDepth() { + uint32_t getFrameDepth() { return frameDepth_; } Label *label() { return &label_; } }; @@ -3097,17 +3097,17 @@ class LGuardClass : public LInstructionH class MPhi; // Phi is a pseudo-instruction that emits no code, and is an annotation for the // register allocator. Like its equivalent in MIR, phis are collected at the // top of blocks and are meant to be executed in parallel, choosing the input // corresponding to the predecessor taken in the control flow graph. 
class LPhi : public LInstruction { - uint32 numInputs_; + uint32_t numInputs_; LAllocation *inputs_; LDefinition def_; bool init(MIRGenerator *gen); LPhi(MPhi *mir); public:
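The numStackArgs() comment earlier in this file describes underflow padding: when a call passes fewer actuals than the callee's formals, the missing formals are filled with |undefined|, so the stack always carries max(formals, actuals) argument slots (excluding |this|). A trivial sketch of that count:

#include <stdio.h>

// Underflow padding: missing formals are filled with |undefined|, so the
// stack holds max(formals, actuals) arguments, excluding |this|.
static unsigned
NumStackArgs(unsigned nformals, unsigned nactuals)
{
    return nactuals > nformals ? nactuals : nformals;
}

int main() {
    printf("%u\n", NumStackArgs(3, 1));  // 1 actual + 2 undefined pads -> 3
    return 0;
}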
--- a/js/src/ion/LIR.cpp +++ b/js/src/ion/LIR.cpp @@ -21,17 +21,17 @@ LIRGraph::LIRGraph(MIRGraph *mir) argumentSlotCount_(0), entrySnapshot_(NULL), osrBlock_(NULL), mir_(*mir) { } bool -LIRGraph::addConstantToPool(const Value &v, uint32 *index) +LIRGraph::addConstantToPool(const Value &v, uint32_t *index) { *index = constantPool_.length(); return constantPool_.append(v); } bool LIRGraph::noteNeedsSafepoint(LInstruction *ins) { @@ -43,30 +43,30 @@ LIRGraph::noteNeedsSafepoint(LInstructio } Label * LBlock::label() { return begin()->toLabel()->label(); } -uint32 +uint32_t LBlock::firstId() { if (phis_.length()) { return phis_[0]->id(); } else { for (LInstructionIterator i(instructions_.begin()); i != instructions_.end(); i++) { if (i->id()) return i->id(); } } return 0; } -uint32 +uint32_t LBlock::lastId() { LInstruction *last = *instructions_.rbegin(); JS_ASSERT(last->id()); if (last->numDefs()) return last->getDef(last->numDefs() - 1)->virtualRegister(); return last->id(); }
--- a/js/src/ion/LIR.h +++ b/js/src/ion/LIR.h @@ -34,27 +34,27 @@ class LFloatReg; class LStackSlot; class LArgument; class LConstantIndex; class MBasicBlock; class MTableSwitch; class MIRGenerator; class MSnapshot; -static const uint32 MAX_VIRTUAL_REGISTERS = (1 << 21) - 1; -static const uint32 VREG_INCREMENT = 1; +static const uint32_t MAX_VIRTUAL_REGISTERS = (1 << 21) - 1; +static const uint32_t VREG_INCREMENT = 1; -static const uint32 THIS_FRAME_SLOT = 0; +static const uint32_t THIS_FRAME_SLOT = 0; #if defined(JS_NUNBOX32) # define BOX_PIECES 2 -static const uint32 VREG_TYPE_OFFSET = 0; -static const uint32 VREG_DATA_OFFSET = 1; -static const uint32 TYPE_INDEX = 0; -static const uint32 PAYLOAD_INDEX = 1; +static const uint32_t VREG_TYPE_OFFSET = 0; +static const uint32_t VREG_DATA_OFFSET = 1; +static const uint32_t TYPE_INDEX = 0; +static const uint32_t PAYLOAD_INDEX = 1; #elif defined(JS_PUNBOX64) # define BOX_PIECES 1 #else # error "Unknown!" #endif // Represents storage for an operand. For constants, the pointer is tagged // with a single bit, and the untagged pointer is a pointer to a Value. @@ -64,17 +64,17 @@ class LAllocation : public TempObject protected: static const uintptr_t TAG_BIT = 1; static const uintptr_t TAG_SHIFT = 0; static const uintptr_t TAG_MASK = 1 << TAG_SHIFT; static const uintptr_t KIND_BITS = 3; static const uintptr_t KIND_SHIFT = TAG_SHIFT + TAG_BIT; static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1; - static const uintptr_t DATA_BITS = (sizeof(uint32) * 8) - KIND_BITS - TAG_BIT; + static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS - TAG_BIT; static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS; static const uintptr_t DATA_MASK = (1 << DATA_BITS) - 1; public: enum Kind { USE, // Use of a virtual register, with physical allocation policy. CONSTANT_VALUE, // Constant js::Value. CONSTANT_INDEX, // Constant arbitrary index. @@ -85,30 +85,30 @@ class LAllocation : public TempObject ARGUMENT // Argument slot. 
}; protected: bool isTagged() const { return !!(bits_ & TAG_MASK); } - int32 data() const { - return int32(bits_) >> DATA_SHIFT; + int32_t data() const { + return int32_t(bits_) >> DATA_SHIFT; } - void setData(int32 data) { - JS_ASSERT(int32(data) <= int32(DATA_MASK)); + void setData(int32_t data) { + JS_ASSERT(int32_t(data) <= int32_t(DATA_MASK)); bits_ &= ~(DATA_MASK << DATA_SHIFT); bits_ |= (data << DATA_SHIFT); } - void setKindAndData(Kind kind, uint32 data) { - JS_ASSERT(int32(data) <= int32(DATA_MASK)); - bits_ = (uint32(kind) << KIND_SHIFT) | data << DATA_SHIFT; + void setKindAndData(Kind kind, uint32_t data) { + JS_ASSERT(int32_t(data) <= int32_t(DATA_MASK)); + bits_ = (uint32_t(kind) << KIND_SHIFT) | data << DATA_SHIFT; } - LAllocation(Kind kind, uint32 data) { + LAllocation(Kind kind, uint32_t data) { setKindAndData(kind, data); } explicit LAllocation(Kind kind) { setKindAndData(kind, 0); } public: LAllocation() : bits_(0) @@ -198,32 +198,32 @@ class LAllocation : public TempObject return bits_; } static void PrintAllocation(FILE *fp, const LAllocation *a); }; class LUse : public LAllocation { - static const uint32 POLICY_BITS = 3; - static const uint32 POLICY_SHIFT = 0; - static const uint32 POLICY_MASK = (1 << POLICY_BITS) - 1; - static const uint32 REG_BITS = 5; - static const uint32 REG_SHIFT = POLICY_SHIFT + POLICY_BITS; - static const uint32 REG_MASK = (1 << REG_BITS) - 1; + static const uint32_t POLICY_BITS = 3; + static const uint32_t POLICY_SHIFT = 0; + static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1; + static const uint32_t REG_BITS = 5; + static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS; + static const uint32_t REG_MASK = (1 << REG_BITS) - 1; // Whether the physical register for this operand may be reused for a def. - static const uint32 USED_AT_START_BITS = 1; - static const uint32 USED_AT_START_SHIFT = REG_SHIFT + REG_BITS; - static const uint32 USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1; + static const uint32_t USED_AT_START_BITS = 1; + static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS; + static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1; // Virtual registers get the remaining 20 bits. - static const uint32 VREG_BITS = DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS); - static const uint32 VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS; - static const uint32 VREG_MASK = (1 << VREG_BITS) - 1; + static const uint32_t VREG_BITS = DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS); + static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS; + static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1; public: enum Policy { // Input should be in a read-only register or stack slot. ANY, // Input must be in a read-only register. REGISTER, @@ -239,62 +239,62 @@ class LUse : public LAllocation // For snapshot inputs, indicates that the associated instruction will // write this input to its output register before bailing out. // The register allocator may thus allocate that output register, and // does not need to keep the virtual register alive (alternatively, // this may be treated as KEEPALIVE). RECOVERED_INPUT }; - void set(Policy policy, uint32 reg, bool usedAtStart) { + void set(Policy policy, uint32_t reg, bool usedAtStart) { setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) | ((usedAtStart ? 
1 : 0) << USED_AT_START_SHIFT)); } public: - LUse(uint32 vreg, Policy policy, bool usedAtStart = false) { + LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) { set(policy, 0, usedAtStart); setVirtualRegister(vreg); } LUse(Policy policy, bool usedAtStart = false) { set(policy, 0, usedAtStart); } LUse(Register reg, bool usedAtStart = false) { set(FIXED, reg.code(), usedAtStart); } LUse(FloatRegister reg, bool usedAtStart = false) { set(FIXED, reg.code(), usedAtStart); } - LUse(Register reg, uint32 virtualRegister) { + LUse(Register reg, uint32_t virtualRegister) { set(FIXED, reg.code(), false); setVirtualRegister(virtualRegister); } - LUse(FloatRegister reg, uint32 virtualRegister) { + LUse(FloatRegister reg, uint32_t virtualRegister) { set(FIXED, reg.code(), false); setVirtualRegister(virtualRegister); } - void setVirtualRegister(uint32 index) { + void setVirtualRegister(uint32_t index) { JS_STATIC_ASSERT(VREG_MASK <= MAX_VIRTUAL_REGISTERS); JS_ASSERT(index < VREG_MASK); - uint32 old = data() & ~(VREG_MASK << VREG_SHIFT); + uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT); setData(old | (index << VREG_SHIFT)); } Policy policy() const { Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK); return policy; } - uint32 virtualRegister() const { - uint32 index = (data() >> VREG_SHIFT) & VREG_MASK; + uint32_t virtualRegister() const { + uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK; return index; } - uint32 registerCode() const { + uint32_t registerCode() const { JS_ASSERT(policy() == FIXED); return (data() >> REG_SHIFT) & REG_MASK; } bool isFixedRegister() const { return policy() == FIXED; } bool usedAtStart() const { return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK); @@ -323,92 +323,92 @@ class LFloatReg : public LAllocation FloatRegister reg() const { return FloatRegister::FromCode(data()); } }; // Arbitrary constant index. class LConstantIndex : public LAllocation { - explicit LConstantIndex(uint32 index) + explicit LConstantIndex(uint32_t index) : LAllocation(CONSTANT_INDEX, index) { } public: // Used as a placeholder for inputs that can be ignored. static LConstantIndex Bogus() { return LConstantIndex(0); } - static LConstantIndex FromIndex(uint32 index) { + static LConstantIndex FromIndex(uint32_t index) { return LConstantIndex(index); } - uint32 index() const { + uint32_t index() const { return data(); } }; // Stack slots are indexes into the stack, given that each slot is size // STACK_SLOT_SIZE. class LStackSlot : public LAllocation { public: - explicit LStackSlot(uint32 slot, bool isDouble = false) + explicit LStackSlot(uint32_t slot, bool isDouble = false) : LAllocation(isDouble ? DOUBLE_SLOT : STACK_SLOT, slot) { } bool isDouble() const { return kind() == DOUBLE_SLOT; } - uint32 slot() const { + uint32_t slot() const { return data(); } }; // Arguments are reverse indexes into the stack, and like LStackSlot, each // index is measured in increments of STACK_SLOT_SIZE. class LArgument : public LAllocation { public: - explicit LArgument(int32 index) + explicit LArgument(int32_t index) : LAllocation(ARGUMENT, index) { } - int32 index() const { + int32_t index() const { return data(); } }; // Represents storage for a definition. class LDefinition { // Bits containing policy, type, and virtual register. - uint32 bits_; + uint32_t bits_; // Before register allocation, this optionally contains a fixed policy. // Register allocation assigns this field to a physical policy if none is // preset. 
// // Right now, pre-allocated outputs are limited to the following: // * Physical argument stack slots. // * Physical registers. LAllocation output_; - static const uint32 TYPE_BITS = 3; - static const uint32 TYPE_SHIFT = 0; - static const uint32 TYPE_MASK = (1 << TYPE_BITS) - 1; - static const uint32 POLICY_BITS = 2; - static const uint32 POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS; - static const uint32 POLICY_MASK = (1 << POLICY_BITS) - 1; + static const uint32_t TYPE_BITS = 3; + static const uint32_t TYPE_SHIFT = 0; + static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1; + static const uint32_t POLICY_BITS = 2; + static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS; + static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1; - static const uint32 VREG_BITS = (sizeof(uint32) * 8) - (POLICY_BITS + TYPE_BITS); - static const uint32 VREG_SHIFT = POLICY_SHIFT + POLICY_BITS; - static const uint32 VREG_MASK = (1 << VREG_BITS) - 1; + static const uint32_t VREG_BITS = (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS); + static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS; + static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1; public: // Note that definitions, by default, are always allocated a register, // unless the policy specifies that an input can be re-used and that input // is a stack slot. enum Policy { // A random register of an appropriate class will be assigned. DEFAULT, @@ -441,37 +441,37 @@ class LDefinition // register, as both will be tracked as a single gcthing. TYPE, PAYLOAD #else BOX // Joined box, for punbox systems. (GPR, gcthing) #endif }; - void set(uint32 index, Type type, Policy policy) { + void set(uint32_t index, Type type, Policy policy) { JS_STATIC_ASSERT(MAX_VIRTUAL_REGISTERS <= VREG_MASK); bits_ = (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT); } public: - LDefinition(uint32 index, Type type, Policy policy = DEFAULT) { + LDefinition(uint32_t index, Type type, Policy policy = DEFAULT) { set(index, type, policy); } LDefinition(Type type, Policy policy = DEFAULT) { set(0, type, policy); } LDefinition(Type type, const LAllocation &a) : output_(a) { set(0, type, PRESET); } - LDefinition(uint32 index, Type type, const LAllocation &a) + LDefinition(uint32_t index, Type type, const LAllocation &a) : output_(a) { set(index, type, PRESET); } LDefinition() : bits_(0) { } @@ -480,47 +480,47 @@ class LDefinition } Policy policy() const { return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK); } Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); } - uint32 virtualRegister() const { + uint32_t virtualRegister() const { return (bits_ >> VREG_SHIFT) & VREG_MASK; } LAllocation *output() { return &output_; } const LAllocation *output() const { return &output_; } bool isPreset() const { return policy() == PRESET; } bool isBogusTemp() const { return isPreset() && output()->isConstantIndex(); } - void setVirtualRegister(uint32 index) { + void setVirtualRegister(uint32_t index) { JS_ASSERT(index < VREG_MASK); bits_ &= ~(VREG_MASK << VREG_SHIFT); bits_ |= index << VREG_SHIFT; } void setOutput(const LAllocation &a) { output_ = a; if (!a.isUse()) { bits_ &= ~(POLICY_MASK << POLICY_SHIFT); bits_ |= PRESET << POLICY_SHIFT; } } - void setReusedInput(uint32 operand) { + void setReusedInput(uint32_t operand) { output_ = LConstantIndex::FromIndex(operand); } - uint32 getReusedInput() const { + uint32_t getReusedInput() const { JS_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT); return output_.toConstantIndex()->index(); } static 
inline Type TypeFrom(MIRType type) { switch (type) { case MIRType_Boolean: case MIRType_Int32: @@ -556,17 +556,17 @@ class LDefinition class LSnapshot; class LSafepoint; class LInstructionVisitor; class LInstruction : public TempObject, public InlineListNode<LInstruction> { - uint32 id_; + uint32_t id_; // This snapshot could be set after a ResumePoint. It is used to restart // from the resume point pc. LSnapshot *snapshot_; // Structure capturing the set of stack slots and registers which are known // to hold either gcthings or Values. LSafepoint *safepoint_; @@ -619,20 +619,20 @@ class LInstruction // register is an LUse with a TEMPORARY policy, or a fixed register. virtual size_t numTemps() const = 0; virtual LDefinition *getTemp(size_t index) = 0; virtual void setTemp(size_t index, const LDefinition &a) = 0; virtual bool isCall() const { return false; }; - uint32 id() const { + uint32_t id() const { return id_; } - void setId(uint32 id) { + void setId(uint32_t id) { JS_ASSERT(!id_); JS_ASSERT(id); id_ = id; } LSnapshot *snapshot() const { return snapshot_; } LSafepoint *safepoint() const { @@ -768,18 +768,18 @@ class LBlock : public TempObject } void insertAfter(LInstruction *at, LInstruction *ins) { instructions_.insertAfter(at, ins); } void insertBefore(LInstruction *at, LInstruction *ins) { JS_ASSERT(!at->isLabel()); instructions_.insertBefore(at, ins); } - uint32 firstId(); - uint32 lastId(); + uint32_t firstId(); + uint32_t lastId(); Label *label(); LMoveGroup *getEntryMoveGroup(); LMoveGroup *getExitMoveGroup(); }; template <size_t Defs, size_t Operands, size_t Temps> class LInstructionHelper : public LInstruction { @@ -844,17 +844,17 @@ class LCallInstructionHelper : public LI // An LSnapshot is the reflection of an MResumePoint in LIR. Unlike MResumePoints, // they cannot be shared, as they are filled in by the register allocator in // order to capture the precise low-level stack state in between an // instruction's input and output. During code generation, LSnapshots are // compressed and saved in the compiled script. class LSnapshot : public TempObject { private: - uint32 numSlots_; + uint32_t numSlots_; LAllocation *slots_; MResumePoint *mir_; SnapshotOffset snapshotOffset_; BailoutId bailoutId_; BailoutKind bailoutKind_; LSnapshot(MResumePoint *mir, BailoutKind kind); bool init(MIRGenerator *gen); @@ -924,46 +924,46 @@ struct SafepointNunboxEntry { { } }; class LSafepoint : public TempObject { typedef SafepointNunboxEntry NunboxEntry; public: - typedef Vector<uint32, 0, IonAllocPolicy> SlotList; + typedef Vector<uint32_t, 0, IonAllocPolicy> SlotList; typedef Vector<NunboxEntry, 0, IonAllocPolicy> NunboxList; private: // The set of registers which are live after the safepoint. This is empty // for instructions marked as calls. RegisterSet liveRegs_; // The set of registers which contain gcthings. GeneralRegisterSet gcRegs_; // Offset to a position in the safepoint stream, or // INVALID_SAFEPOINT_OFFSET. - uint32 safepointOffset_; + uint32_t safepointOffset_; // Assembler buffer displacement to OSI point's call location. - uint32 osiCallPointOffset_; + uint32_t osiCallPointOffset_; // List of stack slots which have gc pointers. SlotList gcSlots_; // List of stack slots which have Values. SlotList valueSlots_; #ifdef JS_NUNBOX32 // List of registers which contain pieces of values. NunboxList nunboxParts_; // Number of nunboxParts which are not completely filled in. 
- uint32 partialNunboxes_; + uint32_t partialNunboxes_; #elif JS_PUNBOX64 // List of registers which contain values. GeneralRegisterSet valueRegs_; #endif public: LSafepoint() : safepointOffset_(INVALID_SAFEPOINT_OFFSET) @@ -979,17 +979,17 @@ class LSafepoint : public TempObject return liveRegs_; } void addGcRegister(Register reg) { gcRegs_.addUnchecked(reg); } GeneralRegisterSet gcRegs() const { return gcRegs_; } - bool addGcSlot(uint32 slot) { + bool addGcSlot(uint32_t slot) { return gcSlots_.append(slot); } SlotList &gcSlots() { return gcSlots_; } void addGcPointer(LAllocation alloc) { if (alloc.isRegister()) @@ -1007,80 +1007,80 @@ class LSafepoint : public TempObject return true; } return false; } JS_ASSERT(alloc.isArgument()); return true; } - bool addValueSlot(uint32 slot) { + bool addValueSlot(uint32_t slot) { return valueSlots_.append(slot); } SlotList &valueSlots() { return valueSlots_; } - bool hasValueSlot(uint32 slot) { + bool hasValueSlot(uint32_t slot) { for (size_t i = 0; i < valueSlots_.length(); i++) { if (valueSlots_[i] == slot) return true; } return false; } #ifdef JS_NUNBOX32 bool addNunboxParts(LAllocation type, LAllocation payload) { return nunboxParts_.append(NunboxEntry(type, payload)); } - bool addNunboxType(uint32 typeVreg, LAllocation type) { + bool addNunboxType(uint32_t typeVreg, LAllocation type) { for (size_t i = 0; i < nunboxParts_.length(); i++) { if (nunboxParts_[i].type == type) return true; if (nunboxParts_[i].type == LUse(LUse::ANY, typeVreg)) { nunboxParts_[i].type = type; partialNunboxes_--; return true; } } partialNunboxes_++; // vregs for nunbox pairs are adjacent, with the type coming first. - uint32 payloadVreg = typeVreg + 1; + uint32_t payloadVreg = typeVreg + 1; return nunboxParts_.append(NunboxEntry(type, LUse(payloadVreg, LUse::ANY))); } bool hasNunboxType(LAllocation type) { if (type.isArgument()) return true; if (type.isStackSlot() && hasValueSlot(type.toStackSlot()->slot() + 1)) return true; for (size_t i = 0; i < nunboxParts_.length(); i++) { if (nunboxParts_[i].type == type) return true; } return false; } - bool addNunboxPayload(uint32 payloadVreg, LAllocation payload) { + bool addNunboxPayload(uint32_t payloadVreg, LAllocation payload) { for (size_t i = 0; i < nunboxParts_.length(); i++) { if (nunboxParts_[i].payload == payload) return true; if (nunboxParts_[i].payload == LUse(LUse::ANY, payloadVreg)) { partialNunboxes_--; nunboxParts_[i].payload = payload; return true; } } partialNunboxes_++; // vregs for nunbox pairs are adjacent, with the type coming first. 
- uint32 typeVreg = payloadVreg - 1; + uint32_t typeVreg = payloadVreg - 1; return nunboxParts_.append(NunboxEntry(LUse(typeVreg, LUse::ANY), payload)); } bool hasNunboxPayload(LAllocation payload) { if (payload.isArgument()) return true; if (payload.isStackSlot() && hasValueSlot(payload.toStackSlot()->slot())) return true; @@ -1090,17 +1090,17 @@ class LSafepoint : public TempObject } return false; } NunboxList &nunboxParts() { return nunboxParts_; } - uint32 partialNunboxes() { + uint32_t partialNunboxes() { return partialNunboxes_; } #elif JS_PUNBOX64 void addValueRegister(Register reg) { valueRegs_.add(reg); } @@ -1111,17 +1111,17 @@ class LSafepoint : public TempObject bool addBoxedValue(LAllocation alloc) { if (alloc.isRegister()) { Register reg = alloc.toRegister().gpr(); if (!valueRegs().has(reg)) addValueRegister(reg); return true; } if (alloc.isStackSlot()) { - uint32 slot = alloc.toStackSlot()->slot(); + uint32_t slot = alloc.toStackSlot()->slot(); for (size_t i = 0; i < valueSlots().length(); i++) { if (valueSlots()[i] == slot) return true; } return addValueSlot(slot); } JS_ASSERT(alloc.isArgument()); return true; @@ -1136,33 +1136,33 @@ class LSafepoint : public TempObject return true; } #endif // JS_PUNBOX64 bool encoded() const { return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; } - uint32 offset() const { + uint32_t offset() const { JS_ASSERT(encoded()); return safepointOffset_; } - void setOffset(uint32 offset) { + void setOffset(uint32_t offset) { safepointOffset_ = offset; } - uint32 osiReturnPointOffset() const { + uint32_t osiReturnPointOffset() const { // In general, pointer arithmetic on code is bad, but in this case, // getting the return address from a call instruction, stepping over pools // would be wrong. return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize(); } - uint32 osiCallPointOffset() const { + uint32_t osiCallPointOffset() const { return osiCallPointOffset_; } - void setOsiCallPointOffset(uint32 osiCallPointOffset) { + void setOsiCallPointOffset(uint32_t osiCallPointOffset) { JS_ASSERT(!osiCallPointOffset_); osiCallPointOffset_ = osiCallPointOffset; } void fixupOffset(MacroAssembler *masm) { osiCallPointOffset_ = masm->actualOffset(osiCallPointOffset_); safepointOffset_ = masm->actualOffset(safepointOffset_); } }; @@ -1230,23 +1230,23 @@ public: }; class LIRGraph { Vector<LBlock *, 16, IonAllocPolicy> blocks_; Vector<HeapValue, 0, IonAllocPolicy> constantPool_; Vector<LInstruction *, 0, IonAllocPolicy> safepoints_; Vector<LInstruction *, 0, IonAllocPolicy> nonCallSafepoints_; - uint32 numVirtualRegisters_; - uint32 numInstructions_; + uint32_t numVirtualRegisters_; + uint32_t numInstructions_; // Number of stack slots needed for local spills. - uint32 localSlotCount_; + uint32_t localSlotCount_; // Number of stack slots needed for argument construction for calls. - uint32 argumentSlotCount_; + uint32_t argumentSlotCount_; // Snapshot taken before any LIR has been lowered. LSnapshot *entrySnapshot_; // LBlock containing LOsrEntry, or NULL. 
LBlock *osrBlock_; MIRGraph &mir_; @@ -1258,53 +1258,53 @@ class LIRGraph return mir_; } size_t numBlocks() const { return blocks_.length(); } LBlock *getBlock(size_t i) const { return blocks_[i]; } - uint32 numBlockIds() const { + uint32_t numBlockIds() const { return mir_.numBlockIds(); } bool addBlock(LBlock *block) { return blocks_.append(block); } - uint32 getVirtualRegister() { + uint32_t getVirtualRegister() { numVirtualRegisters_ += VREG_INCREMENT; return numVirtualRegisters_; } - uint32 numVirtualRegisters() const { + uint32_t numVirtualRegisters() const { // Virtual registers are 1-based, not 0-based, so add one as a // convenience for 0-based arrays. return numVirtualRegisters_ + 1; } - uint32 getInstructionId() { + uint32_t getInstructionId() { return numInstructions_++; } - uint32 numInstructions() const { + uint32_t numInstructions() const { return numInstructions_; } - void setLocalSlotCount(uint32 localSlotCount) { + void setLocalSlotCount(uint32_t localSlotCount) { localSlotCount_ = localSlotCount; } - uint32 localSlotCount() const { + uint32_t localSlotCount() const { return localSlotCount_; } - void setArgumentSlotCount(uint32 argumentSlotCount) { + void setArgumentSlotCount(uint32_t argumentSlotCount) { argumentSlotCount_ = argumentSlotCount; } - uint32 argumentSlotCount() const { + uint32_t argumentSlotCount() const { return argumentSlotCount_; } - uint32 totalSlotCount() const { + uint32_t totalSlotCount() const { return localSlotCount() + (argumentSlotCount() * sizeof(Value) / STACK_SLOT_SIZE); } - bool addConstantToPool(const Value &v, uint32 *index); + bool addConstantToPool(const Value &v, uint32_t *index); size_t numConstants() const { return constantPool_.length(); } HeapValue *constantPool() { return &constantPool_[0]; } const HeapValue &getConstant(size_t index) const { return constantPool_[index];
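LAllocation, LUse, and LDefinition above all pack several small fields into one 32-bit word with shift/mask constants; note that data() deliberately casts through int32_t so the arithmetic right shift sign-extends a negative payload. A stripped-down sketch of the same packing (the field widths mirror LAllocation's tag/kind split, but the demo is not the real class):

#include <stdint.h>
#include <stdio.h>

// Mirror of the packing scheme: [ data | kind (3 bits) | tag (1 bit) ].
static const uintptr_t TAG_BIT = 1;
static const uintptr_t KIND_BITS = 3;
static const uintptr_t KIND_SHIFT = TAG_BIT;
static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;
static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;

static uint32_t
Pack(uint32_t kind, uint32_t data)
{
    return (kind << KIND_SHIFT) | (data << DATA_SHIFT);
}

int main() {
    uint32_t bits = Pack(5, 1234);
    uint32_t kind = (bits >> KIND_SHIFT) & KIND_MASK;
    // A signed arithmetic shift sign-extends negative payloads, which is
    // why LAllocation::data() casts bits_ through int32_t before shifting.
    int32_t data = int32_t(bits) >> DATA_SHIFT;
    printf("kind=%u data=%d\n", (unsigned)kind, (int)data);  // kind=5 data=1234
    return 0;
}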
--- a/js/src/ion/LinearScan.cpp +++ b/js/src/ion/LinearScan.cpp @@ -492,17 +492,17 @@ LinearScanAllocator::isSpilledAt(LiveInt return interval->getAllocation() == reg->canonicalSpill(); } bool LinearScanAllocator::populateSafepoints() { size_t firstSafepoint = 0; - for (uint32 i = 0; i < vregs.numVirtualRegisters(); i++) { + for (uint32_t i = 0; i < vregs.numVirtualRegisters(); i++) { LinearScanVirtualRegister *reg = &vregs[i]; if (!reg->def() || (!IsTraceable(reg) && !IsNunbox(reg))) continue; firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint); if (firstSafepoint >= graph.numSafepoints()) break; @@ -582,18 +582,18 @@ LinearScanAllocator::populateSafepoints( if (payloadAlloc->isArgument()) continue; if (isSpilledAt(typeInterval, inputOf(ins)) && isSpilledAt(payloadInterval, inputOf(ins))) { // These two components of the Value are spilled // contiguously, so simply keep track of the base slot. - uint32 payloadSlot = payload->canonicalSpillSlot(); - uint32 slot = BaseOfNunboxSlot(LDefinition::PAYLOAD, payloadSlot); + uint32_t payloadSlot = payload->canonicalSpillSlot(); + uint32_t slot = BaseOfNunboxSlot(LDefinition::PAYLOAD, payloadSlot); if (!safepoint->addValueSlot(slot)) return false; } if (!ins->isCall() && (!isSpilledAt(typeInterval, inputOf(ins)) || payloadAlloc->isGeneralReg())) { // Either the payload is on the stack but the type is @@ -758,18 +758,18 @@ LinearScanAllocator::assign(LAllocation // it at its definition. reg->setSpillAtDefinition(outputOf(reg->ins())); } else { reg->setCanonicalSpill(current->getAllocation()); // If this spill is inside a loop, and the definition is outside // the loop, instead move the spill to outside the loop. InstructionData *other = &insData[current->start()]; - uint32 loopDepthAtDef = reg->block()->mir()->loopDepth(); - uint32 loopDepthAtSpill = other->block()->mir()->loopDepth(); + uint32_t loopDepthAtDef = reg->block()->mir()->loopDepth(); + uint32_t loopDepthAtSpill = other->block()->mir()->loopDepth(); if (loopDepthAtSpill > loopDepthAtDef) reg->setSpillAtDefinition(outputOf(reg->ins())); } } active.pushBack(current); return true; @@ -782,17 +782,17 @@ LinearScanAllocator::otherHalfOfNunbox(V signed offset = OffsetToOtherHalfOfNunbox(vreg->type()); LinearScanVirtualRegister *other = &vregs[vreg->def()->virtualRegister() + offset]; AssertTypesFormANunbox(vreg->type(), other->type()); return other; } #endif -uint32 +uint32_t LinearScanAllocator::allocateSlotFor(const LiveInterval *interval) { LinearScanVirtualRegister *reg = &vregs[interval->vreg()]; SlotList *freed; if (reg->type() == LDefinition::DOUBLE || IsNunbox(reg)) freed = &finishedDoubleSlots_; else @@ -838,17 +838,17 @@ LinearScanAllocator::spill() LinearScanVirtualRegister *reg = &vregs[current->vreg()]; if (reg->canonicalSpill()) { IonSpew(IonSpew_RegAlloc, " Allocating canonical spill location"); return assign(*reg->canonicalSpill()); } - uint32 stackSlot; + uint32_t stackSlot; #if defined JS_NUNBOX32 if (IsNunbox(reg)) { LinearScanVirtualRegister *other = otherHalfOfNunbox(reg); if (other->canonicalSpill()) { // The other half of this nunbox already has a spill slot. To // ensure the Value is spilled contiguously, use the other half (it // was allocated double-wide). 
@@ -1002,17 +1002,17 @@ LinearScanAllocator::findBestFreeRegiste AnyRegister hintReg = other->getAllocation()->toRegister(); if (freeUntilPos[hintReg.code()] > hint->pos()) bestCode = hintReg.code(); } } if (bestCode == AnyRegister::Invalid) { // If all else fails, search freeUntilPos for largest value - for (uint32 i = 0; i < AnyRegister::Total; i++) { + for (uint32_t i = 0; i < AnyRegister::Total; i++) { if (freeUntilPos[i] == CodePosition::MIN) continue; if (bestCode == AnyRegister::Invalid || freeUntilPos[i] > freeUntilPos[bestCode]) bestCode = AnyRegister::Code(i); } } if (bestCode != AnyRegister::Invalid)
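The fallback path in findBestFreeRegister above is an argmax: among registers that are free at all, pick the one whose freeUntilPos is largest, i.e. the register that stays free the longest. A standalone sketch over plain arrays (hypothetical helper; the patch iterates AnyRegister codes and CodePosition values, with 0 here standing in for CodePosition::MIN):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    const int INVALID = -1;

    static int bestFreeRegister(const uint32_t *freeUntilPos, size_t total) {
        int best = INVALID;
        for (size_t i = 0; i < total; i++) {
            if (freeUntilPos[i] == 0)          // not free at all: skip
                continue;
            if (best == INVALID || freeUntilPos[i] > freeUntilPos[size_t(best)])
                best = int(i);
        }
        return best;
    }

    int main() {
        uint32_t freeUntil[4] = { 0, 12, 40, 7 };  // register 2 stays free longest
        assert(bestFreeRegister(freeUntil, 4) == 2);
        return 0;
    }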
--- a/js/src/ion/LinearScan.h +++ b/js/src/ion/LinearScan.h @@ -97,17 +97,17 @@ class LinearScanAllocator : public LiveR bool allocateRegisters(); bool resolveControlFlow(); bool reifyAllocations(); bool populateSafepoints(); // Optimization for the UnsortedQueue. void enqueueVirtualRegisterIntervals(); - uint32 allocateSlotFor(const LiveInterval *interval); + uint32_t allocateSlotFor(const LiveInterval *interval); bool splitInterval(LiveInterval *interval, CodePosition pos); bool splitBlockingIntervals(LAllocation allocation); bool assign(LAllocation allocation); bool spill(); void freeAllocation(LiveInterval *interval, LAllocation *alloc); void finishInterval(LiveInterval *interval); AnyRegister::Code findBestFreeRegister(CodePosition *freeUntil); AnyRegister::Code findBestBlockedRegister(CodePosition *nextUsed);
--- a/js/src/ion/LiveRangeAllocator.cpp +++ b/js/src/ion/LiveRangeAllocator.cpp @@ -390,17 +390,17 @@ LiveRangeAllocator<VREG>::init() if (mir->shouldCancel("LSRA create data structures (main loop)")) return false; LBlock *block = graph.getBlock(i); for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) { for (size_t j = 0; j < ins->numDefs(); j++) { LDefinition *def = ins->getDef(j); if (def->policy() != LDefinition::PASSTHROUGH) { - uint32 reg = def->virtualRegister(); + uint32_t reg = def->virtualRegister(); if (!vregs[reg].init(reg, block, *ins, def, /* isTemp */ false)) return false; } } for (size_t j = 0; j < ins->numTemps(); j++) { LDefinition *def = ins->getTemp(j); if (def->isBogusTemp()) @@ -473,17 +473,17 @@ LiveRangeAllocator<VREG>::buildLivenessI } // Add successor phis if (mblock->successorWithPhis()) { LBlock *phiSuccessor = mblock->successorWithPhis()->lir(); for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) { LPhi *phi = phiSuccessor->getPhi(j); LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor()); - uint32 reg = use->toUse()->virtualRegister(); + uint32_t reg = use->toUse()->virtualRegister(); live->insert(reg); } } // Variables are assumed alive for the entire block, a define shortens // the interval to the point of definition. for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) { if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
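The buildLivenessInfo hunk above encodes the usual backward liveness scheme: every variable live out of a block (including operands feeding successor phis) first gets a range spanning the whole block via addRangeAtHead, and a definition encountered while walking the block then trims that range to start at the definition. A toy sketch of the trim step (hypothetical Interval type with the same shape as LiveInterval's ranges):

    #include <cassert>
    #include <cstdint>

    struct Interval {
        uint32_t from, to;                 // [from, to) in instruction positions
        void setFrom(uint32_t f) { from = f; }
    };

    int main() {
        Interval v = { /* blockStart */ 10, /* blockEnd */ 20 };  // live-in assumption
        v.setFrom(14);                     // a definition at position 14 shortens it
        assert(v.from == 14 && v.to == 20);
        return 0;
    }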
--- a/js/src/ion/LiveRangeAllocator.h +++ b/js/src/ion/LiveRangeAllocator.h @@ -50,32 +50,32 @@ class Requirement // Only useful as a hint, encodes where the fixed requirement is used to // avoid allocating a fixed register too early. Requirement(LAllocation fixed, CodePosition at) : kind_(FIXED), allocation_(fixed), position_(at) { } - Requirement(uint32 vreg, CodePosition at) + Requirement(uint32_t vreg, CodePosition at) : kind_(SAME_AS_OTHER), allocation_(LUse(vreg, LUse::ANY)), position_(at) { } Kind kind() const { return kind_; } LAllocation allocation() const { JS_ASSERT(!allocation_.isUse()); return allocation_; } - uint32 virtualRegister() const { + uint32_t virtualRegister() const { JS_ASSERT(allocation_.isUse()); return allocation_.toUse()->virtualRegister(); } CodePosition pos() const { return position_; } @@ -177,32 +177,32 @@ class LiveInterval // The end of this range, exclusive. CodePosition to; }; private: Vector<Range, 1, IonAllocPolicy> ranges_; LAllocation alloc_; - uint32 vreg_; - uint32 index_; + uint32_t vreg_; + uint32_t index_; Requirement requirement_; Requirement hint_; InlineForwardList<UsePosition> uses_; size_t lastProcessedRange_; public: - LiveInterval(uint32 vreg, uint32 index) + LiveInterval(uint32_t vreg, uint32_t index) : vreg_(vreg), index_(index), lastProcessedRange_(size_t(-1)) { } - LiveInterval(uint32 index) + LiveInterval(uint32_t index) : vreg_(UINT32_MAX), index_(index), lastProcessedRange_(size_t(-1)) { } bool addRange(CodePosition from, CodePosition to); bool addRangeAtHead(CodePosition from, CodePosition to); void setFrom(CodePosition from); @@ -242,24 +242,24 @@ class LiveInterval return &alloc_; } void setAllocation(LAllocation alloc) { alloc_ = alloc; } bool hasVreg() const { return vreg_ != UINT32_MAX; } - uint32 vreg() const { + uint32_t vreg() const { JS_ASSERT(hasVreg()); return vreg_; } - uint32 index() const { + uint32_t index() const { return index_; } - void setIndex(uint32 index) { + void setIndex(uint32_t index) { index_ = index; } Requirement *requirement() { return &requirement_; } void setRequirement(const Requirement &requirement) { // A SAME_AS_OTHER requirement complicates regalloc too much; it // should only be used as hint. @@ -303,38 +303,38 @@ class LiveInterval /* * Represents all of the register allocation state associated with a virtual * register, including all associated intervals and pointers to relevant LIR * structures. */ class VirtualRegister { - uint32 id_; + uint32_t id_; LBlock *block_; LInstruction *ins_; LDefinition *def_; Vector<LiveInterval *, 1, IonAllocPolicy> intervals_; // Whether def_ is a temp or an output. bool isTemp_ : 1; public: - bool init(uint32 id, LBlock *block, LInstruction *ins, LDefinition *def, bool isTemp) { + bool init(uint32_t id, LBlock *block, LInstruction *ins, LDefinition *def, bool isTemp) { id_ = id; block_ = block; ins_ = ins; def_ = def; isTemp_ = isTemp; LiveInterval *initial = new LiveInterval(def->virtualRegister(), 0); if (!initial) return false; return intervals_.append(initial); } - uint32 id() { + uint32_t id() { return id_; } LBlock *block() { return block_; } LInstruction *ins() { return ins_; } @@ -383,25 +383,25 @@ class VirtualRegister // Index of the virtual registers in a graph. VREG is a subclass of // VirtualRegister extended with any allocator specific state for the vreg. 
template <typename VREG> class VirtualRegisterMap { private: VREG *vregs_; - uint32 numVregs_; + uint32_t numVregs_; public: VirtualRegisterMap() : vregs_(NULL), numVregs_(0) { } - bool init(MIRGenerator *gen, uint32 numVregs) { + bool init(MIRGenerator *gen, uint32_t numVregs) { vregs_ = gen->allocate<VREG>(numVregs); numVregs_ = numVregs; if (!vregs_) return false; memset(vregs_, 0, sizeof(VREG) * numVregs); return true; } VREG &operator[](unsigned int index) { @@ -412,17 +412,17 @@ class VirtualRegisterMap JS_ASSERT(alloc->isUse()); JS_ASSERT(alloc->toUse()->virtualRegister() < numVregs_); return vregs_[alloc->toUse()->virtualRegister()]; } VREG &operator[](const LDefinition *def) { JS_ASSERT(def->virtualRegister() < numVregs_); return vregs_[def->virtualRegister()]; } - uint32 numVirtualRegisters() const { + uint32_t numVirtualRegisters() const { return numVregs_; } }; static inline AnyRegister GetFixedRegister(LDefinition *def, LUse *use) { return def->type() == LDefinition::DOUBLE
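VirtualRegisterMap above is a flat array indexed directly by vreg id, zero-filled once in init(); the memset presumably relies on VREG being trivially constructible, which is an assumption worth keeping in mind when extending VREG. A minimal sketch with std::vector in place of the patch's raw allocation (the VReg payload here is hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct VReg {
        uint32_t id;
        bool isTemp;
    };

    struct VRegMap {
        std::vector<VReg> vregs;
        bool init(uint32_t numVregs) {
            vregs.assign(numVregs, VReg());   // zero-initialize, like the memset
            return true;
        }
        VReg &operator[](uint32_t id) { return vregs[id]; }
    };

    int main() {
        VRegMap map;
        map.init(8);
        map[3].id = 3;
        assert(map[3].id == 3 && !map[3].isTemp);
        return 0;
    }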
--- a/js/src/ion/Lowering.cpp +++ b/js/src/ion/Lowering.cpp @@ -202,17 +202,17 @@ LIRGenerator::visitPrepareCall(MPrepareC allocateArguments(ins->argc()); return true; } bool LIRGenerator::visitPassArg(MPassArg *arg) { MDefinition *opd = arg->getArgument(); - uint32 argslot = getArgumentSlot(arg->getArgnum()); + uint32_t argslot = getArgumentSlot(arg->getArgnum()); // Pass through the virtual register of the operand. // This causes snapshots to correctly copy the operand on the stack. // // This keeps the backing store around longer than strictly required. // We could do better by informing snapshots about the argument vector. arg->setVirtualRegister(opd->virtualRegister()); @@ -257,17 +257,17 @@ bool LIRGenerator::visitCall(MCall *call) { JS_ASSERT(CallTempReg0 != CallTempReg1); JS_ASSERT(CallTempReg0 != ArgumentsRectifierReg); JS_ASSERT(CallTempReg1 != ArgumentsRectifierReg); JS_ASSERT(call->getFunction()->type() == MIRType_Object); // Height of the current argument vector. - uint32 argslot = getArgumentSlotForCall(); + uint32_t argslot = getArgumentSlotForCall(); freeArguments(call->numStackArgs()); JSFunction *target = call->getSingleTarget(); // Call DOM functions. if (call->isDOMFunction()) { JS_ASSERT(target && target->isNative()); LCallDOMNative *lir = new LCallDOMNative(argslot, tempFixed(CallTempReg0), @@ -379,17 +379,17 @@ LIRGenerator::visitTest(MTest *test) // Constant Double operand. if (opd->type() == MIRType_Double && opd->isConstant()) { bool result = ToBoolean(opd->toConstant()->value()); return add(new LGoto(result ? ifTrue : ifFalse)); } // Constant Int32 operand. if (opd->type() == MIRType_Int32 && opd->isConstant()) { - int32 num = opd->toConstant()->value().toInt32(); + int32_t num = opd->toConstant()->value().toInt32(); return add(new LGoto(num ? ifTrue : ifFalse)); } // Constant Boolean operand. if (opd->type() == MIRType_Boolean && opd->isConstant()) { bool result = opd->toConstant()->value().toBoolean(); return add(new LGoto(result ? ifTrue : ifFalse)); } @@ -1130,17 +1130,17 @@ LIRGenerator::visitToInt32(MToInt32 *con return false; case MIRType_Object: // Objects might be effectful. IonSpew(IonSpew_Abort, "Object to Int32 not supported yet."); return false; case MIRType_Undefined: - IonSpew(IonSpew_Abort, "Undefined coerces to NaN, not int32."); + IonSpew(IonSpew_Abort, "Undefined coerces to NaN, not int32_t."); return false; default: JS_ASSERT(!"unexpected type"); return false; } } @@ -2126,33 +2126,33 @@ void LIRGenerator::updateResumeState(MBasicBlock *block) { lastResumePoint_ = block->entryResumePoint(); if (IonSpewEnabled(IonSpew_Snapshots)) SpewResumePoint(block, NULL, lastResumePoint_); } void -LIRGenerator::allocateArguments(uint32 argc) +LIRGenerator::allocateArguments(uint32_t argc) { argslots_ += argc; if (argslots_ > maxargslots_) maxargslots_ = argslots_; } -uint32 -LIRGenerator::getArgumentSlot(uint32 argnum) +uint32_t +LIRGenerator::getArgumentSlot(uint32_t argnum) { // First slot has index 1. JS_ASSERT(argnum < argslots_); return argslots_ - argnum ; } void -LIRGenerator::freeArguments(uint32 argc) +LIRGenerator::freeArguments(uint32_t argc) { JS_ASSERT(argc <= argslots_); argslots_ -= argc; } bool LIRGenerator::visitBlock(MBasicBlock *block) { @@ -2169,17 +2169,17 @@ LIRGenerator::visitBlock(MBasicBlock *bl if (!visitInstruction(*iter)) return false; } if (block->successorWithPhis()) { // If we have a successor with phis, lower the phi input now that we // are approaching the join point. 
MBasicBlock *successor = block->successorWithPhis(); - uint32 position = block->positionInPhiSuccessor(); + uint32_t position = block->positionInPhiSuccessor(); size_t lirIndex = 0; for (MPhiIterator phi(successor->phisBegin()); phi != successor->phisEnd(); phi++) { MDefinition *opd = phi->getOperand(position); if (!ensureDefined(opd)) return false; JS_ASSERT(opd->type() == phi->type());
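The visitTest cases in the Lowering.cpp hunks above are a small peephole: when the tested operand is a compile-time constant (Double, Int32, or Boolean), the conditional branch lowers straight to an LGoto aimed at whichever successor the constant selects. A toy sketch of the int32 case (Block and the helper are hypothetical stand-ins for MBasicBlock and the LGoto lowering):

    #include <cassert>
    #include <cstdint>

    struct Block { int id; };

    static Block *foldConstantTest(int32_t value, Block *ifTrue, Block *ifFalse) {
        return value ? ifTrue : ifFalse;   // any non-zero int32 is truthy
    }

    int main() {
        Block t = { 1 }, f = { 2 };
        assert(foldConstantTest(0, &t, &f) == &f);
        assert(foldConstantTest(-5, &t, &f) == &t);
        return 0;
    }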
--- a/js/src/ion/Lowering.h +++ b/js/src/ion/Lowering.h @@ -29,19 +29,19 @@ namespace js { namespace ion { class LIRGenerator : public LIRGeneratorSpecific { void updateResumeState(MInstruction *ins); void updateResumeState(MBasicBlock *block); // The active depth of the (perhaps nested) call argument vectors. - uint32 argslots_; + uint32_t argslots_; // The maximum depth, for framesizeclass determination. - uint32 maxargslots_; + uint32_t maxargslots_; public: LIRGenerator(MIRGenerator *gen, MIRGraph &graph, LIRGraph &lirGraph) : LIRGeneratorSpecific(gen, graph, lirGraph), argslots_(0), maxargslots_(0) { } bool generate(); @@ -55,23 +55,23 @@ class LIRGenerator : public LIRGenerator bool lowerBitOp(JSOp op, MInstruction *ins); bool lowerShiftOp(JSOp op, MShiftInstruction *ins); bool lowerBinaryV(JSOp op, MBinaryInstruction *ins); bool precreatePhi(LBlock *block, MPhi *phi); bool definePhis(); // Allocate argument slots for a future function call. - void allocateArguments(uint32 argc); + void allocateArguments(uint32_t argc); // Map an MPassArg's argument number to a slot in the frame arg vector. // Slots are indexed from 1. argnum is indexed from 0. - uint32 getArgumentSlot(uint32 argnum); - uint32 getArgumentSlotForCall() { return argslots_; } + uint32_t getArgumentSlot(uint32_t argnum); + uint32_t getArgumentSlotForCall() { return argslots_; } // Free argument slots following a function call. - void freeArguments(uint32 argc); + void freeArguments(uint32_t argc); public: bool visitInstruction(MInstruction *ins); bool visitBlock(MBasicBlock *block); // Visitor hooks are explicit, to give CPU-specific versions a chance to // intercept without a bunch of explicit gunk in the .cpp. bool visitParameter(MParameter *param);
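The argument-slot bookkeeping declared above (and defined in the Lowering.cpp hunk) behaves like a stack: allocateArguments(argc) bumps argslots_ and records the high-water mark maxargslots_ for frame sizing, getArgumentSlot maps a 0-based argnum to the 1-based slot argslots_ - argnum, and freeArguments pops after the call. So after allocateArguments(3) with no enclosing call, arguments 0, 1, 2 land in slots 3, 2, 1. A free-standing sketch of that arithmetic (struct and names are hypothetical, not the LIRGenerator itself):

    #include <cassert>
    #include <cstdint>

    struct ArgSlots {
        uint32_t argslots = 0, maxargslots = 0;

        void allocate(uint32_t argc) {
            argslots += argc;
            if (argslots > maxargslots)
                maxargslots = argslots;      // high-water mark for frame sizing
        }
        uint32_t slotFor(uint32_t argnum) const {
            assert(argnum < argslots);
            return argslots - argnum;        // argnum 0 maps to the highest slot
        }
        void free(uint32_t argc) {
            assert(argc <= argslots);
            argslots -= argc;
        }
    };

    int main() {
        ArgSlots s;
        s.allocate(3);                       // one call with three arguments
        assert(s.slotFor(0) == 3 && s.slotFor(2) == 1);
        s.free(3);
        assert(s.argslots == 0 && s.maxargslots == 3);
        return 0;
    }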
--- a/js/src/ion/MCallOptimize.cpp +++ b/js/src/ion/MCallOptimize.cpp @@ -13,17 +13,17 @@ #include "IonBuilder.h" #include "vm/StringObject-inl.h" namespace js { namespace ion { IonBuilder::InliningStatus -IonBuilder::inlineNativeCall(JSNative native, uint32 argc, bool constructing) +IonBuilder::inlineNativeCall(JSNative native, uint32_t argc, bool constructing) { // Array natives. if (native == js_Array) return inlineArray(argc, constructing); if (native == js::array_pop) return inlineArrayPopShift(MArrayPopShift::Pop, argc, constructing); if (native == js::array_shift) return inlineArrayPopShift(MArrayPopShift::Shift, argc, constructing); @@ -73,38 +73,38 @@ IonBuilder::inlineNativeCall(JSNative na return inlineRegExpTest(argc, constructing); if (native == regexp_test) return inlineRegExpTest(argc, constructing); return InliningStatus_NotInlined; } bool -IonBuilder::discardCallArgs(uint32 argc, MDefinitionVector &argv, MBasicBlock *bb) +IonBuilder::discardCallArgs(uint32_t argc, MDefinitionVector &argv, MBasicBlock *bb) { if (!argv.resizeUninitialized(argc + 1)) return false; - for (int32 i = argc; i >= 0; i--) { + for (int32_t i = argc; i >= 0; i--) { // Unwrap each MPassArg, replacing it with its contents. MPassArg *passArg = bb->pop()->toPassArg(); MBasicBlock *block = passArg->block(); MDefinition *wrapped = passArg->getArgument(); passArg->replaceAllUsesWith(wrapped); block->discard(passArg); // Remember contents in vector. argv[i] = wrapped; } return true; } bool -IonBuilder::discardCall(uint32 argc, MDefinitionVector &argv, MBasicBlock *bb) +IonBuilder::discardCall(uint32_t argc, MDefinitionVector &argv, MBasicBlock *bb) { if (!discardCallArgs(argc, argv, bb)) return false; // Function MDefinition implicitly consumed by inlining. bb->pop(); return true; } @@ -122,32 +122,32 @@ IonBuilder::getInlineReturnTypeSet() MIRType IonBuilder::getInlineReturnType() { types::StackTypeSet *returnTypes = getInlineReturnTypeSet(); return MIRTypeFromValueType(returnTypes->getKnownTypeTag()); } types::StackTypeSet * -IonBuilder::getInlineArgTypeSet(uint32 argc, uint32 arg) +IonBuilder::getInlineArgTypeSet(uint32_t argc, uint32_t arg) { types::StackTypeSet *argTypes = oracle->getCallArg(script_, argc, arg, pc); JS_ASSERT(argTypes); return argTypes; } MIRType -IonBuilder::getInlineArgType(uint32 argc, uint32 arg) +IonBuilder::getInlineArgType(uint32_t argc, uint32_t arg) { types::StackTypeSet *argTypes = getInlineArgTypeSet(argc, arg); return MIRTypeFromValueType(argTypes->getKnownTypeTag()); } IonBuilder::InliningStatus -IonBuilder::inlineMathFunction(MMathFunction::Function function, uint32 argc, bool constructing) +IonBuilder::inlineMathFunction(MMathFunction::Function function, uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; if (argc != 1) return InliningStatus_NotInlined; if (getInlineReturnType() != MIRType_Double) @@ -165,17 +165,17 @@ IonBuilder::inlineMathFunction(MMathFunc MMathFunction *ins = MMathFunction::New(argv[1], function, cache); current->add(ins); current->push(ins); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineArray(uint32 argc, bool constructing) +IonBuilder::inlineArray(uint32_t argc, bool constructing) { uint32_t initLength = 0; MNewArray::AllocatingBehaviour allocating = MNewArray::NewArray_Unallocating; // Multiple arguments imply array initialization, not just construction. 
if (argc >= 2) { initLength = argc; allocating = MNewArray::NewArray_Allocating; @@ -231,17 +231,17 @@ IonBuilder::inlineArray(uint32 argc, boo if (!resumeAfter(length)) return InliningStatus_Error; } return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineArrayPopShift(MArrayPopShift::Mode mode, uint32 argc, bool constructing) +IonBuilder::inlineArrayPopShift(MArrayPopShift::Mode mode, uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; MIRType returnType = getInlineReturnType(); if (returnType == MIRType_Undefined || returnType == MIRType_Null) return InliningStatus_NotInlined; if (getInlineArgType(argc, 0) != MIRType_Object) @@ -277,17 +277,17 @@ IonBuilder::inlineArrayPopShift(MArrayPo ins->setResultType(returnType); if (!resumeAfter(ins)) return InliningStatus_Error; return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineArrayPush(uint32 argc, bool constructing) +IonBuilder::inlineArrayPush(uint32_t argc, bool constructing) { if (argc != 1 || constructing) return InliningStatus_NotInlined; if (getInlineReturnType() != MIRType_Int32) return InliningStatus_NotInlined; if (getInlineArgType(argc, 0) != MIRType_Object) return InliningStatus_NotInlined; @@ -310,17 +310,17 @@ IonBuilder::inlineArrayPush(uint32 argc, current->push(ins); if (!resumeAfter(ins)) return InliningStatus_Error; return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineArrayConcat(uint32 argc, bool constructing) +IonBuilder::inlineArrayConcat(uint32_t argc, bool constructing) { if (argc != 1 || constructing) return InliningStatus_NotInlined; // Ensure |this|, argument and result are objects. if (getInlineReturnType() != MIRType_Object) return InliningStatus_NotInlined; if (getInlineArgType(argc, 0) != MIRType_Object) @@ -394,17 +394,17 @@ IonBuilder::inlineArrayConcat(uint32 arg current->push(ins); if (!resumeAfter(ins)) return InliningStatus_Error; return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineMathAbs(uint32 argc, bool constructing) +IonBuilder::inlineMathAbs(uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; if (argc != 1) return InliningStatus_NotInlined; MIRType returnType = getInlineReturnType(); @@ -421,17 +421,17 @@ IonBuilder::inlineMathAbs(uint32 argc, b MAbs *ins = MAbs::New(argv[1], returnType); current->add(ins); current->push(ins); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineMathFloor(uint32 argc, bool constructing) +IonBuilder::inlineMathFloor(uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; if (argc != 1) return InliningStatus_NotInlined; @@ -457,17 +457,17 @@ IonBuilder::inlineMathFloor(uint32 argc, current->push(ins); return InliningStatus_Inlined; } return InliningStatus_NotInlined; } IonBuilder::InliningStatus -IonBuilder::inlineMathRound(uint32 argc, bool constructing) +IonBuilder::inlineMathRound(uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; if (argc != 1) return InliningStatus_NotInlined; MIRType returnType = getInlineReturnType(); @@ -491,17 +491,17 @@ IonBuilder::inlineMathRound(uint32 argc, current->push(ins); return InliningStatus_Inlined; } return InliningStatus_NotInlined; } IonBuilder::InliningStatus -IonBuilder::inlineMathSqrt(uint32 argc, bool constructing) +IonBuilder::inlineMathSqrt(uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; if (argc != 1) return 
InliningStatus_NotInlined; MIRType argType = getInlineArgType(argc, 1); @@ -516,17 +516,17 @@ IonBuilder::inlineMathSqrt(uint32 argc, MSqrt *sqrt = MSqrt::New(argv[1]); current->add(sqrt); current->push(sqrt); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineMathPow(uint32 argc, bool constructing) +IonBuilder::inlineMathPow(uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; if (argc != 2) return InliningStatus_NotInlined; // Typechecking. @@ -616,17 +616,17 @@ IonBuilder::inlineMathPow(uint32 argc, b MPow *ins = MPow::New(argv[1], argv[2], arg2Type); current->add(ins); current->push(ins); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineMathRandom(uint32 argc, bool constructing) +IonBuilder::inlineMathRandom(uint32_t argc, bool constructing) { if (constructing) return InliningStatus_NotInlined; if (getInlineReturnType() != MIRType_Double) return InliningStatus_NotInlined; MDefinitionVector argv; @@ -635,17 +635,17 @@ IonBuilder::inlineMathRandom(uint32 argc MRandom *rand = MRandom::New(); current->add(rand); current->push(rand); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineMathMinMax(bool max, uint32 argc, bool constructing) +IonBuilder::inlineMathMinMax(bool max, uint32_t argc, bool constructing) { if (argc != 2 || constructing) return InliningStatus_NotInlined; MIRType returnType = getInlineReturnType(); if (returnType != MIRType_Double && returnType != MIRType_Int32) return InliningStatus_NotInlined; @@ -669,17 +669,17 @@ IonBuilder::inlineMathMinMax(bool max, u MMinMax *ins = MMinMax::New(argv[1], argv[2], returnType, max); current->add(ins); current->push(ins); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineStringObject(uint32 argc, bool constructing) +IonBuilder::inlineStringObject(uint32_t argc, bool constructing) { if (argc != 1 || !constructing) return InliningStatus_NotInlined; // MToString only supports int32 or string values. 
MIRType type = getInlineArgType(argc, 1); if (type != MIRType_Int32 && type != MIRType_String) return InliningStatus_NotInlined; @@ -699,17 +699,17 @@ IonBuilder::inlineStringObject(uint32 ar if (!resumeAfter(ins)) return InliningStatus_Error; return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineStrCharCodeAt(uint32 argc, bool constructing) +IonBuilder::inlineStrCharCodeAt(uint32_t argc, bool constructing) { if (argc != 1 || constructing) return InliningStatus_NotInlined; if (getInlineReturnType() != MIRType_Int32) return InliningStatus_NotInlined; if (getInlineArgType(argc, 0) != MIRType_String) return InliningStatus_NotInlined; @@ -730,17 +730,17 @@ IonBuilder::inlineStrCharCodeAt(uint32 a MCharCodeAt *charCode = MCharCodeAt::New(argv[0], index); current->add(charCode); current->push(charCode); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineStrFromCharCode(uint32 argc, bool constructing) +IonBuilder::inlineStrFromCharCode(uint32_t argc, bool constructing) { if (argc != 1 || constructing) return InliningStatus_NotInlined; if (getInlineReturnType() != MIRType_String) return InliningStatus_NotInlined; if (getInlineArgType(argc, 1) != MIRType_Int32) return InliningStatus_NotInlined; @@ -754,17 +754,17 @@ IonBuilder::inlineStrFromCharCode(uint32 MFromCharCode *string = MFromCharCode::New(charCode); current->add(string); current->push(string); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineStrCharAt(uint32 argc, bool constructing) +IonBuilder::inlineStrCharAt(uint32_t argc, bool constructing) { if (argc != 1 || constructing) return InliningStatus_NotInlined; if (getInlineReturnType() != MIRType_String) return InliningStatus_NotInlined; if (getInlineArgType(argc, 0) != MIRType_String) return InliningStatus_NotInlined; @@ -789,17 +789,17 @@ IonBuilder::inlineStrCharAt(uint32 argc, MFromCharCode *string = MFromCharCode::New(charCode); current->add(string); current->push(string); return InliningStatus_Inlined; } IonBuilder::InliningStatus -IonBuilder::inlineRegExpTest(uint32 argc, bool constructing) +IonBuilder::inlineRegExpTest(uint32_t argc, bool constructing) { if (argc != 1 || constructing) return InliningStatus_NotInlined; // TI can infer a NULL return type of regexp_test with eager compilation. if (CallResultEscapes(pc) && getInlineReturnType() != MIRType_Boolean) return InliningStatus_NotInlined;
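discardCallArgs in the MCallOptimize.cpp hunks above pops argc + 1 values (the arguments plus |this|) off the block in reverse, unwrapping each MPassArg so the inlined code consumes the raw definitions; the loop index is deliberately a signed int32_t so the i >= 0 condition can actually fail. A toy sketch of the reverse pop (plain ints standing in for the MPassArg wrappers):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
        // Bottom to top: |this|, arg0, arg1 -- the order the builder pushed them.
        std::vector<int> stack;
        stack.push_back(100);   // |this|
        stack.push_back(0);     // arg0
        stack.push_back(1);     // arg1

        uint32_t argc = 2;
        std::vector<int> argv(argc + 1);
        for (int32_t i = int32_t(argc); i >= 0; i--) {  // signed so i >= 0 terminates
            argv[i] = stack.back();                     // pop in reverse order
            stack.pop_back();
        }
        assert(argv[0] == 100 && argv[2] == 1);         // argv[0] is |this|
        return 0;
    }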
--- a/js/src/ion/MIR.cpp +++ b/js/src/ion/MIR.cpp @@ -86,18 +86,18 @@ EvaluateConstantOperands(MBinaryInstruct break; case MDefinition::Op_Lsh: ret = Int32Value(lhs.toInt32() << (rhs.toInt32() & 0x1F)); break; case MDefinition::Op_Rsh: ret = Int32Value(lhs.toInt32() >> (rhs.toInt32() & 0x1F)); break; case MDefinition::Op_Ursh: { - uint32 unsignedLhs = (uint32_t)lhs.toInt32(); - ret.setNumber(uint32(unsignedLhs >> (rhs.toInt32() & 0x1F))); + uint32_t unsignedLhs = (uint32_t)lhs.toInt32(); + ret.setNumber(uint32_t(unsignedLhs >> (rhs.toInt32() & 0x1F))); break; } case MDefinition::Op_Add: ret.setNumber(lhs.toNumber() + rhs.toNumber()); break; case MDefinition::Op_Sub: ret.setNumber(lhs.toNumber() - rhs.toNumber()); break; @@ -131,17 +131,17 @@ MDefinition::printName(FILE *fp) fprintf(fp, "-vn%u", valueNumber()); } HashNumber MDefinition::valueHash() const { HashNumber out = op(); for (size_t i = 0; i < numOperands(); i++) { - uint32 valueNumber = getOperand(i)->valueNumber(); + uint32_t valueNumber = getOperand(i)->valueNumber(); out = valueNumber + (out << 6) + (out << 16) - out; } return out; } bool MDefinition::congruentIfOperandsEqual(MDefinition * const &ins) const { @@ -264,17 +264,17 @@ MDefinition::replaceAllUsesWith(MDefinit MUse *use = *i; i = uses_.removeAt(i); use->node()->setOperand(use->index(), dom); dom->linkUse(use); } } static inline bool -IsPowerOfTwo(uint32 n) +IsPowerOfTwo(uint32_t n) { return (n > 0) && ((n & (n - 1)) == 0); } MConstant * MConstant::New(const Value &v) { return new MConstant(v); @@ -342,17 +342,17 @@ MConstant::printOpcode(FILE *fp) void MConstantElements::printOpcode(FILE *fp) { PrintOpcodeName(fp, op()); fprintf(fp, " %p", value()); } MParameter * -MParameter::New(int32 index, const types::TypeSet *types) +MParameter::New(int32_t index, const types::TypeSet *types) { return new MParameter(index, types); } void MParameter::printOpcode(FILE *fp) { PrintOpcodeName(fp, op()); @@ -411,30 +411,30 @@ MTest::New(MDefinition *ins, MBasicBlock MCompare * MCompare::New(MDefinition *left, MDefinition *right, JSOp op) { return new MCompare(left, right, op); } MTableSwitch * -MTableSwitch::New(MDefinition *ins, int32 low, int32 high) +MTableSwitch::New(MDefinition *ins, int32_t low, int32_t high) { return new MTableSwitch(ins, low, high); } MGoto * MGoto::New(MBasicBlock *target) { JS_ASSERT(target); return new MGoto(target); } MPhi * -MPhi::New(uint32 slot) +MPhi::New(uint32_t slot) { return new MPhi(slot); } MDefinition * MPhi::foldsTo(bool useValueNumbers) { JS_ASSERT(inputs_.length() != 0); @@ -467,17 +467,17 @@ MPhi::congruentTo(MDefinition *const &in bool MPhi::addInput(MDefinition *ins) { ins->addUse(this, inputs_.length()); return inputs_.append(ins); } -uint32 +uint32_t MPrepareCall::argc() const { JS_ASSERT(useCount() == 1); MCall *call = usesBegin()->node()->toDefinition()->toCall(); return call->numStackArgs(); } void @@ -1292,17 +1292,17 @@ MDefinition * MTruncateToInt32::foldsTo(bool useValueNumbers) { MDefinition *input = getOperand(0); if (input->type() == MIRType_Int32) return input; if (input->type() == MIRType_Double && input->isConstant()) { const Value &v = input->toConstant()->value(); - uint32 ret = ToInt32(v.toDouble()); + uint32_t ret = ToInt32(v.toDouble()); return MConstant::New(Int32Value(ret)); } return this; } MDefinition * MToDouble::foldsTo(bool useValueNumbers)
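Two helpers in the MIR.cpp hunks above are worth a second look. valueHash combines operand value numbers with the step out = valueNumber + (out << 6) + (out << 16) - out, which is the sdbm string-hash recurrence written with shifts: (h << 16) + (h << 6) - h equals h * 65599. And IsPowerOfTwo is the standard n & (n - 1) trick, which clears the lowest set bit. A quick standalone check of both identities:

    #include <cassert>
    #include <cstdint>

    static uint32_t mix(uint32_t h, uint32_t k) {
        return k + (h << 6) + (h << 16) - h;   // == h * 65599 + k (mod 2^32)
    }

    static bool isPowerOfTwo(uint32_t n) {
        return n > 0 && (n & (n - 1)) == 0;    // n & (n-1) clears the lowest set bit
    }

    int main() {
        assert(mix(7, 3) == 7u * 65599u + 3u);
        assert(isPowerOfTwo(64) && !isPowerOfTwo(0) && !isPowerOfTwo(96));
        return 0;
    }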
--- a/js/src/ion/MIR.h +++ b/js/src/ion/MIR.h @@ -66,32 +66,32 @@ class MResumePoint; static inline bool isOSRLikeValue (MDefinition *def); // Represents a use of a node. class MUse : public TempObject, public InlineForwardListNode<MUse> { friend class MDefinition; MNode *node_; // The node that is using this operand. - uint32 index_; // The index of this operand in its owner. - - MUse(MNode *owner, uint32 index) + uint32_t index_; // The index of this operand in its owner. + + MUse(MNode *owner, uint32_t index) : node_(owner), index_(index) { } public: - static inline MUse *New(MNode *owner, uint32 index) { + static inline MUse *New(MNode *owner, uint32_t index) { return new MUse(owner, index); } MNode *node() const { return node_; } - uint32 index() const { + uint32_t index() const { return index_; } }; typedef InlineForwardList<MUse>::iterator MUseIterator; // A node is an entry in the MIR graph. It has two kinds: // MInstruction: an instruction which appears in the IR stream. @@ -153,40 +153,40 @@ class MNode : public TempObject virtual void setOperand(size_t index, MDefinition *operand) = 0; // Initializes an operand for the first time. inline void initOperand(size_t index, MDefinition *ins); }; class AliasSet { private: - uint32 flags_; + uint32_t flags_; public: enum Flag { None_ = 0, ObjectFields = 1 << 0, // shape, class, slots, length etc. Element = 1 << 1, // A member of obj->elements. Slot = 1 << 2, // A member of obj->slots. TypedArrayElement = 1 << 3, // A typed array element. Last = TypedArrayElement, Any = Last | (Last - 1), // Indicates load or store. Store_ = 1 << 31 }; - AliasSet(uint32 flags) + AliasSet(uint32_t flags) : flags_(flags) { } public: inline bool isNone() const { return flags_ == None_; } - uint32 flags() const { + uint32_t flags() const { return flags_ & Any; } inline bool isStore() const { return !!(flags_ & Store_); } inline bool isLoad() const { return !isStore() && !isNone(); } @@ -194,21 +194,21 @@ class AliasSet { return AliasSet(flags_ | other.flags_); } inline AliasSet operator &(const AliasSet &other) const { return AliasSet(flags_ & other.flags_); } static AliasSet None() { return AliasSet(None_); } - static AliasSet Load(uint32 flags) { + static AliasSet Load(uint32_t flags) { JS_ASSERT(flags && !(flags & Store_)); return AliasSet(flags); } - static AliasSet Store(uint32 flags) { + static AliasSet Store(uint32_t flags) { JS_ASSERT(flags && !(flags & Store_)); return AliasSet(flags | Store_); } }; static const unsigned NUM_ALIAS_SETS = sizeof(AliasSet) * 8; // An MDefinition is an SSA name. @@ -222,26 +222,26 @@ class MDefinition : public MNode # define DEFINE_OPCODES(op) Op_##op, MIR_OPCODE_LIST(DEFINE_OPCODES) # undef DEFINE_OPCODES Op_Invalid }; private: InlineForwardList<MUse> uses_; // Use chain. - uint32 id_; // Instruction ID, which after block re-ordering + uint32_t id_; // Instruction ID, which after block re-ordering // is sorted within a basic block. ValueNumberData *valueNumber_; // The instruction's value number (see GVN for details in use) Range *range_; // Any computed range for this def. MIRType resultType_; // Representation of result type. - uint32 flags_; // Bit flags. + uint32_t flags_; // Bit flags. union { MDefinition *dependency_; // Implicit dependency (store, call, etc.) of this instruction. // Used by alias analysis, GVN and LICM. - uint32 virtualRegister_; // Used by lowering to map definitions to virtual registers. + uint32_t virtualRegister_; // Used by lowering to map definitions to virtual registers. 
}; // Track bailouts by storing the current pc in MIR instruction. Also used // for profiling and keeping track of what the last known pc was. jsbytecode *trackedPc_; private: enum Flag { @@ -251,23 +251,23 @@ class MDefinition : public MNode # undef DEFINE_FLAG Total }; void setBlock(MBasicBlock *block) { block_ = block; } - bool hasFlags(uint32 flags) const { + bool hasFlags(uint32_t flags) const { return (flags_ & flags) == flags; } - void removeFlags(uint32 flags) { + void removeFlags(uint32_t flags) { flags_ &= ~flags; } - void setFlags(uint32 flags) { + void setFlags(uint32_t flags) { flags_ |= flags; } public: MDefinition() : id_(0), valueNumber_(NULL), range_(), @@ -312,26 +312,26 @@ class MDefinition : public MNode // Ranges are only computed for definitions whose type is int32. virtual void computeRange() { } MNode::Kind kind() const { return MNode::Definition; } - uint32 id() const { + uint32_t id() const { JS_ASSERT(block_); return id_; } - void setId(uint32 id) { + void setId(uint32_t id) { id_ = id; } - uint32 valueNumber() const; - void setValueNumber(uint32 vn); + uint32_t valueNumber() const; + void setValueNumber(uint32_t vn); ValueNumberData *valueNumberData() { return valueNumber_; } void setValueNumberData(ValueNumberData *vn) { JS_ASSERT(valueNumber_ == NULL); valueNumber_ = vn; } #define FLAG_ACCESSOR(flag) \ @@ -404,23 +404,23 @@ class MDefinition : public MNode // Adds a use from a node that is being recycled during operand // replacement. void linkUse(MUse *use) { JS_ASSERT(use->node()->getOperand(use->index()) == this); uses_.pushFront(use); } - void setVirtualRegister(uint32 vreg) { + void setVirtualRegister(uint32_t vreg) { virtualRegister_ = vreg; #ifdef DEBUG setLoweredUnchecked(); #endif } - uint32 virtualRegister() const { + uint32_t virtualRegister() const { JS_ASSERT(isLowered()); return virtualRegister_; } public: // Opcode testing and casts. # define OPCODE_CASTS(opcode) \ bool is##opcode() const { \ @@ -645,34 +645,34 @@ class MConstant : public MNullaryInstruc return AliasSet::None(); } void computeRange(); }; class MParameter : public MNullaryInstruction { - int32 index_; + int32_t index_; const types::TypeSet *typeSet_; public: - static const int32 THIS_SLOT = -1; - - MParameter(int32 index, const types::TypeSet *types) + static const int32_t THIS_SLOT = -1; + + MParameter(int32_t index, const types::TypeSet *types) : index_(index), typeSet_(types) { setResultType(MIRType_Value); } public: INSTRUCTION_HEADER(Parameter); - static MParameter *New(int32 index, const types::TypeSet *types); - - int32 index() const { + static MParameter *New(int32_t index, const types::TypeSet *types); + + int32_t index() const { return index_; } const types::TypeSet *typeSet() const { return typeSet_; } void printOpcode(FILE *fp); HashNumber valueHash() const; @@ -724,20 +724,20 @@ class MTableSwitch // - First successor = the default case // - Successor 2 and higher = the cases sorted on case index. 
Vector<MBasicBlock*, 0, IonAllocPolicy> successors_; // Contains the blocks/cases that still need to get build Vector<MBasicBlock*, 0, IonAllocPolicy> blocks_; MDefinition *operand_; - int32 low_; - int32 high_; - - MTableSwitch(MDefinition *ins, int32 low, int32 high) + int32_t low_; + int32_t high_; + + MTableSwitch(MDefinition *ins, int32_t low, int32_t high) : successors_(), blocks_(), low_(low), high_(high) { initOperand(0, ins); } @@ -745,17 +745,17 @@ class MTableSwitch void setOperand(size_t index, MDefinition *operand) { JS_ASSERT(index == 0); operand_ = operand; } public: INSTRUCTION_HEADER(TableSwitch); static MTableSwitch *New(MDefinition *ins, - int32 low, int32 high); + int32_t low, int32_t high); size_t numSuccessors() const { return successors_.length(); } MBasicBlock *getSuccessor(size_t i) const { JS_ASSERT(i < numSuccessors()); return successors_[i]; @@ -769,21 +769,21 @@ class MTableSwitch MBasicBlock** blocks() { return &blocks_[0]; } size_t numBlocks() const { return blocks_.length(); } - int32 low() const { + int32_t low() const { return low_; } - int32 high() const { + int32_t high() const { return high_; } MBasicBlock *getDefault() const { return getSuccessor(0); } MBasicBlock *getCase(size_t i) const { @@ -972,34 +972,34 @@ class MNewArray : public MNullaryInstruc public: enum AllocatingBehaviour { NewArray_Allocating, NewArray_Unallocating }; private: // Number of space to allocate for the array. - uint32 count_; + uint32_t count_; // Template for the created object. CompilerRootObject templateObject_; // Allocate space at initialization or not AllocatingBehaviour allocating_; public: INSTRUCTION_HEADER(NewArray); - MNewArray(uint32 count, JSObject *templateObject, AllocatingBehaviour allocating) + MNewArray(uint32_t count, JSObject *templateObject, AllocatingBehaviour allocating) : count_(count), templateObject_(templateObject), allocating_(allocating) { setResultType(MIRType_Object); } - uint32 count() const { + uint32_t count() const { return count_; } JSObject *templateObject() const { return templateObject_; } bool isAllocating() const { @@ -1085,17 +1085,17 @@ class MPrepareCall : public MNullaryInst { public: INSTRUCTION_HEADER(PrepareCall); MPrepareCall() { } // Get the vector size for the upcoming call by looking at the call. - uint32 argc() const; + uint32_t argc() const; AliasSet getAliasSet() const { return AliasSet::None(); } }; class MVariadicInstruction : public MInstruction { @@ -1131,19 +1131,19 @@ class MCall static const size_t NumNonArgumentOperands = 2; protected: // True if the call is for JSOP_NEW. bool construct_; // Monomorphic cache of single target from TI, or NULL. CompilerRootFunction target_; // Original value of argc from the bytecode. - uint32 numActualArgs_; - - MCall(JSFunction *target, uint32 numActualArgs, bool construct) + uint32_t numActualArgs_; + + MCall(JSFunction *target, uint32_t numActualArgs, bool construct) : construct_(construct), target_(target), numActualArgs_(numActualArgs) { setResultType(MIRType_Value); } public: @@ -1163,39 +1163,39 @@ class MCall return getOperand(FunctionOperandIndex); } void replaceFunction(MInstruction *newfunc) { replaceOperand(FunctionOperandIndex, newfunc); } void addArg(size_t argnum, MPassArg *arg); - MDefinition *getArg(uint32 index) const { + MDefinition *getArg(uint32_t index) const { return getOperand(NumNonArgumentOperands + index); } // For TI-informed monomorphic callsites. 
JSFunction *getSingleTarget() const { return target_; } bool isConstructing() const { return construct_; } // The number of stack arguments is the max between the number of formal // arguments and the number of actual arguments. The number of stack // argument includes the |undefined| padding added in case of underflow. // Includes |this|. - uint32 numStackArgs() const { + uint32_t numStackArgs() const { return numOperands() - NumNonArgumentOperands; } // Does not include |this|. - uint32 numActualArgs() const { + uint32_t numActualArgs() const { return numActualArgs_; } TypePolicy *typePolicy() { return this; } AliasSet getAliasSet() const { return AliasSet::Store(AliasSet::Any); @@ -1655,17 +1655,17 @@ class MReturnFromCtor // Passes an MDefinition to an MCall. Must occur between an MPrepareCall and // MCall. Boxes the input and stores it to the correct location on stack. // // Arguments are *not* simply pushed onto a call stack: they are evaluated // left-to-right, but stored in the arg vector in C-style, right-to-left. class MPassArg : public MUnaryInstruction { - int32 argnum_; + int32_t argnum_; private: MPassArg(MDefinition *def) : MUnaryInstruction(def), argnum_(-1) { setResultType(def->type()); } @@ -1676,22 +1676,22 @@ class MPassArg : public MUnaryInstructio return new MPassArg(def); } MDefinition *getArgument() const { return getOperand(0); } // Set by the MCall. - void setArgnum(uint32 argnum) { + void setArgnum(uint32_t argnum) { argnum_ = argnum; } - uint32 getArgnum() const { + uint32_t getArgnum() const { JS_ASSERT(argnum_ >= 0); - return (uint32)argnum_; + return (uint32_t)argnum_; } AliasSet getAliasSet() const { return AliasSet::None(); } void printOpcode(FILE *fp); }; // Converts a primitive (either typed or untyped) to a double. If the input is @@ -2729,45 +2729,45 @@ class MFromCharCode virtual AliasSet getAliasSet() const { return AliasSet::None(); } }; class MPhi : public MDefinition, public InlineForwardListNode<MPhi> { js::Vector<MDefinition *, 2, IonAllocPolicy> inputs_; - uint32 slot_; + uint32_t slot_; bool triedToSpecialize_; bool hasBytecodeUses_; bool isIterator_; - MPhi(uint32 slot) + MPhi(uint32_t slot) : slot_(slot), triedToSpecialize_(false), hasBytecodeUses_(false), isIterator_(false) { setResultType(MIRType_Value); } protected: void setOperand(size_t index, MDefinition *operand) { inputs_[index] = operand; } public: INSTRUCTION_HEADER(Phi); - static MPhi *New(uint32 slot); + static MPhi *New(uint32_t slot); MDefinition *getOperand(size_t index) const { return inputs_[index]; } size_t numOperands() const { return inputs_.length(); } - uint32 slot() const { + uint32_t slot() const { return slot_; } bool triedToSpecialize() const { return triedToSpecialize_; } void specialize(MIRType type) { triedToSpecialize_ = true; setResultType(type); @@ -2890,17 +2890,17 @@ class MCheckOverRecursed : public MNulla }; // Check the script's use count and trigger recompilation to inline // calls when the script becomes hot. class MRecompileCheck : public MNullaryInstruction { uint32_t minUses_; - MRecompileCheck(uint32 minUses) + MRecompileCheck(uint32_t minUses) : minUses_(minUses) { setGuard(); } public: INSTRUCTION_HEADER(RecompileCheck); @@ -3392,18 +3392,18 @@ class MNot // Bailout if index + minimum < 0 or index + maximum >= length. The length used // in a bounds check must not be negative, or the wrong result may be computed // (unsigned comparisons may be used). 
class MBoundsCheck : public MBinaryInstruction { // Range over which to perform the bounds check, may be modified by GVN. - int32 minimum_; - int32 maximum_; + int32_t minimum_; + int32_t maximum_; MBoundsCheck(MDefinition *index, MDefinition *length) : MBinaryInstruction(index, length), minimum_(0), maximum_(0) { setGuard(); setMovable(); JS_ASSERT(index->type() == MIRType_Int32); JS_ASSERT(length->type() == MIRType_Int32); @@ -3419,26 +3419,26 @@ class MBoundsCheck return new MBoundsCheck(index, length); } MDefinition *index() const { return getOperand(0); } MDefinition *length() const { return getOperand(1); } - int32 minimum() const { + int32_t minimum() const { return minimum_; } - void setMinimum(int32 n) { + void setMinimum(int32_t n) { minimum_ = n; } - int32 maximum() const { + int32_t maximum() const { return maximum_; } - void setMaximum(int32 n) { + void setMaximum(int32_t n) { maximum_ = n; } bool congruentTo(MDefinition * const &ins) const { if (!ins->isBoundsCheck()) return false; MBoundsCheck *other = ins->toBoundsCheck(); if (minimum() != other->minimum() || maximum() != other->maximum()) return false; @@ -3448,17 +3448,17 @@ class MBoundsCheck return AliasSet::None(); } }; // Bailout if index < minimum. class MBoundsCheckLower : public MUnaryInstruction { - int32 minimum_; + int32_t minimum_; MBoundsCheckLower(MDefinition *index) : MUnaryInstruction(index), minimum_(0) { setGuard(); setMovable(); JS_ASSERT(index->type() == MIRType_Int32); } @@ -3468,20 +3468,20 @@ class MBoundsCheckLower static MBoundsCheckLower *New(MDefinition *index) { return new MBoundsCheckLower(index); } MDefinition *index() const { return getOperand(0); } - int32 minimum() const { + int32_t minimum() const { return minimum_; } - void setMinimum(int32 n) { + void setMinimum(int32_t n) { minimum_ = n; } AliasSet getAliasSet() const { return AliasSet::None(); } bool fallible(); }; @@ -4558,41 +4558,41 @@ class MGuardClass } }; // Load from vp[slot] (slots that are not inline in an object). class MLoadSlot : public MUnaryInstruction, public SingleObjectPolicy { - uint32 slot_; - - MLoadSlot(MDefinition *slots, uint32 slot) + uint32_t slot_; + + MLoadSlot(MDefinition *slots, uint32_t slot) : MUnaryInstruction(slots), slot_(slot) { setResultType(MIRType_Value); setMovable(); JS_ASSERT(slots->type() == MIRType_Slots); } public: INSTRUCTION_HEADER(LoadSlot); - static MLoadSlot *New(MDefinition *slots, uint32 slot) { + static MLoadSlot *New(MDefinition *slots, uint32_t slot) { return new MLoadSlot(slots, slot); } TypePolicy *typePolicy() { return this; } MDefinition *slots() const { return getOperand(0); } - uint32 slot() const { + uint32_t slot() const { return slot_; } bool congruentTo(MDefinition * const &ins) const { if (!ins->isLoadSlot()) return false; if (slot() != ins->toLoadSlot()->slot()) return false; return congruentIfOperandsEqual(ins); @@ -4626,49 +4626,49 @@ class MFunctionEnvironment } }; // Store to vp[slot] (slots that are not inline in an object). 
class MStoreSlot : public MBinaryInstruction, public SingleObjectPolicy { - uint32 slot_; + uint32_t slot_; MIRType slotType_; bool needsBarrier_; - MStoreSlot(MDefinition *slots, uint32 slot, MDefinition *value, bool barrier) + MStoreSlot(MDefinition *slots, uint32_t slot, MDefinition *value, bool barrier) : MBinaryInstruction(slots, value), slot_(slot), slotType_(MIRType_Value), needsBarrier_(barrier) { JS_ASSERT(slots->type() == MIRType_Slots); } public: INSTRUCTION_HEADER(StoreSlot); - static MStoreSlot *New(MDefinition *slots, uint32 slot, MDefinition *value) { + static MStoreSlot *New(MDefinition *slots, uint32_t slot, MDefinition *value) { return new MStoreSlot(slots, slot, value, false); } - static MStoreSlot *NewBarriered(MDefinition *slots, uint32 slot, MDefinition *value) { + static MStoreSlot *NewBarriered(MDefinition *slots, uint32_t slot, MDefinition *value) { return new MStoreSlot(slots, slot, value, true); } TypePolicy *typePolicy() { return this; } MDefinition *slots() const { return getOperand(0); } MDefinition *value() const { return getOperand(1); } - uint32 slot() const { + uint32_t slot() const { return slot_; } MIRType slotType() const { return slotType_; } void setSlotType(MIRType slotType) { JS_ASSERT(slotType != MIRType_None); slotType_ = slotType; @@ -5156,38 +5156,38 @@ class MRound return this; } }; class MIteratorStart : public MUnaryInstruction, public SingleObjectPolicy { - uint8 flags_; - - MIteratorStart(MDefinition *obj, uint8 flags) + uint8_t flags_; + + MIteratorStart(MDefinition *obj, uint8_t flags) : MUnaryInstruction(obj), flags_(flags) { setResultType(MIRType_Object); } public: INSTRUCTION_HEADER(IteratorStart); - static MIteratorStart *New(MDefinition *obj, uint8 flags) { + static MIteratorStart *New(MDefinition *obj, uint8_t flags) { return new MIteratorStart(obj, flags); } TypePolicy *typePolicy() { return this; } MDefinition *object() const { return getOperand(0); } - uint8 flags() const { + uint8_t flags() const { return flags_; } }; class MIteratorNext : public MUnaryInstruction, public SingleObjectPolicy { @@ -5669,17 +5669,17 @@ class MResumePoint : public MNode ResumeAfter, // Resume after the current instruction Outer // State before inlining. }; private: friend class MBasicBlock; MDefinition **operands_; - uint32 stackDepth_; + uint32_t stackDepth_; jsbytecode *pc_; MResumePoint *caller_; MInstruction *instruction_; Mode mode_; MResumePoint(MBasicBlock *block, jsbytecode *pc, MResumePoint *parent, Mode mode); bool init(MBasicBlock *state); void inherit(MBasicBlock *state); @@ -5702,27 +5702,27 @@ class MResumePoint : public MNode } MDefinition *getOperand(size_t index) const { JS_ASSERT(index < stackDepth_); return operands_[index]; } jsbytecode *pc() const { return pc_; } - uint32 stackDepth() const { + uint32_t stackDepth() const { return stackDepth_; } MResumePoint *caller() { return caller_; } void setCaller(MResumePoint *caller) { caller_ = caller; } - uint32 frameCount() const { - uint32 count = 1; + uint32_t frameCount() const { + uint32_t count = 1; for (MResumePoint *it = caller_; it; it = it->caller_) count++; return count; } MInstruction *instruction() { return instruction_; } void setInstruction(MInstruction *ins) {
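AliasSet, from the MIR.h hunks above, packs an instruction's possible memory effects into one word: the low bits name categories (ObjectFields, Element, Slot, TypedArrayElement), bit 31 marks a store, and Any covers every category. Under the usual alias-analysis reading (this interference rule is an inference from the hunks, not code in the patch), two instructions can interfere only when their category bits intersect and at least one of them is a store. A free-standing sketch with the same bit layout:

    #include <cassert>
    #include <cstdint>

    const uint32_t ObjectFields = 1 << 0;
    const uint32_t Element      = 1 << 1;
    const uint32_t Slot         = 1 << 2;
    const uint32_t Store        = 1u << 31;

    static uint32_t categories(uint32_t set) { return set & ~Store; }
    static bool isStore(uint32_t set)        { return (set & Store) != 0; }

    static bool mayConflict(uint32_t a, uint32_t b) {
        return (categories(a) & categories(b)) != 0 && (isStore(a) || isStore(b));
    }

    int main() {
        uint32_t loadElem  = Element;           // like AliasSet::Load(Element)
        uint32_t storeSlot = Slot | Store;      // like AliasSet::Store(Slot)
        uint32_t storeElem = Element | Store;   // like AliasSet::Store(Element)
        assert(!mayConflict(loadElem, storeSlot));  // disjoint categories
        assert(mayConflict(loadElem, storeElem));   // same category, one is a store
        return 0;
    }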
--- a/js/src/ion/MIRGenerator.h +++ b/js/src/ion/MIRGenerator.h @@ -74,17 +74,17 @@ class MIRGenerator public: JSCompartment *compartment; protected: CompileInfo *info_; TempAllocator *temp_; JSFunction *fun_; - uint32 nslots_; + uint32_t nslots_; MIRGraph *graph_; bool error_; size_t cancelBuild_; }; } // namespace ion } // namespace js
--- a/js/src/ion/MIRGraph.cpp +++ b/js/src/ion/MIRGraph.cpp @@ -147,17 +147,17 @@ MBasicBlock::init() return true; } void MBasicBlock::copySlots(MBasicBlock *from) { JS_ASSERT(stackPosition_ == from->stackPosition_); - for (uint32 i = 0; i < stackPosition_; i++) + for (uint32_t i = 0; i < stackPosition_; i++) slots_[i] = from->slots_[i]; } bool MBasicBlock::inherit(MBasicBlock *pred) { if (pred) { stackPosition_ = pred->stackPosition_; @@ -201,17 +201,17 @@ MBasicBlock::inherit(MBasicBlock *pred) return true; } bool MBasicBlock::inheritResumePoint(MBasicBlock *pred) { // Copy slots from the resume point. stackPosition_ = entryResumePoint_->numOperands(); - for (uint32 i = 0; i < stackPosition_; i++) + for (uint32_t i = 0; i < stackPosition_; i++) slots_[i] = entryResumePoint_->getOperand(i); JS_ASSERT(info_.nslots() >= stackPosition_); JS_ASSERT(kind_ != PENDING_LOOP_HEADER); JS_ASSERT(pred != NULL); if (!predecessors_.append(pred)) return false; @@ -232,24 +232,24 @@ MBasicBlock::initEntrySlots() // Create a resume point using our initial stack state. entryResumePoint_ = MResumePoint::New(this, pc(), callerResumePoint(), MResumePoint::ResumeAt); if (!entryResumePoint_) return false; return true; } MDefinition * -MBasicBlock::getSlot(uint32 index) +MBasicBlock::getSlot(uint32_t index) { JS_ASSERT(index < stackPosition_); return slots_[index]; } void -MBasicBlock::initSlot(uint32 slot, MDefinition *ins) +MBasicBlock::initSlot(uint32_t slot, MDefinition *ins) { slots_[slot] = ins; entryResumePoint()->initOperand(slot, ins); } void MBasicBlock::shimmySlots(int discardDepth) { @@ -267,97 +267,97 @@ MBasicBlock::shimmySlots(int discardDept void MBasicBlock::linkOsrValues(MStart *start) { JS_ASSERT(start->startType() == MStart::StartType_Osr); MResumePoint *res = start->resumePoint(); - for (uint32 i = 0; i < stackDepth(); i++) { + for (uint32_t i = 0; i < stackDepth(); i++) { MDefinition *def = slots_[i]; if (i == info().scopeChainSlot()) def->toOsrScopeChain()->setResumePoint(res); else def->toOsrValue()->setResumePoint(res); } } void -MBasicBlock::setSlot(uint32 slot, MDefinition *ins) +MBasicBlock::setSlot(uint32_t slot, MDefinition *ins) { slots_[slot] = ins; } void -MBasicBlock::setVariable(uint32 index) +MBasicBlock::setVariable(uint32_t index) { JS_ASSERT(stackPosition_ > info_.firstStackSlot()); setSlot(index, slots_[stackPosition_ - 1]); } void -MBasicBlock::setArg(uint32 arg) +MBasicBlock::setArg(uint32_t arg) { setVariable(info_.argSlot(arg)); } void -MBasicBlock::setLocal(uint32 local) +MBasicBlock::setLocal(uint32_t local) { setVariable(info_.localSlot(local)); } void -MBasicBlock::setSlot(uint32 slot) +MBasicBlock::setSlot(uint32_t slot) { setVariable(slot); } void -MBasicBlock::rewriteSlot(uint32 slot, MDefinition *ins) +MBasicBlock::rewriteSlot(uint32_t slot, MDefinition *ins) { setSlot(slot, ins); } void -MBasicBlock::rewriteAtDepth(int32 depth, MDefinition *ins) +MBasicBlock::rewriteAtDepth(int32_t depth, MDefinition *ins) { JS_ASSERT(depth < 0); JS_ASSERT(stackPosition_ + depth >= info_.firstStackSlot()); rewriteSlot(stackPosition_ + depth, ins); } void MBasicBlock::push(MDefinition *ins) { JS_ASSERT(stackPosition_ < info_.nslots()); slots_[stackPosition_++] = ins; } void -MBasicBlock::pushVariable(uint32 slot) +MBasicBlock::pushVariable(uint32_t slot) { push(slots_[slot]); } void -MBasicBlock::pushArg(uint32 arg) +MBasicBlock::pushArg(uint32_t arg) { pushVariable(info_.argSlot(arg)); } void -MBasicBlock::pushLocal(uint32 local) +MBasicBlock::pushLocal(uint32_t local) { 
pushVariable(info_.localSlot(local)); } void -MBasicBlock::pushSlot(uint32 slot) +MBasicBlock::pushSlot(uint32_t slot) { pushVariable(slot); } MDefinition * MBasicBlock::pop() { JS_ASSERT(stackPosition_ > info_.firstStackSlot()); @@ -372,40 +372,40 @@ MBasicBlock::scopeChain() void MBasicBlock::setScopeChain(MDefinition *scopeObj) { setSlot(info().scopeChainSlot(), scopeObj); } void -MBasicBlock::pick(int32 depth) +MBasicBlock::pick(int32_t depth) { // pick take an element and move it to the top. // pick(-2): // A B C D E // A B D C E [ swapAt(-2) ] // A B D E C [ swapAt(-1) ] for (; depth < 0; depth++) swapAt(depth); } void -MBasicBlock::swapAt(int32 depth) +MBasicBlock::swapAt(int32_t depth) { - uint32 lhsDepth = stackPosition_ + depth - 1; - uint32 rhsDepth = stackPosition_ + depth; + uint32_t lhsDepth = stackPosition_ + depth - 1; + uint32_t rhsDepth = stackPosition_ + depth; MDefinition *temp = slots_[lhsDepth]; slots_[lhsDepth] = slots_[rhsDepth]; slots_[rhsDepth] = temp; } MDefinition * -MBasicBlock::peek(int32 depth) +MBasicBlock::peek(int32_t depth) { JS_ASSERT(depth < 0); JS_ASSERT(stackPosition_ + depth >= info_.firstStackSlot()); return getSlot(stackPosition_ + depth); } void MBasicBlock::discardLastIns() @@ -546,17 +546,17 @@ MBasicBlock::addPredecessor(MBasicBlock { JS_ASSERT(pred); JS_ASSERT(predecessors_.length() > 0); // Predecessors must be finished, and at the correct stack depth. JS_ASSERT(pred->lastIns_); JS_ASSERT(pred->stackPosition_ == stackPosition_); - for (uint32 i = 0; i < stackPosition_; i++) { + for (uint32_t i = 0; i < stackPosition_; i++) { MDefinition *mine = getSlot(i); MDefinition *other = pred->getSlot(i); if (mine != other) { MPhi *phi; // If the current instruction is a phi, and it was created in this // basic block, then we have already placed this phi and should @@ -612,34 +612,34 @@ MBasicBlock::assertUsesAreNotWithin(MUse use->node()->toDefinition()->block()->id() < id()); } #endif } bool MBasicBlock::dominates(MBasicBlock *other) { - uint32 high = domIndex() + numDominated(); - uint32 low = domIndex(); + uint32_t high = domIndex() + numDominated(); + uint32_t low = domIndex(); return other->domIndex() >= low && other->domIndex() <= high; } bool MBasicBlock::setBackedge(MBasicBlock *pred) { // Predecessors must be finished, and at the correct stack depth. JS_ASSERT(lastIns_); JS_ASSERT(pred->lastIns_); JS_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth()); // We must be a pending loop header JS_ASSERT(kind_ == PENDING_LOOP_HEADER); // Add exit definitions to each corresponding phi at the entry. - for (uint32 i = 0; i < pred->stackDepth(); i++) { + for (uint32_t i = 0; i < pred->stackDepth(); i++) { MPhi *entryDef = entryResumePoint()->getOperand(i)->toPhi(); MDefinition *exitDef = pred->slots_[i]; // Assert that we already placed phis for each slot. JS_ASSERT(entryDef->block() == this); if (entryDef == exitDef) { // If the exit def is the same as the entry def, make a redundant @@ -729,17 +729,17 @@ MBasicBlock::inheritPhis(MBasicBlock *he } void MBasicBlock::dumpStack(FILE *fp) { #ifdef DEBUG fprintf(fp, " %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next"); fprintf(fp, "-------------------------------------------\n"); - for (uint32 i = 0; i < stackPosition_; i++) { + for (uint32_t i = 0; i < stackPosition_; i++) { fprintf(fp, " %-3d", i); fprintf(fp, " %-16p\n", (void *)slots_[i]); } #endif } MTest * MBasicBlock::immediateDominatorBranch(BranchDirection *pdirection)
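MBasicBlock::dominates in the MIRGraph.cpp hunk above is the classic dominator-tree interval test: give each block its preorder index in the dominator tree (domIndex) plus the size of the subtree under it (numDominated), and a dominance query becomes a range check. A sketch of the same test (toy struct; reading domIndex_/numDominated_ this way is an inference from the hunk):

    #include <cassert>
    #include <cstdint>

    struct Block {
        uint32_t domIndex;       // preorder position in the dominator tree
        uint32_t numDominated;   // blocks in the subtree below this one
    };

    static bool dominates(const Block &a, const Block &b) {
        // b is in a's dominator subtree iff its index falls in
        // [a.domIndex, a.domIndex + a.numDominated].
        return b.domIndex >= a.domIndex &&
               b.domIndex <= a.domIndex + a.numDominated;
    }

    int main() {
        Block entry = { 0, 3 };  // dominates preorder indices 0..3
        Block inner = { 2, 0 };
        assert(dominates(entry, inner) && !dominates(inner, entry));
        return 0;
    }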
--- a/js/src/ion/MIRGraph.h +++ b/js/src/ion/MIRGraph.h @@ -48,24 +48,24 @@ class MBasicBlock : public TempObject, p bool inheritResumePoint(MBasicBlock *pred); void assertUsesAreNotWithin(MUseIterator use, MUseIterator end); // Does this block do something that forces it to terminate early? bool earlyAbort_; // Sets a slot, taking care to rewrite copies. - void setSlot(uint32 slot, MDefinition *ins); + void setSlot(uint32_t slot, MDefinition *ins); // Pushes a copy of a local variable or argument. - void pushVariable(uint32 slot); + void pushVariable(uint32_t slot); // Sets a variable slot to the top of the stack, correctly creating copies // as needed. - void setVariable(uint32 slot); + void setVariable(uint32_t slot); public: /////////////////////////////////////////////////////// ////////// BEGIN GRAPH BUILDING INSTRUCTIONS ////////// /////////////////////////////////////////////////////// // Creates a new basic block for a MIR generator. If |pred| is not NULL, // its slots and stack depth are initialized from |pred|. @@ -75,68 +75,68 @@ class MBasicBlock : public TempObject, p MBasicBlock *pred, jsbytecode *entryPc, MResumePoint *resumePoint); static MBasicBlock *NewPendingLoopHeader(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, jsbytecode *entryPc); static MBasicBlock *NewSplitEdge(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred); bool dominates(MBasicBlock *other); - void setId(uint32 id) { + void setId(uint32_t id) { id_ = id; } void setEarlyAbort() { earlyAbort_ = true; } void clearEarlyAbort() { earlyAbort_ = false; } bool earlyAbort() { return earlyAbort_; } // Move the definition to the top of the stack. - void pick(int32 depth); + void pick(int32_t depth); // Exchange 2 stack slots at the defined depth - void swapAt(int32 depth); + void swapAt(int32_t depth); // Gets the instruction associated with various slot types. - MDefinition *peek(int32 depth); + MDefinition *peek(int32_t depth); MDefinition *scopeChain(); // Initializes a slot value; must not be called for normal stack // operations, as it will not create new SSA names for copies. - void initSlot(uint32 index, MDefinition *ins); + void initSlot(uint32_t index, MDefinition *ins); // Discard the slot at the given depth, lowering all slots above. void shimmySlots(int discardDepth); // In an OSR block, set all MOsrValues to use the MResumePoint attached to // the MStart. void linkOsrValues(MStart *start); // Sets the instruction associated with various slot types. The // instruction must lie at the top of the stack. - void setLocal(uint32 local); - void setArg(uint32 arg); - void setSlot(uint32 slot); + void setLocal(uint32_t local); + void setArg(uint32_t arg); + void setSlot(uint32_t slot); // Rewrites a slot directly, bypassing the stack transition. This should // not be used under most circumstances. - void rewriteSlot(uint32 slot, MDefinition *ins); + void rewriteSlot(uint32_t slot, MDefinition *ins); // Rewrites a slot based on its depth (same as argument to peek()). - void rewriteAtDepth(int32 depth, MDefinition *ins); + void rewriteAtDepth(int32_t depth, MDefinition *ins); // Tracks an instruction as being pushed onto the operand stack. void push(MDefinition *ins); - void pushArg(uint32 arg); - void pushLocal(uint32 local); - void pushSlot(uint32 slot); + void pushArg(uint32_t arg); + void pushLocal(uint32_t local); + void pushSlot(uint32_t slot); void setScopeChain(MDefinition *ins); // Returns the top of the stack, then decrements the virtual stack pointer. 
MDefinition *pop(); // Adds an instruction to this block's instruction list. |ins| may be NULL // to simplify OOM checking. void add(MInstruction *ins); @@ -197,31 +197,31 @@ class MBasicBlock : public TempObject, p return graph_; } CompileInfo &info() const { return info_; } jsbytecode *pc() const { return pc_; } - uint32 id() const { + uint32_t id() const { return id_; } - uint32 numPredecessors() const { + uint32_t numPredecessors() const { return predecessors_.length(); } - uint32 domIndex() const { + uint32_t domIndex() const { return domIndex_; } - void setDomIndex(uint32 d) { + void setDomIndex(uint32_t d) { domIndex_ = d; } - MBasicBlock *getPredecessor(uint32 i) const { + MBasicBlock *getPredecessor(uint32_t i) const { return predecessors_[i]; } MControlInstruction *lastIns() const { return lastIns_; } MPhiIterator phisBegin() const { return phis_.begin(); } @@ -272,20 +272,20 @@ class MBasicBlock : public TempObject, p return false; MBasicBlock *lastSuccessor = getSuccessor(numSuccessors() - 1); return lastSuccessor->isLoopHeader() && lastSuccessor->backedge() == this; } bool isSplitEdge() const { return kind_ == SPLIT_EDGE; } - uint32 stackDepth() const { + uint32_t stackDepth() const { return stackPosition_; } - void setStackDepth(uint32 depth) { + void setStackDepth(uint32_t depth) { stackPosition_ = depth; } bool isMarked() const { return mark_; } void mark() { mark_ = true; } @@ -326,17 +326,17 @@ class MBasicBlock : public TempObject, p numDominated_ += n; } bool addImmediatelyDominatedBlock(MBasicBlock *child); // This function retrieves the internal instruction associated with a // slot, and should not be used for normal stack operations. It is an // internal helper that is also used to enhance spew. - MDefinition *getSlot(uint32 index); + MDefinition *getSlot(uint32_t index); MResumePoint *entryResumePoint() const { return entryResumePoint_; } MResumePoint *callerResumePoint() { return entryResumePoint()->caller(); } void setCallerResumePoint(MResumePoint *caller) { @@ -356,39 +356,39 @@ class MBasicBlock : public TempObject, p void assignLir(LBlock *lir) { JS_ASSERT(!lir_); lir_ = lir; } MBasicBlock *successorWithPhis() const { return successorWithPhis_; } - uint32 positionInPhiSuccessor() const { + uint32_t positionInPhiSuccessor() const { return positionInPhiSuccessor_; } - void setSuccessorWithPhis(MBasicBlock *successor, uint32 id) { + void setSuccessorWithPhis(MBasicBlock *successor, uint32_t id) { successorWithPhis_ = successor; positionInPhiSuccessor_ = id; } size_t numSuccessors() const; MBasicBlock *getSuccessor(size_t index) const; // Specifies the closest loop header dominating this block. void setLoopHeader(MBasicBlock *loop) { JS_ASSERT(loop->isLoopHeader()); loopHeader_ = loop; } MBasicBlock *loopHeader() const { return loopHeader_; } - void setLoopDepth(uint32 loopDepth) { + void setLoopDepth(uint32_t loopDepth) { loopDepth_ = loopDepth; } - uint32 loopDepth() const { + uint32_t loopDepth() const { return loopDepth_; } bool strictModeCode() const { return info_.script()->strictModeCode; } void dumpStack(FILE *fp); @@ -405,28 +405,28 @@ class MBasicBlock : public TempObject, p private: MIRGraph &graph_; CompileInfo &info_; // Each block originates from a particular script. 
InlineList<MInstruction> instructions_; Vector<MBasicBlock *, 1, IonAllocPolicy> predecessors_; InlineForwardList<MPhi> phis_; FixedList<MDefinition *> slots_; - uint32 stackPosition_; + uint32_t stackPosition_; MControlInstruction *lastIns_; jsbytecode *pc_; - uint32 id_; - uint32 domIndex_; // Index in the dominator tree. + uint32_t id_; + uint32_t domIndex_; // Index in the dominator tree. LBlock *lir_; MStart *start_; MResumePoint *entryResumePoint_; MBasicBlock *successorWithPhis_; - uint32 positionInPhiSuccessor_; + uint32_t positionInPhiSuccessor_; Kind kind_; - uint32 loopDepth_; + uint32_t loopDepth_; // Utility mark for traversal algorithms. bool mark_; Vector<MBasicBlock *, 1, IonAllocPolicy> immediatelyDominated_; MBasicBlock *immediateDominator_; size_t numDominated_; MBasicBlock *loopHeader_; @@ -440,18 +440,18 @@ typedef InlineListReverseIterator<MBasic typedef Vector<MBasicBlock *, 1, IonAllocPolicy> MIRGraphExits; class MIRGraph { InlineList<MBasicBlock> blocks_; TempAllocator *alloc_; MIRGraphExits *exitAccumulator_; - uint32 blockIdGen_; - uint32 idGen_; + uint32_t blockIdGen_; + uint32_t idGen_; MBasicBlock *osrBlock_; MStart *osrStart_; // List of compiled/inlined scripts. Vector<JSScript *, 4, IonAllocPolicy> scripts_; #ifdef DEBUG size_t numBlocks_; @@ -540,26 +540,26 @@ class MIRGraph blocks_.remove(block); blocks_.pushBack(block); } #ifdef DEBUG size_t numBlocks() const { return numBlocks_; } #endif - uint32 numBlockIds() const { + uint32_t numBlockIds() const { return blockIdGen_; } void allocDefinitionId(MDefinition *ins) { // This intentionally starts above 0. The id 0 is in places used to // indicate a failure to perform an operation on an instruction. idGen_ += 2; ins->setId(idGen_); } - uint32 getMaxInstructionId() { + uint32_t getMaxInstructionId() { return idGen_; } MResumePoint *entryResumePoint() { return blocks_.begin()->entryResumePoint(); } void copyIds(const MIRGraph &other) { idGen_ = other.idGen_;
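One invariant in this header deserves a callout: allocDefinitionId() documents that instruction ids intentionally start above 0, because id 0 is used elsewhere to signal that an operation failed. A toy generator showing just that property; the += 2 stride is copied from the hunk, but its purpose is not stated here, so the sketch relies only on the zero-sentinel behavior:

    #include <cassert>
    #include <cstdint>

    class IdGen {
        uint32_t idGen_ = 0;
      public:
        uint32_t alloc() {
            idGen_ += 2;          // first id handed out is 2, never 0
            return idGen_;
        }
    };

    int main() {
        IdGen gen;
        uint32_t failure = 0;     // sentinel meaning "operation failed"
        uint32_t first = gen.alloc();
        assert(first != failure && gen.alloc() == first + 2);
        return 0;
    }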
--- a/js/src/ion/MoveResolver.h +++ b/js/src/ion/MoveResolver.h @@ -25,32 +25,32 @@ class MoveResolver enum Kind { REG, FLOAT_REG, ADDRESS, EFFECTIVE_ADDRESS }; Kind kind_; - uint32 code_; - int32 disp_; + uint32_t code_; + int32_t disp_; public: enum AddressKind { MEMORY = ADDRESS, EFFECTIVE = EFFECTIVE_ADDRESS }; MoveOperand() { } explicit MoveOperand(const Register ®) : kind_(REG), code_(reg.code()) { } explicit MoveOperand(const FloatRegister ®) : kind_(FLOAT_REG), code_(reg.code()) { } - MoveOperand(const Register ®, int32 disp, AddressKind addrKind = MEMORY) + MoveOperand(const Register ®, int32_t disp, AddressKind addrKind = MEMORY) : kind_((Kind) addrKind), code_(reg.code()), disp_(disp) { } MoveOperand(const MoveOperand &other) : kind_(other.kind_), code_(other.code_), disp_(other.disp_) @@ -77,17 +77,17 @@ class MoveResolver FloatRegister floatReg() const { JS_ASSERT(isFloatReg()); return FloatRegister::FromCode(code_); } Register base() const { JS_ASSERT(isMemory() || isEffectiveAddress()); return Register::FromCode(code_); } - int32 disp() const { + int32_t disp() const { return disp_; } bool operator ==(const MoveOperand &other) const { if (kind_ != other.kind_) return false; if (code_ != other.code_) return false;
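MoveOperand above is a small tagged operand: kind_ decides whether code_ names a general register, a float register, or a base register whose disp_ displacement forms an address (MEMORY reads [base + disp], EFFECTIVE denotes the computed address itself). A minimal re-creation of that shape; the hunk cuts off operator== after the code_ check, so treating disp as significant only for address kinds is an assumption:

    #include <cassert>
    #include <cstdint>

    struct Operand {
        enum Kind { REG, FLOAT_REG, ADDRESS, EFFECTIVE_ADDRESS } kind;
        uint32_t code;   // register number for every kind
        int32_t disp;    // only meaningful for the two address kinds

        bool operator==(const Operand &other) const {
            if (kind != other.kind || code != other.code)
                return false;
            // Assumed: displacement only matters when the operand is an
            // address (the real operator== is truncated in this hunk).
            if (kind == ADDRESS || kind == EFFECTIVE_ADDRESS)
                return disp == other.disp;
            return true;
        }
    };

    int main() {
        Operand a = { Operand::ADDRESS, 5, 16 };  // [r5 + 16]
        Operand b = { Operand::ADDRESS, 5, 16 };
        assert(a == b);
        return 0;
    }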
--- a/js/src/ion/RangeAnalysis.cpp
+++ b/js/src/ion/RangeAnalysis.cpp
@@ -131,17 +131,17 @@ RangeAnalysis::addBetaNobes()
        if (!test || !test->getOperand(0)->isCompare())
            continue;

        MCompare *compare = test->getOperand(0)->toCompare();

        MDefinition *left = compare->getOperand(0);
        MDefinition *right = compare->getOperand(1);

-        int32 bound;
+        int32_t bound;
        MDefinition *val = NULL;
        JSOp jsop = compare->jsop();

        if (branch_dir == FALSE_BRANCH)
            jsop = analyze::NegateCompareOp(jsop);

        if (left->isConstant() && left->toConstant()->value().isInt32()) {
@@ -430,30 +430,30 @@ Range::mul(const Range *lhs, const Range
    int64_t c = (int64_t)lhs->upper_ * (int64_t)rhs->lower_;
    int64_t d = (int64_t)lhs->upper_ * (int64_t)rhs->upper_;
    return new Range(
        Min( Min(a, b), Min(c, d) ),
        Max( Max(a, b), Max(c, d) ));
}

Range *
-Range::shl(const Range *lhs, int32 c)
+Range::shl(const Range *lhs, int32_t c)
{
    EnsureRange(&lhs);
-    int32 shift = c & 0x1f;
+    int32_t shift = c & 0x1f;
    return new Range(
        (int64_t)lhs->lower_ << shift,
        (int64_t)lhs->upper_ << shift);
}

Range *
-Range::shr(const Range *lhs, int32 c)
+Range::shr(const Range *lhs, int32_t c)
{
    EnsureRange(&lhs);
-    int32 shift = c & 0x1f;
+    int32_t shift = c & 0x1f;
    return new Range(
        (int64_t)lhs->lower_ >> shift,
        (int64_t)lhs->upper_ >> shift);
}

bool
Range::precisionLossMul(const Range *lhs, const Range *rhs)
{
@@ -583,29 +583,29 @@ MBitAnd::computeRange()
void
MLsh::computeRange()
{
    MDefinition *right = getOperand(1);
    if (!right->isConstant())
        return;

-    int32 c = right->toConstant()->value().toInt32();
+    int32_t c = right->toConstant()->value().toInt32();
    const Range *other = getOperand(0)->range();
    setRange(Range::shl(other, c));
}

void
MRsh::computeRange()
{
    MDefinition *right = getOperand(1);
    if (!right->isConstant())
        return;

-    int32 c = right->toConstant()->value().toInt32();
+    int32_t c = right->toConstant()->value().toInt32();
    Range *other = getOperand(0)->range();
    setRange(Range::shr(other, c));
}

void
MAbs::computeRange()
{
    if (specialization_ != MIRType_Int32)
@@ -867,17 +867,17 @@ RangeAnalysis::analyzeLoopIterationCount
    if (rhs) {
        if (!bound.add(rhs, 1))
            return NULL;
    }
    if (!bound.add(lhsInitial, -1))
        return NULL;

-    int32 lhsConstant;
+    int32_t lhsConstant;
    if (!SafeSub(0, lhs.constant, &lhsConstant))
        return NULL;
    if (!bound.add(lhsConstant))
        return NULL;
    } else if (lhsModified.constant == -1 && lessEqual) {
    // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
    // case, an upper bound on the number of backedges executed is:
    //
@@ -949,17 +949,17 @@ RangeAnalysis::analyzeLoopPhi(MBasicBloc
    // at most loopBound - 1 times. Thus, another upper or lower bound for the
    // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
    // ensure that loopBound >= 0.
    LinearSum limitSum(loopBound->sum);
    if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
        return;

-    int32 negativeConstant;
+    int32_t negativeConstant;
    if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
        return;

    if (modified.constant > 0) {
        phi->range()->setSymbolicLower(new SymbolicBound(NULL, initialSum));
        phi->range()->setSymbolicUpper(new SymbolicBound(loopBound, limitSum));
    } else {
        phi->range()->setSymbolicUpper(new SymbolicBound(NULL, initialSum));
@@ -1070,30 +1070,30 @@ RangeAnalysis::tryHoistBoundsCheck(MBasi
        return false;

    // We are checking that index + indexConstant >= 0, and know that
    // index >= lowerTerm + lowerConstant. Thus, check that:
    //
    // lowerTerm + lowerConstant + indexConstant >= 0
    // lowerTerm >= -lowerConstant - indexConstant

-    int32 lowerConstant = 0;
+    int32_t lowerConstant = 0;
    if (!SafeSub(lowerConstant, index.constant, &lowerConstant))
        return false;
    if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant))
        return false;

    MBoundsCheckLower *lowerCheck = MBoundsCheckLower::New(lowerTerm);
    lowerCheck->setMinimum(lowerConstant);

    // We are checking that index < boundsLength, and know that
    // index <= upperTerm + upperConstant. Thus, check that:
    //
    // upperTerm + upperConstant < boundsLength

-    int32 upperConstant = index.constant;
+    int32_t upperConstant = index.constant;
    if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant))
        return false;

    MBoundsCheck *upperCheck = MBoundsCheck::New(upperTerm, ins->length());
    upperCheck->setMinimum(upperConstant);
    upperCheck->setMaximum(upperConstant);

    // Hoist the loop invariant upper and lower bounds checks.
    preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
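Note why Range::shl() and Range::shr() mask the count with 0x1f before shifting 64-bit copies of the endpoints: ECMAScript (like x86) uses only the low five bits of a shift count, so shifting a range by 33 must behave like shifting it by 1. A standalone check of that masking rule:

    #include <cassert>
    #include <cstdint>

    struct SimpleRange { int64_t lower, upper; };

    static SimpleRange shl(SimpleRange r, int32_t c) {
        int32_t shift = c & 0x1f;                 // same masking as Range::shl
        return { r.lower << shift, r.upper << shift };
    }

    int main() {
        SimpleRange r = { 8, 100 };
        SimpleRange a = shl(r, 33);               // 33 & 0x1f == 1
        SimpleRange b = shl(r, 1);
        assert(a.lower == b.lower && a.upper == b.upper);
        assert(a.lower == 16 && a.upper == 200);
        return 0;
    }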
--- a/js/src/ion/RangeAnalysis.h +++ b/js/src/ion/RangeAnalysis.h @@ -110,19 +110,19 @@ class Range : public TempObject { // be infinite (and could overflow), when using this information to // propagate through other ranges, we disregard this fact; if that code // executes, then the overflow did not occur, so we may safely assume // that the range is [INT_MIN, INT_MAX] instead. // // To facilitate this trick, we maintain the invariants that: // 1) lower_infinite == true implies lower_ == JSVAL_INT_MIN // 2) upper_infinite == true implies upper_ == JSVAL_INT_MAX - int32 lower_; + int32_t lower_; bool lower_infinite_; - int32 upper_; + int32_t upper_; bool upper_infinite_; // Any symbolic lower or upper bound computed for this term. const SymbolicBound *symbolicLower_; const SymbolicBound *symbolicUpper_; public: Range() @@ -174,18 +174,18 @@ class Range : public TempObject { void unionWith(const Range *other); static Range * intersect(const Range *lhs, const Range *rhs, bool *emptyRange); static Range * addTruncate(const Range *lhs, const Range *rhs); static Range * subTruncate(const Range *lhs, const Range *rhs); static Range * add(const Range *lhs, const Range *rhs); static Range * sub(const Range *lhs, const Range *rhs); static Range * mul(const Range *lhs, const Range *rhs); static Range * and_(const Range *lhs, const Range *rhs); - static Range * shl(const Range *lhs, int32 c); - static Range * shr(const Range *lhs, int32 c); + static Range * shl(const Range *lhs, int32_t c); + static Range * shr(const Range *lhs, int32_t c); static bool precisionLossMul(const Range *lhs, const Range *rhs); static bool negativeZeroMul(const Range *lhs, const Range *rhs); inline void makeLowerInfinite() { lower_infinite_ = true; lower_ = JSVAL_INT_MIN; } @@ -204,41 +204,41 @@ class Range : public TempObject { inline bool isUpperInfinite() const { return upper_infinite_; } inline bool isFinite() const { return !isLowerInfinite() && !isUpperInfinite(); } - inline int32 lower() const { + inline int32_t lower() const { return lower_; } - inline int32 upper() const { + inline int32_t upper() const { return upper_; } inline void setLower(int64_t x) { if (x > JSVAL_INT_MAX) { // c.c lower_ = JSVAL_INT_MAX; } else if (x < JSVAL_INT_MIN) { makeLowerInfinite(); } else { - lower_ = (int32)x; + lower_ = (int32_t)x; lower_infinite_ = false; } } inline void setUpper(int64_t x) { if (x > JSVAL_INT_MAX) { makeUpperInfinite(); } else if (x < JSVAL_INT_MIN) { // c.c upper_ = JSVAL_INT_MIN; } else { - upper_ = (int32)x; + upper_ = (int32_t)x; upper_infinite_ = false; } } void set(int64_t l, int64_t h) { setLower(l); setUpper(h); }
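setLower() and setUpper() are where invariants (1) and (2) from the comment above are enforced: a 64-bit candidate outside the int32 range either saturates or flips the matching *_infinite_ flag while pinning the stored endpoint. A sketch of the lower-bound half, with INT32_MIN/INT32_MAX standing in for JSVAL_INT_MIN/JSVAL_INT_MAX:

    #include <cassert>
    #include <cstdint>

    struct Bound {
        int32_t lower;
        bool lowerInfinite;

        // Mirrors Range::setLower(): too-small values make the bound
        // infinite, too-large values saturate at the int32 maximum (the
        // "c.c" case in the hunk above).
        void setLower(int64_t x) {
            if (x > INT32_MAX) {
                lower = INT32_MAX;
            } else if (x < INT32_MIN) {
                lowerInfinite = true;
                lower = INT32_MIN;    // invariant: infinite => stored INT32_MIN
            } else {
                lower = int32_t(x);
                lowerInfinite = false;
            }
        }
    };

    int main() {
        Bound b = { 0, false };
        b.setLower(int64_t(INT32_MIN) - 1);
        assert(b.lowerInfinite && b.lower == INT32_MIN);
        b.setLower(42);
        assert(!b.lowerInfinite && b.lower == 42);
        return 0;
    }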
--- a/js/src/ion/RegisterAllocator.cpp +++ b/js/src/ion/RegisterAllocator.cpp @@ -38,33 +38,33 @@ AllocationIntegrityState::record() if (!blockInfo.phis.reserve(block->numPhis())) return false; for (size_t j = 0; j < block->numPhis(); j++) { blockInfo.phis.infallibleAppend(InstructionInfo()); InstructionInfo &info = blockInfo.phis[j]; LPhi *phi = block->getPhi(j); for (size_t k = 0; k < phi->numDefs(); k++) { - uint32 vreg = phi->getDef(k)->virtualRegister(); + uint32_t vreg = phi->getDef(k)->virtualRegister(); virtualRegisters[vreg] = phi->getDef(k); if (!info.outputs.append(vreg)) return false; } for (size_t k = 0; k < phi->numOperands(); k++) { if (!info.inputs.append(phi->getOperand(k)->toUse()->virtualRegister())) return false; } } for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { LInstruction *ins = *iter; InstructionInfo &info = instructions[ins->id()]; for (size_t k = 0; k < ins->numDefs(); k++) { - uint32 vreg = ins->getDef(k)->virtualRegister(); + uint32_t vreg = ins->getDef(k)->virtualRegister(); virtualRegisters[vreg] = ins->getDef(k); if (!info.outputs.append(vreg)) return false; } for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) { if (!info.inputs.append(alloc->isUse() ? alloc->toUse()->virtualRegister() : UINT32_MAX)) return false; } @@ -117,17 +117,17 @@ AllocationIntegrityState::check(bool pop for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) { LBlock *block = graph.getBlock(blockIndex); for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { LInstruction *ins = *iter; const InstructionInfo &info = instructions[ins->id()]; size_t inputIndex = 0; for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) { - uint32 vreg = info.inputs[inputIndex++]; + uint32_t vreg = info.inputs[inputIndex++]; if (vreg == UINT32_MAX) continue; // Start checking at the previous instruction, in case this // instruction reuses its input register for an output. LInstructionReverseIterator riter = block->rbegin(ins); riter++; checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints); @@ -144,17 +144,17 @@ AllocationIntegrityState::check(bool pop if (IonSpewEnabled(IonSpew_RegAlloc)) dump(); return true; } bool AllocationIntegrityState::checkIntegrity(LBlock *block, LInstruction *ins, - uint32 vreg, LAllocation alloc, bool populateSafepoints) + uint32_t vreg, LAllocation alloc, bool populateSafepoints) { for (LInstructionReverseIterator iter(block->rbegin(ins)); iter != block->rend(); iter++) { ins = *iter; // Follow values through assignments in move groups. All assignments in // a move group are considered to happen simultaneously, so stop after // the first matching move is found. if (ins->isMoveGroup()) { @@ -249,17 +249,17 @@ AllocationIntegrityState::checkIntegrity // is one which produced this vreg. We need to follow back through the phi // inputs as it is not guaranteed the register allocator filled in physical // allocations for the inputs and outputs of the phis. 
for (size_t i = 0; i < block->numPhis(); i++) { InstructionInfo &info = blocks[block->mir()->id()].phis[i]; LPhi *phi = block->getPhi(i); if (info.outputs[0] == vreg) { for (size_t j = 0; j < phi->numOperands(); j++) { - uint32 newvreg = info.inputs[j]; + uint32_t newvreg = info.inputs[j]; LBlock *predecessor = graph.getBlock(block->mir()->getPredecessor(j)->id()); if (!addPredecessor(predecessor, newvreg, alloc)) return false; } return true; } } @@ -270,17 +270,17 @@ AllocationIntegrityState::checkIntegrity if (!addPredecessor(predecessor, vreg, alloc)) return false; } return true; } bool -AllocationIntegrityState::addPredecessor(LBlock *block, uint32 vreg, LAllocation alloc) +AllocationIntegrityState::addPredecessor(LBlock *block, uint32_t vreg, LAllocation alloc) { // There is no need to reanalyze if we have already seen this predecessor. // We share the seen allocations across analysis of each use, as there will // likely be common ground between different uses of the same vreg. IntegrityItem item; item.block = block; item.vreg = vreg; item.alloc = alloc; @@ -363,17 +363,17 @@ AllocationIntegrityState::dump() LDefinition *def = ins->getDef(i); printf(" [def v%u ", info.outputs[i]); LAllocation::PrintAllocation(stdout, def->output()); printf("]"); } size_t index = 0; for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) { - uint32 vreg = info.inputs[index++]; + uint32_t vreg = info.inputs[index++]; if (vreg == UINT32_MAX) continue; printf(" [use v%u ", vreg); LAllocation::PrintAllocation(stdout, *alloc); printf("]"); } printf("\n"); @@ -423,34 +423,34 @@ RegisterAllocator::init() insData[phi].init(phi, block); } } return true; } LMoveGroup * -RegisterAllocator::getInputMoveGroup(uint32 ins) +RegisterAllocator::getInputMoveGroup(uint32_t ins) { InstructionData *data = &insData[ins]; JS_ASSERT(!data->ins()->isPhi()); JS_ASSERT(!data->ins()->isLabel()); if (data->inputMoves()) return data->inputMoves(); LMoveGroup *moves = new LMoveGroup; data->setInputMoves(moves); data->block()->insertBefore(data->ins(), moves); return moves; } LMoveGroup * -RegisterAllocator::getMoveGroupAfter(uint32 ins) +RegisterAllocator::getMoveGroupAfter(uint32_t ins) { InstructionData *data = &insData[ins]; JS_ASSERT(!data->ins()->isPhi()); if (data->movesAfter()) return data->movesAfter(); LMoveGroup *moves = new LMoveGroup;
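getInputMoveGroup() and getMoveGroupAfter() above share a create-on-first-use shape: the first request allocates the LMoveGroup, caches it in the per-instruction data, and inserts it next to the instruction, so every later move for that position joins the same group. The caching skeleton reduced to a standalone sketch (block insertion elided):

    #include <cassert>
    #include <vector>

    struct MoveGroup { std::vector<int> moves; };

    struct InsData {
        MoveGroup *inputMoves = nullptr;

        MoveGroup *getInputMoveGroup() {
            if (inputMoves)
                return inputMoves;         // reuse the group made earlier
            inputMoves = new MoveGroup();  // first request: create and cache
            return inputMoves;
        }
    };

    int main() {
        InsData data;
        MoveGroup *a = data.getInputMoveGroup();
        MoveGroup *b = data.getInputMoveGroup();
        assert(a == b);                    // one group per instruction position
        delete a;
        return 0;
    }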
--- a/js/src/ion/RegisterAllocator.h +++ b/js/src/ion/RegisterAllocator.h @@ -50,18 +50,18 @@ struct AllocationIntegrityState // For all instructions and phis in the graph, keep track of the virtual // registers for all inputs and outputs of the nodes. These are overwritten // in place during register allocation. This information is kept on the // side rather than in the instructions and phis themselves to avoid // debug-builds-only bloat in the size of the involved structures. struct InstructionInfo { - Vector<uint32, 2, SystemAllocPolicy> inputs; - Vector<uint32, 1, SystemAllocPolicy> outputs; + Vector<uint32_t, 2, SystemAllocPolicy> inputs; + Vector<uint32_t, 1, SystemAllocPolicy> outputs; InstructionInfo() {} InstructionInfo(const InstructionInfo &o) { for (size_t i = 0; i < o.inputs.length(); i++) inputs.append(o.inputs[i]); for (size_t i = 0; i < o.outputs.length(); i++) outputs.append(o.outputs[i]); } }; @@ -80,21 +80,21 @@ struct AllocationIntegrityState Vector<LDefinition*, 20, SystemAllocPolicy> virtualRegisters; // Describes a correspondence that should hold at the end of a block. // The value which was written to vreg in the original LIR should be // physically stored in alloc after the register allocation. struct IntegrityItem { LBlock *block; - uint32 vreg; + uint32_t vreg; LAllocation alloc; // Order of insertion into seen, for sorting. - uint32 index; + uint32_t index; typedef IntegrityItem Lookup; static HashNumber hash(const IntegrityItem &item) { HashNumber hash = item.alloc.hash(); hash = JS_ROTATE_LEFT32(hash, 4) ^ item.vreg; hash = JS_ROTATE_LEFT32(hash, 4) ^ HashNumber(item.block->mir()->id()); return hash; } @@ -107,19 +107,19 @@ struct AllocationIntegrityState // Items still to be processed. Vector<IntegrityItem, 10, SystemAllocPolicy> worklist; // Set of all items that have already been processed. typedef HashSet<IntegrityItem, IntegrityItem, SystemAllocPolicy> IntegrityItemSet; IntegrityItemSet seen; - bool checkIntegrity(LBlock *block, LInstruction *ins, uint32 vreg, LAllocation alloc, + bool checkIntegrity(LBlock *block, LInstruction *ins, uint32_t vreg, LAllocation alloc, bool populateSafepoints); - bool addPredecessor(LBlock *block, uint32 vreg, LAllocation alloc); + bool addPredecessor(LBlock *block, uint32_t vreg, LAllocation alloc); void check(bool cond, const char *msg); void dump(); }; // Represents with better-than-instruction precision a position in the // instruction stream. // @@ -128,49 +128,49 @@ struct AllocationIntegrityState // this instruction, it can be clobbered in the output". Just having ranges // of instruction IDs is insufficiently expressive to denote all possibilities. // This class solves this issue by associating an extra bit with the instruction // ID which indicates whether the position is the input half or output half of // an instruction. class CodePosition { private: - CodePosition(const uint32 &bits) + CodePosition(const uint32_t &bits) : bits_(bits) { } static const unsigned int INSTRUCTION_SHIFT = 1; static const unsigned int SUBPOSITION_MASK = 1; - uint32 bits_; + uint32_t bits_; public: static const CodePosition MAX; static const CodePosition MIN; // This is the half of the instruction this code position represents, as // described in the huge comment above. 
enum SubPosition { INPUT, OUTPUT }; CodePosition() : bits_(0) { } - CodePosition(uint32 instruction, SubPosition where) { + CodePosition(uint32_t instruction, SubPosition where) { JS_ASSERT(instruction < 0x80000000u); - JS_ASSERT(((uint32)where & SUBPOSITION_MASK) == (uint32)where); - bits_ = (instruction << INSTRUCTION_SHIFT) | (uint32)where; + JS_ASSERT(((uint32_t)where & SUBPOSITION_MASK) == (uint32_t)where); + bits_ = (instruction << INSTRUCTION_SHIFT) | (uint32_t)where; } - uint32 ins() const { + uint32_t ins() const { return bits_ >> INSTRUCTION_SHIFT; } - uint32 pos() const { + uint32_t pos() const { return bits_; } SubPosition subpos() const { return (SubPosition)(bits_ & SUBPOSITION_MASK); } bool operator <(const CodePosition &other) const { @@ -241,42 +241,42 @@ class InstructionData return movesAfter_; } }; // Structure to track all moves inserted next to instructions in a graph. class InstructionDataMap { InstructionData *insData_; - uint32 numIns_; + uint32_t numIns_; public: InstructionDataMap() : insData_(NULL), numIns_(0) { } - bool init(MIRGenerator *gen, uint32 numInstructions) { + bool init(MIRGenerator *gen, uint32_t numInstructions) { insData_ = gen->allocate<InstructionData>(numInstructions); numIns_ = numInstructions; if (!insData_) return false; memset(insData_, 0, sizeof(InstructionData) * numInstructions); return true; } InstructionData &operator[](const CodePosition &pos) { JS_ASSERT(pos.ins() < numIns_); return insData_[pos.ins()]; } InstructionData &operator[](LInstruction *ins) { JS_ASSERT(ins->id() < numIns_); return insData_[ins->id()]; } - InstructionData &operator[](uint32 ins) { + InstructionData &operator[](uint32_t ins) { JS_ASSERT(ins < numIns_); return insData_[ins]; } }; // Common superclass for register allocators. class RegisterAllocator { @@ -301,31 +301,31 @@ class RegisterAllocator { if (FramePointer != InvalidReg && lir->mir()->instrumentedProfiling()) allRegisters_.take(AnyRegister(FramePointer)); } protected: bool init(); - CodePosition outputOf(uint32 pos) { + CodePosition outputOf(uint32_t pos) { return CodePosition(pos, CodePosition::OUTPUT); } CodePosition outputOf(LInstruction *ins) { return CodePosition(ins->id(), CodePosition::OUTPUT); } - CodePosition inputOf(uint32 pos) { + CodePosition inputOf(uint32_t pos) { return CodePosition(pos, CodePosition::INPUT); } CodePosition inputOf(LInstruction *ins) { return CodePosition(ins->id(), CodePosition::INPUT); } - LMoveGroup *getInputMoveGroup(uint32 ins); - LMoveGroup *getMoveGroupAfter(uint32 ins); + LMoveGroup *getInputMoveGroup(uint32_t ins); + LMoveGroup *getMoveGroupAfter(uint32_t ins); LMoveGroup *getInputMoveGroup(CodePosition pos) { return getInputMoveGroup(pos.ins()); } LMoveGroup *getMoveGroupAfter(CodePosition pos) { return getMoveGroupAfter(pos.ins()); } };
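CodePosition above packs an instruction id and an input/output half into one uint32_t, which is why positions order correctly under a plain integer compare: for the same instruction, the INPUT half (low bit 0) sorts before the OUTPUT half (low bit 1). A round trip using the same shift and mask:

    #include <cassert>
    #include <cstdint>

    enum SubPosition { INPUT = 0, OUTPUT = 1 };

    static const unsigned INSTRUCTION_SHIFT = 1;
    static const unsigned SUBPOSITION_MASK = 1;

    static uint32_t makePos(uint32_t ins, SubPosition where) {
        return (ins << INSTRUCTION_SHIFT) | uint32_t(where);
    }

    int main() {
        uint32_t in = makePos(7, INPUT);
        uint32_t out = makePos(7, OUTPUT);
        assert((in >> INSTRUCTION_SHIFT) == 7);          // ins()
        assert((out & SUBPOSITION_MASK) == OUTPUT);      // subpos()
        assert(in < out);                                // input half sorts first
        return 0;
    }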
--- a/js/src/ion/RegisterSets.h +++ b/js/src/ion/RegisterSets.h @@ -10,20 +10,20 @@ #include "Registers.h" #include "TypeOracle.h" namespace js { namespace ion { struct AnyRegister { - typedef uint32 Code; + typedef uint32_t Code; - static const uint32 Total = Registers::Total + FloatRegisters::Total; - static const uint32 Invalid = UINT_MAX; + static const uint32_t Total = Registers::Total + FloatRegisters::Total; + static const uint32_t Invalid = UINT_MAX; union { Registers::Code gpr_; FloatRegisters::Code fpu_; }; bool isFloat_; AnyRegister() @@ -31,17 +31,17 @@ struct AnyRegister { explicit AnyRegister(Register gpr) { gpr_ = gpr.code(); isFloat_ = false; } explicit AnyRegister(FloatRegister fpu) { fpu_ = fpu.code(); isFloat_ = true; } - static AnyRegister FromCode(uint32 i) { + static AnyRegister FromCode(uint32_t i) { JS_ASSERT(i < Total); AnyRegister r; if (i < Registers::Total) { r.gpr_ = Register::Code(i); r.isFloat_ = false; } else { r.fpu_ = FloatRegister::Code(i - Registers::Total); r.isFloat_ = true; @@ -273,20 +273,20 @@ struct Int32Key { inline bool isConstant() const { return !isRegister_; } }; template <typename T> class TypedRegisterSet { - uint32 bits_; + uint32_t bits_; public: - explicit TypedRegisterSet(uint32 bits) + explicit TypedRegisterSet(uint32_t bits) : bits_(bits) { } TypedRegisterSet() : bits_(0) { } TypedRegisterSet(const TypedRegisterSet<T> &set) : bits_(set.bits_) { } @@ -300,17 +300,17 @@ class TypedRegisterSet static inline TypedRegisterSet Union(const TypedRegisterSet &lhs, const TypedRegisterSet &rhs) { return TypedRegisterSet(lhs.bits_ | rhs.bits_); } static inline TypedRegisterSet Not(const TypedRegisterSet &in) { return TypedRegisterSet(~in.bits_ & T::Codes::AllocatableMask); } static inline TypedRegisterSet VolatileNot(const TypedRegisterSet &in) { - const uint32 allocatableVolatile = + const uint32_t allocatableVolatile = T::Codes::AllocatableMask & T::Codes::VolatileMask; return TypedRegisterSet(~in.bits_ & allocatableVolatile); } static inline TypedRegisterSet Volatile() { return TypedRegisterSet(T::Codes::AllocatableMask & T::Codes::VolatileMask); } void intersect(TypedRegisterSet other) { bits_ &= ~other.bits_; @@ -348,24 +348,24 @@ class TypedRegisterSet JS_ASSERT(!empty()); T reg = getAny(); take(reg); return reg; } void clear() { bits_ = 0; } - uint32 bits() const { + uint32_t bits() const { return bits_; } - uint32 size() const { - uint32 sum2 = (bits_ & 0x55555555) + ((bits_ & 0xaaaaaaaa) >> 1); - uint32 sum4 = (sum2 & 0x33333333) + ((sum2 & 0xcccccccc) >> 2); - uint32 sum8 = (sum4 & 0x0f0f0f0f) + ((sum4 & 0xf0f0f0f0) >> 4); - uint32 sum16 = (sum8 & 0x00ff00ff) + ((sum8 & 0xff00ff00) >> 8); + uint32_t size() const { + uint32_t sum2 = (bits_ & 0x55555555) + ((bits_ & 0xaaaaaaaa) >> 1); + uint32_t sum4 = (sum2 & 0x33333333) + ((sum2 & 0xcccccccc) >> 2); + uint32_t sum8 = (sum4 & 0x0f0f0f0f) + ((sum4 & 0xf0f0f0f0) >> 4); + uint32_t sum16 = (sum8 & 0x00ff00ff) + ((sum8 & 0xff00ff00) >> 8); return sum16; } bool operator ==(const TypedRegisterSet<T> &other) const { return other.bits_ == bits_; } }; typedef TypedRegisterSet<Register> GeneralRegisterSet;
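TypedRegisterSet::size() is a SWAR population count, but note that it stops after folding bytes into 16-bit halves; the two halves are never summed, so the result equals the true count only while set bits stay within the low 16 bits, which appears safe as long as no register class exceeds 16 allocatable registers. A standalone check against a naive bit loop on such masks:

    #include <cassert>
    #include <cstdint>

    // Same pairwise-sum steps as size() above, including the missing final
    // 16-bit fold; correct for masks confined to the low 16 bits.
    static uint32_t swarPopcount(uint32_t bits) {
        uint32_t sum2  = (bits & 0x55555555) + ((bits & 0xaaaaaaaa) >> 1);
        uint32_t sum4  = (sum2 & 0x33333333) + ((sum2 & 0xcccccccc) >> 2);
        uint32_t sum8  = (sum4 & 0x0f0f0f0f) + ((sum4 & 0xf0f0f0f0) >> 4);
        uint32_t sum16 = (sum8 & 0x00ff00ff) + ((sum8 & 0xff00ff00) >> 8);
        return sum16;
    }

    int main() {
        for (uint32_t bits : { 0u, 1u, 0xf0f0u, 0xffffu }) {
            uint32_t naive = 0;
            for (uint32_t b = bits; b; b >>= 1)
                naive += b & 1;
            assert(swarPopcount(bits) == naive);
        }
        return 0;
    }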
--- a/js/src/ion/Registers.h +++ b/js/src/ion/Registers.h @@ -28,23 +28,23 @@ namespace js { namespace ion { struct Register { typedef Registers Codes; typedef Codes::Code Code; typedef js::ion::Registers::RegisterID RegisterID; Code code_; - static Register FromCode(uint32 i) { + static Register FromCode(uint32_t i) { JS_ASSERT(i < Registers::Total); Register r = { (Registers::Code)i }; return r; } Code code() const { - JS_ASSERT((uint32)code_ < Registers::Total); + JS_ASSERT((uint32_t)code_ < Registers::Total); return code_; } const char *name() const { return Registers::GetName(code()); } bool operator ==(const Register &other) const { return code_ == other.code_; } @@ -57,23 +57,23 @@ struct Register { }; struct FloatRegister { typedef FloatRegisters Codes; typedef Codes::Code Code; Code code_; - static FloatRegister FromCode(uint32 i) { + static FloatRegister FromCode(uint32_t i) { JS_ASSERT(i < FloatRegisters::Total); FloatRegister r = { (FloatRegisters::Code)i }; return r; } Code code() const { - JS_ASSERT((uint32)code_ < FloatRegisters::Total); + JS_ASSERT((uint32_t)code_ < FloatRegisters::Total); return code_; } const char *name() const { return FloatRegisters::GetName(code()); } bool operator ==(const FloatRegister &other) const { return code_ == other.code_; }
--- a/js/src/ion/Safepoints.cpp
+++ b/js/src/ion/Safepoints.cpp
@@ -8,40 +8,40 @@
#include "Safepoints.h"
#include "IonSpewer.h"
#include "LIR.h"

using namespace js;
using namespace ion;

bool
-SafepointWriter::init(uint32 slotCount)
+SafepointWriter::init(uint32_t slotCount)
{
    frameSlots_ = BitSet::New(slotCount);
    if (!frameSlots_)
        return false;

    return true;
}

-uint32
+uint32_t
SafepointWriter::startEntry()
{
    IonSpew(IonSpew_Safepoints, "Encoding safepoint (position %d):", stream_.length());
-    return uint32(stream_.length());
+    return uint32_t(stream_.length());
}

void
-SafepointWriter::writeOsiCallPointOffset(uint32 osiCallPointOffset)
+SafepointWriter::writeOsiCallPointOffset(uint32_t osiCallPointOffset)
{
    stream_.writeUnsigned(osiCallPointOffset);
}

static void
-WriteRegisterMask(CompactBufferWriter &stream, uint32 bits)
+WriteRegisterMask(CompactBufferWriter &stream, uint32_t bits)
{
    if (sizeof(PackedRegisterMask) == 8)
        stream.writeByte(bits);
    else
        stream.writeUnsigned(bits);
}

void
@@ -75,56 +75,56 @@ SafepointWriter::writeGcRegs(LSafepoint
            : "any";
        IonSpew(IonSpew_Safepoints, " %s reg: %s", type, (*iter).name());
    }
}
#endif
}

static void
-MapSlotsToBitset(BitSet *set, CompactBufferWriter &stream, uint32 nslots, uint32 *slots)
+MapSlotsToBitset(BitSet *set, CompactBufferWriter &stream, uint32_t nslots, uint32_t *slots)
{
    set->clear();

-    for (uint32 i = 0; i < nslots; i++) {
+    for (uint32_t i = 0; i < nslots; i++) {
        // Slots are represented at a distance from |fp|. Since the stack grows
        // down, this means slots start at index 1, so we subtract 1 to pack
        // the bitset.
        set->insert(slots[i] - 1);
    }

    size_t count = set->rawLength();
-    uint32 *words = set->raw();
+    uint32_t *words = set->raw();
    for (size_t i = 0; i < count; i++)
        stream.writeUnsigned(words[i]);
}

void
SafepointWriter::writeGcSlots(LSafepoint *safepoint)
{
    LSafepoint::SlotList &slots = safepoint->gcSlots();

#ifdef DEBUG
-    for (uint32 i = 0; i < slots.length(); i++)
+    for (uint32_t i = 0; i < slots.length(); i++)
        IonSpew(IonSpew_Safepoints, " gc slot: %d", slots[i]);
#endif

    MapSlotsToBitset(frameSlots_, stream_, slots.length(), slots.begin());
}

void
SafepointWriter::writeValueSlots(LSafepoint *safepoint)
{
    LSafepoint::SlotList &slots = safepoint->valueSlots();

#ifdef DEBUG
-    for (uint32 i = 0; i < slots.length(); i++)
+    for (uint32_t i = 0; i < slots.length(); i++)
        IonSpew(IonSpew_Safepoints, " gc value: %d", slots[i]);
#endif

    MapSlotsToBitset(frameSlots_, stream_, slots.length(), slots.begin());
}

#if defined(DEBUG) && defined(JS_NUNBOX32)
static void
@@ -142,42 +142,42 @@ DumpNunboxPart(const LAllocation &a)
// Nunbox part encoding:
//
// Reg = 000
// Stack = 001
// Arg = 010
//
// [vwu] nentries:
-// uint16: tttp ppXX XXXY YYYY
+// uint16_t: tttp ppXX XXXY YYYY
//
// If ttt = Reg, type is reg XXXXX
// If ppp = Reg, payload is reg YYYYY
//
// If ttt != Reg, type is:
//     XXXXX if not 11111, otherwise followed by [vwu]
// If ppp != Reg, payload is:
//     YYYYY if not 11111, otherwise followed by [vwu]
//
enum NunboxPartKind {
    Part_Reg,
    Part_Stack,
    Part_Arg
};

-static const uint32 PART_KIND_BITS = 3;
-static const uint32 PART_KIND_MASK = (1 << PART_KIND_BITS) - 1;
-static const uint32 PART_INFO_BITS = 5;
-static const uint32 PART_INFO_MASK = (1 << PART_INFO_BITS) - 1;
+static const uint32_t PART_KIND_BITS = 3;
+static const uint32_t PART_KIND_MASK = (1 << PART_KIND_BITS) - 1;
+static const uint32_t PART_INFO_BITS = 5;
+static const uint32_t PART_INFO_MASK = (1 << PART_INFO_BITS) - 1;

-static const uint32 MAX_INFO_VALUE = (1 << PART_INFO_BITS) - 1;
-static const uint32 TYPE_KIND_SHIFT = 16 - PART_KIND_BITS;
-static const uint32 PAYLOAD_KIND_SHIFT = TYPE_KIND_SHIFT - PART_KIND_BITS;
-static const uint32 TYPE_INFO_SHIFT = PAYLOAD_KIND_SHIFT - PART_INFO_BITS;
-static const uint32 PAYLOAD_INFO_SHIFT = TYPE_INFO_SHIFT - PART_INFO_BITS;
+static const uint32_t MAX_INFO_VALUE = (1 << PART_INFO_BITS) - 1;
+static const uint32_t TYPE_KIND_SHIFT = 16 - PART_KIND_BITS;
+static const uint32_t PAYLOAD_KIND_SHIFT = TYPE_KIND_SHIFT - PART_KIND_BITS;
+static const uint32_t TYPE_INFO_SHIFT = PAYLOAD_KIND_SHIFT - PART_INFO_BITS;
+static const uint32_t PAYLOAD_INFO_SHIFT = TYPE_INFO_SHIFT - PART_INFO_BITS;

JS_STATIC_ASSERT(PAYLOAD_INFO_SHIFT == 0);

static inline NunboxPartKind
AllocationToPartKind(const LAllocation &a)
{
    if (a.isRegister())
        return Part_Reg;
@@ -187,17 +187,17 @@ AllocationToPartKind(const LAllocation &
        return Part_Arg;
}

// gcc 4.5 doesn't actually inline CanEncodeInfoInHeader when only
// using the "inline" keyword, and miscompiles the function as well
// when doing block reordering with branch prediction information.
// See bug 799295 comment 71.
static MOZ_ALWAYS_INLINE bool
-CanEncodeInfoInHeader(const LAllocation &a, uint32 *out)
+CanEncodeInfoInHeader(const LAllocation &a, uint32_t *out)
{
    if (a.isGeneralReg()) {
        *out = a.toGeneralReg()->reg().code();
        return true;
    }

    if (a.isStackSlot())
        *out = a.toStackSlot()->slot();
@@ -210,17 +210,17 @@ CanEncodeInfoInHeader(const LAllocation
#ifdef JS_NUNBOX32
void
SafepointWriter::writeNunboxParts(LSafepoint *safepoint)
{
    LSafepoint::NunboxList &entries = safepoint->nunboxParts();

# ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Safepoints)) {
-        for (uint32 i = 0; i < entries.length(); i++) {
+        for (uint32_t i = 0; i < entries.length(); i++) {
            IonSpewHeader(IonSpew_Safepoints);
            fprintf(IonSpewFile, " nunbox (type in ");
            DumpNunboxPart(entries[i].type);
            fprintf(IonSpewFile, ", payload in ");
            DumpNunboxPart(entries[i].payload);
            fprintf(IonSpewFile, ")\n");
        }
    }
@@ -229,62 +229,62 @@ SafepointWriter::writeNunboxParts(LSafep
    // Safepoints are permitted to have partially filled in entries for nunboxes,
    // provided that only the type is live and not the payload. Omit these from
    // the written safepoint.
    //
    // Note that partial entries typically appear when one part of a nunbox is
    // stored in multiple places, in which case we will end up with incomplete
    // information about all the places the value is stored. This will need to
    // be fixed when the GC is permitted to move structures.
- uint32 partials = safepoint->partialNunboxes(); + uint32_t partials = safepoint->partialNunboxes(); stream_.writeUnsigned(entries.length() - partials); for (size_t i = 0; i < entries.length(); i++) { SafepointNunboxEntry &entry = entries[i]; if (entry.type.isUse() || entry.payload.isUse()) { partials--; continue; } - uint16 header = 0; + uint16_t header = 0; header |= (AllocationToPartKind(entry.type) << TYPE_KIND_SHIFT); header |= (AllocationToPartKind(entry.payload) << PAYLOAD_KIND_SHIFT); - uint32 typeVal; + uint32_t typeVal; bool typeExtra = !CanEncodeInfoInHeader(entry.type, &typeVal); if (!typeExtra) header |= (typeVal << TYPE_INFO_SHIFT); else header |= (MAX_INFO_VALUE << TYPE_INFO_SHIFT); - uint32 payloadVal; + uint32_t payloadVal; bool payloadExtra = !CanEncodeInfoInHeader(entry.payload, &payloadVal); if (!payloadExtra) header |= (payloadVal << PAYLOAD_INFO_SHIFT); else header |= (MAX_INFO_VALUE << PAYLOAD_INFO_SHIFT); - stream_.writeFixedUint16(header); + stream_.writeFixedUint16_t(header); if (typeExtra) stream_.writeUnsigned(typeVal); if (payloadExtra) stream_.writeUnsigned(payloadVal); } JS_ASSERT(partials == 0); } #endif void SafepointWriter::encode(LSafepoint *safepoint) { - uint32 safepointOffset = startEntry(); + uint32_t safepointOffset = startEntry(); JS_ASSERT(safepoint->osiCallPointOffset()); writeOsiCallPointOffset(safepoint->osiCallPointOffset()); writeGcRegs(safepoint); writeGcSlots(safepoint); writeValueSlots(safepoint); @@ -294,17 +294,17 @@ SafepointWriter::encode(LSafepoint *safe endEntry(); safepoint->setOffset(safepointOffset); } void SafepointWriter::endEntry() { - IonSpew(IonSpew_Safepoints, " -- entry ended at %d", uint32(stream_.length())); + IonSpew(IonSpew_Safepoints, " -- entry ended at %d", uint32_t(stream_.length())); } SafepointReader::SafepointReader(IonScript *script, const SafepointIndex *si) : stream_(script->safepoints() + si->safepointOffset(), script->safepoints() + script->safepointsSize()), frameSlots_(script->frameSlots()) { osiCallPointOffset_ = stream_.readUnsigned(); @@ -319,17 +319,17 @@ SafepointReader::SafepointReader(IonScri #ifdef JS_PUNBOX64 valueSpills_ = GeneralRegisterSet(stream_.readUnsigned()); #endif } advanceFromGcRegs(); } -uint32 +uint32_t SafepointReader::osiReturnPointOffset() const { return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize(); } CodeLocationLabel SafepointReader::InvalidationPatchPoint(IonScript *script, const SafepointIndex *si) { @@ -341,60 +341,60 @@ SafepointReader::InvalidationPatchPoint( void SafepointReader::advanceFromGcRegs() { currentSlotChunkNumber_ = 0; currentSlotChunk_ = stream_.readUnsigned(); } bool -SafepointReader::getSlotFromBitmap(uint32 *slot) +SafepointReader::getSlotFromBitmap(uint32_t *slot) { while (currentSlotChunk_ == 0) { currentSlotChunkNumber_++; // Are there any more chunks to read? if (currentSlotChunkNumber_ == BitSet::RawLengthForBits(frameSlots_)) return false; // Yes, read the next chunk. currentSlotChunk_ = stream_.readUnsigned(); } // The current chunk still has bits in it, so get the next bit, then mask // it out of the slot chunk. - uint32 bit; + uint32_t bit; JS_FLOOR_LOG2(bit, currentSlotChunk_); currentSlotChunk_ &= ~(1 << bit); // Return the slot, taking care to add 1 back in since it was subtracted // when added in the original bitset. 
- *slot = (currentSlotChunkNumber_ * sizeof(uint32) * 8) + bit + 1; + *slot = (currentSlotChunkNumber_ * sizeof(uint32_t) * 8) + bit + 1; return true; } bool -SafepointReader::getGcSlot(uint32 *slot) +SafepointReader::getGcSlot(uint32_t *slot) { if (getSlotFromBitmap(slot)) return true; advanceFromGcSlots(); return false; } void SafepointReader::advanceFromGcSlots() { // No, reset the counter. currentSlotChunkNumber_ = 0; currentSlotChunk_ = stream_.readUnsigned(); } bool -SafepointReader::getValueSlot(uint32 *slot) +SafepointReader::getValueSlot(uint32_t *slot) { if (getSlotFromBitmap(slot)) return true; advanceFromValueSlots(); return false; } void @@ -403,17 +403,17 @@ SafepointReader::advanceFromValueSlots() #ifdef JS_NUNBOX32 nunboxSlotsRemaining_ = stream_.readUnsigned(); #else nunboxSlotsRemaining_ = 0; #endif } static inline LAllocation -PartFromStream(CompactBufferReader &stream, NunboxPartKind kind, uint32 info) +PartFromStream(CompactBufferReader &stream, NunboxPartKind kind, uint32_t info) { if (kind == Part_Reg) return LGeneralReg(Register::FromCode(info)); if (info == MAX_INFO_VALUE) info = stream.readUnsigned(); if (kind == Part_Stack) @@ -424,19 +424,19 @@ PartFromStream(CompactBufferReader &stre } bool SafepointReader::getNunboxSlot(LAllocation *type, LAllocation *payload) { if (!nunboxSlotsRemaining_--) return false; - uint16_t header = stream_.readFixedUint16(); + uint16_t header = stream_.readFixedUint16_t(); NunboxPartKind typeKind = (NunboxPartKind)((header >> TYPE_KIND_SHIFT) & PART_KIND_MASK); NunboxPartKind payloadKind = (NunboxPartKind)((header >> PAYLOAD_KIND_SHIFT) & PART_KIND_MASK); - uint32 typeInfo = (header >> TYPE_INFO_SHIFT) & PART_INFO_MASK; - uint32 payloadInfo = (header >> PAYLOAD_INFO_SHIFT) & PART_INFO_MASK; + uint32_t typeInfo = (header >> TYPE_INFO_SHIFT) & PART_INFO_MASK; + uint32_t payloadInfo = (header >> PAYLOAD_INFO_SHIFT) & PART_INFO_MASK; *type = PartFromStream(stream_, typeKind, typeInfo); *payload = PartFromStream(stream_, payloadKind, payloadInfo); return true; }
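The writer and reader above agree on the 16-bit header laid out in the "Nunbox part encoding" comment: two 3-bit part kinds in the top bits, then two 5-bit info fields, with an all-ones info field escaping to a following [vwu] in the stream. A round-trip check using the same constants:

    #include <cassert>
    #include <cstdint>

    static const uint32_t PART_KIND_BITS = 3;
    static const uint32_t PART_KIND_MASK = (1 << PART_KIND_BITS) - 1;
    static const uint32_t PART_INFO_BITS = 5;
    static const uint32_t PART_INFO_MASK = (1 << PART_INFO_BITS) - 1;
    static const uint32_t MAX_INFO_VALUE = (1 << PART_INFO_BITS) - 1;     // 11111
    static const uint32_t TYPE_KIND_SHIFT = 16 - PART_KIND_BITS;          // 13
    static const uint32_t PAYLOAD_KIND_SHIFT = TYPE_KIND_SHIFT - PART_KIND_BITS;   // 10
    static const uint32_t TYPE_INFO_SHIFT = PAYLOAD_KIND_SHIFT - PART_INFO_BITS;   // 5
    static const uint32_t PAYLOAD_INFO_SHIFT = TYPE_INFO_SHIFT - PART_INFO_BITS;   // 0

    int main() {
        enum { Part_Reg, Part_Stack, Part_Arg };

        // Type in register 3, payload in stack slot 7; both small enough to
        // encode inline instead of escaping through MAX_INFO_VALUE.
        uint16_t header = 0;
        header |= (Part_Reg << TYPE_KIND_SHIFT);
        header |= (Part_Stack << PAYLOAD_KIND_SHIFT);
        header |= (3 << TYPE_INFO_SHIFT);
        header |= (7 << PAYLOAD_INFO_SHIFT);

        // The decode side, as in SafepointReader::getNunboxSlot().
        assert(((header >> TYPE_KIND_SHIFT) & PART_KIND_MASK) == Part_Reg);
        assert(((header >> PAYLOAD_KIND_SHIFT) & PART_KIND_MASK) == Part_Stack);
        assert(((header >> TYPE_INFO_SHIFT) & PART_INFO_MASK) == 3);
        assert(((header >> PAYLOAD_INFO_SHIFT) & PART_INFO_MASK) == 7);
        assert(7 < MAX_INFO_VALUE);   // otherwise the value would be escaped
        return 0;
    }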
--- a/js/src/ion/Safepoints.h +++ b/js/src/ion/Safepoints.h @@ -16,94 +16,94 @@ namespace js { namespace ion { struct SafepointNunboxEntry; class LAllocation; class LSafepoint; -static const uint32 INVALID_SAFEPOINT_OFFSET = uint32(-1); +static const uint32_t INVALID_SAFEPOINT_OFFSET = uint32_t(-1); class SafepointWriter { CompactBufferWriter stream_; BitSet *frameSlots_; public: - bool init(uint32 slotCount); + bool init(uint32_t slotCount); private: // A safepoint entry is written in the order these functions appear. - uint32 startEntry(); + uint32_t startEntry(); - void writeOsiCallPointOffset(uint32 osiPointOffset); + void writeOsiCallPointOffset(uint32_t osiPointOffset); void writeGcRegs(LSafepoint *safepoint); void writeGcSlots(LSafepoint *safepoint); void writeValueSlots(LSafepoint *safepoint); #ifdef JS_NUNBOX32 void writeNunboxParts(LSafepoint *safepoint); #endif void endEntry(); public: void encode(LSafepoint *safepoint); size_t size() const { return stream_.length(); } - const uint8 *buffer() const { + const uint8_t *buffer() const { return stream_.buffer(); } }; class SafepointReader { CompactBufferReader stream_; - uint32 frameSlots_; - uint32 currentSlotChunk_; - uint32 currentSlotChunkNumber_; - uint32 osiCallPointOffset_; + uint32_t frameSlots_; + uint32_t currentSlotChunk_; + uint32_t currentSlotChunkNumber_; + uint32_t osiCallPointOffset_; GeneralRegisterSet gcSpills_; GeneralRegisterSet valueSpills_; GeneralRegisterSet allSpills_; - uint32 nunboxSlotsRemaining_; + uint32_t nunboxSlotsRemaining_; private: void advanceFromGcRegs(); void advanceFromGcSlots(); void advanceFromValueSlots(); - bool getSlotFromBitmap(uint32 *slot); + bool getSlotFromBitmap(uint32_t *slot); public: SafepointReader(IonScript *script, const SafepointIndex *si); static CodeLocationLabel InvalidationPatchPoint(IonScript *script, const SafepointIndex *si); - uint32 osiCallPointOffset() const { + uint32_t osiCallPointOffset() const { return osiCallPointOffset_; } GeneralRegisterSet gcSpills() const { return gcSpills_; } GeneralRegisterSet valueSpills() const { return valueSpills_; } GeneralRegisterSet allSpills() const { return allSpills_; } - uint32 osiReturnPointOffset() const; + uint32_t osiReturnPointOffset() const; // Returns true if a slot was read, false if there are no more slots. - bool getGcSlot(uint32 *slot); + bool getGcSlot(uint32_t *slot); // Returns true if a slot was read, false if there are no more value slots. - bool getValueSlot(uint32 *slot); + bool getValueSlot(uint32_t *slot); // Returns true if a nunbox slot was read, false if there are no more // nunbox slots. bool getNunboxSlot(LAllocation *type, LAllocation *payload); }; } // namespace ion } // namespace js
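The comments on getGcSlot() and getValueSlot() define a drain-style protocol: each call yields one slot and returns false once its category is exhausted. This is the loop shape a caller would use; StubReader and its slot values are invented for illustration:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    class StubReader {
        std::vector<uint32_t> gcSlots_{4, 9};   // stand-in data, not real slots
        size_t i_ = 0;
      public:
        bool getGcSlot(uint32_t *slot) {
            if (i_ == gcSlots_.size())
                return false;          // no more slots in this category
            *slot = gcSlots_[i_++];
            return true;
        }
    };

    int main() {
        StubReader reader;
        uint32_t slot, seen = 0;
        while (reader.getGcSlot(&slot))
            seen++;                    // e.g. trace the value stored at |slot|
        assert(seen == 2);
        return 0;
    }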
--- a/js/src/ion/SnapshotReader.h +++ b/js/src/ion/SnapshotReader.h @@ -22,36 +22,36 @@ class LInstruction; // A snapshot reader reads the entries out of the compressed snapshot buffer in // a script. These entries describe the stack state of an Ion frame at a given // position in JIT code. class SnapshotReader { CompactBufferReader reader_; - uint32 pcOffset_; // Offset from script->code. - uint32 slotCount_; // Number of slots. - uint32 frameCount_; + uint32_t pcOffset_; // Offset from script->code. + uint32_t slotCount_; // Number of slots. + uint32_t frameCount_; BailoutKind bailoutKind_; - uint32 framesRead_; // Number of frame headers that have been read. - uint32 slotsRead_; // Number of slots that have been read. + uint32_t framesRead_; // Number of frame headers that have been read. + uint32_t slotsRead_; // Number of slots that have been read. bool resumeAfter_; #ifdef DEBUG // In debug mode we include the JSScript in order to make a few assertions. JSScript *script_; #endif #ifdef TRACK_SNAPSHOTS private: - uint32 pcOpcode_; - uint32 mirOpcode_; - uint32 mirId_; - uint32 lirOpcode_; - uint32 lirId_; + uint32_t pcOpcode_; + uint32_t mirOpcode_; + uint32_t mirId_; + uint32_t lirOpcode_; + uint32_t lirId_; public: void spewBailingFrom() const; #endif private: void readSnapshotHeader(); void readFrameHeader(); @@ -71,37 +71,37 @@ class SnapshotReader JS_INT32 // Int32Value(n) }; class Location { friend class SnapshotReader; Register::Code reg_; - int32 stackSlot_; + int32_t stackSlot_; static Location From(const Register ®) { Location loc; loc.reg_ = reg.code(); loc.stackSlot_ = INVALID_STACK_SLOT; return loc; } - static Location From(int32 stackSlot) { + static Location From(int32_t stackSlot) { Location loc; loc.reg_ = Register::Code(0); // Quell compiler warnings. 
loc.stackSlot_ = stackSlot; return loc; } public: Register reg() const { JS_ASSERT(!isStackSlot()); return Register::FromCode(reg_); } - int32 stackSlot() const { + int32_t stackSlot() const { JS_ASSERT(isStackSlot()); return stackSlot_; } bool isStackSlot() const { return stackSlot_ != INVALID_STACK_SLOT; } }; @@ -122,65 +122,65 @@ class SnapshotReader Location type; Location payload; } unknown_type_; #elif defined(JS_PUNBOX64) struct { Location value; } unknown_type_; #endif - int32 value_; + int32_t value_; }; Slot(SlotMode mode, JSValueType type, const Location &loc) : mode_(mode) { known_type_.type = type; known_type_.payload = loc; } Slot(const FloatRegister ®) : mode_(DOUBLE_REG) { fpu_ = reg.code(); } Slot(SlotMode mode) : mode_(mode) { } - Slot(SlotMode mode, uint32 index) + Slot(SlotMode mode, uint32_t index) : mode_(mode) { JS_ASSERT(mode == CONSTANT || mode == JS_INT32); value_ = index; } public: SlotMode mode() const { return mode_; } - uint32 constantIndex() const { + uint32_t constantIndex() const { JS_ASSERT(mode() == CONSTANT); return value_; } - int32 int32Value() const { + int32_t int32Value() const { JS_ASSERT(mode() == JS_INT32); return value_; } JSValueType knownType() const { JS_ASSERT(mode() == TYPED_REG || mode() == TYPED_STACK); return known_type_.type; } Register reg() const { JS_ASSERT(mode() == TYPED_REG && knownType() != JSVAL_TYPE_DOUBLE); return known_type_.payload.reg(); } FloatRegister floatReg() const { JS_ASSERT(mode() == DOUBLE_REG); return FloatRegister::FromCode(fpu_); } - int32 stackSlot() const { + int32_t stackSlot() const { JS_ASSERT(mode() == TYPED_STACK); return known_type_.payload.stackSlot(); } #if defined(JS_NUNBOX32) Location payload() const { JS_ASSERT(mode() == UNTYPED); return unknown_type_.payload; } @@ -192,22 +192,22 @@ class SnapshotReader Location value() const { JS_ASSERT(mode() == UNTYPED); return unknown_type_.value; } #endif }; public: - SnapshotReader(const uint8 *buffer, const uint8 *end); + SnapshotReader(const uint8_t *buffer, const uint8_t *end); - uint32 pcOffset() const { + uint32_t pcOffset() const { return pcOffset_; } - uint32 slots() const { + uint32_t slots() const { return slotCount_; } BailoutKind bailoutKind() const { return bailoutKind_; } bool resumeAfter() const { if (moreFrames()) return false; @@ -224,17 +224,17 @@ class SnapshotReader Value skip() { readSlot(); return UndefinedValue(); } bool moreSlots() const { return slotsRead_ < slotCount_; } - uint32 frameCount() const { + uint32_t frameCount() const { return frameCount_; } }; } } #endif // jsion_snapshots_h__
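Location above avoids a separate tag bit: a register location is simply one whose stackSlot_ holds INVALID_STACK_SLOT (the sentinel's actual definition is outside this hunk). The same pattern in miniature, with a placeholder sentinel:

    #include <cassert>
    #include <cstdint>

    static const int32_t INVALID_STACK_SLOT = INT32_MIN;  // placeholder value

    struct Location {
        uint32_t reg;
        int32_t stackSlot;

        static Location fromReg(uint32_t reg) {
            return { reg, INVALID_STACK_SLOT };   // sentinel marks "register"
        }
        static Location fromStack(int32_t slot) {
            return { 0, slot };
        }
        bool isStackSlot() const {
            return stackSlot != INVALID_STACK_SLOT;
        }
    };

    int main() {
        assert(!Location::fromReg(3).isStackSlot());
        assert(Location::fromStack(-8).isStackSlot());
        return 0;
    }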
--- a/js/src/ion/SnapshotWriter.h +++ b/js/src/ion/SnapshotWriter.h @@ -19,59 +19,59 @@ namespace ion { // Collects snapshots in a contiguous buffer, which is copied into IonScript // memory after code generation. class SnapshotWriter { CompactBufferWriter writer_; // These are only used to assert sanity. - uint32 nslots_; - uint32 slotsWritten_; - uint32 nframes_; - uint32 framesWritten_; + uint32_t nslots_; + uint32_t slotsWritten_; + uint32_t nframes_; + uint32_t framesWritten_; SnapshotOffset lastStart_; - void writeSlotHeader(JSValueType type, uint32 regCode); + void writeSlotHeader(JSValueType type, uint32_t regCode); public: - SnapshotOffset startSnapshot(uint32 frameCount, BailoutKind kind, bool resumeAfter); - void startFrame(JSFunction *fun, JSScript *script, jsbytecode *pc, uint32 exprStack); + SnapshotOffset startSnapshot(uint32_t frameCount, BailoutKind kind, bool resumeAfter); + void startFrame(JSFunction *fun, JSScript *script, jsbytecode *pc, uint32_t exprStack); #ifdef TRACK_SNAPSHOTS - void trackFrame(uint32 pcOpcode, uint32 mirOpcode, uint32 mirId, - uint32 lirOpcode, uint32 lirId); + void trackFrame(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId, + uint32_t lirOpcode, uint32_t lirId); #endif void endFrame(); void addSlot(const FloatRegister ®); void addSlot(JSValueType type, const Register ®); - void addSlot(JSValueType type, int32 stackIndex); + void addSlot(JSValueType type, int32_t stackIndex); void addUndefinedSlot(); void addNullSlot(); - void addInt32Slot(int32 value); - void addConstantPoolSlot(uint32 index); + void addInt32Slot(int32_t value); + void addConstantPoolSlot(uint32_t index); #if defined(JS_NUNBOX32) void addSlot(const Register &type, const Register &payload); - void addSlot(const Register &type, int32 payloadStackIndex); - void addSlot(int32 typeStackIndex, const Register &payload); - void addSlot(int32 typeStackIndex, int32 payloadStackIndex); + void addSlot(const Register &type, int32_t payloadStackIndex); + void addSlot(int32_t typeStackIndex, const Register &payload); + void addSlot(int32_t typeStackIndex, int32_t payloadStackIndex); #elif defined(JS_PUNBOX64) void addSlot(const Register &value); - void addSlot(int32 valueStackSlot); + void addSlot(int32_t valueStackSlot); #endif void endSnapshot(); bool oom() const { return writer_.oom() || writer_.length() >= MAX_BUFFER_SIZE; } size_t size() const { return writer_.length(); } - const uint8 *buffer() const { + const uint8_t *buffer() const { return writer_.buffer(); } }; } } #endif // jsion_snapshot_writer_h__
--- a/js/src/ion/Snapshots.cpp +++ b/js/src/ion/Snapshots.cpp @@ -90,39 +90,39 @@ using namespace js::ion; // // PUNBOX64: // "reg" is InvalidReg1: byte is followed by a [vws] stack // offset containing a Value. // // Otherwise, "reg" is a register containing a Value. // -SnapshotReader::SnapshotReader(const uint8 *buffer, const uint8 *end) +SnapshotReader::SnapshotReader(const uint8_t *buffer, const uint8_t *end) : reader_(buffer, end), slotCount_(0), frameCount_(0), slotsRead_(0) { if (!buffer) return; IonSpew(IonSpew_Snapshots, "Creating snapshot reader"); readSnapshotHeader(); nextFrame(); } -static const uint32 BAILOUT_KIND_SHIFT = 0; -static const uint32 BAILOUT_KIND_MASK = (1 << BAILOUT_KIND_BITS) - 1; -static const uint32 BAILOUT_RESUME_SHIFT = BAILOUT_KIND_SHIFT + BAILOUT_KIND_BITS; -static const uint32 BAILOUT_FRAMECOUNT_SHIFT = BAILOUT_KIND_BITS + BAILOUT_RESUME_BITS; -static const uint32 BAILOUT_FRAMECOUNT_BITS = (8 * sizeof(uint32)) - BAILOUT_FRAMECOUNT_SHIFT; +static const uint32_t BAILOUT_KIND_SHIFT = 0; +static const uint32_t BAILOUT_KIND_MASK = (1 << BAILOUT_KIND_BITS) - 1; +static const uint32_t BAILOUT_RESUME_SHIFT = BAILOUT_KIND_SHIFT + BAILOUT_KIND_BITS; +static const uint32_t BAILOUT_FRAMECOUNT_SHIFT = BAILOUT_KIND_BITS + BAILOUT_RESUME_BITS; +static const uint32_t BAILOUT_FRAMECOUNT_BITS = (8 * sizeof(uint32_t)) - BAILOUT_FRAMECOUNT_SHIFT; void SnapshotReader::readSnapshotHeader() { - uint32 bits = reader_.readUnsigned(); + uint32_t bits = reader_.readUnsigned(); frameCount_ = bits >> BAILOUT_FRAMECOUNT_SHIFT; JS_ASSERT(frameCount_ > 0); bailoutKind_ = BailoutKind((bits >> BAILOUT_KIND_SHIFT) & BAILOUT_KIND_MASK); resumeAfter_ = !!(bits & (1 << BAILOUT_RESUME_SHIFT)); framesRead_ = 0; IonSpew(IonSpew_Snapshots, "Read snapshot header with frameCount %u, bailout kind %u (ra: %d)", frameCount_, bailoutKind_, resumeAfter_); @@ -132,17 +132,17 @@ void SnapshotReader::readFrameHeader() { JS_ASSERT(moreFrames()); JS_ASSERT(slotsRead_ == slotCount_); #ifdef DEBUG union { JSScript *script; - uint8 bytes[sizeof(JSScript *)]; + uint8_t bytes[sizeof(JSScript *)]; } u; for (size_t i = 0; i < sizeof(JSScript *); i++) u.bytes[i] = reader_.readByte(); script_ = u.script; #endif pcOffset_ = reader_.readUnsigned(); slotCount_ = reader_.readUnsigned(); @@ -172,40 +172,40 @@ SnapshotReader::spewBailingFrom() const LInstruction::printName(IonSpewFile, LInstruction::Opcode(lirOpcode_)); fprintf(IonSpewFile, " [%u]", lirId_); fprintf(IonSpewFile, "\n"); } } #endif #ifdef JS_NUNBOX32 -static const uint32 NUNBOX32_STACK_STACK = 0; -static const uint32 NUNBOX32_STACK_REG = 1; -static const uint32 NUNBOX32_REG_STACK = 2; -static const uint32 NUNBOX32_REG_REG = 3; +static const uint32_t NUNBOX32_STACK_STACK = 0; +static const uint32_t NUNBOX32_STACK_REG = 1; +static const uint32_t NUNBOX32_REG_STACK = 2; +static const uint32_t NUNBOX32_REG_REG = 3; #endif -static const uint32 MAX_TYPE_FIELD_VALUE = 7; +static const uint32_t MAX_TYPE_FIELD_VALUE = 7; -static const uint32 MAX_REG_FIELD_VALUE = 31; -static const uint32 ESC_REG_FIELD_INDEX = 31; -static const uint32 ESC_REG_FIELD_CONST = 30; -static const uint32 MIN_REG_FIELD_ESC = 30; +static const uint32_t MAX_REG_FIELD_VALUE = 31; +static const uint32_t ESC_REG_FIELD_INDEX = 31; +static const uint32_t ESC_REG_FIELD_CONST = 30; +static const uint32_t MIN_REG_FIELD_ESC = 30; SnapshotReader::Slot SnapshotReader::readSlot() { JS_ASSERT(slotsRead_ < slotCount_); IonSpew(IonSpew_Snapshots, "Reading slot %u", slotsRead_); slotsRead_++; - uint8 b = 
reader_.readByte(); + uint8_t b = reader_.readByte(); JSValueType type = JSValueType(b & 0x7); - uint32 code = b >> 3; + uint32_t code = b >> 3; switch (type) { case JSVAL_TYPE_DOUBLE: if (code < MIN_REG_FIELD_ESC) return Slot(FloatRegister::FromCode(code)); JS_ASSERT(code == ESC_REG_FIELD_INDEX); return Slot(TYPED_STACK, type, Location::From(reader_.readSigned())); @@ -232,17 +232,17 @@ SnapshotReader::readSlot() return Slot(CONSTANT, reader_.readUnsigned()); return Slot(CONSTANT, code); default: { JS_ASSERT(type == JSVAL_TYPE_MAGIC); if (code == ESC_REG_FIELD_CONST) { - uint8 reg2 = reader_.readUnsigned(); + uint8_t reg2 = reader_.readUnsigned(); Location loc; if (reg2 != ESC_REG_FIELD_INDEX) loc = Location::From(Register::FromCode(reg2)); else loc = Location::From(reader_.readSigned()); return Slot(TYPED_REG, type, loc); } @@ -279,74 +279,74 @@ SnapshotReader::readSlot() } } JS_NOT_REACHED("huh?"); return Slot(JS_UNDEFINED); } SnapshotOffset -SnapshotWriter::startSnapshot(uint32 frameCount, BailoutKind kind, bool resumeAfter) +SnapshotWriter::startSnapshot(uint32_t frameCount, BailoutKind kind, bool resumeAfter) { nframes_ = frameCount; framesWritten_ = 0; lastStart_ = writer_.length(); IonSpew(IonSpew_Snapshots, "starting snapshot with frameCount %u, bailout kind %u", frameCount, kind); JS_ASSERT(frameCount > 0); JS_ASSERT(frameCount < (1 << BAILOUT_FRAMECOUNT_BITS)); - JS_ASSERT(uint32(kind) < (1 << BAILOUT_KIND_BITS)); + JS_ASSERT(uint32_t(kind) < (1 << BAILOUT_KIND_BITS)); - uint32 bits = (uint32(kind) << BAILOUT_KIND_SHIFT) | + uint32_t bits = (uint32_t(kind) << BAILOUT_KIND_SHIFT) | (frameCount << BAILOUT_FRAMECOUNT_SHIFT); if (resumeAfter) bits |= (1 << BAILOUT_RESUME_SHIFT); writer_.writeUnsigned(bits); return lastStart_; } void -SnapshotWriter::startFrame(JSFunction *fun, JSScript *script, jsbytecode *pc, uint32 exprStack) +SnapshotWriter::startFrame(JSFunction *fun, JSScript *script, jsbytecode *pc, uint32_t exprStack) { JS_ASSERT(CountArgSlots(fun) < SNAPSHOT_MAX_NARGS); JS_ASSERT(exprStack < SNAPSHOT_MAX_STACK); - uint32 formalArgs = CountArgSlots(fun); + uint32_t formalArgs = CountArgSlots(fun); nslots_ = formalArgs + script->nfixed + exprStack; slotsWritten_ = 0; IonSpew(IonSpew_Snapshots, "Starting frame; formals %u, fixed %u, exprs %u", formalArgs, script->nfixed, exprStack); #ifdef DEBUG union { JSScript *script; - uint8 bytes[sizeof(JSScript *)]; + uint8_t bytes[sizeof(JSScript *)]; } u; u.script = script; for (size_t i = 0; i < sizeof(JSScript *); i++) writer_.writeByte(u.bytes[i]); #endif JS_ASSERT(script->code <= pc && pc <= script->code + script->length); - uint32 pcoff = uint32(pc - script->code); + uint32_t pcoff = uint32_t(pc - script->code); IonSpew(IonSpew_Snapshots, "Writing pc offset %u, nslots %u", pcoff, nslots_); writer_.writeUnsigned(pcoff); writer_.writeUnsigned(nslots_); } #ifdef TRACK_SNAPSHOTS void -SnapshotWriter::trackFrame(uint32 pcOpcode, uint32 mirOpcode, uint32 mirId, - uint32 lirOpcode, uint32 lirId) +SnapshotWriter::trackFrame(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId, + uint32_t lirOpcode, uint32_t lirId) { writer_.writeUnsigned(pcOpcode); writer_.writeUnsigned(mirOpcode); writer_.writeUnsigned(mirId); writer_.writeUnsigned(lirOpcode); writer_.writeUnsigned(lirId); } #endif @@ -356,23 +356,23 @@ SnapshotWriter::endFrame() { // Check that the last write succeeded. 
@@ -279,74 +279,74 @@ SnapshotReader::readSlot()
       }
     }

     JS_NOT_REACHED("huh?");
     return Slot(JS_UNDEFINED);
 }

 SnapshotOffset
-SnapshotWriter::startSnapshot(uint32 frameCount, BailoutKind kind, bool resumeAfter)
+SnapshotWriter::startSnapshot(uint32_t frameCount, BailoutKind kind, bool resumeAfter)
 {
     nframes_ = frameCount;
     framesWritten_ = 0;

     lastStart_ = writer_.length();

     IonSpew(IonSpew_Snapshots, "starting snapshot with frameCount %u, bailout kind %u",
             frameCount, kind);
     JS_ASSERT(frameCount > 0);
     JS_ASSERT(frameCount < (1 << BAILOUT_FRAMECOUNT_BITS));
-    JS_ASSERT(uint32(kind) < (1 << BAILOUT_KIND_BITS));
+    JS_ASSERT(uint32_t(kind) < (1 << BAILOUT_KIND_BITS));

-    uint32 bits = (uint32(kind) << BAILOUT_KIND_SHIFT) |
+    uint32_t bits = (uint32_t(kind) << BAILOUT_KIND_SHIFT) |
                   (frameCount << BAILOUT_FRAMECOUNT_SHIFT);
     if (resumeAfter)
         bits |= (1 << BAILOUT_RESUME_SHIFT);

     writer_.writeUnsigned(bits);
     return lastStart_;
 }

 void
-SnapshotWriter::startFrame(JSFunction *fun, JSScript *script, jsbytecode *pc, uint32 exprStack)
+SnapshotWriter::startFrame(JSFunction *fun, JSScript *script, jsbytecode *pc, uint32_t exprStack)
 {
     JS_ASSERT(CountArgSlots(fun) < SNAPSHOT_MAX_NARGS);
     JS_ASSERT(exprStack < SNAPSHOT_MAX_STACK);

-    uint32 formalArgs = CountArgSlots(fun);
+    uint32_t formalArgs = CountArgSlots(fun);

     nslots_ = formalArgs + script->nfixed + exprStack;
     slotsWritten_ = 0;

     IonSpew(IonSpew_Snapshots, "Starting frame; formals %u, fixed %u, exprs %u",
             formalArgs, script->nfixed, exprStack);

 #ifdef DEBUG
     union {
         JSScript *script;
-        uint8 bytes[sizeof(JSScript *)];
+        uint8_t bytes[sizeof(JSScript *)];
     } u;
     u.script = script;
     for (size_t i = 0; i < sizeof(JSScript *); i++)
         writer_.writeByte(u.bytes[i]);
 #endif

     JS_ASSERT(script->code <= pc && pc <= script->code + script->length);

-    uint32 pcoff = uint32(pc - script->code);
+    uint32_t pcoff = uint32_t(pc - script->code);
     IonSpew(IonSpew_Snapshots, "Writing pc offset %u, nslots %u", pcoff, nslots_);
     writer_.writeUnsigned(pcoff);
     writer_.writeUnsigned(nslots_);
 }

 #ifdef TRACK_SNAPSHOTS
 void
-SnapshotWriter::trackFrame(uint32 pcOpcode, uint32 mirOpcode, uint32 mirId,
-                           uint32 lirOpcode, uint32 lirId)
+SnapshotWriter::trackFrame(uint32_t pcOpcode, uint32_t mirOpcode, uint32_t mirId,
+                           uint32_t lirOpcode, uint32_t lirId)
 {
     writer_.writeUnsigned(pcOpcode);
     writer_.writeUnsigned(mirOpcode);
     writer_.writeUnsigned(mirId);
     writer_.writeUnsigned(lirOpcode);
     writer_.writeUnsigned(lirId);
 }
 #endif

@@ -356,23 +356,23 @@ SnapshotWriter::endFrame()
 {
     // Check that the last write succeeded.
     JS_ASSERT(nslots_ == slotsWritten_);
     nslots_ = slotsWritten_ = 0;
     framesWritten_++;
 }

 void
-SnapshotWriter::writeSlotHeader(JSValueType type, uint32 regCode)
+SnapshotWriter::writeSlotHeader(JSValueType type, uint32_t regCode)
 {
-    JS_ASSERT(uint32(type) <= MAX_TYPE_FIELD_VALUE);
-    JS_ASSERT(uint32(regCode) <= MAX_REG_FIELD_VALUE);
+    JS_ASSERT(uint32_t(type) <= MAX_TYPE_FIELD_VALUE);
+    JS_ASSERT(uint32_t(regCode) <= MAX_REG_FIELD_VALUE);

     JS_STATIC_ASSERT(Registers::Total < MIN_REG_FIELD_ESC);

-    uint8 byte = uint32(type) | (regCode << 3);
+    uint8_t byte = uint32_t(type) | (regCode << 3);
     writer_.writeByte(byte);

     slotsWritten_++;
     JS_ASSERT(slotsWritten_ <= nslots_);
 }

 void
 SnapshotWriter::addSlot(const FloatRegister &reg)
@@ -382,17 +382,17 @@ SnapshotWriter::addSlot(const FloatRegis
     writeSlotHeader(JSVAL_TYPE_DOUBLE, reg.code());
 }

 static const char *
 ValTypeToString(JSValueType type)
 {
     switch (type) {
       case JSVAL_TYPE_INT32:
-        return "int32";
+        return "int32_t";
       case JSVAL_TYPE_DOUBLE:
         return "double";
       case JSVAL_TYPE_STRING:
         return "string";
       case JSVAL_TYPE_BOOLEAN:
         return "boolean";
       case JSVAL_TYPE_OBJECT:
         return "object";
@@ -410,17 +410,17 @@ SnapshotWriter::addSlot(JSValueType type
     IonSpew(IonSpew_Snapshots, "    slot %u: %s (%s)",
             slotsWritten_, ValTypeToString(type), reg.name());

     JS_ASSERT(type != JSVAL_TYPE_DOUBLE);
     writeSlotHeader(type, reg.code());
 }

 void
-SnapshotWriter::addSlot(JSValueType type, int32 stackIndex)
+SnapshotWriter::addSlot(JSValueType type, int32_t stackIndex)
 {
     IonSpew(IonSpew_Snapshots, "    slot %u: %s (stack %d)",
             slotsWritten_, ValTypeToString(type), stackIndex);

     writeSlotHeader(type, ESC_REG_FIELD_INDEX);
     writer_.writeSigned(stackIndex);
 }

@@ -432,39 +432,39 @@ SnapshotWriter::addSlot(const Register &
             slotsWritten_, type.name(), payload.name());

     writeSlotHeader(JSVAL_TYPE_MAGIC, NUNBOX32_REG_REG);
     writer_.writeByte(type.code());
     writer_.writeByte(payload.code());
 }

 void
-SnapshotWriter::addSlot(const Register &type, int32 payloadStackIndex)
+SnapshotWriter::addSlot(const Register &type, int32_t payloadStackIndex)
 {
     IonSpew(IonSpew_Snapshots, "    slot %u: value (t=%s, d=%d)",
             slotsWritten_, type.name(), payloadStackIndex);

     writeSlotHeader(JSVAL_TYPE_MAGIC, NUNBOX32_REG_STACK);
     writer_.writeByte(type.code());
     writer_.writeSigned(payloadStackIndex);
 }

 void
-SnapshotWriter::addSlot(int32 typeStackIndex, const Register &payload)
+SnapshotWriter::addSlot(int32_t typeStackIndex, const Register &payload)
 {
     IonSpew(IonSpew_Snapshots, "    slot %u: value (t=%d, d=%s)",
             slotsWritten_, typeStackIndex, payload.name());

     writeSlotHeader(JSVAL_TYPE_MAGIC, NUNBOX32_STACK_REG);
     writer_.writeSigned(typeStackIndex);
     writer_.writeByte(payload.code());
 }

 void
-SnapshotWriter::addSlot(int32 typeStackIndex, int32 payloadStackIndex)
+SnapshotWriter::addSlot(int32_t typeStackIndex, int32_t payloadStackIndex)
 {
     IonSpew(IonSpew_Snapshots, "    slot %u: value (t=%d, d=%d)",
             slotsWritten_, typeStackIndex, payloadStackIndex);

     writeSlotHeader(JSVAL_TYPE_MAGIC, NUNBOX32_STACK_STACK);
     writer_.writeSigned(typeStackIndex);
     writer_.writeSigned(payloadStackIndex);
 }

@@ -474,17 +474,17 @@ void
 SnapshotWriter::addSlot(const Register &value)
 {
     IonSpew(IonSpew_Snapshots, "    slot %u: value (reg %s)",
             slotsWritten_, value.name());

     writeSlotHeader(JSVAL_TYPE_MAGIC, value.code());
 }

 void
-SnapshotWriter::addSlot(int32 valueStackSlot)
+SnapshotWriter::addSlot(int32_t valueStackSlot)
 {
     IonSpew(IonSpew_Snapshots, "    slot %u: value (stack %d)",
             slotsWritten_, valueStackSlot);

     writeSlotHeader(JSVAL_TYPE_MAGIC, ESC_REG_FIELD_INDEX);
     writer_.writeSigned(valueStackSlot);
 }
 #endif
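On NUNBOX32 platforms (32-bit targets) a boxed Value is a type-tag word plus a payload word, and at bailout time each half may independently live in a register or in a stack slot; that is why there are four addSlot overloads above, and the NUNBOX32_* code written into the slot header simply records which combination of reads follows (type half first, then payload half). A self-contained sketch of the reader side, with a fixed-width stand-in for the real variable-length CompactBuffer stream:

#include <stdint.h>
#include <string.h>

// Stand-in reader; the real stream uses a variable-length encoding.
struct ByteReader {
    const uint8_t *p;
    uint8_t readByte() { return *p++; }
    int32_t readSigned() { int32_t v; memcpy(&v, p, sizeof v); p += sizeof v; return v; }
};

enum { NB_STACK_STACK = 0, NB_STACK_REG = 1, NB_REG_STACK = 2, NB_REG_REG = 3 };

// Mirrors the writer: the type half is emitted first, then the payload half;
// "STACK" halves are signed offsets, "REG" halves are one-byte register codes.
void readNunboxValue(ByteReader &r, uint32_t code) {
    if (code == NB_STACK_STACK || code == NB_STACK_REG)
        r.readSigned();  // type tag's stack offset
    else
        r.readByte();    // type tag's register code
    if (code == NB_STACK_STACK || code == NB_REG_STACK)
        r.readSigned();  // payload's stack offset
    else
        r.readByte();    // payload's register code
}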
@@ -510,34 +510,34 @@ SnapshotWriter::endSnapshot()
     JS_ASSERT(nframes_ == framesWritten_);

     // Place a sentinel for asserting on the other end.
 #ifdef DEBUG
     writer_.writeSigned(-1);
 #endif

     IonSpew(IonSpew_Snapshots, "ending snapshot total size: %u bytes (start %u)",
-            uint32(writer_.length() - lastStart_), lastStart_);
+            uint32_t(writer_.length() - lastStart_), lastStart_);
 }

 void
-SnapshotWriter::addInt32Slot(int32 value)
+SnapshotWriter::addInt32Slot(int32_t value)
 {
-    IonSpew(IonSpew_Snapshots, "    slot %u: int32 %d", slotsWritten_, value);
+    IonSpew(IonSpew_Snapshots, "    slot %u: int32_t %d", slotsWritten_, value);

-    if (value >= 0 && uint32(value) < MIN_REG_FIELD_ESC) {
+    if (value >= 0 && uint32_t(value) < MIN_REG_FIELD_ESC) {
         writeSlotHeader(JSVAL_TYPE_NULL, value);
     } else {
         writeSlotHeader(JSVAL_TYPE_NULL, ESC_REG_FIELD_INDEX);
         writer_.writeSigned(value);
     }
 }

 void
-SnapshotWriter::addConstantPoolSlot(uint32 index)
+SnapshotWriter::addConstantPoolSlot(uint32_t index)
 {
     IonSpew(IonSpew_Snapshots, "    slot %u: constant pool index %u", slotsWritten_, index);

     if (index < MIN_REG_FIELD_ESC) {
         writeSlotHeader(JSVAL_TYPE_UNDEFINED, index);
     } else {
         writeSlotHeader(JSVAL_TYPE_UNDEFINED, ESC_REG_FIELD_INDEX);
         writer_.writeUnsigned(index);
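addInt32Slot and addConstantPoolSlot reuse the five-bit register field of the header byte as an inline payload: values below MIN_REG_FIELD_ESC (30) travel inside the header itself, and only larger values pay for an ESC_REG_FIELD_INDEX escape plus a full extra word. A small sketch of the size trade-off; the 4-byte escape payload is an assumption for illustration, since the real stream is variable-length:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t MIN_REG_FIELD_ESC = 30;

// Bytes needed for one int32 slot under this scheme.
size_t int32SlotSize(int32_t value) {
    if (value >= 0 && uint32_t(value) < MIN_REG_FIELD_ESC)
        return 1;  // packed into the header byte's reg field
    return 1 + 4;  // escape header, then the full value
}

int main() {
    printf("%zu %zu %zu\n",
           int32SlotSize(7),      // 1: fits inline
           int32SlotSize(-1),     // 5: negative values always escape
           int32SlotSize(1000));  // 5
}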
--- a/js/src/ion/StackSlotAllocator.h
+++ b/js/src/ion/StackSlotAllocator.h
@@ -10,56 +10,56 @@
 #include "Registers.h"

 namespace js {
 namespace ion {

 class StackSlotAllocator
 {
-    js::Vector<uint32, 4, SystemAllocPolicy> normalSlots;
-    js::Vector<uint32, 4, SystemAllocPolicy> doubleSlots;
-    uint32 height_;
+    js::Vector<uint32_t, 4, SystemAllocPolicy> normalSlots;
+    js::Vector<uint32_t, 4, SystemAllocPolicy> doubleSlots;
+    uint32_t height_;

   public:
     StackSlotAllocator() : height_(0)
     { }

-    void freeSlot(uint32 index) {
+    void freeSlot(uint32_t index) {
         normalSlots.append(index);
     }
-    void freeDoubleSlot(uint32 index) {
+    void freeDoubleSlot(uint32_t index) {
         doubleSlots.append(index);
     }
-    void freeValueSlot(uint32 index) {
+    void freeValueSlot(uint32_t index) {
         freeDoubleSlot(index);
     }

-    uint32 allocateDoubleSlot() {
+    uint32_t allocateDoubleSlot() {
         if (!doubleSlots.empty())
             return doubleSlots.popCopy();
         if (ComputeByteAlignment(height_, DOUBLE_STACK_ALIGNMENT))
             normalSlots.append(++height_);
         height_ += (sizeof(double) / STACK_SLOT_SIZE);
         return height_;
     }
-    uint32 allocateSlot() {
+    uint32_t allocateSlot() {
         if (!normalSlots.empty())
             return normalSlots.popCopy();
         if (!doubleSlots.empty()) {
-            uint32 index = doubleSlots.popCopy();
+            uint32_t index = doubleSlots.popCopy();
             normalSlots.append(index - 1);
             return index;
         }
         return ++height_;
     }
-    uint32 allocateValueSlot() {
+    uint32_t allocateValueSlot() {
         return allocateDoubleSlot();
     }
-    uint32 stackHeight() const {
+    uint32_t stackHeight() const {
         return height_;
     }
 };

 } // namespace ion
 } // namespace js

 #endif // jsion_stack_slot_allocator_h_
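StackSlotAllocator hands out indices in units of STACK_SLOT_SIZE words, so a double occupies two consecutive indices. Two details worth tracing in the code above: allocateDoubleSlot() burns a one-word padding slot (immediately recycled into normalSlots) whenever the current height is misaligned, and allocateSlot() can split a freed double slot into two single slots by handing back index and index - 1 separately. A free-standing mock of that discipline, assuming STACK_SLOT_SIZE == 4 and 8-byte double alignment:

#include <stdint.h>
#include <vector>
#include <cassert>

struct MockStackSlots {
    std::vector<uint32_t> normal, dbl;
    uint32_t height = 0;

    uint32_t allocateDouble() {
        if (!dbl.empty()) { uint32_t i = dbl.back(); dbl.pop_back(); return i; }
        if (height % 2)                // misaligned: burn one word as padding
            normal.push_back(++height);
        height += 2;                   // sizeof(double) / STACK_SLOT_SIZE
        return height;
    }
    uint32_t allocate() {
        if (!normal.empty()) { uint32_t i = normal.back(); normal.pop_back(); return i; }
        if (!dbl.empty()) {            // split a freed double slot in two
            uint32_t i = dbl.back(); dbl.pop_back();
            normal.push_back(i - 1);
            return i;
        }
        return ++height;
    }
};

int main() {
    MockStackSlots s;
    uint32_t a = s.allocate();        // slot 1
    uint32_t d = s.allocateDouble();  // pads slot 2, returns slot 4
    assert(a == 1 && d == 4);
}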
--- a/js/src/ion/StupidAllocator.cpp
+++ b/js/src/ion/StupidAllocator.cpp
@@ -5,28 +5,28 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

 #include "StupidAllocator.h"

 using namespace js;
 using namespace js::ion;

-static inline uint32
-DefaultStackSlot(uint32 vreg)
+static inline uint32_t
+DefaultStackSlot(uint32_t vreg)
 {
 #if JS_BITS_PER_WORD == 32
     return vreg * 2 + 2;
 #else
     return vreg + 1;
 #endif
 }

 LAllocation *
-StupidAllocator::stackLocation(uint32 vreg)
+StupidAllocator::stackLocation(uint32_t vreg)
 {
     LDefinition *def = virtualRegisters[vreg];
     if (def->policy() == LDefinition::PRESET && def->output()->kind() == LAllocation::ARGUMENT)
         return def->output();

     return new LStackSlot(DefaultStackSlot(vreg), def->type() == LDefinition::DOUBLE);
 }
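DefaultStackSlot gives every virtual register a fixed home slot, so spill locations never need to be computed dynamically: on 32-bit targets each vreg reserves two slots (presumably to leave room for a two-word NUNBOX32 type/payload pair), while on 64-bit targets one slot per vreg suffices. Spelled out side by side:

#include <stdint.h>
#include <stdio.h>

// The two #if branches of DefaultStackSlot above, as separate functions.
uint32_t defaultStackSlot32(uint32_t vreg) { return vreg * 2 + 2; }  // JS_BITS_PER_WORD == 32
uint32_t defaultStackSlot64(uint32_t vreg) { return vreg + 1; }

int main() {
    // 32-bit: vreg 0 -> slot 2, vreg 1 -> slot 4, ... (two words apart).
    // 64-bit: vreg 0 -> slot 1, vreg 1 -> slot 2, ... (one word apart).
    for (uint32_t vreg = 0; vreg < 3; vreg++)
        printf("vreg %u -> %u / %u\n", vreg, defaultStackSlot32(vreg), defaultStackSlot64(vreg));
}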
@@ -66,17 +66,17 @@ StupidAllocator::init()
                 if (def->isBogusTemp())
                     continue;
                 virtualRegisters[def->virtualRegister()] = def;
             }
         }

         for (size_t j = 0; j < block->numPhis(); j++) {
             LPhi *phi = block->getPhi(j);
             LDefinition *def = phi->getDef(0);
-            uint32 vreg = def->virtualRegister();
+            uint32_t vreg = def->virtualRegister();
             virtualRegisters[vreg] = def;
         }
     }

     // Assign physical registers to the tracked allocation.
     {
         registerCount = 0;
@@ -119,17 +119,17 @@ RegisterIsReserved(LInstruction *ins, An
     for (size_t i = 0; i < ins->numDefs(); i++) {
         if (AllocationRequiresRegister(ins->getDef(i)->output(), reg))
             return true;
     }

     return false;
 }

 AnyRegister
-StupidAllocator::ensureHasRegister(LInstruction *ins, uint32 vreg)
+StupidAllocator::ensureHasRegister(LInstruction *ins, uint32_t vreg)
 {
     // Ensure that vreg is held in a register before ins.

     // Check if the virtual register is already held in a physical register.
     RegisterIndex existing = findExistingRegister(vreg);
     if (existing != UINT32_MAX) {
         if (RegisterIsReserved(ins, registers[existing].reg)) {
             evictRegister(ins, existing);
@@ -141,17 +141,17 @@ StupidAllocator::ensureHasRegister(LInst

     RegisterIndex best = allocateRegister(ins, vreg);
     loadRegister(ins, vreg, best);

     return registers[best].reg;
 }

 StupidAllocator::RegisterIndex
-StupidAllocator::allocateRegister(LInstruction *ins, uint32 vreg)
+StupidAllocator::allocateRegister(LInstruction *ins, uint32_t vreg)
 {
     // Pick a register for vreg, evicting an existing register if necessary.
     // Spill code will be placed before ins, and no existing allocated input
     // for ins will be touched.
     JS_ASSERT(ins);

     LDefinition *def = virtualRegisters[vreg];
     JS_ASSERT(def);
@@ -182,44 +182,44 @@ StupidAllocator::allocateRegister(LInstr

 void
 StupidAllocator::syncRegister(LInstruction *ins, RegisterIndex index)
 {
     if (registers[index].dirty) {
         LMoveGroup *input = getInputMoveGroup(ins->id());
         LAllocation *source = new LAllocation(registers[index].reg);

-        uint32 existing = registers[index].vreg;
+        uint32_t existing = registers[index].vreg;
         LAllocation *dest = stackLocation(existing);
         input->addAfter(source, dest);

         registers[index].dirty = false;
     }
 }

 void
 StupidAllocator::evictRegister(LInstruction *ins, RegisterIndex index)
 {
     syncRegister(ins, index);
     registers[index].set(MISSING_ALLOCATION);
 }

 void
-StupidAllocator::loadRegister(LInstruction *ins, uint32 vreg, RegisterIndex index)
+StupidAllocator::loadRegister(LInstruction *ins, uint32_t vreg, RegisterIndex index)
 {
     // Load a vreg from its stack location to a register.
     LMoveGroup *input = getInputMoveGroup(ins->id());
     LAllocation *source = stackLocation(vreg);
     LAllocation *dest = new LAllocation(registers[index].reg);
     input->addAfter(source, dest);
     registers[index].set(vreg, ins);
 }

 StupidAllocator::RegisterIndex
-StupidAllocator::findExistingRegister(uint32 vreg)
+StupidAllocator::findExistingRegister(uint32_t vreg)
 {
     for (size_t i = 0; i < registerCount; i++) {
         if (registers[i].vreg == vreg)
             return i;
     }

     return UINT32_MAX;
 }

@@ -278,23 +278,23 @@ StupidAllocator::syncForBlockEnd(LBlock
     for (size_t i = 0; i < registerCount; i++)
         syncRegister(ins, i);

     LMoveGroup *group = NULL;

     MBasicBlock *successor = block->mir()->successorWithPhis();
     if (successor) {
-        uint32 position = block->mir()->positionInPhiSuccessor();
+        uint32_t position = block->mir()->positionInPhiSuccessor();
         LBlock *lirsuccessor = graph.getBlock(successor->id());
         for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
             LPhi *phi = lirsuccessor->getPhi(i);

-            uint32 sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
-            uint32 destvreg = phi->getDef(0)->virtualRegister();
+            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
+            uint32_t destvreg = phi->getDef(0)->virtualRegister();

             if (sourcevreg == destvreg)
                 continue;

             LAllocation *source = stackLocation(sourcevreg);
             LAllocation *dest = stackLocation(destvreg);

             if (!group) {
@@ -323,17 +323,17 @@ StupidAllocator::allocateForInstruction(
             syncRegister(ins, i);
     }

     // Allocate for inputs which are required to be in registers.
     for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
         if (!alloc->isUse())
             continue;
         LUse *use = alloc->toUse();
-        uint32 vreg = use->virtualRegister();
+        uint32_t vreg = use->virtualRegister();
         if (use->policy() == LUse::REGISTER) {
             AnyRegister reg = ensureHasRegister(ins, vreg);
             alloc.replace(LAllocation(reg));
         } else if (use->policy() == LUse::FIXED) {
             AnyRegister reg = AnyRegister::FromCode(use->registerCode());
             RegisterIndex index = registerIndex(reg);
             if (registers[index].vreg != vreg) {
                 evictRegister(ins, index);
@@ -362,17 +362,17 @@ StupidAllocator::allocateForInstruction(
         allocateForDefinition(ins, def);
     }

     // Allocate for remaining inputs which do not need to be in registers.
     for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
         if (!alloc->isUse())
             continue;
         LUse *use = alloc->toUse();
-        uint32 vreg = use->virtualRegister();
+        uint32_t vreg = use->virtualRegister();
         JS_ASSERT(use->policy() != LUse::REGISTER && use->policy() != LUse::FIXED);

         RegisterIndex index = findExistingRegister(vreg);
         if (index == UINT32_MAX) {
             LAllocation *stack = stackLocation(use->virtualRegister());
             alloc.replace(*stack);
         } else {
             registers[index].age = ins->id();
@@ -387,17 +387,17 @@ StupidAllocator::allocateForInstruction(
                 registers[i].set(MISSING_ALLOCATION);
         }
     }
 }

 void
 StupidAllocator::allocateForDefinition(LInstruction *ins, LDefinition *def)
 {
-    uint32 vreg = def->virtualRegister();
+    uint32_t vreg = def->virtualRegister();

     CodePosition from;
     if ((def->output()->isRegister() && def->policy() == LDefinition::PRESET) ||
         def->policy() == LDefinition::MUST_REUSE_INPUT)
     {
         // Result will be in a specific register, spill any vreg held in
         // that register before the instruction.
         RegisterIndex index =
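The pattern running through syncRegister, evictRegister, and syncForBlockEnd above is a simple write-back cache: every vreg's authoritative home is its fixed stack slot, physical registers are a dirty-flagged cache on top, and at every block boundary the cache is flushed, which is why phi moves can be resolved entirely as stack-to-stack copies. A condensed, self-contained sketch of that discipline; the move-emission call stands in for the real LMoveGroup plumbing:

#include <stddef.h>
#include <stdint.h>

static const uint32_t MISSING_ALLOCATION = UINT32_MAX;

struct CachedReg {
    uint32_t vreg;  // backed vreg, or MISSING_ALLOCATION
    bool dirty;     // register is ahead of the vreg's stack slot
};

// Stand-in for emitting a spill store before the current instruction.
static void emitRegToStackMove(uint32_t vreg) { (void) vreg; }

// syncRegister: write the cached value back to its home slot if needed.
static void sync(CachedReg &r) {
    if (r.vreg != MISSING_ALLOCATION && r.dirty) {
        emitRegToStackMove(r.vreg);
        r.dirty = false;
    }
}

// evictRegister: sync, then forget; the stack copy is now the only one.
static void evict(CachedReg &r) {
    sync(r);
    r.vreg = MISSING_ALLOCATION;
}

// syncForBlockEnd: flush everything so successors may assume all vregs
// live in their stack slots, making phi resolution stack-to-stack.
static void syncAllForBlockEnd(CachedReg *regs, size_t n) {
    for (size_t i = 0; i < n; i++)
        sync(regs[i]);
}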
--- a/js/src/ion/StupidAllocator.h
+++ b/js/src/ion/StupidAllocator.h
@@ -12,44 +12,44 @@

 // Simple register allocator that only carries registers within basic blocks.

 namespace js {
 namespace ion {

 class StupidAllocator : public RegisterAllocator
 {
-    static const uint32 MAX_REGISTERS = Registers::Allocatable + FloatRegisters::Allocatable;
-    static const uint32 MISSING_ALLOCATION = UINT32_MAX;
+    static const uint32_t MAX_REGISTERS = Registers::Allocatable + FloatRegisters::Allocatable;
+    static const uint32_t MISSING_ALLOCATION = UINT32_MAX;

     struct AllocatedRegister {
         AnyRegister reg;

         // Virtual register this physical reg backs, or MISSING_ALLOCATION.
-        uint32 vreg;
+        uint32_t vreg;

         // id of the instruction which most recently used this register.
-        uint32 age;
+        uint32_t age;

         // Whether the physical register is not synced with the backing stack slot.
         bool dirty;

-        void set(uint32 vreg, LInstruction *ins = NULL, bool dirty = false) {
+        void set(uint32_t vreg, LInstruction *ins = NULL, bool dirty = false) {
            this->vreg = vreg;
            this->age = ins ? ins->id() : 0;
            this->dirty = dirty;
        }
    };

     // Active allocation for the current code position.
     AllocatedRegister registers[MAX_REGISTERS];
-    uint32 registerCount;
+    uint32_t registerCount;

     // Type indicating an index into registers.
-    typedef uint32 RegisterIndex;
+    typedef uint32_t RegisterIndex;

     // Information about each virtual register.
     Vector<LDefinition*, 0, SystemAllocPolicy> virtualRegisters;

   public:
     StupidAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
       : RegisterAllocator(mir, lir, graph)
     {
@@ -59,26 +59,26 @@ class StupidAllocator : public RegisterA

   private:
     bool init();

     void syncForBlockEnd(LBlock *block, LInstruction *ins);
     void allocateForInstruction(LInstruction *ins);
     void allocateForDefinition(LInstruction *ins, LDefinition *def);

-    LAllocation *stackLocation(uint32 vreg);
+    LAllocation *stackLocation(uint32_t vreg);

     RegisterIndex registerIndex(AnyRegister reg);

-    AnyRegister ensureHasRegister(LInstruction *ins, uint32 vreg);
-    RegisterIndex allocateRegister(LInstruction *ins, uint32 vreg);
+    AnyRegister ensureHasRegister(LInstruction *ins, uint32_t vreg);
+    RegisterIndex allocateRegister(LInstruction *ins, uint32_t vreg);

     void syncRegister(LInstruction *ins, RegisterIndex index);
     void evictRegister(LInstruction *ins, RegisterI