author | Dan Gohman <sunfish@mozilla.com> |
Sat, 10 Jan 2015 14:52:36 -0800 | |
changeset 223143 | 5f41156dbd4c15193f07ae77a88313153b5782cf |
parent 223142 | 29229def6b2e26f1e7d10016f6abe14e1182d9cb |
child 223144 | 3928ee1b0381453833c00fbe1e1b72a26143f13a |
push id | 53831 |
push user | dgohman@mozilla.com |
push date | Sat, 10 Jan 2015 22:53:25 +0000 |
treeherder | mozilla-inbound@5f41156dbd4c [default view] [failures only] |
perfherder | [talos] [build metrics] [platform microbench] (compared to previous push) |
reviewers | sstangl |
bugs | 1077742 |
milestone | 37.0a1 |
first release with | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
last release without | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
--- a/js/src/jit/BacktrackingAllocator.cpp +++ b/js/src/jit/BacktrackingAllocator.cpp @@ -1003,19 +1003,19 @@ BacktrackingAllocator::resolveControlFlo MOZ_ASSERT(from); if (!moveAtExit(predecessor, from, to, def->type())) return false; } } // Resolve split intervals with moves - BitSet *live = liveIn[mSuccessor->id()]; + BitSet &live = liveIn[mSuccessor->id()]; - for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) { + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { VirtualRegister ® = vregs[*liveRegId]; for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) { LBlock *predecessor = mSuccessor->getPredecessor(j)->lir(); for (size_t k = 0; k < reg.numIntervals(); k++) { LiveInterval *to = reg.getInterval(k); if (!to->covers(entryOf(successor)))
--- a/js/src/jit/BitSet.cpp +++ b/js/src/jit/BitSet.cpp @@ -4,25 +4,16 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "jit/BitSet.h" using namespace js; using namespace js::jit; -BitSet * -BitSet::New(TempAllocator &alloc, unsigned int numBits) -{ - BitSet *result = new(alloc) BitSet(numBits); - if (!result->init(alloc)) - return nullptr; - return result; -} - bool BitSet::init(TempAllocator &alloc) { size_t sizeRequired = numWords() * sizeof(*bits_); bits_ = (uint32_t *)alloc.allocate(sizeRequired); if (!bits_) return false; @@ -40,66 +31,66 @@ BitSet::empty() const for (unsigned int i = 0, e = numWords(); i < e; i++) { if (bits[i]) return false; } return true; } void -BitSet::insertAll(const BitSet *other) +BitSet::insertAll(const BitSet &other) { MOZ_ASSERT(bits_); - MOZ_ASSERT(other->numBits_ == numBits_); - MOZ_ASSERT(other->bits_); + MOZ_ASSERT(other.numBits_ == numBits_); + MOZ_ASSERT(other.bits_); uint32_t *bits = bits_; - const uint32_t *otherBits = other->bits_; + const uint32_t *otherBits = other.bits_; for (unsigned int i = 0, e = numWords(); i < e; i++) bits[i] |= otherBits[i]; } void -BitSet::removeAll(const BitSet *other) +BitSet::removeAll(const BitSet &other) { MOZ_ASSERT(bits_); - MOZ_ASSERT(other->numBits_ == numBits_); - MOZ_ASSERT(other->bits_); + MOZ_ASSERT(other.numBits_ == numBits_); + MOZ_ASSERT(other.bits_); uint32_t *bits = bits_; - const uint32_t *otherBits = other->bits_; + const uint32_t *otherBits = other.bits_; for (unsigned int i = 0, e = numWords(); i < e; i++) bits[i] &= ~otherBits[i]; } void -BitSet::intersect(const BitSet *other) +BitSet::intersect(const BitSet &other) { MOZ_ASSERT(bits_); - MOZ_ASSERT(other->numBits_ == numBits_); - MOZ_ASSERT(other->bits_); + MOZ_ASSERT(other.numBits_ == numBits_); + MOZ_ASSERT(other.bits_); uint32_t *bits = bits_; - const uint32_t *otherBits = other->bits_; + const uint32_t *otherBits = 
other.bits_; for (unsigned int i = 0, e = numWords(); i < e; i++) bits[i] &= otherBits[i]; } // returns true if the intersection caused the contents of the set to change. bool -BitSet::fixedPointIntersect(const BitSet *other) +BitSet::fixedPointIntersect(const BitSet &other) { MOZ_ASSERT(bits_); - MOZ_ASSERT(other->numBits_ == numBits_); - MOZ_ASSERT(other->bits_); + MOZ_ASSERT(other.numBits_ == numBits_); + MOZ_ASSERT(other.bits_); bool changed = false; uint32_t *bits = bits_; - const uint32_t *otherBits = other->bits_; + const uint32_t *otherBits = other.bits_; for (unsigned int i = 0, e = numWords(); i < e; i++) { uint32_t old = bits[i]; bits[i] &= otherBits[i]; if (!changed && old != bits[i]) changed = true; } return changed;
--- a/js/src/jit/BitSet.h +++ b/js/src/jit/BitSet.h @@ -13,51 +13,52 @@ namespace js { namespace jit { // Provides constant time set insertion and removal, and fast linear // set operations such as intersection, difference, and union. // N.B. All set operations must be performed on sets with the same number // of bits. -class BitSet : private TempObject +class BitSet { public: static const size_t BitsPerWord = 8 * sizeof(uint32_t); static size_t RawLengthForBits(size_t bits) { return (bits + BitsPerWord - 1) / BitsPerWord; } private: - explicit BitSet(unsigned int numBits) : - bits_(nullptr), - numBits_(numBits) {} - uint32_t *bits_; const unsigned int numBits_; static inline uint32_t bitForValue(unsigned int value) { return 1l << uint32_t(value % BitsPerWord); } static inline unsigned int wordForValue(unsigned int value) { return value / BitsPerWord; } inline unsigned int numWords() const { return RawLengthForBits(numBits_); } - bool init(TempAllocator &alloc); + BitSet(const BitSet &) = delete; + void operator=(const BitSet &) = delete; public: class Iterator; - static BitSet *New(TempAllocator &alloc, unsigned int numBits); + explicit BitSet(unsigned int numBits) : + bits_(nullptr), + numBits_(numBits) {} + + bool init(TempAllocator &alloc); unsigned int getNumBits() const { return numBits_; } // O(1): Check if this set contains the given value. bool contains(unsigned int value) const { MOZ_ASSERT(bits_); @@ -73,35 +74,35 @@ class BitSet : private TempObject void insert(unsigned int value) { MOZ_ASSERT(bits_); MOZ_ASSERT(value < numBits_); bits_[wordForValue(value)] |= bitForValue(value); } // O(numBits): Insert every element of the given set into this set. - void insertAll(const BitSet *other); + void insertAll(const BitSet &other); // O(1): Remove the given value from this set. 
void remove(unsigned int value) { MOZ_ASSERT(bits_); MOZ_ASSERT(value < numBits_); bits_[wordForValue(value)] &= ~bitForValue(value); } // O(numBits): Remove every element of the given set from this set. - void removeAll(const BitSet *other); + void removeAll(const BitSet &other); // O(numBits): Intersect this set with the given set. - void intersect(const BitSet *other); + void intersect(const BitSet &other); // O(numBits): Intersect this set with the given set; return whether the // intersection caused the set to change. - bool fixedPointIntersect(const BitSet *other); + bool fixedPointIntersect(const BitSet &other); // O(numBits): Does an in-place complement of the set. void complement(); // O(numBits): Clear this set. void clear(); uint32_t *raw() const { @@ -154,25 +155,24 @@ class BitSet::Iterator inline bool more() const { return word_ < set_.numWords(); } inline operator bool() const { return more(); } - inline Iterator& operator++(int dummy) { + inline void operator++() { MOZ_ASSERT(more()); MOZ_ASSERT(index_ < set_.numBits_); index_++; value_ >>= 1; skipEmpty(); - return *this; } unsigned int operator *() { MOZ_ASSERT(index_ < set_.numBits_); return index_; } };
--- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -6931,17 +6931,17 @@ CodeGenerator::generate() jsbytecode *startPC = tree->script()->code(); BytecodeSite *startSite = new(gen->alloc()) BytecodeSite(tree, startPC); if (!addNativeToBytecodeEntry(startSite)) return false; if (!snapshots_.init()) return false; - if (!safepoints_.init(gen->alloc(), graph.totalSlotCount())) + if (!safepoints_.init(gen->alloc())) return false; if (!generatePrologue()) return false; // Before generating any code, we generate type checks for all parameters. // This comes before deoptTable_, because we can't use deopt tables without // creating the actual frame.
--- a/js/src/jit/LinearScan.cpp +++ b/js/src/jit/LinearScan.cpp @@ -251,19 +251,19 @@ LinearScanAllocator::resolveControlFlow( LMoveGroup *moves = successor->getEntryMoveGroup(alloc()); if (!moves->add(to->getAllocation(), vregs[to->vreg()].canonicalSpill(), def->type())) return false; } } // Resolve split intervals with moves - BitSet *live = liveIn[mSuccessor->id()]; + BitSet &live = liveIn[mSuccessor->id()]; - for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) { + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { LinearScanVirtualRegister *vreg = &vregs[*liveRegId]; LiveInterval *to = vreg->intervalFor(entryOf(successor)); MOZ_ASSERT(to); for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) { LBlock *predecessor = mSuccessor->getPredecessor(j)->lir(); LiveInterval *from = vregs[*liveRegId].intervalFor(exitOf(predecessor)); MOZ_ASSERT(from);
--- a/js/src/jit/LiveRangeAllocator.cpp +++ b/js/src/jit/LiveRangeAllocator.cpp @@ -494,17 +494,17 @@ IsInputReused(LInstruction *ins, LUse *u */ template <typename VREG, bool forLSRA> bool LiveRangeAllocator<VREG, forLSRA>::init() { if (!RegisterAllocator::init()) return false; - liveIn = mir->allocate<BitSet*>(graph.numBlockIds()); + liveIn = mir->allocate<BitSet>(graph.numBlockIds()); if (!liveIn) return false; // Initialize fixed intervals. for (size_t i = 0; i < AnyRegister::Total; i++) { AnyRegister reg = AnyRegister::FromCode(i); LiveInterval *interval = LiveInterval::New(alloc(), 0); interval->setAllocation(LAllocation(reg)); @@ -590,54 +590,54 @@ bool LiveRangeAllocator<VREG, forLSRA>::buildLivenessInfo() { JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis"); if (!init()) return false; Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList; - BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds()); - if (!loopDone) + BitSet loopDone(graph.numBlockIds()); + if (!loopDone.init(alloc())) return false; for (size_t i = graph.numBlocks(); i > 0; i--) { if (mir->shouldCancel("Build Liveness Info (main loop)")) return false; LBlock *block = graph.getBlock(i - 1); MBasicBlock *mblock = block->mir(); - BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters()); - if (!live) + BitSet &live = liveIn[mblock->id()]; + new (&live) BitSet(graph.numVirtualRegisters()); + if (!live.init(alloc())) return false; - liveIn[mblock->id()] = live; // Propagate liveIn from our successors to us for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) { MBasicBlock *successor = mblock->lastIns()->getSuccessor(i); // Skip backedges, as we fix them up at the loop header. 
if (mblock->id() < successor->id()) - live->insertAll(liveIn[successor->id()]); + live.insertAll(liveIn[successor->id()]); } // Add successor phis if (mblock->successorWithPhis()) { LBlock *phiSuccessor = mblock->successorWithPhis()->lir(); for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) { LPhi *phi = phiSuccessor->getPhi(j); LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor()); uint32_t reg = use->toUse()->virtualRegister(); - live->insert(reg); + live.insert(reg); } } // Variables are assumed alive for the entire block, a define shortens // the interval to the point of definition. - for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) { + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(entryOf(block), exitOf(block).next())) { return false; } } // Shorten the front end of live intervals for live variables to their @@ -710,17 +710,17 @@ LiveRangeAllocator<VREG, forLSRA>::build interval->setFrom(from); // Ensure that if there aren't any uses, there's at least // some interval for the output to go into. if (interval->numRanges() == 0) { if (!interval->addRangeAtHead(from, from.next())) return false; } - live->remove(def->virtualRegister()); + live.remove(def->virtualRegister()); } for (size_t i = 0; i < ins->numTemps(); i++) { LDefinition *temp = ins->getTemp(i); if (temp->isBogusTemp()) continue; if (forLSRA) { @@ -847,28 +847,28 @@ LiveRangeAllocator<VREG, forLSRA>::build } } LiveInterval *interval = vregs[use].getInterval(0); if (!interval->addRangeAtHead(entryOf(block), forLSRA ? to : to.next())) return false; interval->addUse(new(alloc()) UsePosition(use, to)); - live->insert(use->virtualRegister()); + live.insert(use->virtualRegister()); } } } // Phis have simultaneous assignment semantics at block begin, so at // the beginning of the block we can be sure that liveIn does not // contain any phi outputs. 
for (unsigned int i = 0; i < block->numPhis(); i++) { LDefinition *def = block->getPhi(i)->getDef(0); - if (live->contains(def->virtualRegister())) { - live->remove(def->virtualRegister()); + if (live.contains(def->virtualRegister())) { + live.remove(def->virtualRegister()); } else { // This is a dead phi, so add a dummy range over all phis. This // can go away if we have an earlier dead code elimination pass. CodePosition entryPos = entryOf(block); if (!vregs[def].getInterval(0)->addRangeAtHead(entryPos, entryPos.next())) return false; } } @@ -883,34 +883,34 @@ LiveRangeAllocator<VREG, forLSRA>::build while (true) { // Blocks must already have been visited to have a liveIn set. MOZ_ASSERT(loopBlock->id() >= mblock->id()); // Add an interval for this entire loop block CodePosition from = entryOf(loopBlock->lir()); CodePosition to = exitOf(loopBlock->lir()).next(); - for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) { + for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) { if (!vregs[*liveRegId].getInterval(0)->addRange(from, to)) return false; } // Fix up the liveIn set to account for the new interval - liveIn[loopBlock->id()]->insertAll(live); + liveIn[loopBlock->id()].insertAll(live); // Make sure we don't visit this node again - loopDone->insert(loopBlock->id()); + loopDone.insert(loopBlock->id()); // If this is the loop header, any predecessors are either the // backedge or out of the loop, so skip any predecessors of // this block if (loopBlock != mblock) { for (size_t i = 0; i < loopBlock->numPredecessors(); i++) { MBasicBlock *pred = loopBlock->getPredecessor(i); - if (loopDone->contains(pred->id())) + if (loopDone.contains(pred->id())) continue; if (!loopWorkList.append(pred)) return false; } } // Terminate loop if out of work. if (loopWorkList.empty()) @@ -927,20 +927,20 @@ LiveRangeAllocator<VREG, forLSRA>::build // If end is reached without finding a non-OSR block, then no more work items were found. 
if (loopBlock == osrBlock) { MOZ_ASSERT(loopWorkList.empty()); break; } } // Clear the done set for other loops - loopDone->clear(); + loopDone.clear(); } - MOZ_ASSERT_IF(!mblock->numPredecessors(), live->empty()); + MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty()); } validateVirtualRegisters(); // If the script has an infinite loop, there may be no MReturn and therefore // no fixed intervals. Add a small range to fixedIntervalsUnion so that the // rest of the allocator can assume it has at least one range. if (fixedIntervalsUnion->numRanges() == 0) {
--- a/js/src/jit/LiveRangeAllocator.h +++ b/js/src/jit/LiveRangeAllocator.h @@ -599,17 +599,17 @@ typedef InlineList<LiveInterval>::revers // fixed uses of registers, and shifting the boundaries of live ranges by small // amounts. This exists because different allocators handle live ranges // differently; ideally, they would all treat live ranges in the same way. template <typename VREG, bool forLSRA> class LiveRangeAllocator : protected RegisterAllocator { protected: // Computed information - BitSet **liveIn; + BitSet *liveIn; VirtualRegisterMap<VREG> vregs; mozilla::Array<LiveInterval *, AnyRegister::Total> fixedIntervals; // Union of all ranges in fixedIntervals, used to quickly determine // whether an interval intersects with a fixed register. LiveInterval *fixedIntervalsUnion; // Allocation state
--- a/js/src/jit/Safepoints.cpp +++ b/js/src/jit/Safepoints.cpp @@ -12,24 +12,24 @@ #include "jit/JitSpewer.h" #include "jit/LIR.h" using namespace js; using namespace jit; using mozilla::FloorLog2; +SafepointWriter::SafepointWriter(uint32_t slotCount) + : frameSlots_(slotCount / sizeof(intptr_t)) +{ } + bool -SafepointWriter::init(TempAllocator &alloc, uint32_t slotCount) +SafepointWriter::init(TempAllocator &alloc) { - frameSlots_ = BitSet::New(alloc, slotCount / sizeof(intptr_t)); - if (!frameSlots_) - return false; - - return true; + return frameSlots_.init(alloc); } uint32_t SafepointWriter::startEntry() { JitSpew(JitSpew_Safepoints, "Encoding safepoint (position %d):", stream_.length()); return uint32_t(stream_.length()); } @@ -124,32 +124,32 @@ SafepointWriter::writeGcRegs(LSafepoint } for (FloatRegisterForwardIterator iter(spilledFloat); iter.more(); iter++) JitSpew(JitSpew_Safepoints, " float reg: %s", (*iter).name()); } #endif } static void -MapSlotsToBitset(BitSet *set, CompactBufferWriter &stream, uint32_t nslots, uint32_t *slots) +MapSlotsToBitset(BitSet &set, CompactBufferWriter &stream, uint32_t nslots, uint32_t *slots) { - set->clear(); + set.clear(); for (uint32_t i = 0; i < nslots; i++) { // Slots are represented at a distance from |fp|. We divide by the // pointer size, since we only care about pointer-sized/aligned slots // here. Since the stack grows down, this means slots start at index 1, // so we subtract 1 to pack the bitset. MOZ_ASSERT(slots[i] % sizeof(intptr_t) == 0); MOZ_ASSERT(slots[i] / sizeof(intptr_t) > 0); - set->insert(slots[i] / sizeof(intptr_t) - 1); + set.insert(slots[i] / sizeof(intptr_t) - 1); } - size_t count = set->rawLength(); - const uint32_t *words = set->raw(); + size_t count = set.rawLength(); + const uint32_t *words = set.raw(); for (size_t i = 0; i < count; i++) stream.writeUnsigned(words[i]); } void SafepointWriter::writeGcSlots(LSafepoint *safepoint) { LSafepoint::SlotList &slots = safepoint->gcSlots();
--- a/js/src/jit/Safepoints.h +++ b/js/src/jit/Safepoints.h @@ -2,36 +2,37 @@ * vim: set ts=8 sts=4 et sw=4 tw=99: * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef jit_Safepoints_h #define jit_Safepoints_h +#include "jit/BitSet.h" #include "jit/CompactBuffer.h" #include "jit/shared/Assembler-shared.h" namespace js { namespace jit { -class BitSet; struct SafepointNunboxEntry; class LAllocation; class LSafepoint; static const uint32_t INVALID_SAFEPOINT_OFFSET = uint32_t(-1); class SafepointWriter { CompactBufferWriter stream_; - BitSet *frameSlots_; + BitSet frameSlots_; public: - bool init(TempAllocator &alloc, uint32_t slotCount); + explicit SafepointWriter(uint32_t slotCount); + bool init(TempAllocator &alloc); private: // A safepoint entry is written in the order these functions appear. uint32_t startEntry(); void writeOsiCallPointOffset(uint32_t osiPointOffset); void writeGcRegs(LSafepoint *safepoint); void writeGcSlots(LSafepoint *safepoint);
--- a/js/src/jit/shared/CodeGenerator-shared.cpp +++ b/js/src/jit/shared/CodeGenerator-shared.cpp @@ -47,16 +47,17 @@ CodeGeneratorShared::CodeGeneratorShared current(nullptr), snapshots_(), recovers_(), deoptTable_(nullptr), #ifdef DEBUG pushedArgs_(0), #endif lastOsiPointOffset_(0), + safepoints_(graph->totalSlotCount()), nativeToBytecodeMap_(nullptr), nativeToBytecodeMapSize_(0), nativeToBytecodeTableOffset_(0), nativeToBytecodeNumRegions_(0), nativeToBytecodeScriptList_(nullptr), nativeToBytecodeScriptListLength_(0), sps_(&GetJitContext()->runtime->spsProfiler(), &lastNotInlinedPC_), osrEntryOffset_(0),