author | Sean Stangl <sstangl@mozilla.com> | Fri, 08 May 2015 11:55:34 -0700 |
changeset 243426 | 913091cadf6378dc6d57d2df202aa643e38abf14 |
parent 243425 | 86efbb9287a4cdb68908aa9322ef45008394d349 |
child 243427 | ef3e09a6a0c3d0c8c77625d27b27d47831115f59 |
push id | 28738 |
push user | cbook@mozilla.com |
push date | Tue, 12 May 2015 14:11:31 +0000 |
treeherder | mozilla-central@bedce1b405a3 |
reviewers | dougc |
bugs | 1163168 |
milestone | 40.0a1 |
first release with | nightly linux32 / nightly linux64 / nightly mac / nightly win32 / nightly win64 |
last release without | nightly linux32 / nightly linux64 / nightly mac / nightly win32 / nightly win64 |
files | js/src/jit/shared/IonAssemblerBuffer.h, js/src/jit/shared/IonAssemblerBufferWithConstantPools.h |
--- a/js/src/jit/shared/IonAssemblerBuffer.h
+++ b/js/src/jit/shared/IonAssemblerBuffer.h
@@ -2,291 +2,351 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_shared_IonAssemblerBuffer_h
 #define jit_shared_IonAssemblerBuffer_h
 
-// needed for the definition of Label :(
+#include "mozilla/Assertions.h"
+
 #include "jit/shared/Assembler-shared.h"
 
 namespace js {
 namespace jit {
 
-// This should theoretically reside inside of AssemblerBuffer, but that won't be
-// nice AssemblerBuffer is templated, BufferOffset would be indirectly.
-//
-// A BufferOffset is the offset into a buffer, expressed in bytes of
-// instructions.
-
+// The offset into a buffer, in bytes.
 class BufferOffset
 {
     int offset;
+
   public:
     friend BufferOffset nextOffset();
-    explicit BufferOffset(int offset_) : offset(offset_) {}
-    // Return the offset as a raw integer.
+
+    BufferOffset()
+      : offset(INT_MIN)
+    { }
+
+    explicit BufferOffset(int offset_)
+      : offset(offset_)
+    { }
+
+    explicit BufferOffset(Label* l)
+      : offset(l->offset())
+    { }
+
+    explicit BufferOffset(RepatchLabel* l)
+      : offset(l->offset())
+    { }
+
     int getOffset() const {
         return offset;
     }
+    bool assigned() const { return offset != INT_MIN; }
 
     // A BOffImm is a Branch Offset Immediate. It is an architecture-specific
     // structure that holds the immediate for a pc relative branch. diffB takes
     // the label for the destination of the branch, and encodes the immediate
     // for the branch. This will need to be fixed up later, since A pool may be
     // inserted between the branch and its destination.
     template <class BOffImm>
     BOffImm diffB(BufferOffset other) const {
         return BOffImm(offset - other.offset);
     }
 
     template <class BOffImm>
     BOffImm diffB(Label* other) const {
         MOZ_ASSERT(other->bound());
         return BOffImm(offset - other->offset());
     }
-
-    explicit BufferOffset(Label* l) : offset(l->offset()) {
-    }
-    explicit BufferOffset(RepatchLabel* l) : offset(l->offset()) {
-    }
-
-    BufferOffset() : offset(INT_MIN) {}
-    bool assigned() const { return offset != INT_MIN; }
 };
 
 template<int SliceSize>
-struct BufferSlice {
+class BufferSlice
+{
   protected:
     BufferSlice<SliceSize>* prev_;
     BufferSlice<SliceSize>* next_;
-    // How much data has been added to the current node.
-    uint32_t nodeSize_;
+
+    size_t bytelength_;
+
+  public:
+    mozilla::Array<uint8_t, SliceSize> instructions;
 
   public:
+    explicit BufferSlice()
+      : prev_(nullptr), next_(nullptr), bytelength_(0)
+    { }
+
+    size_t length() const { return bytelength_; }
+    static inline size_t Capacity() { return SliceSize; }
+
     BufferSlice* getNext() const { return next_; }
     BufferSlice* getPrev() const { return prev_; }
+
     void setNext(BufferSlice<SliceSize>* next) {
         MOZ_ASSERT(next_ == nullptr);
         MOZ_ASSERT(next->prev_ == nullptr);
         next_ = next;
         next->prev_ = this;
     }
 
-    mozilla::Array<uint8_t, SliceSize> instructions;
-
-    size_t size() const {
-        return nodeSize_;
-    }
-
-    explicit BufferSlice() : prev_(nullptr), next_(nullptr), nodeSize_(0) {}
-
-    void putBlob(uint32_t instSize, uint8_t* inst) {
-        if (inst != nullptr)
-            memcpy(&instructions[size()], inst, instSize);
-        nodeSize_ += instSize;
+    void putBytes(size_t numBytes, const uint8_t* source) {
+        MOZ_ASSERT(bytelength_ + numBytes <= SliceSize);
+        if (source)
+            memcpy(&instructions[length()], source, numBytes);
+        bytelength_ += numBytes;
     }
 };
 
 template<int SliceSize, class Inst>
-struct AssemblerBuffer
+class AssemblerBuffer
 {
-  public:
-    explicit AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false),
-        m_bail(false), bufferSize(0), lifoAlloc_(8192) {}
-
   protected:
     typedef BufferSlice<SliceSize> Slice;
     typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
+
+  protected:
+    // Doubly-linked list of BufferSlices, with the most recent in tail position.
     Slice* head;
     Slice* tail;
+
   public:
     bool m_oom;
     bool m_bail;
-    // How much data has been added to the buffer thus far.
+
+    // How many bytes has been committed to the buffer thus far.
+    // Does not include tail.
     uint32_t bufferSize;
     uint32_t lastInstSize;
+
+    // Finger for speeding up accesses.
+    Slice* finger;
+    int finger_offset;
+
+    LifoAlloc lifoAlloc_;
+
+  public:
+    explicit AssemblerBuffer()
+      : head(nullptr),
+        tail(nullptr),
+        m_oom(false),
+        m_bail(false),
+        bufferSize(0),
+        lastInstSize(0),
+        finger(nullptr),
+        finger_offset(0),
+        lifoAlloc_(8192)
+    { }
+
+  public:
     bool isAligned(int alignment) const {
-        // Make sure the requested alignment is a power of two.
         MOZ_ASSERT(IsPowerOfTwo(alignment));
         return !(size() & (alignment - 1));
     }
+
     virtual Slice* newSlice(LifoAlloc& a) {
         Slice* tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
         if (!tmp) {
-            m_oom = true;
+            fail_oom();
             return nullptr;
         }
-        new (tmp) Slice;
-        return tmp;
+        return new (tmp) Slice;
     }
+
     bool ensureSpace(int size) {
-        if (tail != nullptr && tail->size() + size <= SliceSize)
+        // Space can exist in the most recent Slice.
+        if (tail && tail->length() + size <= tail->Capacity())
             return true;
-        Slice* tmp = newSlice(lifoAlloc_);
-        if (tmp == nullptr)
-            return false;
-        if (tail != nullptr) {
-            bufferSize += tail->size();
-            tail->setNext(tmp);
+
+        // Otherwise, a new Slice must be added.
+        Slice* slice = newSlice(lifoAlloc_);
+        if (slice == nullptr)
+            return fail_oom();
+
+        // If this is the first Slice in the buffer, add to head position.
+        if (!head) {
+            head = slice;
+            finger = slice;
+            finger_offset = 0;
         }
-        tail = tmp;
-        if (head == nullptr) {
-            finger = tmp;
-            finger_offset = 0;
-            head = tmp;
+
+        // Finish the last Slice and add the new Slice to the linked list.
+        if (tail) {
+            bufferSize += tail->length();
+            tail->setNext(slice);
         }
+        tail = slice;
+
         return true;
     }
 
     BufferOffset putByte(uint8_t value) {
-        return putBlob(sizeof(value), (uint8_t*)&value);
+        return putBytes(sizeof(value), (uint8_t*)&value);
     }
+
     BufferOffset putShort(uint16_t value) {
-        return putBlob(sizeof(value), (uint8_t*)&value);
+        return putBytes(sizeof(value), (uint8_t*)&value);
     }
+
     BufferOffset putInt(uint32_t value) {
-        return putBlob(sizeof(value), (uint8_t*)&value);
+        return putBytes(sizeof(value), (uint8_t*)&value);
    }
-    BufferOffset putBlob(uint32_t instSize, uint8_t* inst) {
+
+    BufferOffset putBytes(uint32_t instSize, uint8_t* inst) {
         if (!ensureSpace(instSize))
             return BufferOffset();
+
         BufferOffset ret = nextOffset();
-        tail->putBlob(instSize, inst);
+        tail->putBytes(instSize, inst);
         return ret;
     }
+
     unsigned int size() const {
-        int executableSize;
-        if (tail != nullptr)
-            executableSize = bufferSize + tail->size();
-        else
-            executableSize = bufferSize;
-        return executableSize;
+        if (tail)
+            return bufferSize + tail->length();
+        return bufferSize;
     }
-    bool oom() const {
-        return m_oom || m_bail;
+
+    bool oom() const { return m_oom || m_bail; }
+    bool bail() const { return m_bail; }
+
+    bool fail_oom() {
+        m_oom = true;
+        return false;
+    }
+    bool fail_bail() {
+        m_bail = true;
+        return false;
+    }
+    void update_finger(Slice* finger_, int fingerOffset_) {
+        finger = finger_;
+        finger_offset = fingerOffset_;
     }
-    bool bail() const {
-        return m_bail;
-    }
-    void fail_oom() {
-        m_oom = true;
+
+  private:
+    static const unsigned SliceDistanceRequiringFingerUpdate = 3;
+
+    Inst* getInstForwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
+        const int offset = off.getOffset();
+
+        int cursor = startOffset;
+        unsigned slicesSkipped = 0;
+
+        MOZ_ASSERT(offset >= cursor);
+
+        for (Slice *slice = start; slice != nullptr; slice = slice->getNext()) {
+            const int slicelen = slice->length();
+
+            // Is the offset within the bounds of this slice?
+            if (offset < cursor + slicelen) {
+                if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
+                    update_finger(slice, cursor);
+
+                MOZ_ASSERT(offset - cursor < (int)slice->length());
+                return (Inst*)&slice->instructions[offset - cursor];
+            }
+
+            cursor += slicelen;
+            slicesSkipped++;
+        }
+
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid instruction cursor.");
     }
-    void fail_bail() {
-        m_bail = true;
+
+    Inst* getInstBackwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
+        const int offset = off.getOffset();
+
+        int cursor = startOffset; // First (lowest) offset in the start Slice.
+        unsigned slicesSkipped = 0;
+
+        MOZ_ASSERT(offset < int(cursor + start->length()));
+
+        for (Slice* slice = start; slice != nullptr; ) {
+            // Is the offset within the bounds of this slice?
+            if (offset >= cursor) {
+                if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
+                    update_finger(slice, cursor);
+
+                MOZ_ASSERT(offset - cursor < (int)slice->length());
+                return (Inst*)&slice->instructions[offset - cursor];
+            }
+
+            // Move the cursor to the start of the previous slice.
+            Slice* prev = slice->getPrev();
+            cursor -= prev->length();
+
+            slice = prev;
+            slicesSkipped++;
+        }
+
+        MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid instruction cursor.");
     }
-    // Finger for speeding up accesses.
-    Slice* finger;
-    unsigned int finger_offset;
+
+  public:
     Inst* getInst(BufferOffset off) {
-        int local_off = off.getOffset();
-        // Don't update the structure's finger in place, so there is the option
-        // to not update it.
-        Slice* cur = nullptr;
-        int cur_off;
-        // Get the offset that we'd be dealing with by walking through
-        // backwards.
-        int end_off = bufferSize - local_off;
-        // If end_off is negative, then it is in the last chunk, and there is no
-        // real work to be done.
-        if (end_off <= 0)
-            return (Inst*)&tail->instructions[-end_off];
-        bool used_finger = false;
-        int finger_off = abs((int)(local_off - finger_offset));
-        if (finger_off < Min(local_off, end_off)) {
-            // The finger offset is minimal, use the finger.
-            cur = finger;
-            cur_off = finger_offset;
-            used_finger = true;
-        } else if (local_off < end_off) {
-            // It is closest to the start.
-            cur = head;
-            cur_off = 0;
-        } else {
-            // It is closest to the end.
-            cur = tail;
-            cur_off = bufferSize;
+        const int offset = off.getOffset();
+
+        // Is the instruction in the last slice?
+        if (offset >= int(bufferSize))
+            return (Inst*)&tail->instructions[offset - bufferSize];
+
+        // How close is this offset to the previous one we looked up?
+        // If it is sufficiently far from the start and end of the buffer,
+        // use the finger to start midway through the list.
+        int finger_dist = abs(offset - finger_offset);
+        if (finger_dist < Min(offset, int(bufferSize - offset))) {
+            if (finger_offset < offset)
+                return getInstForwards(off, finger, finger_offset, true);
+            return getInstBackwards(off, finger, finger_offset, true);
         }
-        int count = 0;
-        if (local_off < cur_off) {
-            for (; cur != nullptr; cur = cur->getPrev(), cur_off -= cur->size()) {
-                if (local_off >= cur_off) {
-                    local_off -= cur_off;
-                    break;
-                }
-                count++;
-            }
-            MOZ_ASSERT(cur != nullptr);
-        } else {
-            for (; cur != nullptr; cur = cur->getNext()) {
-                int cur_size = cur->size();
-                if (local_off < cur_off + cur_size) {
-                    local_off -= cur_off;
-                    break;
-                }
-                cur_off += cur_size;
-                count++;
-            }
-            MOZ_ASSERT(cur != nullptr);
-        }
-        if (count > 2 || used_finger) {
-            finger = cur;
-            finger_offset = cur_off;
-        }
-        // The offset within this node should not be larger than the node
-        // itself.
-        MOZ_ASSERT(local_off < (int)cur->size());
-        return (Inst*)&cur->instructions[local_off];
+
+        // Is the instruction closer to the start or to the end?
+        if (offset < int(bufferSize - offset))
+            return getInstForwards(off, head, 0);
+
+        // The last slice was already checked above, so start at the second-to-last.
+        Slice* prev = tail->getPrev();
+        return getInstBackwards(off, prev, bufferSize - prev->length());
    }
+
     BufferOffset nextOffset() const {
-        if (tail != nullptr)
-            return BufferOffset(bufferSize + tail->size());
-        else
-            return BufferOffset(bufferSize);
-    }
-    BufferOffset prevOffset() const {
-        MOZ_CRASH("Don't current record lastInstSize");
+        if (tail)
+            return BufferOffset(bufferSize + tail->length());
+        return BufferOffset(bufferSize);
     }
 
     // Break the instruction stream so we can go back and edit it at this point
     void perforate() {
-        Slice* tmp = newSlice(lifoAlloc_);
-        if (!tmp) {
-            m_oom = true;
+        Slice* slice = newSlice(lifoAlloc_);
+        if (!slice) {
+            fail_oom();
             return;
         }
-        bufferSize += tail->size();
-        tail->setNext(tmp);
-        tail = tmp;
+
+        bufferSize += tail->length();
+        tail->setNext(slice);
+        tail = slice;
     }
 
-    void executableCopy(uint8_t* dest_) {
-        if (this->oom())
-            return;
-
-        for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
-            memcpy(dest_, &cur->instructions, cur->size());
-            dest_ += cur->size();
-        }
-    }
-
-    class AssemblerBufferInstIterator {
-      private:
+    class AssemblerBufferInstIterator
+    {
         BufferOffset bo;
         AssemblerBuffer_* m_buffer;
+
       public:
-        explicit AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_* buff)
-          : bo(off), m_buffer(buff)
-        {
-        }
+        explicit AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_* buffer)
+          : bo(off), m_buffer(buffer)
+        { }
+
         Inst* next() {
             Inst* i = m_buffer->getInst(bo);
             bo = BufferOffset(bo.getOffset() + i->size());
             return cur();
         }
+
         Inst* cur() {
             return m_buffer->getInst(bo);
         }
     };
-
-  public:
-    LifoAlloc lifoAlloc_;
 };
 
-} // ion
-} // js
-#endif /* jit_shared_IonAssemblerBuffer_h */
+} // namespace ion
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBuffer_h
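[Editor's note] The main behavioral change in this file is the getInst() rewrite: instead of an ad-hoc walk with a `count` heuristic, a lookup now starts from whichever of head, tail, or the cached "finger" is nearest, and the finger is refreshed whenever a walk skips enough slices. Below is a minimal standalone sketch of the forward-walking half of that idea. All names are hypothetical, a std::vector of slices stands in for the real LifoAlloc-backed linked list, and the backward walk from the tail (getInstBackwards in the patch) is omitted for brevity.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical slice: a chunk of emitted bytes.
    struct Slice {
        std::vector<uint8_t> bytes;
        size_t length() const { return bytes.size(); }
    };

    struct SlicedBuffer {
        std::vector<Slice> slices; // head = front(), tail = back()
        size_t committed = 0;      // bytes in all slices except the tail

        // Finger: the slice found by the previous lookup, and the buffer
        // offset of that slice's first byte.
        size_t fingerIndex = 0;
        size_t fingerStart = 0;

        uint8_t* get(size_t offset) {
            // Fast path, as in the patch: offsets past `committed` are in the tail.
            if (offset >= committed)
                return &slices.back().bytes[offset - committed];

            // Start at the finger when the target is at or after it;
            // otherwise fall back to the head. (The real code can also
            // walk backwards from the tail or from the finger.)
            size_t idx = 0, start = 0;
            if (fingerStart <= offset) {
                idx = fingerIndex;
                start = fingerStart;
            }

            // Walk forward until the slice containing `offset` is found.
            while (start + slices[idx].length() <= offset) {
                start += slices[idx].length();
                idx++;
            }

            // Remember where we ended up, to speed up the next lookup.
            fingerIndex = idx;
            fingerStart = start;
            return &slices[idx].bytes[offset - start];
        }
    };

The patch is more conservative than this sketch: its SliceDistanceRequiringFingerUpdate = 3 constant only moves the finger when a lookup either started from it or had to skip several slices, so nearby lookups don't thrash the cache.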
--- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -110,18 +110,17 @@
 namespace js {
 namespace jit {
 
 typedef Vector<BufferOffset, 512, OldJitAllocPolicy> LoadOffsets;
 
 // The allocation unit size for pools.
 typedef int32_t PoolAllocUnit;
 
-struct Pool
-  : public OldJitAllocPolicy
+struct Pool : public OldJitAllocPolicy
 {
   private:
     // The maximum program-counter relative offset below which the instruction
     // set can encode. Different classes of intructions might support different
     // ranges but for simplicity the minimum is used here, and for the ARM this
     // is constrained to 1024 by the float load instructions.
     const size_t maxOffset_;
 
     // An offset to apply to program-counter relative offsets. The ARM has a
@@ -150,25 +149,30 @@ struct Pool
   public:
     // A record of the code offset of instructions that reference pool
     // entries. These instructions need to be patched when the actual position
     // of the instructions and pools are known, and for the code below this
     // occurs when each pool is finished, see finishPool().
     LoadOffsets loadOffsets;
 
     explicit Pool(size_t maxOffset, unsigned bias, LifoAlloc& lifoAlloc)
-      : maxOffset_(maxOffset), bias_(bias), numEntries_(0), buffSize(8),
-        poolData_(lifoAlloc.newArrayUninitialized<PoolAllocUnit>(buffSize)),
-        limitingUser(), limitingUsee(INT_MIN), loadOffsets()
-    {
-    }
+      : maxOffset_(maxOffset),
+        bias_(bias),
+        numEntries_(0),
+        buffSize(8),
+        poolData_(lifoAlloc.newArrayUninitialized<PoolAllocUnit>(buffSize)),
+        limitingUser(),
+        limitingUsee(INT_MIN),
+        loadOffsets()
+    { }
+
     static const unsigned Garbage = 0xa5a5a5a5;
-    Pool() : maxOffset_(Garbage), bias_(Garbage)
-    {
-    }
+    Pool()
+      : maxOffset_(Garbage), bias_(Garbage)
+    { }
 
     PoolAllocUnit* poolData() const {
         return poolData_;
     }
 
     unsigned numEntries() const {
         return numEntries_;
     }
@@ -236,78 +240,97 @@ struct Pool
             return false;
 
         new (&loadOffsets) LoadOffsets;
 
         limitingUser = BufferOffset();
         limitingUsee = -1;
         return true;
     }
-
 };
 
 template <size_t SliceSize, size_t InstSize>
-struct BufferSliceTail : public BufferSlice<SliceSize> {
+struct BufferSliceTail : public BufferSlice<SliceSize>
+{
   private:
     // Bit vector to record which instructions in the slice have a branch, so
     // that they can be patched when the final positions are known.
     mozilla::Array<uint8_t, (SliceSize / InstSize) / 8> isBranch_;
+
   public:
     Pool* pool;
+
     // Flag when the last instruction in the slice is a 'natural' pool guard. A
     // natural pool guard is a branch in the code that was not explicitly added
     // to branch around the pool. For now an explict guard branch is always
     // emitted, so this will always be false.
     bool isNatural : 1;
-    BufferSliceTail* getNext() const {
-        return (BufferSliceTail*)this->next_;
-    }
-    explicit BufferSliceTail() : pool(nullptr), isNatural(true) {
+
+  public:
+    explicit BufferSliceTail()
+      : pool(nullptr), isNatural(true)
+    {
         static_assert(SliceSize % (8 * InstSize) == 0,
                       "SliceSize must be a multple of 8 * InstSize.");
         mozilla::PodArrayZero(isBranch_);
     }
+
+  public:
+    bool isBranch(unsigned idx) const {
+        MOZ_ASSERT(idx < this->bytelength_ / InstSize);
+        return (isBranch_[idx >> 3] >> (idx & 0x7)) & 1;
+    }
+
+    bool isNextBranch() const {
+        size_t size = this->bytelength_;
+        MOZ_ASSERT(size < SliceSize);
+        return isBranch(size / InstSize);
+    }
+
     void markNextAsBranch() {
-        // The caller is expected to ensure that the nodeSize_ < SliceSize. See
+        // The caller is expected to ensure that the bytelength_ < SliceSize. See
         // the assembler's markNextAsBranch() method which firstly creates a new
         // slice if necessary.
-        MOZ_ASSERT(this->nodeSize_ % InstSize == 0);
-        MOZ_ASSERT(this->nodeSize_ < SliceSize);
-        size_t idx = this->nodeSize_ / InstSize;
+        MOZ_ASSERT(this->bytelength_ % InstSize == 0);
+        MOZ_ASSERT(this->bytelength_ < SliceSize);
+        size_t idx = this->bytelength_ / InstSize;
         isBranch_[idx >> 3] |= 1 << (idx & 0x7);
     }
-    bool isBranch(unsigned idx) const {
-        MOZ_ASSERT(idx < this->nodeSize_ / InstSize);
-        return (isBranch_[idx >> 3] >> (idx & 0x7)) & 1;
-    }
-    bool isNextBranch() const {
-        size_t size = this->nodeSize_;
-        MOZ_ASSERT(size < SliceSize);
-        return isBranch(size / InstSize);
+
+    BufferSliceTail* getNext() const {
+        return (BufferSliceTail*)this->next_;
     }
 };
 
 // The InstSize is the sizeof(Inst) but is needed here because the buffer is
 // defined before the Instruction.
 template <size_t SliceSize, size_t InstSize, class Inst, class Asm>
-struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst> {
+struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst>
+{
   private:
     // The PoolEntry index counter. Each PoolEntry is given a unique index,
     // counting up from zero, and these can be mapped back to the actual pool
     // entry offset after finishing the buffer, see poolEntryOffset().
     size_t poolEntryCount;
+
   public:
-    class PoolEntry {
+    class PoolEntry
+    {
         size_t index_;
+
       public:
-        explicit PoolEntry(size_t index) : index_(index) {
-        }
-        PoolEntry() : index_(-1) {
-        }
+        explicit PoolEntry(size_t index)
+          : index_(index)
+        { }
+
+        PoolEntry()
+          : index_(-1)
+        { }
+
         size_t index() const {
             return index_;
         }
     };
 
   private:
     typedef BufferSliceTail<SliceSize, InstSize> BufferSlice;
     typedef AssemblerBuffer<SliceSize, Inst> Parent;
@@ -379,24 +402,22 @@ struct AssemblerBufferWithConstantPools
     // followed.
     const uint32_t nopFillInst_;
     const unsigned nopFill_;
 
     // For inhibiting the insertion of fill NOPs in the dynamic context in which
     // they are being inserted.
     bool inhibitNops_;
 
   public:
-
     // A unique id within each JitContext, to identify pools in the debug
     // spew. Set by the MacroAssembler, see getNextAssemblerId().
     int id;
 
   private:
-    // The buffer slices are in a double linked list. Pointers to the head and
-    // tail of this list:
+    // The buffer slices are in a double linked list.
     BufferSlice* getHead() const {
         return (BufferSlice*)this->head;
     }
     BufferSlice* getTail() const {
         return (BufferSlice*)this->tail;
     }
 
     virtual BufferSlice* newSlice(LifoAlloc& a) {
@@ -408,25 +429,37 @@ struct AssemblerBufferWithConstantPools
         return slice;
     }
 
   public:
     AssemblerBufferWithConstantPools(unsigned guardSize, unsigned headerSize,
                                      size_t instBufferAlign, size_t poolMaxOffset,
                                      unsigned pcBias, uint32_t alignFillInst,
                                      uint32_t nopFillInst, unsigned nopFill = 0)
-      : poolEntryCount(0), guardSize_(guardSize), headerSize_(headerSize),
-        poolMaxOffset_(poolMaxOffset), pcBias_(pcBias),
-        instBufferAlign_(instBufferAlign),
-        numDumps_(0), poolInfoSize_(8), poolInfo_(nullptr),
-        canNotPlacePool_(false), alignFillInst_(alignFillInst),
-        nopFillInst_(nopFillInst), nopFill_(nopFill), inhibitNops_(false),
-        id(-1)
-    {
-    }
+      : poolEntryCount(0),
+        guardSize_(guardSize),
+        headerSize_(headerSize),
+        poolMaxOffset_(poolMaxOffset),
+        pcBias_(pcBias),
+        pool_(),
+        instBufferAlign_(instBufferAlign),
+        numDumps_(0),
+        poolInfoSize_(8),
+        poolInfo_(nullptr),
+        canNotPlacePool_(false),
+#ifdef DEBUG
+        canNotPlacePoolStartOffset_(0),
+        canNotPlacePoolMaxInst_(0),
+#endif
+        alignFillInst_(alignFillInst),
+        nopFillInst_(nopFillInst),
+        nopFill_(nopFill),
+        inhibitNops_(false),
+        id(-1)
+    { }
 
     // We need to wait until an AutoJitContextAlloc is created by the
     // MacroAssembler before allocating any space.
     void initWithAllocator() {
         poolInfo_ = this->lifoAlloc_.template newArrayUninitialized<PoolInfo>(poolInfoSize_);
 
         new (&pool_) Pool (poolMaxOffset_, pcBias_, this->lifoAlloc_);
         if (pool_.poolData() == nullptr)
@@ -521,17 +554,18 @@ struct AssemblerBufferWithConstantPools
         // when not allocating an entry a dummy value is returned - it is not
         // expected to be used by the caller.
         return UINT_MAX;
     }
 
   public:
     BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
                             uint8_t* inst, uint8_t* data, PoolEntry* pe = nullptr,
-                            bool markAsBranch = false) {
+                            bool markAsBranch = false)
+    {
         // The alloction of pool entries is not supported in a no-pool region,
         // check.
         MOZ_ASSERT_IF(numPoolEntries, !canNotPlacePool_);
 
         if (this->oom() && !this->bail())
             return BufferOffset();
 
         insertNopFill();
@@ -564,17 +598,17 @@ struct AssemblerBufferWithConstantPools
             retPE = PoolEntry(poolEntryCount);
             poolEntryCount += numPoolEntries;
         }
 
         // Now inst is a valid thing to insert into the instruction stream.
         if (pe != nullptr)
             *pe = retPE;
 
         if (markAsBranch)
             markNextAsBranch();
-        return this->putBlob(numInst * InstSize, inst);
+        return this->putBytes(numInst * InstSize, inst);
     }
 
     BufferOffset putInt(uint32_t value, bool markAsBranch = false) {
         return allocEntry(1, 0, (uint8_t*)&value, nullptr, nullptr, markAsBranch);
     }
 
   private:
     PoolInfo getPoolData(BufferSlice* perforatedSlice, size_t perfOffset) const {
@@ -609,17 +643,17 @@ struct AssemblerBufferWithConstantPools
         // Should not be placing a pool in a no-pool region, check.
         MOZ_ASSERT(!canNotPlacePool_);
 
         // Dump the pool with a guard branch around the pool.
         BufferOffset branch = this->nextOffset();
 
         // Mark and emit the guard branch.
         markNextAsBranch();
-        this->putBlob(guardSize_ * InstSize, nullptr);
+        this->putBytes(guardSize_ * InstSize, nullptr);
         BufferOffset afterPool = this->nextOffset();
         Asm::WritePoolGuard(branch, this->getInst(branch), afterPool);
 
         // Perforate the buffer which finishes the current slice and allocates a
         // new slice. This is necessary because Pools are always placed after
         // the end of a slice.
         BufferSlice* perforatedSlice = getTail();
         BufferOffset perforation = this->nextOffset();
@@ -746,19 +780,17 @@ struct AssemblerBufferWithConstantPools
         unsigned cur = 0;
         while (cur < numDumps_ && poolInfo_[cur].offset <= offset)
             cur++;
         if (cur == 0)
             return 0;
         return poolInfo_[cur - 1].finalPos - poolInfo_[cur - 1].offset;
     }
 
-    void align(unsigned alignment)
-    {
-        // Restrict the alignment to a power of two for now.
+    void align(unsigned alignment) {
         MOZ_ASSERT(IsPowerOfTwo(alignment));
 
         // A pool many need to be dumped at this point, so insert NOP fill here.
         insertNopFill();
 
         // Check if the code position can be aligned without dumping a pool.
         unsigned requiredFill = sizeExcludingCurrentPool() & (alignment - 1);
         if (requiredFill == 0)
@@ -814,17 +846,17 @@ struct AssemblerBufferWithConstantPools
         MOZ_ASSERT(uintptr_t(dest_) == ((uintptr_t(dest_) + instBufferAlign_ - 1) & ~(instBufferAlign_ - 1)));
 
         // Assuming the Instruction size is 4 bytes, check.
         static_assert(InstSize == sizeof(uint32_t), "Assuming instruction size is 4 bytes");
         uint32_t* dest = (uint32_t*)dest_;
         unsigned curIndex = 0;
         size_t curInstOffset = 0;
         for (BufferSlice* cur = getHead(); cur != nullptr; cur = cur->getNext()) {
             uint32_t* src = (uint32_t*)&cur->instructions;
-            unsigned numInsts = cur->size() / InstSize;
+            unsigned numInsts = cur->length() / InstSize;
             for (unsigned idx = 0; idx < numInsts; idx++, curInstOffset += InstSize) {
                 // Is the current instruction a branch?
                 if (cur->isBranch(idx)) {
                     // It's a branch, fix up the branchiness!
                     patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset));
                 }
                 dest[idx] = src[idx];
             }
@@ -858,11 +890,12 @@ struct AssemblerBufferWithConstantPools
             if (size > offset)
                 return pi->finalPos - pi->size + headerSize_ * InstSize + offset;
             offset -= size;
         }
         MOZ_CRASH("Entry is not in a pool");
     }
 };
 
-} // ion
-} // js
-#endif /* jit_shared_IonAssemblerBufferWithConstantPools_h */
+} // namespace ion
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBufferWithConstantPools_h
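[Editor's note] The BufferSliceTail changes here are renames (nodeSize_ to bytelength_) plus reordering; the branch-tracking scheme itself is unchanged: one bit per instruction slot, packed eight to a byte, set by markNextAsBranch() for the slot the next instruction will occupy, and read back by executableCopy() via isBranch() to decide which instructions need patchBranch(). A self-contained sketch of that bit arithmetic, assuming 4-byte instructions and a made-up slice size:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t InstSize = 4;     // assumed 4-byte instructions
    constexpr size_t SliceSize = 1024; // hypothetical slice capacity in bytes

    struct BranchBits {
        // One bit per instruction slot in the slice, as in isBranch_.
        uint8_t bits[(SliceSize / InstSize) / 8] = {};
        size_t bytelength = 0; // bytes emitted so far

        // Mirror of markNextAsBranch(): flag the slot that the next
        // instruction to be emitted will occupy.
        void markNextAsBranch() {
            assert(bytelength % InstSize == 0);
            assert(bytelength < SliceSize);
            size_t idx = bytelength / InstSize;
            bits[idx >> 3] |= 1 << (idx & 0x7);
        }

        // Mirror of isBranch(): test whether instruction `idx` was flagged.
        bool isBranch(size_t idx) const {
            return (bits[idx >> 3] >> (idx & 0x7)) & 1;
        }
    };

    int main() {
        BranchBits b;
        b.markNextAsBranch();     // the next instruction (slot 0) is a branch
        b.bytelength += InstSize; // pretend we emitted it
        assert(b.isBranch(0));
        assert(!b.isBranch(1));
    }

This is why dumpPool() calls markNextAsBranch() immediately before emitting the guard branch with putBytes(): the guard's slot is flagged so the final copy pass knows to run Asm::WritePoolGuard-patched code through patchBranch() like any other branch.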