--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -953,17 +953,17 @@ BacktrackingAllocator::resolveControlFlo
skip = true;
break;
}
}
if (skip)
continue;
CodePosition start = interval->start();
- LInstruction *ins = insData[start];
+ LNode *ins = insData[start];
if (interval->start() > entryOf(ins->block())) {
MOZ_ASSERT(start == inputOf(ins) || start == outputOf(ins));
LiveInterval *prevInterval = reg->intervalFor(start.previous());
if (start.subpos() == CodePosition::INPUT) {
if (!moveInput(inputOf(ins), prevInterval, interval, reg->type()))
return false;
} else {
@@ -1037,25 +1037,25 @@ BacktrackingAllocator::resolveControlFlo
}
}
}
return true;
}
bool
-BacktrackingAllocator::isReusedInput(LUse *use, LInstruction *ins, bool considerCopy)
+BacktrackingAllocator::isReusedInput(LUse *use, LNode *ins, bool considerCopy)
{
if (LDefinition *def = FindReusingDefinition(ins, use))
return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
return false;
}
bool
-BacktrackingAllocator::isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy)
+BacktrackingAllocator::isRegisterUse(LUse *use, LNode *ins, bool considerCopy)
{
switch (use->policy()) {
case LUse::ANY:
return isReusedInput(use, ins, considerCopy);
case LUse::REGISTER:
case LUse::FIXED:
return true;
@@ -1096,17 +1096,17 @@ BacktrackingAllocator::reifyAllocations(
for (size_t j = 0; j < reg->numIntervals(); j++) {
LiveInterval *interval = reg->getInterval(j);
MOZ_ASSERT(interval->index() == j);
if (interval->index() == 0) {
reg->def()->setOutput(*interval->getAllocation());
if (reg->ins()->recoversInput()) {
- LSnapshot *snapshot = reg->ins()->snapshot();
+ LSnapshot *snapshot = reg->ins()->toInstruction()->snapshot();
for (size_t i = 0; i < snapshot->numEntries(); i++) {
LAllocation *entry = snapshot->getEntry(i);
if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
*entry = *reg->def()->output();
}
}
}
@@ -1114,17 +1114,17 @@ BacktrackingAllocator::reifyAllocations(
iter != interval->usesEnd();
iter++)
{
LAllocation *alloc = iter->use;
*alloc = *interval->getAllocation();
// For any uses which feed into MUST_REUSE_INPUT definitions,
// add copies if the use and def have different allocations.
- LInstruction *ins = insData[iter->pos];
+ LNode *ins = insData[iter->pos];
if (LDefinition *def = FindReusingDefinition(ins, alloc)) {
LiveInterval *outputInterval =
vregs[def->virtualRegister()].intervalFor(outputOf(ins));
LAllocation *res = outputInterval->getAllocation();
LAllocation *sourceAlloc = interval->getAllocation();
if (*res != *alloc) {
LMoveGroup *group = getInputMoveGroup(inputOf(ins));
@@ -1358,25 +1358,25 @@ BacktrackingAllocator::computePriority(c
for (size_t j = 0; j < group->registers.length(); j++) {
uint32_t vreg = group->registers[j];
priority += computePriority(vregs[vreg].getInterval(0));
}
return priority;
}
bool
-BacktrackingAllocator::minimalDef(const LiveInterval *interval, LInstruction *ins)
+BacktrackingAllocator::minimalDef(const LiveInterval *interval, LNode *ins)
{
// Whether interval is a minimal interval capturing a definition at ins.
return (interval->end() <= minimalDefEnd(ins).next()) &&
((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins));
}
bool
-BacktrackingAllocator::minimalUse(const LiveInterval *interval, LInstruction *ins)
+BacktrackingAllocator::minimalUse(const LiveInterval *interval, LNode *ins)
{
// Whether interval is a minimal interval capturing a use at ins.
return (interval->start() == inputOf(ins)) &&
(interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
}
bool
BacktrackingAllocator::minimalInterval(const LiveInterval *interval, bool *pfixed)
@@ -1547,17 +1547,17 @@ BacktrackingAllocator::trySplitAfterLast
}
}
for (UsePositionIterator iter(interval->usesBegin());
iter != interval->usesEnd();
iter++)
{
LUse *use = iter->use;
- LInstruction *ins = insData[iter->pos];
+ LNode *ins = insData[iter->pos];
// Uses in the interval should be sorted.
MOZ_ASSERT(iter->pos >= lastUse);
lastUse = inputOf(ins);
if (!conflict || outputOf(ins) < conflict->start()) {
if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
lastRegisterFrom = inputOf(ins);
@@ -1604,17 +1604,17 @@ BacktrackingAllocator::trySplitBeforeFir
CodePosition firstRegisterFrom;
for (UsePositionIterator iter(interval->usesBegin());
iter != interval->usesEnd();
iter++)
{
LUse *use = iter->use;
- LInstruction *ins = insData[iter->pos];
+ LNode *ins = insData[iter->pos];
if (!conflict || outputOf(ins) >= conflict->end()) {
if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
firstRegisterFrom = inputOf(ins);
break;
}
}
}
@@ -1675,17 +1675,17 @@ BacktrackingAllocator::splitAtAllRegiste
return false;
}
}
for (UsePositionIterator iter(interval->usesBegin());
iter != interval->usesEnd();
iter++)
{
- LInstruction *ins = insData[iter->pos];
+ LNode *ins = insData[iter->pos];
if (iter->pos < spillStart) {
newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
} else if (isRegisterUse(iter->use, ins)) {
// For register uses which are not useRegisterAtStart, pick an
// interval that covers both the instruction's input and output, so
// that the register is not reused for an output.
CodePosition from = inputOf(ins);
CodePosition to = iter->pos.next();
@@ -1777,17 +1777,17 @@ BacktrackingAllocator::splitAt(LiveInter
newInterval->setSpillInterval(spillInterval);
if (!newIntervals.append(newInterval))
return false;
lastRegisterUse = interval->start();
}
size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start());
for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
- LInstruction *ins = insData[iter->pos];
+ LNode *ins = insData[iter->pos];
if (iter->pos < spillStart) {
newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos);
} else if (isRegisterUse(iter->use, ins)) {
if (lastRegisterUse.bits() == 0 ||
SplitHere(activeSplitPosition, splitPositions, iter->pos))
{
// Place this register use into a different interval from the
--- a/js/src/jit/BacktrackingAllocator.h
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -202,35 +202,35 @@ class BacktrackingAllocator
bool tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
bool *psuccess, bool *pfixed, LiveInterval **pconflicting);
bool evictInterval(LiveInterval *interval);
void distributeUses(LiveInterval *interval, const LiveIntervalVector &newIntervals);
bool split(LiveInterval *interval, const LiveIntervalVector &newIntervals);
bool requeueIntervals(const LiveIntervalVector &newIntervals);
void spill(LiveInterval *interval);
- bool isReusedInput(LUse *use, LInstruction *ins, bool considerCopy);
- bool isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy = false);
+ bool isReusedInput(LUse *use, LNode *ins, bool considerCopy);
+ bool isRegisterUse(LUse *use, LNode *ins, bool considerCopy = false);
bool isRegisterDefinition(LiveInterval *interval);
bool addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
LiveInterval *spillInterval,
CodePosition from, CodePosition to);
bool resolveControlFlow();
bool reifyAllocations();
bool populateSafepoints();
void dumpRegisterGroups();
void dumpFixedRanges();
void dumpAllocations();
struct PrintLiveIntervalRange;
- bool minimalDef(const LiveInterval *interval, LInstruction *ins);
- bool minimalUse(const LiveInterval *interval, LInstruction *ins);
+ bool minimalDef(const LiveInterval *interval, LNode *ins);
+ bool minimalUse(const LiveInterval *interval, LNode *ins);
bool minimalInterval(const LiveInterval *interval, bool *pfixed = nullptr);
// Heuristic methods.
size_t computePriority(const LiveInterval *interval);
size_t computeSpillWeight(const LiveInterval *interval);
size_t computePriority(const VirtualRegisterGroup *group);
--- a/js/src/jit/C1Spewer.cpp
+++ b/js/src/jit/C1Spewer.cpp
@@ -96,26 +96,26 @@ DumpDefinition(FILE *fp, MDefinition *de
fprintf(fp, "%u %u ", def->id(), unsigned(def->useCount()));
def->printName(fp);
fprintf(fp, " ");
def->printOpcode(fp);
fprintf(fp, " <|@\n");
}
static void
-DumpLIR(FILE *fp, LInstruction *ins)
+DumpLIR(FILE *fp, LNode *ins)
{
fprintf(fp, " ");
fprintf(fp, "%d ", ins->id());
ins->dump(fp);
fprintf(fp, " <|@\n");
}
void
-C1Spewer::spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LInstruction *ins, size_t &nextId)
+C1Spewer::spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LNode *ins, size_t &nextId)
{
for (size_t k = 0; k < ins->numDefs(); k++) {
uint32_t id = ins->getDef(k)->virtualRegister();
VirtualRegister *vreg = &regalloc->vregs[id];
for (size_t i = 0; i < vreg->numIntervals(); i++) {
LiveInterval *live = vreg->getInterval(i);
if (live->numRanges()) {
--- a/js/src/jit/C1Spewer.h
+++ b/js/src/jit/C1Spewer.h
@@ -16,17 +16,17 @@
namespace js {
namespace jit {
class MDefinition;
class MInstruction;
class MBasicBlock;
class MIRGraph;
class LinearScanAllocator;
-class LInstruction;
+class LNode;
class C1Spewer
{
MIRGraph *graph;
FILE *spewout_;
public:
C1Spewer()
@@ -37,17 +37,17 @@ class C1Spewer
void beginFunction(MIRGraph *graph, HandleScript script);
void spewPass(const char *pass);
void spewIntervals(const char *pass, LinearScanAllocator *regalloc);
void endFunction();
void finish();
private:
void spewPass(FILE *fp, MBasicBlock *block);
- void spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LInstruction *ins, size_t &nextId);
+ void spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LNode *ins, size_t &nextId);
void spewIntervals(FILE *fp, MBasicBlock *block, LinearScanAllocator *regalloc, size_t &nextId);
};
} // namespace jit
} // namespace js
#endif /* DEBUG */
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -17,16 +17,18 @@
#include "vm/TypedArrayCommon.h"
namespace js {
class LockedJSContext;
namespace jit {
+class LInstruction;
+
#define IONCACHE_KIND_LIST(_) \
_(GetProperty) \
_(SetProperty) \
_(GetElement) \
_(SetElement) \
_(BindName) \
_(Name) \
_(CallsiteClone) \
--- a/js/src/jit/JSONSpewer.cpp
+++ b/js/src/jit/JSONSpewer.cpp
@@ -330,17 +330,17 @@ JSONSpewer::spewMIR(MIRGraph *mir)
endObject();
}
endList();
endObject();
}
void
-JSONSpewer::spewLIns(LInstruction *ins)
+JSONSpewer::spewLIns(LNode *ins)
{
if (!fp_)
return;
beginObject();
integerProperty("id", ins->id());
--- a/js/src/jit/JSONSpewer.h
+++ b/js/src/jit/JSONSpewer.h
@@ -17,17 +17,17 @@ namespace js {
namespace jit {
class MDefinition;
class MInstruction;
class MBasicBlock;
class MIRGraph;
class MResumePoint;
class LinearScanAllocator;
-class LInstruction;
+class LNode;
class JSONSpewer
{
private:
// Set by beginFunction(); unset by endFunction().
// Used to correctly format output in case of abort during compilation.
bool inFunction_;
@@ -58,17 +58,17 @@ class JSONSpewer
~JSONSpewer();
bool init(const char *path);
void beginFunction(JSScript *script);
void beginPass(const char * pass);
void spewMDef(MDefinition *def);
void spewMResumePoint(MResumePoint *rp);
void spewMIR(MIRGraph *mir);
- void spewLIns(LInstruction *ins);
+ void spewLIns(LNode *ins);
void spewLIR(MIRGraph *mir);
void spewIntervals(LinearScanAllocator *regalloc);
void endPass();
void endFunction();
void finish();
};
} // namespace jit
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -6066,17 +6066,17 @@ class LGuardClass : public LInstructionH
};
class MPhi;
// Phi is a pseudo-instruction that emits no code, and is an annotation for the
// register allocator. Like its equivalent in MIR, phis are collected at the
// top of blocks and are meant to be executed in parallel, choosing the input
// corresponding to the predecessor taken in the control flow graph.
-class LPhi MOZ_FINAL : public LInstruction
+class LPhi MOZ_FINAL : public LNode
{
LAllocation *const inputs_;
LDefinition def_;
public:
LIR_HEADER(Phi)
LPhi(MPhi *ins, LAllocation *inputs)
--- a/js/src/jit/LIR.cpp
+++ b/js/src/jit/LIR.cpp
@@ -312,32 +312,32 @@ LSnapshot::rewriteRecoveredInput(LUse in
// equal to the instruction's result.
for (size_t i = 0; i < numEntries(); i++) {
if (getEntry(i)->isUse() && getEntry(i)->toUse()->virtualRegister() == input.virtualRegister())
setEntry(i, LUse(input.virtualRegister(), LUse::RECOVERED_INPUT));
}
}
void
-LInstruction::printName(FILE *fp, Opcode op)
+LNode::printName(FILE *fp, Opcode op)
{
static const char * const names[] =
{
#define LIROP(x) #x,
LIR_OPCODE_LIST(LIROP)
#undef LIROP
};
const char *name = names[op];
size_t len = strlen(name);
for (size_t i = 0; i < len; i++)
fprintf(fp, "%c", tolower(name[i]));
}
void
-LInstruction::printName(FILE *fp)
+LNode::printName(FILE *fp)
{
printName(fp, op());
}
bool
LAllocation::aliases(const LAllocation &other) const
{
if (isFloatReg() && other.isFloatReg())
@@ -462,17 +462,17 @@ LAllocation::dump() const
void
LDefinition::dump() const
{
fprintf(stderr, "%s\n", toString());
}
void
-LInstruction::printOperands(FILE *fp)
+LNode::printOperands(FILE *fp)
{
for (size_t i = 0, e = numOperands(); i < e; i++) {
fprintf(fp, " (%s)", getOperand(i)->toString());
if (i != numOperands() - 1)
fprintf(fp, ",");
}
}
@@ -489,17 +489,17 @@ LInstruction::assignSnapshot(LSnapshot *
(void *)snapshot, (void *)this);
printName(JitSpewFile);
fprintf(JitSpewFile, ")\n");
}
#endif
}
void
-LInstruction::dump(FILE *fp)
+LNode::dump(FILE *fp)
{
if (numDefs() != 0) {
fprintf(fp, "{");
for (size_t i = 0; i < numDefs(); i++) {
fprintf(fp, "%s", getDef(i)->toString());
if (i != numDefs() - 1)
fprintf(fp, ", ");
}
@@ -526,17 +526,17 @@ LInstruction::dump(FILE *fp)
if (i != numSuccessors() - 1)
fprintf(fp, ", ");
}
fprintf(fp, ")");
}
}
void
-LInstruction::dump()
+LNode::dump()
{
dump(stderr);
fprintf(stderr, "\n");
}
void
LInstruction::initSafepoint(TempAllocator &alloc)
{
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -590,51 +590,35 @@ class LDefinition
// Forward declarations of LIR types.
#define LIROP(op) class L##op;
LIR_OPCODE_LIST(LIROP)
#undef LIROP
class LSnapshot;
class LSafepoint;
-class LInstructionVisitor;
+class LInstruction;
+class LElementVisitor;
-class LInstruction
- : public TempObject,
- public InlineListNode<LInstruction>
+// The common base class for LPhi and LInstruction.
+class LNode
{
uint32_t id_;
-
- // This snapshot could be set after a ResumePoint. It is used to restart
- // from the resume point pc.
- LSnapshot *snapshot_;
-
- // Structure capturing the set of stack slots and registers which are known
- // to hold either gcthings or Values.
- LSafepoint *safepoint_;
-
LBlock *block_;
- LMoveGroup *inputMoves_;
- LMoveGroup *movesAfter_;
protected:
MDefinition *mir_;
- LInstruction()
+ public:
+ LNode()
: id_(0),
- snapshot_(nullptr),
- safepoint_(nullptr),
block_(nullptr),
- inputMoves_(nullptr),
- movesAfter_(nullptr),
mir_(nullptr)
{ }
- public:
- class InputIterator;
enum Opcode {
# define LIROP(name) LOp_##name,
LIR_OPCODE_LIST(LIROP)
# undef LIROP
LOp_Invalid
};
const char *opName() {
@@ -649,19 +633,24 @@ class LInstruction
}
// Hook for opcodes to add extra high level detail about what code will be
// emitted for the op.
virtual const char *extraName() const {
return nullptr;
}
- public:
virtual Opcode op() const = 0;
+ bool isInstruction() const {
+ return op() != LOp_Phi;
+ }
+ inline LInstruction *toInstruction();
+ inline const LInstruction *toInstruction() const;
+
// Returns the number of outputs of this instruction. If an output is
// unallocated, it is an LDefinition, defining a virtual register.
virtual size_t numDefs() const = 0;
virtual LDefinition *getDef(size_t index) = 0;
virtual void setDef(size_t index, const LDefinition &def) = 0;
// Returns information about operands.
virtual size_t numOperands() const = 0;
@@ -686,49 +675,29 @@ class LInstruction
uint32_t id() const {
return id_;
}
void setId(uint32_t id) {
MOZ_ASSERT(!id_);
MOZ_ASSERT(id);
id_ = id;
}
- LSnapshot *snapshot() const {
- return snapshot_;
- }
- LSafepoint *safepoint() const {
- return safepoint_;
- }
void setMir(MDefinition *mir) {
mir_ = mir;
}
MDefinition *mirRaw() const {
/* Untyped MIR for this op. Prefer mir() methods in subclasses. */
return mir_;
}
LBlock *block() const {
return block_;
}
void setBlock(LBlock *block) {
block_ = block;
}
- LMoveGroup *inputMoves() const {
- return inputMoves_;
- }
- void setInputMoves(LMoveGroup *moves) {
- inputMoves_ = moves;
- }
- LMoveGroup *movesAfter() const {
- return movesAfter_;
- }
- void setMovesAfter(LMoveGroup *moves) {
- movesAfter_ = moves;
- }
- void assignSnapshot(LSnapshot *snapshot);
- void initSafepoint(TempAllocator &alloc);
// For an instruction which has a MUST_REUSE_INPUT output, whether that
// output register will be restored to its original value when bailing out.
virtual bool recoversInput() const {
return false;
}
virtual void dump(FILE *fp);
@@ -742,42 +711,105 @@ class LInstruction
# define LIROP(name) \
bool is##name() const { \
return op() == LOp_##name; \
} \
inline L##name *to##name();
LIR_OPCODE_LIST(LIROP)
# undef LIROP
- virtual bool accept(LInstructionVisitor *visitor) = 0;
+ virtual bool accept(LElementVisitor *visitor) = 0;
};
-class LInstructionVisitor
+class LInstruction
+ : public LNode
+ , public TempObject
+ , public InlineListNode<LInstruction>
{
- LInstruction *ins_;
+ // This snapshot could be set after a ResumePoint. It is used to restart
+ // from the resume point pc.
+ LSnapshot *snapshot_;
+
+ // Structure capturing the set of stack slots and registers which are known
+ // to hold either gcthings or Values.
+ LSafepoint *safepoint_;
+
+ LMoveGroup *inputMoves_;
+ LMoveGroup *movesAfter_;
+
+ protected:
+ LInstruction()
+ : snapshot_(nullptr),
+ safepoint_(nullptr),
+ inputMoves_(nullptr),
+ movesAfter_(nullptr)
+ { }
+
+ public:
+ LSnapshot *snapshot() const {
+ return snapshot_;
+ }
+ LSafepoint *safepoint() const {
+ return safepoint_;
+ }
+ LMoveGroup *inputMoves() const {
+ return inputMoves_;
+ }
+ void setInputMoves(LMoveGroup *moves) {
+ inputMoves_ = moves;
+ }
+ LMoveGroup *movesAfter() const {
+ return movesAfter_;
+ }
+ void setMovesAfter(LMoveGroup *moves) {
+ movesAfter_ = moves;
+ }
+ void assignSnapshot(LSnapshot *snapshot);
+ void initSafepoint(TempAllocator &alloc);
+
+ class InputIterator;
+};
+
+LInstruction *
+LNode::toInstruction()
+{
+ MOZ_ASSERT(isInstruction());
+ return static_cast<LInstruction *>(this);
+}
+
+const LInstruction *
+LNode::toInstruction() const
+{
+ MOZ_ASSERT(isInstruction());
+ return static_cast<const LInstruction *>(this);
+}
+
+class LElementVisitor
+{
+ LNode *ins_;
protected:
jsbytecode *lastPC_;
jsbytecode *lastNotInlinedPC_;
- LInstruction *instruction() {
+ LNode *instruction() {
return ins_;
}
public:
- void setInstruction(LInstruction *ins) {
+ void setElement(LNode *ins) {
ins_ = ins;
if (ins->mirRaw()) {
lastPC_ = ins->mirRaw()->trackedPc();
if (ins->mirRaw()->trackedTree())
lastNotInlinedPC_ = ins->mirRaw()->profilerLeavePc();
}
}
- LInstructionVisitor()
+ LElementVisitor()
: ins_(nullptr),
lastPC_(nullptr),
lastNotInlinedPC_(nullptr)
{}
public:
#define VISIT_INS(op) virtual bool visit##op(L##op *) { MOZ_CRASH("NYI: " #op); }
LIR_OPCODE_LIST(VISIT_INS)
@@ -1679,18 +1711,18 @@ LAllocation::toRegister() const
} // namespace jit
} // namespace js
#define LIR_HEADER(opcode) \
Opcode op() const { \
return LInstruction::LOp_##opcode; \
} \
- bool accept(LInstructionVisitor *visitor) { \
- visitor->setInstruction(this); \
+ bool accept(LElementVisitor *visitor) { \
+ visitor->setElement(this); \
return visitor->visit##opcode(this); \
}
#include "jit/LIR-Common.h"
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
# if defined(JS_CODEGEN_X86)
# include "jit/x86/LIR-x86.h"
# elif defined(JS_CODEGEN_X64)
@@ -1708,17 +1740,17 @@ LAllocation::toRegister() const
#endif
#undef LIR_HEADER
namespace js {
namespace jit {
#define LIROP(name) \
- L##name *LInstruction::to##name() \
+ L##name *LNode::to##name() \
{ \
MOZ_ASSERT(is##name()); \
return static_cast<L##name *>(this); \
}
LIR_OPCODE_LIST(LIROP)
#undef LIROP
#define LALLOC_CAST(type) \
--- a/js/src/jit/LinearScan.cpp
+++ b/js/src/jit/LinearScan.cpp
@@ -392,17 +392,17 @@ LinearScanAllocator::reifyAllocations()
MOZ_ASSERT(DefinitionCompatibleWith(reg->ins(), def, *interval->getAllocation()));
def->setOutput(*interval->getAllocation());
spillFrom = interval->getAllocation();
}
if (reg->ins()->recoversInput()) {
- LSnapshot *snapshot = reg->ins()->snapshot();
+ LSnapshot *snapshot = reg->ins()->toInstruction()->snapshot();
for (size_t i = 0; i < snapshot->numEntries(); i++) {
LAllocation *entry = snapshot->getEntry(i);
if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
*entry = *def->output();
}
}
if (reg->mustSpillAtDefinition() && !reg->ins()->isPhi() &&
@@ -432,17 +432,17 @@ LinearScanAllocator::reifyAllocations()
// Don't do this if the interval starts at the first instruction of the
// block; this case should have been handled by resolveControlFlow().
//
// If the interval starts at the output half of an instruction, we have to
// emit the move *after* this instruction, to prevent clobbering an input
// register.
LiveInterval *prevInterval = reg->getInterval(interval->index() - 1);
CodePosition start = interval->start();
- LInstruction *ins = insData[start];
+ LNode *ins = insData[start];
MOZ_ASSERT(start == inputOf(ins) || start == outputOf(ins));
if (start.subpos() == CodePosition::INPUT) {
if (!moveInput(inputOf(ins), prevInterval, interval, reg->type()))
return false;
} else {
if (!moveAfter(outputOf(ins), prevInterval, interval, reg->type()))
@@ -784,17 +784,17 @@ LinearScanAllocator::assign(LAllocation
// This interval is spilled more than once, so just always spill
// it at its definition.
reg->setSpillAtDefinition(outputOf(reg->ins()));
} else {
reg->setCanonicalSpill(current->getAllocation());
// If this spill is inside a loop, and the definition is outside
// the loop, instead move the spill to outside the loop.
- LInstruction *other = insData[current->start()];
+ LNode *other = insData[current->start()];
uint32_t loopDepthAtDef = reg->block()->mir()->loopDepth();
uint32_t loopDepthAtSpill = other->block()->mir()->loopDepth();
if (loopDepthAtSpill > loopDepthAtDef)
reg->setSpillAtDefinition(outputOf(reg->ins()));
}
}
active.pushBack(current);
--- a/js/src/jit/LiveRangeAllocator.h
+++ b/js/src/jit/LiveRangeAllocator.h
@@ -161,17 +161,17 @@ UseCompatibleWith(const LUse *use, LAllo
default:
MOZ_CRASH("Unknown use policy");
}
}
#ifdef DEBUG
static inline bool
-DefinitionCompatibleWith(LInstruction *ins, const LDefinition *def, LAllocation alloc)
+DefinitionCompatibleWith(LNode *ins, const LDefinition *def, LAllocation alloc)
{
if (ins->isPhi()) {
if (def->isFloatReg())
return alloc.isFloatReg() || alloc.isStackSlot();
return alloc.isGeneralReg() || alloc.isStackSlot();
}
switch (def->policy()) {
@@ -188,17 +188,17 @@ DefinitionCompatibleWith(LInstruction *i
default:
MOZ_CRASH("Unknown definition policy");
}
}
#endif // DEBUG
static inline LDefinition *
-FindReusingDefinition(LInstruction *ins, LAllocation *alloc)
+FindReusingDefinition(LNode *ins, LAllocation *alloc)
{
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition *def = ins->getDef(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
for (size_t i = 0; i < ins->numTemps(); i++) {
@@ -421,48 +421,48 @@ class LiveInterval
/*
* Represents all of the register allocation state associated with a virtual
* register, including all associated intervals and pointers to relevant LIR
* structures.
*/
class VirtualRegister
{
- LInstruction *ins_;
+ LNode *ins_;
LDefinition *def_;
Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;
// Whether def_ is a temp or an output.
bool isTemp_ : 1;
void operator=(const VirtualRegister &) MOZ_DELETE;
VirtualRegister(const VirtualRegister &) MOZ_DELETE;
protected:
explicit VirtualRegister(TempAllocator &alloc)
: intervals_(alloc)
{}
public:
- bool init(TempAllocator &alloc, LInstruction *ins, LDefinition *def,
+ bool init(TempAllocator &alloc, LNode *ins, LDefinition *def,
bool isTemp)
{
MOZ_ASSERT(ins && !ins_);
ins_ = ins;
def_ = def;
isTemp_ = isTemp;
LiveInterval *initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
if (!initial)
return false;
return intervals_.append(initial);
}
LBlock *block() {
return ins_->block();
}
- LInstruction *ins() {
+ LNode *ins() {
return ins_;
}
LDefinition *def() const {
return def_;
}
LDefinition::Type type() const {
return def()->type();
}
@@ -721,18 +721,20 @@ class LiveRangeAllocator : protected Reg
// Don't add output registers to the safepoint.
CodePosition start = interval->start();
if (interval->index() == 0 && !reg->isTemp()) {
#ifdef CHECK_OSIPOINT_REGISTERS
// We don't add the output register to the safepoint,
// but it still might get added as one of the inputs.
// So eagerly add this reg to the safepoint clobbered registers.
- if (LSafepoint *safepoint = reg->ins()->safepoint())
- safepoint->addClobberedRegister(a->toRegister());
+ if (reg->ins()->isInstruction()) {
+ if (LSafepoint *safepoint = reg->ins()->toInstruction()->safepoint())
+ safepoint->addClobberedRegister(a->toRegister());
+ }
#endif
start = start.next();
}
size_t i = findFirstNonCallSafepoint(start);
for (; i < graph.numNonCallSafepoints(); i++) {
LInstruction *ins = graph.getNonCallSafepoint(i);
CodePosition pos = inputOf(ins);
--- a/js/src/jit/RegisterAllocator.cpp
+++ b/js/src/jit/RegisterAllocator.cpp
@@ -482,35 +482,33 @@ RegisterAllocator::init()
}
return true;
}
LMoveGroup *
RegisterAllocator::getInputMoveGroup(uint32_t id)
{
- LInstruction *ins = insData[id];
- MOZ_ASSERT(!ins->isPhi());
+ LInstruction *ins = insData[id]->toInstruction();
MOZ_ASSERT(!ins->isLabel());
if (ins->inputMoves())
return ins->inputMoves();
LMoveGroup *moves = LMoveGroup::New(alloc());
ins->setInputMoves(moves);
ins->block()->insertBefore(ins, moves);
return moves;
}
LMoveGroup *
RegisterAllocator::getMoveGroupAfter(uint32_t id)
{
- LInstruction *ins = insData[id];
- MOZ_ASSERT(!ins->isPhi());
+ LInstruction *ins = insData[id]->toInstruction();
if (ins->movesAfter())
return ins->movesAfter();
LMoveGroup *moves = LMoveGroup::New(alloc());
ins->setMovesAfter(moves);
if (ins->isLabel())
--- a/js/src/jit/RegisterAllocator.h
+++ b/js/src/jit/RegisterAllocator.h
@@ -215,40 +215,40 @@ class CodePosition
MOZ_ASSERT(*this != MAX);
return CodePosition(bits_ + 1);
}
};
// Structure to track all moves inserted next to instructions in a graph.
class InstructionDataMap
{
- FixedList<LInstruction *> insData_;
+ FixedList<LNode *> insData_;
public:
InstructionDataMap()
: insData_()
{ }
bool init(MIRGenerator *gen, uint32_t numInstructions) {
if (!insData_.init(gen->alloc(), numInstructions))
return false;
- memset(&insData_[0], 0, sizeof(LInstruction *) * numInstructions);
+ memset(&insData_[0], 0, sizeof(LNode *) * numInstructions);
return true;
}
- LInstruction *&operator[](CodePosition pos) {
+ LNode *&operator[](CodePosition pos) {
return operator[](pos.ins());
}
- LInstruction *const &operator[](CodePosition pos) const {
+ LNode *const &operator[](CodePosition pos) const {
return operator[](pos.ins());
}
- LInstruction *&operator[](uint32_t ins) {
+ LNode *&operator[](uint32_t ins) {
return insData_[ins];
}
- LInstruction *const &operator[](uint32_t ins) const {
+ LNode *const &operator[](uint32_t ins) const {
return insData_[ins];
}
};
// Common superclass for register allocators.
class RegisterAllocator
{
void operator=(const RegisterAllocator &) MOZ_DELETE;
@@ -296,30 +296,30 @@ class RegisterAllocator
// read their inputs. Consequently, it doesn't make sense to talk
// about code positions in the middle of a series of phis.
if (insData[pos]->isPhi()) {
while (insData[pos + 1]->isPhi())
++pos;
}
return CodePosition(pos, CodePosition::OUTPUT);
}
- CodePosition outputOf(const LInstruction *ins) const {
+ CodePosition outputOf(const LNode *ins) const {
return outputOf(ins->id());
}
CodePosition inputOf(uint32_t pos) const {
// All phis in a block read their inputs before any of them write their
// outputs. Consequently, it doesn't make sense to talk about code
// positions in the middle of a series of phis.
if (insData[pos]->isPhi()) {
while (pos > 0 && insData[pos - 1]->isPhi())
--pos;
}
return CodePosition(pos, CodePosition::INPUT);
}
- CodePosition inputOf(const LInstruction *ins) const {
+ CodePosition inputOf(const LNode *ins) const {
return inputOf(ins->id());
}
CodePosition entryOf(const LBlock *block) {
return inputOf(block->firstId());
}
CodePosition exitOf(const LBlock *block) {
return outputOf(block->lastId());
}
@@ -329,23 +329,23 @@ class RegisterAllocator
LMoveGroup *getInputMoveGroup(CodePosition pos) {
return getInputMoveGroup(pos.ins());
}
LMoveGroup *getMoveGroupAfter(CodePosition pos) {
return getMoveGroupAfter(pos.ins());
}
- CodePosition minimalDefEnd(LInstruction *ins) {
+ CodePosition minimalDefEnd(LNode *ins) {
// Compute the shortest interval that captures vregs defined by ins.
// Watch for instructions that are followed by an OSI point and/or Nop.
// If moves are introduced between the instruction and the OSI point then
// safepoint information for the instruction may be incorrect.
while (true) {
- LInstruction *next = insData[ins->id() + 1];
+ LNode *next = insData[ins->id() + 1];
if (!next->isNop() && !next->isOsiPoint())
break;
ins = next;
}
return outputOf(ins);
}
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -441,17 +441,17 @@ CodeGeneratorShared::encode(LSnapshot *s
#ifdef TRACK_SNAPSHOTS
uint32_t pcOpcode = 0;
uint32_t lirOpcode = 0;
uint32_t lirId = 0;
uint32_t mirOpcode = 0;
uint32_t mirId = 0;
- if (LInstruction *ins = instruction()) {
+ if (LNode *ins = instruction()) {
lirOpcode = ins->op();
lirId = ins->id();
if (ins->mirRaw()) {
mirOpcode = ins->mirRaw()->op();
mirId = ins->mirRaw()->id();
if (ins->mirRaw()->trackedPc())
pcOpcode = *ins->mirRaw()->trackedPc();
}
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -43,17 +43,17 @@ struct PatchableBackedgeInfo
{}
};
struct ReciprocalMulConstants {
int32_t multiplier;
int32_t shiftAmount;
};
-class CodeGeneratorShared : public LInstructionVisitor
+class CodeGeneratorShared : public LElementVisitor
{
js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
OutOfLineCode *oolIns;
MacroAssembler &ensureMasm(MacroAssembler *masm);
mozilla::Maybe<MacroAssembler> maybeMasm_;
public: