--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -330,30 +330,22 @@ namespace nanojit
}
}
Register Assembler::findSpecificRegFor(LIns* i, Register w)
{
return findRegFor(i, rmask(w));
}
- // The 'op' argument is the opcode of the instruction containing the
- // displaced i[d] operand we're finding a register for. It is only used
- // for differentiating classes of valid displacement in the native
- // backends; a bit of a hack.
- Register Assembler::getBaseReg(LOpcode op, LIns *i, int &d, RegisterMask allow)
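+    // Finds a register in 'allow' to hold the base of the displaced i[d]
+    // operand.  For stack allocations (LIR_alloc) the stack offset is folded
+    // into 'd' and FP is returned directly; backends with a restricted
+    // displacement range must check 'd' themselves when emitting the access.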
+ Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
{
#if !PEDANTIC
if (i->isop(LIR_alloc)) {
- int d2 = d;
- d2 += findMemFor(i);
- if (isValidDisplacement(op, d2)) {
- d = d2;
- return FP;
- }
+ d += findMemFor(i);
+ return FP;
}
#else
(void) d;
#endif
return findRegFor(i, allow);
}
// Finds a register in 'allow' to hold the result of 'ins'. Used when we
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -267,17 +267,17 @@ namespace nanojit
void evictAllActiveRegs();
void evictSomeActiveRegs(RegisterMask regs);
void evictScratchRegs();
void intersectRegisterState(RegAlloc& saved);
void unionRegisterState(RegAlloc& saved);
void assignSaved(RegAlloc &saved, RegisterMask skip);
LInsp findVictim(RegisterMask allow);
- Register getBaseReg(LOpcode op, LIns *i, int &d, RegisterMask allow);
+ Register getBaseReg(LIns *i, int &d, RegisterMask allow);
int findMemFor(LIns* i);
Register findRegFor(LIns* i, RegisterMask allow);
void findRegFor2(RegisterMask allow, LIns* ia, Register &ra, LIns *ib, Register &rb);
Register findSpecificRegFor(LIns* i, Register r);
Register findSpecificRegForUnallocated(LIns* i, Register r);
Register prepResultReg(LIns *i, RegisterMask allow);
void freeRsrcOf(LIns *i, bool pop);
void evictIfActive(Register r);
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -222,17 +222,16 @@ namespace nanojit
// Make sure it's word-aligned.
NanoAssert(0 == startOfRoom % sizeof(void*));
return startOfRoom;
}
LInsp LirBufWriter::insStore(LOpcode op, LInsp val, LInsp base, int32_t d)
{
- base = insDisp(op, base, d);
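+        // The displacement is stored as-is; a backend that cannot encode it
+        // directly materializes the address itself at assembly time.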
LInsSti* insSti = (LInsSti*)_buf->makeRoom(sizeof(LInsSti));
LIns* ins = insSti->getLIns();
ins->initLInsSti(op, val, base, d);
return ins;
}
LInsp LirBufWriter::ins0(LOpcode op)
{
@@ -263,17 +262,16 @@ namespace nanojit
LInsOp3* insOp3 = (LInsOp3*)_buf->makeRoom(sizeof(LInsOp3));
LIns* ins = insOp3->getLIns();
ins->initLInsOp3(op, o1, o2, o3);
return ins;
}
LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, int32_t d)
{
- base = insDisp(op, base, d);
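+        // As in insStore, the displacement is recorded unmodified.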
LInsLd* insLd = (LInsLd*)_buf->makeRoom(sizeof(LInsLd));
LIns* ins = insLd->getLIns();
ins->initLInsLd(op, base, d);
return ins;
}
LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, GuardRecord *gr)
{
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -46,16 +46,61 @@
* register, or not? The other dimension is whether the argument is an integer
* (including pointers) or a floating-point value. In all comments below,
* "integer" means integer of any size, including 64-bit, unless otherwise
* specified. All floating-point values are always 64-bit. Below, "quad" is
* used for a 64-bit value that might be either integer or floating-point.
*/
namespace nanojit
{
+ enum LOpcode
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+#pragma warning(disable:4480) // nonstandard extension used: specifying underlying type for enum
+ : unsigned
+#endif
+ {
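+    // Expand LIRopcode.tbl into the LIR_* opcode constants.
+    // LIR_sentinel marks the end of the opcode list.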
+#define OPDEF(op, number, repKind, retType) \
+ LIR_##op = (number),
+#include "LIRopcode.tbl"
+ LIR_sentinel,
+#undef OPDEF
+
+#ifdef NANOJIT_64BIT
+# define PTR_SIZE(a,b) b
+#else
+# define PTR_SIZE(a,b) a
+#endif
+
+    // Pointer-sized op aliases: each LIR_p* name resolves to the 32-bit
+    // opcode on 32-bit targets and to the 64-bit opcode on 64-bit targets.
+ LIR_ldp = PTR_SIZE(LIR_ld, LIR_ldq),
+ LIR_ldcp = PTR_SIZE(LIR_ldc, LIR_ldqc),
+ LIR_stpi = PTR_SIZE(LIR_sti, LIR_stqi),
+ LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd),
+ LIR_piand = PTR_SIZE(LIR_and, LIR_qiand),
+ LIR_pilsh = PTR_SIZE(LIR_lsh, LIR_qilsh),
+ LIR_pirsh = PTR_SIZE(LIR_rsh, LIR_qirsh),
+ LIR_pursh = PTR_SIZE(LIR_ush, LIR_qursh),
+ LIR_pcmov = PTR_SIZE(LIR_cmov, LIR_qcmov),
+ LIR_pior = PTR_SIZE(LIR_or, LIR_qior),
+ LIR_pxor = PTR_SIZE(LIR_xor, LIR_qxor),
+ LIR_addp = PTR_SIZE(LIR_iaddp, LIR_qaddp),
+ LIR_peq = PTR_SIZE(LIR_eq, LIR_qeq),
+ LIR_plt = PTR_SIZE(LIR_lt, LIR_qlt),
+ LIR_pgt = PTR_SIZE(LIR_gt, LIR_qgt),
+ LIR_ple = PTR_SIZE(LIR_le, LIR_qle),
+ LIR_pge = PTR_SIZE(LIR_ge, LIR_qge),
+ LIR_pult = PTR_SIZE(LIR_ult, LIR_qult),
+ LIR_pugt = PTR_SIZE(LIR_ugt, LIR_qugt),
+ LIR_pule = PTR_SIZE(LIR_ule, LIR_qule),
+ LIR_puge = PTR_SIZE(LIR_uge, LIR_quge),
+ LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc),
+ LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall),
+ LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam)
+ };
+
struct GuardRecord;
struct SideExit;
enum AbiKind {
ABI_FASTCALL,
ABI_THISCALL,
ABI_STDCALL,
ABI_CDECL
@@ -950,24 +995,16 @@ namespace nanojit
uint32_t LIns::getTableSize() const
{
NanoAssert(isLInsJtbl());
return toLInsJtbl()->size;
}
class LirWriter
{
- protected:
- LInsp insDisp(LOpcode op, LInsp base, int32_t& d) {
- if (!isValidDisplacement(op, d)) {
- base = ins2i(LIR_piadd, base, d);
- d = 0;
- }
- return base;
- }
public:
LirWriter *out;
LirWriter(LirWriter* out)
: out(out) {}
virtual ~LirWriter() {}
virtual LInsp ins0(LOpcode v) {
--- a/js/src/nanojit/Native.h
+++ b/js/src/nanojit/Native.h
@@ -49,63 +49,16 @@
#if PEDANTIC
# define UNLESS_PEDANTIC(...)
# define IF_PEDANTIC(...) __VA_ARGS__
#else
# define UNLESS_PEDANTIC(...) __VA_ARGS__
# define IF_PEDANTIC(...)
#endif
-namespace nanojit {
- enum LOpcode
-#if defined(_MSC_VER) && _MSC_VER >= 1400
-#pragma warning(disable:4480) // nonstandard extension used: specifying underlying type for enum
- : unsigned
-#endif
- {
-#define OPDEF(op, number, repKind, retType) \
- LIR_##op = (number),
-#include "LIRopcode.tbl"
- LIR_sentinel,
-#undef OPDEF
-
-#ifdef NANOJIT_64BIT
-# define PTR_SIZE(a,b) b
-#else
-# define PTR_SIZE(a,b) a
-#endif
-
- // pointer op aliases
- LIR_ldp = PTR_SIZE(LIR_ld, LIR_ldq),
- LIR_ldcp = PTR_SIZE(LIR_ldc, LIR_ldqc),
- LIR_stpi = PTR_SIZE(LIR_sti, LIR_stqi),
- LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd),
- LIR_piand = PTR_SIZE(LIR_and, LIR_qiand),
- LIR_pilsh = PTR_SIZE(LIR_lsh, LIR_qilsh),
- LIR_pirsh = PTR_SIZE(LIR_rsh, LIR_qirsh),
- LIR_pursh = PTR_SIZE(LIR_ush, LIR_qursh),
- LIR_pcmov = PTR_SIZE(LIR_cmov, LIR_qcmov),
- LIR_pior = PTR_SIZE(LIR_or, LIR_qior),
- LIR_pxor = PTR_SIZE(LIR_xor, LIR_qxor),
- LIR_addp = PTR_SIZE(LIR_iaddp, LIR_qaddp),
- LIR_peq = PTR_SIZE(LIR_eq, LIR_qeq),
- LIR_plt = PTR_SIZE(LIR_lt, LIR_qlt),
- LIR_pgt = PTR_SIZE(LIR_gt, LIR_qgt),
- LIR_ple = PTR_SIZE(LIR_le, LIR_qle),
- LIR_pge = PTR_SIZE(LIR_ge, LIR_qge),
- LIR_pult = PTR_SIZE(LIR_ult, LIR_qult),
- LIR_pugt = PTR_SIZE(LIR_ugt, LIR_qugt),
- LIR_pule = PTR_SIZE(LIR_ule, LIR_qule),
- LIR_puge = PTR_SIZE(LIR_uge, LIR_quge),
- LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc),
- LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall),
- LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam)
- };
-}
-
#ifdef NANOJIT_IA32
#include "Nativei386.h"
#elif defined(NANOJIT_ARM)
#include "NativeARM.h"
#elif defined(NANOJIT_PPC)
#include "NativePPC.h"
#elif defined(NANOJIT_SPARC)
#include "NativeSparc.h"
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -1222,21 +1222,21 @@ Assembler::asm_store32(LOpcode op, LIns
if (base->isop(LIR_alloc)) {
rb = FP;
dr += findMemFor(base);
ra = findRegFor(value, GpRegs);
} else {
findRegFor2(GpRegs, value, ra, base, rb);
}
- if (!isS12(dr)) {
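+    // LDR/STR encode the immediate as an unsigned 12-bit offset plus an
+    // add/subtract bit, so 'dr' is directly usable if it or its negation
+    // fits in 12 bits; otherwise compute the address in IP first.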
+ if (isU12(-dr) || isU12(dr)) {
+ STR(ra, rb, dr);
+ } else {
STR(ra, IP, 0);
asm_add_imm(IP, rb, dr);
- } else {
- STR(ra, rb, dr);
}
}
void
Assembler::asm_restore(LInsp i, Register r)
{
if (i->isop(LIR_alloc)) {
asm_add_imm(r, FP, disp(i));
@@ -1907,17 +1907,17 @@ Assembler::asm_ld_imm(Register d, int32_
}
int offset = PC_OFFSET_FROM(_nSlot, _nIns-1);
// If the offset is out of range, waste literal space until it is in range.
while (offset <= -4096) {
++_nSlot;
offset += sizeof(_nSlot);
}
- NanoAssert(isS12(offset) && (offset <= -8));
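+    // The PC-relative offset must fit in LDR's 12-bit immediate field; it is
+    // always negative here because the literal pool precedes the load.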
+ NanoAssert((isU12(-offset) || isU12(offset)) && (offset <= -8));
// Write the literal.
*(_nSlot++) = imm;
asm_output("## imm= 0x%x", imm);
// Load the literal.
LDR_nochk(d,PC,offset);
NanoAssert(uintptr_t(_nIns) + 8 + offset == uintptr_t(_nSlot-1));
@@ -2481,32 +2481,49 @@ Assembler::asm_neg_not(LInsp ins)
void
Assembler::asm_load32(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* base = ins->oprnd1();
int d = ins->disp();
Register rr = prepResultReg(ins, GpRegs);
- Register ra = getBaseReg(op, base, d, GpRegs);
+ Register ra = getBaseReg(base, d, GpRegs);
- switch(op) {
+ switch (op) {
case LIR_ldzb:
case LIR_ldcb:
- LDRB(rr, ra, d);
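+        // LDRB has the same 12-bit unsigned offset range as LDR; go via IP
+        // when the displacement cannot be encoded directly.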
+ if (isU12(-d) || isU12(d)) {
+ LDRB(rr, ra, d);
+ } else {
+ LDRB(rr, IP, 0);
+ asm_add_imm(IP, ra, d);
+ }
return;
case LIR_ldzs:
case LIR_ldcs:
- // these are expected to be 2 or 4-byte aligned
- LDRH(rr, ra, d);
+ // These are expected to be 2-byte aligned. (Not all ARM machines
+ // can handle unaligned accesses.)
+ // Similar to the ldcb/ldzb case, but the max offset is smaller.
+ if (isU8(-d) || isU8(d)) {
+ LDRH(rr, ra, d);
+ } else {
+ LDRH(rr, IP, 0);
+ asm_add_imm(IP, ra, d);
+ }
return;
case LIR_ld:
case LIR_ldc:
- // these are expected to be 4-byte aligned
- LDR(rr, ra, d);
+        // These are expected to be 4-byte aligned; out-of-range offsets are
+        // handled just as in the ldzb/ldcb case above.
+ if (isU12(-d) || isU12(d)) {
+ LDR(rr, ra, d);
+ } else {
+ LDR(rr, IP, 0);
+ asm_add_imm(IP, ra, d);
+ }
return;
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
default:
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -177,25 +177,18 @@ typedef struct _FragInfo {
static const RegisterMask SavedFpRegs = 0;
static const RegisterMask SavedRegs = 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
static const int NumSavedRegs = 7;
static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6; // no D7; S14-S15 are used for i2f/u2f.
static const RegisterMask GpRegs = 0xFFFF;
static const RegisterMask AllowableFlagRegs = 1<<R0 | 1<<R1 | 1<<R2 | 1<<R3 | 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
-#define isS12(offs) ((-(1<<12)) <= (offs) && (offs) < (1<<12))
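+// Tests whether 'offs' fits in the unsigned 12-bit immediate field used by
+// ARM word and byte load/store encodings.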
#define isU12(offs) (((offs) & 0xfff) == (offs))
-static inline bool isValidDisplacement(LOpcode op, int32_t d) {
- if (op == LIR_ldcs)
- return (d >= 0) ? isU8(d) : isU8(-d);
- return isS12(d);
-}
-
#define IsFpReg(_r) ((rmask((Register)_r) & (FpRegs)) != 0)
#define IsGpReg(_r) ((rmask((Register)_r) & (GpRegs)) != 0)
#define FpRegNum(_fpr) ((_fpr) - FirstFloatReg)
#define firstreg() R0
// only good for normal regs
#define imm2register(c) (Register)(c-1)
@@ -659,17 +652,17 @@ enum {
NanoAssert(isU8(_off)); \
*(--_nIns) = (NIns)( COND_AL | (0x1D<<20) | ((_n)<<16) | ((_d)<<12) | ((0xB)<<4) | (((_off)&0xf0)<<4) | ((_off)&0xf) ); \
} \
asm_output("ldrsh %s, [%s,#%d]", gpn(_d),gpn(_n),(_off)); \
} while(0)
#define STR(_d,_n,_off) do { \
NanoAssert(IsGpReg(_d) && IsGpReg(_n)); \
- NanoAssert(isS12(_off)); \
+ NanoAssert(isU12(_off) || isU12(-_off)); \
underrunProtect(4); \
if ((_off)<0) *(--_nIns) = (NIns)( COND_AL | (0x50<<20) | ((_n)<<16) | ((_d)<<12) | ((-(_off))&0xFFF) ); \
else *(--_nIns) = (NIns)( COND_AL | (0x58<<20) | ((_n)<<16) | ((_d)<<12) | ((_off)&0xFFF) ); \
asm_output("str %s, [%s, #%d]", gpn(_d), gpn(_n), (_off)); \
} while(0)
// Encode a breakpoint. The ID is not important and is ignored by the
// processor, but it can be useful as a marker when debugging emitted code.
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -139,17 +139,17 @@ namespace nanojit
STW(r, d, FP);
freeRsrcOf(ins, false); // if we had a reg in use, emit a ST to flush it to mem
}
void Assembler::asm_load32(LIns *ins) {
LIns* base = ins->oprnd1();
int d = ins->disp();
Register rr = prepResultReg(ins, GpRegs);
- Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);
+ Register ra = getBaseReg(base, d, GpRegs);
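+        // PPC load displacements are signed 16-bit; anything larger falls
+        // back to the indexed forms (e.g. LBZX) below.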
switch(ins->opcode()) {
case LIR_ldzb:
case LIR_ldcb:
if (isS16(d)) {
LBZ(rr, d, ra);
} else {
LBZX(rr, ra, R0); // rr = [ra+R0]
@@ -199,17 +199,17 @@ namespace nanojit
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
default:
NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
return;
}
Register rs = findRegFor(value, GpRegs);
- Register ra = value == base ? rs : getBaseReg(LIR_sti, base, dr, GpRegs & ~rmask(rs));
+ Register ra = value == base ? rs : getBaseReg(base, dr, GpRegs & ~rmask(rs));
#if !PEDANTIC
if (isS16(dr)) {
STW(rs, dr, ra);
return;
}
#endif
@@ -245,17 +245,17 @@ namespace nanojit
// but *not* okay to copy non-doubles with FPR's
rr = prepResultReg(ins, GpRegs);
}
#else
Register rr = prepResultReg(ins, FpRegs);
#endif
int dr = ins->disp();
- Register ra = getBaseReg(ins->opcode(), base, dr, GpRegs);
+ Register ra = getBaseReg(base, dr, GpRegs);
#ifdef NANOJIT_64BIT
if (rmask(rr) & GpRegs) {
#if !PEDANTIC
if (isS16(dr)) {
LD(rr, dr, ra);
return;
}
@@ -320,17 +320,17 @@ namespace nanojit
case LIR_st32f:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
default:
NanoAssertMsg(0, "asm_store64 should never receive this LIR opcode");
return;
}
- Register ra = getBaseReg(LIR_stqi, base, dr, GpRegs);
+ Register ra = getBaseReg(base, dr, GpRegs);
#if !PEDANTIC && !defined NANOJIT_64BIT
if (value->isop(LIR_quad) && isS16(dr) && isS16(dr+4)) {
// quad constant and short offset
uint64_t q = value->imm64();
STW(R0, dr, ra); // hi
asm_li(R0, int32_t(q>>32)); // hi
STW(R0, dr+4, ra); // lo
--- a/js/src/nanojit/NativePPC.h
+++ b/js/src/nanojit/NativePPC.h
@@ -253,19 +253,16 @@ namespace nanojit
// R13 reserved for thread-specific storage on ppc64-darwin
static const RegisterMask SavedRegs = 0x7fffc000; // R14-R30 saved
static const int NumSavedRegs = 17; // R14-R30
#else
static const RegisterMask SavedRegs = 0x7fffe000; // R13-R30 saved
static const int NumSavedRegs = 18; // R13-R30
#endif
- static inline bool isValidDisplacement(LOpcode, int32_t) {
- return true;
- }
static inline bool IsFpReg(Register r) {
return r >= F0;
}
verbose_only( extern const char* regNames[]; )
#define DECLARE_PLATFORM_STATS()
#define DECLARE_PLATFORM_REGALLOC()
--- a/js/src/nanojit/NativeSparc.cpp
+++ b/js/src/nanojit/NativeSparc.cpp
@@ -320,17 +320,17 @@ namespace nanojit
default:
NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
return;
}
underrunProtect(20);
if (value->isconst())
{
- Register rb = getBaseReg(LIR_sti, base, dr, GpRegs);
+ Register rb = getBaseReg(base, dr, GpRegs);
int c = value->imm32();
STW32(L2, dr, rb);
SET32(c, L2);
}
else
{
            // make sure the value is in a register
Register ra, rb;
@@ -764,17 +764,17 @@ namespace nanojit
void Assembler::asm_load32(LInsp ins)
{
underrunProtect(12);
LOpcode op = ins->opcode();
LIns* base = ins->oprnd1();
int d = ins->disp();
Register rr = prepResultReg(ins, GpRegs);
- Register ra = getBaseReg(ins->opcode(), base, d, GpRegs);
+ Register ra = getBaseReg(base, d, GpRegs);
switch(op) {
case LIR_ldzb:
case LIR_ldcb:
LDUB32(ra, d, rr);
break;
case LIR_ldzs:
case LIR_ldcs:
LDUH32(ra, d, rr);
--- a/js/src/nanojit/NativeSparc.h
+++ b/js/src/nanojit/NativeSparc.h
@@ -186,20 +186,16 @@ namespace nanojit
1<<I4 | 1<<I5;
static const RegisterMask GpRegs = SavedRegs | 1<<O0 | 1<<O1 | 1<<O2 |
1<<O3 | 1<<O4 | 1<<O5;
static const RegisterMask FpRegs = 1<<F0 | 1<<F2 | 1<<F4 | 1<<F6 |
1<<F14 | 1<<F16 | 1<<F18 | 1<<F20 |
1<<F22;
static const RegisterMask AllowableFlagRegs = GpRegs;
- static inline bool isValidDisplacement(LOpcode, int32_t) {
- return true;
- }
-
verbose_only( extern const char* regNames[]; )
#define DECLARE_PLATFORM_STATS()
#define DECLARE_PLATFORM_REGALLOC()
#define DECLARE_PLATFORM_ASSEMBLER() \
const static Register argRegs[6], retRegs[1]; \
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -1363,17 +1363,17 @@ namespace nanojit
// xmm <- gpr: use movq xmm, r/m64 (66 REX.W 0F 6E /r)
MOVQXR(d, s);
}
}
void Assembler::regalloc_load(LIns *ins, RegisterMask allow, Register &rr, int32_t &dr, Register &rb) {
dr = ins->disp();
LIns *base = ins->oprnd1();
- rb = getBaseReg(ins->opcode(), base, dr, BaseRegs);
+ rb = getBaseReg(base, dr, BaseRegs);
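+        // x64 addressing modes accept a full 32-bit displacement, so 'dr'
+        // needs no further range check.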
if (ins->isUnusedOrHasUnknownReg() || !(allow & rmask(ins->getReg()))) {
rr = prepResultReg(ins, allow & ~rmask(rb));
} else {
// keep already assigned register
rr = ins->getReg();
NanoAssert(allow & rmask(rr));
freeRsrcOf(ins, false);
}
@@ -1441,17 +1441,17 @@ namespace nanojit
NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
break;
}
}
void Assembler::asm_store64(LOpcode op, LIns *value, int d, LIns *base) {
NanoAssert(value->isQuad());
- Register b = getBaseReg(LIR_stqi, base, d, BaseRegs);
+ Register b = getBaseReg(base, d, BaseRegs);
Register r;
// if we have to choose a register, use a GPR, but not the base reg
if (value->isUnusedOrHasUnknownReg()) {
RegisterMask allow;
// If op is LIR_st32f and we have no reg, prefer FPR over GPR: saves an instruction later,
        // and the value is almost certainly going to be operated on as FP later anyway.
// XXX: isFloat doesn't cover float/fmod! see bug 520208.
@@ -1511,17 +1511,17 @@ namespace nanojit
// quirk of x86-64: reg cannot appear to be ah/bh/ch/dh
// for single-byte stores with REX prefix
const RegisterMask SrcRegs =
(op == LIR_stb) ?
(GpRegs & ~(1<<RSP | 1<<RBP | 1<<RSI | 1<<RDI)) :
GpRegs;
NanoAssert(!value->isQuad());
- Register b = getBaseReg(LIR_sti, base, d, BaseRegs);
+ Register b = getBaseReg(base, d, BaseRegs);
Register r = findRegFor(value, SrcRegs & ~rmask(b));
switch (op) {
case LIR_stb:
MOVBMR(r, d, b);
break;
case LIR_sts:
MOVSMR(r, d, b);
--- a/js/src/nanojit/NativeX64.h
+++ b/js/src/nanojit/NativeX64.h
@@ -325,19 +325,16 @@ namespace nanojit
static const int NumSavedRegs = 7; // rbx, rsi, rdi, r12-15
static const int NumArgRegs = 4;
#else
static const RegisterMask SavedRegs = 1<<RBX | 1<<R12 | 1<<R13 | 1<<R14 | 1<<R15;
static const int NumSavedRegs = 5; // rbx, r12-15
static const int NumArgRegs = 6;
#endif
- static inline bool isValidDisplacement(LOpcode, int32_t) {
- return true;
- }
static inline bool IsFpReg(Register r) {
return ((1<<r) & FpRegs) != 0;
}
static inline bool IsGpReg(Register r) {
return ((1<<r) & GpRegs) != 0;
}
verbose_only( extern const char* regNames[]; )
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -471,17 +471,17 @@ namespace nanojit
asm_load(d,r);
}
}
void Assembler::asm_store32(LOpcode op, LIns* value, int dr, LIns* base)
{
if (value->isconst())
{
- Register rb = getBaseReg(LIR_sti, base, dr, GpRegs);
+ Register rb = getBaseReg(base, dr, GpRegs);
int c = value->imm32();
switch(op) {
case LIR_stb:
ST8i(rb, dr, c);
break;
case LIR_sts:
ST16i(rb, dr, c);
break;
@@ -561,17 +561,17 @@ namespace nanojit
{
LIns* base = ins->oprnd1();
int db = ins->disp();
Register rr = ins->getReg();
if (isKnownReg(rr) && rmask(rr) & XmmRegs)
{
freeRsrcOf(ins, false);
- Register rb = getBaseReg(ins->opcode(), base, db, GpRegs);
+ Register rb = getBaseReg(base, db, GpRegs);
switch (ins->opcode()) {
case LIR_ldq:
case LIR_ldqc:
SSE_LDQ(rr, db, rb);
break;
case LIR_ld32f:
case LIR_ldc32f:
SSE_CVTSS2SD(rr, rr);
@@ -1249,17 +1249,17 @@ namespace nanojit
LDsib(rr, d, rleft, rright, scale);
return;
default:
NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
return;
}
}
- Register ra = getBaseReg(op, base, d, GpRegs);
+ Register ra = getBaseReg(base, d, GpRegs);
switch(op) {
case LIR_ldzb:
case LIR_ldcb:
LD8Z(rr, d, ra);
return;
case LIR_ldsb:
case LIR_ldcsb:
LD8S(rr, d, ra);
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -152,20 +152,16 @@ namespace nanojit
static const RegisterMask GpRegs = SavedRegs | 1<<EAX | 1<<ECX | 1<<EDX;
static const RegisterMask XmmRegs = 1<<XMM0|1<<XMM1|1<<XMM2|1<<XMM3|1<<XMM4|1<<XMM5|1<<XMM6|1<<XMM7;
static const RegisterMask x87Regs = 1<<FST0;
static const RegisterMask FpRegs = x87Regs | XmmRegs;
static const RegisterMask ScratchRegs = 1<<EAX | 1<<ECX | 1<<EDX | FpRegs;
static const RegisterMask AllowableFlagRegs = 1<<EAX |1<<ECX | 1<<EDX | 1<<EBX;
- static inline bool isValidDisplacement(LOpcode, int32_t) {
- return true;
- }
-
#define _rmask_(r) (1<<(r))
#define _is_xmm_reg_(r) ((_rmask_(r)&XmmRegs)!=0)
#define _is_x87_reg_(r) ((_rmask_(r)&x87Regs)!=0)
#define _is_fp_reg_(r) ((_rmask_(r)&FpRegs)!=0)
#define _is_gp_reg_(r) ((_rmask_(r)&GpRegs)!=0)
verbose_only( extern const char* regNames[]; )