--- a/js/src/assembler/assembler/ARMAssembler.h
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -1451,17 +1451,22 @@ namespace JSC {
}
ARMWord SM(int reg)
{
ASSERT(reg <= ARMRegisters::d31);
// Encoded as bits [5,3:0].
return ((reg << 5) & 0x20) | ((reg >> 1) & 0xf);
}
-
+ ARMWord SN(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::d31);
+ // Encoded as bits [19:16,7].
+ return ((reg << 15) & 0xf0000) | ((reg & 1) << 7);
+ }
static ARMWord getConditionalField(ARMWord i)
{
return i & 0xf0000000;
}
int genInt(int reg, ARMWord imm, bool positive);
ARMBuffer m_buffer;
@@ -1595,137 +1600,136 @@ namespace JSC {
} else {
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vmov",
nameFpRegD(rFP), nameGpReg(r1), nameGpReg(r2));
}
emitVFPInst(static_cast<ARMWord>(cc) | VFP_DXFER | VFP_MOV |
(fromFP ? DT_LOAD : 0) |
(isDbl ? VFP_DBL : 0), RD(r1), RN(r2), isDbl ? DM(rFP) : SM(rFP));
-
}
void fcpyd_r(int dd, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s\n", MAYBE_PAD, "vmov.f64",
nameFpRegD(dd), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FCPYD, dd, dd, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FCPYD, DD(dd), DM(dm), 0);
}
void faddd_r(int dd, int dn, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vadd.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FADDD, DD(dd), DN(dn), DM(dm));
}
void fnegd_r(int dd, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s\n", MAYBE_PAD, "fnegd", nameFpRegD(dd), nameFpRegD(dm));
m_buffer.putInt(static_cast<ARMWord>(cc) | FNEGD | DD(dd) | DM(dm));
}
void fdivd_r(int dd, int dn, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vdiv.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FDIVD, dd, dn, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FDIVD, DD(dd), DN(dn), DM(dm));
}
void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vsub.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FSUBD, DD(dd), DN(dn), DM(dm));
}
void fabsd_r(int dd, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s\n", MAYBE_PAD, "fabsd", nameFpRegD(dd), nameFpRegD(dm));
m_buffer.putInt(static_cast<ARMWord>(cc) | FABSD | DD(dd) | DM(dm));
}
void fmuld_r(int dd, int dn, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vmul.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FMULD, dd, dn, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FMULD, DD(dd), DN(dn), DM(dm));
}
void fcmpd_r(int dd, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s\n", MAYBE_PAD, "vcmp.f64", nameFpRegD(dd), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FCMPD, DD(dd), 0, DM(dm));
}
void fsqrtd_r(int dd, int dm, Condition cc = AL)
{
js::JaegerSpew(js::JSpew_Insns,
IPFX "%-15s %s, %s\n", MAYBE_PAD, "vsqrt.f64", nameFpRegD(dd), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FSQRTD, dd, 0, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FSQRTD, DD(dd), 0, DM(dm));
}
void fmsr_r(int dd, int rn, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
+ emitVFPInst(static_cast<ARMWord>(cc) | FMSR, RD(rn), SN(dd), 0);
}
void fmrs_r(int rd, int dn, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FMRS, rd, dn, 0);
+ emitVFPInst(static_cast<ARMWord>(cc) | FMRS, RD(rd), SN(dn), 0);
}
+    // NOTE: ARM core (integer) registers are encoded in the same field
+    // positions as single-precision VFP registers, which is why SM() is
+    // used for the integer source operand of the conversions below.
void fsitod_r(int dd, int dm, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FSITOD, DD(dd), 0, SM(dm));
}
void fuitod_r(int dd, int dm, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FUITOD, dd, 0, dm);
+ emitVFPInst(static_cast<ARMWord>(cc) | FUITOD, DD(dd), 0, SM(dm));
}
void ftosid_r(int fd, int dm, Condition cc = AL)
{
- // TODO: emitInst doesn't work for VFP instructions, though it
- // seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FTOSID, fd, 0, dm);
+        // TODO(review): the operand encoding here is unverified; SD for the
+        // destination and DM for the source is assumed — confirm against the
+        // ARM ARM VCVT (FTOSID) encoding.
+ emitVFPInst(static_cast<ARMWord>(cc) | FTOSID, SD(fd), 0, DM(dm));
}
void ftosizd_r(int fd, int dm, Condition cc = AL)
{
- // TODO: emitInst doesn't work for VFP instructions, though it
- // seems to work for current usage.
- emitInst(static_cast<ARMWord>(cc) | FTOSIZD, fd, 0, dm);
+        // TODO(review): the operand encoding here is unverified; SD for the
+        // destination and DM for the source is assumed — confirm against the
+        // ARM ARM VCVT (FTOSIZD) encoding.
+ emitVFPInst(static_cast<ARMWord>(cc) | FTOSIZD, SD(fd), 0, DM(dm));
}
void fmstat(Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
}
--- a/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -249,26 +249,28 @@ public:
m_allowFlush = allowFlush;
}
#endif
private:
void correctDeltas(int insnSize)
{
m_maxDistance -= insnSize;
+ ASSERT(m_maxDistance >= 0);
m_lastConstDelta -= insnSize;
if (m_lastConstDelta < 0)
m_lastConstDelta = 0;
}
void correctDeltas(int insnSize, int constSize)
{
correctDeltas(insnSize);
m_maxDistance -= m_lastConstDelta;
+ ASSERT(m_maxDistance >= 0);
m_lastConstDelta = constSize;
}
void flushConstantPool(bool useBarrier = true)
{
js::JaegerSpew(js::JSpew_Insns, " -- FLUSHING CONSTANT POOL WITH %d CONSTANTS --\n",
m_numConsts);
ASSERT(m_allowFlush);
@@ -299,31 +301,37 @@ private:
for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
void* loadAddr = reinterpret_cast<void*>(m_buffer + *iter);
AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<void*>(m_buffer + constPoolOffset));
}
m_loadOffsets.clear();
m_numConsts = 0;
m_maxDistance = maxPoolSize;
+ ASSERT(m_maxDistance >= 0);
+
}
void flushIfNoSpaceFor(int nextInsnSize)
{
- if (m_numConsts == 0)
+ if (m_numConsts == 0) {
+ m_maxDistance = maxPoolSize;
return;
+ }
int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
flushConstantPool();
}
void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
{
- if (m_numConsts == 0)
+ if (m_numConsts == 0) {
+ m_maxDistance = maxPoolSize;
return;
+ }
if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
(m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
flushConstantPool();
}
uint32_t* m_pool;
char* m_mask;
LoadOffsets m_loadOffsets;
--- a/js/src/assembler/assembler/MacroAssemblerARM.h
+++ b/js/src/assembler/assembler/MacroAssemblerARM.h
@@ -1287,24 +1287,24 @@ public:
void sqrtDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.fsqrtd_r(dest, src);
}
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
- m_assembler.fmsr_r(dest, src);
- m_assembler.fsitod_r(dest, dest);
+ m_assembler.fmsr_r(floatShadow(dest), src);
+ m_assembler.fsitod_r(dest, floatShadow(dest));
}
void convertUInt32ToDouble(RegisterID src, FPRegisterID dest)
{
- m_assembler.fmsr_r(dest, src);
- m_assembler.fuitod_r(dest, dest);
+ m_assembler.fmsr_r(floatShadow(dest), src);
+ m_assembler.fuitod_r(dest, floatShadow(dest));
}
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
// flds does not worth the effort here
load32(src, ARMRegisters::S1);
convertInt32ToDouble(ARMRegisters::S1, dest);
}
@@ -1332,37 +1332,37 @@ public:
return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
}
// Truncates 'src' to an integer, and places the resulting 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
{
- m_assembler.ftosizd_r(ARMRegisters::SD0, src);
+ m_assembler.ftosizd_r(floatShadow(ARMRegisters::SD0), src);
// If FTOSIZD (VCVT.S32.F64) can't fit the result into a 32-bit
// integer, it saturates at INT_MAX or INT_MIN. Testing this is
// probably quicker than testing FPSCR for exception.
- m_assembler.fmrs_r(dest, ARMRegisters::SD0);
+ m_assembler.fmrs_r(dest, floatShadow(ARMRegisters::SD0));
m_assembler.cmn_r(dest, ARMAssembler::getOp2(-0x7fffffff));
m_assembler.cmp_r(dest, ARMAssembler::getOp2(0x80000000), ARMCondition(NonZero));
return Jump(m_assembler.jmp(ARMCondition(Zero)));
}
// Convert 'src' to an integer, and places the resulting 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
// (specifically, in this case, 0).
void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
{
- m_assembler.ftosid_r(ARMRegisters::SD0, src);
- m_assembler.fmrs_r(dest, ARMRegisters::SD0);
+ m_assembler.ftosid_r(floatShadow(ARMRegisters::SD0), src);
+ m_assembler.fmrs_r(dest, floatShadow(ARMRegisters::SD0));
// Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
- m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
+ m_assembler.fsitod_r(ARMRegisters::SD0, floatShadow(ARMRegisters::SD0));
failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
// If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
failureCases.append(branchTest32(Zero, dest));
}
void zeroDouble(FPRegisterID srcDest)
{
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug685313.js
@@ -0,0 +1,11 @@
+
+function foo() {
+ function D(){}
+ arr = [
+ new (function D ( ) {
+ D += '' + foo;
+ }),
+ new D
+ ];
+}
+foo();
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -2161,18 +2161,16 @@ JS_GetFunctionCallback(JSContext *cx)
return cx->functionCallback;
}
#endif /* MOZ_TRACE_JSCALLS */
JS_PUBLIC_API(void)
JS_DumpBytecode(JSContext *cx, JSScript *script)
{
- JS_ASSERT(!cx->runtime->gcRunning);
-
#if defined(DEBUG)
AutoArenaAllocator mark(&cx->tempPool);
Sprinter sprinter;
INIT_SPRINTER(cx, &sprinter, &cx->tempPool, 0);
fprintf(stdout, "--- SCRIPT %s:%d ---\n", script->filename, script->lineno);
js_Disassemble(cx, script, true, &sprinter);
fputs(sprinter.base, stdout);
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -1886,20 +1886,18 @@ TypeSet::hasGlobalObject(JSContext *cx,
TypeObject types::emptyTypeObject(NULL, false, true);
void
TypeCompartment::init(JSContext *cx)
{
PodZero(this);
-#ifndef JS_CPU_ARM
if (cx && cx->getRunOptions() & JSOPTION_TYPE_INFERENCE)
inferenceEnabled = true;
-#endif
}
TypeObject *
TypeCompartment::newTypeObject(JSContext *cx, JSScript *script,
JSProtoKey key, JSObject *proto, bool unknown)
{
TypeObject *object = NewGCThing<TypeObject>(cx, gc::FINALIZE_TYPE_OBJECT, sizeof(TypeObject));
if (!object)
@@ -3216,16 +3214,24 @@ ScriptAnalysis::resolveNameAccess(JSCont
access.slot = (kind == ARGUMENT) ? ArgSlot(index) : LocalSlot(script, index);
access.arg = (kind == ARGUMENT);
access.index = index;
return access;
} else if (kind != NONE) {
return access;
}
+ /*
+ * The script's bindings do not contain a name for the function itself,
+ * don't resolve name accesses on lambdas in DeclEnv objects on the
+ * scope chain.
+ */
+ if (atom == CallObjectLambdaName(script->function()))
+ return access;
+
if (!script->nesting()->parent)
return access;
script = script->nesting()->parent;
}
return access;
}
--- a/js/src/methodjit/BaseCompiler.h
+++ b/js/src/methodjit/BaseCompiler.h
@@ -276,23 +276,23 @@ class AutoReserveICSpace {
/* Automatically check the IC space if we didn't already do it manually. */
if (!didCheck) {
check();
}
#endif
}
};
-# define RESERVE_IC_SPACE(__masm) AutoReserveICSpace<128> arics(__masm)
+# define RESERVE_IC_SPACE(__masm) AutoReserveICSpace<256> arics(__masm)
# define CHECK_IC_SPACE() arics.check()
/* The OOL path can need a lot of space because we save and restore a lot of registers. The actual
* sequene varies. However, dumping the literal pool before an OOL block is probably a good idea
* anyway, as we branch directly to the start of the block from the fast path. */
-# define RESERVE_OOL_SPACE(__masm) AutoReserveICSpace<256> arics_ool(__masm)
+# define RESERVE_OOL_SPACE(__masm) AutoReserveICSpace<2048> arics_ool(__masm)
/* Allow the OOL patch to be checked before object destruction. Often, non-patchable epilogues or
* rejoining sequences are emitted, and it isn't necessary to protect these from literal pools. */
# define CHECK_OOL_SPACE() arics_ool.check()
#else
# define RESERVE_IC_SPACE(__masm) /* Do nothing. */
# define CHECK_IC_SPACE() /* Do nothing. */
# define RESERVE_OOL_SPACE(__masm) /* Do nothing. */
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -4501,16 +4501,18 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
if (pic.hasTypeCheck)
labels.setInlineTypeJump(masm, pic.fastPathStart, typeCheck);
#ifdef JS_CPU_X64
labels.setInlineShapeJump(masm, inlineShapeLabel, inlineShapeJump);
#else
labels.setInlineShapeJump(masm, pic.shapeGuard, inlineShapeJump);
#endif
+ CHECK_IC_SPACE();
+
pic.objReg = objReg;
frame.pushRegs(shapeReg, objReg, knownType);
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
/* force = */ pic.canCallHook);
stubcc.rejoin(Changes(1));
pics.append(pic);
@@ -4619,16 +4621,18 @@ mjit::Compiler::jsop_callprop_generic(JS
labels.setValueLoad(masm, pic.fastPathRejoin, fastValueLoad);
labels.setInlineTypeJump(masm, pic.fastPathStart, typeCheck);
#ifdef JS_CPU_X64
labels.setInlineShapeJump(masm, inlineShapeLabel, inlineShapeJump);
#else
labels.setInlineShapeJump(masm, pic.shapeGuard, inlineShapeJump);
#endif
+ CHECK_IC_SPACE();
+
/* Adjust the frame. */
frame.pop();
frame.pushRegs(shapeReg, objReg, knownPushedType(0));
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
/* force = */ pic.canCallHook);
pushSyncedEntry(1);
@@ -4756,16 +4760,18 @@ mjit::Compiler::jsop_callprop_obj(JSAtom
/* Copy the slot value to the expression stack. */
Address slot(objReg, 1 << 24);
Label fastValueLoad = masm.loadValueWithAddressOffsetPatch(slot, shapeReg, objReg);
pic.fastPathRejoin = masm.label();
pic.objReg = objReg;
+ CHECK_IC_SPACE();
+
/*
* 1) Dup the |this| object.
* 2) Store the property value below the |this| value.
* This is safe as a stack transition, because JSOP_CALLPROP has
* JOF_TMPSLOT. It is also safe for correctness, because if we know the LHS
* is an object, it is the resulting vp[1].
*/
frame.dup();
@@ -5340,16 +5346,18 @@ mjit::Compiler::jsop_name(JSAtom *atom,
CHECK_OOL_SPACE();
}
pic.fastPathRejoin = masm.label();
/* Initialize op labels. */
ScopeNameLabels &labels = pic.scopeNameLabels();
labels.setInlineJump(masm, pic.fastPathStart, inlineJump);
+ CHECK_IC_SPACE();
+
/*
* We can't optimize away the PIC for the NAME access itself, but if we've
* only seen a single value pushed by this access, mark it as such and
* recompile if a different value becomes possible.
*/
JSObject *singleton = pushedSingleton(0);
if (singleton) {
frame.push(ObjectValue(*singleton));
@@ -5428,16 +5436,18 @@ mjit::Compiler::jsop_xname(JSAtom *atom)
pic.fastPathRejoin = masm.label();
RETURN_IF_OOM(false);
/* Initialize op labels. */
ScopeNameLabels &labels = pic.scopeNameLabels();
labels.setInlineJumpOffset(masm.differenceBetween(pic.fastPathStart, inlineJump));
+ CHECK_IC_SPACE();
+
frame.pop();
frame.pushRegs(pic.shapeReg, pic.objReg, knownPushedType(0));
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, /* testUndefined = */ true);
stubcc.rejoin(Changes(1));
pics.append(pic);
@@ -6014,16 +6024,18 @@ mjit::Compiler::jsop_getgname(uint32 ind
/* Allocate any register other than objReg. */
RegisterID treg = frame.allocReg();
/* After dreg is loaded, it's safe to clobber objReg. */
RegisterID dreg = objReg;
ic.load = masm.loadValueWithAddressOffsetPatch(address, treg, dreg);
+ CHECK_IC_SPACE();
+
frame.pushRegs(treg, dreg, type);
/*
* Note: no undefined check is needed for GNAME opcodes. These were not
* declared with 'var', so cannot be undefined without triggering an error
* or having been a pre-existing global whose value is undefined (which
* type inference will know about).
*/
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -2146,16 +2146,18 @@ mjit::Compiler::jsop_getelem(bool isCall
if (isCall)
ic.slowPathCall = OOL_STUBCALL(stubs::CallElem, REJOIN_FALLTHROUGH);
else
ic.slowPathCall = OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
#endif
ic.fastPathRejoin = masm.label();
+ CHECK_IC_SPACE();
+
frame.popn(2);
frame.pushRegs(ic.typeReg, ic.objReg, knownPushedType(0));
BarrierState barrier = testBarrier(ic.typeReg, ic.objReg, false);
if (isCall)
frame.pushSynced(knownPushedType(1));
stubcc.rejoin(Changes(isCall ? 2 : 1));
--- a/js/src/methodjit/MachineRegs.h
+++ b/js/src/methodjit/MachineRegs.h
@@ -113,17 +113,17 @@ struct Registers {
#endif
// Register that homes the current JSStackFrame.
#if defined(JS_CPU_X86)
static const RegisterID JSFrameReg = JSC::X86Registers::ebp;
#elif defined(JS_CPU_X64)
static const RegisterID JSFrameReg = JSC::X86Registers::ebx;
#elif defined(JS_CPU_ARM)
- static const RegisterID JSFrameReg = JSC::ARMRegisters::r11;
+ static const RegisterID JSFrameReg = JSC::ARMRegisters::r10;
#elif defined(JS_CPU_SPARC)
static const RegisterID JSFrameReg = JSC::SparcRegisters::l0;
#endif
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
static const RegisterID ReturnReg = JSC::X86Registers::eax;
# if defined(JS_CPU_X86) || defined(_WIN64)
static const RegisterID ArgReg0 = JSC::X86Registers::ecx;
@@ -220,19 +220,18 @@ struct Registers {
// r12 is IP, and is used for stub calls.
static const uint32 SavedRegs =
(1 << JSC::ARMRegisters::r4)
| (1 << JSC::ARMRegisters::r5)
| (1 << JSC::ARMRegisters::r6)
| (1 << JSC::ARMRegisters::r7)
// r8 is reserved as a scratch register for the assembler.
- | (1 << JSC::ARMRegisters::r9)
- | (1 << JSC::ARMRegisters::r10);
- // r11 is reserved for JSFrameReg.
+ | (1 << JSC::ARMRegisters::r9);
+ // r10 is reserved for JSFrameReg.
// r13 is SP and must always point to VMFrame whilst in generated code.
// r14 is LR and is used for return sequences.
// r15 is PC (program counter).
static const uint32 SingleByteRegs = TempRegs | SavedRegs;
#elif defined(JS_CPU_SPARC)
static const uint32 TempRegs =
(1 << JSC::SparcRegisters::o0)
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -477,17 +477,17 @@ JS_STATIC_ASSERT(sizeof(VMFrame)%8 == 0)
JS_STATIC_ASSERT(offsetof(VMFrame, savedLR) == (4*21));
JS_STATIC_ASSERT(offsetof(VMFrame, entryfp) == (4*10));
JS_STATIC_ASSERT(offsetof(VMFrame, stackLimit) == (4*9));
JS_STATIC_ASSERT(offsetof(VMFrame, cx) == (4*8));
JS_STATIC_ASSERT(VMFrame::offsetOfFp == (4*7));
JS_STATIC_ASSERT(offsetof(VMFrame, scratch) == (4*3));
JS_STATIC_ASSERT(offsetof(VMFrame, previous) == (4*2));
-JS_STATIC_ASSERT(JSFrameReg == JSC::ARMRegisters::r11);
+JS_STATIC_ASSERT(JSFrameReg == JSC::ARMRegisters::r10);
JS_STATIC_ASSERT(JSReturnReg_Type == JSC::ARMRegisters::r5);
JS_STATIC_ASSERT(JSReturnReg_Data == JSC::ARMRegisters::r4);
#ifdef MOZ_THUMB2
#define FUNCTION_HEADER_EXTRA \
".align 2\n" \
".thumb\n" \
".thumb_func\n"
@@ -542,34 +542,34 @@ SYMBOL_STRING(JaegerTrampoline) ":"
" push {r3}" "\n" /* stackLimit */
" push {r0}" "\n" /* cx */
" push {r1}" "\n" /* regs.fp */
/* Remaining fields are set elsewhere, but we need to leave space for them. */
" sub sp, sp, #(4*7)" "\n"
/* Preserve 'code' (r2) in an arbitrary callee-saved register. */
" mov r4, r2" "\n"
- /* Preserve 'fp' (r1) in r11 (JSFrameReg). */
-" mov r11, r1" "\n"
+ /* Preserve 'fp' (r1) in r10 (JSFrameReg). */
+" mov r10, r1" "\n"
" mov r0, sp" "\n"
" blx " SYMBOL_STRING_VMFRAME(SetVMFrameRegs) "\n"
" mov r0, sp" "\n"
" blx " SYMBOL_STRING_VMFRAME(PushActiveVMFrame)"\n"
/* Call the compiled JavaScript function. */
" bx r4" "\n"
);
asm (
".text\n"
FUNCTION_HEADER_EXTRA
".globl " SYMBOL_STRING(JaegerTrampolineReturn) "\n"
SYMBOL_STRING(JaegerTrampolineReturn) ":" "\n"
-" strd r4, r5, [r11, #24]" "\n" /* fp->rval type,data */
+" strd r4, r5, [r10, #24]" "\n" /* fp->rval type,data */
/* Tidy up. */
" mov r0, sp" "\n"
" blx " SYMBOL_STRING_VMFRAME(PopActiveVMFrame) "\n"
/* Skip past the parameters we pushed (such as cx and the like). */
" add sp, sp, #(4*7 + 4*6)" "\n"
@@ -605,31 +605,31 @@ SYMBOL_STRING(JaegerThrowpoline) ":"
asm (
".text\n"
FUNCTION_HEADER_EXTRA
".globl " SYMBOL_STRING(JaegerInterpolineScripted) "\n"
SYMBOL_STRING(JaegerInterpolineScripted) ":" "\n"
/* The only difference between JaegerInterpoline and JaegerInpolineScripted is that the
* scripted variant has to walk up to the previous StackFrame first. */
-" ldr r11, [r11, #(4*4)]" "\n" /* Load f->prev_ */
-" str r11, [sp, #(4*7)]" "\n" /* Update f->regs->fp_ */
+" ldr r10, [r10, #(4*4)]" "\n" /* Load f->prev_ */
+" str r10, [sp, #(4*7)]" "\n" /* Update f->regs->fp_ */
/* Fall through into JaegerInterpoline. */
FUNCTION_HEADER_EXTRA
".globl " SYMBOL_STRING(JaegerInterpoline) "\n"
SYMBOL_STRING(JaegerInterpoline) ":" "\n"
" mov r3, sp" "\n" /* f */
" mov r2, r0" "\n" /* returnReg */
" mov r1, r5" "\n" /* returnType */
" mov r0, r4" "\n" /* returnData */
" blx " SYMBOL_STRING_RELOC(js_InternalInterpret) "\n"
" cmp r0, #0" "\n"
-" ldr ip, [sp, #(4*7)]" "\n" /* Load (StackFrame*)f->regs->fp_ */
-" ldrd r4, r5, [ip, #(4*6)]" "\n" /* Load rval payload and type. */
+" ldr r10, [sp, #(4*7)]" "\n" /* Load (StackFrame*)f->regs->fp_ */
+" ldrd r4, r5, [r10, #(4*6)]" "\n" /* Load rval payload and type. */
" ldr r1, [sp, #(4*3)]" "\n" /* Load scratch. */
" it ne" "\n"
" bxne r0" "\n"
/* Tidy up, then return 0. */
" mov r0, sp" "\n"
" blx " SYMBOL_STRING_VMFRAME(PopActiveVMFrame) "\n"
" add sp, sp, #(4*7 + 4*6)" "\n"
" mov r0, #0" "\n"
--- a/js/src/methodjit/PolyIC.cpp
+++ b/js/src/methodjit/PolyIC.cpp
@@ -1216,17 +1216,17 @@ class GetPropCompiler : public PICStubCo
/* Grap vp. */
RegisterID vpReg = t0;
masm.addPtr(Imm32(vpOffset), JSFrameReg, vpReg);
masm.restoreStackBase();
masm.setupABICall(Registers::NormalCall, 4);
masm.storeArg(3, vpReg);
- masm.storeArg(2, ImmPtr((void *) JSID_BITS(shape->propid)));
+ masm.storeArg(2, ImmPtr((void *) JSID_BITS(SHAPE_USERID(shape))));
masm.storeArg(1, holdObjReg);
masm.storeArg(0, cxReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, getter), false);
NativeStubLinker::FinalJump done;
if (!NativeStubEpilogue(f, masm, &done, 0, vpOffset, pic.shapeReg, pic.objReg))
return;
--- a/js/src/methodjit/Retcon.cpp
+++ b/js/src/methodjit/Retcon.cpp
@@ -96,24 +96,38 @@ SetRejoinState(StackFrame *fp, const Cal
fp->setRejoin(ScriptedRejoin(site.pcOffset));
*location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolineScripted);
} else {
fp->setRejoin(StubRejoin(site.rejoin));
*location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
}
}
+static inline bool
+CallsiteMatches(uint8 *codeStart, const CallSite &site, void *location)
+{
+ if (codeStart + site.codeOffset == location)
+ return true;
+
+#ifdef JS_CPU_ARM
+ if (codeStart + site.codeOffset + 4 == location)
+ return true;
+#endif
+
+ return false;
+}
+
void
Recompiler::patchCall(JITScript *jit, StackFrame *fp, void **location)
{
uint8* codeStart = (uint8 *)jit->code.m_code.executableAddress();
CallSite *callSites_ = jit->callSites();
for (uint32 i = 0; i < jit->nCallSites; i++) {
- if (callSites_[i].codeOffset + codeStart == *location) {
+ if (CallsiteMatches(codeStart, callSites_[i], *location)) {
JS_ASSERT(callSites_[i].inlineIndex == analyze::CrossScriptSSA::OUTER_FRAME);
SetRejoinState(fp, callSites_[i], location);
return;
}
}
JS_NOT_REACHED("failed to find call site");
}
@@ -292,17 +306,17 @@ Recompiler::expandInlineFrames(JSCompart
JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
f->stubRejoin != REJOIN_NATIVE_LOWERED &&
f->stubRejoin != REJOIN_NATIVE_GETTER &&
f->stubRejoin != REJOIN_NATIVE_PATCHED);
innerfp->setRejoin(StubRejoin((RejoinState) f->stubRejoin));
*frameAddr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
f->stubRejoin = 0;
}
- if (*frameAddr == codeStart + inlined->codeOffset) {
+ if (CallsiteMatches(codeStart, *inlined, *frameAddr)) {
/* The VMFrame returns directly into the expanded frame. */
SetRejoinState(innerfp, *inlined, frameAddr);
}
if (f->fp() == fp) {
JS_ASSERT(f->regs.inlined() == inlined);
f->regs.expandInline(innerfp, innerpc);
}
--- a/js/src/methodjit/StubCalls.cpp
+++ b/js/src/methodjit/StubCalls.cpp
@@ -596,17 +596,17 @@ stubs::SetElem(VMFrame &f)
if (js_PrototypeHasIndexedProperties(cx, obj))
break;
if ((jsuint)i >= obj->getArrayLength())
obj->setArrayLength(cx, i + 1);
}
obj->setDenseArrayElementWithType(cx, i, rval);
goto end_setelem;
} else {
- if (!f.script()->hasAnalysis())
+ if (f.script()->hasAnalysis())
f.script()->analysis()->getCode(f.pc()).arrayWriteHole = true;
}
}
} while (0);
if (!obj->setProperty(cx, id, &rval, strict))
THROW();
end_setelem:
/* :FIXME: Moving the assigned object into the lowest stack slot