--- a/js/src/asmjs/Wasm.cpp
+++ b/js/src/asmjs/Wasm.cpp
@@ -76,16 +76,26 @@ Unify(ExprType one, ExprType two)
return two;
if (two == AnyType)
return one;
if (one == two)
return one;
return ExprType::Void;
}
+static bool
+IsI64Implemented()
+{
+#ifdef JS_CPU_X64
+ return true;
+#else
+ return false;
+#endif
+}
+
class FunctionDecoder
{
JSContext* cx_;
Decoder& d_;
ModuleGenerator& mg_;
FunctionGenerator& fg_;
uint32_t funcIndex_;
const ValTypeVector& locals_;
@@ -103,21 +113,19 @@ class FunctionDecoder
uint32_t funcIndex() const { return funcIndex_; }
const ValTypeVector& locals() const { return locals_; }
const DeclaredSig& sig() const { return mg_.funcSig(funcIndex_); }
bool fail(const char* str) {
return Fail(cx_, d_, str);
}
bool checkI64Support() {
-#ifdef JS_CPU_X64
+ if (!IsI64Implemented())
+ return fail("i64 NYI on this platform");
return true;
-#else
- return fail("i64 NYI on this platform");
-#endif
}
MOZ_WARN_UNUSED_RESULT bool pushBlock() {
return blocks_.append(AnyType);
}
ExprType popBlock() {
return blocks_.popCopy();
}
@@ -1068,24 +1076,26 @@ DecodeFunctionTable(JSContext* cx, Decod
}
return true;
}
static bool
CheckTypeForJS(JSContext* cx, Decoder& d, const Sig& sig)
{
+ bool allowI64 = IsI64Implemented() && JitOptions.wasmTestMode;
+
for (ValType argType : sig.args()) {
- if (argType == ValType::I64)
+ if (argType == ValType::I64 && !allowI64)
return Fail(cx, d, "cannot import/export i64 argument");
if (IsSimdType(argType))
return Fail(cx, d, "cannot import/export SIMD argument");
}
- if (sig.ret() == ExprType::I64)
+ if (sig.ret() == ExprType::I64 && !allowI64)
return Fail(cx, d, "cannot import/export i64 return type");
if (IsSimdType(sig.ret()))
return Fail(cx, d, "cannot import/export SIMD return type");
return true;
}
struct ImportName
--- a/js/src/asmjs/WasmModule.cpp
+++ b/js/src/asmjs/WasmModule.cpp
@@ -1269,16 +1269,34 @@ Module::deoptimizeImportExit(uint32_t im
{
MOZ_ASSERT(dynamicallyLinked_);
const Import& import = imports()[importIndex];
ImportExit& exit = importToExit(import);
exit.code = code() + import.interpExitCodeOffset();
exit.baselineScript = nullptr;
}
+static JSObject*
+CreateI64Object(JSContext* cx, int64_t i64)
+{
+ RootedObject result(cx, JS_NewPlainObject(cx));
+ if (!result)
+ return nullptr;
+
+ RootedValue val(cx, Int32Value(uint32_t(i64)));
+ if (!JS_DefineProperty(cx, result, "low", val, JSPROP_ENUMERATE))
+ return nullptr;
+
+ val = Int32Value(uint32_t(i64 >> 32));
+ if (!JS_DefineProperty(cx, result, "high", val, JSPROP_ENUMERATE))
+ return nullptr;
+
+ return result;
+}
+
bool
Module::callExport(JSContext* cx, uint32_t exportIndex, CallArgs args)
{
MOZ_ASSERT(dynamicallyLinked_);
const Export& exp = exports()[exportIndex];
// Enable/disable profiling in the Module to match the current global
@@ -1306,17 +1324,20 @@ Module::callExport(JSContext* cx, uint32
for (unsigned i = 0; i < exp.sig().args().length(); ++i) {
v = i < args.length() ? args[i] : UndefinedValue();
switch (exp.sig().arg(i)) {
case ValType::I32:
if (!ToInt32(cx, v, (int32_t*)&coercedArgs[i]))
return false;
break;
case ValType::I64:
- MOZ_CRASH("int64");
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in asm.js/wasm");
+ if (!ReadI64Object(cx, v, (int64_t*)&coercedArgs[i]))
+ return false;
+ break;
case ValType::F32:
if (!RoundFloat32(cx, v, (float*)&coercedArgs[i]))
return false;
break;
case ValType::F64:
if (!ToNumber(cx, v, (double*)&coercedArgs[i]))
return false;
break;
@@ -1369,85 +1390,125 @@ Module::callExport(JSContext* cx, uint32
// returned instead.
PlainObject* obj = NewBuiltinClassInstance<PlainObject>(cx);
if (!obj)
return false;
args.rval().set(ObjectValue(*obj));
return true;
}
- JSObject* simdObj;
+ void* retAddr = &coercedArgs[0];
+ JSObject* retObj = nullptr;
switch (exp.sig().ret()) {
case ExprType::Void:
args.rval().set(UndefinedValue());
break;
case ExprType::I32:
- args.rval().set(Int32Value(*(int32_t*)&coercedArgs[0]));
+ args.rval().set(Int32Value(*(int32_t*)retAddr));
break;
case ExprType::I64:
- MOZ_CRASH("int64");
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in asm.js/wasm");
+ retObj = CreateI64Object(cx, *(int64_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
case ExprType::F32:
+ // The entry stub has converted the F32 into a double for us.
case ExprType::F64:
- args.rval().set(NumberValue(*(double*)&coercedArgs[0]));
+ args.rval().set(NumberValue(*(double*)retAddr));
break;
case ExprType::I32x4:
- simdObj = CreateSimd<Int32x4>(cx, (int32_t*)&coercedArgs[0]);
- if (!simdObj)
+ retObj = CreateSimd<Int32x4>(cx, (int32_t*)retAddr);
+ if (!retObj)
return false;
- args.rval().set(ObjectValue(*simdObj));
break;
case ExprType::F32x4:
- simdObj = CreateSimd<Float32x4>(cx, (float*)&coercedArgs[0]);
- if (!simdObj)
+ retObj = CreateSimd<Float32x4>(cx, (float*)retAddr);
+ if (!retObj)
return false;
- args.rval().set(ObjectValue(*simdObj));
break;
case ExprType::B32x4:
- simdObj = CreateSimd<Bool32x4>(cx, (int32_t*)&coercedArgs[0]);
- if (!simdObj)
+ retObj = CreateSimd<Bool32x4>(cx, (int32_t*)retAddr);
+ if (!retObj)
return false;
- args.rval().set(ObjectValue(*simdObj));
break;
case ExprType::Limit:
MOZ_CRASH("Limit");
}
+ if (retObj)
+ args.rval().set(ObjectValue(*retObj));
+
return true;
}
bool
-Module::callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv,
+Module::callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const uint64_t* argv,
MutableHandleValue rval)
{
MOZ_ASSERT(dynamicallyLinked_);
const Import& import = imports()[importIndex];
InvokeArgs args(cx);
if (!args.init(argc))
return false;
- for (size_t i = 0; i < argc; i++)
- args[i].set(argv[i]);
+ bool hasI64Arg = false;
+ MOZ_ASSERT(import.sig().args().length() == argc);
+ for (size_t i = 0; i < argc; i++) {
+ switch (import.sig().args()[i]) {
+ case ValType::I32:
+ args[i].set(Int32Value(*(int32_t*)&argv[i]));
+ break;
+ case ValType::F32:
+ args[i].set(JS::CanonicalizedDoubleValue(*(float*)&argv[i]));
+ break;
+ case ValType::F64:
+ args[i].set(JS::CanonicalizedDoubleValue(*(double*)&argv[i]));
+ break;
+ case ValType::I64: {
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in asm.js/wasm");
+ RootedObject obj(cx, CreateI64Object(cx, *(int64_t*)&argv[i]));
+ if (!obj)
+ return false;
+ args[i].set(ObjectValue(*obj));
+ hasI64Arg = true;
+ break;
+ }
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B32x4:
+ case ValType::Limit:
+ MOZ_CRASH("unhandled type in callImport");
+ }
+ }
RootedValue fval(cx, ObjectValue(*importToExit(import).fun));
RootedValue thisv(cx, UndefinedValue());
if (!Call(cx, fval, thisv, args, rval))
return false;
+ // Don't try to optimize if the function has at least one i64 arg or if
+ // it returns an int64. GenerateJitExit relies on this, as does the
+ // type inference code below in this function.
+ if (hasI64Arg || import.sig().ret() == ExprType::I64)
+ return true;
+
ImportExit& exit = importToExit(import);
// The exit may already have become optimized.
void* jitExitCode = code() + import.jitExitCodeOffset();
if (exit.code == jitExitCode)
return true;
// Test if the function is JIT compiled.
if (!exit.fun->hasScript())
return true;
+
JSScript* script = exit.fun->nonLazyScript();
if (!script->hasBaselineScript()) {
MOZ_ASSERT(!script->hasIonScript());
return true;
}
// Don't enable jit entry when we have a pending ion builder.
// Take the interpreter path which will link it and enable
@@ -1468,17 +1529,17 @@ Module::callImport(JSContext* cx, uint32
// the BaselineScript is discarded and when that happens the import exit is
// patched back.
if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
return true;
for (uint32_t i = 0; i < exit.fun->nargs(); i++) {
TypeSet::Type type = TypeSet::UnknownType();
switch (import.sig().args()[i]) {
case ValType::I32: type = TypeSet::Int32Type(); break;
- case ValType::I64: MOZ_CRASH("NYI");
+ case ValType::I64: MOZ_CRASH("can't happen because of above guard");
case ValType::F32: type = TypeSet::DoubleType(); break;
case ValType::F64: type = TypeSet::DoubleType(); break;
case ValType::I32x4: MOZ_CRASH("NYI");
case ValType::F32x4: MOZ_CRASH("NYI");
case ValType::B32x4: MOZ_CRASH("NYI");
case ValType::Limit: MOZ_CRASH("Limit");
}
if (!TypeScript::ArgTypes(script, i)->hasType(type))
--- a/js/src/asmjs/WasmModule.h
+++ b/js/src/asmjs/WasmModule.h
@@ -595,17 +595,17 @@ class Module : public mozilla::LinkedLis
bool callExport(JSContext* cx, uint32_t exportIndex, CallArgs args);
// Initially, calls to imports in wasm code call out through the generic
// callImport method. If the imported callee gets JIT compiled and the types
// match up, callImport will patch the code to instead call through a thunk
// directly into the JIT code. If the JIT code is released, the Module must
// be notified so it can go back to the generic callImport.
- bool callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv,
+ bool callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const uint64_t* argv,
MutableHandleValue rval);
void deoptimizeImportExit(uint32_t importIndex);
// At runtime, when $pc is in wasm function code (containsFunctionPC($pc)),
// $pc may be moved abruptly to interrupt() or outOfBounds() by a signal
// handler or SetContext() from another thread.
uint8_t* interrupt() const { MOZ_ASSERT(staticallyLinked_); return interrupt_; }
--- a/js/src/asmjs/WasmStubs.cpp
+++ b/js/src/asmjs/WasmStubs.cpp
@@ -130,16 +130,18 @@ wasm::GenerateEntry(MacroAssembler& masm
masm.loadAsmJSHeapRegisterFromGlobalData();
// Put the 'argv' argument into a non-argument/return register so that we
// can use 'argv' while we fill in the arguments for the asm.js callee.
// Also, save 'argv' on the stack so that we can recover it after the call.
// Use a second non-argument/return register as temporary scratch.
Register argv = ABIArgGenerator::NonArgReturnReg0;
Register scratch = ABIArgGenerator::NonArgReturnReg1;
+ Register64 scratch64(scratch);
+
#if defined(JS_CODEGEN_X86)
masm.loadPtr(Address(masm.getStackPointer(), EntryFrameSize + masm.framePushed()), argv);
#else
masm.movePtr(IntArgReg0, argv);
#endif
masm.Push(argv);
// Save the stack pointer to the saved non-volatile registers. We will use
@@ -159,19 +161,23 @@ wasm::GenerateEntry(MacroAssembler& masm
masm.reserveStack(AlignBytes(StackArgBytes(sig.args()), AsmJSStackAlignment));
// Copy parameters out of argv and into the registers/stack-slots specified by
// the system ABI.
for (ABIArgValTypeIter iter(sig.args()); !iter.done(); iter++) {
unsigned argOffset = iter.index() * Module::SizeOfEntryArg;
Address src(argv, argOffset);
MIRType type = iter.mirType();
+ MOZ_ASSERT_IF(type == MIRType_Int64, JitOptions.wasmTestMode);
switch (iter->kind()) {
case ABIArg::GPR:
- masm.load32(src, iter->gpr());
+ if (type == MIRType_Int32)
+ masm.load32(src, iter->gpr());
+ else if (type == MIRType_Int64)
+ masm.load64(src, iter->gpr64());
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
MOZ_CRASH("wasm uses hardfp for function calls.");
break;
#endif
case ABIArg::FPU: {
static_assert(Module::SizeOfEntryArg >= jit::Simd128DataSize,
@@ -197,16 +203,20 @@ wasm::GenerateEntry(MacroAssembler& masm
break;
}
case ABIArg::Stack:
switch (type) {
case MIRType_Int32:
masm.load32(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
+ case MIRType_Int64:
+ masm.load64(src, scratch64);
+ masm.store64(scratch64, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+ break;
case MIRType_Double:
masm.loadDouble(src, ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType_Float32:
masm.loadFloat32(src, ScratchFloat32Reg);
masm.storeFloat32(ScratchFloat32Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
@@ -240,20 +250,22 @@ wasm::GenerateEntry(MacroAssembler& masm
// Recover the 'argv' pointer which was saved before aligning the stack.
masm.Pop(argv);
// Store the return value in argv[0]
switch (sig.ret()) {
case ExprType::Void:
break;
case ExprType::I32:
- masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0));
+ masm.store32(ReturnReg, Address(argv, 0));
break;
case ExprType::I64:
- MOZ_CRASH("no int64 in asm.js");
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in asm.js/wasm");
+ masm.store64(ReturnReg64, Address(argv, 0));
+ break;
case ExprType::F32:
masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
MOZ_FALLTHROUGH; // as ReturnDoubleReg now contains a Double
case ExprType::F64:
masm.canonicalizeDouble(ReturnDoubleReg);
masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
break;
case ExprType::I32x4:
@@ -275,61 +287,95 @@ wasm::GenerateEntry(MacroAssembler& masm
masm.move32(Imm32(true), ReturnReg);
masm.ret();
offsets.end = masm.currentOffset();
return offsets;
}
+typedef bool ToValue;
+
static void
FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argOffset,
- unsigned offsetToCallerStackArgs, Register scratch)
+ unsigned offsetToCallerStackArgs, Register scratch, ToValue toValue)
{
+ Register64 scratch64(scratch);
for (ABIArgValTypeIter i(args); !i.done(); i++) {
Address dstAddr(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
+
+ MIRType type = i.mirType();
+ MOZ_ASSERT_IF(type == MIRType_Int64, JitOptions.wasmTestMode);
+
switch (i->kind()) {
case ABIArg::GPR:
- masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dstAddr);
+ if (type == MIRType_Int32) {
+ if (toValue)
+ masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dstAddr);
+ else
+ masm.store32(i->gpr(), dstAddr);
+ } else if (type == MIRType_Int64) {
+ // We can't box int64 into Values (yet).
+ if (toValue)
+ masm.breakpoint();
+ else
+ masm.store64(i->gpr64(), dstAddr);
+ } else {
+ MOZ_CRASH("unexpected input type?");
+ }
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
MOZ_CRASH("AsmJS uses hardfp for function calls.");
break;
#endif
case ABIArg::FPU: {
- MOZ_ASSERT(IsFloatingPointType(i.mirType()));
+ MOZ_ASSERT(IsFloatingPointType(type));
FloatRegister srcReg = i->fpu();
- if (i.mirType() == MIRType_Float32) {
- masm.convertFloat32ToDouble(i->fpu(), ScratchDoubleReg);
- srcReg = ScratchDoubleReg;
+ if (toValue) {
+ if (type == MIRType_Float32) {
+ masm.convertFloat32ToDouble(i->fpu(), ScratchDoubleReg);
+ srcReg = ScratchDoubleReg;
+ }
+ masm.canonicalizeDouble(srcReg);
}
- masm.canonicalizeDouble(srcReg);
masm.storeDouble(srcReg, dstAddr);
break;
}
case ABIArg::Stack:
- if (i.mirType() == MIRType_Int32) {
+ if (type == MIRType_Int32) {
Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
masm.load32(src, scratch);
- masm.storeValue(JSVAL_TYPE_INT32, scratch, dstAddr);
-#else
- masm.memIntToValue(src, dstAddr);
-#endif
+ if (toValue)
+ masm.storeValue(JSVAL_TYPE_INT32, scratch, dstAddr);
+ else
+ masm.store32(scratch, dstAddr);
+ } else if (type == MIRType_Int64) {
+ // We can't box int64 into Values (yet).
+ if (toValue) {
+ masm.breakpoint();
+ } else {
+ Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
+ masm.load64(src, scratch64);
+ masm.store64(scratch64, dstAddr);
+ }
} else {
- MOZ_ASSERT(IsFloatingPointType(i.mirType()));
+ MOZ_ASSERT(IsFloatingPointType(type));
Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
- if (i.mirType() == MIRType_Float32) {
- masm.loadFloat32(src, ScratchFloat32Reg);
- masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
+ if (toValue) {
+ if (type == MIRType_Float32) {
+ masm.loadFloat32(src, ScratchFloat32Reg);
+ masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
+ } else {
+ masm.loadDouble(src, ScratchDoubleReg);
+ }
+ masm.canonicalizeDouble(ScratchDoubleReg);
} else {
masm.loadDouble(src, ScratchDoubleReg);
}
- masm.canonicalizeDouble(ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, dstAddr);
}
break;
}
}
}
// Generate a stub that is called via the internal ABI derived from the
@@ -358,17 +404,17 @@ wasm::GenerateInterpExit(MacroAssembler&
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
ProfilingOffsets offsets;
GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, &offsets);
// Fill the argument array.
unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
Register scratch = ABIArgGenerator::NonArgReturnReg0;
- FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);
+ FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
// Prepare the arguments for the call to InvokeImport_*.
ABIArgMIRTypeIter i(invokeArgTypes);
// argument 0: importIndex
if (i->kind() == ABIArg::GPR)
masm.mov(ImmWord(importIndex), i->gpr());
else
@@ -399,20 +445,24 @@ wasm::GenerateInterpExit(MacroAssembler&
switch (sig.ret()) {
case ExprType::Void:
masm.call(SymbolicAddress::InvokeImport_Void);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
break;
case ExprType::I32:
masm.call(SymbolicAddress::InvokeImport_I32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
- masm.unboxInt32(argv, ReturnReg);
+ masm.load32(argv, ReturnReg);
break;
case ExprType::I64:
- MOZ_CRASH("no int64 in asm.js");
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in asm.js/wasm");
+ masm.call(SymbolicAddress::InvokeImport_I64);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
+ masm.load64(argv, ReturnReg64);
+ break;
case ExprType::F32:
masm.call(SymbolicAddress::InvokeImport_F64);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
masm.loadDouble(argv, ReturnDoubleReg);
masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
break;
case ExprType::F64:
masm.call(SymbolicAddress::InvokeImport_F64);
@@ -504,17 +554,17 @@ wasm::GenerateJitExit(MacroAssembler& ma
argOffset += sizeof(size_t);
// 4. |this| value
masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(Value);
// 5. Fill the arguments
unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(AsmJSFrame);
- FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch);
+ FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
argOffset += sig.args().length() * sizeof(Value);
MOZ_ASSERT(argOffset == jitFrameBytes);
// 6. Jit code will clobber all registers, even non-volatiles. GlobalReg and
// HeapReg are removed from the general register set for asm.js code, so
// these will not have been saved by the caller like all other registers,
// so they must be explicitly preserved. Only save GlobalReg since
// HeapReg can be reloaded (from global data) after the call.
@@ -654,17 +704,21 @@ wasm::GenerateJitExit(MacroAssembler& ma
switch (sig.ret()) {
case ExprType::Void:
break;
case ExprType::I32:
masm.convertValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert,
/* -0 check */ false);
break;
case ExprType::I64:
- MOZ_CRASH("no int64 in asm.js");
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in asm.js/wasm");
+ // We don't expect int64 to be returned from Ion yet, because of a
+ // guard in callImport.
+ masm.breakpoint();
+ break;
case ExprType::F32:
masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
break;
case ExprType::F64:
masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
break;
case ExprType::I32x4:
case ExprType::F32x4:
--- a/js/src/asmjs/WasmTextToBinary.cpp
+++ b/js/src/asmjs/WasmTextToBinary.cpp
@@ -2456,17 +2456,17 @@ ParseConst(WasmParseContext& c, WasmToke
switch (val.kind()) {
case WasmToken::Index:
return new(c.lifo) WasmAstConst(Val(uint64_t(val.index())));
case WasmToken::UnsignedInteger:
return new(c.lifo) WasmAstConst(Val(val.uint()));
case WasmToken::SignedInteger:
return new(c.lifo) WasmAstConst(Val(uint64_t(val.sint())));
case WasmToken::NegativeZero:
- return new(c.lifo) WasmAstConst(Val(uint32_t(0)));
+ return new(c.lifo) WasmAstConst(Val(uint64_t(0)));
default:
break;
}
break;
}
case ValType::F32: {
float result;
if (!ParseFloatLiteral(c, val, &result))
--- a/js/src/asmjs/WasmTypes.cpp
+++ b/js/src/asmjs/WasmTypes.cpp
@@ -110,62 +110,105 @@ CoerceInPlace_ToNumber(MutableHandleValu
val.set(DoubleValue(dbl));
return true;
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
-InvokeImport_Void(int32_t importIndex, int32_t argc, Value* argv)
+InvokeImport_Void(int32_t importIndex, int32_t argc, uint64_t* argv)
{
WasmActivation* activation = JSRuntime::innermostWasmActivation();
JSContext* cx = activation->cx();
RootedValue rval(cx);
return activation->module().callImport(cx, importIndex, argc, argv, &rval);
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
-InvokeImport_I32(int32_t importIndex, int32_t argc, Value* argv)
+InvokeImport_I32(int32_t importIndex, int32_t argc, uint64_t* argv)
{
WasmActivation* activation = JSRuntime::innermostWasmActivation();
JSContext* cx = activation->cx();
RootedValue rval(cx);
if (!activation->module().callImport(cx, importIndex, argc, argv, &rval))
return false;
int32_t i32;
if (!ToInt32(cx, rval, &i32))
return false;
- argv[0] = Int32Value(i32);
+ argv[0] = i32;
+ return true;
+}
+
+bool
+js::wasm::ReadI64Object(JSContext* cx, HandleValue v, int64_t* i64)
+{
+ if (!v.isObject()) {
+ JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_FAIL,
+ "i64 JS value must be an object");
+ return false;
+ }
+
+ RootedObject obj(cx, &v.toObject());
+
+ int32_t* i32 = (int32_t*)i64;
+
+ RootedValue val(cx);
+ if (!JS_GetProperty(cx, obj, "low", &val))
+ return false;
+ if (!ToInt32(cx, val, &i32[0]))
+ return false;
+
+ if (!JS_GetProperty(cx, obj, "high", &val))
+ return false;
+ if (!ToInt32(cx, val, &i32[1]))
+ return false;
+
+ return true;
+}
+
+static int32_t
+InvokeImport_I64(int32_t importIndex, int32_t argc, uint64_t* argv)
+{
+ WasmActivation* activation = JSRuntime::innermostWasmActivation();
+ JSContext* cx = activation->cx();
+
+ RootedValue rval(cx);
+ if (!activation->module().callImport(cx, importIndex, argc, argv, &rval))
+ return false;
+
+ if (!ReadI64Object(cx, rval, (int64_t*)argv))
+ return false;
+
return true;
}
// Use an int32_t return type instead of bool since bool does not have a
// specified width and the caller is assuming a word-sized return.
static int32_t
-InvokeImport_F64(int32_t importIndex, int32_t argc, Value* argv)
+InvokeImport_F64(int32_t importIndex, int32_t argc, uint64_t* argv)
{
WasmActivation* activation = JSRuntime::innermostWasmActivation();
JSContext* cx = activation->cx();
RootedValue rval(cx);
if (!activation->module().callImport(cx, importIndex, argc, argv, &rval))
return false;
double dbl;
if (!ToNumber(cx, rval, &dbl))
return false;
- argv[0] = DoubleValue(dbl);
+ ((double*)argv)[0] = dbl;
return true;
}
template <class F>
static inline void*
FuncCast(F* pf, ABIFunctionType type)
{
void *pv = JS_FUNC_TO_DATA_PTR(void*, pf);
@@ -196,16 +239,18 @@ wasm::AddressOf(SymbolicAddress imm, Exc
case SymbolicAddress::UnreachableTrap:
return FuncCast(UnreachableTrap, Args_General0);
case SymbolicAddress::HandleExecutionInterrupt:
return FuncCast(WasmHandleExecutionInterrupt, Args_General0);
case SymbolicAddress::InvokeImport_Void:
return FuncCast(InvokeImport_Void, Args_General3);
case SymbolicAddress::InvokeImport_I32:
return FuncCast(InvokeImport_I32, Args_General3);
+ case SymbolicAddress::InvokeImport_I64:
+ return FuncCast(InvokeImport_I64, Args_General3);
case SymbolicAddress::InvokeImport_F64:
return FuncCast(InvokeImport_F64, Args_General3);
case SymbolicAddress::CoerceInPlace_ToInt32:
return FuncCast(CoerceInPlace_ToInt32, Args_General1);
case SymbolicAddress::CoerceInPlace_ToNumber:
return FuncCast(CoerceInPlace_ToNumber, Args_General1);
case SymbolicAddress::ToInt32:
return FuncCast<int32_t (double)>(JS::ToInt32, Args_Int_Double);
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -534,25 +534,30 @@ enum class SymbolicAddress
ReportOverRecursed,
OnOutOfBounds,
OnImpreciseConversion,
BadIndirectCall,
UnreachableTrap,
HandleExecutionInterrupt,
InvokeImport_Void,
InvokeImport_I32,
+ InvokeImport_I64,
InvokeImport_F64,
CoerceInPlace_ToInt32,
CoerceInPlace_ToNumber,
Limit
};
void*
AddressOf(SymbolicAddress imm, ExclusiveContext* cx);
+// Extracts low and high from an int64 object {low: int32, high: int32}, for
+// testing purposes mainly.
+bool ReadI64Object(JSContext* cx, HandleValue v, int64_t* val);
+
// A wasm::JumpTarget represents one of a special set of stubs that can be
// jumped to from any function. Because wasm modules can be larger than the
// range of a plain jump, these potentially out-of-range jumps must be recorded
// and patched specially by the MacroAssembler and ModuleGenerator.
enum class JumpTarget
{
StackOverflow,
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -51,16 +51,17 @@ T overrideDefault(const char* param, T d
} else {
Maybe<int> value = ParseInt(str);
if (value.isSome())
return value.ref();
Warn(param, str);
}
return dflt;
}
+
#define SET_DEFAULT(var, dflt) var = overrideDefault("JIT_OPTION_" #var, dflt)
DefaultJitOptions::DefaultJitOptions()
{
// Whether to perform expensive graph-consistency DEBUG-only assertions.
// It can be useful to disable this to reduce DEBUG-compile time of large
// asm.js programs.
SET_DEFAULT(checkGraphConsistency, true);
@@ -183,16 +184,19 @@ DefaultJitOptions::DefaultJitOptions()
if (const char* env = getenv(forcedRegisterAllocatorEnv)) {
forcedRegisterAllocator = LookupRegisterAllocator(env);
if (!forcedRegisterAllocator.isSome())
Warn(forcedRegisterAllocatorEnv, env);
}
// Toggles whether unboxed plain objects can be created by the VM.
SET_DEFAULT(disableUnboxedObjects, false);
+
+ // Whether wasm int64 / double NaN bits testing is enabled.
+ SET_DEFAULT(wasmTestMode, false);
}
bool
DefaultJitOptions::isSmallFunction(JSScript* script) const
{
return script->length() <= smallFunctionMaxBytecodeLength_;
}
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -61,16 +61,17 @@ struct DefaultJitOptions
bool disableScalarReplacement;
bool disableSharedStubs;
bool disableSincos;
bool disableSink;
bool eagerCompilation;
bool forceInlineCaches;
bool limitScriptSize;
bool osr;
+ bool wasmTestMode;
uint32_t baselineWarmUpThreshold;
uint32_t exceptionBailoutThreshold;
uint32_t frequentBailoutThreshold;
uint32_t maxStackArgs;
uint32_t osrPcMismatchesBeforeRecompile;
uint32_t smallFunctionMaxBytecodeLength_;
uint32_t jumpThreshold;
mozilla::Maybe<uint32_t> forcedDefaultIonWarmUpThreshold;
--- a/js/src/jit/RegisterSets.h
+++ b/js/src/jit/RegisterSets.h
@@ -1282,16 +1282,23 @@ class ABIArg
#else
bool isGeneralRegPair() const { return false; }
#endif
Register gpr() const {
MOZ_ASSERT(kind() == GPR);
return Register::FromCode(u.gpr_);
}
+ Register64 gpr64() const {
+#ifdef JS_PUNBOX64
+ return Register64(gpr());
+#else
+ MOZ_CRASH("NYI");
+#endif
+ }
Register evenGpr() const {
MOZ_ASSERT(isGeneralRegPair());
return Register::FromCode(u.gpr_);
}
Register oddGpr() const {
MOZ_ASSERT(isGeneralRegPair());
return Register::FromCode(u.gpr_ + 1);
}
--- a/js/src/jit/Registers.h
+++ b/js/src/jit/Registers.h
@@ -41,16 +41,20 @@ struct Register {
Register r = { Encoding(i) };
return r;
}
static Register FromName(const char* name) {
Code code = Registers::FromName(name);
Register r = { Encoding(code) };
return r;
}
+ static Register Invalid() {
+ Register r = { Encoding(Codes::Invalid) };
+ return r;
+ }
MOZ_CONSTEXPR Code code() const {
return Code(reg_);
}
Encoding encoding() const {
MOZ_ASSERT(Code(reg_) < Registers::Total);
return reg_;
}
const char* name() const {
@@ -104,16 +108,20 @@ struct Register64
Register low;
#endif
#ifdef JS_PUNBOX64
explicit MOZ_CONSTEXPR Register64(Register r)
: reg(r)
{}
#else
+ explicit Register64(Register)
+ : high(Register::Invalid()), low(Register::Invalid())
+ {}
+
MOZ_CONSTEXPR Register64(Register h, Register l)
: high(h), low(l)
{}
#endif
};
class RegisterDump
{
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -113,16 +113,17 @@ static MOZ_CONSTEXPR_VAR Register PreBar
static MOZ_CONSTEXPR_VAR Register InvalidReg = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = r3;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = r2;
static MOZ_CONSTEXPR_VAR Register StackPointer = sp;
static MOZ_CONSTEXPR_VAR Register FramePointer = InvalidReg;
static MOZ_CONSTEXPR_VAR Register ReturnReg = r0;
+static MOZ_CONSTEXPR_VAR Register64 ReturnReg64(InvalidReg, InvalidReg);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = { FloatRegisters::d0, VFPRegister::Single };
static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = { FloatRegisters::d0, VFPRegister::Double};
static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = InvalidFloatReg;
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = { FloatRegisters::d30, VFPRegister::Single };
static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = { FloatRegisters::d15, VFPRegister::Double };
static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = InvalidFloatReg;
static MOZ_CONSTEXPR_VAR FloatRegister ScratchUIntReg = { FloatRegisters::d15, VFPRegister::UInt };
static MOZ_CONSTEXPR_VAR FloatRegister ScratchIntReg = { FloatRegisters::d15, VFPRegister::Int };
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -1419,21 +1419,16 @@ class MacroAssemblerARMCompat : public M
// This is the instruction sequence that gcc generated for this
// operation.
ScratchRegisterScope scratch(asMasm());
ma_sub(r, Imm32(0x80000001), scratch);
ma_cmn(scratch, Imm32(3));
ma_b(handleNotAnInt, Above);
}
- void memIntToValue(Address Source, Address Dest) {
- load32(Source, lr);
- storeValue(JSVAL_TYPE_INT32, lr, Dest);
- }
-
void lea(Operand addr, Register dest) {
ma_add(addr.baseReg(), Imm32(addr.disp()), dest);
}
void abiret() {
as_bx(lr);
}
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -50,16 +50,17 @@ static constexpr Register CallTempReg1 =
static constexpr Register CallTempReg2 = { Registers::x11 };
static constexpr Register CallTempReg3 = { Registers::x12 };
static constexpr Register CallTempReg4 = { Registers::x13 };
static constexpr Register CallTempReg5 = { Registers::x14 };
static constexpr Register PreBarrierReg = { Registers::x1 };
static constexpr Register ReturnReg = { Registers::x0 };
+static constexpr Register64 ReturnReg64(ReturnReg);
static constexpr Register JSReturnReg = { Registers::x2 };
static constexpr Register FramePointer = { Registers::fp };
static constexpr Register ZeroRegister = { Registers::sp };
static constexpr ARMRegister ZeroRegister64 = { Registers::sp, 64 };
static constexpr ARMRegister ZeroRegister32 = { Registers::sp, 32 };
static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -1917,25 +1917,16 @@ class MacroAssemblerCompat : public vixl
// FIXME: See CodeGeneratorX64 calls to noteAsmJSGlobalAccess.
void patchAsmJSGlobalAccess(CodeOffset patchAt, uint8_t* code,
uint8_t* globalData, unsigned globalDataOffset)
{
MOZ_CRASH("patchAsmJSGlobalAccess");
}
- void memIntToValue(const Address& src, const Address& dest) {
- vixl::UseScratchRegisterScope temps(this);
- const Register scratch = temps.AcquireX().asUnsized();
- MOZ_ASSERT(scratch != src.base);
- MOZ_ASSERT(scratch != dest.base);
- load32(src, scratch);
- storeValue(JSVAL_TYPE_INT32, scratch, dest);
- }
-
void profilerEnterFrame(Register framePtr, Register scratch) {
AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
loadPtr(activation, scratch);
storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
void profilerExitFrame() {
branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -997,21 +997,16 @@ class MacroAssemblerMIPS64Compat : publi
protected:
bool buildOOLFakeExitFrame(void* fakeReturnAddr);
public:
CodeOffset labelForPatch() {
return CodeOffset(nextOffset().getOffset());
}
- void memIntToValue(Address Source, Address Dest) {
- load32(Source, ScratchRegister);
- storeValue(JSVAL_TYPE_INT32, ScratchRegister, Dest);
- }
-
void lea(Operand addr, Register dest) {
ma_daddu(dest, addr.baseReg(), Imm32(addr.disp()));
}
void abiret() {
as_jr(ra);
as_nop();
}
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -6,17 +6,16 @@
#ifndef jit_none_MacroAssembler_none_h
#define jit_none_MacroAssembler_none_h
#include "jit/JitCompartment.h"
#include "jit/MoveResolver.h"
#include "jit/shared/Assembler-shared.h"
-
namespace js {
namespace jit {
static MOZ_CONSTEXPR_VAR Register StackPointer = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR Register FramePointer = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR Register ReturnReg = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = { FloatRegisters::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = { FloatRegisters::invalid_reg };
@@ -67,18 +66,20 @@ static MOZ_CONSTEXPR_VAR Register RegExp
static MOZ_CONSTEXPR_VAR Register RegExpMatcherStickyReg = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR Register JSReturnReg = { Registers::invalid_reg };
#if defined(JS_NUNBOX32)
static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand(InvalidReg, InvalidReg);
+static MOZ_CONSTEXPR_VAR Register64 ReturnReg64(InvalidReg, InvalidReg);
#elif defined(JS_PUNBOX64)
static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand(InvalidReg);
+static MOZ_CONSTEXPR_VAR Register64 ReturnReg64(InvalidReg);
#else
#error "Bad architecture"
#endif
static MOZ_CONSTEXPR_VAR uint32_t ABIStackAlignment = 4;
static MOZ_CONSTEXPR_VAR uint32_t CodeAlignment = 4;
static MOZ_CONSTEXPR_VAR uint32_t JitStackAlignment = 8;
static MOZ_CONSTEXPR_VAR uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
@@ -391,17 +392,16 @@ class MacroAssemblerNone : public Assemb
void incrementInt32Value(Address) { MOZ_CRASH(); }
void ensureDouble(ValueOperand, FloatRegister, Label*) { MOZ_CRASH(); }
void handleFailureWithHandlerTail(void*) { MOZ_CRASH(); }
void buildFakeExitFrame(Register, uint32_t*) { MOZ_CRASH(); }
bool buildOOLFakeExitFrame(void*) { MOZ_CRASH(); }
void loadWasmActivation(Register) { MOZ_CRASH(); }
void loadAsmJSHeapRegisterFromGlobalData() { MOZ_CRASH(); }
- void memIntToValue(Address, Address) { MOZ_CRASH(); }
void setPrinter(Sprinter*) { MOZ_CRASH(); }
Operand ToPayload(Operand base) { MOZ_CRASH(); }
static const Register getStackPointer() { MOZ_CRASH(); }
// Instrumentation for entering and leaving the profiler.
void profilerEnterFrame(Register , Register ) { MOZ_CRASH(); }
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -78,16 +78,17 @@ struct ScratchRegisterScope : public Aut
{
explicit ScratchRegisterScope(MacroAssembler& masm)
: AutoRegisterScope(masm, ScratchReg)
{ }
};
static MOZ_CONSTEXPR_VAR Register ReturnReg = rax;
static MOZ_CONSTEXPR_VAR Register HeapReg = r15;
+static MOZ_CONSTEXPR_VAR Register64 ReturnReg64(rax);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Single);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = xmm15;
// Avoid rbp, which is the FramePointer, which is unavailable in some modes.
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -937,21 +937,16 @@ class MacroAssemblerX64 : public MacroAs
void patchAsmJSGlobalAccess(CodeOffset patchAt, uint8_t* code, uint8_t* globalData,
unsigned globalDataOffset)
{
uint8_t* nextInsn = code + patchAt.offset();
MOZ_ASSERT(nextInsn <= globalData);
uint8_t* target = globalData + globalDataOffset;
((int32_t*)nextInsn)[-1] = target - nextInsn;
}
- void memIntToValue(Address Source, Address Dest) {
- ScratchRegisterScope scratch(asMasm());
- load32(Source, scratch);
- storeValue(JSVAL_TYPE_INT32, scratch, Dest);
- }
// Instrumentation for entering and leaving the profiler.
void profilerEnterFrame(Register framePtr, Register scratch);
void profilerExitFrame();
};
typedef MacroAssemblerX64 MacroAssemblerSpecific;
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -39,16 +39,17 @@ static MOZ_CONSTEXPR_VAR FloatRegister x
static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Encoding::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = FloatRegister();
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
static MOZ_CONSTEXPR_VAR Register StackPointer = esp;
static MOZ_CONSTEXPR_VAR Register FramePointer = ebp;
static MOZ_CONSTEXPR_VAR Register ReturnReg = eax;
+static MOZ_CONSTEXPR_VAR Register64 ReturnReg64(InvalidReg, InvalidReg);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
// Avoid ebp, which is the FramePointer, which is unavailable in some modes.
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -6151,16 +6151,19 @@ JS_SetGlobalJitCompilerOption(JSRuntime*
break;
case JSJITCOMPILER_JUMP_THRESHOLD:
if (value == uint32_t(-1)) {
jit::DefaultJitOptions defaultValues;
value = defaultValues.jumpThreshold;
}
jit::JitOptions.jumpThreshold = value;
break;
+ case JSJITCOMPILER_WASM_TEST_MODE:
+ jit::JitOptions.wasmTestMode = !!value;
+ break;
default:
break;
}
}
JS_PUBLIC_API(int)
JS_GetGlobalJitCompilerOption(JSRuntime* rt, JSJitCompilerOption opt)
{
@@ -6177,16 +6180,18 @@ JS_GetGlobalJitCompilerOption(JSRuntime*
case JSJITCOMPILER_ION_ENABLE:
return JS::RuntimeOptionsRef(rt).ion();
case JSJITCOMPILER_BASELINE_ENABLE:
return JS::RuntimeOptionsRef(rt).baseline();
case JSJITCOMPILER_OFFTHREAD_COMPILATION_ENABLE:
return rt->canUseOffthreadIonCompilation();
case JSJITCOMPILER_SIGNALS_ENABLE:
return rt->canUseSignalHandlers();
+ case JSJITCOMPILER_WASM_TEST_MODE:
+ return jit::JitOptions.wasmTestMode ? 1 : 0;
default:
break;
}
#endif
return 0;
}
/************************************************************************/
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -5515,17 +5515,18 @@ JS_SetOffthreadIonCompilationEnabled(JSR
Register(BASELINE_WARMUP_TRIGGER, "baseline.warmup.trigger") \
Register(ION_WARMUP_TRIGGER, "ion.warmup.trigger") \
Register(ION_GVN_ENABLE, "ion.gvn.enable") \
Register(ION_FORCE_IC, "ion.forceinlineCaches") \
Register(ION_ENABLE, "ion.enable") \
Register(BASELINE_ENABLE, "baseline.enable") \
Register(OFFTHREAD_COMPILATION_ENABLE, "offthread-compilation.enable") \
Register(SIGNALS_ENABLE, "signals.enable") \
- Register(JUMP_THRESHOLD, "jump-threshold")
+ Register(JUMP_THRESHOLD, "jump-threshold") \
+ Register(WASM_TEST_MODE, "wasm.test-mode")
typedef enum JSJitCompilerOption {
#define JIT_COMPILER_DECLARE(key, str) \
JSJITCOMPILER_ ## key,
JIT_COMPILER_OPTIONS(JIT_COMPILER_DECLARE)
#undef JIT_COMPILER_DECLARE