Bug 1518331 - corner case optimization. r=luke
authorLars T Hansen <lhansen@mozilla.com>
Tue, 08 Jan 2019 17:48:00 +0100
changeset 510132 0985217ed619be2fc84eeefc895321f4c88dc21e
parent 510131 16819308720e1aa1228ff31f84bd233599fb5e23
child 510133 a4b8887afbc97edd031cd4ac4fd04317dd28650a
push id: 10547
push user: ffxbld-merge
push date: Mon, 21 Jan 2019 13:03:58 +0000
treeherder: mozilla-beta@24ec1916bffe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: luke
bugs: 1518331
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1518331 - corner case optimization. r=luke
js/src/jit/x86/Lowering-x86.cpp
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -251,16 +251,43 @@ void LIRGenerator::visitWasmUnsignedToDo
 
 // Lower a wasm unsigned-int32 -> float32 conversion.  The input is pinned
 // at-start to its register, and a temp is allocated for the conversion
 // (presumably because x86 has no direct unsigned-to-float instruction and
 // the codegen needs scratch space — confirm against LWasmUint32ToFloat32's
 // code generator).
 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
   MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
   LWasmUint32ToFloat32* lir = new (alloc())
       LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
   define(lir, ins);
 }
 
+// If the base is a constant, and it is zero or its offset is zero, then
+// code generation will fold the values into the access.  Allocate the
+// pointer to a register only if that can't happen.
+
+// Returns true if the (constant) base plus the access's offset can be
+// folded entirely into the memory operand, so no register is needed for
+// the base.  Preconditions (asserted): `base` is a constant Int32.
+static bool OptimizableConstantAccess(MDefinition* base,
+                                      const wasm::MemoryAccessDesc& access) {
+  MOZ_ASSERT(base->isConstant());
+  MOZ_ASSERT(base->type() == MIRType::Int32);
+
+  // Foldable only when at least one of base and offset is zero; otherwise
+  // codegen would have to materialize the sum anyway.
+  if (!(base->toConstant()->isInt32(0) || access.offset() == 0)) {
+    return false;
+  }
+  if (access.type() == Scalar::Int64) {
+    // For int64 accesses on 32-bit systems we will need to add another offset
+    // of 4 to access the high part of the value; make sure this does not
+    // overflow the value.
+    int32_t v;
+    if (base->toConstant()->isInt32(0)) {
+      // NOTE(review): access.offset() is presumably uint32_t; the narrowing
+      // to int32_t makes a value > INT32_MAX appear negative and thus pass
+      // the check below — confirm offsets are range-limited by validation.
+      v = access.offset();
+    } else {
+      v = base->toConstant()->toInt32();
+    }
+    // The effective address of the high word is v + INT64HIGH_OFFSET (4);
+    // reject values where that addition would overflow int32.
+    return v <= int32_t(INT32_MAX - INT64HIGH_OFFSET);
+  }
+  return true;
+}
+
 void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
   MDefinition* base = ins->base();
   MOZ_ASSERT(base->type() == MIRType::Int32);
 
   MDefinition* memoryBase = ins->memoryBase();
   MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
 
   if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
@@ -268,23 +295,18 @@ void LIRGenerator::visitWasmLoad(MWasmLo
         LWasmAtomicLoadI64(useRegister(memoryBase), useRegister(base),
                            tempFixed(ecx), tempFixed(ebx));
     defineInt64Fixed(lir, ins,
                      LInt64Allocation(LAllocation(AnyRegister(edx)),
                                       LAllocation(AnyRegister(eax))));
     return;
   }
 
-  // If the base is a constant, and it is zero or its offset is zero, then
-  // code generation will fold the values into the access.  Allocate the
-  // pointer to a register only if that can't happen.
-
   LAllocation baseAlloc;
-  if (!base->isConstant() ||
-      !(base->toConstant()->isInt32(0) || ins->access().offset() == 0)) {
+  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
     baseAlloc = ins->type() == MIRType::Int64 ? useRegister(base)
                                               : useRegisterAtStart(base);
   }
 
   if (ins->type() != MIRType::Int64) {
     auto* lir =
         new (alloc()) LWasmLoad(baseAlloc, useRegisterAtStart(memoryBase));
     define(lir, ins);
@@ -321,23 +343,18 @@ void LIRGenerator::visitWasmStore(MWasmS
     auto* lir = new (alloc())
         LWasmAtomicStoreI64(useRegister(memoryBase), useRegister(base),
                             useInt64Fixed(ins->value(), Register64(ecx, ebx)),
                             tempFixed(edx), tempFixed(eax));
     add(lir, ins);
     return;
   }
 
-  // If the base is a constant, and it is zero or its offset is zero, then
-  // code generation will fold the values into the access.  Allocate the
-  // pointer to a register only if that can't happen.
-
   LAllocation baseAlloc;
-  if (!base->isConstant() ||
-      !(base->toConstant()->isInt32(0) || ins->access().offset() == 0)) {
+  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
     baseAlloc = useRegisterAtStart(base);
   }
 
   LAllocation valueAlloc;
   switch (ins->access().type()) {
     case Scalar::Int8:
     case Scalar::Uint8:
       // See comment for LIRGeneratorX86::useByteOpRegister.