Backed out changeset a36da0eea7af (bug 1277008) for warning as error in WasmBaselineCompile.cpp on OSX. r=backout on a CLOSED TREE
author: Sebastian Hengst <archaeopteryx@coole-files.de>
date: Fri, 01 Jul 2016 18:54:30 +0200
changeset 303429: fdadd7ef691eb0797103d95193d01c8483de3547
parent 303428: 2c507a30066761d77392a3ca3e73ce62b081feb1
child 303430: 6668e747f0dfe4e134b66d14a5197e41ecc58c01
push id: 30388
push user: cbook@mozilla.com
push date: Sat, 02 Jul 2016 09:15:23 +0000
treeherder: mozilla-central@39dffbba7642
reviewers: backout
bugs: 1277008
milestone: 50.0a1
backs out: a36da0eea7af1bdb7e51b69d5aa9454b98689ac3
Backed out changeset a36da0eea7af (bug 1277008) for warning as error in WasmBaselineCompile.cpp on OSX. r=backout on a CLOSED TREE
js/src/asmjs/WasmBaselineCompile.cpp
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -151,92 +151,51 @@ typedef bool HandleNaNSpecially;
 
 #ifdef JS_CODEGEN_ARM64
 // FIXME: This is not correct, indeed for ARM64 there is no reliable
 // StackPointer and we'll need to change the abstractions that use
 // SP-relative addressing.  There's a guard in emitFunction() below to
 // prevent this workaround from having any consequence.  This hack
 // exists only as a stopgap; there is no ARM64 JIT support yet.
 static const Register StackPointer = RealStackPointer;
+
+// FIXME: This should somehow use vixl::UseScratchRegisterScope, or we
+// should define our own scratch register independent of the masm.
+class ScratchRegisterScope
+{
+  public:
+    ScratchRegisterScope(MacroAssembler& masm) {}
+    operator Register() const { return ScratchReg; }
+};
 #endif
 
 #ifdef JS_CODEGEN_X86
 // The selection of EBX here steps gingerly around: the need for EDX
 // to be allocatable for multiply/divide; ECX to be allocatable for
 // shift/rotate; EAX (= ReturnReg) to be allocatable as the joinreg;
 // EBX not being one of the WasmTableCall registers; and needing a
 // temp register for load/store that has a single-byte persona.
 static const Register ScratchRegX86 = ebx;
+
+// FIXME: We want this to have teeth.  One way to ensure that is to
+// pass BaseCompiler to ScratchRegisterScope instead of masm, and then
+// have a property on BaseCompiler that tracks availability.  On other
+// platforms than x86 we'd delegate from our private
+// ScratchRegisterScope to the standard one by inheritance, passing
+// BaseCompiler->masm to the base constructor.
+class ScratchRegisterScope
+{
+  public:
+    ScratchRegisterScope(MacroAssembler& masm) {}
+    operator Register() const { return ScratchRegX86; }
+};
 #endif
 
 class BaseCompiler
 {
-    // We define our own ScratchRegister abstractions, deferring to
-    // the platform's when possible.
-
-#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
-    typedef ScratchDoubleScope ScratchF64;
-#else
-    class ScratchF64
-    {
-      public:
-        ScratchF64(BaseCompiler& b) {}
-        operator FloatRegister() const {
-            MOZ_CRASH("BaseCompiler platform hook - ScratchF64");
-        }
-    };
-#endif
-
-#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
-    typedef ScratchFloat32Scope ScratchF32;
-#else
-    class ScratchF32
-    {
-      public:
-        ScratchF32(BaseCompiler& b) {}
-        operator FloatRegister() const {
-            MOZ_CRASH("BaseCompiler platform hook - ScratchF32");
-        }
-    };
-#endif
-
-#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
-    typedef ScratchRegisterScope ScratchI32;
-#elif defined(JS_CODEGEN_X86)
-    class ScratchI32
-    {
-        BaseCompiler& bc;
-      public:
-        ScratchI32(BaseCompiler& bc) : bc(bc) {
-#ifdef DEBUG
-            MOZ_ASSERT(!bc.scratchRegisterTaken());
-            bc.setScratchRegisterTaken(true);
-#endif
-        }
-        ~ScratchI32() {
-#ifdef DEBUG
-            MOZ_ASSERT(bc.scratchRegisterTaken());
-            bc.setScratchRegisterTaken(false);
-#endif
-        }
-        operator Register() const {
-            return ScratchRegX86;
-        }
-    };
-#else
-    class ScratchI32
-    {
-      public:
-        ScratchI32(BaseCompiler& b) {}
-        operator Register() const {
-            MOZ_CRASH("BaseCompiler platform hook - ScratchI32");
-        }
-    };
-#endif
-
     // A Label in the code, allocated out of a temp pool in the
     // TempAllocator attached to the compilation.
 
     struct PooledLabel : public Label, public TempObject, public InlineListNode<PooledLabel>
     {
         PooledLabel() : f(nullptr) {}
         explicit PooledLabel(BaseCompiler* f) : f(f) {}
         BaseCompiler* f;
@@ -423,19 +382,16 @@ class BaseCompiler
     Label                       outOfLinePrologue_;
     Label                       bodyLabel_;
 
     FuncCompileResults&         compileResults_;
     MacroAssembler&             masm;            // No '_' suffix - too tedious...
 
     AllocatableGeneralRegisterSet availGPR_;
     AllocatableFloatRegisterSet availFPU_;
-#ifdef DEBUG
-    bool                        scratchRegisterTaken_;
-#endif
 
     TempObjectPool<PooledLabel> labelPool_;
 
     Vector<Local, 8, SystemAllocPolicy> localInfo_;
     Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
 
     // On specific platforms we sometimes need to use specific registers.
 
@@ -472,28 +428,16 @@ class BaseCompiler
     MOZ_MUST_USE
     bool init();
 
     void finish();
 
     MOZ_MUST_USE
     bool emitFunction();
 
-    // Used by some of the ScratchRegister implementations.
-    operator MacroAssembler&() const { return masm; }
-
-#ifdef DEBUG
-    bool scratchRegisterTaken() const {
-        return scratchRegisterTaken_;
-    }
-    void setScratchRegisterTaken(bool state) {
-        scratchRegisterTaken_ = state;
-    }
-#endif
-
   private:
 
     ////////////////////////////////////////////////////////////
     //
     // Out of line code management.
 
     MOZ_MUST_USE
     OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool) {
@@ -1063,31 +1007,35 @@ class BaseCompiler
                 break;
             }
         }
 
         for (size_t i = start; i < lim; i++) {
             Stk& v = stk_[i];
             switch (v.kind()) {
               case Stk::LocalI32: {
-                ScratchI32 scratch(*this);
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+                ScratchRegisterScope scratch(masm);
                 loadLocalI32(scratch, v);
                 masm.Push(scratch);
+#else
+                MOZ_CRASH("BaseCompiler platform hook: sync LocalI32");
+#endif
                 v.setOffs(Stk::MemI32, masm.framePushed());
                 break;
               }
               case Stk::RegisterI32: {
                 masm.Push(v.i32reg().reg);
                 freeI32(v.i32reg());
                 v.setOffs(Stk::MemI32, masm.framePushed());
                 break;
               }
               case Stk::LocalI64: {
 #ifdef JS_PUNBOX64
-                ScratchI32 scratch(*this);
+                ScratchRegisterScope scratch(masm);
                 loadI64(Register64(scratch), v);
                 masm.Push(scratch);
 #else
                 MOZ_CRASH("BaseCompiler platform hook: sync LocalI64");
 #endif
                 v.setOffs(Stk::MemI64, masm.framePushed());
                 break;
               }
@@ -1097,32 +1045,40 @@ class BaseCompiler
                 freeI64(v.i64reg());
 #else
                 MOZ_CRASH("BaseCompiler platform hook: sync RegI64");
 #endif
                 v.setOffs(Stk::MemI64, masm.framePushed());
                 break;
               }
               case Stk::LocalF64: {
-                ScratchF64 scratch(*this);
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+                ScratchDoubleScope scratch(masm);
                 loadF64(scratch, v);
                 masm.Push(scratch);
+#else
+                MOZ_CRASH("BaseCompiler platform hook: sync LocalF64");
+#endif
                 v.setOffs(Stk::MemF64, masm.framePushed());
                 break;
               }
               case Stk::RegisterF64: {
                 masm.Push(v.f64reg().reg);
                 freeF64(v.f64reg());
                 v.setOffs(Stk::MemF64, masm.framePushed());
                 break;
               }
               case Stk::LocalF32: {
-                ScratchF32 scratch(*this);
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+                ScratchFloat32Scope scratch(masm);
                 loadF32(scratch, v);
                 masm.Push(scratch);
+#else
+                MOZ_CRASH("BaseCompiler platform hook: sync LocalF32");
+#endif
                 v.setOffs(Stk::MemF32, masm.framePushed());
                 break;
               }
               case Stk::RegisterF32: {
                 masm.Push(v.f32reg().reg);
                 freeF32(v.f32reg());
                 v.setOffs(Stk::MemF32, masm.framePushed());
                 break;
@@ -1743,20 +1699,24 @@ class BaseCompiler
         // TODO / OPTIMIZE: On SSE2 or better SIMD systems we may be
         // able to store 128 bits at a time.  (I suppose on some
         // systems we have 512-bit SIMD for that matter.)
         //
         // TODO / OPTIMIZE: if we have only one initializing store
         // then it's better to store a zero literal, probably.
 
         if (varLow_ < varHigh_) {
-            ScratchI32 scratch(*this);
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+            ScratchRegisterScope scratch(masm);
             masm.mov(ImmWord(0), scratch);
             for (int32_t i = varLow_ ; i < varHigh_ ; i+=4)
                 storeToFrameI32(scratch, i+4);
+#else
+            MOZ_CRASH("BaseCompiler platform hook: init frame");
+#endif
         }
     }
 
     bool endFunction() {
         // Out-of-line prologue.  Assumes that the in-line prologue has
         // been executed and that a frame of size = localSize_ + sizeof(AsmJSFrame)
         // has been allocated.
 
@@ -1920,56 +1880,68 @@ class BaseCompiler
     // we have the outgoing size at low cost, and then we can pass
     // args based on the info we read.
 
     void passArg(FunctionCall& call, ValType type, Stk& arg) {
         switch (type) {
           case ValType::I32: {
             ABIArg argLoc = call.abi_.next(MIRType::Int32);
             if (argLoc.kind() == ABIArg::Stack) {
-                ScratchI32 scratch(*this);
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+                ScratchRegisterScope scratch(masm);
                 loadI32(scratch, arg);
                 masm.store32(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+#else
+                MOZ_CRASH("BaseCompiler platform hook: passArg");
+#endif
             } else {
                 loadI32(argLoc.reg().gpr(), arg);
             }
             break;
           }
           case ValType::I64: {
 #ifdef JS_CODEGEN_X64
             ABIArg argLoc = call.abi_.next(MIRType::Int64);
             if (argLoc.kind() == ABIArg::Stack) {
-                ScratchI32 scratch(*this);
+                ScratchRegisterScope scratch(masm);
                 loadI64(Register64(scratch), arg);
                 masm.movq(scratch, Operand(StackPointer, argLoc.offsetFromArgBase()));
             } else {
                 loadI64(Register64(argLoc.reg().gpr()), arg);
             }
 #else
             MOZ_CRASH("BaseCompiler platform hook: passArg I64");
 #endif
             break;
           }
           case ValType::F64: {
             ABIArg argLoc = call.abi_.next(MIRType::Double);
             if (argLoc.kind() == ABIArg::Stack) {
-                ScratchF64 scratch(*this);
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+                ScratchDoubleScope scratch(masm);
                 loadF64(scratch, arg);
                 masm.storeDouble(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+#else
+                MOZ_CRASH("BaseCompiler platform hook: passArg F64");
+#endif
             } else {
                 loadF64(argLoc.reg().fpu(), arg);
             }
             break;
           }
           case ValType::F32: {
             ABIArg argLoc = call.abi_.next(MIRType::Float32);
             if (argLoc.kind() == ABIArg::Stack) {
-                ScratchF32 scratch(*this);
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+                ScratchFloat32Scope scratch(masm);
                 loadF32(scratch, arg);
                 masm.storeFloat32(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+#else
+                MOZ_CRASH("BaseCompiler platform hook: passArg F32");
+#endif
             } else {
                 loadF32(argLoc.reg().fpu(), arg);
             }
             break;
           }
           default:
             MOZ_CRASH("Function argument type");
         }
@@ -2007,17 +1979,17 @@ class BaseCompiler
             masm.branch32(Assembler::Condition::AboveOrEqual, ptrReg, Imm32(length),
                           wasm::JumpTarget::OutOfBounds);
             masm.move32(Imm32(sigIndex), WasmTableCallSigReg);
         }
 
 #if defined(JS_CODEGEN_X64)
         // CodeGeneratorX64::visitAsmJSLoadFuncPtr()
         {
-            ScratchI32 scratch(*this);
+            ScratchRegisterScope scratch(masm);
             CodeOffset label = masm.leaRipRelative(scratch);
             masm.loadPtr(Operand(scratch, ptrReg, TimesEight, 0), ptrReg);
             masm.append(AsmJSGlobalAccess(label, globalDataOffset));
         }
 #elif defined(JS_CODEGEN_X86)
         // CodeGeneratorX86::visitAsmJSLoadFuncPtr()
         CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), ptrReg, TimesFour, ptrReg);
         masm.append(AsmJSGlobalAccess(label, globalDataOffset));
@@ -2076,17 +2048,17 @@ class BaseCompiler
         }
 #else
         MOZ_CRASH("BaseCompiler platform hook: jumpTable");
 #endif
     }
 
     void tableSwitch(Label* theTable, RegI32 switchValue) {
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
-        ScratchI32 scratch(*this);
+        ScratchRegisterScope scratch(masm);
         CodeLabel tableCl;
 
         masm.mov(tableCl.patchAt(), scratch);
 
         tableCl.target()->bind(theTable->offset());
         masm.addCodeLabel(tableCl);
 
         masm.jmp(Operand(scratch, switchValue.reg, ScalePointer));
@@ -2229,17 +2201,17 @@ class BaseCompiler
         masm.bind(&notMin);
     }
 
     void checkDivideSignedOverflowI64(RegI64 rhs, RegI64 srcDest, Label* done, bool zeroOnOverflow) {
         MOZ_ASSERT(!isCompilingAsmJS());
 #ifdef JS_CODEGEN_X64
         Label notMin;
         {
-            ScratchI32 scratch(*this);
+            ScratchRegisterScope scratch(masm);
             masm.move64(Imm64(INT64_MIN), Register64(scratch));
             masm.cmpq(scratch, srcDest.reg.reg);
         }
         masm.j(Assembler::NotEqual, &notMin);
         masm.cmpq(Imm32(-1), rhs.reg.reg);
         if (zeroOnOverflow) {
             masm.j(Assembler::NotEqual, &notMin);
             masm.xorq(srcDest.reg.reg, srcDest.reg.reg);
@@ -6063,19 +6035,16 @@ BaseCompiler::BaseCompiler(const ModuleG
       localSize_(0),
       varLow_(0),
       varHigh_(0),
       maxFramePushed_(0),
       compileResults_(compileResults),
       masm(compileResults_.masm()),
       availGPR_(GeneralRegisterSet::All()),
       availFPU_(FloatRegisterSet::All()),
-#ifdef DEBUG
-      scratchRegisterTaken_(false),
-#endif
 #ifdef JS_CODEGEN_X64
       specific_rax(RegI64(Register64(rax))),
       specific_rcx(RegI64(Register64(rcx))),
       specific_rdx(RegI64(Register64(rdx))),
 #endif
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
       specific_eax(RegI32(eax)),
       specific_ecx(RegI32(ecx)),
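
For context on the scratch-register discipline this backout touches: below is a minimal standalone sketch of the RAII pattern that the backed-out ScratchI32 implemented on x86, where a DEBUG-only flag on the compiler catches overlapping scratch uses (the "teeth" the x86 FIXME in the diff asks for), while the backout returns to the bare per-platform ScratchRegisterScope. The names FakeCompiler and the integer Register stand-in are hypothetical; this is an illustration, not SpiderMonkey code.

#include <cassert>

using Register = int;                    // hypothetical stand-in for the real Register type
static const Register ScratchRegX86 = 3; // stand-in for ebx, per the x86 comment in the diff

class FakeCompiler
{
    bool scratchRegisterTaken_ = false;  // DEBUG-only in the backed-out patch
  public:
    bool scratchRegisterTaken() const { return scratchRegisterTaken_; }
    void setScratchRegisterTaken(bool state) { scratchRegisterTaken_ = state; }
};

// RAII guard: reserves the scratch register for the lifetime of the scope,
// so an overlapping use asserts instead of silently clobbering the register.
class ScratchI32
{
    FakeCompiler& bc;
  public:
    explicit ScratchI32(FakeCompiler& bc) : bc(bc) {
        assert(!bc.scratchRegisterTaken());
        bc.setScratchRegisterTaken(true);
    }
    ~ScratchI32() {
        assert(bc.scratchRegisterTaken());
        bc.setScratchRegisterTaken(false);
    }
    operator Register() const { return ScratchRegX86; }
};

int main()
{
    FakeCompiler bc;
    {
        ScratchI32 scratch(bc);   // scratch register reserved here
        Register r = scratch;     // implicit conversion, as in the diff's uses
        (void)r;
        // Constructing a second ScratchI32 here would trip the assert.
    }                             // released at end of scope
    ScratchI32 again(bc);         // fine: the previous scope has ended
    (void)again;
    return 0;
}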