Bug 1208674 - Fix ToggleCall to handle constant pools. r=sstangl
author Jakob Olesen <jolesen@mozilla.com>
Sun, 11 Oct 2015 18:13:09 +0200
changeset 267151 b541b27061614b59483c29efe0ac763f1275e775
parent 267150 d6d3fcb7c0bc0e331a38c49cd0d541ccf9b83501
child 267152 2bc41a7237cc1ed2a01217a6d76523ecb6fb56b6
push id 66402
push user archaeopteryx@coole-files.de
push date Sun, 11 Oct 2015 16:15:00 +0000
reviewers sstangl
bugs 1208674
milestone 44.0a1
Bug 1208674 - Fix ToggleCall to handle constant pools. r=sstangl

Handle constant pools inserted anywhere, even at the initial pointer location.

Don't attempt to handle constant pools with natural guard branches. They are not relevant here (and are in fact never generated at present).

Stop flushing constant pools in toggledCall(). Since ToggleCall() now handles constant pools at any point, there is no need to prevent their insertion.

Replace the NextInstruction() functions with Instruction::skipPool(), which mimics the ARM implementation.

Add Instruction::IsStackPtrSync(), which recognizes the instructions inserted by syncStackPtr().
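For orientation, here is a sketch of the patchable sequence that toggledCall() emits and ToggleCall() must now parse. Constant pools (bracketed) may be injected by the buffer at any of the indicated points; the mnemonics and offset are illustrative, and x28 as the pseudo stack pointer is inferred from the 0x9100039f encoding the old code matched:

    [constant pool]          ; may sit at the initial pointer location
    add sp, x28, #0          ; syncStackPtr(); matched by IsStackPtrSync()
    [constant pool]          ; may be injected here as well
    ldr x17, [pc, #offset]   ; load the call target from a pool entry
    blr x17                  ; when enabled; a nop when disabled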
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.h
js/src/jit/arm64/vixl/Instructions-vixl.h
js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -9,16 +9,17 @@
 #include "mozilla/DebugOnly.h"
 #include "mozilla/MathAlgorithms.h"
 
 #include "jscompartment.h"
 #include "jsutil.h"
 
 #include "gc/Marking.h"
 
+#include "jit/arm64/Architecture-arm64.h"
 #include "jit/arm64/MacroAssembler-arm64.h"
 #include "jit/ExecutableAllocator.h"
 #include "jit/JitCompartment.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::CountLeadingZeroes32;
@@ -379,27 +380,32 @@ Assembler::ToggleToCmp(CodeLocationLabel
     // From the above, there is a safe 19-bit contiguous region from 5:23.
     Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB | Flags(vixl::SetFlags) |
             Rd(vixl::xzr) | (imm19 << vixl::Rn_offset));
 }
 
 void
 Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
 {
-    Instruction* first = (Instruction*)inst_.raw();
+    const Instruction* first = reinterpret_cast<Instruction*>(inst_.raw());
     Instruction* load;
     Instruction* call;
 
-    if (first->InstructionBits() == 0x9100039f) {
-        load = (Instruction*)NextInstruction(first);
-        call = NextInstruction(load);
-    } else {
-        load = first;
-        call = NextInstruction(first);
-    }
+    // There might be a constant pool at the very first instruction.
+    first = first->skipPool();
+
+    // Skip the stack pointer restore instruction.
+    if (first->IsStackPtrSync())
+        first = first->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
+
+    load = const_cast<Instruction*>(first);
+
+    // The call instruction follows the load, but there may be an injected
+    // constant pool.
+    call = const_cast<Instruction*>(load->InstructionAtOffset(vixl::kInstructionSize)->skipPool());
 
     if (call->IsBLR() == enabled)
         return;
 
     if (call->IsBLR()) {
         // If the second instruction is blr(), then we have:
         //   ldr x17, [pc, offset]
         //   blr x17
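The hunk is truncated here. For context, a hedged sketch of the toggle itself, assuming the static patching helpers in SpiderMonkey's vixl fork and the adr-over-ldr disabled encoding implied by the surrounding code; treat names and details as illustrative rather than the committed tail of the function:

    if (call->IsBLR()) {
        // Disable: neutralize the load but keep the pool offset alive in
        // an adr to xzr, then overwrite the call with a nop.
        int32_t offset = load->ImmLLiteral();
        adr(load, vixl::xzr, offset);
        nop(call);
    } else {
        // Enable: restore the literal load into x17 and the blr through it.
        int32_t offset = int32_t(load->ImmPCRawOffset());
        ldr(load, ScratchReg2_64, offset);
        blr(call, ScratchReg2_64);
    }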
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -331,46 +331,16 @@ class Assembler : public vixl::Assembler
         uint32_t* raw = (uint32_t*)label.raw();
         // Overwrite the 4 bytes before the return address, which will end up being
         // the call instruction.
         *(raw - 1) = imm.value;
     }
     static uint32_t AlignDoubleArg(uint32_t offset) {
         MOZ_CRASH("AlignDoubleArg()");
     }
-    static Instruction* NextInstruction(Instruction* instruction, uint32_t* count = nullptr) {
-        if (count != nullptr)
-            *count += 4;
-        Instruction* cur = instruction;
-        Instruction* next = cur + 4;
-        // Artificial pool guards can only be B (rather than BR)
-        if (next->IsUncondB()) {
-            uint32_t* snd = (uint32_t*)(instruction + 8);
-            // test both the upper 16 bits, but also bit 15, which should be unset
-            // for an artificial branch guard.
-            if ((*snd & 0xffff8000) == 0xffff0000) {
-                // that was a guard before a pool, step over the pool.
-                int poolSize =  (*snd & 0x7fff);
-                return (Instruction*)(snd + poolSize);
-            }
-        } else if (cur->IsBR() || cur->IsUncondB()) {
-            // natural pool guards can be anything
-            // but they need to have bit 15 set.
-            if ((next->InstructionBits() & 0xffff0000) == 0xffff0000) {
-                int poolSize = (next->InstructionBits() & 0x7fff);
-                Instruction* ret = (next + (poolSize << 2));
-                return ret;
-            }
-        }
-        return (instruction + 4);
-
-    }
-    static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr) {
-        return (uint8_t*)NextInstruction((Instruction*)instruction, count);
-    }
     static uintptr_t GetPointer(uint8_t* ptr) {
         Instruction* i = reinterpret_cast<Instruction*>(ptr);
         uint64_t ret = i->Literal64();
         return ret;
     }
 
     // Toggle a jmp or cmp emitted by toggledJump().
     static void ToggleToJmp(CodeLocationLabel inst_);
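The 0xffff8000 and 0x7fff masks in the deleted code (and in the new skipPool() below) decode the pool header word that follows a guard branch. A minimal sketch of that layout, assuming the ARM port's PoolHeader convention; the field names here are illustrative:

    // Layout of the 32-bit pool header word, low bits first.
    struct PoolHeaderBits {
        uint32_t size : 15;       // pool size, in 32-bit words
        uint32_t isNatural : 1;   // set for a natural guard branch,
                                  // clear for an artificial one
        uint32_t ones : 16;       // all ones (0xffff): marks a pool header
    };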
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -2865,23 +2865,16 @@ class MacroAssemblerCompat : public vixl
 
     // Emit a BLR or NOP instruction. ToggleCall can be used to patch
     // this instruction.
     CodeOffsetLabel toggledCall(JitCode* target, bool enabled) {
         // The returned offset must be to the first instruction generated,
         // for the debugger to match offset with Baseline's pcMappingEntries_.
         BufferOffset offset = nextOffset();
 
-        // TODO: Random pool insertion between instructions below is terrible.
-        // Unfortunately, we can't forbid pool prevention, because we're trying
-        // to add an entry to a pool. So as a temporary fix, just flush the pool
-        // now, so that it won't add later. If you're changing this, also
-        // check ToggleCall(), which will probably break.
-        armbuffer_.flushPool();
-
         syncStackPtr();
 
         BufferOffset loadOffset;
         {
             vixl::UseScratchRegisterScope temps(this);
 
             // The register used for the load is hardcoded, so that ToggleCall
             // can patch in the branch instruction easily. This could be changed,
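The hunk is truncated inside the scratch-register scope. A hedged sketch of how the function presumably continues, with ScratchReg2_64 as the hardcoded register mentioned in the comment above; details are illustrative:

    temps.Exclude(ScratchReg2_64);  // keep the register away from the allocator

    // Emit the literal load that ToggleCall() will locate; the target
    // address lives in a constant pool entry.
    loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));

    if (enabled)
        blr(ScratchReg2_64);  // live call
    else
        nop();                // patchable placeholder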
--- a/js/src/jit/arm64/vixl/Instructions-vixl.h
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.h
@@ -262,16 +262,20 @@ class Instruction {
   bool IsNOP() const;
   bool IsADR() const;
   bool IsADRP() const;
   bool IsBranchLinkImm() const;
   bool IsTargetReachable(Instruction* target) const;
   ptrdiff_t ImmPCRawOffset() const;
   void SetBits32(int msb, int lsb, unsigned value);
 
+  // Is this a stack pointer synchronization instruction as inserted by
+  // MacroAssembler::syncStackPtr()?
+  bool IsStackPtrSync() const;
+
 #define DEFINE_SETTERS(Name, HighBit, LowBit, Func)  \
   inline void Set##Name(unsigned n) { SetBits32(HighBit, LowBit, n); }
 INSTRUCTION_FIELDS_LIST(DEFINE_SETTERS)
 #undef DEFINE_SETTERS
 
   static int ImmBranchRangeBitwidth(ImmBranchType branch_type);
   static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
 
@@ -382,16 +386,20 @@ INSTRUCTION_FIELDS_LIST(DEFINE_SETTERS)
   double LiteralFP64() const {
     return rawbits_to_double(Literal64());
   }
 
   const Instruction* NextInstruction() const {
     return this + kInstructionSize;
   }
 
+  // Skip any constant pools with artificial guards at this point.
+  // Return either |this| or the first instruction after the pool.
+  const Instruction* skipPool() const;
+
   const Instruction* InstructionAtOffset(int64_t offset) const {
     VIXL_ASSERT(IsWordAligned(this + offset));
     return this + offset;
   }
 
   template<typename T> static Instruction* Cast(T src) {
     return reinterpret_cast<Instruction*>(src);
   }
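A minimal usage sketch for the new primitive, assuming start and end bound a word-aligned code range:

    // Walk a code range, transparently hopping over any constant pools.
    for (const Instruction* insn = start; insn < end;
         insn = insn->NextInstruction()->skipPool()) {
        // ... inspect insn ...
    }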
--- a/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
@@ -19,16 +19,17 @@
 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "jit/arm64/Architecture-arm64.h"
 #include "jit/arm64/vixl/Instructions-vixl.h"
 
 namespace vixl {
 
 bool Instruction::IsUncondB() const {
   return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | B);
 }
 
@@ -124,16 +125,53 @@ ptrdiff_t Instruction::ImmPCRawOffset() 
   } else if (BranchType() == UnknownBranchType) {
     offset = ImmLLiteral();
   } else {
     offset = ImmBranch();
   }
   return offset;
 }
 
+// Is this a stack pointer synchronization instruction as inserted by
+// MacroAssembler::syncStackPtr()?
+bool
+Instruction::IsStackPtrSync() const
+{
+    // The stack pointer sync is a move to the stack pointer.
+    // This is encoded as 'add sp, Rs, #0'.
+    return IsAddSubImmediate() && Rd() == js::jit::Registers::sp && ImmAddSub() == 0;
+}
+
+// Skip over a constant pool at |this| if there is one.
+//
+// If |this| is pointing to the artificial guard branch around a constant pool,
+// return the instruction after the pool. Otherwise return |this| itself.
+//
+// This function does not skip constant pools with a natural guard branch. It
+// is assumed that anyone inspecting the instruction stream understands about
+// branches that were inserted naturally.
+const Instruction*
+Instruction::skipPool() const
+{
+    // Artificial pool guards can only be B (rather than BR), and they must be
+    // forward branches.
+    if (!IsUncondB() || ImmUncondBranch() <= 0)
+        return this;
+
+    // Check for a constant pool header which has the high 16 bits set. See
+// struct PoolHeader. Bit 15 is set for a natural pool guard; it must
+// be clear here, indicating an artificial pool guard.
+    const Instruction* header = InstructionAtOffset(kInstructionSize);
+    if (header->Mask(0xffff8000) != 0xffff0000)
+        return this;
+
+    // OK, this is an artificial jump around a constant pool.
+    return ImmPCOffsetTarget();
+}
+
 
 void Instruction::SetBits32(int msb, int lsb, unsigned value) {
   uint32_t me;
   memcpy(&me, this, sizeof(me));
   uint32_t new_mask = (1 << (msb+1)) - (1 << lsb);
   uint32_t keep_mask = ~new_mask;
   me = (me & keep_mask) | ((value << lsb) & new_mask);
   memcpy(this, &me, sizeof(me));
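A closing note tying the new predicate back to the old code: 'mov sp, x28' has no dedicated AArch64 encoding, so assemblers emit it as 'add sp, x28, #0' (mov to or from sp aliases ADD immediate), which is exactly the 0x9100039f constant the old ToggleCall() compared against. A standalone sketch mirroring the test in Instruction::IsStackPtrSync(); the free function is hypothetical:

    // Hypothetical helper; same test as Instruction::IsStackPtrSync().
    bool looksLikeStackPtrSync(const vixl::Instruction* insn) {
        return insn->IsAddSubImmediate() &&
               insn->Rd() == js::jit::Registers::sp &&
               insn->ImmAddSub() == 0;
    }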