Bug 1207827 - Remove ARM64 temporary offset buffers. r=nbp
author: Jakob Olesen <jolesen@mozilla.com>
Mon, 26 Oct 2015 17:06:09 -0700
changeset 269674 3bb6cd5b7e5635c826c630a0ab237af4aec768de
parent 269673 5beca478137361e9e6c212577d0d694be42d2a56
child 269675 0c6da2dc2bc49cd7c79496cac9ec6e6f597047d7
push id: 15905
push user: cbook@mozilla.com
push date: Tue, 27 Oct 2015 09:59:36 +0000
treeherder: fx-team@60acc8a9cfb5 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: nbp
bugs: 1207827
milestone: 44.0a1
Bug 1207827 - Remove ARM64 temporary offset buffers. r=nbp The ARM64 assembler no longer needs to keep track of code offsets for later translation to 'final' offsets. The AssemblerBuffer offsets are directly usable now. Remove tmpDataRelocations_, tmpPreBarriers_, tmpJumpRelocations_, and the finalOffset() method.
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.h
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -78,32 +78,23 @@ Assembler::finish()
     armbuffer_.flushPool();
 
     // The extended jump table is part of the code buffer.
     ExtendedJumpTable_ = emitExtendedJumpTable();
     Assembler::FinalizeCode();
 
     // The jump relocation table starts with a fixed-width integer pointing
     // to the start of the extended jump table.
-    if (tmpJumpRelocations_.length())
-        jumpRelocations_.writeFixedUint32_t(toFinalOffset(ExtendedJumpTable_));
-
-    for (unsigned int i = 0; i < tmpJumpRelocations_.length(); i++) {
-        JumpRelocation& reloc = tmpJumpRelocations_[i];
-
-        // Each entry in the relocations table is an (offset, extendedTableIndex) pair.
-        jumpRelocations_.writeUnsigned(toFinalOffset(reloc.jump));
-        jumpRelocations_.writeUnsigned(reloc.extendedTableIndex);
+    // Space for this integer is allocated by Assembler::addJumpRelocation()
+    // before writing the first entry.
+    // Don't touch memory if we saw an OOM error.
+    if (jumpRelocations_.length() && !oom()) {
+        MOZ_ASSERT(jumpRelocations_.length() >= sizeof(uint32_t));
+        *(uint32_t*)jumpRelocations_.buffer() = ExtendedJumpTable_.getOffset();
     }
-
-    for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++)
-        dataRelocations_.writeUnsigned(toFinalOffset(tmpDataRelocations_[i]));
-
-    for (unsigned int i = 0; i < tmpPreBarriers_.length(); i++)
-        preBarriers_.writeUnsigned(toFinalOffset(tmpPreBarriers_[i]));
 }
 
 BufferOffset
 Assembler::emitExtendedJumpTable()
 {
     if (!pendingJumps_.length() || oom())
         return BufferOffset();
 
@@ -154,19 +145,19 @@ Assembler::executableCopy(uint8_t* buffe
         if (!rp.target) {
             // The patch target is nullptr for jumps that have been linked to
             // a label within the same code block, but may be repatched later
             // to jump to a different code block.
             continue;
         }
 
         Instruction* target = (Instruction*)rp.target;
-        Instruction* branch = (Instruction*)(buffer + toFinalOffset(rp.offset));
+        Instruction* branch = (Instruction*)(buffer + rp.offset.getOffset());
         JumpTableEntry* extendedJumpTable =
-            reinterpret_cast<JumpTableEntry*>(buffer + toFinalOffset(ExtendedJumpTable_));
+            reinterpret_cast<JumpTableEntry*>(buffer + ExtendedJumpTable_.getOffset());
         if (branch->BranchType() != vixl::UnknownBranchType) {
             if (branch->IsTargetReachable(target)) {
                 branch->SetImmPCOffsetTarget(target);
             } else {
                 JumpTableEntry* entry = &extendedJumpTable[i];
                 branch->SetImmPCOffsetTarget(entry->getLdr());
                 entry->data = target;
             }
@@ -296,18 +287,26 @@ Assembler::trace(JSTracer* trc)
 }
 
 void
 Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
 {
     // Only JITCODE relocations are patchable at runtime.
     MOZ_ASSERT(reloc == Relocation::JITCODE);
 
-    // Each relocation requires an entry in the extended jump table.
-    tmpJumpRelocations_.append(JumpRelocation(src, pendingJumps_.length()));
+    // The jump relocation table starts with a fixed-width integer pointing
+    // to the start of the extended jump table. But, we don't know the
+    // actual extended jump table offset yet, so write a 0 which we'll
+    // patch later in Assembler::finish().
+    if (!jumpRelocations_.length())
+        jumpRelocations_.writeFixedUint32_t(0);
+
+    // Each entry in the table is an (offset, extendedTableIndex) pair.
+    jumpRelocations_.writeUnsigned(src.getOffset());
+    jumpRelocations_.writeUnsigned(pendingJumps_.length());
 }
 
 void
 Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc)
 {
     MOZ_ASSERT(target.value != nullptr);
 
     if (reloc == Relocation::JITCODE)
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -337,21 +337,16 @@ class Assembler : public vixl::Assembler
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
     static int32_t ExtractCodeLabelOffset(uint8_t* code);
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
 
     static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
                                     const ObjectVector& nurseryObjects);
 
-    // Convert a BufferOffset to a final byte offset from the start of the code buffer.
-    size_t toFinalOffset(BufferOffset offset) {
-        return size_t(offset.getOffset());
-    }
-
   public:
     // A Jump table entry is 2 instructions, with 8 bytes of raw data
     static const size_t SizeOfJumpTableEntry = 16;
 
     struct JumpTableEntry
     {
         uint32_t ldr;
         uint32_t br;
@@ -397,23 +392,16 @@ class Assembler : public vixl::Assembler
         BufferOffset jump; // Offset to the short jump, from the start of the code buffer.
         uint32_t extendedTableIndex; // Unique index within the extended jump table.
 
         JumpRelocation(BufferOffset jump, uint32_t extendedTableIndex)
           : jump(jump), extendedTableIndex(extendedTableIndex)
         { }
     };
 
-    // Because ARM and A64 use a code buffer that allows for constant pool insertion,
-    // the actual offset of each jump cannot be known until finalization.
-    // These vectors store the WIP offsets.
-    js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
-    js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
-    js::Vector<JumpRelocation, 0, SystemAllocPolicy> tmpJumpRelocations_;
-
     // Structure for fixing up pc-relative loads/jumps when the machine
     // code gets moved (executable copy, gc, etc.).
     struct RelativePatch
     {
         BufferOffset offset;
         void* target;
         Relocation::Kind kind;
 
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -2527,29 +2527,29 @@ class MacroAssemblerCompat : public vixl
         BufferOffset offset = b(label, Always);
         CodeOffsetLabel ret(offset.getOffset());
         return ret;
     }
 
     // load: offset to the load instruction obtained by movePatchablePtr().
     void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
         if (ptr.value)
-            tmpDataRelocations_.append(load);
+            dataRelocations_.writeUnsigned(load.getOffset());
     }
     void writeDataRelocation(const Value& val, BufferOffset load) {
         if (val.isMarkable()) {
             gc::Cell* cell = reinterpret_cast<gc::Cell*>(val.toGCThing());
             if (cell && gc::IsInsideNursery(cell))
                 embedsNurseryPointers_ = true;
-            tmpDataRelocations_.append(load);
+            dataRelocations_.writeUnsigned(load.getOffset());
         }
     }
 
     void writePrebarrierOffset(CodeOffsetLabel label) {
-        tmpPreBarriers_.append(BufferOffset(label.offset()));
+        preBarriers_.writeUnsigned(label.offset());
     }
 
     void computeEffectiveAddress(const Address& address, Register dest) {
         Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset));
     }
     void computeEffectiveAddress(const BaseIndex& address, Register dest) {
         ARMRegister dest64(dest, 64);
         ARMRegister base64(address.base, 64);