Bug 1205621 - arm64: Handle OOMs in a safer manner. r=lth
author Jakob Olesen <jolesen@mozilla.com>
Thu, 24 Sep 2015 10:21:00 +0200
changeset 265563 0f9b612eebb193830aba1fb1994e516b05553a17
parent 265562 3b7165b9ecb7b44574aa9d07ce202ae74e1978c5
child 265564 60d1c59f129f4d08e7e0bdbff271eee88676f9c5
push id 15472
push user cbook@mozilla.com
push date Fri, 02 Oct 2015 11:51:34 +0000
treeherder fx-team@2c33ef6b27e0
reviewers lth
bugs 1205621
milestone 44.0a1
Bug 1205621 - arm64: Handle OOMs in a safer manner. r=lth

When inserting instructions that encode a pc-relative offset, don't use a method that depends on getting a pointer to the newly inserted instruction. Use the new nextInstrOffset() method when computing the encoding of the pc-relative offset, and only insert each instruction once. Propagate OOM from calls to buffer.allocEntry, following the approach in the ARM assembler.
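In short, the unsafe pattern emitted a placeholder instruction, fetched a pointer to it, and patched it in place; emitting the placeholder can flush a constant pool, and under OOM the buffer may have no valid backing memory, so that pointer is not safe to dereference. A condensed before/after sketch of the b(Label*) emitter changed below (simplified, omitting the surrounding Assembler class):

    // Before: emit a placeholder branch, then look up a pointer to it
    // and patch the offset in place. getInstructionAt() may return a
    // pointer into freed or unallocated memory after an OOM.
    BufferOffset Assembler::b(Label* label) {
      BufferOffset branch = b(0);                   // insert placeholder
      Instruction* ins = getInstructionAt(branch);  // unsafe after OOM
      b(ins, LinkAndGetInstructionOffsetTo(branch, label));  // patch
      return branch;
    }

    // After: ask the buffer where the next instruction will land
    // (flushing any pending constant pool first), compute the
    // pc-relative offset against that, and emit the fully encoded
    // branch exactly once.
    BufferOffset Assembler::b(Label* label) {
      return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), label));
    }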
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -175,17 +175,17 @@ Assembler::executableCopy(uint8_t* buffe
 
 BufferOffset
 Assembler::immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, ARMBuffer::PoolEntry* pe)
 {
     uint32_t inst = op | Rt(dest);
     const size_t numInst = 1;
     const unsigned sizeOfPoolEntryInBytes = 4;
     const unsigned numPoolEntries = sizeof(value) / sizeOfPoolEntryInBytes;
-    return armbuffer_.allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value, pe);
+    return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value, pe);
 }
 
 BufferOffset
 Assembler::immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe)
 {
     return immPool(dest, (uint8_t*)&value, vixl::LDR_x_lit, pe);
 }
 
@@ -197,17 +197,17 @@ Assembler::immPool64Branch(RepatchLabel*
 
 BufferOffset
 Assembler::fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op)
 {
     uint32_t inst = op | Rt(dest);
     const size_t numInst = 1;
     const unsigned sizeOfPoolEntryInBits = 32;
     const unsigned numPoolEntries = dest.size() / sizeOfPoolEntryInBits;
-    return armbuffer_.allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value);
+    return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value);
 }
 
 BufferOffset
 Assembler::fImmPool64(ARMFPRegister dest, double value)
 {
     return fImmPool(dest, (uint8_t*)&value, vixl::LDR_d_lit);
 }
 BufferOffset
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -231,20 +231,16 @@ class Assembler : public vixl::Assembler
     }
     size_t bytesNeeded() const {
         return SizeOfCodeGenerated() +
             jumpRelocationTableBytes() +
             dataRelocationTableBytes() +
             preBarrierTableBytes();
     }
 
-    BufferOffset nextOffset() const {
-        return armbuffer_.nextOffset();
-    }
-
     void addCodeLabel(CodeLabel label) {
         propagateOOM(codeLabels_.append(label));
     }
     size_t numCodeLabels() const {
         return codeLabels_.length();
     }
     CodeLabel codeLabel(size_t i) {
         return codeLabels_[i];
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -89,18 +89,18 @@ MacroAssemblerCompat::movePatchablePtr(I
     uint32_t instructionScratch = 0;
 
     // Emit the instruction mask in the scratch space.
     // The offset doesn't matter: it will be fixed up later.
     vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);
 
     // Add the entry to the pool, fix up the LDR imm19 offset,
     // and add the completed instruction to the buffer.
-    return armbuffer_.allocEntry(numInst, numPoolEntries,
-                                 (uint8_t*)&instructionScratch, literalAddr);
+    return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
+                      literalAddr);
 }
 
 BufferOffset
 MacroAssemblerCompat::movePatchablePtr(ImmWord ptr, Register dest)
 {
     const size_t numInst = 1; // Inserting one load instruction.
     const unsigned numPoolEntries = 2; // Every pool entry is 4 bytes.
     uint8_t* literalAddr = (uint8_t*)(&ptr.value);
@@ -115,18 +115,18 @@ MacroAssemblerCompat::movePatchablePtr(I
     uint32_t instructionScratch = 0;
 
     // Emit the instruction mask in the scratch space.
     // The offset doesn't matter: it will be fixed up later.
     vixl::Assembler::ldr((Instruction*)&instructionScratch, ARMRegister(dest, 64), 0);
 
     // Add the entry to the pool, fix up the LDR imm19 offset,
     // and add the completed instruction to the buffer.
-    return armbuffer_.allocEntry(numInst, numPoolEntries,
-                                 (uint8_t*)&instructionScratch, literalAddr);
+    return allocEntry(numInst, numPoolEntries, (uint8_t*)&instructionScratch,
+                      literalAddr);
 }
 
 void
 MacroAssemblerCompat::loadPrivate(const Address& src, Register dest)
 {
     loadPtr(src, dest);
     asMasm().lshiftPtr(Imm32(1), dest);
 }
--- a/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
@@ -115,163 +115,126 @@ BufferOffset Assembler::b(int imm19, Con
 
 
 void Assembler::b(Instruction* at, int imm19, Condition cond) {
   EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond);
 }
 
 
 BufferOffset Assembler::b(Label* label) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset branch = b(0);
-  Instruction* ins = getInstructionAt(branch);
-  VIXL_ASSERT(ins->IsUncondBranchImm());
-
-  // Encode the relative offset.
-  b(ins, LinkAndGetInstructionOffsetTo(branch, label));
-  return branch;
+  // Encode the relative offset from the inserted branch to the label.
+  return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), label));
 }
 
 
 BufferOffset Assembler::b(Label* label, Condition cond) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset branch = b(0, Always);
-  Instruction* ins = getInstructionAt(branch);
-  VIXL_ASSERT(ins->IsCondBranchImm());
-
-  // Encode the relative offset.
-  b(ins, LinkAndGetInstructionOffsetTo(branch, label), cond);
-  return branch;
+  // Encode the relative offset from the inserted branch to the label.
+  return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), label), cond);
 }
 
 
 void Assembler::bl(int imm26) {
   EmitBranch(BL | ImmUncondBranch(imm26));
 }
 
 
 void Assembler::bl(Instruction* at, int imm26) {
   EmitBranch(at, BL | ImmUncondBranch(imm26));
 }
 
 
 void Assembler::bl(Label* label) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset branch = b(0);
-  Instruction* ins = getInstructionAt(branch);
-
-  // Encode the relative offset.
-  bl(ins, LinkAndGetInstructionOffsetTo(branch, label));
+  // Encode the relative offset from the inserted branch to the label.
+  return bl(LinkAndGetInstructionOffsetTo(nextInstrOffset(), label));
 }
 
 
 void Assembler::cbz(const Register& rt, int imm19) {
   EmitBranch(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
   EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbz(const Register& rt, Label* label) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset branch = b(0);
-  Instruction* ins = getInstructionAt(branch);
-
-  // Encode the relative offset.
-  cbz(ins, rt, LinkAndGetInstructionOffsetTo(branch, label));
+  // Encode the relative offset from the inserted branch to the label.
+  return cbz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), label));
 }
 
 
 void Assembler::cbnz(const Register& rt, int imm19) {
   EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
   EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbnz(const Register& rt, Label* label) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset branch = b(0);
-  Instruction* ins = getInstructionAt(branch);
-
-  // Encode the relative offset.
-  cbnz(ins, rt, LinkAndGetInstructionOffsetTo(branch, label));
+  // Encode the relative offset from the inserted branch to the label.
+  return cbnz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), label));
 }
 
 
 void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) {
   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
   EmitBranch(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
 
 void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
   EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
 
 void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset branch = b(0);
-  Instruction* ins = getInstructionAt(branch);
-
-  // Encode the relative offset.
-  tbz(ins, rt, bit_pos, LinkAndGetInstructionOffsetTo(branch, label));
+  // Encode the relative offset from the inserted branch to the label.
+  return tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), label));
 }
 
 
 void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) {
   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
   EmitBranch(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
 
 void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
   EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
 
 void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset branch = b(0);
-  Instruction* ins = getInstructionAt(branch);
-
-  // Encode the relative offset.
-  tbnz(ins, rt, bit_pos, LinkAndGetInstructionOffsetTo(branch, label));
+  // Encode the relative offset from the inserted branch to the label.
+  return tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), label));
 }
 
 
 void Assembler::adr(const Register& rd, int imm21) {
   VIXL_ASSERT(rd.Is64Bits());
   EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd));
 }
 
 
 void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
   VIXL_ASSERT(rd.Is64Bits());
   EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd));
 }
 
 
 void Assembler::adr(const Register& rd, Label* label) {
-  // Flush the instruction buffer if necessary before getting an offset.
-  // Note that ADR is not a branch, but it encodes an offset like a branch.
-  BufferOffset offset = Emit(0);
-  Instruction* ins = getInstructionAt(offset);
-
-  // Encode the relative offset.
-  adr(ins, rd, LinkAndGetByteOffsetTo(offset, label));
+  // Encode the relative offset from the inserted adr to the label.
+  return adr(rd, LinkAndGetByteOffsetTo(nextInstrOffset(), label));
 }
 
 
 void Assembler::adrp(const Register& rd, int imm21) {
   VIXL_ASSERT(rd.Is64Bits());
   EmitBranch(ADRP | ImmPCRelAddress(imm21) | Rd(rd));
 }
 
@@ -279,23 +242,18 @@ void Assembler::adrp(const Register& rd,
 void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
   VIXL_ASSERT(rd.Is64Bits());
   EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
 }
 
 
 void Assembler::adrp(const Register& rd, Label* label) {
   VIXL_ASSERT(AllowPageOffsetDependentCode());
-
-  // Flush the instruction buffer if necessary before getting an offset.
-  BufferOffset offset = Emit(0);
-  Instruction* ins = getInstructionAt(offset);
-
-  // Encode the relative offset.
-  adrp(ins, rd, LinkAndGetPageOffsetTo(offset, label));
+  // Encode the relative offset from the inserted adrp to the label.
+  return adrp(rd, LinkAndGetPageOffsetTo(nextInstrOffset(), label));
 }
 
 
 BufferOffset Assembler::ands(const Register& rd, const Register& rn, const Operand& operand) {
   return Logical(rd, rn, operand, ANDS);
 }
 
 
--- a/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
+++ b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
@@ -83,16 +83,41 @@ class MozBaseAssembler : public js::jit:
   template <typename T>
   inline T GetLabelByteOffset(const js::jit::Label* label) {
     VIXL_ASSERT(label->bound());
     JS_STATIC_ASSERT(sizeof(T) >= sizeof(uint32_t));
     return reinterpret_cast<T>(label->offset());
   }
 
  protected:
+  // Get the buffer offset of the next inserted instruction. This may flush
+  // constant pools.
+  BufferOffset nextInstrOffset() {
+    return armbuffer_.nextInstrOffset();
+  }
+
+  // Get the next usable buffer offset. Note that a constant pool may be placed
+  // here before the next instruction is emitted.
+  BufferOffset nextOffset() const {
+    return armbuffer_.nextOffset();
+  }
+
+  // Allocate memory in the buffer by forwarding to armbuffer_.
+  // Propagate OOM errors.
+  BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
+                          uint8_t* inst, uint8_t* data,
+                          ARMBuffer::PoolEntry* pe = nullptr,
+                          bool markAsBranch = false)
+  {
+    BufferOffset offset = armbuffer_.allocEntry(numInst, numPoolEntries, inst,
+                                                data, pe, markAsBranch);
+    propagateOOM(offset.assigned());
+    return offset;
+  }
+
   // Emit the instruction, returning its offset.
   BufferOffset Emit(Instr instruction, bool isBranch = false) {
     JS_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
     return armbuffer_.putInt(*(uint32_t*)(&instruction), isBranch);
   }
 
   BufferOffset EmitBranch(Instr instruction) {
     return Emit(instruction, true);
--- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -558,16 +558,33 @@ struct AssemblerBufferWithConstantPools 
 
         // The pool entry index is returned above when allocating an entry, but
         // when not allocating an entry a dummy value is returned - it is not
         // expected to be used by the caller.
         return UINT_MAX;
     }
 
   public:
+    // Get the next buffer offset where an instruction would be inserted.
+    // This may flush the current constant pool before returning nextOffset().
+    BufferOffset nextInstrOffset()
+    {
+        size_t nextOffset = sizeExcludingCurrentPool();
+        // Is there room for a single instruction more?
+        size_t poolOffset =
+          nextOffset + (1 + guardSize_ + headerSize_) * InstSize;
+        if (pool_.checkFull(poolOffset)) {
+            JitSpew(JitSpew_Pools,
+                    "[%d] nextInstrOffset @ %d caused a constant pool spill",
+                    id, nextOffset);
+            finishPool();
+        }
+        return this->nextOffset();
+    }
+
     BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
                             uint8_t* inst, uint8_t* data, PoolEntry* pe = nullptr,
                             bool markAsBranch = false)
     {
        // The allocation of pool entries is not supported in a no-pool region,
         // check.
         MOZ_ASSERT_IF(numPoolEntries, !canNotPlacePool_);