Bug 1546446 - Carry the pool-free size to the finishPool function. r=sstangl, a=jcristau
author Nicolas B. Pierron <nicolas.b.pierron@nbp.name>
Tue, 28 May 2019 14:32:19 +0000
changeset 536644 2e7f918e6874a712a7f9ff07d30f24521a32193a
parent 536643 5588ca581cbeee519064e8e2acd80126ff770017
child 536645 9c05e6a533378ede64849ac3d6b8c1c11f7588e9
push id 2082
push user ffxbld-merge
push date Mon, 01 Jul 2019 08:34:18 +0000
treeherder mozilla-release@2fb19d0466d2
reviewers sstangl, jcristau
bugs 1546446
milestone 68.0
Bug 1546446 - Carry the pool-free size to the finishPool function. r=sstangl, a=jcristau

Differential Revision: https://phabricator.services.mozilla.com/D28816
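The core of the change is an overflow-safe deadline comparison, since flushPool() now passes SIZE_MAX as the reserved size. The sketch below is only an illustration, not part of the patch; the helper name and the standalone setup are assumptions, but it follows the mozilla::CheckedInt pattern used by the new hasExpirableShortRangeBranches(reservedBytes).

```cpp
#include "mozilla/CheckedInt.h"  // MFBT checked integer arithmetic

#include <cstddef>

// Illustrative helper (not part of the patch): does a branch whose deadline
// sits at byte offset `deadlineOffset` expire within the next `reservedBytes`
// bytes after `currentOffset`?  The sum may wrap when reservedBytes is
// SIZE_MAX (the flushPool() case), so it is computed with CheckedInt and an
// overflowed sum is treated as "yes, a deadline will expire".
static bool DeadlineWithinReservedBytes(size_t currentOffset,
                                        size_t reservedBytes,
                                        size_t deadlineOffset) {
  using CheckedSize = mozilla::CheckedInt<size_t>;
  CheckedSize future = CheckedSize(currentOffset) + reservedBytes;
  return !future.isValid() || deadlineOffset < future.value();
}
```

With reservedBytes == SIZE_MAX, as passed from flushPool(), the sum either overflows or lands at SIZE_MAX, so the check considers essentially every tracked short-range branch expirable and each one receives a veneer before the pool is dumped.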
js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
--- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=2 et sw=2 tw=80:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
 #define jit_shared_IonAssemblerBufferWithConstantPools_h
 
+#include "mozilla/CheckedInt.h"
 #include "mozilla/MathAlgorithms.h"
 
 #include <algorithm>
 
 #include "jit/JitSpewer.h"
 #include "jit/shared/IonAssemblerBuffer.h"
 
 // [SMDOC] JIT AssemblerBuffer constant pooling (ARM/ARM64/MIPS)
@@ -778,17 +779,17 @@ struct AssemblerBufferWithConstantPools
     if (!hasSpaceForInsts(numInst, numPoolEntries)) {
       if (numPoolEntries) {
         JitSpew(JitSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
       } else {
         JitSpew(JitSpew_Pools, "[%d] Inserting instruction(%zu) caused a spill",
                 id, sizeExcludingCurrentPool());
       }
 
-      finishPool();
+      finishPool(numInst * InstSize);
       if (this->oom()) {
         return OOM_FAIL;
       }
       return insertEntryForwards(numInst, numPoolEntries, inst, data);
     }
     if (numPoolEntries) {
       unsigned result = pool_.insertEntry(numPoolEntries, data,
                                           this->nextOffset(), this->lifoAlloc_);
@@ -808,17 +809,17 @@ struct AssemblerBufferWithConstantPools
  public:
   // Get the next buffer offset where an instruction would be inserted.
   // This may flush the current constant pool before returning nextOffset().
   BufferOffset nextInstrOffset() {
     if (!hasSpaceForInsts(/* numInsts= */ 1, /* numPoolEntries= */ 0)) {
       JitSpew(JitSpew_Pools,
               "[%d] nextInstrOffset @ %d caused a constant pool spill", id,
               this->nextOffset().getOffset());
-      finishPool();
+      finishPool(ShortRangeBranchHysteresis);
     }
     return this->nextOffset();
   }
 
   MOZ_NEVER_INLINE
   BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
                           uint8_t* inst, uint8_t* data,
                           PoolEntry* pe = nullptr) {
@@ -939,37 +940,48 @@ struct AssemblerBufferWithConstantPools
   void unregisterBranchDeadline(unsigned rangeIdx, BufferOffset deadline) {
     if (!this->oom()) {
       branchDeadlines_.removeDeadline(rangeIdx, deadline);
     }
   }
 
  private:
   // Are any short-range branches about to expire?
-  bool hasExpirableShortRangeBranches() const {
+  bool hasExpirableShortRangeBranches(size_t reservedBytes) const {
     if (branchDeadlines_.empty()) {
       return false;
     }
 
-    // Include branches that would expire in the next N bytes.
-    // The hysteresis avoids the needless creation of many tiny constant
-    // pools.
-    return this->nextOffset().getOffset() + ShortRangeBranchHysteresis >
-           size_t(branchDeadlines_.earliestDeadline().getOffset());
+    // Include branches that would expire in the next N bytes. The reservedBytes
+    // argument avoids the needless creation of many tiny constant pools.
+    //
+    // Since reservedBytes can be arbitrarily large, up to SIZE_MAX in the case
+    // of flushPool, we have to check for overflow when comparing the deadline
+    // with our expected reserved bytes.
+    size_t deadline = branchDeadlines_.earliestDeadline().getOffset();
+    using CheckedSize = mozilla::CheckedInt<size_t>;
+    CheckedSize current(this->nextOffset().getOffset());
+    CheckedSize poolFreeSpace(reservedBytes);
+    auto future = current + poolFreeSpace;
+    return !future.isValid() || deadline < future.value();
   }
 
-  bool isPoolEmpty() const {
-    return pool_.numEntries() == 0 && !hasExpirableShortRangeBranches();
+  bool isPoolEmptyFor(size_t bytes) const {
+    return pool_.numEntries() == 0 && !hasExpirableShortRangeBranches(bytes);
   }
-  void finishPool() {
+  void finishPool(size_t reservedBytes) {
     JitSpew(JitSpew_Pools,
             "[%d] Attempting to finish pool %zu with %u entries.", id,
             poolInfo_.length(), pool_.numEntries());
 
-    if (isPoolEmpty()) {
+    if (reservedBytes < ShortRangeBranchHysteresis) {
+      reservedBytes = ShortRangeBranchHysteresis;
+    }
+
+    if (isPoolEmptyFor(reservedBytes)) {
       // If there is no data in the pool being dumped, don't dump anything.
       JitSpew(JitSpew_Pools, "[%d] Aborting because the pool is empty", id);
       return;
     }
 
     // Should not be placing a pool in a no-pool region, check.
     MOZ_ASSERT(!canNotPlacePool_);
 
@@ -979,17 +991,17 @@ struct AssemblerBufferWithConstantPools
     BufferOffset data = this->putBytesLarge(pool_.getPoolSize(),
                                             (const uint8_t*)pool_.poolData());
     if (this->oom()) {
       return;
     }
 
     // Now generate branch veneers for any short-range branches that are
     // about to expire.
-    while (hasExpirableShortRangeBranches()) {
+    while (hasExpirableShortRangeBranches(reservedBytes)) {
       unsigned rangeIdx = branchDeadlines_.earliestDeadlineRange();
       BufferOffset deadline = branchDeadlines_.earliestDeadline();
 
       // Stop tracking this branch. The Asm callback below may register
       // new branches to track.
       branchDeadlines_.removeDeadline(rangeIdx, deadline);
 
       // Make room for the veneer. Same as a pool guard branch.
@@ -1047,17 +1059,17 @@ struct AssemblerBufferWithConstantPools
   }
 
  public:
   void flushPool() {
     if (this->oom()) {
       return;
     }
     JitSpew(JitSpew_Pools, "[%d] Requesting a pool flush", id);
-    finishPool();
+    finishPool(SIZE_MAX);
   }
 
   void enterNoPool(size_t maxInst) {
     if (this->oom()) {
       return;
     }
     // Don't allow re-entry.
     MOZ_ASSERT(!canNotPlacePool_);
@@ -1065,17 +1077,18 @@ struct AssemblerBufferWithConstantPools
 
     // Check if the pool will spill by adding maxInst instructions, and if
     // so then finish the pool before entering the no-pool region. It is
     // assumed that no pool entries are allocated in a no-pool region and
     // this is asserted when allocating entries.
     if (!hasSpaceForInsts(maxInst, 0)) {
       JitSpew(JitSpew_Pools, "[%d] No-Pool instruction(%zu) caused a spill.",
               id, sizeExcludingCurrentPool());
-      finishPool();
+      finishPool(maxInst * InstSize);
+      MOZ_ASSERT(hasSpaceForInsts(maxInst, 0));
     }
 
 #ifdef DEBUG
     // Record the buffer position to allow validating maxInst when leaving
     // the region.
     canNotPlacePoolStartOffset_ = this->nextOffset().getOffset();
     canNotPlacePoolMaxInst_ = maxInst;
 #endif
@@ -1100,17 +1113,17 @@ struct AssemblerBufferWithConstantPools
     MOZ_ASSERT(!inhibitNops_);
     inhibitNops_ = true;
   }
   void leaveNoNops() {
     MOZ_ASSERT(inhibitNops_);
     inhibitNops_ = false;
   }
   void assertNoPoolAndNoNops() {
-    MOZ_ASSERT(inhibitNops_ && (isPoolEmpty() || canNotPlacePool_));
+    MOZ_ASSERT(inhibitNops_ && (isPoolEmptyFor(InstSize) || canNotPlacePool_));
   }
 
   void align(unsigned alignment) { align(alignment, alignFillInst_); }
 
   void align(unsigned alignment, uint32_t pattern) {
     MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
     MOZ_ASSERT(alignment >= InstSize);
 
@@ -1125,17 +1138,17 @@ struct AssemblerBufferWithConstantPools
     requiredFill = alignment - requiredFill;
 
     // Add an InstSize because it is probably not useful for a pool to be
     // dumped at the aligned code position.
     if (!hasSpaceForInsts(requiredFill / InstSize + 1, 0)) {
       // Alignment would cause a pool dump, so dump the pool now.
       JitSpew(JitSpew_Pools, "[%d] Alignment of %d at %zu caused a spill.", id,
               alignment, sizeExcludingCurrentPool());
-      finishPool();
+      finishPool(requiredFill);
     }
 
     bool prevInhibitNops = inhibitNops_;
     inhibitNops_ = true;
     while ((sizeExcludingCurrentPool() & (alignment - 1)) && !this->oom()) {
       putInt(pattern);
     }
     inhibitNops_ = prevInhibitNops;
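
For reference, a minimal sketch of the hysteresis floor finishPool() applies to the reservation it receives. The constant values and the standalone EffectiveReservation helper are illustrative assumptions, not part of the patch; the real values come from the AssemblerBufferWithConstantPools template.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

// Illustrative values; in the real buffer these come from the assembler.
static constexpr size_t InstSize = 4;
static constexpr size_t ShortRangeBranchHysteresis = 32;

// finishPool(reservedBytes) never considers fewer than
// ShortRangeBranchHysteresis bytes, so small reservations (numInst * InstSize
// from insertEntryForwards, maxInst * InstSize from enterNoPool, requiredFill
// from align) still benefit from the anti-thrashing hysteresis, while
// flushPool()'s SIZE_MAX reservation veneers every pending short-range branch.
static size_t EffectiveReservation(size_t reservedBytes) {
  return std::max(reservedBytes, ShortRangeBranchHysteresis);
}
```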