Attempt to fix the issues by disabling IM when we run into a corner case, rev 3, looks good on try (bug 805299, r=dvander)
author Marty Rosenberg <mrosenberg@mozilla.com>
Sun, 02 Dec 2012 22:21:57 -0500
changeset 115408 21aeb525c630
parent 115407 c38be1281766
child 115409 2451057af100
push id 24003
push user eakhgari@mozilla.com
push date 2012-12-09 18:17 +0000
treeherder mozilla-central@725eb8792d27
reviewers dvander
bugs 805299
milestone 20.0a1
js/src/ion/IonLinker.h
js/src/ion/IonMacroAssembler.h
js/src/ion/arm/Assembler-arm.cpp
js/src/ion/shared/IonAssemblerBuffer.h
js/src/ion/shared/IonAssemblerBufferWithConstantPools.h
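The strategy, in brief: rather than teach the constant-pool machinery to handle a rare corner case, the ARM assembler buffer gains a second failure flag, m_bail, and folds it into oom(). Every caller that already guards on oom() therefore also aborts on a bail, the Ion compilation is abandoned, and the script simply runs without IonMonkey, which is the "disabling IM" of the commit message. A minimal standalone sketch of the pattern (toy types, not the actual SpiderMonkey classes):

    // Two failure causes, one reporting path: callers keep testing
    // oom(), so a bail aborts compilation through the exact same
    // guards that already handle allocation failure.
    struct ToyBuffer {
        bool m_oom, m_bail;
        ToyBuffer() : m_oom(false), m_bail(false) {}
        void fail_oom()  { m_oom = true; }   // real allocation failure
        void fail_bail() { m_bail = true; }  // unsupported corner case
        bool oom() const { return m_oom || m_bail; }
        bool bail() const { return m_bail; } // for callers that must
                                             // tell the two apart
    };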
--- a/js/src/ion/IonLinker.h
+++ b/js/src/ion/IonLinker.h
@@ -50,16 +50,18 @@ class Linker
 
         // Bump the code up to a nice alignment.
         codeStart = (uint8_t *)AlignBytes((uintptr_t)codeStart, CodeAlignment);
         uint32_t headerSize = codeStart - result;
         IonCode *code = IonCode::New(cx, codeStart,
                                      bytesNeeded - headerSize, pool);
         if (!code)
             return NULL;
+        if (masm.oom())
+            return fail(cx);
         code->copyFrom(masm);
         masm.link(code);
         return code;
     }
 
   public:
     Linker(MacroAssembler &masm)
       : masm(masm)
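Two notes on the Linker hunk: the new masm.oom() check runs after IonCode::New() succeeds but before copyFrom(), so a buffer that bailed late in assembly is never copied into executable memory; and the AlignBytes() call above it is the usual align-up idiom for power-of-two alignments. A self-contained rendering of that idiom (illustrative; the real AlignBytes lives elsewhere in the tree):

    #include <stdint.h>

    // Round v up to a power-of-two alignment: add (align - 1), then
    // clear the low bits. With align == 8, 0x1001 rounds up to 0x1008
    // and 0x1008 is unchanged.
    static inline uintptr_t AlignUp(uintptr_t v, uintptr_t align) {
        return (v + align - 1) & ~(align - 1);
    }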
--- a/js/src/ion/IonMacroAssembler.h
+++ b/js/src/ion/IonMacroAssembler.h
@@ -498,17 +498,17 @@ class MacroAssembler : public MacroAssem
         Push(ImmWord(uintptr_t(NULL)));
     }
 
     void leaveExitFrame() {
         freeStack(IonExitFooterFrame::Size());
     }
 
     void link(IonCode *code) {
-
+        JS_ASSERT(!oom());
         // If this code can transition to C++ code and witness a GC, then we need to store
         // the IonCode onto the stack in order to GC it correctly.  exitCodePatch should
         // be unset if the code never needed to push its IonCode*.
         if (exitCodePatch_.offset() != 0) {
             patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
                                     ImmWord(uintptr_t(code)),
                                     ImmWord(uintptr_t(-1)));
         }
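The new assertion makes the contract of link() explicit: patchDataWithValueCheck() consumes offsets (such as exitCodePatch_) that were recorded while code was emitted, and those offsets are only trustworthy if the buffer never failed. The Linker hunk above enforces that contract at the call site; reduced to a hypothetical skeleton:

    #include <cstddef>

    // Sketch of the calling contract (illustrative template, not the
    // real Linker): never link a masm whose buffer reported failure.
    template <typename Code, typename Masm>
    Code *linkSketch(Code *code, Masm &masm) {
        if (!code || masm.oom())
            return NULL;       // abandon Ion for this script
        code->copyFrom(masm);  // offsets recorded in masm are valid
        masm.link(code);       // the new JS_ASSERT(!oom()) cannot fire
        return code;
    }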
--- a/js/src/ion/arm/Assembler-arm.cpp
+++ b/js/src/ion/arm/Assembler-arm.cpp
@@ -1632,16 +1632,20 @@ Assembler::as_b(BOffImm off, Condition c
     if (c == Always && !isPatchable)
         m_buffer.markGuard();
     return ret;
 }
 
 BufferOffset
 Assembler::as_b(Label *l, Condition c, bool isPatchable)
 {
+    if (m_buffer.oom()) {
+        BufferOffset ret;
+        return ret;
+    }
     m_buffer.markNextAsBranch();
     if (l->bound()) {
         BufferOffset ret = as_nop();
         as_b(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
         return ret;
     }
 
     int32_t old;
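A default-constructed BufferOffset is the assembler's "nothing was emitted" sentinel, so as_b() can return one as soon as the buffer is already poisoned rather than emit through a broken buffer. A simplified model of that sentinel (the INT_MIN choice here is an assumption, not copied from the tree):

    #include <climits>

    // Toy BufferOffset: default construction yields an unassigned
    // offset, which callers can detect instead of dereferencing.
    class ToyBufferOffset {
        int offset;
      public:
        ToyBufferOffset() : offset(INT_MIN) {}
        explicit ToyBufferOffset(int off) : offset(off) {}
        bool assigned() const { return offset != INT_MIN; }
        int getOffset() const { return offset; }
    };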
--- a/js/src/ion/shared/IonAssemblerBuffer.h
+++ b/js/src/ion/shared/IonAssemblerBuffer.h
@@ -72,24 +72,25 @@ struct BufferSlice : public InlineForwar
             memcpy(&instructions[size()], inst, instSize);
         nodeSize += instSize;
     }
 };
 
 template<int SliceSize, class Inst>
 struct AssemblerBuffer {
   public:
-    AssemblerBuffer() : head(NULL), tail(NULL), m_oom(false), bufferSize(0) {}
+    AssemblerBuffer() : head(NULL), tail(NULL), m_bail(false), m_oom(false), bufferSize(0) {}
   protected:
     typedef BufferSlice<SliceSize> Slice;
     typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
     Slice *head;
     Slice *tail;
   public:
     bool m_oom;
+    bool m_bail;
     // How much data has been added to the buffer thus far.
     uint32_t bufferSize;
     uint32_t lastInstSize;
     bool isAligned(int alignment) const {
         // make sure the requested alignment is a power of two.
         JS_ASSERT((alignment & (alignment-1)) == 0);
         return !(size() & (alignment - 1));
     }
@@ -143,21 +144,27 @@ struct AssemblerBuffer {
         else
             executableSize = bufferSize;
         return executableSize;
     }
     unsigned int uncheckedSize() const {
         return size();
     }
     bool oom() const {
-        return m_oom;
+        return m_oom || m_bail;
+    }
+    bool bail() const {
+        return m_bail;
     }
     void fail_oom() {
         m_oom = true;
     }
+    void fail_bail() {
+        m_bail = true;
+    }
     Inst *getInst(BufferOffset off) {
         unsigned int local_off = off.getOffset();
         Slice *cur = NULL;
         if (local_off > bufferSize) {
             local_off -= bufferSize;
             cur = tail;
         } else {
             for (cur = head; cur != NULL; cur = cur->getNext()) {
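Note what did not change in this file: getInst() and the other accessors gain no new checks, because folding m_bail into oom() means every existing guard already covers the bail case. Only code that must distinguish "corner case, give up on Ion" from a genuine allocation failure consults bail(), as insertEntry() does in the next file. A usage sketch:

    // Generic emission paths keep checking only oom(); bail() is for
    // the few spots that must tell a bail apart from a real OOM.
    template <typename Buffer>
    bool tryEmit(Buffer &buf) {
        if (buf.oom())      // true after fail_oom() OR fail_bail()
            return false;   // stop emitting; compilation is abandoned
        // ... append instructions here ...
        return true;
    }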
--- a/js/src/ion/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/ion/shared/IonAssemblerBufferWithConstantPools.h
@@ -426,17 +426,17 @@ struct AssemblerBufferWithConstantPool :
                 poolDest += footerSize;
                 // at this point, poolDest had better still be aligned to a chunk boundary.
                 dest = (Chunk*) poolDest;
             }
         }
     }
 
     BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = NULL) {
-        if (this->oom())
+        if (this->oom() && !this->bail())
             return BufferOffset();
         int token;
         if (p != NULL) {
             int poolId = p - pools;
             const char sigil = inBackref ? 'B' : 'F';
 
             IonSpew(IonSpew_Pools, "[%d]{%c} Inserting entry into pool %d", id, sigil, poolId);
             IonSpewStart(IonSpew_Pools, "[%d] data is: 0x", id);
@@ -446,16 +446,18 @@ struct AssemblerBufferWithConstantPool :
         // insert the pool value
         if (inBackref)
             token = insertEntryBackwards(instSize, inst, p, data);
         else
             token = insertEntryForwards(instSize, inst, p, data);
         // now to get an instruction to write
         PoolEntry retPE;
         if (p != NULL) {
+            if (this->oom())
+                return BufferOffset();
             int poolId = p - pools;
             IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size());
             Asm::insertTokenIntoTag(instSize, inst, token);
             JS_ASSERT(poolId < (1 << poolKindBits));
             JS_ASSERT(poolId >= 0);
             // Figure out the offset within like-kinded pool entries
             retPE = PoolEntry(entryCount[poolId], poolId);
             entryCount[poolId]++;
@@ -491,16 +493,18 @@ struct AssemblerBufferWithConstantPool :
                 // uh-oh, the backwards pool is full.  Time to finalize it, and
                 // switch to a new forward pool.
                 if (p != NULL)
                     IonSpew(IonSpew_Pools, "[%d]Inserting pool entry caused a spill", id);
                 else
                     IonSpew(IonSpew_Pools, "[%d]Inserting instruction(%d) caused a spill", id, size());
 
                 this->finishPool();
+                if (this->oom())
+                    return uint32_t(-1);
                 return this->insertEntryForwards(instSize, inst, p, data);
             }
             // when moving back to front, calculating the alignment is hard, just be
             // conservative with it.
             poolOffset += tmp->immSize * tmp->numEntries + tmp->getAlignment();
             if (p == tmp) {
                 poolOffset += tmp->immSize;
             }
@@ -721,19 +725,19 @@ struct AssemblerBufferWithConstantPool :
                 if (!Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) {
                     // NOTE: if removing this entry happens to change the alignment of the next
                     // block, chances are you will have a bad time.
                     // ADDENDUM: this CANNOT happen on ARM, because the only elements that
                     // fall into this case are doubles loaded via vfp, but they will also be
                     // the last pool, which means it cannot affect the alignment of any other
                     // Sub Pools.
                     IonSpew(IonSpew_Pools, "[%d]***Offset (%d) was still out of range!***", id, codeOffset - magicAlign);
-                    outcasts[poolIdx].append(iter->getOffset());
-                    memcpy(&outcastEntries[poolIdx][numSkips * p->immSize], &p->poolData[idx * p->immSize], p->immSize);
-                    numSkips++;
+                    IonSpew(IonSpew_Pools, "[%d] Too complicated; bailing", id);
+                    this->fail_bail();
+                    return;
                 } else {
                     preservedEntries[idx] = true;
                 }
             }
             // remove the elements of the pool that should not be there (YAY, MEMCPY)
             unsigned int idxDest = 0;
             // If no elements were skipped, no expensive copy is necessary.
             if (numSkips != 0) {
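This final hunk is the corner case named in the commit message: a pool entry whose load is still out of range even after the pool has been dumped. The old code copied the entry into an "outcast" list (the removed append/memcpy) and retried it in a later pool; the new code declares the situation too complicated, sets the bail flag, and unwinds. From there the poisoned buffer fails every subsequent oom() check, the Linker refuses to produce IonCode, and the script runs without Ion. The new failure path, reduced to a skeleton (illustrative names only):

    // Skeleton of the new bail path (hypothetical helper, not the
    // real finishPool).
    template <typename Buffer>
    void onEntryStillOutOfRange(Buffer &buf) {
        // Previously: stash the entry as an "outcast" and retry it in
        // a later pool. Now: poison the buffer and return; every later
        // oom() check aborts the compilation cleanly.
        buf.fail_bail();
    }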