Bug 927782 - Part 8: Record block scope ranges more precisely. r=luke
☠☠ backed out by 94cdaced90bf ☠☠
author      Andy Wingo <wingo@igalia.com>
date        Fri, 06 Dec 2013 18:27:55 +0100
changeset   174945 5f086f95b3059ed80dfe45013081478e3ed5ee82
parent      174944 8c74b1f68590e7791ec4a13f2e12196caf053709
child       174946 b971de7edfff874f74137a241e352ed87d6d14a3
push id     445
push user   ffxbld
push date   Mon, 10 Mar 2014 22:05:19 +0000
reviewers   luke
bugs        927782
milestone   28.0a1
js/src/frontend/BytecodeEmitter.cpp
js/src/frontend/BytecodeEmitter.h
js/src/jsscript.cpp
js/src/jsscript.h
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -518,29 +518,48 @@ FlushPops(ExclusiveContext *cx, Bytecode
 static bool
 PopIterator(ExclusiveContext *cx, BytecodeEmitter *bce)
 {
     if (Emit1(cx, bce, JSOP_ENDITER) < 0)
         return false;
     return true;
 }
 
+namespace {
+
+class NonLocalExitScope {
+    ExclusiveContext *cx;
+    BytecodeEmitter *bce;
+    const uint32_t savedScopeIndex;
+    const int savedDepth;
+
+    NonLocalExitScope(const NonLocalExitScope &) MOZ_DELETE;
+
+  public:
+    explicit NonLocalExitScope(ExclusiveContext *cx_, BytecodeEmitter *bce_)
+      : cx(cx_),
+        bce(bce_),
+        savedScopeIndex(bce->blockScopeList.length()),
+        savedDepth(bce->stackDepth) {}
+
+    ~NonLocalExitScope() {
+        for (uint32_t n = savedScopeIndex; n < bce->blockScopeList.length(); n++)
+            bce->blockScopeList.recordEnd(n, bce->offset());
+        bce->stackDepth = savedDepth;
+    }
+
+    bool prepareForNonLocalJump(StmtInfoBCE *toStmt);
+};
+
 /*
  * Emit additional bytecode(s) for non-local jumps.
  */
-static bool
-EmitNonLocalJumpFixup(ExclusiveContext *cx, BytecodeEmitter *bce, StmtInfoBCE *toStmt)
-{
-    /*
-     * The non-local jump fixup we emit will unbalance bce->stackDepth, because
-     * the fixup replicates balanced code such as JSOP_LEAVEWITH emitted at the
-     * end of a with statement, so we save bce->stackDepth here and restore it
-     * just before a successful return.
-     */
-    int depth = bce->stackDepth;
+bool
+NonLocalExitScope::prepareForNonLocalJump(StmtInfoBCE *toStmt)
+{
     int npops = 0;
 
 #define FLUSH_POPS() if (npops && !FlushPops(cx, bce, &npops)) return false
 
     for (StmtInfoBCE *stmt = bce->topStmt; stmt != toStmt; stmt = stmt->down) {
         switch (stmt->type) {
           case STMT_FINALLY:
             FLUSH_POPS();
@@ -575,34 +594,41 @@ EmitNonLocalJumpFixup(ExclusiveContext *
 
           default:;
         }
 
         if (stmt->isBlockScope) {
             FLUSH_POPS();
             JS_ASSERT(stmt->blockObj);
             StaticBlockObject &blockObj = *stmt->blockObj;
+            uint32_t blockScopeIndex = stmt->blockScopeIndex;
+            uint32_t scopeObjectIndex = bce->blockScopeList.findEnclosingScope(blockScopeIndex);
             if (Emit1(cx, bce, JSOP_DEBUGLEAVEBLOCK) < 0)
                 return false;
+            if (!bce->blockScopeList.append(scopeObjectIndex, bce->offset()))
+                return false;
             EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, blockObj.slotCount());
         }
     }
 
     FLUSH_POPS();
-    bce->stackDepth = depth;
     return true;
 
 #undef FLUSH_POPS
 }
 
+}  // anonymous namespace
+
 static ptrdiff_t
 EmitGoto(ExclusiveContext *cx, BytecodeEmitter *bce, StmtInfoBCE *toStmt, ptrdiff_t *lastp,
          SrcNoteType noteType = SRC_NULL)
 {
-    if (!EmitNonLocalJumpFixup(cx, bce, toStmt))
+    NonLocalExitScope nle(cx, bce);
+
+    if (!nle.prepareForNonLocalJump(toStmt))
         return -1;
 
     if (noteType != SRC_NULL) {
         if (NewSrcNote(cx, bce, noteType) < 0)
             return -1;
     }
 
     return EmitBackPatchOp(cx, bce, lastp);
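
(Aside: a minimal sketch of the RAII shape NonLocalExitScope gives EmitGoto above, under illustrative names; "Emitter" and "ExitGuard" are stand-ins, not the SpiderMonkey types. Any block scope notes appended while preparing the jump are closed, and the stack depth restored, when the guard leaves scope, on the success and early-return paths alike.)

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Emitter {
        // (start, length) note pairs; length == 0 means the note is still open.
        std::vector<std::pair<uint32_t, uint32_t>> notes;
        int stackDepth = 0;
        uint32_t offset = 0;
    };

    class ExitGuard {
        Emitter &e;
        const size_t savedNotes;  // notes already present at construction
        const int savedDepth;     // stack depth to restore on destruction
      public:
        explicit ExitGuard(Emitter &e_)
          : e(e_), savedNotes(e_.notes.size()), savedDepth(e_.stackDepth) {}
        ~ExitGuard() {
            // Close every note appended since construction and rebalance the
            // stack, as ~NonLocalExitScope does with recordEnd.
            for (size_t n = savedNotes; n < e.notes.size(); n++)
                e.notes[n].second = e.offset - e.notes[n].first;
            e.stackDepth = savedDepth;
        }
    };
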
@@ -771,46 +797,46 @@ PopStatementBCE(ExclusiveContext *cx, By
     StmtInfoBCE *stmt = bce->topStmt;
     if (!stmt->isTrying() &&
         (!BackPatch(cx, bce, stmt->breaks, bce->code().end(), JSOP_GOTO) ||
          !BackPatch(cx, bce, stmt->continues, bce->code(stmt->update), JSOP_GOTO)))
     {
         return false;
     }
 
-    if (stmt->isBlockScope)
-        bce->blockScopeList.recordEnd(stmt->blockScopeIndex, bce->offset());
-
     FinishPopStatement(bce);
     return true;
 }
 
 static bool
 LeaveBlockScope(ExclusiveContext *cx, BytecodeEmitter *bce, JSOp op)
 {
+    StmtInfoBCE *stmt = bce->topStmt;
+    JS_ASSERT(stmt->isBlockScope);
+    uint32_t blockScopeIndex = stmt->blockScopeIndex;
+
 #ifdef DEBUG
-    StmtInfoBCE *stmt = bce->topStmt;
-    uint32_t blockScopeIndex = stmt->blockScopeIndex;
-    JS_ASSERT(stmt->isBlockScope);
     JS_ASSERT(bce->blockScopeList.list[blockScopeIndex].length == 0);
     uint32_t blockObjIndex = bce->blockScopeList.list[blockScopeIndex].index;
     ObjectBox *blockObjBox = bce->objectList.find(blockObjIndex);
     StaticBlockObject *blockObj = &blockObjBox->object->as<StaticBlockObject>();
     JS_ASSERT(stmt->blockObj == blockObj);
     JS_ASSERT(blockObj == bce->blockChain);
 #endif
 
     uint32_t slotCount = bce->blockChain->slotCount();
 
     if (!PopStatementBCE(cx, bce))
         return false;
 
     if (Emit1(cx, bce, JSOP_DEBUGLEAVEBLOCK) < 0)
         return false;
 
+    bce->blockScopeList.recordEnd(blockScopeIndex, bce->offset());
+
     JS_ASSERT(op == JSOP_LEAVEBLOCK || op == JSOP_LEAVEBLOCKEXPR);
     EMIT_UINT16_IMM_OP(op, slotCount);
 
     return true;
 }
 
 static bool
 EmitIndex32(ExclusiveContext *cx, JSOp op, uint32_t index, BytecodeEmitter *bce)
@@ -3838,45 +3864,36 @@ EmitCatch(ExclusiveContext *cx, Bytecode
 
         // If the guard expression is false, fall through, pop the block scope,
         // and jump to the next catch block.  Otherwise jump over that code and
         // pop the dupped exception.
         ptrdiff_t guardCheck = EmitJump(cx, bce, JSOP_IFNE, 0);
         if (guardCheck < 0)
             return false;
 
-#ifdef DEBUG
-        uint32_t blockScopeIndex = bce->topStmt->blockScopeIndex;
-        uint32_t blockObjIndex = bce->blockScopeList.list[blockScopeIndex].index;
-        ObjectBox *blockObjBox = bce->objectList.find(blockObjIndex);
-        StaticBlockObject *blockObj = &blockObjBox->object->as<StaticBlockObject>();
-        JS_ASSERT(blockObj == bce->blockChain);
-#endif
-
-        // Save stack depth before popping the block scope.
-        int savedDepth = bce->stackDepth;
-
-        // Move exception back to cx->exception to prepare for
-        // the next catch.
-        if (Emit1(cx, bce, JSOP_THROWING) < 0)
-            return false;
-
-        // Leave the scope for this catch block
-        if (Emit1(cx, bce, JSOP_DEBUGLEAVEBLOCK) < 0)
-            return false;
-        EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, bce->blockChain->slotCount());
-
-        // Jump to the next handler.  The jump target is backpatched by EmitTry.
-        ptrdiff_t guardJump = EmitJump(cx, bce, JSOP_GOTO, 0);
-        if (guardJump < 0)
-            return false;
-        stmt->guardJump() = guardJump;
-
-        // Back to normal control flow.  Restore stack depth after nonlocal exit.
-        bce->stackDepth = savedDepth;
+        {
+            NonLocalExitScope nle(cx, bce);
+
+            // Move exception back to cx->exception to prepare for
+            // the next catch.
+            if (Emit1(cx, bce, JSOP_THROWING) < 0)
+                return false;
+
+            // Leave the scope for this catch block.
+            if (!nle.prepareForNonLocalJump(stmt))
+                return false;
+
+            // Jump to the next handler.  The jump target is backpatched by EmitTry.
+            ptrdiff_t guardJump = EmitJump(cx, bce, JSOP_GOTO, 0);
+            if (guardJump < 0)
+                return false;
+            stmt->guardJump() = guardJump;
+        }
+
+        // Back to normal control flow.
         SetJumpOffsetAt(bce, guardCheck);
 
         // Pop duplicated exception object as we no longer need it.
         if (Emit1(cx, bce, JSOP_POP) < 0)
             return false;
     }
 
     /* Emit the catch body. */
@@ -5059,18 +5076,22 @@ EmitReturn(ExclusiveContext *cx, Bytecod
      *
      * In this case we mutate JSOP_RETURN into JSOP_SETRVAL and add an
      * extra JSOP_RETRVAL after the fixups.
      */
     ptrdiff_t top = bce->offset();
 
     if (Emit1(cx, bce, JSOP_RETURN) < 0)
         return false;
-    if (!EmitNonLocalJumpFixup(cx, bce, nullptr))
-        return false;
+
+    NonLocalExitScope nle(cx, bce);
+
+    if (!nle.prepareForNonLocalJump(nullptr))
+        return false;
+
     if (top + JSOP_RETURN_LENGTH != bce->offset()) {
         bce->code()[top] = JSOP_SETRVAL;
         if (Emit1(cx, bce, JSOP_RETRVAL) < 0)
             return false;
     }
 
     return true;
 }
@@ -6876,16 +6897,40 @@ CGBlockScopeList::append(uint32_t scopeO
     mozilla::PodZero(&note);
 
     note.index = scopeObject;
     note.start = offset;
 
     return list.append(note);
 }
 
+uint32_t
+CGBlockScopeList::findEnclosingScope(uint32_t index)
+{
+    JS_ASSERT(index < length());
+    JS_ASSERT(list[index].index != BlockScopeNote::NoBlockScopeIndex);
+
+    uint32_t pos = list[index].start;
+    while (index--) {
+        JS_ASSERT(list[index].start <= pos);
+        if (list[index].length == 0) {
+            // We are looking for the nearest enclosing live scope.  If the
+            // scope contains POS, it should still be open, so its length should
+            // be zero.
+            return list[index].index;
+        } else {
+            // Conversely, if the length is not zero, it should not contain
+            // POS.
+            JS_ASSERT(list[index].start + list[index].length <= pos);
+        }
+    }
+
+    return BlockScopeNote::NoBlockScopeIndex;
+}
+
 void
 CGBlockScopeList::recordEnd(uint32_t index, uint32_t offset)
 {
     JS_ASSERT(index < length());
     JS_ASSERT(offset >= list[index].start);
     JS_ASSERT(list[index].length == 0);
 
     list[index].length = offset - list[index].start;
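
(Aside: findEnclosingScope above depends on two invariants, that the list is sorted by start offset and that exactly the still-open scopes have length == 0. A standalone sketch of that backward search, with a simplified Note standing in for BlockScopeNote:)

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static const uint32_t NoBlockScopeIndex = UINT32_MAX;

    struct Note {
        uint32_t index;   // object-array index, or NoBlockScopeIndex
        uint32_t start;   // bytecode offset where the scope opens
        uint32_t length;  // 0 while the scope is still open
    };

    uint32_t FindEnclosingScope(const std::vector<Note> &list, uint32_t i)
    {
        uint32_t pos = list[i].start;
        while (i--) {
            // Earlier notes start at or before pos.  An open note still
            // contains pos, so it is the nearest enclosing live scope; a
            // closed one must have ended at or before pos.
            if (list[i].length == 0)
                return list[i].index;
            assert(list[i].start + list[i].length <= pos);
        }
        return NoBlockScopeIndex;  // pos lies in no enclosing block scope
    }

This is the value the nonlocal-exit path records in its note, so that a later lookup sees the scope that remains current after the inner blocks are popped.
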
--- a/js/src/frontend/BytecodeEmitter.h
+++ b/js/src/frontend/BytecodeEmitter.h
@@ -58,16 +58,17 @@ struct CGTryNoteList {
     void finish(TryNoteArray *array);
 };
 
 struct CGBlockScopeList {
     Vector<BlockScopeNote> list;
     CGBlockScopeList(ExclusiveContext *cx) : list(cx) {}
 
     bool append(uint32_t scopeObject, uint32_t offset);
+    uint32_t findEnclosingScope(uint32_t index);
     void recordEnd(uint32_t index, uint32_t offset);
     size_t length() const { return list.length(); }
     void finish(BlockScopeArray *array);
 };
 
 struct StmtInfoBCE;
 
 // Use zero inline elements because these go on the stack and affect how many
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -2905,18 +2905,22 @@ JSScript::getBlockScope(jsbytecode *pc)
         return nullptr;
 
     BlockScopeArray *scopeArray = blockScopes();
     JSObject *blockChain = nullptr;
     for (uint32_t n = 0; n < scopeArray->length; n++) {
         const BlockScopeNote *note = &scopeArray->vector[n];
         if (note->start > offset)
             break;
-        if (offset <= note->start + note->length)
-            blockChain = getObject(note->index);
+        if (offset < note->start + note->length) {
+            if (note->index == BlockScopeNote::NoBlockScopeIndex)
+                blockChain = nullptr;
+            else
+                blockChain = getObject(note->index);
+        }
     }
 
     return blockChain;
 }
 
 void
 JSScript::setArgumentsHasVarBinding()
 {
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -76,19 +76,37 @@ struct JSTryNote {
     uint16_t        stackDepth; /* stack depth upon exception handler entry */
     uint32_t        start;      /* start of the try statement or loop
                                    relative to script->main */
     uint32_t        length;     /* length of the try statement or loop */
 };
 
 namespace js {
 
+// A block scope has a range in bytecode: it is entered at some offset, and left
+// at some later offset.  Scopes can be nested.  Given an offset, the
+// BlockScopeNote containing that offset with the highest start value
+// indicates the innermost block scope.  The block scope list is sorted by
+// increasing start value.
+//
+// It is possible to leave a scope nonlocally, for example via a "break"
+// statement, so there may be short bytecode ranges in a block scope in which we
+// are popping the block chain in preparation for a goto.  These exits are also
+// nested with respect to outer scopes.  The scopes in these exits are indicated
+// by the "index" field, just like any other block.  If a nonlocal exit pops the
+// last block scope, the index will be NoBlockScopeIndex.
+//
 struct BlockScopeNote {
-    uint32_t        index;      // Index of StaticScopeObject in the object array.
-    uint32_t        start;      // Bytecode offset at which this scope starts.
+    static const uint32_t NoBlockScopeIndex = UINT32_MAX;
+
+    uint32_t        index;      // Index of StaticScopeObject in the object
+                                // array, or NoBlockScopeIndex if there is no
+                                // block scope in this range.
+    uint32_t        start;      // Bytecode offset at which this scope starts,
+                                // from script->main().
     uint32_t        length;     // Bytecode length of scope.
     uint32_t        padding;    // Pad to 64-bit boundary.
 };
 
 struct ConstArray {
     js::HeapValue   *vector;    /* array of indexed constant values */
     uint32_t        length;
 };