Bug 1029830 - IonMonkey: GVN: Replace UCE with GVN r=nbp
author: Dan Gohman <sunfish@mozilla.com>
date: Wed, 17 Sep 2014 10:27:25 -0700
changeset 205888 8f27a48a25d5a7acabf69867597f3dfe0f951cdd
parent 205887 ff831540e312febfe640805f1cdc207ad8530185
child 205889 ce0a75f9481b5c33867ddfa0b758685e2561913a
push id: 27507
push user: ryanvm@gmail.com
push date: Thu, 18 Sep 2014 02:16:54 +0000
treeherder: mozilla-central@488d490da742
reviewers: nbp
bugs: 1029830
milestone: 35.0a1
js/src/jit/Ion.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonOptimizationLevels.cpp
js/src/jit/IonOptimizationLevels.h
js/src/jit/JitOptions.cpp
js/src/jit/JitOptions.h
js/src/jit/LinearScan.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGraph.cpp
js/src/jit/MIRGraph.h
js/src/jit/ParallelSafetyAnalysis.cpp
js/src/jit/UnreachableCodeElimination.cpp
js/src/jit/UnreachableCodeElimination.h
js/src/jit/ValueNumbering.cpp
js/src/jit/ValueNumbering.h
js/src/jsapi-tests/moz.build
js/src/jsapi-tests/testJitGVN.cpp
js/src/jsapi-tests/testJitMinimalFunc.h
js/src/moz.build
js/src/vm/TraceLogging.cpp
js/src/vm/TraceLogging.h
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -35,17 +35,16 @@
 #include "jit/LIR.h"
 #include "jit/LoopUnroller.h"
 #include "jit/Lowering.h"
 #include "jit/ParallelSafetyAnalysis.h"
 #include "jit/PerfSpewer.h"
 #include "jit/RangeAnalysis.h"
 #include "jit/ScalarReplacement.h"
 #include "jit/StupidAllocator.h"
-#include "jit/UnreachableCodeElimination.h"
 #include "jit/ValueNumbering.h"
 #include "vm/ForkJoin.h"
 #include "vm/HelperThreads.h"
 #include "vm/TraceLogging.h"
 
 #include "jscompartmentinlines.h"
 #include "jsgcinlines.h"
 #include "jsinferinlines.h"
@@ -1467,27 +1466,35 @@ OptimizeMIR(MIRGenerator *mir)
             return false;
         IonSpewPass("Apply types");
         AssertExtendedGraphCoherency(graph);
 
         if (mir->shouldCancel("Apply types"))
             return false;
     }
 
+    // Parallel Safety Analysis. Note that this may delete blocks containing
+    // instructions pointed to by the dependency() field of instructions which
+    // are not deleted, leaving them dangling. This is ok, since we'll rerun
+    // AliasAnalysis, which recomputes them, before they're needed.
     if (graph.entryBlock()->info().executionMode() == ParallelExecution) {
         AutoTraceLog log(logger, TraceLogger::ParallelSafetyAnalysis);
         ParallelSafetyAnalysis analysis(mir, graph);
         if (!analysis.analyze())
             return false;
         IonSpewPass("Parallel Safety Analysis");
         AssertExtendedGraphCoherency(graph);
         if (mir->shouldCancel("Parallel Safety Analysis"))
             return false;
     }
 
+    ValueNumberer gvn(mir, graph);
+    if (!gvn.init())
+        return false;
+
     // Alias analysis is required for LICM and GVN so that we don't move
     // loads across stores.
     if (mir->optimizationInfo().licmEnabled() ||
         mir->optimizationInfo().gvnEnabled())
     {
         AutoTraceLog log(logger, TraceLogger::AliasAnalysis);
         AliasAnalysis analysis(mir, graph);
         if (!analysis.analyze())
@@ -1507,38 +1514,25 @@ OptimizeMIR(MIRGenerator *mir)
 
             if (mir->shouldCancel("Eliminate dead resume point operands"))
                 return false;
         }
     }
 
     if (mir->optimizationInfo().gvnEnabled()) {
         AutoTraceLog log(logger, TraceLogger::GVN);
-        ValueNumberer gvn(mir, graph);
         if (!gvn.run(ValueNumberer::UpdateAliasAnalysis))
             return false;
         IonSpewPass("GVN");
         AssertExtendedGraphCoherency(graph);
 
         if (mir->shouldCancel("GVN"))
             return false;
     }
 
-    if (mir->optimizationInfo().uceEnabled()) {
-        AutoTraceLog log(logger, TraceLogger::UCE);
-        UnreachableCodeElimination uce(mir, graph);
-        if (!uce.analyze())
-            return false;
-        IonSpewPass("UCE");
-        AssertExtendedGraphCoherency(graph);
-
-        if (mir->shouldCancel("UCE"))
-            return false;
-    }
-
     if (mir->optimizationInfo().licmEnabled()) {
         AutoTraceLog log(logger, TraceLogger::LICM);
         // LICM can hoist instructions from conditional branches and trigger
         // repeated bailouts. Disable it if this script is known to bailout
         // frequently.
         JSScript *script = mir->info().script();
         if (!script || !script->hadFrequentBailouts()) {
             if (!LICM(mir, graph))
@@ -1573,30 +1567,28 @@ OptimizeMIR(MIRGenerator *mir)
         if (!r.removeBetaNodes())
             return false;
         IonSpewPass("De-Beta");
         AssertExtendedGraphCoherency(graph);
 
         if (mir->shouldCancel("RA De-Beta"))
             return false;
 
-        if (mir->optimizationInfo().uceEnabled()) {
+        if (mir->optimizationInfo().gvnEnabled()) {
             bool shouldRunUCE = false;
             if (!r.prepareForUCE(&shouldRunUCE))
                 return false;
             IonSpewPass("RA check UCE");
             AssertExtendedGraphCoherency(graph);
 
             if (mir->shouldCancel("RA check UCE"))
                 return false;
 
             if (shouldRunUCE) {
-                UnreachableCodeElimination uce(mir, graph);
-                uce.disableAliasAnalysis();
-                if (!uce.analyze())
+                if (!gvn.run(ValueNumberer::DontUpdateAliasAnalysis))
                     return false;
                 IonSpewPass("UCE After RA");
                 AssertExtendedGraphCoherency(graph);
 
                 if (mir->shouldCancel("UCE After RA"))
                     return false;
             }
         }
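
With this change the ValueNumberer is constructed once and run both before and after the register-allocation-related passes, subsuming the old UCE pass. The comment above notes that alias analysis must run before LICM and GVN so that loads are not moved across stores. As a minimal standalone illustration of the hazard the recorded load/store dependencies guard against (plain C++, not the MIR API):

    #include <cstdio>

    // GVN may only replace the second load of *p with the first if alias
    // analysis proves the intervening store to *q cannot write *p. With
    // p == q, folding the two loads would change the result from 43 to 2.
    static int f(int *p, int *q)
    {
        int a = *p; // load
        *q = 42;    // store; may alias *p
        int b = *p; // reload; depends on the store above
        return a + b;
    }

    int main()
    {
        int x = 1;
        printf("%d\n", f(&x, &x)); // prints 43
        return 0;
    }
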
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -1573,16 +1573,23 @@ ComputeImmediateDominators(MIRGraph &gra
 
         // For each block in RPO, intersect all dominators.
         for (; block != graph.rpoEnd(); block++) {
             // If a node has once been found to have no exclusive dominator,
             // it will never have an exclusive dominator, so it may be skipped.
             if (block->immediateDominator() == *block)
                 continue;
 
+            // A block with no predecessors is not reachable from any entry, so
+            // it self-dominates.
+            if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
+                block->setImmediateDominator(*block);
+                continue;
+            }
+
             MBasicBlock *newIdom = block->getPredecessor(0);
 
             // Find the first common dominator.
             for (size_t i = 1; i < block->numPredecessors(); i++) {
                 MBasicBlock *pred = block->getPredecessor(i);
                 if (pred->immediateDominator() == nullptr)
                     continue;
 
@@ -3177,17 +3184,19 @@ jit::MarkLoopBlocks(MIRGraph &graph, MBa
         // This block is in the loop; trace to its predecessors.
         for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
             MBasicBlock *pred = block->getPredecessor(p);
             if (pred->isMarked())
                 continue;
 
             // Blocks dominated by the OSR entry are not part of the loop
             // (unless they aren't reachable from the normal entry).
-            if (osrBlock && pred != header && osrBlock->dominates(pred)) {
+            if (osrBlock && pred != header &&
+                osrBlock->dominates(pred) && !osrBlock->dominates(header))
+            {
                 *canOsr = true;
                 continue;
             }
 
             MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
                        "Loop block not between loop header and loop backedge");
 
             pred->mark();
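
The new zero-predecessor case above keeps ComputeImmediateDominators from reading getPredecessor(0) on a block that GVN has disconnected from the CFG. A minimal standalone sketch of where that guard sits in the RPO intersection loop (toy Block type and a Cooper-Harvey-Kennedy-style intersection; not the SpiderMonkey implementation):

    #include <cassert>
    #include <vector>

    struct Block {
        int rpo;                   // reverse-postorder index; entry is 0
        std::vector<Block*> preds;
        Block *idom = nullptr;
    };

    // Walk both dominator chains up to their meet point.
    static Block *Intersect(Block *a, Block *b)
    {
        while (a != b) {
            while (a->rpo > b->rpo) a = a->idom;
            while (b->rpo > a->rpo) b = b->idom;
        }
        return a;
    }

    static void ComputeIdomFor(Block *block)
    {
        // A block with no predecessors is not reachable from any entry,
        // so it self-dominates (the case added by this patch).
        if (block->preds.empty()) {
            block->idom = block;
            return;
        }
        Block *newIdom = block->preds[0];
        for (size_t i = 1; i < block->preds.size(); i++) {
            Block *pred = block->preds[i];
            if (pred->idom != nullptr)
                newIdom = Intersect(newIdom, pred);
        }
        block->idom = newIdom;
    }

    int main()
    {
        Block entry{0, {}}, left{1, {}}, right{2, {}}, join{3, {}}, orphan{4, {}};
        entry.idom = &entry;
        left.preds = {&entry};
        right.preds = {&entry};
        join.preds = {&left, &right};
        ComputeIdomFor(&left);
        ComputeIdomFor(&right);
        ComputeIdomFor(&join);
        ComputeIdomFor(&orphan);        // disconnected by GVN
        assert(join.idom == &entry);
        assert(orphan.idom == &orphan); // self-dominating, not a crash
        return 0;
    }
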
--- a/js/src/jit/IonOptimizationLevels.cpp
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -25,17 +25,16 @@ OptimizationInfo::initNormalOptimization
 
     eaa_ = true;
     edgeCaseAnalysis_ = true;
     eliminateRedundantChecks_ = true;
     inlineInterpreted_ = true;
     inlineNative_ = true;
     gvn_ = true;
     licm_ = true;
-    uce_ = true;
     rangeAnalysis_ = true;
     loopUnrolling_ = true;
     autoTruncate_ = true;
     registerAllocator_ = RegisterAllocator_LSRA;
 
     inlineMaxTotalBytecodeLength_ = 1000;
     inliningMaxCallerBytecodeLength_ = 10000;
     maxInlineDepth_ = 3;
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -62,19 +62,16 @@ class OptimizationInfo
     bool inlineNative_;
 
     // Toggles whether global value numbering is used.
     bool gvn_;
 
     // Toggles whether loop invariant code motion is performed.
     bool licm_;
 
-    // Toggles whether Unreachable Code Elimination is performed.
-    bool uce_;
-
     // Toggles whether Range Analysis is used.
     bool rangeAnalysis_;
 
     // Toggles whether loop unrolling is performed.
     bool loopUnrolling_;
 
     // Toggles whether Truncation based on Range Analysis is used.
     bool autoTruncate_;
@@ -134,20 +131,16 @@ class OptimizationInfo
     bool gvnEnabled() const {
         return gvn_ && !js_JitOptions.disableGvn;
     }
 
     bool licmEnabled() const {
         return licm_ && !js_JitOptions.disableLicm;
     }
 
-    bool uceEnabled() const {
-        return uce_ && !js_JitOptions.disableUce;
-    }
-
     bool rangeAnalysisEnabled() const {
         return rangeAnalysis_ && !js_JitOptions.disableRangeAnalysis;
     }
 
     bool loopUnrollingEnabled() const {
         return loopUnrolling_ && !js_JitOptions.disableLoopUnrolling;
     }
 
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -52,19 +52,16 @@ JitOptions::JitOptions()
     disableEdgeCaseAnalysis = false;
 
     // Toggles whether Range Analysis is globally disabled.
     disableRangeAnalysis = false;
 
     // Toggles whether Loop Unrolling is globally disabled.
     disableLoopUnrolling = true;
 
-    // Toggles whether Unreachable Code Elimination is globally disabled.
-    disableUce = false;
-
     // Toggles whether Effective Address Analysis is globally disabled.
     disableEaa = false;
 
     // Whether functions are compiled immediately.
     eagerCompilation = false;
 
     // Force how many invocation or loop iterations are needed before compiling
     // a function with the highest ionmonkey optimization level.
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -35,17 +35,16 @@ struct JitOptions
     bool compileTryCatch;
     bool disableScalarReplacement;
     bool disableGvn;
     bool disableLicm;
     bool disableInlining;
     bool disableEdgeCaseAnalysis;
     bool disableRangeAnalysis;
     bool disableLoopUnrolling;
-    bool disableUce;
     bool disableEaa;
     bool eagerCompilation;
     bool forceDefaultIonWarmUpThreshold;
     uint32_t forcedDefaultIonWarmUpThreshold;
     bool forceRegisterAllocator;
     IonRegisterAllocator forcedRegisterAllocator;
     bool limitScriptSize;
     bool osr;
--- a/js/src/jit/LinearScan.cpp
+++ b/js/src/jit/LinearScan.cpp
@@ -1360,20 +1360,22 @@ LinearScanAllocator::setIntervalRequirem
             // Reuse policies get either a FIXED requirement or a SAME_AS hint.
             LUse *use = reg->ins()->getOperand(reg->def()->getReusedInput())->toUse();
             interval->setRequirement(Requirement(Requirement::REGISTER));
             interval->setHint(Requirement(use->virtualRegister(), interval->start().previous()));
         } else if (reg->ins()->isPhi()) {
             // Phis don't have any requirements, but they should prefer
             // their input allocations, so they get a SAME_AS hint of the
             // first input
-            LUse *use = reg->ins()->getOperand(0)->toUse();
-            LBlock *predecessor = reg->block()->mir()->getPredecessor(0)->lir();
-            CodePosition predEnd = exitOf(predecessor);
-            interval->setHint(Requirement(use->virtualRegister(), predEnd));
+            if (reg->ins()->toPhi()->numOperands() != 0) {
+                LUse *use = reg->ins()->toPhi()->getOperand(0)->toUse();
+                LBlock *predecessor = reg->block()->mir()->getPredecessor(0)->lir();
+                CodePosition predEnd = exitOf(predecessor);
+                interval->setHint(Requirement(use->virtualRegister(), predEnd));
+            }
         } else {
             // Non-phis get a REGISTER requirement
             interval->setRequirement(Requirement(Requirement::REGISTER));
         }
     }
 
     UsePosition *fixedOp = nullptr;
     UsePosition *registerOp = nullptr;
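
The guard above matters because the zero-input phis created by GVN's OSR-only-loop fixup (see ValueNumbering.cpp below) reach register allocation with no first operand to take a SAME_AS hint from. A toy sketch of the now-conditional hint (assumed shapes, not the LIR API):

    #include <cassert>
    #include <optional>
    #include <vector>

    // A phi prefers its first input's allocation (SAME_AS hint), but a
    // zero-operand phi has no first input, so no hint is recorded.
    static std::optional<int> PhiHintVreg(const std::vector<int> &phiOperands)
    {
        if (phiOperands.empty())
            return std::nullopt;
        return phiOperands[0];
    }

    int main()
    {
        assert(PhiHintVreg({7, 8}) == 7);
        assert(!PhiHintVreg({}).has_value());
        return 0;
    }
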
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -318,16 +318,19 @@ MTest::foldsTo(TempAllocator &alloc)
     if (op->isNot()) {
         // If the operand of the Not is itself a Not, they cancel out.
         MDefinition *opop = op->getOperand(0);
         if (opop->isNot())
             return MTest::New(alloc, opop->toNot()->input(), ifTrue(), ifFalse());
         return MTest::New(alloc, op->toNot()->input(), ifFalse(), ifTrue());
     }
 
+    if (op->isConstant())
+        return MGoto::New(alloc, op->toConstant()->valueToBoolean() ? ifTrue() : ifFalse());
+
     return this;
 }
 
 void
 MTest::filtersUndefinedOrNull(bool trueBranch, MDefinition **subject, bool *filtersUndefined,
                               bool *filtersNull)
 {
     MDefinition *ins = getOperand(0);
@@ -1159,17 +1162,18 @@ MPhi::removeAllOperands()
     for (size_t i = 0; i < inputs_.length(); i++)
         inputs_[i].releaseProducer();
     inputs_.clear();
 }
 
 MDefinition *
 MPhi::operandIfRedundant()
 {
-    JS_ASSERT(inputs_.length() != 0);
+    if (inputs_.length() == 0)
+        return nullptr;
 
     // If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)),
     // returns the operand that it will always be equal to (a, in
     // those two cases).
     MDefinition *first = getOperand(0);
     for (size_t i = 1, e = numOperands(); i < e; i++) {
         MDefinition *op = getOperand(i);
         if (op != first && op != this)
@@ -3022,20 +3026,24 @@ MNot::trySpecializeFloat32(TempAllocator
         ConvertDefinitionToDouble<0>(alloc, in, this);
 }
 
 void
 MBeta::printOpcode(FILE *fp) const
 {
     MDefinition::printOpcode(fp);
 
-    Sprinter sp(GetIonContext()->cx);
-    sp.init();
-    comparison_->print(sp);
-    fprintf(fp, " %s", sp.string());
+    if (IonContext *context = MaybeGetIonContext()) {
+        Sprinter sp(context->cx);
+        sp.init();
+        comparison_->print(sp);
+        fprintf(fp, " %s", sp.string());
+    } else {
+        fprintf(fp, " ???");
+    }
 }
 
 bool
 MNewObject::shouldUseVM() const
 {
     JSObject *obj = templateObject();
     return obj->hasSingletonType() || obj->hasDynamicSlots();
 }
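
The new constant case in MTest::foldsTo is the hook that lets GVN take over UCE's job: once the test operand is a known constant, the branch folds to a goto, the untaken successor loses a predecessor, and GVN sweeps whatever becomes unreachable. A standalone sketch of the fold (toy types, not the MIR API):

    #include <cassert>

    struct Block {};

    struct Test {
        bool operandIsConstant;
        bool constantValue;   // valueToBoolean() of the operand
        Block *ifTrue;
        Block *ifFalse;
    };

    // Mirror of the fold: a test on a constant becomes an unconditional
    // goto to the taken successor; otherwise nothing changes.
    static Block *FoldToGotoTarget(const Test &test)
    {
        if (test.operandIsConstant)
            return test.constantValue ? test.ifTrue : test.ifFalse;
        return nullptr; // not foldable
    }

    int main()
    {
        Block t, f;
        Test alwaysFalse{true, false, &t, &f};
        assert(FoldToGotoTarget(alwaysFalse) == &f); // "if (false)" edge dies
        Test dynamic{false, false, &t, &f};
        assert(FoldToGotoTarget(dynamic) == nullptr);
        return 0;
    }
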
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -233,22 +233,19 @@ class MNode : public TempObject
     // for setOperand, this is probably what you want.
     virtual void replaceOperand(size_t index, MDefinition *operand) = 0;
 
     // Resets the operand to an uninitialized state, breaking the link
     // with the previous operand's producer.
     void releaseOperand(size_t index) {
         getUseFor(index)->releaseProducer();
     }
-
-#if DEBUG
-    bool operandDiscarded(size_t index) const {
-        return !getUseFor(index)->hasProducer();
-    }
-#endif
+    bool hasOperand(size_t index) const {
+        return getUseFor(index)->hasProducer();
+    }
 
     inline MDefinition *toDefinition();
     inline MResumePoint *toResumePoint();
 
     virtual bool writeRecoverData(CompactBufferWriter &writer) const;
 
     virtual void dump(FILE *fp) const = 0;
     virtual void dump() const = 0;
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -10,16 +10,17 @@
 #include "jit/BytecodeAnalysis.h"
 #include "jit/Ion.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 
 using namespace js;
 using namespace js::jit;
+using mozilla::Swap;
 
 MIRGenerator::MIRGenerator(CompileCompartment *compartment, const JitCompileOptions &options,
                            TempAllocator *alloc, MIRGraph *graph, CompileInfo *info,
                            const OptimizationInfo *optimizationInfo)
   : compartment(compartment),
     info_(info),
     optimizationInfo_(optimizationInfo),
     alloc_(alloc),
@@ -113,16 +114,24 @@ void
 MIRGraph::insertBlockAfter(MBasicBlock *at, MBasicBlock *block)
 {
     block->setId(blockIdGen_++);
     blocks_.insertAfter(at, block);
     numBlocks_++;
 }
 
 void
+MIRGraph::insertBlockBefore(MBasicBlock *at, MBasicBlock *block)
+{
+    block->setId(blockIdGen_++);
+    blocks_.insertBefore(at, block);
+    numBlocks_++;
+}
+
+void
 MIRGraph::renumberBlocksAfter(MBasicBlock *at)
 {
     MBasicBlockIterator iter = begin(at);
     iter++;
 
     uint32_t id = at->id();
     for (; iter != end(); iter++)
         iter->setId(++id);
@@ -818,17 +827,17 @@ MBasicBlock::discard(MInstruction *ins)
     instructions_.remove(ins);
 }
 
 void
 MBasicBlock::discardIgnoreOperands(MInstruction *ins)
 {
 #ifdef DEBUG
     for (size_t i = 0, e = ins->numOperands(); i < e; i++)
-        JS_ASSERT(ins->operandDiscarded(i));
+        MOZ_ASSERT(!ins->hasOperand(i));
 #endif
 
     prepareForDiscard(ins, RefType_IgnoreOperands);
     instructions_.remove(ins);
 }
 
 MInstructionIterator
 MBasicBlock::discardAt(MInstructionIterator &iter)
@@ -1191,16 +1200,54 @@ MBasicBlock::setBackedgeAsmJS(MBasicBloc
 
 void
 MBasicBlock::clearLoopHeader()
 {
     JS_ASSERT(isLoopHeader());
     kind_ = NORMAL;
 }
 
+void
+MBasicBlock::setLoopHeader(MBasicBlock *newBackedge)
+{
+    MOZ_ASSERT(!isLoopHeader());
+    kind_ = LOOP_HEADER;
+
+    size_t numPreds = numPredecessors();
+    MOZ_ASSERT(numPreds != 0);
+
+    size_t lastIndex = numPreds - 1;
+    size_t oldIndex = 0;
+    for (; ; ++oldIndex) {
+        MOZ_ASSERT(oldIndex < numPreds);
+        MBasicBlock *pred = getPredecessor(oldIndex);
+        if (pred == newBackedge)
+            break;
+    }
+
+    // Set the loop backedge to be the last element in predecessors_.
+    Swap(predecessors_[oldIndex], predecessors_[lastIndex]);
+
+    // If we have phis, reorder their operands accordingly.
+    if (!phisEmpty()) {
+        getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
+        getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
+        for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
+            MPhi *phi = *iter;
+            MDefinition *last = phi->getOperand(oldIndex);
+            MDefinition *old = phi->getOperand(lastIndex);
+            phi->replaceOperand(oldIndex, old);
+            phi->replaceOperand(lastIndex, last);
+        }
+    }
+
+    MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
+    MOZ_ASSERT(backedge() == newBackedge);
+}
+
 size_t
 MBasicBlock::numSuccessors() const
 {
     JS_ASSERT(lastIns());
     return lastIns()->numSuccessors();
 }
 
 MBasicBlock *
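
setLoopHeader above relies on the invariant that the backedge is the last predecessor, so it swaps the new backedge into the last slot and reorders every phi's operands in lockstep. A toy model of that parallel swap (plain vectors, not the MIR API):

    #include <cassert>
    #include <utility>
    #include <vector>

    // Predecessors and each phi's operands are parallel arrays; move the
    // backedge to the last index and keep the phi operands in sync.
    static void MakeBackedgeLast(std::vector<int> &preds,
                                 std::vector<std::vector<int>> &phiOperands,
                                 int backedge)
    {
        size_t last = preds.size() - 1;
        size_t i = 0;
        while (preds[i] != backedge)
            ++i; // must be found; mirrors the MOZ_ASSERT in setLoopHeader
        std::swap(preds[i], preds[last]);
        for (std::vector<int> &ops : phiOperands)
            std::swap(ops[i], ops[last]);
    }

    int main()
    {
        std::vector<int> preds = {10, 20, 30};          // 20 is the backedge
        std::vector<std::vector<int>> phis = {{1, 2, 3}};
        MakeBackedgeLast(preds, phis, 20);
        assert(preds.back() == 20);
        assert(phis[0][1] == 3 && phis[0][2] == 2);
        return 0;
    }
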
--- a/js/src/jit/MIRGraph.h
+++ b/js/src/jit/MIRGraph.h
@@ -252,16 +252,21 @@ class MBasicBlock : public TempObject, p
     // phis at the loop header, returns a disabling abort.
     AbortReason setBackedge(MBasicBlock *block);
     bool setBackedgeAsmJS(MBasicBlock *block);
 
     // Resets a LOOP_HEADER block to a NORMAL block.  This is needed when
     // optimizations remove the backedge.
     void clearLoopHeader();
 
+    // Sets a block to a LOOP_HEADER block, with newBackedge as its backedge.
+    // This is needed when optimizations remove the normal entry to a loop
+    // with multiple entries.
+    void setLoopHeader(MBasicBlock *newBackedge);
+
     // Propagates phis placed in a loop header down to this successor block.
     void inheritPhis(MBasicBlock *header);
 
     // Propagates backedge slots into phis operands of the loop header.
     bool inheritPhisFromBackedge(MBasicBlock *backedge, bool *hadTypeChange);
 
     // Compute the types for phis in this block according to their inputs.
     bool specializePhis();
@@ -669,16 +674,17 @@ class MIRGraph
     { }
 
     TempAllocator &alloc() const {
         return *alloc_;
     }
 
     void addBlock(MBasicBlock *block);
     void insertBlockAfter(MBasicBlock *at, MBasicBlock *block);
+    void insertBlockBefore(MBasicBlock *at, MBasicBlock *block);
 
     void renumberBlocksAfter(MBasicBlock *at);
 
     void unmarkBlocks();
 
     void setReturnAccumulator(MIRGraphReturns *accum) {
         returnAccumulator_ = accum;
     }
--- a/js/src/jit/ParallelSafetyAnalysis.cpp
+++ b/js/src/jit/ParallelSafetyAnalysis.cpp
@@ -7,17 +7,16 @@
 #include "jit/ParallelSafetyAnalysis.h"
 
 #include "jit/Ion.h"
 #include "jit/IonAnalysis.h"
 #include "jit/JitSpewer.h"
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
-#include "jit/UnreachableCodeElimination.h"
 
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 
 using namespace js;
 using namespace jit;
 
 using parallel::Spew;
@@ -426,18 +425,19 @@ ParallelSafetyAnalysis::analyze()
                     return false;
             }
         }
     }
 
     Spew(SpewCompile, "Safe");
     IonSpewPass("ParallelSafetyAnalysis");
 
-    UnreachableCodeElimination uce(mir_, graph_);
-    if (!uce.removeUnmarkedBlocks(marked))
+    // Sweep away any unmarked blocks. Note that this does not preserve
+    // AliasAnalysis dependencies; we're not expected to preserve them here.
+    if (!RemoveUnmarkedBlocks(mir_, graph_, marked))
         return false;
     IonSpewPass("UCEAfterParallelSafetyAnalysis");
     AssertExtendedGraphCoherency(graph_);
 
     return true;
 }
 
 bool
deleted file mode 100644
--- a/js/src/jit/UnreachableCodeElimination.cpp
+++ /dev/null
@@ -1,272 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/UnreachableCodeElimination.h"
-
-#include "jit/AliasAnalysis.h"
-#include "jit/IonAnalysis.h"
-#include "jit/MIRGenerator.h"
-#include "jit/ValueNumbering.h"
-
-using namespace js;
-using namespace jit;
-
-bool
-UnreachableCodeElimination::analyze()
-{
-    // The goal of this routine is to eliminate code that is
-    // unreachable, either because there is no path from the entry
-    // block to the code, or because the path traverses a conditional
-    // branch where the condition is a constant (e.g., "if (false) {
-    // ... }").  The latter can either appear in the source form or
-    // arise due to optimizations.
-    //
-    // The strategy is straightforward.  The pass begins with a
-    // depth-first search.  We set a bit on each basic block that
-    // is visited.  If a block terminates in a conditional branch
-    // predicated on a constant, we rewrite the block to an unconditional
-    // jump and do not visit the now irrelevant basic block.
-    //
-    // Once the initial DFS is complete, we do a second pass over the
-    // blocks to find those that were not reached.  Those blocks are
-    // simply removed wholesale.  We must also correct any phis that
-    // may be affected.
-
-    // Pass 1: Identify unreachable blocks (if any).
-    if (!prunePointlessBranchesAndMarkReachableBlocks())
-        return false;
-
-    return removeUnmarkedBlocksAndCleanup();
-}
-
-bool
-UnreachableCodeElimination::removeUnmarkedBlocks(size_t marked)
-{
-    marked_ = marked;
-    return removeUnmarkedBlocksAndCleanup();
-}
-
-bool
-UnreachableCodeElimination::removeUnmarkedBlocksAndCleanup()
-{
-    // Everything is reachable, no work required.
-    JS_ASSERT(marked_ <= graph_.numBlocks());
-    if (marked_ == graph_.numBlocks()) {
-        graph_.unmarkBlocks();
-        return true;
-    }
-
-    // Pass 2: Remove unmarked blocks (see analyze() above).
-    if (!removeUnmarkedBlocksAndClearDominators())
-        return false;
-    graph_.unmarkBlocks();
-
-    AssertGraphCoherency(graph_);
-
-    IonSpewPass("UCE-mid-point");
-
-    // Pass 3: Recompute dominators and tweak phis.
-    BuildDominatorTree(graph_);
-    if (redundantPhis_ && !EliminatePhis(mir_, graph_, ConservativeObservability))
-        return false;
-
-    // Pass 4: Rerun alias analysis
-    if (rerunAliasAnalysis_) {
-        AliasAnalysis analysis(mir_, graph_);
-        if (!analysis.analyze())
-            return false;
-    }
-
-    // Pass 5: It's important for optimizations to re-run GVN (and in
-    // turn alias analysis) after UCE if we eliminated branches.
-    if (rerunAliasAnalysis_ && mir_->optimizationInfo().gvnEnabled()) {
-        ValueNumberer gvn(mir_, graph_);
-        if (!gvn.run(rerunAliasAnalysis_
-                     ? ValueNumberer::UpdateAliasAnalysis
-                     : ValueNumberer::DontUpdateAliasAnalysis))
-            return false;
-        IonSpewPass("GVN-after-UCE");
-        AssertExtendedGraphCoherency(graph_);
-
-        if (mir_->shouldCancel("GVN-after-UCE"))
-            return false;
-    }
-
-    return true;
-}
-
-bool
-UnreachableCodeElimination::enqueue(MBasicBlock *block, BlockList &list)
-{
-    if (block->isMarked())
-        return true;
-
-    block->mark();
-    marked_++;
-    return list.append(block);
-}
-
-MBasicBlock *
-UnreachableCodeElimination::optimizableSuccessor(MBasicBlock *block)
-{
-    // If the last instruction in `block` is a test instruction of a
-    // constant value, returns the successor that the branch will
-    // always branch to at runtime. Otherwise, returns nullptr.
-
-    MControlInstruction *ins = block->lastIns();
-    if (!ins->isTest())
-        return nullptr;
-
-    MTest *testIns = ins->toTest();
-    MDefinition *v = testIns->getOperand(0);
-    if (!v->isConstant())
-        return nullptr;
-
-    BranchDirection bdir = v->toConstant()->valueToBoolean() ? TRUE_BRANCH : FALSE_BRANCH;
-    return testIns->branchSuccessor(bdir);
-}
-
-bool
-UnreachableCodeElimination::prunePointlessBranchesAndMarkReachableBlocks()
-{
-    BlockList worklist, optimizableBlocks;
-
-    // Process everything reachable from the start block, ignoring any
-    // OSR block.
-    if (!enqueue(graph_.entryBlock(), worklist))
-        return false;
-    while (!worklist.empty()) {
-        if (mir_->shouldCancel("Eliminate Unreachable Code"))
-            return false;
-
-        MBasicBlock *block = worklist.popCopy();
-
-        // If this block is a test on a constant operand, only enqueue
-        // the relevant successor. Also, remember the block for later.
-        if (MBasicBlock *succ = optimizableSuccessor(block)) {
-            if (!optimizableBlocks.append(block))
-                return false;
-            if (!enqueue(succ, worklist))
-                return false;
-        } else {
-            // Otherwise just visit all successors.
-            for (size_t i = 0; i < block->numSuccessors(); i++) {
-                MBasicBlock *succ = block->getSuccessor(i);
-                if (!enqueue(succ, worklist))
-                    return false;
-            }
-        }
-    }
-
-    // Now, if there is an OSR block, check that all of its successors
-    // were reachable (bug 880377). If not, we are in danger of
-    // creating a CFG with two disjoint parts, so simply mark all
-    // blocks as reachable. This generally occurs when the TI info for
-    // stack types is incorrect or incomplete, due to operations that
-    // have not yet executed in baseline.
-    if (graph_.osrBlock()) {
-        MBasicBlock *osrBlock = graph_.osrBlock();
-        JS_ASSERT(!osrBlock->isMarked());
-        if (!enqueue(osrBlock, worklist))
-            return false;
-        for (size_t i = 0; i < osrBlock->numSuccessors(); i++) {
-            if (!osrBlock->getSuccessor(i)->isMarked()) {
-                // OSR block has an otherwise unreachable successor, abort.
-                for (MBasicBlockIterator iter(graph_.begin()); iter != graph_.end(); iter++)
-                    iter->markUnchecked();
-                marked_ = graph_.numBlocks();
-                return true;
-            }
-        }
-    }
-
-    // Now that we know we will not abort due to OSR, go back and
-    // transform any tests on constant operands into gotos.
-    for (uint32_t i = 0; i < optimizableBlocks.length(); i++) {
-        MBasicBlock *block = optimizableBlocks[i];
-        MBasicBlock *succ = optimizableSuccessor(block);
-        JS_ASSERT(succ);
-
-        MGoto *gotoIns = MGoto::New(graph_.alloc(), succ);
-        block->discardLastIns();
-        block->end(gotoIns);
-        MBasicBlock *successorWithPhis = block->successorWithPhis();
-        if (successorWithPhis && successorWithPhis != succ)
-            block->setSuccessorWithPhis(nullptr, 0);
-    }
-
-    return true;
-}
-
-void
-UnreachableCodeElimination::checkDependencyAndRemoveUsesFromUnmarkedBlocks(MDefinition *instr)
-{
-    // When the instruction depends on removed block,
-    // alias analysis needs to get rerun to have the right dependency.
-    if (!disableAliasAnalysis_ && instr->dependency() && !instr->dependency()->block()->isMarked())
-        rerunAliasAnalysis_ = true;
-
-    for (MUseIterator iter(instr->usesBegin()); iter != instr->usesEnd(); ) {
-        MUse *use = *iter++;
-        if (!use->consumer()->block()->isMarked()) {
-            instr->setUseRemovedUnchecked();
-            use->releaseProducer();
-        }
-    }
-}
-
-bool
-UnreachableCodeElimination::removeUnmarkedBlocksAndClearDominators()
-{
-    // Removes blocks that are not marked from the graph.  For blocks
-    // that *are* marked, clears the mark and adjusts the id to its
-    // new value.  Also adds blocks that are immediately reachable
-    // from an unmarked block to the frontier.
-
-    size_t id = marked_;
-    for (PostorderIterator iter(graph_.poBegin()); iter != graph_.poEnd();) {
-        if (mir_->shouldCancel("Eliminate Unreachable Code"))
-            return false;
-
-        MBasicBlock *block = *iter;
-        iter++;
-
-        // Unconditionally clear the dominators.  It's somewhat complex to
-        // adjust the values and relatively fast to just recompute.
-        block->clearDominatorInfo();
-
-        if (block->isMarked()) {
-            block->setId(--id);
-            for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
-                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
-            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
-                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
-        } else {
-            for (size_t i = 0, c = block->numSuccessors(); i < c; i++) {
-                MBasicBlock *succ = block->getSuccessor(i);
-                if (succ->isMarked()) {
-                    // succ is on the frontier of blocks to be removed:
-                    succ->removePredecessor(block);
-
-                    if (!redundantPhis_) {
-                        for (MPhiIterator iter(succ->phisBegin()); iter != succ->phisEnd(); iter++) {
-                            if (iter->operandIfRedundant()) {
-                                redundantPhis_ = true;
-                                break;
-                            }
-                        }
-                    }
-                }
-            }
-
-            graph_.removeBlock(block);
-        }
-    }
-
-    JS_ASSERT(id == 0);
-
-    return true;
-}
deleted file mode 100644
--- a/js/src/jit/UnreachableCodeElimination.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_UnreachableCodeElimination_h
-#define jit_UnreachableCodeElimination_h
-
-#include "jit/MIRGraph.h"
-
-namespace js {
-namespace jit {
-
-class MIRGraph;
-
-class UnreachableCodeElimination
-{
-    typedef Vector<MBasicBlock *, 16, SystemAllocPolicy> BlockList;
-
-    MIRGenerator *mir_;
-    MIRGraph &graph_;
-    uint32_t marked_;
-    bool redundantPhis_;
-    bool rerunAliasAnalysis_;
-    bool disableAliasAnalysis_;
-
-    bool prunePointlessBranchesAndMarkReachableBlocks();
-    void checkDependencyAndRemoveUsesFromUnmarkedBlocks(MDefinition *instr);
-    bool removeUnmarkedBlocksAndClearDominators();
-    bool removeUnmarkedBlocksAndCleanup();
-
-    bool enqueue(MBasicBlock *block, BlockList &list);
-    MBasicBlock *optimizableSuccessor(MBasicBlock *block);
-
-  public:
-    UnreachableCodeElimination(MIRGenerator *mir, MIRGraph &graph)
-      : mir_(mir),
-        graph_(graph),
-        marked_(0),
-        redundantPhis_(false),
-        rerunAliasAnalysis_(false),
-        disableAliasAnalysis_(false)
-    {}
-
-    // Walks the graph and discovers what is reachable. Removes everything else.
-    bool analyze();
-
-    // Removes any blocks that are not marked.  Assumes that these blocks are not
-    // reachable.  The parameter |marked| should be the number of blocks that
-    // are marked.
-    bool removeUnmarkedBlocks(size_t marked);
-
-    // Call this function to prevent alias analysis to run a second time if we
-    // do not need it.
-    void disableAliasAnalysis() {
-        disableAliasAnalysis_ = true;
-    }
-};
-
-} /* namespace jit */
-} /* namespace js */
-
-#endif /* jit_UnreachableCodeElimination_h */
--- a/js/src/jit/ValueNumbering.cpp
+++ b/js/src/jit/ValueNumbering.cpp
@@ -126,29 +126,30 @@ ValueNumberer::VisibleValues::clear()
 bool
 ValueNumberer::VisibleValues::has(const MDefinition *def) const
 {
     Ptr p = set_.lookup(def);
     return p && *p == def;
 }
 #endif
 
-// Test whether the value would be needed if it had no uses.
+// Test whether |def| would be needed if it had no uses.
 static bool
 DeadIfUnused(const MDefinition *def)
 {
     return !def->isEffectful() && !def->isGuard() && !def->isControlInstruction() &&
            (!def->isInstruction() || !def->toInstruction()->resumePoint());
 }
 
-// Test whether |def| is no longer needed.
+// Test whether |def| may be safely discarded, due to being dead or due to being
+// located in a basic block which has itself been marked for discarding.
 static bool
-IsDead(const MDefinition *def)
+IsDiscardable(const MDefinition *def)
 {
-    return !def->hasUses() && DeadIfUnused(def);
+    return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
 }
 
 // Call MDefinition::justReplaceAllUsesWith, and add some GVN-specific asserts.
 static void
 ReplaceAllUsesWith(MDefinition *from, MDefinition *to)
 {
     MOZ_ASSERT(from != to, "GVN shouldn't try to replace a value with itself");
     MOZ_ASSERT(from->type() == to->type(), "Def replacement has different type");
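
To make the DeadIfUnused/IsDiscardable distinction above concrete, here is a toy version of the two predicates (assumed field names; the real checks live on MDefinition): DeadIfUnused asks whether removing the def would be observable at all, while IsDiscardable additionally requires that nothing uses it, or that its whole block is already marked for removal.

    #include <cassert>

    struct Def {
        bool effectful = false;
        bool guard = false;
        bool control = false;
        bool hasResumePoint = false;
        bool hasUses = false;
        bool blockMarked = false; // block already queued for discarding
    };

    static bool DeadIfUnused(const Def &d)
    {
        return !d.effectful && !d.guard && !d.control && !d.hasResumePoint;
    }

    static bool IsDiscardable(const Def &d)
    {
        return !d.hasUses && (DeadIfUnused(d) || d.blockMarked);
    }

    int main()
    {
        Def pureUnused;                     // e.g. an unused add
        assert(IsDiscardable(pureUnused));
        Def store; store.effectful = true;  // observable even when unused
        assert(!IsDiscardable(store));
        store.blockMarked = true;           // unreachable block: goes anyway
        assert(IsDiscardable(store));
        return 0;
    }
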
@@ -190,16 +191,54 @@ ComputeNewDominator(MBasicBlock *block, 
             }
             now = next;
         }
     }
     MOZ_ASSERT(old != block || old != now, "Missed self-dominating block staying self-dominating");
     return now;
 }
 
+// Test for any defs which look potentially interesting to GVN.
+static bool
+BlockHasInterestingDefs(MBasicBlock *block)
+{
+    return !block->phisEmpty() || *block->begin() != block->lastIns();
+}
+
+// Walk up the dominator tree from |block| to the root and test for any defs
+// which look potentially interesting to GVN.
+static bool
+ScanDominatorsForDefs(MBasicBlock *block)
+{
+    for (MBasicBlock *i = block;;) {
+        if (BlockHasInterestingDefs(i))
+            return true;
+
+        MBasicBlock *immediateDominator = i->immediateDominator();
+        if (immediateDominator == i)
+            break;
+        i = immediateDominator;
+    }
+    return false;
+}
+
+// Walk up the dominator tree from |now| to |old| and test for any defs which
+// look potentially interesting to GVN.
+static bool
+ScanDominatorsForDefs(MBasicBlock *now, MBasicBlock *old)
+{
+    MOZ_ASSERT(old->dominates(now), "Refined dominator not dominated by old dominator");
+
+    for (MBasicBlock *i = now; i != old; i = i->immediateDominator()) {
+        if (BlockHasInterestingDefs(i))
+            return true;
+    }
+    return false;
+}
+
 // Given a block which has had predecessors removed but is still reachable, test
 // whether the block's new dominator will be closer than its old one and whether
 // it will expose potential optimization opportunities.
 static bool
 IsDominatorRefined(MBasicBlock *block)
 {
     MBasicBlock *old = block->immediateDominator();
     MBasicBlock *now = ComputeNewDominator(block, old);
@@ -211,200 +250,378 @@ IsDominatorRefined(MBasicBlock *block)
     if (*block->begin() == control && block->phisEmpty() && control->isGoto() &&
         !block->dominates(control->toGoto()->target()))
     {
         return false;
     }
 
     // We've computed block's new dominator. Test whether there are any
     // newly-dominating definitions which look interesting.
-    MOZ_ASSERT(old->dominates(now), "Refined dominator not dominated by old dominator");
-    for (MBasicBlock *i = now; i != old; i = i->immediateDominator()) {
-        if (!i->phisEmpty() || *i->begin() != i->lastIns())
-            return true;
+    if (block == old)
+        return block != now && ScanDominatorsForDefs(now);
+    MOZ_ASSERT(block != now, "Non-self-dominating block became self-dominating");
+    return ScanDominatorsForDefs(now, old);
+}
+
+// |def| has just had one of its users release it. If it's now dead, enqueue it
+// for discarding, otherwise just make note of it.
+bool
+ValueNumberer::handleUseReleased(MDefinition *def, UseRemovedOption useRemovedOption)
+{
+    if (IsDiscardable(def)) {
+        values_.forget(def);
+        if (!deadDefs_.append(def))
+            return false;
+    } else {
+        if (useRemovedOption == SetUseRemoved)
+            def->setUseRemovedUnchecked();
     }
-
-    return false;
+    return true;
 }
 
 // Discard |def| and anything in its use-def subtree which is no longer needed.
 bool
 ValueNumberer::discardDefsRecursively(MDefinition *def)
 {
     MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");
 
     return discardDef(def) && processDeadDefs();
 }
 
-// Assuming |phi| is dead, release its operands. If an operand which is not
-// dominated by |phi| becomes dead, push it to the discard worklist.
+// Assuming |resume| is unreachable, release its operands.
+// It might be nice to integrate this code with prepareForDiscard; however, GVN
+// needs it to call handleUseReleased so that it can observe when a definition
+// becomes unused, so it isn't trivial to do.
 bool
-ValueNumberer::releasePhiOperands(MPhi *phi, const MBasicBlock *phiBlock,
-                                  UseRemovedOption useRemovedOption)
+ValueNumberer::releaseResumePointOperands(MResumePoint *resume)
+{
+    for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
+        if (!resume->hasOperand(i))
+            continue;
+        MDefinition *op = resume->getOperand(i);
+        // TODO: We shouldn't leave discarded operands sitting around
+        // (bug 1055690).
+        if (op->isDiscarded())
+            continue;
+        resume->releaseOperand(i);
+
+        // We set the UseRemoved flag when removing resume point operands,
+        // because even though we may think we're certain that a particular
+        // branch might not be taken, the type information might be incomplete.
+        if (!handleUseReleased(op, SetUseRemoved))
+            return false;
+    }
+    return true;
+}
+
+// Assuming |phi| is dead, release and remove its operands. If an operand
+// becomes dead, push it to the discard worklist.
+bool
+ValueNumberer::releaseAndRemovePhiOperands(MPhi *phi)
 {
     // MPhi saves operands in a vector so we iterate in reverse.
     for (int o = phi->numOperands() - 1; o >= 0; --o) {
         MDefinition *op = phi->getOperand(o);
         phi->removeOperand(o);
-        if (IsDead(op) && !phiBlock->dominates(op->block())) {
-            if (!deadDefs_.append(op))
-                return false;
-        } else {
-            if (useRemovedOption == SetUseRemoved)
-                op->setUseRemovedUnchecked();
-        }
+        if (!handleUseReleased(op, DontSetUseRemoved))
+            return false;
     }
     return true;
 }
 
-// Assuming |ins| is dead, release its operands. If an operand becomes dead,
+// Assuming |def| is dead, release its operands. If an operand becomes dead,
 // push it to the discard worklist.
 bool
-ValueNumberer::releaseInsOperands(MInstruction *ins,
-                                  UseRemovedOption useRemovedOption)
+ValueNumberer::releaseOperands(MDefinition *def)
 {
-    for (size_t o = 0, e = ins->numOperands(); o < e; ++o) {
-        MDefinition *op = ins->getOperand(o);
-        ins->releaseOperand(o);
-        if (IsDead(op)) {
-            if (!deadDefs_.append(op))
-                return false;
-        } else {
-            if (useRemovedOption == SetUseRemoved)
-                op->setUseRemovedUnchecked();
-        }
+    for (size_t o = 0, e = def->numOperands(); o < e; ++o) {
+        MDefinition *op = def->getOperand(o);
+        def->releaseOperand(o);
+        if (!handleUseReleased(op, DontSetUseRemoved))
+            return false;
     }
     return true;
 }
 
 // Discard |def| and mine its operands for any subsequently dead defs.
 bool
-ValueNumberer::discardDef(MDefinition *def,
-                         UseRemovedOption useRemovedOption)
+ValueNumberer::discardDef(MDefinition *def)
 {
-    JitSpew(JitSpew_GVN, "      Discarding %s%u", def->opName(), def->id());
-    MOZ_ASSERT(IsDead(def), "Discarding non-dead definition");
-    MOZ_ASSERT(!values_.has(def), "Discarding an instruction still in the set");
+    JitSpew(JitSpew_GVN, "      Discarding %s %s%u",
+            def->block()->isMarked() ? "unreachable" : "dead",
+            def->opName(), def->id());
 
+#ifdef DEBUG
+    MOZ_ASSERT(def != nextDef_, "Invalidating the MDefinition iterator");
+    if (def->block()->isMarked()) {
+        MOZ_ASSERT(!def->hasUses(), "Discarding def that still has uses");
+    } else {
+        MOZ_ASSERT(IsDiscardable(def), "Discarding non-discardable definition");
+        MOZ_ASSERT(!values_.has(def), "Discarding a definition still in the set");
+    }
+#endif
+
+    MBasicBlock *block = def->block();
     if (def->isPhi()) {
         MPhi *phi = def->toPhi();
-        MBasicBlock *phiBlock = phi->block();
-        if (!releasePhiOperands(phi, phiBlock, useRemovedOption))
+        if (!releaseAndRemovePhiOperands(phi))
              return false;
-        MPhiIterator at(phiBlock->phisBegin(phi));
-        phiBlock->discardPhiAt(at);
+        MPhiIterator at(block->phisBegin(phi));
+        block->discardPhiAt(at);
     } else {
         MInstruction *ins = def->toInstruction();
-        if (!releaseInsOperands(ins, useRemovedOption))
+        if (MResumePoint *resume = ins->resumePoint()) {
+            if (!releaseResumePointOperands(resume))
+                return false;
+        }
+        if (!releaseOperands(ins))
              return false;
-        ins->block()->discardIgnoreOperands(ins);
+        block->discardIgnoreOperands(ins);
     }
+
+    // If that was the last definition in the block, it can be safely removed
+    // from the graph.
+    if (block->phisEmpty() && block->begin() == block->end()) {
+        MOZ_ASSERT(block->isMarked(), "Reachable block lacks at least a control instruction");
+
+        // As a special case, don't remove a block which is a dominator tree
+        // root so that we don't invalidate the iterator in visitGraph. We'll
+        // check for this and remove it later.
+        if (block->immediateDominator() != block) {
+            JitSpew(JitSpew_GVN, "      Block block%u is now empty; discarding", block->id());
+            graph_.removeBlock(block);
+            blocksRemoved_ = true;
+        } else {
+            JitSpew(JitSpew_GVN, "      Dominator root block%u is now empty; will discard later",
+                    block->id());
+        }
+    }
+
     return true;
 }
 
 // Recursively discard all the defs on the deadDefs_ worklist.
 bool
 ValueNumberer::processDeadDefs()
 {
+    MDefinition *nextDef = nextDef_;
     while (!deadDefs_.empty()) {
         MDefinition *def = deadDefs_.popCopy();
 
-        values_.forget(def);
+        // Don't invalidate the MDefinition iterator. This is what we're going
+        // to visit next, so we won't miss anything.
+        if (def == nextDef)
+            continue;
+
         if (!discardDef(def))
             return false;
     }
     return true;
 }
 
-// Remove an edge from the CFG. Return true if the block becomes unreachable.
+// Test whether |block|, which is a loop header, has any predecessors, other
+// than |loopPred| (the loop predecessor), that it does not dominate.
+static bool
+hasNonDominatingPredecessor(MBasicBlock *block, MBasicBlock *loopPred)
+{
+    MOZ_ASSERT(block->isLoopHeader());
+    MOZ_ASSERT(block->loopPredecessor() == loopPred);
+
+    for (uint32_t i = 0, e = block->numPredecessors(); i < e; ++i) {
+        MBasicBlock *pred = block->getPredecessor(i);
+        if (pred != loopPred && !block->dominates(pred))
+            return true;
+    }
+    return false;
+}
+
+// A loop is about to be made reachable only through an OSR entry into one of
+// its nested loops. Fix everything up.
 bool
-ValueNumberer::removePredecessor(MBasicBlock *block, MBasicBlock *pred)
+ValueNumberer::fixupOSROnlyLoop(MBasicBlock *block, MBasicBlock *backedge)
+{
+    // Create an empty and unreachable(!) block which jumps to |block|. This
+    // allows |block| to remain marked as a loop header, so we don't have to
+    // worry about moving a different block into place as the new loop header,
+    // which is hard, especially if the OSR is into a nested loop. Doing all
+    // that would produce slightly more optimal code, but this is so
+    // extraordinarily rare that it isn't worth the complexity.
+    MBasicBlock *fake = MBasicBlock::NewAsmJS(graph_, block->info(),
+                                              nullptr, MBasicBlock::NORMAL);
+    if (fake == nullptr)
+        return false;
+
+    graph_.insertBlockBefore(block, fake);
+    fake->setImmediateDominator(fake);
+    fake->addNumDominated(1);
+
+    // Create zero-input phis to use as inputs for any phis in |block|.
+    // Again, this is a little odd, but it's the least-odd thing we can do
+    // without significant complexity.
+    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
+        MPhi *phi = *iter;
+        MPhi *fakePhi = MPhi::New(graph_.alloc(), phi->type());
+        fake->addPhi(fakePhi);
+        if (!phi->addInputSlow(fakePhi))
+            return false;
+    }
+
+    fake->end(MGoto::New(graph_.alloc(), block));
+
+    if (!block->addPredecessorWithoutPhis(fake))
+        return false;
+
+    // Restore |backedge| as |block|'s loop backedge.
+    block->clearLoopHeader();
+    block->setLoopHeader(backedge);
+
+    JitSpew(JitSpew_GVN, "        Created fake block%u", fake->id());
+    return true;
+}
+
+// Remove the CFG edge between |pred| and |block|, after releasing the phi
+// operands on that edge and discarding any definitions consequently made dead.
+bool
+ValueNumberer::removePredecessorAndDoDCE(MBasicBlock *block, MBasicBlock *pred)
 {
+    MOZ_ASSERT(!block->isMarked(),
+               "Block marked unreachable should have predecessors removed already");
+
+    // Before removing the predecessor edge, scan the phi operands for that edge
+    // for dead code before they get removed.
+    if (!block->phisEmpty()) {
+        uint32_t index = pred->positionInPhiSuccessor();
+        for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
+            MPhi *phi = *iter;
+            MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed");
+
+            MDefinition *op = phi->getOperand(index);
+            if (op == phi)
+                continue;
+
+            // Set the operand to the phi itself rather than just releasing it
+            // because removePredecessor expects to have something to release.
+            phi->replaceOperand(index, phi);
+
+            if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs())
+                return false;
+        }
+    }
+
+    block->removePredecessor(pred);
+    return true;
+}
+
+// Remove the CFG edge between |pred| and |block|, and if this makes |block|
+// unreachable, mark it so, and remove the rest of its incoming edges too. And
+// discard any instructions made dead by the entailed release of any phi
+// operands.
+bool
+ValueNumberer::removePredecessorAndCleanUp(MBasicBlock *block, MBasicBlock *pred)
+{
+    MOZ_ASSERT(!block->isMarked(), "Removing predecessor on block already marked unreachable");
+
+    // We'll be removing a predecessor, so anything we know about phis in this
+    // block will be wrong.
+    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter)
+        values_.forget(*iter);
+
+    // If this is a loop header, test whether it will become an unreachable
+    // loop, or whether it needs special OSR-related fixups.
     bool isUnreachableLoop = false;
+    MBasicBlock *origBackedgeForOSRFixup = nullptr;
     if (block->isLoopHeader()) {
         if (block->loopPredecessor() == pred) {
-            // Discarding the entry into the loop makes the loop unreachable.
-            isUnreachableLoop = true;
-            JitSpew(JitSpew_GVN, "      Loop with header block%u is no longer reachable",
-                    block->id());
+            if (MOZ_UNLIKELY(hasNonDominatingPredecessor(block, pred))) {
+                JitSpew(JitSpew_GVN, "      "
+                        "Loop with header block%u is now only reachable through an "
+                        "OSR entry into the middle of the loop!!", block->id());
+                origBackedgeForOSRFixup = block->backedge();
+            } else {
+                // Deleting the entry into the loop makes the loop unreachable.
+                isUnreachableLoop = true;
+                JitSpew(JitSpew_GVN, "      "
+                        "Loop with header block%u is no longer reachable",
+                        block->id());
+            }
 #ifdef DEBUG
         } else if (block->hasUniqueBackedge() && block->backedge() == pred) {
             JitSpew(JitSpew_GVN, "      Loop with header block%u is no longer a loop",
                     block->id());
 #endif
         }
     }
 
-    // TODO: Removing a predecessor removes operands from phis, and these
-    // operands may become dead. We should detect this and discard them. In
-    // practice though, when this happens, we often end up re-running GVN for
-    // other reasons anyway.
-    block->removePredecessor(pred);
-    return block->numPredecessors() == 0 || isUnreachableLoop;
-}
+    // Actually remove the CFG edge.
+    if (!removePredecessorAndDoDCE(block, pred))
+        return false;
 
-// Discard |start| and any block in its dominator subtree.
-bool
-ValueNumberer::removeBlocksRecursively(MBasicBlock *start, const MBasicBlock *dominatorRoot)
-{
-    MOZ_ASSERT(start != graph_.entryBlock(), "Removing normal entry block");
-    MOZ_ASSERT(start != graph_.osrBlock(), "Removing OSR entry block");
+    // We've now edited the CFG; check to see if |block| became unreachable.
+    if (block->numPredecessors() == 0 || isUnreachableLoop) {
+        JitSpew(JitSpew_GVN, "      Disconnecting block%u", block->id());
+
+        // Remove |block| from its dominator parent's subtree. This is the only
+        // immediately-dominated-block information we need to update, because
+        // everything dominated by this block is about to be swept away.
+        MBasicBlock *parent = block->immediateDominator();
+        if (parent != block)
+            parent->removeImmediatelyDominatedBlock(block);
 
-    // Remove this block from its dominator parent's subtree. This is the only
-    // immediately-dominated-block information we need to manually update,
-    // because everything dominated by this block is about to be swept away.
-    MBasicBlock *parent = start->immediateDominator();
-    if (parent != start)
-        parent->removeImmediatelyDominatedBlock(start);
+        // Completely disconnect it from the CFG. We do this now rather than
+        // just doing it later when we arrive there in visitUnreachableBlock
+        // so that we don't leave a partially broken loop sitting around. This
+        // also lets visitUnreachableBlock assert that numPredecessors() == 0,
+        // which is a nice invariant.
+        if (block->isLoopHeader())
+            block->clearLoopHeader();
+        for (size_t i = 0, e = block->numPredecessors(); i < e; ++i) {
+            if (!removePredecessorAndDoDCE(block, block->getPredecessor(i)))
+                return false;
+        }
 
-    if (!unreachableBlocks_.append(start))
-        return false;
-    do {
-        MBasicBlock *block = unreachableBlocks_.popCopy();
-        if (block->isDead())
-            continue;
-
-        // If a block is removed while it is on the worklist, skip it.
-        for (size_t i = 0, e = block->numSuccessors(); i < e; ++i) {
-            MBasicBlock *succ = block->getSuccessor(i);
-            if (succ->isDead())
-                continue;
-            if (removePredecessor(succ, block)) {
-                if (!unreachableBlocks_.append(succ))
-                    return false;
-            } else if (!rerun_) {
-                if (!remainingBlocks_.append(succ))
+        // Clear out the resume point operands, as they can keep things live
+        // that don't appear to dominate them.
+        if (MResumePoint *resume = block->entryResumePoint()) {
+            if (!releaseResumePointOperands(resume) || !processDeadDefs())
+                return false;
+            if (MResumePoint *outer = block->outerResumePoint()) {
+                if (!releaseResumePointOperands(outer) || !processDeadDefs())
                     return false;
             }
+            for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; ) {
+                MInstruction *ins = *iter++;
+                nextDef_ = iter != end ? *iter : nullptr;
+                if (MResumePoint *resume = ins->resumePoint()) {
+                    if (!releaseResumePointOperands(resume) || !processDeadDefs())
+                        return false;
+                }
+            }
+        } else {
+#ifdef DEBUG
+            MOZ_ASSERT(block->outerResumePoint() == nullptr,
+                       "Outer resume point in block without an entry resume point");
+            for (MInstructionIterator iter(block->begin()), end(block->end());
+                 iter != end;
+                 ++iter)
+            {
+                MOZ_ASSERT(iter->resumePoint() == nullptr,
+                           "Instruction with resume point in block without entry resume point");
+            }
+#endif
         }
 
-#ifdef DEBUG
-        JitSpew(JitSpew_GVN, "    Discarding block%u%s%s%s", block->id(),
-                block->isLoopHeader() ? " (loop header)" : "",
-                block->isSplitEdge() ? " (split edge)" : "",
-                block->immediateDominator() == block ? " (dominator root)" : "");
-        for (MDefinitionIterator iter(block); iter; ++iter) {
-            MDefinition *def = *iter;
-            JitSpew(JitSpew_GVN, "      Discarding %s%u", def->opName(), def->id());
-        }
-        MControlInstruction *control = block->lastIns();
-        JitSpew(JitSpew_GVN, "      Discarding %s%u", control->opName(), control->id());
-#endif
-
-        // Keep track of how many blocks within dominatorRoot's tree have been discarded.
-        if (dominatorRoot->dominates(block))
-            ++numBlocksDiscarded_;
-
-        // TODO: Removing a block discards the phis, instructions, and resume
-        // points in the block, and their operands may become dead. We should
-        // detect this and discard them. In practice though, when this happens,
-        // we often end up re-running GVN for other reasons anyway (bug 1031412).
-        graph_.removeBlockIncludingPhis(block);
-        blocksRemoved_ = true;
-    } while (!unreachableBlocks_.empty());
+        // Use the mark to note that we've already removed all its predecessors,
+        // and we know it's unreachable.
+        block->mark();
+    } else if (MOZ_UNLIKELY(origBackedgeForOSRFixup != nullptr)) {
+        // The loop is now only reachable through OSR into its middle. Fix it
+        // up so that the CFG remains valid.
+        if (!fixupOSROnlyLoop(block, origBackedgeForOSRFixup))
+            return false;
+    }
 
     return true;
 }
 
 // Return a simplified form of |def|, if we can.
 MDefinition *
 ValueNumberer::simplified(MDefinition *def) const
 {
@@ -449,26 +666,29 @@ ValueNumberer::hasLeader(const MPhi *phi
 {
     if (VisibleValues::Ptr p = values_.findLeader(phi)) {
         const MDefinition *rep = *p;
         return rep != phi && rep->block()->dominates(phiBlock);
     }
     return false;
 }
 
-// Test whether there are any phis in |backedge|'s loop header which are newly
-// optimizable, as a result of optimizations done inside the loop. This is not a
-// sparse approach, but restarting is rare enough in practice. Termination is
-// ensured by discarding the phi triggering the iteration.
+// Test whether there are any phis in |header| which are newly optimizable, as a
+// result of optimizations done inside the loop. This is not a sparse approach,
+// but restarting is rare enough in practice. Termination is ensured by
+// discarding the phi triggering the iteration.
 bool
-ValueNumberer::loopHasOptimizablePhi(MBasicBlock *backedge) const
+ValueNumberer::loopHasOptimizablePhi(MBasicBlock *header) const
 {
+    // If the header is unreachable, don't bother re-optimizing it.
+    if (header->isMarked())
+        return false;
+
     // Rescan the phis for any that can be simplified, since they may be reading
     // values from backedges.
-    MBasicBlock *header = backedge->loopHeaderOfBackedge();
     for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd()); iter != end; ++iter) {
         MPhi *phi = *iter;
         MOZ_ASSERT(phi->hasUses(), "Missed an unused phi");
 
         if (phi->operandIfRedundant() || hasLeader(phi, header))
             return true; // Phi can be simplified.
     }
     return false;
@@ -476,17 +696,17 @@ ValueNumberer::loopHasOptimizablePhi(MBa
 
 // Visit |def|.
 bool
 ValueNumberer::visitDefinition(MDefinition *def)
 {
     // If this instruction has a dependency() into an unreachable block, we'll
     // need to update AliasAnalysis.
     const MDefinition *dep = def->dependency();
-    if (dep != nullptr && dep->block()->isDead()) {
+    if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
         JitSpew(JitSpew_GVN, "      AliasAnalysis invalidated");
         if (updateAliasAnalysis_ && !dependenciesBroken_) {
             // TODO: Recomputing alias-analysis could theoretically expose more
             // GVN opportunities.
             JitSpew(JitSpew_GVN, "        Will recompute!");
             dependenciesBroken_ = true;
         }
         // Clear its dependency for now, to protect foldsTo.
@@ -533,17 +753,17 @@ ValueNumberer::visitDefinition(MDefiniti
             // The node's congruentTo said |def| is congruent to |rep|, and it's
             // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
             // so we can clear |def|'s guard flag and let it be discarded.
             def->setNotGuardUnchecked();
 
             if (DeadIfUnused(def)) {
                 // discardDef should not add anything to the deadDefs, as the
                 // redundant operation should have the same input operands.
-                mozilla::DebugOnly<bool> r = discardDef(def, DontSetUseRemoved);
+                mozilla::DebugOnly<bool> r = discardDef(def);
                 MOZ_ASSERT(r, "discardDef shouldn't have tried to add anything to the worklist, "
                               "so it shouldn't have failed");
                 MOZ_ASSERT(deadDefs_.empty(),
                            "discardDef shouldn't have added anything to the worklist");
             }
             def = rep;
         }
     }
@@ -575,157 +795,252 @@ ValueNumberer::visitControlInstruction(M
     size_t oldNumSuccs = control->numSuccessors();
     size_t newNumSuccs = newControl->numSuccessors();
     if (newNumSuccs != oldNumSuccs) {
         MOZ_ASSERT(newNumSuccs < oldNumSuccs, "New control instruction has too many successors");
         for (size_t i = 0; i != oldNumSuccs; ++i) {
             MBasicBlock *succ = control->getSuccessor(i);
             if (HasSuccessor(newControl, succ))
                 continue;
-            if (removePredecessor(succ, block)) {
-                if (!removeBlocksRecursively(succ, dominatorRoot))
-                    return false;
-            } else if (!rerun_) {
+            if (succ->isMarked())
+                continue;
+            if (!removePredecessorAndCleanUp(succ, block))
+                return false;
+            if (succ->isMarked())
+                continue;
+            if (!rerun_) {
                 if (!remainingBlocks_.append(succ))
                     return false;
             }
         }
     }
 
-    if (!releaseInsOperands(control))
+    if (!releaseOperands(control))
         return false;
     block->discardIgnoreOperands(control);
     block->end(newControl);
     return processDeadDefs();
 }
 
+// |block| is unreachable. Mine it for opportunities to delete more dead
+// code, and then discard it.
+bool
+ValueNumberer::visitUnreachableBlock(MBasicBlock *block)
+{
+    JitSpew(JitSpew_GVN, "    Visiting unreachable block%u%s%s%s", block->id(),
+            block->isLoopHeader() ? " (loop header)" : "",
+            block->isSplitEdge() ? " (split edge)" : "",
+            block->immediateDominator() == block ? " (dominator root)" : "");
+
+    MOZ_ASSERT(block->isMarked(), "Visiting unmarked (and therefore reachable?) block");
+    MOZ_ASSERT(block->numPredecessors() == 0, "Block marked unreachable still has predecessors");
+    MOZ_ASSERT(block != graph_.entryBlock(), "Removing normal entry block");
+    MOZ_ASSERT(block != graph_.osrBlock(), "Removing OSR entry block");
+    MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");
+
+    // Disconnect all outgoing CFG edges.
+    for (size_t i = 0, e = block->numSuccessors(); i < e; ++i) {
+        MBasicBlock *succ = block->getSuccessor(i);
+        if (succ->isDead() || succ->isMarked())
+            continue;
+        if (!removePredecessorAndCleanUp(succ, block))
+            return false;
+        if (succ->isMarked())
+            continue;
+        // |succ| is still reachable. Make a note of it so that we can scan
+        // it for interesting dominator tree changes later.
+        if (!rerun_) {
+            if (!remainingBlocks_.append(succ))
+                return false;
+        }
+    }
+
+    // Discard any instructions with no uses. The remaining instructions will be
+    // discarded when their last use is discarded.
+    for (MDefinitionIterator iter(block); iter; ) {
+        MDefinition *def = *iter++;
+        if (def->hasUses())
+            continue;
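+        // Remember where the iterator is so that the recursive discard below
+        // can't invalidate it.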
+        nextDef_ = iter ? *iter : nullptr;
+        if (!discardDefsRecursively(def))
+            return false;
+    }
+
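+    // Discard the control instruction last, along with anything that only it
+    // was keeping alive.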
+    nextDef_ = nullptr;
+    MControlInstruction *control = block->lastIns();
+    return discardDefsRecursively(control);
+}
+
 // Visit all the phis and instructions in |block|.
 bool
 ValueNumberer::visitBlock(MBasicBlock *block, const MBasicBlock *dominatorRoot)
 {
-    MOZ_ASSERT(!block->unreachable(), "Blocks marked unreachable during GVN");
+    MOZ_ASSERT(!block->isMarked(), "Blocks marked unreachable during GVN");
     MOZ_ASSERT(!block->isDead(), "Block to visit is already dead");
 
     JitSpew(JitSpew_GVN, "    Visiting block%u", block->id());
 
     // Visit the definitions in the block top-down.
     for (MDefinitionIterator iter(block); iter; ) {
         MDefinition *def = *iter++;
 
+        // Remember where our iterator is so that we don't invalidate it.
+        nextDef_ = iter ? *iter : nullptr;
+
         // If the definition is dead, discard it.
-        if (IsDead(def)) {
+        if (IsDiscardable(def)) {
             if (!discardDefsRecursively(def))
                 return false;
             continue;
         }
 
         if (!visitDefinition(def))
             return false;
     }
+    nextDef_ = nullptr;
 
     return visitControlInstruction(block, dominatorRoot);
 }
 
 // Visit all the blocks dominated by dominatorRoot.
 bool
-ValueNumberer::visitDominatorTree(MBasicBlock *dominatorRoot, size_t *totalNumVisited)
+ValueNumberer::visitDominatorTree(MBasicBlock *dominatorRoot)
 {
     JitSpew(JitSpew_GVN, "  Visiting dominator tree (with %llu blocks) rooted at block%u%s",
             uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
             dominatorRoot == graph_.entryBlock() ? " (normal entry block)" :
             dominatorRoot == graph_.osrBlock() ? " (OSR entry block)" :
-            " (normal entry and OSR entry merge point)");
-    MOZ_ASSERT(numBlocksDiscarded_ == 0, "numBlocksDiscarded_ wasn't reset");
+            dominatorRoot->numPredecessors() == 0 ? " (odd unreachable block)" :
+            " (merge point from normal entry and OSR entry)");
     MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
             "root is not a dominator tree root");
 
     // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
     // property that we'll always visit a block before any block it dominates,
     // so we can make a single pass through the list and see every full
     // redundancy.
     size_t numVisited = 0;
-    for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ++iter) {
+    size_t numDiscarded = 0;
+    for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ) {
         MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
-        MBasicBlock *block = *iter;
+        MBasicBlock *block = *iter++;
         // We're only visiting blocks in dominatorRoot's tree right now.
         if (!dominatorRoot->dominates(block))
             continue;
-        // Visit the block!
-        if (!visitBlock(block, dominatorRoot))
-            return false;
-        // If this was the end of a loop, check for optimization in the header.
-        if (!rerun_ && block->isLoopBackedge() && loopHasOptimizablePhi(block)) {
+
+        // If this is a loop backedge, remember the header, as we may not be able
+        // to find it after we simplify the block.
+        MBasicBlock *header = block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;
+
+        if (block->isMarked()) {
+            // This block has become unreachable; handle it specially.
+            if (!visitUnreachableBlock(block))
+                return false;
+            ++numDiscarded;
+        } else {
+            // Visit the block!
+            if (!visitBlock(block, dominatorRoot))
+                return false;
+            ++numVisited;
+        }
+
+        // If the block is/was a loop backedge, check to see if the block that
+        // is/was its header has optimizable phis, which would warrant a re-run.
+        if (!rerun_ && header && loopHasOptimizablePhi(header)) {
             JitSpew(JitSpew_GVN, "    Loop phi in block%u can now be optimized; will re-run GVN!",
-                    block->loopHeaderOfBackedge()->id());
+                    header->id());
             rerun_ = true;
             remainingBlocks_.clear();
         }
-        ++numVisited;
-        MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numBlocksDiscarded_,
+
+        MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
                    "Visited blocks too many times");
-        if (numVisited >= dominatorRoot->numDominated() - numBlocksDiscarded_)
+        if (numVisited >= dominatorRoot->numDominated() - numDiscarded)
             break;
     }
 
-    *totalNumVisited += numVisited;
+    totalNumVisited_ += numVisited;
     values_.clear();
-    numBlocksDiscarded_ = 0;
     return true;
 }
 
 // Visit all the blocks in the graph.
 bool
 ValueNumberer::visitGraph()
 {
     // Due to OSR blocks, the set of blocks dominated by a block may not be
     // contiguous in the RPO. Do a separate traversal for each dominator tree
     // root. There's always the main entry, and sometimes there's an OSR entry,
     // and then there are the roots formed where the OSR paths merge with the
     // main entry paths.
-    size_t totalNumVisited = 0;
-    for (ReversePostorderIterator iter(graph_.rpoBegin()); ; ++iter) {
-         MBasicBlock *block = *iter;
-         if (block->immediateDominator() == block) {
-             if (!visitDominatorTree(block, &totalNumVisited))
-                 return false;
-             MOZ_ASSERT(totalNumVisited <= graph_.numBlocks(), "Visited blocks too many times");
-             if (totalNumVisited >= graph_.numBlocks())
-                 break;
-         }
-         MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+    for (ReversePostorderIterator iter(graph_.rpoBegin()); ; ) {
+        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+        MBasicBlock *block = *iter;
+        if (block->immediateDominator() == block) {
+            if (!visitDominatorTree(block))
+                return false;
+
+            // Normally unreachable blocks would be removed by now, but if this
+            // block is a dominator tree root, it has been special-cased and left
+            // in place in order to avoid invalidating our iterator. Now that
+            // we've finished the tree, increment the iterator, and then, if
+            // the block is marked for removal, remove it.
+            ++iter;
+            if (block->isMarked()) {
+                JitSpew(JitSpew_GVN, "      Discarding dominator root block%u",
+                        block->id());
+                MOZ_ASSERT(block->begin() == block->end(),
+                           "Unreachable dominator tree root has instructions after tree walk");
+                MOZ_ASSERT(block->phisEmpty(),
+                           "Unreachable dominator tree root has phis after tree walk");
+                graph_.removeBlock(block);
+                blocksRemoved_ = true;
+            }
+
+            MOZ_ASSERT(totalNumVisited_ <= graph_.numBlocks(), "Visited blocks too many times");
+            if (totalNumVisited_ >= graph_.numBlocks())
+                break;
+        } else {
+            // This block is not a dominator tree root. Proceed to the next one.
+            ++iter;
+        }
     }
+    totalNumVisited_ = 0;
     return true;
 }
 
 ValueNumberer::ValueNumberer(MIRGenerator *mir, MIRGraph &graph)
   : mir_(mir), graph_(graph),
     values_(graph.alloc()),
     deadDefs_(graph.alloc()),
-    unreachableBlocks_(graph.alloc()),
     remainingBlocks_(graph.alloc()),
-    numBlocksDiscarded_(0),
+    nextDef_(nullptr),
+    totalNumVisited_(0),
     rerun_(false),
     blocksRemoved_(false),
     updateAliasAnalysis_(false),
     dependenciesBroken_(false)
 {}
 
 bool
-ValueNumberer::run(UpdateAliasAnalysisFlag updateAliasAnalysis)
+ValueNumberer::init()
 {
-    updateAliasAnalysis_ = updateAliasAnalysis == UpdateAliasAnalysis;
-
     // Initialize the value set. It's tempting to pass in a size here of some
     // function of graph_.getNumInstructionIds(); however, if we start out with a
     // large capacity, it will be far larger than the actual element count for
     // most of the pass, so when we remove elements, it would often think it
     // needs to compact itself. Empirically, just letting the HashTable grow as
     // needed on its own seems to work pretty well.
-    if (!values_.init())
-        return false;
+    return values_.init();
+}
+
+bool
+ValueNumberer::run(UpdateAliasAnalysisFlag updateAliasAnalysis)
+{
+    updateAliasAnalysis_ = updateAliasAnalysis == UpdateAliasAnalysis;
 
     JitSpew(JitSpew_GVN, "Running GVN on graph (with %llu blocks)",
             uint64_t(graph_.numBlocks()));
 
     // Top level non-sparse iteration loop. If an iteration performs a
     // significant change, such as discarding a block which changes the
     // dominator tree and may enable more optimization, this loop takes another
     // iteration.
@@ -757,26 +1072,27 @@ ValueNumberer::run(UpdateAliasAnalysisFl
 
         if (mir_->shouldCancel("GVN (outer loop)"))
             return false;
 
         // If no further opportunities have been discovered, we're done.
         if (!rerun_)
             break;
 
-        JitSpew(JitSpew_GVN, "Re-running GVN on graph (run %d, now with %llu blocks)",
-                runs, uint64_t(graph_.numBlocks()));
         rerun_ = false;
 
         // Enforce an arbitrary iteration limit. This is rarely reached, and
         // isn't even strictly necessary, as the algorithm is guaranteed to
         // terminate on its own in a finite amount of time (since every time we
         // re-run we discard the construct which triggered the re-run), but it
         // does help avoid slow compile times on pathological code.
         ++runs;
         if (runs == 6) {
-            JitSpew(JitSpew_GVN, "Re-run cutoff reached. Terminating GVN!");
+            JitSpew(JitSpew_GVN, "Re-run cutoff of %d reached. Terminating GVN!", runs);
             break;
         }
+
+        JitSpew(JitSpew_GVN, "Re-running GVN on graph (run %d, now with %llu blocks)",
+                runs, uint64_t(graph_.numBlocks()));
     }
 
     return true;
 }
--- a/js/src/jit/ValueNumbering.h
+++ b/js/src/jit/ValueNumbering.h
@@ -14,16 +14,17 @@ namespace js {
 namespace jit {
 
 class MDefinition;
 class MBasicBlock;
 class MIRGraph;
 class MPhi;
 class MInstruction;
 class MIRGenerator;
+class MResumePoint;
 
 class ValueNumberer
 {
     // Value numbering data.
     class VisibleValues
     {
         // Hash policy for ValueSet.
         struct ValueHasher
@@ -59,54 +60,56 @@ class ValueNumberer
 
     typedef Vector<MBasicBlock *, 4, IonAllocPolicy> BlockWorklist;
     typedef Vector<MDefinition *, 4, IonAllocPolicy> DefWorklist;
 
     MIRGenerator *const mir_;
     MIRGraph &graph_;
     VisibleValues values_;            // Numbered values
     DefWorklist deadDefs_;            // Worklist for deleting values
-    BlockWorklist unreachableBlocks_; // Worklist for unreachable blocks
     BlockWorklist remainingBlocks_;   // Blocks remaining with fewer preds
-    size_t numBlocksDiscarded_;       // Num discarded blocks in current tree
+    MDefinition *nextDef_;            // The next definition to visit; don't discard it
+    size_t totalNumVisited_;          // The number of blocks visited
     bool rerun_;                      // Should we run another GVN iteration?
     bool blocksRemoved_;              // Have any blocks been removed?
     bool updateAliasAnalysis_;        // Do we care about AliasAnalysis?
     bool dependenciesBroken_;         // Have we broken AliasAnalysis?
 
     enum UseRemovedOption {
         DontSetUseRemoved,
         SetUseRemoved
     };
 
+    bool handleUseReleased(MDefinition *def, UseRemovedOption useRemovedOption);
     bool discardDefsRecursively(MDefinition *def);
-    bool releasePhiOperands(MPhi *phi, const MBasicBlock *phiBlock,
-                            UseRemovedOption useRemovedOption = SetUseRemoved);
-    bool releaseInsOperands(MInstruction *ins,
-                            UseRemovedOption useRemovedOption = SetUseRemoved);
-    bool discardDef(MDefinition *def,
-                    UseRemovedOption useRemovedOption = SetUseRemoved);
+    bool releaseResumePointOperands(MResumePoint *resume);
+    bool releaseAndRemovePhiOperands(MPhi *phi);
+    bool releaseOperands(MDefinition *def);
+    bool discardDef(MDefinition *def);
     bool processDeadDefs();
 
-    bool removePredecessor(MBasicBlock *block, MBasicBlock *pred);
-    bool removeBlocksRecursively(MBasicBlock *block, const MBasicBlock *root);
+    bool fixupOSROnlyLoop(MBasicBlock *block, MBasicBlock *backedge);
+    bool removePredecessorAndDoDCE(MBasicBlock *block, MBasicBlock *pred);
+    bool removePredecessorAndCleanUp(MBasicBlock *block, MBasicBlock *pred);
 
     MDefinition *simplified(MDefinition *def) const;
     MDefinition *leader(MDefinition *def);
     bool hasLeader(const MPhi *phi, const MBasicBlock *phiBlock) const;
-    bool loopHasOptimizablePhi(MBasicBlock *backedge) const;
+    bool loopHasOptimizablePhi(MBasicBlock *header) const;
 
     bool visitDefinition(MDefinition *def);
     bool visitControlInstruction(MBasicBlock *block, const MBasicBlock *root);
+    bool visitUnreachableBlock(MBasicBlock *block);
     bool visitBlock(MBasicBlock *block, const MBasicBlock *root);
-    bool visitDominatorTree(MBasicBlock *root, size_t *totalNumVisited);
+    bool visitDominatorTree(MBasicBlock *root);
     bool visitGraph();
 
   public:
     ValueNumberer(MIRGenerator *mir, MIRGraph &graph);
+    bool init();
 
     enum UpdateAliasAnalysisFlag {
         DontUpdateAliasAnalysis,
         UpdateAliasAnalysis
     };
 
     // Optimize the graph, performing expression simplification and
     // canonicalization, eliminating statically fully-redundant expressions,
--- a/js/src/jsapi-tests/moz.build
+++ b/js/src/jsapi-tests/moz.build
@@ -80,16 +80,17 @@ UNIFIED_SOURCES += [
     'testWeakMap.cpp',
     'testXDR.cpp',
 ]
 
 if CONFIG['ENABLE_ION']:
     UNIFIED_SOURCES += [
         'testJitDCEinGVN.cpp',
         'testJitFoldsTo.cpp',
+        'testJitGVN.cpp',
         'testJitMoveEmitterCycles.cpp',
         'testJitRValueAlloc.cpp',
     ]
 
 DEFINES['EXPORT_JS_API'] = True
 # Building against js_static requires that we declare mfbt symbols "exported"
 # on its behalf.
 DEFINES['IMPL_MFBT'] = True
new file mode 100644
--- /dev/null
+++ b/js/src/jsapi-tests/testJitGVN.cpp
@@ -0,0 +1,213 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/IonAnalysis.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/RangeAnalysis.h"
+#include "jit/ValueNumbering.h"
+
+#include "jsapi-tests/testJitMinimalFunc.h"
+#include "jsapi-tests/tests.h"
+
+using namespace js;
+using namespace js::jit;
+
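+// Skip forward across empty blocks ending in plain gotos, returning the first
+// block with phis or a non-goto terminator. GVN is free to leave such trivial
+// gotos behind, so the tests use this to locate the surviving loop headers.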
+static MBasicBlock *
+FollowTrivialGotos(MBasicBlock *block)
+{
+    while (block->phisEmpty() && *block->begin() == block->lastIns() && block->lastIns()->isGoto())
+        block = block->lastIns()->toGoto()->getSuccessor(0);
+    return block;
+}
+
+BEGIN_TEST(testJitGVN_FixupOSROnlyLoop)
+{
+    // This is a testcase which constructs the very rare circumstances that
+    // require the fixupOSROnlyLoop logic.
+
+    MinimalFunc func;
+
+    MBasicBlock *entry = func.createEntryBlock();
+    MBasicBlock *osrEntry = func.createOsrEntryBlock();
+    MBasicBlock *outerHeader = func.createBlock(entry);
+    MBasicBlock *merge = func.createBlock(outerHeader);
+    MBasicBlock *innerHeader = func.createBlock(merge);
+    MBasicBlock *innerBackedge = func.createBlock(innerHeader);
+    MBasicBlock *outerBackedge = func.createBlock(innerHeader);
+    MBasicBlock *exit = func.createBlock(outerHeader);
+
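+    // Edges constructed below: entry -> {outerHeader, exit};
+    // osrEntry -> merge; outerHeader -> {merge, exit}; merge -> innerHeader;
+    // innerHeader -> {innerBackedge, outerBackedge}; innerBackedge ->
+    // innerHeader; outerBackedge -> outerHeader. entry's branch condition is
+    // a constant false, so GVN folds it away and the loops remain reachable
+    // only through osrEntry.
+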
+    MConstant *c = MConstant::New(func.alloc, BooleanValue(false));
+    entry->add(c);
+    entry->end(MTest::New(func.alloc, c, outerHeader, exit));
+    osrEntry->end(MGoto::New(func.alloc, merge));
+
+    merge->end(MGoto::New(func.alloc, innerHeader));
+
+    // Use Beta nodes to hide the constants and suppress folding.
+    MConstant *x = MConstant::New(func.alloc, BooleanValue(false));
+    outerHeader->add(x);
+    MBeta *xBeta = MBeta::New(func.alloc, x, Range::NewInt32Range(func.alloc, 0, 1));
+    outerHeader->add(xBeta);
+    outerHeader->end(MTest::New(func.alloc, xBeta, merge, exit));
+
+    MConstant *y = MConstant::New(func.alloc, BooleanValue(false));
+    innerHeader->add(y);
+    MBeta *yBeta = MBeta::New(func.alloc, y, Range::NewInt32Range(func.alloc, 0, 1));
+    innerHeader->add(yBeta);
+    innerHeader->end(MTest::New(func.alloc, yBeta, innerBackedge, outerBackedge));
+
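+    // A guarded no-op gives the inner loop body an instruction that DCE is
+    // not allowed to remove.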
+    MNop *anchor = MNop::New(func.alloc);
+    anchor->setGuard();
+    innerBackedge->add(anchor);
+    innerBackedge->end(MGoto::New(func.alloc, innerHeader));
+    outerBackedge->end(MGoto::New(func.alloc, outerHeader));
+
+    MConstant *u = MConstant::New(func.alloc, UndefinedValue());
+    exit->add(u);
+    exit->end(MReturn::New(func.alloc, u));
+
+    innerHeader->addPredecessorWithoutPhis(innerBackedge);
+    outerHeader->addPredecessorWithoutPhis(outerBackedge);
+    exit->addPredecessorWithoutPhis(entry);
+    merge->addPredecessorWithoutPhis(osrEntry);
+
+    outerHeader->setLoopHeader(outerBackedge);
+    innerHeader->setLoopHeader(innerBackedge);
+
+    if (!func.runGVN())
+        return false;
+
+    // The loops are no longer reachable from the normal entry. They are
+    // dominated by the osrEntry.
+    MOZ_ASSERT(func.graph.osrBlock() == osrEntry);
+    MBasicBlock *newInner = FollowTrivialGotos(osrEntry->lastIns()->toGoto()->target());
+    MBasicBlock *newOuter = FollowTrivialGotos(newInner->lastIns()->toTest()->ifFalse());
+    MBasicBlock *newExit = FollowTrivialGotos(entry);
+    MOZ_ASSERT(newInner->isLoopHeader());
+    MOZ_ASSERT(newOuter->isLoopHeader());
+    MOZ_ASSERT(newExit->lastIns()->isReturn());
+
+    // One more time.
+    ClearDominatorTree(func.graph);
+    if (!func.runGVN())
+        return false;
+
+    // The loops are no longer reachable from the normal entry. They are
+    // dominated by the osrEntry.
+    MOZ_ASSERT(func.graph.osrBlock() == osrEntry);
+    newInner = FollowTrivialGotos(osrEntry->lastIns()->toGoto()->target());
+    newOuter = FollowTrivialGotos(newInner->lastIns()->toTest()->ifFalse());
+    newExit = FollowTrivialGotos(entry);
+    MOZ_ASSERT(newInner->isLoopHeader());
+    MOZ_ASSERT(newOuter->isLoopHeader());
+    MOZ_ASSERT(newExit->lastIns()->isReturn());
+
+    return true;
+}
+END_TEST(testJitGVN_FixupOSROnlyLoop)
+
+BEGIN_TEST(testJitGVN_FixupOSROnlyLoopNested)
+{
+    // Same as testJitGVN_FixupOSROnlyLoop but adds another level of loop
+    // nesting for added excitement.
+
+    MinimalFunc func;
+
+    MBasicBlock *entry = func.createEntryBlock();
+    MBasicBlock *osrEntry = func.createOsrEntryBlock();
+    MBasicBlock *outerHeader = func.createBlock(entry);
+    MBasicBlock *middleHeader = func.createBlock(outerHeader);
+    MBasicBlock *merge = func.createBlock(middleHeader);
+    MBasicBlock *innerHeader = func.createBlock(merge);
+    MBasicBlock *innerBackedge = func.createBlock(innerHeader);
+    MBasicBlock *middleBackedge = func.createBlock(innerHeader);
+    MBasicBlock *outerBackedge = func.createBlock(middleHeader);
+    MBasicBlock *exit = func.createBlock(outerHeader);
+
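+    // Same shape as the previous test, with a middle loop inserted between
+    // the outer loop and the OSR merge point: outerHeader -> middleHeader ->
+    // merge -> innerHeader, with a backedge from each loop body to its
+    // header. As before, entry's branch condition is a constant false.
+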
+    MConstant *c = MConstant::New(func.alloc, BooleanValue(false));
+    entry->add(c);
+    entry->end(MTest::New(func.alloc, c, outerHeader, exit));
+    osrEntry->end(MGoto::New(func.alloc, merge));
+
+    merge->end(MGoto::New(func.alloc, innerHeader));
+
+    // Use Beta nodes to hide the constants and suppress folding.
+    MConstant *x = MConstant::New(func.alloc, BooleanValue(false));
+    outerHeader->add(x);
+    MBeta *xBeta = MBeta::New(func.alloc, x, Range::NewInt32Range(func.alloc, 0, 1));
+    outerHeader->add(xBeta);
+    outerHeader->end(MTest::New(func.alloc, xBeta, middleHeader, exit));
+
+    MConstant *y = MConstant::New(func.alloc, BooleanValue(false));
+    middleHeader->add(y);
+    MBeta *yBeta = MBeta::New(func.alloc, y, Range::NewInt32Range(func.alloc, 0, 1));
+    middleHeader->add(yBeta);
+    middleHeader->end(MTest::New(func.alloc, yBeta, merge, outerBackedge));
+
+    MConstant *w = MConstant::New(func.alloc, BooleanValue(false));
+    innerHeader->add(w);
+    MBeta *wBeta = MBeta::New(func.alloc, w, Range::NewInt32Range(func.alloc, 0, 1));
+    innerHeader->add(wBeta);
+    innerHeader->end(MTest::New(func.alloc, wBeta, innerBackedge, middleBackedge));
+
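+    // As above, a guarded no-op keeps the inner loop body from being
+    // discarded as dead code.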
+    MNop *anchor = MNop::New(func.alloc);
+    anchor->setGuard();
+    innerBackedge->add(anchor);
+    innerBackedge->end(MGoto::New(func.alloc, innerHeader));
+    middleBackedge->end(MGoto::New(func.alloc, middleHeader));
+    outerBackedge->end(MGoto::New(func.alloc, outerHeader));
+
+    MConstant *u = MConstant::New(func.alloc, UndefinedValue());
+    exit->add(u);
+    exit->end(MReturn::New(func.alloc, u));
+
+    innerHeader->addPredecessorWithoutPhis(innerBackedge);
+    middleHeader->addPredecessorWithoutPhis(middleBackedge);
+    outerHeader->addPredecessorWithoutPhis(outerBackedge);
+    exit->addPredecessorWithoutPhis(entry);
+    merge->addPredecessorWithoutPhis(osrEntry);
+
+    outerHeader->setLoopHeader(outerBackedge);
+    middleHeader->setLoopHeader(middleBackedge);
+    innerHeader->setLoopHeader(innerBackedge);
+
+    if (!func.runGVN())
+        return false;
+
+    // The loops are no longer reachable from the normal entry. They are
+    // dominated by the osrEntry.
+    MOZ_ASSERT(func.graph.osrBlock() == osrEntry);
+    MBasicBlock *newInner = FollowTrivialGotos(osrEntry->lastIns()->toGoto()->target());
+    MBasicBlock *newMiddle = FollowTrivialGotos(newInner->lastIns()->toTest()->ifFalse());
+    MBasicBlock *newOuter = FollowTrivialGotos(newMiddle->lastIns()->toTest()->ifFalse());
+    MBasicBlock *newExit = FollowTrivialGotos(entry);
+    MOZ_ASSERT(newInner->isLoopHeader());
+    MOZ_ASSERT(newMiddle->isLoopHeader());
+    MOZ_ASSERT(newOuter->isLoopHeader());
+    MOZ_ASSERT(newExit->lastIns()->isReturn());
+
+    // One more time.
+    ClearDominatorTree(func.graph);
+    if (!func.runGVN())
+        return false;
+
+    // The loops are no longer reachable from the normal entry. They are
+    // dominated by the osrEntry.
+    MOZ_ASSERT(func.graph.osrBlock() == osrEntry);
+    newInner = FollowTrivialGotos(osrEntry->lastIns()->toGoto()->target());
+    newMiddle = FollowTrivialGotos(newInner->lastIns()->toTest()->ifFalse());
+    newOuter = FollowTrivialGotos(newMiddle->lastIns()->toTest()->ifFalse());
+    newExit = FollowTrivialGotos(entry);
+    MOZ_ASSERT(newInner->isLoopHeader());
+    MOZ_ASSERT(newMiddle->isLoopHeader());
+    MOZ_ASSERT(newOuter->isLoopHeader());
+    MOZ_ASSERT(newExit->lastIns()->isReturn());
+
+    return true;
+}
+END_TEST(testJitGVN_FixupOSROnlyLoopNested)
--- a/js/src/jsapi-tests/testJitMinimalFunc.h
+++ b/js/src/jsapi-tests/testJitMinimalFunc.h
@@ -38,16 +38,24 @@ struct MinimalFunc
 
     MBasicBlock *createEntryBlock()
     {
         MBasicBlock *block = MBasicBlock::NewAsmJS(graph, info, nullptr, MBasicBlock::NORMAL);
         graph.addBlock(block);
         return block;
     }
 
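+    // Create a block with no predecessors and register it as the graph's
+    // OSR entry point.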
+    MBasicBlock *createOsrEntryBlock()
+    {
+        MBasicBlock *block = MBasicBlock::NewAsmJS(graph, info, nullptr, MBasicBlock::NORMAL);
+        graph.addBlock(block);
+        graph.setOsrBlock(block);
+        return block;
+    }
+
     MBasicBlock *createBlock(MBasicBlock *pred)
     {
         MBasicBlock *block = MBasicBlock::NewAsmJS(graph, info, pred, MBasicBlock::NORMAL);
         graph.addBlock(block);
         return block;
     }
 
     MParameter *createParameter()
@@ -59,17 +67,21 @@ struct MinimalFunc
     bool runGVN()
     {
         if (!SplitCriticalEdges(graph))
             return false;
         if (!RenumberBlocks(graph))
             return false;
         if (!BuildDominatorTree(graph))
             return false;
+        if (!BuildPhiReverseMapping(graph))
+            return false;
         ValueNumberer gvn(&mir, graph);
+        if (!gvn.init())
+            return false;
         if (!gvn.run(ValueNumberer::DontUpdateAliasAnalysis))
             return false;
         return true;
     }
 };
 
 } // namespace jit
 } // namespace js
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -192,17 +192,16 @@ UNIFIED_SOURCES += [
     'jit/ScalarReplacement.cpp',
     'jit/shared/BaselineCompiler-shared.cpp',
     'jit/shared/CodeGenerator-shared.cpp',
     'jit/shared/Lowering-shared.cpp',
     'jit/Snapshots.cpp',
     'jit/StupidAllocator.cpp',
     'jit/TypedObjectPrediction.cpp',
     'jit/TypePolicy.cpp',
-    'jit/UnreachableCodeElimination.cpp',
     'jit/ValueNumbering.cpp',
     'jit/VMFunctions.cpp',
     'jsalloc.cpp',
     'jsapi.cpp',
     'jsbool.cpp',
     'jscntxt.cpp',
     'jscompartment.cpp',
     'jscrashreport.cpp',
--- a/js/src/vm/TraceLogging.cpp
+++ b/js/src/vm/TraceLogging.cpp
@@ -830,17 +830,16 @@ TraceLogging::lazyInit()
         enabledTextIds[TraceLogger::SplitCriticalEdges] = true;
         enabledTextIds[TraceLogger::RenumberBlocks] = true;
         enabledTextIds[TraceLogger::DominatorTree] = true;
         enabledTextIds[TraceLogger::PhiAnalysis] = true;
         enabledTextIds[TraceLogger::ApplyTypes] = true;
         enabledTextIds[TraceLogger::ParallelSafetyAnalysis] = true;
         enabledTextIds[TraceLogger::AliasAnalysis] = true;
         enabledTextIds[TraceLogger::GVN] = true;
-        enabledTextIds[TraceLogger::UCE] = true;
         enabledTextIds[TraceLogger::LICM] = true;
         enabledTextIds[TraceLogger::RangeAnalysis] = true;
         enabledTextIds[TraceLogger::LoopUnrolling] = true;
         enabledTextIds[TraceLogger::EffectiveAddressAnalysis] = true;
         enabledTextIds[TraceLogger::EliminateDeadCode] = true;
         enabledTextIds[TraceLogger::EdgeCaseAnalysis] = true;
         enabledTextIds[TraceLogger::EliminateRedundantChecks] = true;
         enabledTextIds[TraceLogger::GenerateLIR] = true;
--- a/js/src/vm/TraceLogging.h
+++ b/js/src/vm/TraceLogging.h
@@ -135,17 +135,16 @@ namespace jit {
     _(ScalarReplacement)                              \
     _(DominatorTree)                                  \
     _(PhiAnalysis)                                    \
     _(MakeLoopsContiguous)                            \
     _(ApplyTypes)                                     \
     _(ParallelSafetyAnalysis)                         \
     _(AliasAnalysis)                                  \
     _(GVN)                                            \
-    _(UCE)                                            \
     _(LICM)                                           \
     _(RangeAnalysis)                                  \
     _(LoopUnrolling)                                  \
     _(EffectiveAddressAnalysis)                       \
     _(EliminateDeadCode)                              \
     _(EdgeCaseAnalysis)                               \
     _(EliminateRedundantChecks)                       \
     _(GenerateLIR)                                    \