Bug 1004363 - IonMonkey: A new value-numbering implementation based on a dom-tree DFS. r=nbp
author      Dan Gohman <sunfish@mozilla.com>
date        Fri, 27 Jun 2014 10:38:44 -0700
changeset   191260 6f2c1e191d9decba8f2e70df1d3ef677b5455863
parent      191259 ef327b5ec34df79e9b6b8e4582ec146052fda530
child       191261 2700ebff7d64911ec97a62668508af24e3e5bdc6
push id     1
push user   root
push date   Mon, 20 Oct 2014 17:29:22 +0000
reviewers   nbp
bugs        1004363
milestone   33.0a1
js/public/HashTable.h
js/src/jit/AliasAnalysis.cpp
js/src/jit/EdgeCaseAnalysis.cpp
js/src/jit/Ion.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonAnalysis.h
js/src/jit/IonOptimizationLevels.cpp
js/src/jit/IonOptimizationLevels.h
js/src/jit/JitOptions.cpp
js/src/jit/JitOptions.h
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGraph.cpp
js/src/jit/MIRGraph.h
js/src/jit/UnreachableCodeElimination.cpp
js/src/jit/ValueNumbering.cpp
js/src/jit/ValueNumbering.h
js/src/shell/js.cpp
--- a/js/public/HashTable.h
+++ b/js/public/HashTable.h
@@ -474,16 +474,23 @@ class HashSet
     }
 
     // Infallibly rekey one entry, if present.
     void rekeyAs(const Lookup &old_lookup, const Lookup &new_lookup, const T &new_value) {
         if (Ptr p = lookup(old_lookup))
             impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
     }
 
+    // Infallibly rekey one entry with a new key that is equivalent.
+    void rekeyInPlace(Ptr p, const T &new_value)
+    {
+        MOZ_ASSERT(HashPolicy::match(*p, new_value));
+        impl.rekeyInPlace(p, new_value);
+    }
+
     // HashSet is movable
     HashSet(HashSet &&rhs) : impl(mozilla::Move(rhs.impl)) {}
     void operator=(HashSet &&rhs) {
         MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
         impl = mozilla::Move(rhs.impl);
     }
 
   private:
@@ -1624,15 +1631,23 @@ class HashTable : private AllocPolicy
     }
 
     void rekeyAndMaybeRehash(Ptr p, const Lookup &l, const Key &k)
     {
         rekeyWithoutRehash(p, l, k);
         checkOverRemoved();
     }
 
+    void rekeyInPlace(Ptr p, const Key &k)
+    {
+        MOZ_ASSERT(table);
+        mozilla::ReentrancyGuard g(*this);
+        MOZ_ASSERT(p.found());
+        HashPolicy::rekey(const_cast<Key &>(*p), const_cast<Key &>(k));
+    }
+
 #undef METER
 };
 
 }  // namespace detail
 }  // namespace js
 
 #endif  /* js_HashTable_h */
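
The new rekeyInPlace differs from rekeyAs in that it overwrites the stored key without re-bucketing, which is only safe when the new key is equivalent to the old one under HashPolicy (hence the match() assertion). A minimal standalone analogue of that idea (not part of the patch; it uses std::unordered_set and an invented Entry type in place of js::HashSet):

    #include <cassert>
    #include <cstdint>
    #include <unordered_set>

    // Elements are hashed and compared by |id| only, so two entries with the
    // same id but different |payload| are interchangeable as far as the table
    // is concerned.
    struct Entry { uint32_t id; const char *payload; };
    struct ById {
        size_t operator()(const Entry &e) const { return e.id; }
        bool operator()(const Entry &a, const Entry &b) const { return a.id == b.id; }
    };

    int main() {
        std::unordered_set<Entry, ById, ById> set;
        set.insert(Entry{7, "old"});

        Entry replacement{7, "new"};
        auto p = set.find(replacement);
        assert(p != set.end() && ById()(*p, replacement));
        // The analogue of rekeyInPlace: hash and equality are unchanged, so
        // the stored element can be overwritten where it sits, with no rehash.
        // (The patch does the same via HashPolicy::rekey and a const_cast.)
        const_cast<Entry &>(*p).payload = replacement.payload;
        return 0;
    }
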
--- a/js/src/jit/AliasAnalysis.cpp
+++ b/js/src/jit/AliasAnalysis.cpp
@@ -172,18 +172,17 @@ AliasAnalysis::analyze()
         if (!defs.append(firstIns))
             return false;
         if (!stores.append(Move(defs)))
             return false;
     }
 
     // Type analysis may have inserted new instructions. Since this pass depends
     // on the instruction number ordering, all instructions are renumbered.
-    // We start with 1 because some passes use 0 to denote failure.
-    uint32_t newId = 1;
+    uint32_t newId = 0;
 
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
         if (mir->shouldCancel("Alias Analysis (main loop)"))
             return false;
 
         if (block->isLoopHeader()) {
             IonSpew(IonSpew_Alias, "Processing loop header %d", block->id());
             loop_ = new(alloc()) LoopAliasInfo(alloc(), loop_, *block);
--- a/js/src/jit/EdgeCaseAnalysis.cpp
+++ b/js/src/jit/EdgeCaseAnalysis.cpp
@@ -16,17 +16,17 @@ EdgeCaseAnalysis::EdgeCaseAnalysis(MIRGe
   : mir(mir), graph(graph)
 {
 }
 
 bool
 EdgeCaseAnalysis::analyzeLate()
 {
     // Renumber definitions for NeedNegativeZeroCheck under analyzeEdgeCasesBackward.
-    uint32_t nextId = 1;
+    uint32_t nextId = 0;
 
     for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
         if (mir->shouldCancel("Analyze Late (first loop)"))
             return false;
         for (MDefinitionIterator iter(*block); iter; iter++) {
             iter->setId(nextId++);
             iter->analyzeEdgeCasesForward();
         }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -1390,18 +1390,18 @@ OptimizeMIR(MIRGenerator *mir)
             return false;
 
         if (mir->shouldCancel("Eliminate dead resume point operands"))
             return false;
     }
 
     if (mir->optimizationInfo().gvnEnabled()) {
         AutoTraceLog log(logger, TraceLogger::GVN);
-        ValueNumberer gvn(mir, graph, mir->optimizationInfo().gvnKind() == GVN_Optimistic);
-        if (!gvn.analyze())
+        ValueNumberer gvn(mir, graph);
+        if (!gvn.run(ValueNumberer::UpdateAliasAnalysis))
             return false;
         IonSpewPass("GVN");
         AssertExtendedGraphCoherency(graph);
 
         if (mir->shouldCancel("GVN"))
             return false;
     }
 
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/IonAnalysis.h"
 
+#include "jit/AliasAnalysis.h"
 #include "jit/BaselineInspector.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Ion.h"
 #include "jit/IonBuilder.h"
 #include "jit/IonOptimizationLevels.h"
 #include "jit/LIR.h"
 #include "jit/Lowering.h"
 #include "jit/MIRGraph.h"
@@ -1086,16 +1087,69 @@ jit::RenumberBlocks(MIRGraph &graph)
 {
     size_t id = 0;
     for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++)
         block->setId(id++);
 
     return true;
 }
 
+// A utility for code which deletes blocks. Renumber the remaining blocks,
+// recompute dominators, and optionally recompute AliasAnalysis dependencies.
+bool
+jit::AccountForCFGChanges(MIRGenerator *mir, MIRGraph &graph, bool updateAliasAnalysis)
+{
+    // Renumber the blocks and clear out the old dominator info.
+    size_t id = 0;
+    for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e; ++i) {
+        i->clearDominatorInfo();
+        i->setId(id++);
+    }
+
+    // Recompute dominator info.
+    if (!BuildDominatorTree(graph))
+        return false;
+
+    // If needed, update alias analysis dependencies.
+    if (updateAliasAnalysis) {
+        if (!AliasAnalysis(mir, graph).analyze())
+             return false;
+    }
+
+    AssertExtendedGraphCoherency(graph);
+    return true;
+}
+
+// Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
+// Alias analysis dependencies may be invalid after calling this function.
+bool
+jit::RemoveUnmarkedBlocks(MIRGenerator *mir, MIRGraph &graph, uint32_t numMarkedBlocks)
+{
+    // If all blocks are marked, the CFG is unmodified. Just clear the marks.
+    if (numMarkedBlocks == graph.numBlocks()) {
+        graph.unmarkBlocks();
+        return true;
+    }
+
+    for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();) {
+        MBasicBlock *block = *iter++;
+
+        if (block->isMarked()) {
+            block->unmark();
+            continue;
+        }
+
+        for (size_t i = 0, e = block->numSuccessors(); i != e; ++i)
+            block->getSuccessor(i)->removePredecessor(block);
+        graph.removeBlockIncludingPhis(block);
+    }
+
+    return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
+}
+
 // A Simple, Fast Dominance Algorithm by Cooper et al.
 // Modified to support empty intersections for OSR, and in RPO.
 static MBasicBlock *
 IntersectDominators(MBasicBlock *block1, MBasicBlock *block2)
 {
     MBasicBlock *finger1 = block1;
     MBasicBlock *finger2 = block2;
 
@@ -1554,17 +1608,23 @@ BoundsCheckHashIgnoreOffset(MBoundsCheck
     uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
     uintptr_t length = uintptr_t(check->length());
     return index ^ length;
 }
 
 static MBoundsCheck *
 FindDominatingBoundsCheck(BoundsCheckMap &checks, MBoundsCheck *check, size_t index)
 {
-    // See the comment in ValueNumberer::findDominatingDef.
+    // Since we are traversing the dominator tree in pre-order, when we
+    // are looking at the |index|-th block, the next numDominated() blocks
+    // we traverse are precisely the set of blocks that are dominated.
+    //
+    // So, this value is visible in all blocks if:
+    // index <= index + ins->block->numDominated()
+    // and becomes invalid after that.
     HashNumber hash = BoundsCheckHashIgnoreOffset(check);
     BoundsCheckMap::Ptr p = checks.lookup(hash);
     if (!p || index >= p->value().validEnd) {
         // We didn't find a dominating bounds check.
         BoundsCheckInfo info;
         info.check = check;
         info.validEnd = index + check->block()->numDominated();
 
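
The intended driving pattern for RemoveUnmarkedBlocks is a mark-and-sweep over the CFG: a pass marks every block it wants to keep, then hands the graph and the mark count to RemoveUnmarkedBlocks, which deletes the rest, clears the remaining marks, and fixes up block numbering and dominators via AccountForCFGChanges. A hypothetical caller (not taken from the patch; MyPass and isReachable are invented, and MBasicBlock::mark() is assumed to be the counterpart of the unmark() used above) might look like:

    bool
    MyPass::pruneUnreachable(MIRGenerator *mir, MIRGraph &graph)
    {
        uint32_t numMarked = 0;
        for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
            if (isReachable(*block)) {  // hypothetical predicate taking an MBasicBlock*
                block->mark();
                ++numMarked;
            }
        }
        // Deletes every unmarked block (fixing up predecessor lists), unmarks
        // the survivors, and renumbers blocks / rebuilds dominators.
        return RemoveUnmarkedBlocks(mir, graph, numMarked);
    }
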
--- a/js/src/jit/IonAnalysis.h
+++ b/js/src/jit/IonAnalysis.h
@@ -51,16 +51,22 @@ ApplyTypeInformation(MIRGenerator *mir, 
 
 bool
 MakeMRegExpHoistable(MIRGraph &graph);
 
 bool
 RenumberBlocks(MIRGraph &graph);
 
 bool
+AccountForCFGChanges(MIRGenerator *mir, MIRGraph &graph, bool updateAliasAnalysis);
+
+bool
+RemoveUnmarkedBlocks(MIRGenerator *mir, MIRGraph &graph, uint32_t numMarkedBlocks);
+
+bool
 BuildDominatorTree(MIRGraph &graph);
 
 bool
 BuildPhiReverseMapping(MIRGraph &graph);
 
 void
 AssertBasicGraphCoherency(MIRGraph &graph);
 
--- a/js/src/jit/IonOptimizationLevels.cpp
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -24,17 +24,16 @@ OptimizationInfo::initNormalOptimization
     level_ = Optimization_Normal;
 
     eaa_ = true;
     edgeCaseAnalysis_ = true;
     eliminateRedundantChecks_ = true;
     inlineInterpreted_ = true;
     inlineNative_ = true;
     gvn_ = true;
-    gvnKind_ = GVN_Optimistic;
     licm_ = true;
     uce_ = true;
     rangeAnalysis_ = true;
     autoTruncate_ = true;
     registerAllocator_ = RegisterAllocator_LSRA;
 
     inlineMaxTotalBytecodeLength_ = 1000;
     inliningMaxCallerBytecodeLength_ = 10000;
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -61,19 +61,16 @@ class OptimizationInfo
     bool inlineInterpreted_;
 
     // Toggles whether native scripts get inlined.
     bool inlineNative_;
 
     // Toggles whether global value numbering is used.
     bool gvn_;
 
-    // Toggles whether global value numbering is optimistic or pessimistic.
-    IonGvnKind gvnKind_;
-
     // Toggles whether loop invariant code motion is performed.
     bool licm_;
 
     // Toggles whether Unreachable Code Elimination is performed.
     bool uce_;
 
     // Toggles whether Range Analysis is used.
     bool rangeAnalysis_;
@@ -157,22 +154,16 @@ class OptimizationInfo
     bool edgeCaseAnalysisEnabled() const {
         return edgeCaseAnalysis_ && !js_JitOptions.disableEdgeCaseAnalysis;
     }
 
     bool eliminateRedundantChecksEnabled() const {
         return eliminateRedundantChecks_;
     }
 
-    IonGvnKind gvnKind() const {
-        if (!js_JitOptions.forceGvnKind)
-            return gvnKind_;
-        return js_JitOptions.forcedGvnKind;
-    }
-
     IonRegisterAllocator registerAllocator() const {
         if (!js_JitOptions.forceRegisterAllocator)
             return registerAllocator_;
         return js_JitOptions.forcedRegisterAllocator;
     }
 
     uint32_t smallFunctionMaxInlineDepth() const {
         return smallFunctionMaxInlineDepth_;
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -61,21 +61,16 @@ JitOptions::JitOptions()
     eagerCompilation = false;
 
     // Force how many invocation or loop iterations are needed before compiling
     // a function with the highest ionmonkey optimization level.
     // (i.e. OptimizationLevel_Normal)
     forceDefaultIonUsesBeforeCompile = false;
     forcedDefaultIonUsesBeforeCompile = 1000;
 
-    // Force the GVN kind to be optimistic or pessimistic instead of letting
-    // the optimization pass decide.
-    forceGvnKind = false;
-    forcedGvnKind = GVN_Optimistic;
-
     // Force the used register allocator instead of letting the
     // optimization pass decide.
     forceRegisterAllocator = false;
     forcedRegisterAllocator = RegisterAllocator_LSRA;
 
     // Toggles whether large scripts are rejected.
     limitScriptSize = true;
 
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -23,21 +23,16 @@ static const uint32_t MAX_MAIN_THREAD_LO
 
 // Possible register allocators which may be used.
 enum IonRegisterAllocator {
     RegisterAllocator_LSRA,
     RegisterAllocator_Backtracking,
     RegisterAllocator_Stupid
 };
 
-enum IonGvnKind {
-    GVN_Optimistic,
-    GVN_Pessimistic
-};
-
 struct JitOptions
 {
     bool checkGraphConsistency;
 #ifdef CHECK_OSIPOINT_REGISTERS
     bool checkOsiPointRegisters;
 #endif
     bool checkRangeAnalysis;
     bool compileTryCatch;
@@ -46,18 +41,16 @@ struct JitOptions
     bool disableInlining;
     bool disableEdgeCaseAnalysis;
     bool disableRangeAnalysis;
     bool disableUce;
     bool disableEaa;
     bool eagerCompilation;
     bool forceDefaultIonUsesBeforeCompile;
     uint32_t forcedDefaultIonUsesBeforeCompile;
-    bool forceGvnKind;
-    IonGvnKind forcedGvnKind;
     bool forceRegisterAllocator;
     IonRegisterAllocator forcedRegisterAllocator;
     bool limitScriptSize;
     bool osr;
     uint32_t baselineUsesBeforeCompile;
     uint32_t exceptionBailoutThreshold;
     uint32_t frequentBailoutThreshold;
     uint32_t maxStackArgs;
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -64,25 +64,16 @@ MDefinition::PrintOpcodeName(FILE *fp, M
 #undef NAME
     };
     const char *name = names[op];
     size_t len = strlen(name);
     for (size_t i = 0; i < len; i++)
         fprintf(fp, "%c", tolower(name[i]));
 }
 
-static inline bool
-EqualValues(bool useGVN, MDefinition *left, MDefinition *right)
-{
-    if (useGVN)
-        return left->valueNumber() == right->valueNumber();
-
-    return left == right;
-}
-
 static MConstant *
 EvaluateConstantOperands(TempAllocator &alloc, MBinaryInstruction *ins, bool *ptypeChange = nullptr)
 {
     MDefinition *left = ins->getOperand(0);
     MDefinition *right = ins->getOperand(1);
 
     if (!left->isConstant() || !right->isConstant())
         return nullptr;
@@ -143,27 +134,24 @@ EvaluateConstantOperands(TempAllocator &
     return MConstant::New(alloc, ret);
 }
 
 void
 MDefinition::printName(FILE *fp) const
 {
     PrintOpcodeName(fp, op());
     fprintf(fp, "%u", id());
-
-    if (valueNumber() != 0)
-        fprintf(fp, "-vn%u", valueNumber());
 }
 
 HashNumber
 MDefinition::valueHash() const
 {
     HashNumber out = op();
     for (size_t i = 0, e = numOperands(); i < e; i++) {
-        uint32_t valueNumber = getOperand(i)->valueNumber();
+        uint32_t valueNumber = getOperand(i)->id();
         out = valueNumber + (out << 6) + (out << 16) - out;
     }
     return out;
 }
 
 bool
 MDefinition::congruentIfOperandsEqual(const MDefinition *ins) const
 {
@@ -175,25 +163,25 @@ MDefinition::congruentIfOperandsEqual(co
 
     if (isEffectful() || ins->isEffectful())
         return false;
 
     if (numOperands() != ins->numOperands())
         return false;
 
     for (size_t i = 0, e = numOperands(); i < e; i++) {
-        if (getOperand(i)->valueNumber() != ins->getOperand(i)->valueNumber())
+        if (getOperand(i) != ins->getOperand(i))
             return false;
     }
 
     return true;
 }
 
 MDefinition *
-MDefinition::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MDefinition::foldsTo(TempAllocator &alloc)
 {
     // In the default case, there are no constants to fold.
     return this;
 }
 
 void
 MDefinition::analyzeEdgeCasesForward()
 {
@@ -241,17 +229,17 @@ MTest::cacheOperandMightEmulateUndefined
 {
     JS_ASSERT(operandMightEmulateUndefined());
 
     if (!MaybeEmulatesUndefined(getOperand(0)))
         markOperandCantEmulateUndefined();
 }
 
 MDefinition *
-MTest::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MTest::foldsTo(TempAllocator &alloc)
 {
     MDefinition *op = getOperand(0);
 
     if (op->isNot())
         return MTest::New(alloc, op->toNot()->operand(), ifFalse(), ifTrue());
 
     return this;
 }
@@ -803,17 +791,17 @@ MCallDOMNative::getJitInfo() const
 MApplyArgs *
 MApplyArgs::New(TempAllocator &alloc, JSFunction *target, MDefinition *fun, MDefinition *argc,
                 MDefinition *self)
 {
     return new(alloc) MApplyArgs(target, fun, argc, self);
 }
 
 MDefinition*
-MStringLength::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MStringLength::foldsTo(TempAllocator &alloc)
 {
     if ((type() == MIRType_Int32) && (string()->isConstant())) {
         Value value = string()->toConstant()->value();
         JSAtom *atom = &value.toString()->asAtom();
         return MConstant::New(alloc, Int32Value(atom->length()));
     }
 
     return this;
@@ -944,32 +932,41 @@ void
 MPhi::removeAllOperands()
 {
     for (size_t i = 0; i < inputs_.length(); i++)
         inputs_[i].discardProducer();
     inputs_.clear();
 }
 
 MDefinition *
-MPhi::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MPhi::operandIfRedundant()
 {
-    JS_ASSERT(!inputs_.empty());
-
+    JS_ASSERT(inputs_.length() != 0);
+
+    // If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)),
+    // returns the operand that it will always be equal to (a, in
+    // those two cases).
     MDefinition *first = getOperand(0);
-
-    for (size_t i = 1; i < inputs_.length(); i++) {
-        // Phis need dominator information to fold based on value numbers. For
-        // simplicity, we only compare SSA names right now (bug 714727).
-        if (!EqualValues(false, getOperand(i), first))
-            return this;
+    for (size_t i = 1, e = numOperands(); i < e; i++) {
+        MDefinition *op = getOperand(i);
+        if (op != first && op != this)
+            return nullptr;
     }
-
     return first;
 }
 
+MDefinition *
+MPhi::foldsTo(TempAllocator &alloc)
+{
+    if (MDefinition *def = operandIfRedundant())
+        return def;
+
+    return this;
+}
+
 bool
 MPhi::congruentTo(const MDefinition *ins) const
 {
     if (!ins->isPhi())
         return false;
 
     // Phis in different blocks may have different control conditions.
     // For example, these phis:
@@ -1219,17 +1216,17 @@ IsConstant(MDefinition *def, double v)
 {
     if (!def->isConstant())
         return false;
 
     return NumbersAreIdentical(def->toConstant()->value().toNumber(), v);
 }
 
 MDefinition *
-MBinaryBitwiseInstruction::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MBinaryBitwiseInstruction::foldsTo(TempAllocator &alloc)
 {
     if (specialization_ != MIRType_Int32)
         return this;
 
     if (MDefinition *folded = EvaluateConstantOperands(alloc, this))
         return folded;
 
     return this;
@@ -1254,17 +1251,17 @@ MBinaryBitwiseInstruction::foldUnnecessa
         return foldIfZero(1);
 
     if (IsConstant(lhs, -1))
         return foldIfNegOne(0);
 
     if (IsConstant(rhs, -1))
         return foldIfNegOne(1);
 
-    if (EqualValues(false, lhs, rhs))
+    if (lhs == rhs)
         return foldIfEqual();
 
     return this;
 }
 
 void
 MBinaryBitwiseInstruction::infer(BaselineInspector *, jsbytecode *)
 {
@@ -1411,17 +1408,17 @@ NeedNegativeZeroCheck(MDefinition *def)
           default:
             return true;
         }
     }
     return false;
 }
 
 MDefinition *
-MBinaryArithInstruction::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MBinaryArithInstruction::foldsTo(TempAllocator &alloc)
 {
     if (specialization_ == MIRType_None)
         return this;
 
     MDefinition *lhs = getOperand(0);
     MDefinition *rhs = getOperand(1);
     if (MDefinition *folded = EvaluateConstantOperands(alloc, this))
         return folded;
@@ -1478,17 +1475,17 @@ MAbs::trySpecializeFloat32(TempAllocator
         return;
     }
 
     setResultType(MIRType_Float32);
     specialization_ = MIRType_Float32;
 }
 
 MDefinition *
-MDiv::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MDiv::foldsTo(TempAllocator &alloc)
 {
     if (specialization_ == MIRType_None)
         return this;
 
     if (MDefinition *folded = EvaluateConstantOperands(alloc, this))
         return folded;
 
     return this;
@@ -1535,17 +1532,17 @@ MDiv::analyzeEdgeCasesBackward()
 
 bool
 MDiv::fallible() const
 {
     return !isTruncated();
 }
 
 MDefinition *
-MMod::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MMod::foldsTo(TempAllocator &alloc)
 {
     if (specialization_ == MIRType_None)
         return this;
 
     if (MDefinition *folded = EvaluateConstantOperands(alloc, this))
         return folded;
 
     return this;
@@ -1607,26 +1604,26 @@ MSub::fallible() const
     if (truncateKind() >= IndirectTruncate)
         return false;
     if (range() && range()->hasInt32Bounds())
         return false;
     return true;
 }
 
 MDefinition *
-MMul::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MMul::foldsTo(TempAllocator &alloc)
 {
-    MDefinition *out = MBinaryArithInstruction::foldsTo(alloc, useValueNumbers);
+    MDefinition *out = MBinaryArithInstruction::foldsTo(alloc);
     if (out != this)
         return out;
 
     if (specialization() != MIRType_Int32)
         return this;
 
-    if (EqualValues(useValueNumbers, lhs(), rhs()))
+    if (lhs() == rhs())
         setCanBeNegativeZero(false);
 
     return this;
 }
 
 void
 MMul::analyzeEdgeCasesForward()
 {
@@ -2081,17 +2078,17 @@ MBitNot::NewAsmJS(TempAllocator &alloc, 
 {
     MBitNot *ins = new(alloc) MBitNot(input);
     ins->specialization_ = MIRType_Int32;
     JS_ASSERT(ins->type() == MIRType_Int32);
     return ins;
 }
 
 MDefinition *
-MBitNot::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MBitNot::foldsTo(TempAllocator &alloc)
 {
     if (specialization_ != MIRType_Int32)
         return this;
 
     MDefinition *input = getOperand(0);
 
     if (input->isConstant()) {
         js::Value v = Int32Value(~(input->toConstant()->value().toInt32()));
@@ -2102,17 +2099,17 @@ MBitNot::foldsTo(TempAllocator &alloc, b
         JS_ASSERT(input->toBitNot()->getOperand(0)->type() == MIRType_Int32);
         return input->toBitNot()->getOperand(0); // ~~x => x
     }
 
     return this;
 }
 
 MDefinition *
-MTypeOf::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MTypeOf::foldsTo(TempAllocator &alloc)
 {
     // Note: we can't use input->type() here, type analysis has
     // boxed the input.
     JS_ASSERT(input()->type() == MIRType_Value);
 
     JSType type;
 
     switch (inputType()) {
@@ -2330,49 +2327,49 @@ MResumePoint::isObservableOperand(MUse *
 
 bool
 MResumePoint::isObservableOperand(size_t index) const
 {
     return block()->info().isObservableSlot(index);
 }
 
 MDefinition *
-MToInt32::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MToInt32::foldsTo(TempAllocator &alloc)
 {
     MDefinition *input = getOperand(0);
     if (input->type() == MIRType_Int32)
         return input;
     return this;
 }
 
 void
 MToInt32::analyzeEdgeCasesBackward()
 {
     if (!NeedNegativeZeroCheck(this))
         setCanBeNegativeZero(false);
 }
 
 MDefinition *
-MTruncateToInt32::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MTruncateToInt32::foldsTo(TempAllocator &alloc)
 {
     MDefinition *input = getOperand(0);
     if (input->type() == MIRType_Int32)
         return input;
 
     if (input->type() == MIRType_Double && input->isConstant()) {
         const Value &v = input->toConstant()->value();
         int32_t ret = ToInt32(v.toDouble());
         return MConstant::New(alloc, Int32Value(ret));
     }
 
     return this;
 }
 
 MDefinition *
-MToDouble::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MToDouble::foldsTo(TempAllocator &alloc)
 {
     MDefinition *in = input();
     if (in->type() == MIRType_Double)
         return in;
 
     if (in->isConstant()) {
         const Value &v = in->toConstant()->value();
         if (v.isNumber()) {
@@ -2380,17 +2377,17 @@ MToDouble::foldsTo(TempAllocator &alloc,
             return MConstant::New(alloc, DoubleValue(out));
         }
     }
 
     return this;
 }
 
 MDefinition *
-MToFloat32::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MToFloat32::foldsTo(TempAllocator &alloc)
 {
     if (input()->type() == MIRType_Float32)
         return input();
 
     // If x is a Float32, Float32(Double(x)) == x
     if (input()->isToDouble() && input()->toToDouble()->input()->type() == MIRType_Float32)
         return input()->toToDouble()->input();
 
@@ -2402,26 +2399,26 @@ MToFloat32::foldsTo(TempAllocator &alloc
             c->setResultType(MIRType_Float32);
             return c;
         }
     }
     return this;
 }
 
 MDefinition *
-MToString::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MToString::foldsTo(TempAllocator &alloc)
 {
     MDefinition *in = input();
     if (in->type() == MIRType_String)
         return in;
     return this;
 }
 
 MDefinition *
-MClampToUint8::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MClampToUint8::foldsTo(TempAllocator &alloc)
 {
     if (input()->isConstant()) {
         const Value &v = input()->toConstant()->value();
         if (v.isDouble()) {
             int32_t clamped = ClampDoubleToUint8(v.toDouble());
             return MConstant::New(alloc, Int32Value(clamped));
         }
         if (v.isInt32()) {
@@ -2632,17 +2629,17 @@ MCompare::evaluateConstantOperands(bool 
       default:
         return false;
     }
 
     return true;
 }
 
 MDefinition *
-MCompare::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MCompare::foldsTo(TempAllocator &alloc)
 {
     bool result;
 
     if (tryFold(&result) || evaluateConstantOperands(&result)) {
         if (type() == MIRType_Int32)
             return MConstant::New(alloc, Int32Value(result));
 
         JS_ASSERT(type() == MIRType_Boolean);
@@ -2704,17 +2701,17 @@ MNot::cacheOperandMightEmulateUndefined(
 {
     JS_ASSERT(operandMightEmulateUndefined());
 
     if (!MaybeEmulatesUndefined(getOperand(0)))
         markOperandCantEmulateUndefined();
 }
 
 MDefinition *
-MNot::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MNot::foldsTo(TempAllocator &alloc)
 {
     // Fold if the input is constant
     if (operand()->isConstant()) {
         bool result = operand()->toConstant()->valueToBoolean();
         if (type() == MIRType_Int32)
             return MConstant::New(alloc, Int32Value(!result));
 
         // ToBoolean can't cause side effects, so this is safe.
@@ -3002,29 +2999,29 @@ MGetPropertyCache::setBlock(MBasicBlock 
 bool
 MGetPropertyCache::updateForReplacement(MDefinition *ins) {
     MGetPropertyCache *other = ins->toGetPropertyCache();
     location_.append(&other->location_);
     return true;
 }
 
 MDefinition *
-MAsmJSUnsignedToDouble::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MAsmJSUnsignedToDouble::foldsTo(TempAllocator &alloc)
 {
     if (input()->isConstant()) {
         const Value &v = input()->toConstant()->value();
         if (v.isInt32())
             return MConstant::New(alloc, DoubleValue(uint32_t(v.toInt32())));
     }
 
     return this;
 }
 
 MDefinition *
-MAsmJSUnsignedToFloat32::foldsTo(TempAllocator &alloc, bool useValueNumbers)
+MAsmJSUnsignedToFloat32::foldsTo(TempAllocator &alloc)
 {
     if (input()->isConstant()) {
         const Value &v = input()->toConstant()->value();
         if (v.isInt32()) {
             double dval = double(uint32_t(v.toInt32()));
             if (IsFloat32Representable(dval))
                 return MConstant::NewAsmJS(alloc, JS::Float32Value(float(dval)), MIRType_Float32);
         }
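
With explicit value numbers gone, congruence is keyed directly on the SSA id()s of the operands: valueHash mixes the operand ids with an sdbm-style step, and congruentIfOperandsEqual now requires the same opcode and pointer-identical operands. A small standalone sketch of that keying discipline (not the patch's code; Node, NodeHash, NodeCongruent and canonicalize are invented for illustration):

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    struct Node {
        uint32_t op;                   // opcode
        std::vector<Node *> operands;  // operands, already visited/numbered
        uint32_t id;                   // SSA id; doubles as the value number
    };

    struct NodeHash {
        size_t operator()(const Node *n) const {
            uint32_t out = n->op;
            for (Node *operand : n->operands)
                out = operand->id + (out << 6) + (out << 16) - out;  // same mix as valueHash
            return out;
        }
    };

    struct NodeCongruent {
        bool operator()(const Node *a, const Node *b) const {
            // Same opcode and pointer-identical operands, mirroring
            // congruentIfOperandsEqual after this patch.
            return a->op == b->op && a->operands == b->operands;
        }
    };

    // Returns the previously seen congruent node if there is one, else records |n|.
    Node *canonicalize(std::unordered_set<Node *, NodeHash, NodeCongruent> &values, Node *n) {
        return *values.insert(n).first;
    }

    int main() {
        std::unordered_set<Node *, NodeHash, NodeCongruent> values;
        Node a{0, {}, 1}, b{0, {}, 2};
        Node add1{5, {&a, &b}, 3}, add2{5, {&a, &b}, 4};
        canonicalize(values, &add1);
        return canonicalize(values, &add2) == &add1 ? 0 : 1;  // add2 dedupes to add1
    }
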
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -27,17 +27,16 @@
 
 namespace js {
 
 class StringObject;
 
 namespace jit {
 
 class BaselineInspector;
-class ValueNumberData;
 class Range;
 
 static inline
 MIRType MIRTypeFromValue(const js::Value &vp)
 {
     if (vp.isDouble())
         return MIRType_Double;
     if (vp.isMagic()) {
@@ -318,17 +317,16 @@ class MDefinition : public MNode
 #   undef DEFINE_OPCODES
         Op_Invalid
     };
 
   private:
     InlineList<MUse> uses_;        // Use chain.
     uint32_t id_;                  // Instruction ID, which after block re-ordering
                                    // is sorted within a basic block.
-    ValueNumberData *valueNumber_; // The instruction's value number (see GVN for details in use)
     Range *range_;                 // Any computed range for this def.
     MIRType resultType_;           // Representation of result type.
     types::TemporaryTypeSet *resultTypeSet_; // Optional refinement of the result type.
     uint32_t flags_;                 // Bit flags.
     union {
         MDefinition *dependency_;  // Implicit dependency (store, call, etc.) of this instruction.
                                    // Used by alias analysis, GVN and LICM.
         uint32_t virtualRegister_;   // Used by lowering to map definitions to virtual registers.
@@ -360,17 +358,16 @@ class MDefinition : public MNode
   protected:
     virtual void setBlock(MBasicBlock *block) {
         block_ = block;
     }
 
   public:
     MDefinition()
       : id_(0),
-        valueNumber_(nullptr),
         range_(nullptr),
         resultType_(MIRType_None),
         resultTypeSet_(nullptr),
         flags_(0),
         dependency_(nullptr),
         trackedSite_()
     { }
 
@@ -443,17 +440,17 @@ class MDefinition : public MNode
         range_ = range;
     }
 
     virtual HashNumber valueHash() const;
     virtual bool congruentTo(const MDefinition *ins) const {
         return false;
     }
     bool congruentIfOperandsEqual(const MDefinition *ins) const;
-    virtual MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    virtual MDefinition *foldsTo(TempAllocator &alloc);
     virtual void analyzeEdgeCasesForward();
     virtual void analyzeEdgeCasesBackward();
 
     // When a floating-point value is used by nodes which would prefer to
     // recieve integer inputs, we may be able to help by computing our result
     // into an integer directly.
     //
     // A value can be truncated in 4 differents ways:
@@ -506,42 +503,33 @@ class MDefinition : public MNode
     uint32_t id() const {
         JS_ASSERT(block_);
         return id_;
     }
     void setId(uint32_t id) {
         id_ = id;
     }
 
-    uint32_t valueNumber() const;
-    void setValueNumber(uint32_t vn);
-    ValueNumberData *valueNumberData() {
-        return valueNumber_;
-    }
-    void clearValueNumberData() {
-        valueNumber_ = nullptr;
-    }
-    void setValueNumberData(ValueNumberData *vn) {
-        JS_ASSERT(valueNumber_ == nullptr);
-        valueNumber_ = vn;
-    }
 #define FLAG_ACCESSOR(flag) \
     bool is##flag() const {\
         return hasFlags(1 << flag);\
     }\
     void set##flag() {\
         JS_ASSERT(!hasFlags(1 << flag));\
         setFlags(1 << flag);\
     }\
     void setNot##flag() {\
         JS_ASSERT(hasFlags(1 << flag));\
         removeFlags(1 << flag);\
     }\
     void set##flag##Unchecked() {\
         setFlags(1 << flag);\
+    } \
+    void setNot##flag##Unchecked() {\
+        removeFlags(1 << flag);\
     }
 
     MIR_FLAG_LIST(FLAG_ACCESSOR)
 #undef FLAG_ACCESSOR
 
     // Return the type of this value. This may be speculative, and enforced
     // dynamically with the use of bailout checks. If all the bailout checks
     // pass, the value will have this type.
@@ -864,17 +852,17 @@ class MBinaryInstruction : public MAryIn
     }
 
   protected:
     HashNumber valueHash() const
     {
         MDefinition *lhs = getOperand(0);
         MDefinition *rhs = getOperand(1);
 
-        return op() + lhs->valueNumber() + rhs->valueNumber();
+        return op() + lhs->id() + rhs->id();
     }
     void swapOperands() {
         MDefinition *temp = getOperand(0);
         replaceOperand(0, getOperand(1));
         replaceOperand(1, temp);
     }
 
     bool binaryCongruentTo(const MDefinition *ins) const
@@ -887,33 +875,33 @@ class MBinaryInstruction : public MAryIn
 
         if (isEffectful() || ins->isEffectful())
             return false;
 
         const MDefinition *left = getOperand(0);
         const MDefinition *right = getOperand(1);
         const MDefinition *tmp;
 
-        if (isCommutative() && left->valueNumber() > right->valueNumber()) {
+        if (isCommutative() && left->id() > right->id()) {
             tmp = right;
             right = left;
             left = tmp;
         }
 
         const MBinaryInstruction *bi = static_cast<const MBinaryInstruction *>(ins);
         const MDefinition *insLeft = bi->getOperand(0);
         const MDefinition *insRight = bi->getOperand(1);
-        if (isCommutative() && insLeft->valueNumber() > insRight->valueNumber()) {
+        if (isCommutative() && insLeft->id() > insRight->id()) {
             tmp = insRight;
             insRight = insLeft;
             insLeft = tmp;
         }
 
-        return (left->valueNumber() == insLeft->valueNumber()) &&
-               (right->valueNumber() == insRight->valueNumber());
+        return left == insLeft &&
+               right == insRight;
     }
 
     // Return true if the operands to this instruction are both unsigned,
     // in which case any wrapping operands were replaced with the underlying
     // int32 operands.
     bool tryUseUnsignedOperands();
 };
 
@@ -929,17 +917,17 @@ class MTernaryInstruction : public MAryI
 
   protected:
     HashNumber valueHash() const
     {
         MDefinition *first = getOperand(0);
         MDefinition *second = getOperand(1);
         MDefinition *third = getOperand(2);
 
-        return op() + first->valueNumber() + second->valueNumber() + third->valueNumber();
+        return op() + first->id() + second->id() + third->id();
     }
 };
 
 class MQuaternaryInstruction : public MAryInstruction<4>
 {
   protected:
     MQuaternaryInstruction(MDefinition *first, MDefinition *second,
                            MDefinition *third, MDefinition *fourth)
@@ -953,18 +941,18 @@ class MQuaternaryInstruction : public MA
   protected:
     HashNumber valueHash() const
     {
         MDefinition *first = getOperand(0);
         MDefinition *second = getOperand(1);
         MDefinition *third = getOperand(2);
         MDefinition *fourth = getOperand(3);
 
-        return op() + first->valueNumber() + second->valueNumber() +
-                      third->valueNumber() + fourth->valueNumber();
+        return op() + first->id() + second->id() +
+                      third->id() + fourth->id();
     }
 };
 
 // Generates an LSnapshot without further effect.
 class MStart : public MNullaryInstruction
 {
   public:
     enum StartType {
@@ -1474,17 +1462,17 @@ class MTest
     }
 
     // We cache whether our operand might emulate undefined, but we don't want
     // to do that from New() or the constructor, since those can be called on
     // background threads.  So make callers explicitly call it if they want us
     // to check whether the operand might do this.  If this method is never
     // called, we'll assume our operand can emulate undefined.
     void cacheOperandMightEmulateUndefined();
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     void filtersUndefinedOrNull(bool trueBranch, MDefinition **subject, bool *filtersUndefined,
                                 bool *filtersNull);
 
     void markOperandCantEmulateUndefined() {
         operandMightEmulateUndefined_ = false;
     }
     bool operandMightEmulateUndefined() const {
         return operandMightEmulateUndefined_;
@@ -2552,17 +2540,17 @@ class MCompare
   public:
     INSTRUCTION_HEADER(Compare)
     static MCompare *New(TempAllocator &alloc, MDefinition *left, MDefinition *right, JSOp op);
     static MCompare *NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right, JSOp op,
                               CompareType compareType);
 
     bool tryFold(bool *result);
     bool evaluateConstantOperands(bool *result);
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     void filtersUndefinedOrNull(bool trueBranch, MDefinition **subject, bool *filtersUndefined,
                                 bool *filtersNull);
 
     void infer(BaselineInspector *inspector, jsbytecode *pc);
     CompareType compareType() const {
         return compareType_;
     }
     bool isInt32Comparison() const {
@@ -3198,17 +3186,17 @@ class MToDouble
     ConversionKind conversion() const {
         return conversion_;
     }
 
     TypePolicy *typePolicy() {
         return this;
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     bool congruentTo(const MDefinition *ins) const {
         if (!ins->isToDouble() || ins->toToDouble()->conversion() != conversion())
             return false;
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
@@ -3271,17 +3259,17 @@ class MToFloat32
     ConversionKind conversion() const {
         return conversion_;
     }
 
     TypePolicy *typePolicy() {
         return this;
     }
 
-    virtual MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    virtual MDefinition *foldsTo(TempAllocator &alloc);
     bool congruentTo(const MDefinition *ins) const {
         if (!ins->isToFloat32() || ins->toToFloat32()->conversion() != conversion())
             return false;
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
@@ -3304,17 +3292,17 @@ class MAsmJSUnsignedToDouble
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSUnsignedToDouble);
     static MAsmJSUnsignedToDouble *NewAsmJS(TempAllocator &alloc, MDefinition *def) {
         return new(alloc) MAsmJSUnsignedToDouble(def);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     bool congruentTo(const MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 };
 
@@ -3330,17 +3318,17 @@ class MAsmJSUnsignedToFloat32
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSUnsignedToFloat32);
     static MAsmJSUnsignedToFloat32 *NewAsmJS(TempAllocator &alloc, MDefinition *def) {
         return new(alloc) MAsmJSUnsignedToFloat32(def);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     bool congruentTo(const MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 
     bool canProduceFloat32() const { return true; }
@@ -3373,17 +3361,17 @@ class MToInt32
     INSTRUCTION_HEADER(ToInt32)
     static MToInt32 *New(TempAllocator &alloc, MDefinition *def,
                          MacroAssembler::IntConversionInputKind conversion =
                              MacroAssembler::IntConversion_Any)
     {
         return new(alloc) MToInt32(def, conversion);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     // this only has backwards information flow.
     void analyzeEdgeCasesBackward();
 
     bool canBeNegativeZero() const {
         return canBeNegativeZero_;
     }
     void setCanBeNegativeZero(bool negativeZero) {
@@ -3432,17 +3420,17 @@ class MTruncateToInt32 : public MUnaryIn
     INSTRUCTION_HEADER(TruncateToInt32)
     static MTruncateToInt32 *New(TempAllocator &alloc, MDefinition *def) {
         return new(alloc) MTruncateToInt32(def);
     }
     static MTruncateToInt32 *NewAsmJS(TempAllocator &alloc, MDefinition *def) {
         return new(alloc) MTruncateToInt32(def);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     bool congruentTo(const MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 
@@ -3469,17 +3457,17 @@ class MToString :
 
   public:
     INSTRUCTION_HEADER(ToString)
     static MToString *New(TempAllocator &alloc, MDefinition *def)
     {
         return new(alloc) MToString(def);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     TypePolicy *typePolicy() {
         return this;
     }
 
     bool congruentTo(const MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
@@ -3509,17 +3497,17 @@ class MBitNot
     INSTRUCTION_HEADER(BitNot)
     static MBitNot *New(TempAllocator &alloc, MDefinition *input);
     static MBitNot *NewAsmJS(TempAllocator &alloc, MDefinition *input);
 
     TypePolicy *typePolicy() {
         return this;
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     void infer();
 
     bool congruentTo(const MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         if (specialization_ == MIRType_None)
             return AliasSet::Store(AliasSet::Any);
@@ -3557,17 +3545,17 @@ class MTypeOf
 
     TypePolicy *typePolicy() {
         return this;
     }
     MIRType inputType() const {
         return inputType_;
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     void cacheInputMaybeCallableOrEmulatesUndefined();
 
     bool inputMaybeCallableOrEmulatesUndefined() const {
         return inputMaybeCallableOrEmulatesUndefined_;
     }
     void markInputNotCallableOrEmulatesUndefined() {
         inputMaybeCallableOrEmulatesUndefined_ = false;
     }
@@ -3626,17 +3614,17 @@ class MBinaryBitwiseInstruction
 
     void specializeAsInt32();
 
   public:
     TypePolicy *typePolicy() {
         return this;
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     MDefinition *foldUnnecessaryBitop();
     virtual MDefinition *foldIfZero(size_t operand) = 0;
     virtual MDefinition *foldIfNegOne(size_t operand) = 0;
     virtual MDefinition *foldIfEqual()  = 0;
     virtual void infer(BaselineInspector *inspector, jsbytecode *pc);
 
     bool congruentTo(const MDefinition *ins) const {
         return binaryCongruentTo(ins);
@@ -3864,17 +3852,17 @@ class MBinaryArithInstruction
 
     TypePolicy *typePolicy() {
         return this;
     }
     MIRType specialization() const {
         return specialization_;
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     virtual double getIdentity() = 0;
 
     void infer(TempAllocator &alloc, BaselineInspector *inspector, jsbytecode *pc);
 
     void setInt32() {
         specialization_ = MIRType_Int32;
         setResultType(MIRType_Int32);
@@ -4462,17 +4450,17 @@ class MMul : public MBinaryArithInstruct
         return new(alloc) MMul(left, right, MIRType_Value, MMul::Normal);
     }
     static MMul *New(TempAllocator &alloc, MDefinition *left, MDefinition *right, MIRType type,
                      Mode mode = Normal)
     {
         return new(alloc) MMul(left, right, type, mode);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     void analyzeEdgeCasesForward();
     void analyzeEdgeCasesBackward();
     void collectRangeInfoPreTrunc();
 
     double getIdentity() {
         return 1;
     }
 
@@ -4557,17 +4545,17 @@ class MDiv : public MBinaryArithInstruct
     {
         MDiv *div = new(alloc) MDiv(left, right, type);
         div->unsigned_ = unsignd;
         if (type == MIRType_Int32)
             div->setTruncateKind(Truncate);
         return div;
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
     void analyzeEdgeCasesForward();
     void analyzeEdgeCasesBackward();
 
     double getIdentity() {
         MOZ_ASSUME_UNREACHABLE("not used");
     }
 
     bool canBeNegativeZero() const {
@@ -4653,17 +4641,17 @@ class MMod : public MBinaryArithInstruct
     {
         MMod *mod = new(alloc) MMod(left, right, type);
         mod->unsigned_ = unsignd;
         if (type == MIRType_Int32)
             mod->setTruncateKind(Truncate);
         return mod;
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     double getIdentity() {
         MOZ_ASSUME_UNREACHABLE("not used");
     }
 
     bool canBeNegativeDividend() const {
         JS_ASSERT(specialization_ == MIRType_Int32);
         return canBeNegativeDividend_;
@@ -4954,17 +4942,17 @@ class MLoadArrowThis
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
         // An arrow function's lexical |this| value is immutable.
         return AliasSet::None();
     }
 };
 
-class MPhi MOZ_FINAL : public MDefinition, public InlineForwardListNode<MPhi>
+class MPhi MOZ_FINAL : public MDefinition, public InlineListNode<MPhi>
 {
     js::Vector<MUse, 2, IonAllocPolicy> inputs_;
 
     bool hasBackedgeType_;
     bool triedToSpecialize_;
     bool isIterator_;
     bool canProduceFloat32_;
     bool canConsumeFloat32_;
@@ -5051,43 +5039,33 @@ class MPhi MOZ_FINAL : public MDefinitio
 
     // Use only if capacity has been reserved by reserveLength
     void addInput(MDefinition *ins);
 
     // Appends a new input to the input vector. May call realloc_().
     // Prefer reserveLength() and addInput() instead, where possible.
     bool addInputSlow(MDefinition *ins, bool *ptypeChange = nullptr);
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     bool congruentTo(const MDefinition *ins) const;
 
     bool isIterator() const {
         return isIterator_;
     }
     void setIterator() {
         isIterator_ = true;
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
     void computeRange(TempAllocator &alloc);
 
-    MDefinition *operandIfRedundant() {
-        // If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)),
-        // returns the operand that it will always be equal to (a, in
-        // those two cases).
-        MDefinition *first = getOperand(0);
-        for (size_t i = 1, e = numOperands(); i < e; i++) {
-            if (getOperand(i) != first && getOperand(i) != this)
-                return nullptr;
-        }
-        return first;
-    }
+    MDefinition *operandIfRedundant();
 
     bool canProduceFloat32() const {
         return canProduceFloat32_;
     }
 
     void setCanProduceFloat32(bool can) {
         canProduceFloat32_ = can;
     }
@@ -6205,17 +6183,17 @@ class MNot
         MNot *ins = new(alloc) MNot(elements);
         ins->setResultType(MIRType_Int32);
         return ins;
     }
 
     INSTRUCTION_HEADER(Not);
 
     void cacheOperandMightEmulateUndefined();
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     void markOperandCantEmulateUndefined() {
         operandMightEmulateUndefined_ = false;
     }
     bool operandMightEmulateUndefined() const {
         return operandMightEmulateUndefined_;
     }
     bool operandIsNeverNaN() const {
@@ -7148,17 +7126,17 @@ class MClampToUint8
 
   public:
     INSTRUCTION_HEADER(ClampToUint8)
 
     static MClampToUint8 *New(TempAllocator &alloc, MDefinition *input) {
         return new(alloc) MClampToUint8(input);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     TypePolicy *typePolicy() {
         return this;
     }
     bool congruentTo(const MDefinition *ins) const {
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const {
@@ -8895,17 +8873,17 @@ class MStringLength
     }
   public:
     INSTRUCTION_HEADER(StringLength)
 
     static MStringLength *New(TempAllocator &alloc, MDefinition *string) {
         return new(alloc) MStringLength(string);
     }
 
-    MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    MDefinition *foldsTo(TempAllocator &alloc);
 
     TypePolicy *typePolicy() {
         return this;
     }
 
     MDefinition *string() const {
         return getOperand(0);
     }
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -114,16 +114,25 @@ MIRGraph::removeBlock(MBasicBlock *block
     block->discardAllPhiOperands();
 
     block->markAsDead();
     blocks_.remove(block);
     numBlocks_--;
 }
 
 void
+MIRGraph::removeBlockIncludingPhis(MBasicBlock *block)
+{
+    // removeBlock doesn't clear phis because of IonBuilder constraints. Here,
+    // we want to totally clear everything.
+    removeBlock(block);
+    block->discardAllPhis();
+}
+
+void
 MIRGraph::unmarkBlocks()
 {
     for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++)
         i->unmark();
 }
 
 MDefinition *
 MIRGraph::forkJoinContext()
@@ -926,16 +935,30 @@ MBasicBlock::addPredecessorWithoutPhis(M
 
 bool
 MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock *child)
 {
     return immediatelyDominated_.append(child);
 }
 
 void
+MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock *child)
+{
+    for (size_t i = 0; ; ++i) {
+        MOZ_ASSERT(i < immediatelyDominated_.length(),
+                   "Dominated block to remove not present");
+        if (immediatelyDominated_[i] == child) {
+            immediatelyDominated_[i] = immediatelyDominated_.back();
+            immediatelyDominated_.popBack();
+            return;
+        }
+    }
+}
+
+void
 MBasicBlock::assertUsesAreNotWithin(MUseIterator use, MUseIterator end)
 {
 #ifdef DEBUG
     for (; use != end; use++) {
         JS_ASSERT_IF(use->consumer()->isDefinition(),
                      use->consumer()->toDefinition()->block()->id() < id());
     }
 #endif
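
removeImmediatelyDominatedBlock uses the classic unordered-removal idiom: overwrite the matching slot with the last element and pop, which is O(1) and fine because the immediately-dominated list carries no meaningful order. The same idiom in isolation (a generic sketch, not the patch's code), including the "must be present" assertion the patch expresses with MOZ_ASSERT:

    #include <cassert>
    #include <vector>

    template <typename T>
    void removeUnordered(std::vector<T> &vec, const T &value) {
        for (size_t i = 0; ; ++i) {
            assert(i < vec.size() && "element to remove not present");
            if (vec[i] == value) {
                vec[i] = vec.back();  // swap with the last element...
                vec.pop_back();       // ...and drop it: O(1), order not preserved
                return;
            }
        }
    }
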
--- a/js/src/jit/MIRGraph.h
+++ b/js/src/jit/MIRGraph.h
@@ -21,17 +21,17 @@ class BytecodeAnalysis;
 class MBasicBlock;
 class MIRGraph;
 class MStart;
 
 class MDefinitionIterator;
 
 typedef InlineListIterator<MInstruction> MInstructionIterator;
 typedef InlineListReverseIterator<MInstruction> MInstructionReverseIterator;
-typedef InlineForwardListIterator<MPhi> MPhiIterator;
+typedef InlineListIterator<MPhi> MPhiIterator;
 typedef InlineForwardListIterator<MResumePoint> MResumePointIterator;
 
 class LBlock;
 
 class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
 {
   public:
     enum Kind {
@@ -184,19 +184,18 @@ class MBasicBlock : public TempObject, p
     // Replaces an edge for a given block with a new block. This is
     // used for critical edge splitting and also for inserting
     // bailouts during ParallelSafetyAnalysis.
     //
     // Note: If successorWithPhis is set, you must not be replacing it.
     void replacePredecessor(MBasicBlock *old, MBasicBlock *split);
     void replaceSuccessor(size_t pos, MBasicBlock *split);
 
-    // Removes `pred` from the predecessor list.  `pred` should not be
-    // the final predecessor. If this block defines phis, removes the
-    // entry for `pred` and updates the indices of later entries.
+    // Removes `pred` from the predecessor list. If this block defines phis,
+    // removes the entry for `pred` and updates the indices of later entries.
     // This may introduce redundant phis if the new block has fewer
     // than two predecessors.
     void removePredecessor(MBasicBlock *pred);
 
     // Resets all the dominator info so that it can be recomputed.
     void clearDominatorInfo();
 
     // Sets a back edge. This places phi nodes and rewrites instructions within
@@ -287,16 +286,19 @@ class MBasicBlock : public TempObject, p
     }
 #endif
     MControlInstruction *lastIns() const {
         return instructions_.rbegin()->toControlInstruction();
     }
     MPhiIterator phisBegin() const {
         return phis_.begin();
     }
+    MPhiIterator phisBegin(MPhi *at) const {
+        return phis_.begin(at);
+    }
     MPhiIterator phisEnd() const {
         return phis_.end();
     }
     bool phisEmpty() const {
         return phis_.empty();
     }
     MResumePointIterator resumePointsBegin() const {
         return resumePoints_.begin();
@@ -415,18 +417,22 @@ class MBasicBlock : public TempObject, p
         JS_ASSERT(numDominated_ != 0);
         return numDominated_;
     }
 
     void addNumDominated(size_t n) {
         numDominated_ += n;
     }
 
+    // Add |child| to this block's immediately-dominated set.
     bool addImmediatelyDominatedBlock(MBasicBlock *child);
 
+    // Remove |child| from this block's immediately-dominated set.
+    void removeImmediatelyDominatedBlock(MBasicBlock *child);
+
     // This function retrieves the internal instruction associated with a
     // slot, and should not be used for normal stack operations. It is an
     // internal helper that is also used to enhance spew.
     MDefinition *getSlot(uint32_t index);
 
     MResumePoint *entryResumePoint() const {
         return entryResumePoint_;
     }
@@ -501,17 +507,17 @@ class MBasicBlock : public TempObject, p
         return trackedSite_.tree();
     }
 
   private:
     MIRGraph &graph_;
     CompileInfo &info_; // Each block originates from a particular script.
     InlineList<MInstruction> instructions_;
     Vector<MBasicBlock *, 1, IonAllocPolicy> predecessors_;
-    InlineForwardList<MPhi> phis_;
+    InlineList<MPhi> phis_;
     InlineForwardList<MResumePoint> resumePoints_;
     FixedList<MDefinition *> slots_;
     uint32_t stackPosition_;
     jsbytecode *pc_;
     uint32_t id_;
     uint32_t domIndex_; // Index in the dominator tree.
     LBlock *lir_;
     MResumePoint *entryResumePoint_;
@@ -560,17 +566,17 @@ class MIRGraph
     size_t numBlocks_;
     bool hasTryBlock_;
 
   public:
     explicit MIRGraph(TempAllocator *alloc)
       : alloc_(alloc),
         returnAccumulator_(nullptr),
         blockIdGen_(0),
-        idGen_(1),
+        idGen_(0),
         osrBlock_(nullptr),
         osrStart_(nullptr),
         numBlocks_(0),
         hasTryBlock_(false)
     { }
 
     TempAllocator &alloc() const {
         return *alloc_;
@@ -622,16 +628,17 @@ class MIRGraph
     ReversePostorderIterator rpoBegin(MBasicBlock *at) {
         return blocks_.begin(at);
     }
     ReversePostorderIterator rpoEnd() {
         return blocks_.end();
     }
     void removeBlocksAfter(MBasicBlock *block);
     void removeBlock(MBasicBlock *block);
+    void removeBlockIncludingPhis(MBasicBlock *block);
     void moveBlockToEnd(MBasicBlock *block) {
         JS_ASSERT(block->id());
         blocks_.remove(block);
         blocks_.pushBack(block);
     }
     void moveBlockBefore(MBasicBlock *at, MBasicBlock *block) {
         JS_ASSERT(block->id());
         blocks_.remove(block);
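
Switching phis_ from InlineForwardList to InlineList (doubly linked) is what allows a pass to discard an arbitrary phi in O(1) while walking the list, and the new phisBegin(MPhi *at) lets iteration resume from a given phi. A generic standalone illustration of why the back-link matters (std::list and std::forward_list stand in for the jit's inline lists; this is not the patch's code):

    #include <forward_list>
    #include <iterator>
    #include <list>

    int main() {
        std::list<int> doubly = {1, 2, 3};
        // With a doubly-linked list, erasing via an iterator to the element
        // itself is O(1); this is what InlineList<MPhi> buys the new pass.
        doubly.erase(std::next(doubly.begin()));

        std::forward_list<int> singly = {1, 2, 3};
        // A singly-linked list can only erase *after* a known position, so
        // removing an arbitrary element means tracking its predecessor.
        singly.erase_after(singly.begin());
        return 0;
    }
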
--- a/js/src/jit/UnreachableCodeElimination.cpp
+++ b/js/src/jit/UnreachableCodeElimination.cpp
@@ -78,18 +78,20 @@ UnreachableCodeElimination::removeUnmark
         AliasAnalysis analysis(mir_, graph_);
         if (!analysis.analyze())
             return false;
     }
 
     // Pass 5: It's important for optimizations to re-run GVN (and in
     // turn alias analysis) after UCE if we eliminated branches.
     if (rerunAliasAnalysis_ && mir_->optimizationInfo().gvnEnabled()) {
-        ValueNumberer gvn(mir_, graph_, mir_->optimizationInfo().gvnKind() == GVN_Optimistic);
-        if (!gvn.clear() || !gvn.analyze())
+        ValueNumberer gvn(mir_, graph_);
+        if (!gvn.run(rerunAliasAnalysis_
+                     ? ValueNumberer::UpdateAliasAnalysis
+                     : ValueNumberer::DontUpdateAliasAnalysis))
             return false;
         IonSpewPass("GVN-after-UCE");
         AssertExtendedGraphCoherency(graph_);
 
         if (mir_->shouldCancel("GVN-after-UCE"))
             return false;
     }
 
--- a/js/src/jit/ValueNumbering.cpp
+++ b/js/src/jit/ValueNumbering.cpp
@@ -1,576 +1,729 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/ValueNumbering.h"
 
+#include "jit/AliasAnalysis.h"
+#include "jit/IonAnalysis.h"
 #include "jit/IonSpewer.h"
 #include "jit/MIRGenerator.h"
-#include "jit/MIRGraph.h"
 
 using namespace js;
 using namespace js::jit;
 
-ValueNumberer::ValueNumberer(MIRGenerator *mir, MIRGraph &graph, bool optimistic)
-  : mir(mir),
-    graph_(graph),
-    values(graph.alloc()),
-    pessimisticPass_(!optimistic),
-    count_(0)
-{ }
-
-TempAllocator &
-ValueNumberer::alloc() const
-{
-    return graph_.alloc();
-}
+/**
+ * Some notes on the main algorithm here:
+ *  - The SSA identifier id() is the value number. We do replaceAllUsesWith as
+ *    we go, so there's always at most one visible value with a given number.
+ *
+ *  - Consequently, the GVN algorithm is effectively pessimistic. This means it
+ *    is not as powerful as an optimistic GVN would be, but it is simpler and
+ *    faster.
+ *
+ *  - We iterate in RPO, so that when visiting a block, we've already optimized
+ *    and hashed all values in dominating blocks. With occasional exceptions,
+ *    this allows us to do everything in a single pass.
+ *
+ *  - When we do use multiple passes, we just re-run the algorithm on the whole
+ *    graph instead of doing sparse propagation. This is a tradeoff to keep the
+ *    algorithm simpler and lighter on inputs that don't have a lot of
+ *    interesting unreachable blocks or degenerate loop induction variables, at
+ *    the expense of being slower on inputs that do. The outer loop always
+ *    terminates because it only iterates when code is or will be removed,
+ *    and there is only a finite amount of code to remove.
+ *
+ *  - Values are not immediately removed from the hash set when they go out of
+ *    scope. Instead, we check for dominance after a lookup. If the dominance
+ *    check fails, the value is removed.
+ */
 
-uint32_t
-ValueNumberer::lookupValue(MDefinition *ins)
+HashNumber
+ValueNumberer::VisibleValues::ValueHasher::hash(Lookup ins)
 {
-    ValueMap::AddPtr p = values.lookupForAdd(ins);
-    if (p) {
-        // make sure this is in the correct group
-        setClass(ins, p->key());
-        return p->value();
-    }
-
-    if (!values.add(p, ins, ins->id()))
-        return 0;
-    breakClass(ins);
-
-    return ins->id();
+    return ins->valueHash();
 }
 
-MDefinition *
-ValueNumberer::simplify(MDefinition *def, bool useValueNumbers)
+// Test whether two MDefinitions are congruent.
+bool
+ValueNumberer::VisibleValues::ValueHasher::match(Key k, Lookup l)
 {
-    if (def->isEffectful())
-        return def;
-
-    MDefinition *ins = def->foldsTo(alloc(), useValueNumbers);
-    if (ins == def)
-        return def;
-
-    // Ensure this instruction has a value number.
-    if (!ins->valueNumberData())
-        ins->setValueNumberData(new(alloc()) ValueNumberData);
-
-    if (!ins->block()) {
-        // In this case, we made a new def by constant folding, for
-        // example, we replaced add(#3,#4) with a new const(#7) node.
-
-        // We will only fold a phi into one of its operands.
-        JS_ASSERT(!def->isPhi());
-
-        def->block()->insertAfter(def->toInstruction(), ins->toInstruction());
-        ins->setValueNumber(lookupValue(ins));
-    }
-
-    JS_ASSERT(ins->id() != 0);
-
-    def->replaceAllUsesWith(ins);
-
-    IonSpew(IonSpew_GVN, "Folding %d to be %d", def->id(), ins->id());
-    return ins;
-}
-
-MControlInstruction *
-ValueNumberer::simplifyControlInstruction(MControlInstruction *def)
-{
-    if (def->isEffectful())
-        return def;
-
-    MDefinition *repl = def->foldsTo(alloc(), false);
-    if (repl == def)
-        return def;
+    // If one of the instructions depends on a store, and the other instruction
+    // does not depend on the same store, the instructions are not congruent.
+    if (k->dependency() != l->dependency())
+        return false;
 
-    // Ensure this instruction has a value number.
-    if (!repl->valueNumberData())
-        repl->setValueNumberData(new(alloc()) ValueNumberData);
-
-    MBasicBlock *block = def->block();
-
-    // MControlInstructions should not have consumers.
-    JS_ASSERT(repl->isControlInstruction());
-    JS_ASSERT(!def->hasUses());
-
-    if (def->isInWorklist())
-        repl->setInWorklist();
-
-    block->discardLastIns();
-    block->end(repl->toControlInstruction());
-    return repl->toControlInstruction();
-}
-
-void
-ValueNumberer::markDefinition(MDefinition *def)
-{
-    if (isMarked(def))
-        return;
-
-    IonSpew(IonSpew_GVN, "marked %d", def->id());
-    def->setInWorklist();
-    count_++;
-}
-
-void
-ValueNumberer::unmarkDefinition(MDefinition *def)
-{
-    if (pessimisticPass_)
-        return;
-
-    JS_ASSERT(count_ > 0);
-    IonSpew(IonSpew_GVN, "unmarked %d", def->id());
-    def->setNotInWorklist();
-    count_--;
-}
-
-void
-ValueNumberer::markBlock(MBasicBlock *block)
-{
-    for (MDefinitionIterator iter(block); iter; iter++)
-        markDefinition(*iter);
-    markDefinition(block->lastIns());
+    return k->congruentTo(l); // Ask the values themselves what they think.
 }
 
 void
-ValueNumberer::markConsumers(MDefinition *def)
+ValueNumberer::VisibleValues::ValueHasher::rekey(Key &k, Key newKey)
+{
+    k = newKey;
+}
+
+ValueNumberer::VisibleValues::VisibleValues(TempAllocator &alloc)
+  : set_(alloc)
+{}
+
+// Initialize the underlying hash set.
+bool
+ValueNumberer::VisibleValues::init()
 {
-    if (pessimisticPass_)
-        return;
+    return set_.init();
+}
+
+// Look up the first entry for the given def.
+ValueNumberer::VisibleValues::Ptr
+ValueNumberer::VisibleValues::findLeader(const MDefinition *def) const
+{
+    return set_.lookup(def);
+}
+
+// Look up the first entry for the given def, preparing to insert if none exists.
+ValueNumberer::VisibleValues::AddPtr
+ValueNumberer::VisibleValues::findLeaderForAdd(MDefinition *def)
+{
+    return set_.lookupForAdd(def);
+}
 
-    JS_ASSERT(!def->isInWorklist());
-    JS_ASSERT(!def->isControlInstruction());
-    for (MUseDefIterator use(def); use; use++)
-        markDefinition(use.def());
+// Insert a value into the set.
+bool
+ValueNumberer::VisibleValues::insert(AddPtr p, MDefinition *def)
+{
+    return set_.add(p, def);
+}
+
+// Insert a value into the set, overwriting the existing entry.
+void
+ValueNumberer::VisibleValues::overwrite(AddPtr p, MDefinition *def)
+{
+    set_.rekeyInPlace(p, def);
+}
+
+// The given def will be deleted, so remove it from any sets.
+void
+ValueNumberer::VisibleValues::forget(const MDefinition *def)
+{
+    Ptr p = set_.lookup(def);
+    if (p && *p == def)
+        set_.remove(p);
+}
+
+// Clear all state.
+void
+ValueNumberer::VisibleValues::clear()
+{
+    set_.clear();
+}
+
+// Test whether the given definition would be dead if it had no uses.
+static bool
+DeadIfUnused(const MDefinition *def)
+{
+    return !def->isEffectful() && !def->isGuard() && !def->isControlInstruction() &&
+           (!def->isInstruction() || !def->toInstruction()->resumePoint());
 }
 
-bool
-ValueNumberer::computeValueNumbers()
+// Test whether the given definition is no longer needed.
+static bool
+IsDead(const MDefinition *def)
+{
+    return !def->hasUses() && DeadIfUnused(def);
+}
+
+// Test whether the given definition will no longer be needed once its only
+// user is deleted. TODO: This misses cases where the definition is used
+// multiple times by the same user (bug 1031396).
+static bool
+WillBecomeDead(const MDefinition *def)
 {
-    // At the end of this function, we will have the value numbering stored in
-    // each instruction.
-    //
-    // We also need an "optimistic" value number, for temporary use, which is
-    // stored in a hashtable.
-    //
-    // For the instruction x := y op z, we map (op, VN[y], VN[z]) to a value
-    // number, say v. If it is not in the map, we use the instruction id.
-    //
-    // If the instruction in question's value number is not already
-    // v, we break the congruence and set it to v. We repeat until saturation.
-    // This will take at worst O(d) time, where d is the loop connectedness
-    // of the SSA def/use graph.
-    //
-    // The algorithm is the simple RPO-based algorithm from
-    // "SCC-Based Value Numbering" by Cooper and Simpson.
-    //
-    // If we are performing a pessimistic pass, then we assume that every
-    // definition is in its own congruence class, since we know nothing about
-    // values that enter Phi nodes through back edges. We then make one pass
-    // through the graph, ignoring back edges. This yields less congruences on
-    // any graph with back-edges, but is much faster to perform.
+    return def->hasOneUse() && DeadIfUnused(def);
+}
+
+// Call MDefinition::replaceAllUsesWith, and add some GVN-specific asserts.
+static void
+ReplaceAllUsesWith(MDefinition *from, MDefinition *to)
+{
+    MOZ_ASSERT(from != to, "GVN shouldn't try to replace a value with itself");
+    MOZ_ASSERT(from->type() == to->type(), "Def replacement has different type");
+
+    from->replaceAllUsesWith(to);
+}
+
+// Test whether succ is a successor of newControl.
+static bool
+HasSuccessor(const MControlInstruction *newControl, const MBasicBlock *succ)
+{
+    for (size_t i = 0, e = newControl->numSuccessors(); i != e; ++i) {
+        if (newControl->getSuccessor(i) == succ)
+            return true;
+    }
+    return false;
+}
 
-    IonSpew(IonSpew_GVN, "Numbering instructions");
-
-    if (!values.init())
-        return false;
-    // Stick a VN object onto every mdefinition
-    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-        if (mir->shouldCancel("Value Numbering (preparation loop"))
-            return false;
-        for (MDefinitionIterator iter(*block); iter; iter++)
-            iter->setValueNumberData(new(alloc()) ValueNumberData);
-        MControlInstruction *jump = block->lastIns();
-        jump->setValueNumberData(new(alloc()) ValueNumberData);
+// Given a block which has had predecessors removed but is still reachable,
+// compute what the block's new immediate dominator will be. The dominator
+// tree itself has not been recomputed yet.
+static MBasicBlock *
+ComputeNewDominator(MBasicBlock *block, MBasicBlock *old)
+{
+    MBasicBlock *now = block->getPredecessor(0);
+    for (size_t i = 1, e = block->numPredecessors(); i != e; ++i) {
+        MBasicBlock *pred = block->getPredecessor(i);
+        // Note that dominators haven't been recomputed yet, so we have to check
+        // whether now dominates pred, not block.
+        while (!now->dominates(pred)) {
+            MBasicBlock *next = now->immediateDominator();
+            if (next == old)
+                return old;
+            if (next == now) {
+                MOZ_ASSERT(block == old, "Non-self-dominating block became self-dominating");
+                return block;
+            }
+            now = next;
+        }
     }
+    MOZ_ASSERT(old != block || old != now, "Missed self-dominating block staying self-dominating");
+    return now;
+}
 
-    // Assign unique value numbers if pessimistic.
-    // It might be productive to do this in the MDefinition constructor or
-    // possibly in a previous pass, if it seems reasonable.
-    if (pessimisticPass_) {
-        for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-            for (MDefinitionIterator iter(*block); iter; iter++)
-                iter->setValueNumber(iter->id());
-        }
-    } else {
-        // For each root block, add all of its instructions to the worklist.
-        markBlock(*(graph_.begin()));
-        if (graph_.osrBlock())
-            markBlock(graph_.osrBlock());
+// Given a block which has had predecessors removed but is still reachable,
+// test whether the block's new dominator will be closer than its old one
+// and whether it will expose potential optimization opportunities.
+static bool
+IsDominatorRefined(MBasicBlock *block)
+{
+    MBasicBlock *old = block->immediateDominator();
+    MBasicBlock *now = ComputeNewDominator(block, old);
+
+    // We've computed block's new dominator. Test whether there are any
+    // newly-dominating definitions which look interesting.
+    MOZ_ASSERT(old->dominates(now), "Refined dominator not dominated by old dominator");
+    for (MBasicBlock *i = now; i != old; i = i->immediateDominator()) {
+        if (!i->phisEmpty() || *i->begin() != i->lastIns())
+            return true;
     }
 
-    while (count_ > 0) {
-#ifdef DEBUG
-        if (!pessimisticPass_) {
-            size_t debugCount = 0;
-            IonSpew(IonSpew_GVN, "The following instructions require processing:");
-            for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-                for (MDefinitionIterator iter(*block); iter; iter++) {
-                    if (iter->isInWorklist()) {
-                        IonSpew(IonSpew_GVN, "\t%d", iter->id());
-                        debugCount++;
-                    }
-                }
-                if (block->lastIns()->isInWorklist()) {
-                    IonSpew(IonSpew_GVN, "\t%d", block->lastIns()->id());
-                    debugCount++;
-                }
-            }
-            if (!debugCount)
-                IonSpew(IonSpew_GVN, "\tNone");
-            JS_ASSERT(debugCount == count_);
-        }
-#endif
-        for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-            if (mir->shouldCancel("Value Numbering (main loop)"))
-                return false;
-            for (MDefinitionIterator iter(*block); iter; ) {
+    return false;
+}
 
-                if (!isMarked(*iter)) {
-                    iter++;
-                    continue;
-                }
-
-                JS_ASSERT_IF(!pessimisticPass_, count_ > 0);
-                unmarkDefinition(*iter);
-
-                MDefinition *ins = simplify(*iter, false);
-
-                if (ins != *iter) {
-                    iter = block->discardDefAt(iter);
-                    continue;
-                }
+// Delete the given instruction and anything in its use-def subtree which is no
+// longer needed.
+bool
+ValueNumberer::deleteDefsRecursively(MDefinition *def)
+{
+    def->setInWorklist();
+    return deadDefs_.append(def) && processDeadDefs();
+}
 
-                // Don't bother storing this instruction in the HashMap if
-                // (a) eliminateRedundancies will never eliminate it (because
-                // it's non-movable or effectful) and (b) no other instruction's
-                // value number depends on it.
-                if (!ins->hasDefUses() && (!ins->isMovable() || ins->isEffectful())) {
-                    iter++;
-                    continue;
-                }
-
-                uint32_t value = lookupValue(ins);
-
-                if (!value)
-                    return false; // Hashtable insertion failed
-
-                if (ins->valueNumber() != value) {
-                    IonSpew(IonSpew_GVN,
-                            "Broke congruence for instruction %d (%p) with VN %d (now using %d)",
-                            ins->id(), (void *) ins, ins->valueNumber(), value);
-                    ins->setValueNumber(value);
-                    markConsumers(ins);
-                }
-
-                iter++;
-            }
-            // Process control flow instruction:
-            MControlInstruction *jump = block->lastIns();
-            jump = simplifyControlInstruction(jump);
-
-            // If we are pessimistic, then this will never get set.
-            if (!jump->isInWorklist())
-                continue;
-            unmarkDefinition(jump);
-            if (jump->valueNumber() == 0) {
-                jump->setValueNumber(jump->id());
-                for (size_t i = 0; i < jump->numSuccessors(); i++)
-                    markBlock(jump->getSuccessor(i));
-            }
-
-        }
-
-        // If we are doing a pessimistic pass, we only go once through the
-        // instruction list.
-        if (pessimisticPass_)
-            break;
-    }
-#ifdef DEBUG
-    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-        for (MDefinitionIterator iter(*block); iter; iter++) {
-            JS_ASSERT(!iter->isInWorklist());
-            JS_ASSERT_IF(iter->valueNumber() == 0,
-                         !iter->hasDefUses() && (!iter->isMovable() || iter->isEffectful()));
+// Assuming phi is dead, push each dead operand of phi not dominated by the phi
+// to the delete worklist.
+bool
+ValueNumberer::pushDeadPhiOperands(MPhi *phi, const MBasicBlock *phiBlock)
+{
+    for (size_t o = 0, e = phi->numOperands(); o != e; ++o) {
+        MDefinition *op = phi->getOperand(o);
+        if (WillBecomeDead(op) && !op->isInWorklist() &&
+            !phiBlock->dominates(phiBlock->getPredecessor(o)))
+        {
+            op->setInWorklist();
+            if (!deadDefs_.append(op))
+                return false;
+        } else {
+            op->setUseRemovedUnchecked();
         }
     }
-#endif
+    return true;
+}
+
+// Assuming ins is dead, push each dead operand of ins to the delete worklist.
+bool
+ValueNumberer::pushDeadInsOperands(MInstruction *ins)
+{
+    for (size_t o = 0, e = ins->numOperands(); o != e; ++o) {
+        MDefinition *op = ins->getOperand(o);
+        if (WillBecomeDead(op) && !op->isInWorklist()) {
+            op->setInWorklist();
+            if (!deadDefs_.append(op))
+                return false;
+        } else {
+            op->setUseRemovedUnchecked();
+        }
+    }
     return true;
 }
 
-MDefinition *
-ValueNumberer::findDominatingDef(InstructionMap &defs, MDefinition *ins, size_t index)
+// Recursively delete all the defs on the deadDefs_ worklist.
+bool
+ValueNumberer::processDeadDefs()
 {
-    JS_ASSERT(ins->valueNumber() != 0);
-    InstructionMap::Ptr p = defs.lookup(ins->valueNumber());
-    MDefinition *dom;
-    if (!p || index >= p->value().validEnd) {
-        DominatingValue value;
-        value.def = ins;
-        // Since we are traversing the dominator tree in pre-order, when we
-        // are looking at the |index|-th block, the next numDominated() blocks
-        // we traverse are precisely the set of blocks that are dominated.
-        //
-        // So, this value is visible in all blocks if:
-        // index < index + ins->block->numDominated()
-        // and becomes invalid after that.
-        value.validEnd = index + ins->block()->numDominated();
+    while (!deadDefs_.empty()) {
+        MDefinition *def = deadDefs_.popCopy();
+        IonSpew(IonSpew_GVN, "    Deleting %s%u", def->opName(), def->id());
+        MOZ_ASSERT(def->isInWorklist(), "Deleting value not on the worklist");
+        MOZ_ASSERT(IsDead(def), "Deleting non-dead definition");
+
+        values_.forget(def);
 
-        if(!defs.put(ins->valueNumber(), value))
-            return nullptr;
-
-        dom = ins;
-    } else {
-        dom = p->value().def;
+        if (def->isPhi()) {
+            MPhi *phi = def->toPhi();
+            MBasicBlock *phiBlock = phi->block();
+            if (!pushDeadPhiOperands(phi, phiBlock))
+                return false;
+            MPhiIterator at(phiBlock->phisBegin(phi));
+            phiBlock->discardPhiAt(at);
+        } else {
+            MInstruction *ins = def->toInstruction();
+            if (!pushDeadInsOperands(ins))
+                return false;
+            ins->block()->discard(ins);
+        }
     }
-
-    return dom;
+    return true;
 }
 
+// Delete an edge from the CFG. Return true if the block becomes unreachable.
 bool
-ValueNumberer::eliminateRedundancies()
+ValueNumberer::removePredecessor(MBasicBlock *block, MBasicBlock *pred)
 {
-    // A definition is 'redundant' iff it is dominated by another definition
-    // with the same value number.
-    //
-    // So, we traverse the dominator tree in pre-order, maintaining a hashmap
-    // from value numbers to instructions.
-    //
-    // For each definition d with value number v, we look up v in the hashmap.
-    //
-    // If there is a definition d' in the hashmap, and the current traversal
-    // index is within that instruction's dominated range, then we eliminate d,
-    // replacing all uses of d with uses of d'.
-    //
-    // If there is no valid definition in the hashtable (the current definition
-    // is not in dominated scope), then we insert the current instruction,
-    // since it is the most dominant instruction with the given value number.
-
-    InstructionMap defs(alloc());
-
-    if (!defs.init())
-        return false;
-
-    IonSpew(IonSpew_GVN, "Eliminating redundant instructions");
-
-    // Stack for pre-order CFG traversal.
-    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(alloc());
-
-    // The index of the current block in the CFG traversal.
-    size_t index = 0;
-
-    // Add all self-dominating blocks to the worklist.
-    // This includes all roots. Order does not matter.
-    for (MBasicBlockIterator i(graph_.begin()); i != graph_.end(); i++) {
-        MBasicBlock *block = *i;
-        if (block->immediateDominator() == block) {
-            if (!worklist.append(block))
-                return false;
+    bool isUnreachableLoop = false;
+    if (block->isLoopHeader()) {
+        if (block->loopPredecessor() == pred) {
+            // Deleting the entry into the loop makes the loop unreachable.
+            isUnreachableLoop = true;
+            IonSpew(IonSpew_GVN, "    Loop with header block%u is no longer reachable", block->id());
+#ifdef DEBUG
+        } else if (block->hasUniqueBackedge() && block->backedge() == pred) {
+            IonSpew(IonSpew_GVN, "    Loop with header block%u is no longer a loop", block->id());
+#endif
         }
     }
 
-    // Starting from each self-dominating block, traverse the CFG in pre-order.
-    while (!worklist.empty()) {
-        if (mir->shouldCancel("Value Numbering (eliminate loop)"))
-            return false;
-        MBasicBlock *block = worklist.popCopy();
+    // TODO: Removing a predecessor removes operands from phis, and these
+    // operands may become dead. We should detect this and delete them.
+    // In practice though, when this happens, we often end up re-running GVN
+    // for other reasons anyway.
+    block->removePredecessor(pred);
+    return block->numPredecessors() == 0 || isUnreachableLoop;
+}
+
+// Delete the given block and any block in its dominator subtree.
+bool
+ValueNumberer::removeBlocksRecursively(MBasicBlock *start, const MBasicBlock *dominatorRoot)
+{
+    MOZ_ASSERT(start != graph_.entryBlock(), "Removing normal entry block");
+    MOZ_ASSERT(start != graph_.osrBlock(), "Removing OSR entry block");
 
-        IonSpew(IonSpew_GVN, "Looking at block %d", block->id());
+    // Remove this block from its dominator parent's subtree. This is the only
+    // immediately-dominated-block information we need to manually update,
+    // because everything dominated by this block is about to be swept away.
+    MBasicBlock *parent = start->immediateDominator();
+    if (parent != start)
+        parent->removeImmediatelyDominatedBlock(start);
 
-        // Add all immediate dominators to the front of the worklist.
-        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
-                             block->immediatelyDominatedBlocksEnd())) {
-            return false;
+    if (!unreachableBlocks_.append(start))
+        return false;
+    do {
+        MBasicBlock *block = unreachableBlocks_.popCopy();
+        // If a block is removed while it is on the worklist, skip it.
+        if (block->isDead())
+            continue;
+
+        for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
+            MBasicBlock *succ = block->getSuccessor(i);
+            if (!succ->isDead()) {
+                if (removePredecessor(succ, block)) {
+                    if (!unreachableBlocks_.append(succ))
+                        return false;
+                } else if (!rerun_) {
+                    if (!remainingBlocks_.append(succ))
+                        return false;
+                }
+            }
         }
 
-        // For each instruction, attempt to look up a dominating definition.
-        for (MDefinitionIterator iter(block); iter; ) {
-            MDefinition *ins = simplify(*iter, true);
-
-            // Instruction was replaced, and all uses have already been fixed.
-            if (ins != *iter) {
-                iter = block->discardDefAt(iter);
-                continue;
-            }
-
-            // Instruction has side-effects and cannot be folded.
-            if (!ins->isMovable() || ins->isEffectful()) {
-                iter++;
-                continue;
-            }
-
-            MDefinition *dom = findDominatingDef(defs, ins, index);
-            if (!dom)
-                return false; // Insertion failed.
+#ifdef DEBUG
+        IonSpew(IonSpew_GVN, "    Deleting block%u%s%s%s", block->id(),
+                block->isLoopHeader() ? " (loop header)" : "",
+                block->isSplitEdge() ? " (split edge)" : "",
+                block->immediateDominator() == block ? " (dominator root)" : "");
+        for (MDefinitionIterator iter(block); iter; iter++) {
+            MDefinition *def = *iter;
+            IonSpew(IonSpew_GVN, "      Deleting %s%u", def->opName(), def->id());
+        }
+        MControlInstruction *control = block->lastIns();
+        IonSpew(IonSpew_GVN, "      Deleting %s%u", control->opName(), control->id());
+#endif
 
-            if (dom == ins || !dom->updateForReplacement(ins)) {
-                iter++;
-                continue;
-            }
-
-            IonSpew(IonSpew_GVN, "instruction %d is dominated by instruction %d (from block %d)",
-                    ins->id(), dom->id(), dom->block()->id());
-
-            ins->replaceAllUsesWith(dom);
+        // Keep track of how many blocks within dominatorRoot's tree have been deleted.
+        if (dominatorRoot->dominates(block))
+            ++numBlocksDeleted_;
 
-            JS_ASSERT(!ins->hasUses());
-            JS_ASSERT(ins->block() == block);
-            JS_ASSERT(!ins->isEffectful());
-            JS_ASSERT(ins->isMovable());
+        // TODO: Removing a block deletes the phis, instructions, and resume
+        // points in the block, and their operands may become dead. We should
+        // detect this and delete them. In practice though, when this happens,
+        // we often end up re-running GVN for other reasons anyway (bug 1031412).
+        graph_.removeBlockIncludingPhis(block);
+        blocksRemoved_ = true;
+    } while (!unreachableBlocks_.empty());
 
-            iter = ins->block()->discardDefAt(iter);
-        }
-        index++;
-    }
-
-    JS_ASSERT(index == graph_.numBlocks());
     return true;
 }
 
-// Exported method, called by the compiler.
+// Return a simplified form of the given definition, if we can.
+MDefinition *
+ValueNumberer::simplified(MDefinition *def) const
+{
+    return def->foldsTo(graph_.alloc());
+}
+
+// If an equivalent and dominating value already exists in the set, return it.
+// Otherwise insert the given definition into the set and return it.
+MDefinition *
+ValueNumberer::leader(MDefinition *def)
+{
+    // If the value isn't suitable for eliminating, don't bother hashing it. The
+    // convention is that congruentTo returns false for node kinds that wish to
+    // opt out of redundancy elimination.
+    // TODO: It'd be nice to clean up that convention (bug 1031406).
+    if (!def->isEffectful() && def->congruentTo(def)) {
+        // Look for a match.
+        VisibleValues::AddPtr p = values_.findLeaderForAdd(def);
+        if (p) {
+            MDefinition *rep = *p;
+            if (rep->block()->dominates(def->block())) {
+                // We found a dominating congruent value.
+                MOZ_ASSERT(!rep->isInWorklist(), "Dead value in set");
+                return rep;
+            }
+
+            // The congruent value doesn't dominate. It never will again in this
+            // dominator tree, so overwrite it.
+            values_.overwrite(p, def);
+        } else {
+            // No match. Add a new entry.
+            if (!values_.insert(p, def))
+                return nullptr;
+        }
+    }
+
+    return def;
+}
+
+// Test whether the given phi is dominated by a congruent phi.
 bool
-ValueNumberer::analyze()
+ValueNumberer::hasLeader(const MPhi *phi, const MBasicBlock *phiBlock) const
 {
-    return computeValueNumbers() && eliminateRedundancies();
+    if (VisibleValues::Ptr p = values_.findLeader(phi)) {
+        const MDefinition *rep = *p;
+        return rep != phi && rep->block()->dominates(phiBlock);
+    }
+    return false;
 }
 
-// Called by the compiler if we need to re-run GVN.
+// Test whether there are any phis in the backedge's loop header which are
+// newly optimizable, as a result of optimizations done inside the loop. This
+// is not a sparse approach, but restarting is rare enough in practice.
+// Termination is ensured by deleting the phi triggering the iteration.
 bool
-ValueNumberer::clear()
+ValueNumberer::loopHasOptimizablePhi(MBasicBlock *backedge) const
 {
-    IonSpew(IonSpew_GVN, "Clearing value numbers");
+    // Rescan the phis for any that can be simplified, since they may be reading
+    // values from backedges.
+    MBasicBlock *header = backedge->loopHeaderOfBackedge();
+    for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd()); iter != end; ++iter) {
+        MPhi *phi = *iter;
+        MOZ_ASSERT(phi->hasUses(), "Missed an unused phi");
+
+        if (phi->operandIfRedundant() || hasLeader(phi, header))
+            return true; // Phi can be simplified.
+    }
+    return false;
+}
+
+// Visit the given definition.
+bool
+ValueNumberer::visitDefinition(MDefinition *def)
+{
+    // Look for a simplified form of this def.
+    MDefinition *sim = simplified(def);
+    if (sim != def) {
+        if (sim == nullptr)
+            return false;
+
+        // If sim doesn't belong to a block, insert it next to def.
+        if (sim->block() == nullptr)
+            def->block()->insertAfter(def->toInstruction(), sim->toInstruction());
 
-    // Clear the VN of every MDefinition
-    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-        if (mir->shouldCancel("Value Numbering (clearing)"))
+        IonSpew(IonSpew_GVN, "    Folded %s%u to %s%u",
+                def->opName(), def->id(), sim->opName(), sim->id());
+        ReplaceAllUsesWith(def, sim);
+        if (IsDead(def) && !deleteDefsRecursively(def))
+            return false;
+        def = sim;
+    }
+
+    // Look for a dominating def which makes this def redundant.
+    MDefinition *rep = leader(def);
+    if (rep != def) {
+        if (rep == nullptr)
             return false;
-        for (MDefinitionIterator iter(*block); iter; iter++)
-            iter->clearValueNumberData();
-        block->lastIns()->clearValueNumberData();
+        if (rep->updateForReplacement(def)) {
+            IonSpew(IonSpew_GVN,
+                    "    Replacing %s%u with %s%u",
+                    def->opName(), def->id(), rep->opName(), rep->id());
+            ReplaceAllUsesWith(def, rep);
+
+            // This is effectively what the old GVN did. It allows isGuard()
+            // instructions to be deleted if they are redundant, and the
+            // replacement is not even guaranteed to have isGuard() set.
+            // TODO: Clean this up (bug 1031410).
+            def->setNotGuardUnchecked();
+
+            if (IsDead(def) && !deleteDefsRecursively(def))
+                return false;
+            def = rep;
+        }
+    }
+
+    // If this instruction has a dependency() into an unreachable block, we'll
+    // need to update AliasAnalysis.
+    if (updateAliasAnalysis_ && !dependenciesBroken_) {
+        const MDefinition *dep = def->dependency();
+        if (dep != nullptr && dep->block()->isDead()) {
+            IonSpew(IonSpew_GVN, "    AliasAnalysis invalidated; will recompute!");
+            dependenciesBroken_ = true;
+        }
     }
 
     return true;
 }
 
-uint32_t
-MDefinition::valueNumber() const
+// Visit the control instruction at the end of the given block.
+bool
+ValueNumberer::visitControlInstruction(MBasicBlock *block, const MBasicBlock *dominatorRoot)
 {
-    JS_ASSERT(block_);
-    if (valueNumber_ == nullptr)
-        return 0;
-    return valueNumber_->valueNumber();
-}
-void
-MDefinition::setValueNumber(uint32_t vn)
-{
-    valueNumber_->setValueNumber(vn);
-}
-// Set the class of this to the given representative value.
-void
-ValueNumberer::setClass(MDefinition *def, MDefinition *rep)
-{
-    def->valueNumberData()->setClass(def, rep);
+    // Look for a simplified form of the control instruction.
+    MControlInstruction *control = block->lastIns();
+    MDefinition *rep = simplified(control);
+    if (rep == control)
+        return true;
+
+    if (rep == nullptr)
+        return false;
+
+    MControlInstruction *newControl = rep->toControlInstruction();
+    MOZ_ASSERT(!newControl->block(),
+               "Control instruction replacement shouldn't already be in a block");
+    IonSpew(IonSpew_GVN, "    Folded control instruction %s%u to %s%u",
+            control->opName(), control->id(), newControl->opName(), graph_.getNumInstructionIds());
+
+    // If the simplification removes any CFG edges, update the CFG and remove
+    // any blocks that become dead.
+    size_t oldNumSuccs = control->numSuccessors();
+    size_t newNumSuccs = newControl->numSuccessors();
+    if (newNumSuccs != oldNumSuccs) {
+        MOZ_ASSERT(newNumSuccs < oldNumSuccs, "New control instruction has too many successors");
+        for (size_t i = 0; i != oldNumSuccs; ++i) {
+            MBasicBlock *succ = control->getSuccessor(i);
+            if (!HasSuccessor(newControl, succ)) {
+                if (removePredecessor(succ, block)) {
+                    if (!removeBlocksRecursively(succ, dominatorRoot))
+                        return false;
+                } else if (!rerun_) {
+                    if (!remainingBlocks_.append(succ))
+                        return false;
+                }
+            }
+        }
+    }
+
+    if (!pushDeadInsOperands(control))
+        return false;
+    block->discardLastIns();
+    block->end(newControl);
+    return processDeadDefs();
 }
 
-MDefinition *
-ValueNumberer::findSplit(MDefinition *def)
+// Visit all the phis and instructions in the given block.
+bool
+ValueNumberer::visitBlock(MBasicBlock *block, const MBasicBlock *dominatorRoot)
 {
-    for (MDefinition *vncheck = def->valueNumberData()->classNext;
-         vncheck != nullptr;
-         vncheck = vncheck->valueNumberData()->classNext) {
-        if (!def->congruentTo(vncheck)) {
-            IonSpew(IonSpew_GVN, "Proceeding with split because %d is not congruent to %d",
-                    def->id(), vncheck->id());
-            return vncheck;
+    MOZ_ASSERT(!block->unreachable(), "Blocks marked unreachable during GVN");
+    MOZ_ASSERT(!block->isDead(), "Block to visit is already dead");
+
+    // Visit the definitions in the block top-down.
+    for (MDefinitionIterator iter(block); iter; ) {
+        MDefinition *def = *iter++;
+
+        // If the definition is dead, delete it.
+        if (IsDead(def)) {
+            if (!deleteDefsRecursively(def))
+                return false;
+            continue;
         }
+
+        if (!visitDefinition(def))
+            return false;
     }
-    return nullptr;
+
+    return visitControlInstruction(block, dominatorRoot);
 }
 
-void
-ValueNumberer::breakClass(MDefinition *def)
+// Visit all the blocks dominated by dominatorRoot.
+bool
+ValueNumberer::visitDominatorTree(MBasicBlock *dominatorRoot, size_t *totalNumVisited)
 {
-    if (def->valueNumber() == def->id()) {
-        IonSpew(IonSpew_GVN, "Breaking congruence with itself: %d", def->id());
-        ValueNumberData *defdata = def->valueNumberData();
-        JS_ASSERT(defdata->classPrev == nullptr);
-        // If the def was the only member of the class, then there is nothing to do.
-        if (defdata->classNext == nullptr)
-            return;
-        // If upon closer inspection, we are still equivalent to this class
-        // then there isn't anything for us to do.
-        MDefinition *newRep = findSplit(def);
-        if (!newRep)
-            return;
-        markConsumers(def);
-        ValueNumberData *newdata = newRep->valueNumberData();
+    IonSpew(IonSpew_GVN, "  Visiting dominator tree (with %llu blocks) rooted at block%u%s",
+            uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
+            dominatorRoot == graph_.entryBlock() ? " (normal entry block)" :
+            dominatorRoot == graph_.osrBlock() ? " (OSR entry block)" :
+            " (normal entry and OSR entry merge point)");
+    MOZ_ASSERT(numBlocksDeleted_ == 0, "numBlocksDeleted_ wasn't reset");
+    MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
+            "root is not a dominator tree root");
 
-        // Right now, |defdata| is at the front of the list, and |newdata| is
-        // somewhere in the middle.
-        //
-        // We want to move |defdata| and everything up to but excluding
-        // |newdata| to a new list, with |defdata| still as the canonical
-        // element.
-        //
-        // We then want to take |newdata| and everything after, and
-        // mark them for processing (since |newdata| is now a new canonical
-        // element).
-        //
-        MDefinition *lastOld = newdata->classPrev;
+    // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
+    // property that we'll always visit a block before any block it dominates,
+    // so we can make a single pass through the list and see every full redundancy.
+    size_t numVisited = 0;
+    for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ++iter) {
+        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+        MBasicBlock *block = *iter;
+        // We're only visiting blocks in dominatorRoot's tree right now.
+        if (!dominatorRoot->dominates(block))
+            continue;
+        // Visit the block!
+        if (!visitBlock(block, dominatorRoot))
+            return false;
+        // If this was the end of a loop, check for optimization in the header.
+        if (!rerun_ && block->isLoopBackedge() && loopHasOptimizablePhi(block)) {
+            IonSpew(IonSpew_GVN, "    Loop phi in block%u can now be optimized; will re-run GVN!",
+                    block->id());
+            rerun_ = true;
+            remainingBlocks_.clear();
+        }
+        ++numVisited;
+        MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numBlocksDeleted_,
+                   "Visited blocks too many times");
+        if (numVisited >= dominatorRoot->numDominated() - numBlocksDeleted_)
+            break;
+    }
+
+    *totalNumVisited += numVisited;
+    values_.clear();
+    numBlocksDeleted_ = 0;
+    return true;
+}
 
-        JS_ASSERT(lastOld); // newRep is NOT the first element of the list.
-        JS_ASSERT(lastOld->valueNumberData()->classNext == newRep);
+// Visit all the blocks in the graph.
+bool
+ValueNumberer::visitGraph()
+{
+    // Due to OSR blocks, the set of blocks dominated by a block may not be
+    // contiguous in the RPO. Do a separate traversal for each dominator tree
+    // root. There's always the main entry, and sometimes there's an OSR entry,
+    // and then there are the roots formed where the OSR paths merge with the
+    // main entry paths.
+    size_t totalNumVisited = 0;
+    for (ReversePostorderIterator iter(graph_.rpoBegin()); ; ++iter) {
+        MBasicBlock *block = *iter;
+        if (block->immediateDominator() == block) {
+            if (!visitDominatorTree(block, &totalNumVisited))
+                return false;
+            MOZ_ASSERT(totalNumVisited <= graph_.numBlocks(), "Visited blocks too many times");
+            if (totalNumVisited >= graph_.numBlocks())
+                break;
+        }
+        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
+    }
+    return true;
+}
 
-        //lastOld is now the last element of the old list (congruent to
-        //|def|)
-        lastOld->valueNumberData()->classNext = nullptr;
+ValueNumberer::ValueNumberer(MIRGenerator *mir, MIRGraph &graph)
+  : mir_(mir), graph_(graph),
+    values_(graph.alloc()),
+    deadDefs_(graph.alloc()),
+    unreachableBlocks_(graph.alloc()),
+    remainingBlocks_(graph.alloc()),
+    numBlocksDeleted_(0),
+    rerun_(false),
+    blocksRemoved_(false),
+    updateAliasAnalysis_(false),
+    dependenciesBroken_(false)
+{}
 
-#ifdef DEBUG
-        for (MDefinition *tmp = def; tmp != nullptr; tmp = tmp->valueNumberData()->classNext) {
-            JS_ASSERT(tmp->valueNumber() == def->valueNumber());
-            JS_ASSERT(tmp->congruentTo(def));
-            JS_ASSERT(tmp != newRep);
-        }
-#endif
-        //|newRep| is now the first element of a new list, therefore it is the
-        //new canonical element. Mark the remaining elements in the list
-        //(including |newRep|)
-        newdata->classPrev = nullptr;
-        IonSpew(IonSpew_GVN, "Choosing a new representative: %d", newRep->id());
+bool
+ValueNumberer::run(UpdateAliasAnalysisFlag updateAliasAnalysis)
+{
+    updateAliasAnalysis_ = updateAliasAnalysis == UpdateAliasAnalysis;
+
+    // Initialize the value set. It's tempting to pass in a size here of some
+    // function of graph_.getNumInstructionIds(); however, if we start out with
+    // a large capacity, it will be far larger than the actual element count
+    // for most of the pass, so when we remove elements, it would often think
+    // it needs to compact itself. Empirically, just letting the HashTable grow
+    // as needed on its own seems to work pretty well.
+    if (!values_.init())
+        return false;
+
+    IonSpew(IonSpew_GVN, "Running GVN on graph (with %llu blocks)",
+            uint64_t(graph_.numBlocks()));
 
-        // make the VN of every member in the class the VN of the new representative number.
-        for (MDefinition *tmp = newRep; tmp != nullptr; tmp = tmp->valueNumberData()->classNext) {
-            // if this instruction is already scheduled to be processed, don't do anything.
-            if (tmp->isInWorklist()) {
-                IonSpew(IonSpew_GVN, "Defer  to a new congruence class: %d", tmp->id());
-                continue;
+    // Top level non-sparse iteration loop. If an iteration performs a
+    // significant change, such as deleting a block which changes the dominator
+    // tree and may enable more optimization, this loop takes another iteration.
+    int runs = 0;
+    for (;;) {
+        if (!visitGraph())
+            return false;
+
+        // Test whether any block which was not removed but which had at least
+        // one predecessor removed will have a new dominator parent.
+        while (!remainingBlocks_.empty()) {
+            MBasicBlock *block = remainingBlocks_.popCopy();
+            if (!block->isDead() && IsDominatorRefined(block)) {
+                IonSpew(IonSpew_GVN, "  Dominator for block%u can now be refined; will re-run GVN!",
+                        block->id());
+                rerun_ = true;
+                remainingBlocks_.clear();
+                break;
             }
-            IonSpew(IonSpew_GVN, "Moving to a new congruence class: %d", tmp->id());
-            tmp->setValueNumber(newRep->id());
-            markConsumers(tmp);
-            markDefinition(tmp);
         }
 
-        // Insert the new representative => number mapping into the table
-        // Logically, there should not be anything in the table currently, but
-        // old values are never removed, so there's a good chance something will
-        // already be there.
-        values.put(newRep, newRep->id());
-    } else {
-        // The element that is breaking from the list isn't the representative element
-        // just strip it from the list
-        ValueNumberData *defdata = def->valueNumberData();
-        if (defdata->classPrev)
-            defdata->classPrev->valueNumberData()->classNext = defdata->classNext;
-        if (defdata->classNext)
-            defdata->classNext->valueNumberData()->classPrev = defdata->classPrev;
+        if (blocksRemoved_) {
+            if (!AccountForCFGChanges(mir_, graph_, dependenciesBroken_))
+                return false;
+
+            blocksRemoved_ = false;
+            dependenciesBroken_ = false;
+        }
+
+        if (mir_->shouldCancel("GVN (outer loop)"))
+            return false;
+
+        // If no further opportunities have been discovered, we're done.
+        if (!rerun_)
+            break;
 
-        // Make sure there is no nastinees accidentally linking elements into the old list later.
-        defdata->classPrev = nullptr;
-        defdata->classNext = nullptr;
+        IonSpew(IonSpew_GVN, "Re-running GVN on graph (run %d, now with %llu blocks)",
+                runs, uint64_t(graph_.numBlocks()));
+        rerun_ = false;
+
+        // Enforce an arbitrary iteration limit. This is rarely reached, and
+        // isn't even strictly necessary, as the algorithm is guaranteed to
+        // terminate on its own in a finite amount of time (since every time we
+        // re-run we delete the construct which triggered the re-run), but it
+        // does help avoid slow compile times on pathological code.
+        ++runs;
+        if (runs == 6) {
+            IonSpew(IonSpew_GVN, "Re-run cutoff reached. Terminating GVN!");
+            break;
+        }
     }
+
+    return true;
 }
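
As an illustration (not part of the patch), the central trick described in the
notes at the top of ValueNumbering.cpp, namely that values stay in the hash set
after their dominator subtree has been exited and dominance is only checked
when a lookup hits, can be sketched in a few lines of standalone C++. The
Block/Def types, the DFS-interval dominance test, and keying the map purely by
a hash are hypothetical simplifications; the real pass hashes with
MDefinition::valueHash() and matches candidates with congruentTo().

#include <cstdint>
#include <iostream>
#include <unordered_map>

// Minimal stand-ins for MBasicBlock and MDefinition, for illustration only.
struct Block {
    // Dominance encoded as an interval from a DFS numbering of the dominator
    // tree: b dominates c iff c's begin falls inside b's interval.
    uint32_t dfsBegin, dfsEnd;
    bool dominates(const Block &other) const {
        return dfsBegin <= other.dfsBegin && other.dfsBegin < dfsEnd;
    }
};

struct Def {
    uint32_t valueHash; // Stand-in for hashing the opcode and operands.
    Block *block;
};

// Sketch of the leader() lookup: entries are never eagerly evicted when their
// dominator subtree is left; a stale (non-dominating) entry is detected at
// lookup time and overwritten in place.
Def *leader(std::unordered_map<uint32_t, Def *> &values, Def *def)
{
    auto it = values.find(def->valueHash);
    if (it != values.end()) {
        Def *rep = it->second;
        if (rep->block->dominates(*def->block))
            return rep;   // Congruent, dominating value: def is redundant.
        it->second = def; // The old entry can never dominate here again.
        return def;
    }
    values.emplace(def->valueHash, def);
    return def;
}

int main()
{
    Block blockB{0, 2}, blockB2{1, 2}, blockC{2, 3}; // B dominates B2, not C.
    Def x{42, &blockB}, y{42, &blockB2}, z{42, &blockC};

    std::unordered_map<uint32_t, Def *> values;
    std::cout << (leader(values, &x) == &x) << '\n'; // 1: first occurrence
    std::cout << (leader(values, &y) == &x) << '\n'; // 1: x dominates y, so y is redundant
    std::cout << (leader(values, &z) == &z) << '\n'; // 1: x does not dominate z; overwrite
    return 0;
}

The payoff is the same as in leader() above: nothing needs to be invalidated
when the traversal leaves a dominator subtree, at the cost of an occasional
overwrite when a stale entry is hit.
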
--- a/js/src/jit/ValueNumbering.h
+++ b/js/src/jit/ValueNumbering.h
@@ -2,137 +2,107 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_ValueNumbering_h
 #define jit_ValueNumbering_h
 
-#include "jit/MIR.h"
+#include "jit/IonAllocPolicy.h"
+#include "js/HashTable.h"
 
 namespace js {
 namespace jit {
 
+class MDefinition;
+class MBasicBlock;
+class MIRGraph;
+class MPhi;
+class MInstruction;
+class MIRGenerator;
+
 class ValueNumberer
 {
-  protected:
-    struct ValueHasher
+    // Value numbering data.
+    class VisibleValues
     {
-        typedef MDefinition * Lookup;
-        typedef MDefinition * Key;
-        static HashNumber hash(const Lookup &ins) {
-            return ins->valueHash();
-        }
+        // Hash policy for ValueSet.
+        struct ValueHasher
+        {
+            typedef const MDefinition *Lookup;
+            typedef MDefinition *Key;
+            static HashNumber hash(Lookup ins);
+            static bool match(Key k, Lookup l);
+            static void rekey(Key &k, Key newKey);
+        };
+
+        typedef HashSet<MDefinition *, ValueHasher, IonAllocPolicy> ValueSet;
 
-        static bool match(const Key &k, const Lookup &l) {
-            // If one of the instructions depends on a store, and the
-            // other instruction does not depend on the same store,
-            // the instructions are not congruent.
-            if (k->dependency() != l->dependency())
-                return false;
-            return k->congruentTo(l);
-        }
+        ValueSet set_;        // Set of visible values
+
+      public:
+        explicit VisibleValues(TempAllocator &alloc);
+        bool init();
+
+        typedef ValueSet::Ptr Ptr;
+        typedef ValueSet::AddPtr AddPtr;
+
+        Ptr findLeader(const MDefinition *def) const;
+        AddPtr findLeaderForAdd(MDefinition *def);
+        bool insert(AddPtr p, MDefinition *def);
+        void overwrite(AddPtr p, MDefinition *def);
+        void forget(const MDefinition *def);
+        void clear();
     };
 
-    typedef HashMap<MDefinition *,
-                    uint32_t,
-                    ValueHasher,
-                    IonAllocPolicy> ValueMap;
+    typedef Vector<MBasicBlock *, 4, IonAllocPolicy> BlockWorklist;
+    typedef Vector<MDefinition *, 4, IonAllocPolicy> DefWorklist;
 
-    struct DominatingValue
-    {
-        MDefinition *def;
-        uint32_t validEnd;
-    };
-
-    typedef HashMap<uint32_t,
-                    DominatingValue,
-                    DefaultHasher<uint32_t>,
-                    IonAllocPolicy> InstructionMap;
+    MIRGenerator *const mir_;
+    MIRGraph &graph_;
+    VisibleValues values_;            // Numbered values
+    DefWorklist deadDefs_;            // Worklist for deleting values
+    BlockWorklist unreachableBlocks_; // Worklist for unreachable blocks
+    BlockWorklist remainingBlocks_;   // Blocks remaining with fewer preds
+    size_t numBlocksDeleted_;         // Num deleted blocks in current tree
+    bool rerun_;                      // Should we run another GVN iteration?
+    bool blocksRemoved_;              // Have any blocks been removed?
+    bool updateAliasAnalysis_;        // Do we care about AliasAnalysis?
+    bool dependenciesBroken_;         // Have we broken AliasAnalysis?
 
-  protected:
-    TempAllocator &alloc() const;
-    uint32_t lookupValue(MDefinition *ins);
-    MDefinition *findDominatingDef(InstructionMap &defs, MDefinition *ins, size_t index);
+    bool deleteDefsRecursively(MDefinition *def);
+    bool pushDeadPhiOperands(MPhi *phi, const MBasicBlock *phiBlock);
+    bool pushDeadInsOperands(MInstruction *ins);
+    bool processDeadDefs();
 
-    MDefinition *simplify(MDefinition *def, bool useValueNumbers);
-    MControlInstruction *simplifyControlInstruction(MControlInstruction *def);
-    bool eliminateRedundancies();
-
-    bool computeValueNumbers();
+    bool removePredecessor(MBasicBlock *block, MBasicBlock *pred);
+    bool removeBlocksRecursively(MBasicBlock *block, const MBasicBlock *root);
 
-    inline bool isMarked(MDefinition *def) {
-        return pessimisticPass_ || def->isInWorklist();
-    }
+    MDefinition *simplified(MDefinition *def) const;
+    MDefinition *leader(MDefinition *def);
+    bool hasLeader(const MPhi *phi, const MBasicBlock *phiBlock) const;
+    bool loopHasOptimizablePhi(MBasicBlock *backedge) const;
 
-    void markDefinition(MDefinition *def);
-    void unmarkDefinition(MDefinition *def);
-
-    void markConsumers(MDefinition *def);
-    void markBlock(MBasicBlock *block);
-    void setClass(MDefinition *toSet, MDefinition *representative);
+    bool visitDefinition(MDefinition *def);
+    bool visitControlInstruction(MBasicBlock *block, const MBasicBlock *root);
+    bool visitBlock(MBasicBlock *block, const MBasicBlock *root);
+    bool visitDominatorTree(MBasicBlock *root, size_t *totalNumVisited);
+    bool visitGraph();
 
   public:
-    static MDefinition *findSplit(MDefinition *);
-    void breakClass(MDefinition*);
+    ValueNumberer(MIRGenerator *mir, MIRGraph &graph);
 
-  protected:
-    MIRGenerator *mir;
-    MIRGraph &graph_;
-    ValueMap values;
-    bool pessimisticPass_;
-    size_t count_;
+    enum UpdateAliasAnalysisFlag {
+        DontUpdateAliasAnalysis,
+        UpdateAliasAnalysis,
+    };
 
-  public:
-    ValueNumberer(MIRGenerator *mir, MIRGraph &graph, bool optimistic);
-    bool analyze();
-    bool clear();
+    // Optimize the graph, performing expression simplification and
+    // canonicalization, eliminating statically fully-redundant expressions,
+    // deleting dead instructions, and removing unreachable blocks.
+    bool run(UpdateAliasAnalysisFlag updateAliasAnalysis);
 };
 
-class ValueNumberData : public TempObject {
-
-    friend void ValueNumberer::breakClass(MDefinition*);
-    friend MDefinition *ValueNumberer::findSplit(MDefinition*);
-    uint32_t number;
-    MDefinition *classNext;
-    MDefinition *classPrev;
-
-  public:
-    ValueNumberData() : number(0), classNext(nullptr), classPrev(nullptr) {}
-
-    void setValueNumber(uint32_t number_) {
-        number = number_;
-    }
-
-    uint32_t valueNumber() {
-        return number;
-    }
-    // Set the class of this to the given representative value.
-    void setClass(MDefinition *thisDef, MDefinition *rep) {
-        JS_ASSERT(thisDef->valueNumberData() == this);
-        // If we are attempting to insert ourself, then nothing needs to be done.
-        // However, if the definition to be inserted already has the correct value number,
-        // it still needs to be inserted, since the value number needs to be updated lazily.
-        // this updating tactic can leave the world in a state where thisDef is not in the
-        // equivalence class of rep, but it has the same value number. Defs in this state
-        // need to be re-processed.
-        if (this == rep->valueNumberData())
-            return;
-
-        if (classNext)
-            classNext->valueNumberData()->classPrev = classPrev;
-        if (classPrev)
-            classPrev->valueNumberData()->classNext = classNext;
-
-
-        classPrev = rep;
-        classNext = rep->valueNumberData()->classNext;
-
-        if (rep->valueNumberData()->classNext)
-            rep->valueNumberData()->classNext->valueNumberData()->classPrev = thisDef;
-        rep->valueNumberData()->classNext = thisDef;
-    }
-};
 } // namespace jit
 } // namespace js
 
 #endif /* jit_ValueNumbering_h */
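
As the header diff shows, the old clear()/analyze() entry points are replaced
by a single run() call parameterized by an UpdateAliasAnalysisFlag. A
hypothetical call site, modeled on the UnreachableCodeElimination change above
(the helper name runGVNPass is invented for illustration and only compiles
inside the SpiderMonkey tree):

#include "jit/ValueNumbering.h"

using namespace js::jit;

// Illustrative helper, not part of this patch: construct the pass and run it,
// telling GVN whether it must keep alias analysis information valid if it
// ends up removing code.
static bool
runGVNPass(MIRGenerator *mir, MIRGraph &graph, bool updateAliasAnalysis)
{
    ValueNumberer gvn(mir, graph);
    return gvn.run(updateAliasAnalysis
                   ? ValueNumberer::UpdateAliasAnalysis
                   : ValueNumberer::DontUpdateAliasAnalysis);
}
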
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -5975,23 +5975,22 @@ SetRuntimeOptions(JSRuntime *rt, const O
     JS::RuntimeOptionsRef(rt).setBaseline(enableBaseline)
                              .setIon(enableIon)
                              .setAsmJS(enableAsmJS)
                              .setNativeRegExp(enableNativeRegExp);
 
     if (const char *str = op.getStringOption("ion-gvn")) {
         if (strcmp(str, "off") == 0) {
             jit::js_JitOptions.disableGvn = true;
-        } else if (strcmp(str, "pessimistic") == 0) {
-            jit::js_JitOptions.forceGvnKind = true;
-            jit::js_JitOptions.forcedGvnKind = jit::GVN_Pessimistic;
-        } else if (strcmp(str, "optimistic") == 0) {
-            jit::js_JitOptions.forceGvnKind = true;
-            jit::js_JitOptions.forcedGvnKind = jit::GVN_Optimistic;
-        } else {
+        } else if (strcmp(str, "on") != 0 &&
+                   strcmp(str, "optimistic") != 0 &&
+                   strcmp(str, "pessimistic") != 0)
+        {
+            // We accept "pessimistic" and "optimistic" as synonyms for "on"
+            // for backwards compatibility.
             return OptionFailure("ion-gvn", str);
         }
     }
 
     if (const char *str = op.getStringOption("ion-licm")) {
         if (strcmp(str, "on") == 0)
             jit::js_JitOptions.disableLicm = false;
         else if (strcmp(str, "off") == 0)
@@ -6264,18 +6263,17 @@ main(int argc, char **argv, char **envp)
 #endif
         || !op.addBoolOption('\0', "ion", "Enable IonMonkey (default)")
         || !op.addBoolOption('\0', "no-ion", "Disable IonMonkey")
         || !op.addBoolOption('\0', "no-asmjs", "Disable asm.js compilation")
         || !op.addBoolOption('\0', "no-native-regexp", "Disable native regexp compilation")
         || !op.addStringOption('\0', "ion-gvn", "[mode]",
                                "Specify Ion global value numbering:\n"
                                "  off: disable GVN\n"
-                               "  pessimistic: use pessimistic GVN\n"
-                               "  optimistic: (default) use optimistic GVN")
+                               "  on:  enable GVN (default)\n")
         || !op.addStringOption('\0', "ion-licm", "on/off",
                                "Loop invariant code motion (default: on, off to disable)")
         || !op.addStringOption('\0', "ion-edgecase-analysis", "on/off",
                                "Find edge cases where Ion can avoid bailouts (default: on, off to disable)")
         || !op.addStringOption('\0', "ion-range-analysis", "on/off",
                                "Range analysis (default: on, off to disable)")
         || !op.addBoolOption('\0', "ion-check-range-analysis",
                                "Range analysis checking")