Bug 949668 - SpiderMonkey: Rename MoveResolver::Move to MoveOp. r=jandem
author: Dan Gohman <sunfish@google.com>
date: Fri, 13 Dec 2013 08:27:46 -0800
changeset 160472 0f9522bcd25994a209c8fe27e98094f7df368480
parent 160471 bce23de3a54ac4910271bb866a8a1cc7557aa955
child 160473 0daa1bff4b161297b1d45e99e79fc14e02e6ef66
push id: 25834
push user: philringnalda@gmail.com
push date: Sun, 15 Dec 2013 02:20:53 +0000
treeherder: mozilla-central@9fcc6330dc69
reviewers: jandem
bugs: 949668
milestone: 29.0a1
js/src/jit/CodeGenerator.cpp
js/src/jit/MoveResolver.cpp
js/src/jit/MoveResolver.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm/MoveEmitter-arm.cpp
js/src/jit/arm/MoveEmitter-arm.h
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/CodeGenerator-x86-shared.h
js/src/jit/shared/MoveEmitter-x86-shared.cpp
js/src/jit/shared/MoveEmitter-x86-shared.h
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x64/Trampoline-x64.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/jit/x86/MacroAssembler-x86.h
js/src/jit/x86/Trampoline-x86.cpp
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -1186,19 +1186,17 @@ CodeGenerator::visitMoveGroup(LMoveGroup
         const LAllocation *from = move.from();
         const LAllocation *to = move.to();
 
         // No bogus moves.
         JS_ASSERT(*from != *to);
         JS_ASSERT(!from->isConstant());
         JS_ASSERT(from->isDouble() == to->isDouble());
 
-        MoveResolver::Move::Kind kind = from->isDouble()
-                                        ? MoveResolver::Move::DOUBLE
-                                        : MoveResolver::Move::GENERAL;
+        MoveOp::Kind kind = from->isDouble() ? MoveOp::DOUBLE : MoveOp::GENERAL;
 
         if (!resolver.addMove(toMoveOperand(from), toMoveOperand(to), kind))
             return false;
     }
 
     if (!resolver.resolve())
         return false;
 
--- a/js/src/jit/MoveResolver.cpp
+++ b/js/src/jit/MoveResolver.cpp
@@ -16,17 +16,17 @@ MoveResolver::MoveResolver()
 
 void
 MoveResolver::resetState()
 {
     hasCycles_ = false;
 }
 
 bool
-MoveResolver::addMove(const MoveOperand &from, const MoveOperand &to, Move::Kind kind)
+MoveResolver::addMove(const MoveOperand &from, const MoveOperand &to, MoveOp::Kind kind)
 {
     // Assert that we're not doing no-op moves.
     JS_ASSERT(!(from == to));
     PendingMove *pm = movePool_.allocate();
     if (!pm)
         return false;
     new (pm) PendingMove(from, to, kind);
     pending_.pushBack(pm);
--- a/js/src/jit/MoveResolver.h
+++ b/js/src/jit/MoveResolver.h
@@ -9,181 +9,181 @@
 
 #include "jit/InlineList.h"
 #include "jit/IonAllocPolicy.h"
 #include "jit/Registers.h"
 
 namespace js {
 namespace jit {
 
+// This is similar to Operand, but carries more information. We're also not
+// guaranteed that Operand looks like this on all ISAs.
+class MoveOperand
+{
+    enum Kind {
+        REG,
+        FLOAT_REG,
+        ADDRESS,
+        FLOAT_ADDRESS,
+        EFFECTIVE_ADDRESS
+    };
+
+    Kind kind_;
+    uint32_t code_;
+    int32_t disp_;
+
+  public:
+    enum AddressKind {
+        MEMORY = ADDRESS,
+        EFFECTIVE = EFFECTIVE_ADDRESS,
+        FLOAT = FLOAT_ADDRESS
+    };
+
+    MoveOperand()
+    { }
+    explicit MoveOperand(const Register &reg) : kind_(REG), code_(reg.code())
+    { }
+    explicit MoveOperand(const FloatRegister &reg) : kind_(FLOAT_REG), code_(reg.code())
+    { }
+    MoveOperand(const Register &reg, int32_t disp, AddressKind addrKind = MEMORY)
+        : kind_((Kind) addrKind),
+        code_(reg.code()),
+        disp_(disp)
+    {
+        // With a zero offset, this is a plain reg-to-reg move.
+        if (disp == 0 && addrKind == EFFECTIVE)
+            kind_ = REG;
+    }
+    MoveOperand(const MoveOperand &other)
+      : kind_(other.kind_),
+        code_(other.code_),
+        disp_(other.disp_)
+    { }
+    bool isFloatReg() const {
+        return kind_ == FLOAT_REG;
+    }
+    bool isGeneralReg() const {
+        return kind_ == REG;
+    }
+    bool isDouble() const {
+        return kind_ == FLOAT_REG || kind_ == FLOAT_ADDRESS;
+    }
+    bool isMemory() const {
+        return kind_ == ADDRESS;
+    }
+    bool isFloatAddress() const {
+        return kind_ == FLOAT_ADDRESS;
+    }
+    bool isEffectiveAddress() const {
+        return kind_ == EFFECTIVE_ADDRESS;
+    }
+    Register reg() const {
+        JS_ASSERT(isGeneralReg());
+        return Register::FromCode(code_);
+    }
+    FloatRegister floatReg() const {
+        JS_ASSERT(isFloatReg());
+        return FloatRegister::FromCode(code_);
+    }
+    Register base() const {
+        JS_ASSERT(isMemory() || isEffectiveAddress() || isFloatAddress());
+        return Register::FromCode(code_);
+    }
+    int32_t disp() const {
+        return disp_;
+    }
+
+    bool operator ==(const MoveOperand &other) const {
+        if (kind_ != other.kind_)
+            return false;
+        if (code_ != other.code_)
+            return false;
+        if (isMemory() || isEffectiveAddress())
+            return disp_ == other.disp_;
+        return true;
+    }
+    bool operator !=(const MoveOperand &other) const {
+        return !operator==(other);
+    }
+};
+
+// This represents a move operation.
+class MoveOp
+{
+  protected:
+    MoveOperand from_;
+    MoveOperand to_;
+    bool cycle_;
+
+  public:
+    enum Kind {
+        GENERAL,
+        DOUBLE
+    };
+
+  protected:
+    Kind kind_;
+
+  public:
+    MoveOp()
+    { }
+    MoveOp(const MoveOp &other)
+      : from_(other.from_),
+        to_(other.to_),
+        cycle_(other.cycle_),
+        kind_(other.kind_)
+    { }
+    MoveOp(const MoveOperand &from, const MoveOperand &to, Kind kind, bool cycle = false)
+      : from_(from),
+        to_(to),
+        cycle_(cycle),
+        kind_(kind)
+    { }
+
+    bool inCycle() const {
+        return cycle_;
+    }
+    const MoveOperand &from() const {
+        return from_;
+    }
+    const MoveOperand &to() const {
+        return to_;
+    }
+    Kind kind() const {
+        return kind_;
+    }
+};
+
 class MoveResolver
 {
-  public:
-    // This is similar to Operand, but carries more information. We're also not
-    // guaranteed that Operand looks like this on all ISAs.
-    class MoveOperand
-    {
-        enum Kind {
-            REG,
-            FLOAT_REG,
-            ADDRESS,
-            FLOAT_ADDRESS,
-            EFFECTIVE_ADDRESS
-        };
-
-        Kind kind_;
-        uint32_t code_;
-        int32_t disp_;
-
-      public:
-        enum AddressKind {
-            MEMORY = ADDRESS,
-            EFFECTIVE = EFFECTIVE_ADDRESS,
-            FLOAT = FLOAT_ADDRESS
-        };
-
-        MoveOperand()
-        { }
-        explicit MoveOperand(const Register &reg) : kind_(REG), code_(reg.code())
-        { }
-        explicit MoveOperand(const FloatRegister &reg) : kind_(FLOAT_REG), code_(reg.code())
-        { }
-        MoveOperand(const Register &reg, int32_t disp, AddressKind addrKind = MEMORY)
-            : kind_((Kind) addrKind),
-            code_(reg.code()),
-            disp_(disp)
-        {
-            // With a zero offset, this is a plain reg-to-reg move.
-            if (disp == 0 && addrKind == EFFECTIVE)
-                kind_ = REG;
-        }
-        MoveOperand(const MoveOperand &other)
-          : kind_(other.kind_),
-            code_(other.code_),
-            disp_(other.disp_)
-        { }
-        bool isFloatReg() const {
-            return kind_ == FLOAT_REG;
-        }
-        bool isGeneralReg() const {
-            return kind_ == REG;
-        }
-        bool isDouble() const {
-            return kind_ == FLOAT_REG || kind_ == FLOAT_ADDRESS;
-        }
-        bool isMemory() const {
-            return kind_ == ADDRESS;
-        }
-        bool isFloatAddress() const {
-            return kind_ == FLOAT_ADDRESS;
-        }
-        bool isEffectiveAddress() const {
-            return kind_ == EFFECTIVE_ADDRESS;
-        }
-        Register reg() const {
-            JS_ASSERT(isGeneralReg());
-            return Register::FromCode(code_);
-        }
-        FloatRegister floatReg() const {
-            JS_ASSERT(isFloatReg());
-            return FloatRegister::FromCode(code_);
-        }
-        Register base() const {
-            JS_ASSERT(isMemory() || isEffectiveAddress() || isFloatAddress());
-            return Register::FromCode(code_);
-        }
-        int32_t disp() const {
-            return disp_;
-        }
-
-        bool operator ==(const MoveOperand &other) const {
-            if (kind_ != other.kind_)
-                return false;
-            if (code_ != other.code_)
-                return false;
-            if (isMemory() || isEffectiveAddress())
-                return disp_ == other.disp_;
-            return true;
-        }
-        bool operator !=(const MoveOperand &other) const {
-            return !operator==(other);
-        }
-    };
-
-    class Move
-    {
-      protected:
-        MoveOperand from_;
-        MoveOperand to_;
-        bool cycle_;
-
-      public:
-        enum Kind {
-            GENERAL,
-            DOUBLE
-        };
-
-      protected:
-        Kind kind_;
-
-      public:
-        Move()
-        { }
-        Move(const Move &other)
-          : from_(other.from_),
-            to_(other.to_),
-            cycle_(other.cycle_),
-            kind_(other.kind_)
-        { }
-        Move(const MoveOperand &from, const MoveOperand &to, Kind kind, bool cycle = false)
-          : from_(from),
-            to_(to),
-            cycle_(cycle),
-            kind_(kind)
-        { }
-
-        bool inCycle() const {
-            return cycle_;
-        }
-        const MoveOperand &from() const {
-            return from_;
-        }
-        const MoveOperand &to() const {
-            return to_;
-        }
-        Kind kind() const {
-            return kind_;
-        }
-    };
-
   private:
     struct PendingMove
-      : public Move,
+      : public MoveOp,
         public TempObject,
         public InlineListNode<PendingMove>
     {
         PendingMove()
         { }
         PendingMove(const MoveOperand &from, const MoveOperand &to, Kind kind)
-          : Move(from, to, kind, false)
+          : MoveOp(from, to, kind, false)
         { }
         
         void setInCycle() {
             JS_ASSERT(!inCycle());
             cycle_ = true;
         }
 
     };
 
     typedef InlineList<MoveResolver::PendingMove>::iterator PendingMoveIterator;
 
   private:
     // Moves that are definitely unblocked (constants to registers). These are
     // emitted last.
-    js::Vector<Move, 16, SystemAllocPolicy> orderedMoves_;
+    js::Vector<MoveOp, 16, SystemAllocPolicy> orderedMoves_;
     bool hasCycles_;
 
     TempObjectPool<PendingMove> movePool_;
 
     InlineList<PendingMove> pending_;
 
     PendingMove *findBlockingMove(const PendingMove *last);
 
@@ -195,23 +195,23 @@ class MoveResolver
 
     // Resolves a move group into two lists of ordered moves. These moves must
     // be executed in the order provided. Some moves may indicate that they
     // participate in a cycle. For every cycle there are two such moves, and it
     // is guaranteed that cycles do not nest inside each other in the list.
     //
     // After calling addMove() for each parallel move, resolve() performs the
     // cycle resolution algorithm. Calling addMove() again resets the resolver.
-    bool addMove(const MoveOperand &from, const MoveOperand &to, Move::Kind kind);
+    bool addMove(const MoveOperand &from, const MoveOperand &to, MoveOp::Kind kind);
     bool resolve();
 
     size_t numMoves() const {
         return orderedMoves_.length();
     }
-    const Move &getMove(size_t i) const {
+    const MoveOp &getMove(size_t i) const {
         return orderedMoves_[i];
     }
     bool hasCycles() const {
         return hasCycles_;
     }
     void clearTempObjectPool() {
         movePool_.clear();
     }
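
A condensed sketch of how the renamed types are driven, pieced together from the callers this patch touches (CodeGenerator::visitMoveGroup and the platform MoveEmitters); it assumes the SpiderMonkey jit headers and a backend emitter and is not meant to compile on its own:

    // Queue each parallel move, tagging it with the operand width.
    MoveResolver resolver;
    MoveOp::Kind kind = from->isDouble() ? MoveOp::DOUBLE : MoveOp::GENERAL;
    if (!resolver.addMove(toMoveOperand(from), toMoveOperand(to), kind))
        return false;

    // Order the moves and mark the ones that participate in cycles.
    if (!resolver.resolve())
        return false;

    // A MoveEmitter then walks the ordered MoveOps.
    for (size_t i = 0; i < resolver.numMoves(); i++) {
        const MoveOp &move = resolver.getMove(i);
        if (move.kind() == MoveOp::DOUBLE)
            emitDoubleMove(move.from(), move.to());
        else
            emitGeneralMove(move.from(), move.to());
    }
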
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1015,18 +1015,16 @@ CodeGeneratorARM::visitPowHalfD(LPowHalf
     masm.ma_vimm(0.0, ScratchFloatReg);
     masm.ma_vadd(ScratchFloatReg, input, output);
     masm.ma_vsqrt(output, output);
 
     masm.bind(&done);
     return true;
 }
 
-typedef MoveResolver::MoveOperand MoveOperand;
-
 MoveOperand
 CodeGeneratorARM::toMoveOperand(const LAllocation *a) const
 {
     if (a->isGeneralReg())
         return MoveOperand(ToRegister(a));
     if (a->isFloatReg())
         return MoveOperand(ToFloatRegister(a));
     JS_ASSERT((ToStackOffset(a) & 3) == 0);
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -38,17 +38,17 @@ class CodeGeneratorARM : public CodeGene
     }
     inline Operand ToOperand(const LAllocation *a) {
         return ToOperand(*a);
     }
     inline Operand ToOperand(const LDefinition *def) {
         return ToOperand(def->output());
     }
 
-    MoveResolver::MoveOperand toMoveOperand(const LAllocation *a) const;
+    MoveOperand toMoveOperand(const LAllocation *a) const;
 
     bool bailoutIf(Assembler::Condition condition, LSnapshot *snapshot);
     bool bailoutFrom(Label *label, LSnapshot *snapshot);
     bool bailout(LSnapshot *snapshot);
 
   protected:
     bool generatePrologue();
     bool generateEpilogue();
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -3513,57 +3513,57 @@ MacroAssemblerARMCompat::passABIArg(cons
     MoveOperand to;
     ++passedArgs_;
     if (!enoughMemory_)
         return;
     if (from.isDouble()) {
         FloatRegister fr;
         if (GetFloatArgReg(usedIntSlots_, usedFloatSlots_, &fr)) {
             if (!from.isFloatReg() || from.floatReg() != fr) {
-                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(fr), Move::DOUBLE);
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(fr), MoveOp::DOUBLE);
             }
             // else nothing to do; the value is in the right register already
         } else {
             // If (and only if) the integer registers have started spilling, do we
             // need to take the double register's alignment into account
             uint32_t disp = GetFloatArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
-            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), Move::DOUBLE);
+            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), MoveOp::DOUBLE);
         }
         usedFloatSlots_++;
     } else {
         Register r;
         if (GetIntArgReg(usedIntSlots_, usedFloatSlots_, &r)) {
             if (!from.isGeneralReg() || from.reg() != r) {
-                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(r), Move::GENERAL);
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(r), MoveOp::GENERAL);
             }
             // else nothing to do; the value is in the right register already
         } else {
             uint32_t disp = GetIntArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
-            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), Move::GENERAL);
+            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), MoveOp::GENERAL);
         }
         usedIntSlots_++;
     }
 
 }
 
 #else
 void
 MacroAssemblerARMCompat::passABIArg(const MoveOperand &from)
 {
     MoveOperand to;
     uint32_t increment = 1;
     bool useResolver = true;
     ++passedArgs_;
-    Move::Kind kind = Move::GENERAL;
+    MoveOp::Kind kind = MoveOp::GENERAL;
     if (from.isDouble()) {
         // Double arguments need to be rounded up to the nearest doubleword
         // boundary, even if it is in a register!
         usedSlots_ = (usedSlots_ + 1) & ~1;
         increment = 2;
-        kind = Move::DOUBLE;
+        kind = MoveOp::DOUBLE;
     }
 
     Register destReg;
     MoveOperand dest;
     if (GetIntArgReg(usedSlots_, 0, &destReg)) {
         if (from.isDouble()) {
             floatArgsInGPR[destReg.code() >> 1] = from;
             floatArgsInGPRValid[destReg.code() >> 1] = true;
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -457,17 +457,17 @@ class MacroAssemblerARMCompat : public M
     uint32_t usedSlots_;
 #endif
     bool dynamicAlignment_;
 
     bool enoughMemory_;
 
     // Used to work around the move resolver's lack of support for
     // moving into register pairs, which the softfp ABI needs.
-    mozilla::Array<MoveResolver::MoveOperand, 2> floatArgsInGPR;
+    mozilla::Array<MoveOperand, 2> floatArgsInGPR;
     mozilla::Array<bool, 2> floatArgsInGPRValid;
 
     // Compute space needed for the function call and set the properties of the
     // callee.  It returns the space which has to be allocated for calling the
     // function.
     //
     // arg            Number of arguments of the function.
     void setupABICall(uint32_t arg);
@@ -480,19 +480,16 @@ class MacroAssemblerARMCompat : public M
     // reserved for unexpected spills or C++ function calls. It is maintained
     // by functions which track stack alignment, which for clear distinction
     // use StudlyCaps (for example, Push, Pop).
     uint32_t framePushed_;
     void adjustFrame(int value) {
         setFramePushed(framePushed_ + value);
     }
   public:
-    typedef MoveResolver::MoveOperand MoveOperand;
-    typedef MoveResolver::Move Move;
-
     enum Result {
         GENERAL,
         DOUBLE,
         FLOAT
     };
 
     MacroAssemblerARMCompat()
       : inCall_(false),
--- a/js/src/jit/arm/MoveEmitter-arm.cpp
+++ b/js/src/jit/arm/MoveEmitter-arm.cpp
@@ -95,25 +95,25 @@ MoveEmitterARM::tempReg()
         pushedAtSpill_ = masm.framePushed();
     } else {
         masm.ma_str(spilledReg_, spillSlot());
     }
     return spilledReg_;
 }
 
 void
-MoveEmitterARM::breakCycle(const MoveOperand &from, const MoveOperand &to, Move::Kind kind)
+MoveEmitterARM::breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Kind kind)
 {
     // There is some pattern:
     //   (A -> B)
     //   (B -> A)
     //
     // This case handles (A -> B), which we reach first. We save B, then allow
     // the original move to continue.
-    if (kind == Move::DOUBLE) {
+    if (kind == MoveOp::DOUBLE) {
         if (to.isMemory()) {
             FloatRegister temp = ScratchFloatReg;
             masm.ma_vldr(toOperand(to, true), temp);
             masm.ma_vstr(temp, cycleSlot());
         } else {
             masm.ma_vstr(to.floatReg(), cycleSlot());
         }
     } else {
@@ -129,25 +129,25 @@ MoveEmitterARM::breakCycle(const MoveOpe
                 spilledReg_ = InvalidReg;
             }
             masm.ma_str(to.reg(), cycleSlot());
         }
     }
 }
 
 void
-MoveEmitterARM::completeCycle(const MoveOperand &from, const MoveOperand &to, Move::Kind kind)
+MoveEmitterARM::completeCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Kind kind)
 {
     // There is some pattern:
     //   (A -> B)
     //   (B -> A)
     //
     // This case handles (B -> A), which we reach last. We emit a move from the
     // saved value of B, to A.
-    if (kind == Move::DOUBLE) {
+    if (kind == MoveOp::DOUBLE) {
         if (to.isMemory()) {
             FloatRegister temp = ScratchFloatReg;
             masm.ma_vldr(cycleSlot(), temp);
             masm.ma_vstr(temp, toOperand(to, true));
         } else {
             masm.ma_vldr(cycleSlot(), to.floatReg());
         }
     } else {
@@ -227,33 +227,33 @@ MoveEmitterARM::emitDoubleMove(const Mov
         JS_ASSERT(from.isMemory());
         FloatRegister reg = ScratchFloatReg;
         masm.ma_vldr(toOperand(from, true), reg);
         masm.ma_vstr(reg, toOperand(to, true));
     }
 }
 
 void
-MoveEmitterARM::emit(const Move &move)
+MoveEmitterARM::emit(const MoveOp &move)
 {
     const MoveOperand &from = move.from();
     const MoveOperand &to = move.to();
 
     if (move.inCycle()) {
         if (inCycle_) {
             completeCycle(from, to, move.kind());
             inCycle_ = false;
             return;
         }
 
         breakCycle(from, to, move.kind());
         inCycle_ = true;
     }
 
-    if (move.kind() == Move::DOUBLE)
+    if (move.kind() == MoveOp::DOUBLE)
         emitDoubleMove(from, to);
     else
         emitMove(from, to);
 }
 
 void
 MoveEmitterARM::assertDone()
 {
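
The breakCycle/completeCycle pair above implements the (A -> B), (B -> A) pattern described in the comments: save B to a spill slot before the first move, then fill A from that slot at the end. A minimal standalone model, with plain ints standing in for registers and the cycle slot (illustrative only, not the SpiderMonkey code):

    #include <cassert>

    int main() {
        int A = 1, B = 2;  // two "registers" whose values must trade places
        int cycleSlot;     // stand-in for the emitter's spill slot

        cycleSlot = B;     // breakCycle: reached at (A -> B); save B first
        B = A;             // the ordinary (A -> B) move proceeds as usual
        A = cycleSlot;     // completeCycle: reached at (B -> A); restore B's old value

        assert(A == 2 && B == 1);
        return 0;
    }
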
--- a/js/src/jit/arm/MoveEmitter-arm.h
+++ b/js/src/jit/arm/MoveEmitter-arm.h
@@ -12,19 +12,16 @@
 
 namespace js {
 namespace jit {
 
 class CodeGenerator;
 
 class MoveEmitterARM
 {
-    typedef MoveResolver::Move Move;
-    typedef MoveResolver::MoveOperand MoveOperand;
-
     bool inCycle_;
     MacroAssemblerARMCompat &masm;
 
     // Original stack push value.
     uint32_t pushedAtStart_;
 
     // These store stack offsets to spill locations, snapshotting
     // codegen->framePushed_ at the time they were allocated. They are -1 if no
@@ -44,19 +41,19 @@ class MoveEmitterARM
     FloatRegister tempFloatReg();
     Operand cycleSlot() const;
     Operand spillSlot() const;
     Operand doubleSpillSlot() const;
     Operand toOperand(const MoveOperand &operand, bool isFloat) const;
 
     void emitMove(const MoveOperand &from, const MoveOperand &to);
     void emitDoubleMove(const MoveOperand &from, const MoveOperand &to);
-    void breakCycle(const MoveOperand &from, const MoveOperand &to, Move::Kind kind);
-    void completeCycle(const MoveOperand &from, const MoveOperand &to, Move::Kind kind);
-    void emit(const Move &move);
+    void breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Kind kind);
+    void completeCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Kind kind);
+    void emit(const MoveOp &move);
 
   public:
     MoveEmitterARM(MacroAssemblerARMCompat &masm);
     ~MoveEmitterARM();
     void emit(const MoveResolver &moves);
     void finish();
 };
 
--- a/js/src/jit/arm/Trampoline-arm.cpp
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -649,18 +649,16 @@ JitRuntime::generateBailoutHandler(JSCon
 #endif
 
     return code;
 }
 
 IonCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
-    typedef MoveResolver::MoveOperand MoveOperand;
-
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
     if (p)
         return p->value();
 
     // Generate a separated code for the wrapper.
     MacroAssembler masm(cx);
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -1324,18 +1324,16 @@ CodeGeneratorX86Shared::visitUrshD(LUrsh
         JS_ASSERT(ToRegister(rhs) == ecx);
         masm.shrl_cl(lhs);
     }
 
     masm.convertUInt32ToDouble(lhs, out);
     return true;
 }
 
-typedef MoveResolver::MoveOperand MoveOperand;
-
 MoveOperand
 CodeGeneratorX86Shared::toMoveOperand(const LAllocation *a) const
 {
     if (a->isGeneralReg())
         return MoveOperand(ToRegister(a));
     if (a->isFloatReg())
         return MoveOperand(ToFloatRegister(a));
     return MoveOperand(StackPointer, ToStackOffset(a));
--- a/js/src/jit/shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.h
@@ -44,17 +44,17 @@ class CodeGeneratorX86Shared : public Co
     }
     inline Operand ToOperand(const LAllocation *a) {
         return ToOperand(*a);
     }
     inline Operand ToOperand(const LDefinition *def) {
         return ToOperand(def->output());
     }
 
-    MoveResolver::MoveOperand toMoveOperand(const LAllocation *a) const;
+    MoveOperand toMoveOperand(const LAllocation *a) const;
 
     bool bailoutIf(Assembler::Condition condition, LSnapshot *snapshot);
     bool bailoutIf(Assembler::DoubleCondition condition, LSnapshot *snapshot);
     bool bailoutFrom(Label *label, LSnapshot *snapshot);
     bool bailout(LSnapshot *snapshot);
 
   protected:
     bool generatePrologue();
--- a/js/src/jit/shared/MoveEmitter-x86-shared.cpp
+++ b/js/src/jit/shared/MoveEmitter-x86-shared.cpp
@@ -22,17 +22,17 @@ MoveEmitterX86::MoveEmitterX86(MacroAsse
 // and whether it can be implemented entirely by swaps.
 size_t
 MoveEmitterX86::characterizeCycle(const MoveResolver &moves, size_t i,
                                   bool *allGeneralRegs, bool *allFloatRegs)
 {
     size_t swapCount = 0;
 
     for (size_t j = i; ; j++) {
-        const Move &move = moves.getMove(j);
+        const MoveOp &move = moves.getMove(j);
 
         // If it isn't a cycle of registers of the same kind, we won't be able
         // to optimize it.
         if (!move.to().isGeneralReg())
             *allGeneralRegs = false;
         if (!move.to().isFloatReg())
             *allFloatRegs = false;
         if (!*allGeneralRegs && !*allFloatRegs)
@@ -51,17 +51,17 @@ MoveEmitterX86::characterizeCycle(const 
             *allFloatRegs = false;
             return -1;
         }
 
         swapCount++;
     }
 
     // Check that the last move cycles back to the first move.
-    const Move &move = moves.getMove(i + swapCount);
+    const MoveOp &move = moves.getMove(i + swapCount);
     if (move.from() != moves.getMove(i).to()) {
         *allGeneralRegs = false;
         *allFloatRegs = false;
         return -1;
     }
 
     return swapCount;
 }
@@ -94,17 +94,17 @@ MoveEmitterX86::maybeEmitOptimizedCycle(
 
     return false;
 }
 
 void
 MoveEmitterX86::emit(const MoveResolver &moves)
 {
     for (size_t i = 0; i < moves.numMoves(); i++) {
-        const Move &move = moves.getMove(i);
+        const MoveOp &move = moves.getMove(i);
         const MoveOperand &from = move.from();
         const MoveOperand &to = move.to();
 
         if (move.inCycle()) {
             // If this is the end of a cycle for which we're using the stack,
             // handle the end.
             if (inCycle_) {
                 completeCycle(to, move.kind());
@@ -123,17 +123,17 @@ MoveEmitterX86::emit(const MoveResolver 
             }
 
             // Otherwise use the stack.
             breakCycle(to, move.kind());
             inCycle_ = true;
         }
 
         // A normal move which is not part of a cycle.
-        if (move.kind() == Move::DOUBLE)
+        if (move.kind() == MoveOp::DOUBLE)
             emitDoubleMove(from, to);
         else
             emitGeneralMove(from, to);
     }
 }
 
 MoveEmitterX86::~MoveEmitterX86()
 {
@@ -199,46 +199,46 @@ MoveEmitterX86::toPopOperand(const MoveO
     if (operand.isGeneralReg())
         return Operand(operand.reg());
 
     JS_ASSERT(operand.isFloatReg());
     return Operand(operand.floatReg());
 }
 
 void
-MoveEmitterX86::breakCycle(const MoveOperand &to, Move::Kind kind)
+MoveEmitterX86::breakCycle(const MoveOperand &to, MoveOp::Kind kind)
 {
     // There is some pattern:
     //   (A -> B)
     //   (B -> A)
     //
     // This case handles (A -> B), which we reach first. We save B, then allow
     // the original move to continue.
-    if (kind == Move::DOUBLE) {
+    if (kind == MoveOp::DOUBLE) {
         if (to.isMemory()) {
             masm.loadDouble(toAddress(to), ScratchFloatReg);
             masm.storeDouble(ScratchFloatReg, cycleSlot());
         } else {
             masm.storeDouble(to.floatReg(), cycleSlot());
         }
     } else {
         masm.Push(toOperand(to));
     }
 }
 
 void
-MoveEmitterX86::completeCycle(const MoveOperand &to, Move::Kind kind)
+MoveEmitterX86::completeCycle(const MoveOperand &to, MoveOp::Kind kind)
 {
     // There is some pattern:
     //   (A -> B)
     //   (B -> A)
     //
     // This case handles (B -> A), which we reach last. We emit a move from the
     // saved value of B, to A.
-    if (kind == Move::DOUBLE) {
+    if (kind == MoveOp::DOUBLE) {
         if (to.isMemory()) {
             masm.loadDouble(cycleSlot(), ScratchFloatReg);
             masm.storeDouble(ScratchFloatReg, toAddress(to));
         } else {
             masm.loadDouble(cycleSlot(), to.floatReg());
         }
     } else {
         if (to.isMemory()) {
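
characterizeCycle above decides whether a cycle stays within a single register class, because such cycles can be emitted entirely as swaps instead of going through the spill slot. As a rough standalone illustration (not the emitter itself): a register cycle of length n takes n - 1 swaps, e.g. rotating three values:

    #include <cassert>
    #include <utility>

    int main() {
        // Cycle r0 -> r1 -> r2 -> r0, with ints standing in for registers.
        int r0 = 0, r1 = 1, r2 = 2;

        std::swap(r1, r0);  // r1 receives r0's old value
        std::swap(r2, r0);  // r2 receives r1's old value (left in r0 by the first swap)

        assert(r0 == 2 && r1 == 0 && r2 == 1);  // two swaps, no spill slot needed
        return 0;
    }
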
--- a/js/src/jit/shared/MoveEmitter-x86-shared.h
+++ b/js/src/jit/shared/MoveEmitter-x86-shared.h
@@ -18,19 +18,16 @@
 
 namespace js {
 namespace jit {
 
 class CodeGenerator;
 
 class MoveEmitterX86
 {
-    typedef MoveResolver::Move Move;
-    typedef MoveResolver::MoveOperand MoveOperand;
-
     bool inCycle_;
     MacroAssemblerSpecific &masm;
 
     // Original stack push value.
     uint32_t pushedAtStart_;
 
     // This is a store stack offset for the cycle-break spill slot, snapshotting
     // codegen->framePushed_ at the time it is allocated. -1 if not allocated.
@@ -43,18 +40,18 @@ class MoveEmitterX86
     Operand toPopOperand(const MoveOperand &operand) const;
 
     size_t characterizeCycle(const MoveResolver &moves, size_t i,
                              bool *allGeneralRegs, bool *allFloatRegs);
     bool maybeEmitOptimizedCycle(const MoveResolver &moves, size_t i,
                                  bool allGeneralRegs, bool allFloatRegs, size_t swapCount);
     void emitGeneralMove(const MoveOperand &from, const MoveOperand &to);
     void emitDoubleMove(const MoveOperand &from, const MoveOperand &to);
-    void breakCycle(const MoveOperand &to, Move::Kind kind);
-    void completeCycle(const MoveOperand &to, Move::Kind kind);
+    void breakCycle(const MoveOperand &to, MoveOp::Kind kind);
+    void completeCycle(const MoveOperand &to, MoveOp::Kind kind);
 
   public:
     MoveEmitterX86(MacroAssemblerSpecific &masm);
     ~MoveEmitterX86();
     void emit(const MoveResolver &moves);
     void finish();
 };
 
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -144,30 +144,30 @@ MacroAssemblerX64::passABIArg(const Move
                 // Nothing to do; the value is in the right register already
                 return;
             }
             to = MoveOperand(dest);
         } else {
             to = MoveOperand(StackPointer, stackForCall_);
             stackForCall_ += sizeof(double);
         }
-        enoughMemory_ = moveResolver_.addMove(from, to, Move::DOUBLE);
+        enoughMemory_ = moveResolver_.addMove(from, to, MoveOp::DOUBLE);
     } else {
         Register dest;
         if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) {
             if (from.isGeneralReg() && from.reg() == dest) {
                 // Nothing to do; the value is in the right register already
                 return;
             }
             to = MoveOperand(dest);
         } else {
             to = MoveOperand(StackPointer, stackForCall_);
             stackForCall_ += sizeof(int64_t);
         }
-        enoughMemory_ = moveResolver_.addMove(from, to, Move::GENERAL);
+        enoughMemory_ = moveResolver_.addMove(from, to, MoveOp::GENERAL);
     }
 }
 
 void
 MacroAssemblerX64::passABIArg(const Register &reg)
 {
     passABIArg(MoveOperand(reg));
 }
@@ -259,17 +259,17 @@ IsIntArgReg(Register reg)
 }
 
 void
 MacroAssemblerX64::callWithABI(Address fun, Result result)
 {
     if (IsIntArgReg(fun.base)) {
         // Callee register may be clobbered for an argument. Move the callee to
         // r10, a volatile, non-argument register.
-        moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10), Move::GENERAL);
+        moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10), MoveOp::GENERAL);
         fun.base = r10;
     }
 
     JS_ASSERT(!IsIntArgReg(fun.base));
 
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
     call(Operand(fun));
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -80,19 +80,16 @@ class MacroAssemblerX64 : public MacroAs
     using MacroAssemblerX86Shared::branch32;
 
     enum Result {
         GENERAL,
         DOUBLE,
         FLOAT
     };
 
-    typedef MoveResolver::MoveOperand MoveOperand;
-    typedef MoveResolver::Move Move;
-
     MacroAssemblerX64()
       : inCall_(false),
         enoughMemory_(true)
     {
     }
 
     // The buffer is about to be linked, make sure any constant pools or excess
     // bookkeeping has been flushed to the instruction stream.
--- a/js/src/jit/x64/Trampoline-x64.cpp
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -498,18 +498,16 @@ JitRuntime::generateBailoutHandler(JSCon
 #endif
 
     return code;
 }
 
 IonCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
-    typedef MoveResolver::MoveOperand MoveOperand;
-
     JS_ASSERT(!StackKeptAligned);
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
     if (p)
         return p->value();
 
     // Generate a separated code for the wrapper.
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -164,20 +164,20 @@ MacroAssemblerX86::setupUnalignedABICall
 
 void
 MacroAssemblerX86::passABIArg(const MoveOperand &from)
 {
     ++passedArgs_;
     MoveOperand to = MoveOperand(StackPointer, stackForCall_);
     if (from.isDouble()) {
         stackForCall_ += sizeof(double);
-        enoughMemory_ &= moveResolver_.addMove(from, to, Move::DOUBLE);
+        enoughMemory_ &= moveResolver_.addMove(from, to, MoveOp::DOUBLE);
     } else {
         stackForCall_ += sizeof(int32_t);
-        enoughMemory_ &= moveResolver_.addMove(from, to, Move::GENERAL);
+        enoughMemory_ &= moveResolver_.addMove(from, to, MoveOp::GENERAL);
     }
 }
 
 void
 MacroAssemblerX86::passABIArg(const Register &reg)
 {
     passABIArg(MoveOperand(reg));
 }
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -71,19 +71,16 @@ class MacroAssemblerX86 : public MacroAs
     using MacroAssemblerX86Shared::branch32;
 
     enum Result {
         GENERAL,
         DOUBLE,
         FLOAT
     };
 
-    typedef MoveResolver::MoveOperand MoveOperand;
-    typedef MoveResolver::Move Move;
-
     MacroAssemblerX86()
       : inCall_(false),
         enoughMemory_(true)
     {
     }
 
     // The buffer is about to be linked, make sure any constant pools or excess
     // bookkeeping has been flushed to the instruction stream.
--- a/js/src/jit/x86/Trampoline-x86.cpp
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -533,18 +533,16 @@ JitRuntime::generateBailoutHandler(JSCon
 #endif
 
     return code;
 }
 
 IonCode *
 JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
 {
-    typedef MoveResolver::MoveOperand MoveOperand;
-
     JS_ASSERT(!StackKeptAligned);
     JS_ASSERT(functionWrappers_);
     JS_ASSERT(functionWrappers_->initialized());
     VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
     if (p)
         return p->value();
 
     // Generate a separated code for the wrapper.