Bug 1138348 - byte ops on x86_64. r=h4writer
author Lars T Hansen <lhansen@mozilla.com>
Wed, 11 Mar 2015 15:45:57 +0100
changeset 261930 d2747e260b681b4ee64718b52793b90224c3fdfe
parent 261929 e677e55b96c2a6c12df99ec21bca7e8b64b96763
child 261931 fbe97de169968022ca4a7f0a7c95e2131338d853
push id 4718
push user raliiev@mozilla.com
push date Mon, 11 May 2015 18:39:53 +0000
treeherder mozilla-beta@c20c4ef55f08
reviewers h4writer
bugs 1138348
milestone 39.0a1
Bug 1138348 - byte ops on x86_64. r=h4writer
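
Move the asm.js compare-exchange and atomic-binop lowerings out of the shared
x86 code and into the per-architecture files, and give the shared typed-array
lowerings a useI386ByteRegisters flag: x86 passes true and keeps pinning
byte-sized operands to ebx/ecx/edx, while x64 passes false because on x86-64
byte operations can use any register.  A minimal sketch of the ISA difference
this relies on (illustrative only, not part of the patch):

    // i386: the byte source of cmpxchgb must be al, bl, cl, or dl, so the
    // lowering pins newval/value to ebx (and a byte-sized temp to ecx):
    //
    //    lock cmpxchgb bl, mem
    //
    // x86-64: a REX prefix exposes sil, dil, spl, bpl, and r8b..r15b as byte
    // registers, so whatever register the allocator picks will do:
    //
    //    lock cmpxchgb sil, mem
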
js/src/jit/shared/Lowering-x86-shared.cpp
js/src/jit/shared/Lowering-x86-shared.h
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x64/Lowering-x64.h
js/src/jit/x86/Lowering-x86.cpp
js/src/jit/x86/Lowering-x86.h
--- a/js/src/jit/shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/shared/Lowering-x86-shared.cpp
@@ -359,17 +359,18 @@ LIRGeneratorX86Shared::lowerTruncateFToI
     MDefinition *opd = ins->input();
     MOZ_ASSERT(opd->type() == MIRType_Float32);
 
     LDefinition maybeTemp = Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempFloat32();
     define(new(alloc()) LTruncateFToInt32(useRegister(opd), maybeTemp), ins);
 }
 
 void
-LIRGeneratorX86Shared::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins)
+LIRGeneratorX86Shared::lowerCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins,
+                                                             bool useI386ByteRegisters)
 {
     MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
     MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
 
     MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
     MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
 
     const LUse elements = useRegister(ins->elements());
@@ -380,51 +381,50 @@ LIRGeneratorX86Shared::visitCompareExcha
     // If the target is an integer register then the target must be
     // eax.
     //
     // If the target is a floating register then we need a temp at the
     // lower level; that temp must be eax.
     //
     // oldval must be in a register.
     //
-    // newval will need to be in a register.  If the source is a byte
-    // array then the newval must be a register that has a byte size:
-    // ebx, ecx, or edx, since eax is taken for the output in this
-    // case.
+    // newval must be in a register.  If the source is a byte array
+    // then newval must be a register that has a byte size: on x86
+    // this must be ebx, ecx, or edx (eax is taken for the output).
     //
-    // Bug #1077036 describes some optimization opportunities.
+    // Bug #1077036 describes some further optimization opportunities.
 
     bool fixedOutput = false;
     LDefinition tempDef = LDefinition::BogusTemp();
     LAllocation newval;
     if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
         tempDef = tempFixed(eax);
         newval = useRegister(ins->newval());
     } else {
         fixedOutput = true;
-        if (ins->isByteArray())
+        if (useI386ByteRegisters && ins->isByteArray())
             newval = useFixed(ins->newval(), ebx);
         else
             newval = useRegister(ins->newval());
     }
 
-    // A register allocator limitation precludes 'useRegisterAtStart()' here.
     const LAllocation oldval = useRegister(ins->oldval());
 
     LCompareExchangeTypedArrayElement *lir =
         new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);
 
     if (fixedOutput)
         defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
     else
         define(lir, ins);
 }
 
 void
-LIRGeneratorX86Shared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins)
+LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins,
+                                                         bool useI386ByteRegisters)
 {
     MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
     MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
     MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
 
     MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
     MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
 
@@ -447,17 +447,17 @@ LIRGeneratorX86Shared::visitAtomicTypedA
     // L: mov           eax, temp
     //    andl          src, temp
     //    lock cmpxchg  temp, mem  ; reads eax also
     //    jnz           L
     //    ; result in eax
     //
     // Note the placement of L, cmpxchg will update eax with *mem if
     // *mem does not have the expected value, so reloading it at the
-    // top of the loop is redundant.
+    // top of the loop would be redundant.
     //
     // If the array is not a uint32 array then:
     //  - eax should be the output (one result of the cmpxchg)
     //  - there is a temp, which must have a byte register if
     //    the array has 1-byte elements
     //
     // If the array is a uint32 array then:
     //  - eax is the first temp
@@ -483,164 +483,36 @@ LIRGeneratorX86Shared::visitAtomicTypedA
         value = useRegister(ins->value());
         fixedOutput = false;
         if (bitOp) {
             tempDef1 = tempFixed(eax);
             tempDef2 = temp();
         } else {
             tempDef1 = temp();
         }
-    } else if (ins->isByteArray()) {
+    } else if (useI386ByteRegisters && ins->isByteArray()) {
         value = useFixed(ins->value(), ebx);
         if (bitOp)
             tempDef1 = tempFixed(ecx);
-    }
-    else {
+    } else {
         value = useRegister(ins->value());
         if (bitOp)
             tempDef1 = temp();
     }
 
     LAtomicTypedArrayElementBinop *lir =
         new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);
 
     if (fixedOutput)
         defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
     else
         define(lir, ins);
 }
 
 void
-LIRGeneratorX86Shared::lowerAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins,
-						     const LDefinition& addrTemp)
-{
-    MDefinition *ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
-
-    bool byteArray = false;
-    switch (ins->accessType()) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-        byteArray = true;
-        break;
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        break;
-      default:
-        MOZ_CRASH("Unexpected array type");
-    }
-
-    // Register allocation:
-    //
-    // The output must be eax.
-    //
-    // oldval must be in a register (it'll eventually end up in eax so
-    // ideally it's there to begin with).
-    //
-    // newval will need to be in a register.  If the source is a byte
-    // array then the newval must be a register that has a byte size:
-    // ebx, ecx, or edx, since eax is taken for the output in this
-    // case.  We pick ebx but it would be more flexible to pick any of
-    // the three that wasn't being used.
-    //
-    // Bug #1077036 describes some optimization opportunities.
-
-    const LAllocation newval = byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
-    const LAllocation oldval = useRegister(ins->oldValue());
-
-    LAsmJSCompareExchangeHeap *lir =
-        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr), oldval, newval);
-
-    lir->setAddrTemp(addrTemp);
-    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
-}
-
-void
-LIRGeneratorX86Shared::lowerAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins,
-						 const LDefinition& addrTemp)
-{
-    MDefinition *ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
-
-    bool byteArray = false;
-    switch (ins->accessType()) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-        byteArray = true;
-        break;
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        break;
-      default:
-        MOZ_CRASH("Unexpected array type");
-    }
-
-    // Register allocation:
-    //
-    // For ADD and SUB we'll use XADD:
-    //
-    //    movl       value, output
-    //    lock xaddl output, mem
-    //
-    // For the 8-bit variants XADD needs a byte register for the
-    // output only, we can still set up with movl; just pin the output
-    // to eax (or ebx / ecx / edx).
-    //
-    // For AND/OR/XOR we need to use a CMPXCHG loop:
-    //
-    //    movl          *mem, eax
-    // L: mov           eax, temp
-    //    andl          value, temp
-    //    lock cmpxchg  temp, mem  ; reads eax also
-    //    jnz           L
-    //    ; result in eax
-    //
-    // Note the placement of L, cmpxchg will update eax with *mem if
-    // *mem does not have the expected value, so reloading it at the
-    // top of the loop is redundant.
-    //
-    // We want to fix eax as the output.  We also need a temp for
-    // the intermediate value.
-    //
-    // For the 8-bit variants the temp must have a byte register.
-    //
-    // There are optimization opportunities:
-    //  - when the result is unused, Bug #1077014.
-    //  - better register allocation and instruction selection, Bug #1077036.
-
-    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
-    LDefinition tempDef = LDefinition::BogusTemp();
-    LAllocation value;
-
-    // Optimization opportunity: "value" need not be pinned to something that
-    // has a byte register unless the back-end insists on using a byte move
-    // for the setup or the payload computation, which really it need not do.
-
-    if (byteArray) {
-        value = useFixed(ins->value(), ebx);
-        if (bitOp)
-            tempDef = tempFixed(ecx);
-    } else {
-        value = useRegister(ins->value());
-        if (bitOp)
-            tempDef = temp();
-    }
-
-    LAsmJSAtomicBinopHeap *lir =
-        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr), value, tempDef);
-
-    lir->setAddrTemp(addrTemp);
-    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
-}
-
-void
 LIRGeneratorX86Shared::visitSimdBinaryArith(MSimdBinaryArith *ins)
 {
     MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
     MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
     MOZ_ASSERT(IsSimdType(ins->type()));
 
     MDefinition *lhs = ins->lhs();
     MDefinition *rhs = ins->rhs();
--- a/js/src/jit/shared/Lowering-x86-shared.h
+++ b/js/src/jit/shared/Lowering-x86-shared.h
@@ -51,18 +51,18 @@ class LIRGeneratorX86Shared : public LIR
     void lowerConstantDouble(double d, MInstruction *ins);
     void lowerConstantFloat32(float d, MInstruction *ins);
     void lowerTruncateDToInt32(MTruncateToInt32 *ins);
     void lowerTruncateFToInt32(MTruncateToInt32 *ins);
     void visitSimdBinaryArith(MSimdBinaryArith *ins);
     void visitSimdSelect(MSimdSelect *ins);
     void visitSimdSplatX4(MSimdSplatX4 *ins);
     void visitSimdValueX4(MSimdValueX4 *ins);
-    void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
-    void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
-    void lowerAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins, const LDefinition& addrTemp);
-    void lowerAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins, const LDefinition& addrTemp);
+    void lowerCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins,
+                                               bool useI386ByteRegisters);
+    void lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins,
+                                           bool useI386ByteRegisters);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_shared_Lowering_x86_shared_h */
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -127,16 +127,28 @@ LIRGeneratorX64::defineUntypedPhi(MPhi *
 
 void
 LIRGeneratorX64::lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex)
 {
     lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
 }
 
 void
+LIRGeneratorX64::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins)
+{
+    lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
+LIRGeneratorX64::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins)
+{
+    lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
+}
+
+void
 LIRGeneratorX64::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
     LAsmJSUInt32ToDouble *lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()));
     define(lir, ins);
 }
 
 void
@@ -195,23 +207,68 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAs
         MOZ_CRASH("unexpected array type");
     }
     add(lir, ins);
 }
 
 void
 LIRGeneratorX64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
 {
-    lowerAsmJSCompareExchangeHeap(ins, LDefinition::BogusTemp());
+    MDefinition *ptr = ins->ptr();
+    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+
+    const LAllocation oldval = useRegister(ins->oldValue());
+    const LAllocation newval = useRegister(ins->newValue());
+
+    LAsmJSCompareExchangeHeap *lir =
+        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr), oldval, newval);
+
+    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
 }
 
 void
 LIRGeneratorX64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins)
 {
-    lowerAsmJSAtomicBinopHeap(ins, LDefinition::BogusTemp());
+    MDefinition *ptr = ins->ptr();
+    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+
+    // Register allocation:
+    //
+    // For ADD and SUB we'll use XADD (with word and byte ops as appropriate):
+    //
+    //    movl       value, output
+    //    lock xaddl output, mem
+    //
+    // For AND/OR/XOR we need to use a CMPXCHG loop:
+    //
+    //    movl          *mem, eax
+    // L: mov           eax, temp
+    //    andl          value, temp
+    //    lock cmpxchg  temp, mem  ; reads eax also
+    //    jnz           L
+    //    ; result in eax
+    //
+    // Note the placement of L, cmpxchg will update eax with *mem if
+    // *mem does not have the expected value, so reloading it at the
+    // top of the loop would be redundant.
+    //
+    // We want to fix eax as the output.  We also need a temp for
+    // the intermediate value.
+    //
+    // There are optimization opportunities:
+    //  - when the result is unused, Bug #1077014.
+
+    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+    LAllocation value = useRegister(ins->value());
+    LDefinition tempDef = bitOp ? temp() : LDefinition::BogusTemp();
+
+    LAsmJSAtomicBinopHeap *lir =
+        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr), value, tempDef);
+
+    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
 }
 
 void
 LIRGeneratorX64::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
 {
     define(new(alloc()) LAsmJSLoadFuncPtr(useRegister(ins->index()), temp()), ins);
 }
 
--- a/js/src/jit/x64/Lowering-x64.h
+++ b/js/src/jit/x64/Lowering-x64.h
@@ -37,16 +37,18 @@ class LIRGeneratorX64 : public LIRGenera
     LDefinition tempToUnbox();
 
     bool needTempForPostBarrier() { return false; }
 
   public:
     void visitBox(MBox *box);
     void visitUnbox(MUnbox *unbox);
     void visitReturn(MReturn *ret);
+    void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
+    void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -178,16 +178,28 @@ LIRGeneratorX86::lowerUntypedPhiInput(MP
     MDefinition *operand = phi->getOperand(inputPosition);
     LPhi *type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
     LPhi *payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
     type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
     payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
 }
 
 void
+LIRGeneratorX86::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins)
+{
+    lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
+}
+
+void
+LIRGeneratorX86::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins)
+{
+    lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
+}
+
+void
 LIRGeneratorX86::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
     LAsmJSUInt32ToDouble *lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()), temp());
     define(lir, ins);
 }
 
 void
@@ -268,23 +280,111 @@ LIRGeneratorX86::visitStoreTypedArrayEle
     }
 
     add(lir, ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
 {
-    lowerAsmJSCompareExchangeHeap(ins, temp());
+    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+
+    MDefinition *ptr = ins->ptr();
+    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+
+    bool byteArray = byteSize(ins->accessType()) == 1;
+
+    // Register allocation:
+    //
+    // The output must be eax.
+    //
+    // oldval must be in a register.
+    //
+    // newval must be in a register.  If the source is a byte array
+    // then newval must be a register that has a byte size: on x86
+    // this must be ebx, ecx, or edx (eax is taken for the output).
+    //
+    // Bug #1077036 describes some optimization opportunities.
+
+    const LAllocation oldval = useRegister(ins->oldValue());
+    const LAllocation newval = byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
+
+    LAsmJSCompareExchangeHeap *lir =
+        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr), oldval, newval);
+
+    lir->setAddrTemp(temp());
+    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
 }
 
 void
 LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins)
 {
-    lowerAsmJSAtomicBinopHeap(ins, temp());
+    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+
+    MDefinition *ptr = ins->ptr();
+    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+
+    bool byteArray = byteSize(ins->accessType()) == 1;
+
+    // Register allocation:
+    //
+    // For ADD and SUB we'll use XADD:
+    //
+    //    movl       value, output
+    //    lock xaddl output, mem
+    //
+    // For the 8-bit variants XADD needs a byte register for the
+    // output only, we can still set up with movl; just pin the output
+    // to eax (or ebx / ecx / edx).
+    //
+    // For AND/OR/XOR we need to use a CMPXCHG loop:
+    //
+    //    movl          *mem, eax
+    // L: mov           eax, temp
+    //    andl          value, temp
+    //    lock cmpxchg  temp, mem  ; reads eax also
+    //    jnz           L
+    //    ; result in eax
+    //
+    // Note the placement of L, cmpxchg will update eax with *mem if
+    // *mem does not have the expected value, so reloading it at the
+    // top of the loop would be redundant.
+    //
+    // We want to fix eax as the output.  We also need a temp for
+    // the intermediate value.
+    //
+    // For the 8-bit variants the temp must have a byte register.
+    //
+    // There are optimization opportunities:
+    //  - when the result is unused, Bug #1077014.
+    //  - better register allocation and instruction selection, Bug #1077036.
+
+    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+    LDefinition tempDef = LDefinition::BogusTemp();
+    LAllocation value;
+
+    // Optimization opportunity: "value" need not be pinned to something that
+    // has a byte register unless the back-end insists on using a byte move
+    // for the setup or the payload computation, which really it need not do.
+
+    if (byteArray) {
+        value = useFixed(ins->value(), ebx);
+        if (bitOp)
+            tempDef = tempFixed(ecx);
+    } else {
+        value = useRegister(ins->value());
+        if (bitOp)
+            tempDef = temp();
+    }
+
+    LAsmJSAtomicBinopHeap *lir =
+        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr), value, tempDef);
+
+    lir->setAddrTemp(temp());
+    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
 }
 
 void
 LIRGeneratorX86::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
 {
     define(new(alloc()) LAsmJSLoadFuncPtr(useRegisterAtStart(ins->index())), ins);
 }
 
--- a/js/src/jit/x86/Lowering-x86.h
+++ b/js/src/jit/x86/Lowering-x86.h
@@ -43,16 +43,18 @@ class LIRGeneratorX86 : public LIRGenera
 
     void lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
     void defineUntypedPhi(MPhi *phi, size_t lirIndex);
 
   public:
     void visitBox(MBox *box);
     void visitUnbox(MUnbox *unbox);
     void visitReturn(MReturn *ret);
+    void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
+    void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);