Bug 1046585 part 8 - Move X86Assembler.h and AssemblerBuffer.h into jit/. r=sunfish
authorJan de Mooij <jdemooij@mozilla.com>
Tue, 19 Aug 2014 14:34:45 +0200
changeset 200258 5482a918ee738fb105676ddfb9e4ed4185649782
parent 200257 32628ddca30c9e8a39be492a787bfa5a6e50825d
child 200259 6ae6e5032735c1189800783ddc7d777a5ed8a7df
push id: 47859
push user: jandemooij@gmail.com
push date: Tue, 19 Aug 2014 12:38:20 +0000
treeherder: mozilla-inbound@5482a918ee73 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: sunfish
bugs: 1046585
milestone: 34.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1046585 part 8 - Move X86Assembler.h and AssemblerBuffer.h into jit/. r=sunfish
js/src/asmjs/AsmJSModule.cpp
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/assembler/assembler/AssemblerBuffer.h
js/src/assembler/assembler/MacroAssemblerX86Common.h
js/src/assembler/assembler/X86Assembler.h
js/src/jit/shared/Assembler-x86-shared.cpp
js/src/jit/shared/Assembler-x86-shared.h
js/src/jit/shared/AssemblerBuffer-x86-shared.h
js/src/jit/shared/BaseAssembler-x86-shared.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/MacroAssembler-x86-shared.h
js/src/jit/x64/Architecture-x64.h
js/src/jit/x64/Assembler-x64.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x86/Architecture-x86.h
js/src/jit/x86/Assembler-x86.cpp
js/src/jit/x86/Assembler-x86.h
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -755,34 +755,34 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
     heapDatum() = heap->dataPointer();
 
 #if defined(JS_CODEGEN_X86)
     uint8_t *heapOffset = heap->dataPointer();
     void *heapLength = (void*)heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
         if (access.hasLengthCheck())
-            JSC::X86Assembler::setPointer(access.patchLengthAt(code_), heapLength);
+            X86Assembler::setPointer(access.patchLengthAt(code_), heapLength);
         void *addr = access.patchOffsetAt(code_);
-        uint32_t disp = reinterpret_cast<uint32_t>(JSC::X86Assembler::getPointer(addr));
+        uint32_t disp = reinterpret_cast<uint32_t>(X86Assembler::getPointer(addr));
         JS_ASSERT(disp <= INT32_MAX);
-        JSC::X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
+        X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CODEGEN_X64)
     int32_t heapLength = int32_t(intptr_t(heap->byteLength()));
     if (usesSignalHandlersForOOB())
         return;
     // If we cannot use the signal handlers, we need to patch the heap length
     // checks at the right places. All accesses that have been recorded are the
     // only ones that need bound checks (see also
     // CodeGeneratorX64::visitAsmJS{Load,Store}Heap)
     for (size_t i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
         if (access.hasLengthCheck())
-            JSC::X86Assembler::setInt32(access.patchLengthAt(code_), heapLength);
+            X86Assembler::setInt32(access.patchLengthAt(code_), heapLength);
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         jit::Assembler::UpdateBoundsCheck(heapLength,
                                           (jit::Instruction*)(heapAccesses_[i].offset() + code_));
     }
 #endif
@@ -820,19 +820,19 @@ AsmJSModule::restoreToInitialState(uint8
 
     if (maybePrevBuffer) {
 #if defined(JS_CODEGEN_X86)
         // Subtract out the base-pointer added by AsmJSModule::initHeap.
         uint8_t *ptrBase = maybePrevBuffer->dataPointer();
         for (unsigned i = 0; i < heapAccesses_.length(); i++) {
             const jit::AsmJSHeapAccess &access = heapAccesses_[i];
             void *addr = access.patchOffsetAt(code_);
-            uint8_t *ptr = reinterpret_cast<uint8_t*>(JSC::X86Assembler::getPointer(addr));
+            uint8_t *ptr = reinterpret_cast<uint8_t*>(X86Assembler::getPointer(addr));
             JS_ASSERT(ptr >= ptrBase);
-            JSC::X86Assembler::setPointer(addr, (void *)(ptr - ptrBase));
+            X86Assembler::setPointer(addr, (void *)(ptr - ptrBase));
         }
 #endif
     }
 }
 
 static void
 AsmJSModuleObject_finalize(FreeOp *fop, JSObject *obj)
 {
@@ -1596,17 +1596,17 @@ AsmJSModule::setProfilingEnabled(bool en
     // prologues:
     for (size_t i = 0; i < callSites_.length(); i++) {
         CallSite &cs = callSites_[i];
         if (cs.kind() != CallSite::Relative)
             continue;
 
         uint8_t *callerRetAddr = code_ + cs.returnAddressOffset();
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-        void *callee = JSC::X86Assembler::getRel32Target(callerRetAddr);
+        void *callee = X86Assembler::getRel32Target(callerRetAddr);
 #elif defined(JS_CODEGEN_ARM)
         uint8_t *caller = callerRetAddr - 4;
         Instruction *callerInsn = reinterpret_cast<Instruction*>(caller);
         BOffImm calleeOffset;
         callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
         void *callee = calleeOffset.getDest(callerInsn);
 #elif defined(JS_CODEGEN_MIPS)
         Instruction *instr = (Instruction *)(callerRetAddr - 4 * sizeof(uint32_t));
@@ -1624,17 +1624,17 @@ AsmJSModule::setProfilingEnabled(bool en
 
         uint8_t *profilingEntry = code_ + codeRange->begin();
         uint8_t *entry = code_ + codeRange->entry();
         JS_ASSERT_IF(profilingEnabled_, callee == profilingEntry);
         JS_ASSERT_IF(!profilingEnabled_, callee == entry);
         uint8_t *newCallee = enabled ? profilingEntry : entry;
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-        JSC::X86Assembler::setRel32(callerRetAddr, newCallee);
+        X86Assembler::setRel32(callerRetAddr, newCallee);
 #elif defined(JS_CODEGEN_ARM)
         new (caller) InstBLImm(BOffImm(newCallee - caller), Assembler::Always);
 #elif defined(JS_CODEGEN_MIPS)
         Assembler::WriteLuiOriInstructions(instr, instr->next(),
                                            ScratchRegister, (uint32_t)newCallee);
         instr[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
 #elif defined(JS_CODEGEN_NONE)
         MOZ_CRASH();
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -378,52 +378,52 @@ ContextToPC(CONTEXT *context)
 }
 
 # if defined(JS_CODEGEN_X64)
 static void
 SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
 {
     if (reg.isFloat()) {
         switch (reg.fpu().code()) {
-          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
-          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
-          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
-          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
-          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
-          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
-          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
-          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
-          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
-          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
-          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
-          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
-          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
-          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
-          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
-          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
+          case X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
+          case X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
+          case X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
+          case X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
+          case X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
+          case X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
+          case X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
+          case X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
+          case X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
+          case X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
+          case X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
+          case X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
+          case X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
+          case X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
+          case X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
+          case X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
           default: MOZ_CRASH();
         }
     } else {
         switch (reg.gpr().code()) {
-          case JSC::X86Registers::eax: RAX_sig(context) = 0; break;
-          case JSC::X86Registers::ecx: RCX_sig(context) = 0; break;
-          case JSC::X86Registers::edx: RDX_sig(context) = 0; break;
-          case JSC::X86Registers::ebx: RBX_sig(context) = 0; break;
-          case JSC::X86Registers::esp: RSP_sig(context) = 0; break;
-          case JSC::X86Registers::ebp: RBP_sig(context) = 0; break;
-          case JSC::X86Registers::esi: RSI_sig(context) = 0; break;
-          case JSC::X86Registers::edi: RDI_sig(context) = 0; break;
-          case JSC::X86Registers::r8:  R8_sig(context)  = 0; break;
-          case JSC::X86Registers::r9:  R9_sig(context)  = 0; break;
-          case JSC::X86Registers::r10: R10_sig(context) = 0; break;
-          case JSC::X86Registers::r11: R11_sig(context) = 0; break;
-          case JSC::X86Registers::r12: R12_sig(context) = 0; break;
-          case JSC::X86Registers::r13: R13_sig(context) = 0; break;
-          case JSC::X86Registers::r14: R14_sig(context) = 0; break;
-          case JSC::X86Registers::r15: R15_sig(context) = 0; break;
+          case X86Registers::eax: RAX_sig(context) = 0; break;
+          case X86Registers::ecx: RCX_sig(context) = 0; break;
+          case X86Registers::edx: RDX_sig(context) = 0; break;
+          case X86Registers::ebx: RBX_sig(context) = 0; break;
+          case X86Registers::esp: RSP_sig(context) = 0; break;
+          case X86Registers::ebp: RBP_sig(context) = 0; break;
+          case X86Registers::esi: RSI_sig(context) = 0; break;
+          case X86Registers::edi: RDI_sig(context) = 0; break;
+          case X86Registers::r8:  R8_sig(context)  = 0; break;
+          case X86Registers::r9:  R9_sig(context)  = 0; break;
+          case X86Registers::r10: R10_sig(context) = 0; break;
+          case X86Registers::r11: R11_sig(context) = 0; break;
+          case X86Registers::r12: R12_sig(context) = 0; break;
+          case X86Registers::r13: R13_sig(context) = 0; break;
+          case X86Registers::r14: R14_sig(context) = 0; break;
+          case X86Registers::r15: R15_sig(context) = 0; break;
           default: MOZ_CRASH();
         }
     }
 }
 # endif  // JS_CODEGEN_X64
 #endif   // !XP_MACOSX
 
 #if defined(XP_WIN)
@@ -549,56 +549,56 @@ SetRegisterToCoercedUndefined(mach_port_
         x86_float_state64_t fstate;
         unsigned int count = x86_FLOAT_STATE64_COUNT;
         kret = thread_get_state(rtThread, x86_FLOAT_STATE64, (thread_state_t) &fstate, &count);
         if (kret != KERN_SUCCESS)
             return false;
 
         bool f32 = heapAccess.isFloat32Load();
         switch (heapAccess.loadedReg().fpu().code()) {
-          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm0); break;
-          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm1); break;
-          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm2); break;
-          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm3); break;
-          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm4); break;
-          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm5); break;
-          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm6); break;
-          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm7); break;
-          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm8); break;
-          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm9); break;
-          case JSC::X86Registers::xmm10: SetXMMRegToNaN(f32, &fstate.__fpu_xmm10); break;
-          case JSC::X86Registers::xmm11: SetXMMRegToNaN(f32, &fstate.__fpu_xmm11); break;
-          case JSC::X86Registers::xmm12: SetXMMRegToNaN(f32, &fstate.__fpu_xmm12); break;
-          case JSC::X86Registers::xmm13: SetXMMRegToNaN(f32, &fstate.__fpu_xmm13); break;
-          case JSC::X86Registers::xmm14: SetXMMRegToNaN(f32, &fstate.__fpu_xmm14); break;
-          case JSC::X86Registers::xmm15: SetXMMRegToNaN(f32, &fstate.__fpu_xmm15); break;
+          case X86Registers::xmm0:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm0); break;
+          case X86Registers::xmm1:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm1); break;
+          case X86Registers::xmm2:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm2); break;
+          case X86Registers::xmm3:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm3); break;
+          case X86Registers::xmm4:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm4); break;
+          case X86Registers::xmm5:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm5); break;
+          case X86Registers::xmm6:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm6); break;
+          case X86Registers::xmm7:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm7); break;
+          case X86Registers::xmm8:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm8); break;
+          case X86Registers::xmm9:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm9); break;
+          case X86Registers::xmm10: SetXMMRegToNaN(f32, &fstate.__fpu_xmm10); break;
+          case X86Registers::xmm11: SetXMMRegToNaN(f32, &fstate.__fpu_xmm11); break;
+          case X86Registers::xmm12: SetXMMRegToNaN(f32, &fstate.__fpu_xmm12); break;
+          case X86Registers::xmm13: SetXMMRegToNaN(f32, &fstate.__fpu_xmm13); break;
+          case X86Registers::xmm14: SetXMMRegToNaN(f32, &fstate.__fpu_xmm14); break;
+          case X86Registers::xmm15: SetXMMRegToNaN(f32, &fstate.__fpu_xmm15); break;
           default: MOZ_CRASH();
         }
 
         kret = thread_set_state(rtThread, x86_FLOAT_STATE64, (thread_state_t)&fstate, x86_FLOAT_STATE64_COUNT);
         if (kret != KERN_SUCCESS)
             return false;
     } else {
         switch (heapAccess.loadedReg().gpr().code()) {
-          case JSC::X86Registers::eax: state.__rax = 0; break;
-          case JSC::X86Registers::ecx: state.__rcx = 0; break;
-          case JSC::X86Registers::edx: state.__rdx = 0; break;
-          case JSC::X86Registers::ebx: state.__rbx = 0; break;
-          case JSC::X86Registers::esp: state.__rsp = 0; break;
-          case JSC::X86Registers::ebp: state.__rbp = 0; break;
-          case JSC::X86Registers::esi: state.__rsi = 0; break;
-          case JSC::X86Registers::edi: state.__rdi = 0; break;
-          case JSC::X86Registers::r8:  state.__r8  = 0; break;
-          case JSC::X86Registers::r9:  state.__r9  = 0; break;
-          case JSC::X86Registers::r10: state.__r10 = 0; break;
-          case JSC::X86Registers::r11: state.__r11 = 0; break;
-          case JSC::X86Registers::r12: state.__r12 = 0; break;
-          case JSC::X86Registers::r13: state.__r13 = 0; break;
-          case JSC::X86Registers::r14: state.__r14 = 0; break;
-          case JSC::X86Registers::r15: state.__r15 = 0; break;
+          case X86Registers::eax: state.__rax = 0; break;
+          case X86Registers::ecx: state.__rcx = 0; break;
+          case X86Registers::edx: state.__rdx = 0; break;
+          case X86Registers::ebx: state.__rbx = 0; break;
+          case X86Registers::esp: state.__rsp = 0; break;
+          case X86Registers::ebp: state.__rbp = 0; break;
+          case X86Registers::esi: state.__rsi = 0; break;
+          case X86Registers::edi: state.__rdi = 0; break;
+          case X86Registers::r8:  state.__r8  = 0; break;
+          case X86Registers::r9:  state.__r9  = 0; break;
+          case X86Registers::r10: state.__r10 = 0; break;
+          case X86Registers::r11: state.__r11 = 0; break;
+          case X86Registers::r12: state.__r12 = 0; break;
+          case X86Registers::r13: state.__r13 = 0; break;
+          case X86Registers::r14: state.__r14 = 0; break;
+          case X86Registers::r15: state.__r15 = 0; break;
           default: MOZ_CRASH();
         }
     }
     return true;
 }
 # endif
 
 // This definition was generated by mig (the Mach Interface Generator) for the
--- a/js/src/assembler/assembler/MacroAssemblerX86Common.h
+++ b/js/src/assembler/assembler/MacroAssemblerX86Common.h
@@ -25,17 +25,17 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef assembler_assembler_MacroAssemblerX86Common_h
 #define assembler_assembler_MacroAssemblerX86Common_h
 
-#include "assembler/assembler/X86Assembler.h"
+#include "jit/shared/BaseAssembler-x86-shared.h"
 
 namespace JSC {
 
 class MacroAssemblerX86Common {
 public:
     // As the SSE's were introduced in order, the presence of a later SSE implies
     // the presence of an earlier SSE. For example, SSE4_2 support implies SSE2 support.
     enum SSECheckState {
--- a/js/src/jit/shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/shared/Assembler-x86-shared.cpp
@@ -38,17 +38,17 @@ AssemblerX86Shared::copyPreBarrierTable(
         memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
 }
 
 static void
 TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader)
 {
     while (reader.more()) {
         size_t offset = reader.readUnsigned();
-        void **ptr = JSC::X86Assembler::getPointerRef(buffer + offset);
+        void **ptr = X86Assembler::getPointerRef(buffer + offset);
 
 #ifdef JS_PUNBOX64
         // All pointers on x64 will have the top bits cleared. If those bits
         // are not cleared, this must be a Value.
         uintptr_t *word = reinterpret_cast<uintptr_t *>(ptr);
         if (*word >> JSVAL_TAG_SHIFT) {
             jsval_layout layout;
             layout.asBits = *word;
--- a/js/src/jit/shared/Assembler-x86-shared.h
+++ b/js/src/jit/shared/Assembler-x86-shared.h
@@ -5,18 +5,18 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_shared_Assembler_x86_shared_h
 #define jit_shared_Assembler_x86_shared_h
 
 #include <cstddef>
 
 #include "assembler/assembler/MacroAssemblerX86Common.h"
-#include "assembler/assembler/X86Assembler.h"
 #include "jit/shared/Assembler-shared.h"
+#include "jit/shared/BaseAssembler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 class Operand
 {
   public:
     enum Kind {
@@ -64,17 +64,17 @@ class Operand
     { }
     Operand(Register reg, int32_t disp)
       : kind_(MEM_REG_DISP),
         base_(reg.code()),
         disp_(disp)
     { }
     explicit Operand(AbsoluteAddress address)
       : kind_(MEM_ADDRESS32),
-        disp_(JSC::X86Assembler::addressImmediate(address.addr))
+        disp_(X86Assembler::addressImmediate(address.addr))
     { }
 
     Address toAddress() const {
         JS_ASSERT(kind() == MEM_REG_DISP);
         return Address(Register::FromCode(base()), disp());
     }
 
     BaseIndex toBaseIndex() const {
@@ -156,40 +156,40 @@ class AssemblerX86Shared : public Assemb
         if (ptr.value)
             dataRelocations_.writeUnsigned(masm.currentOffset());
     }
     void writePrebarrierOffset(CodeOffsetLabel label) {
         preBarriers_.writeUnsigned(label.offset());
     }
 
   protected:
-    JSC::X86Assembler masm;
+    X86Assembler masm;
 
-    typedef JSC::X86Assembler::JmpSrc JmpSrc;
-    typedef JSC::X86Assembler::JmpDst JmpDst;
+    typedef X86Assembler::JmpSrc JmpSrc;
+    typedef X86Assembler::JmpDst JmpDst;
 
   public:
     enum Condition {
-        Equal = JSC::X86Assembler::ConditionE,
-        NotEqual = JSC::X86Assembler::ConditionNE,
-        Above = JSC::X86Assembler::ConditionA,
-        AboveOrEqual = JSC::X86Assembler::ConditionAE,
-        Below = JSC::X86Assembler::ConditionB,
-        BelowOrEqual = JSC::X86Assembler::ConditionBE,
-        GreaterThan = JSC::X86Assembler::ConditionG,
-        GreaterThanOrEqual = JSC::X86Assembler::ConditionGE,
-        LessThan = JSC::X86Assembler::ConditionL,
-        LessThanOrEqual = JSC::X86Assembler::ConditionLE,
-        Overflow = JSC::X86Assembler::ConditionO,
-        Signed = JSC::X86Assembler::ConditionS,
-        NotSigned = JSC::X86Assembler::ConditionNS,
-        Zero = JSC::X86Assembler::ConditionE,
-        NonZero = JSC::X86Assembler::ConditionNE,
-        Parity = JSC::X86Assembler::ConditionP,
-        NoParity = JSC::X86Assembler::ConditionNP
+        Equal = X86Assembler::ConditionE,
+        NotEqual = X86Assembler::ConditionNE,
+        Above = X86Assembler::ConditionA,
+        AboveOrEqual = X86Assembler::ConditionAE,
+        Below = X86Assembler::ConditionB,
+        BelowOrEqual = X86Assembler::ConditionBE,
+        GreaterThan = X86Assembler::ConditionG,
+        GreaterThanOrEqual = X86Assembler::ConditionGE,
+        LessThan = X86Assembler::ConditionL,
+        LessThanOrEqual = X86Assembler::ConditionLE,
+        Overflow = X86Assembler::ConditionO,
+        Signed = X86Assembler::ConditionS,
+        NotSigned = X86Assembler::ConditionNS,
+        Zero = X86Assembler::ConditionE,
+        NonZero = X86Assembler::ConditionNE,
+        Parity = X86Assembler::ConditionP,
+        NoParity = X86Assembler::ConditionNP
     };
 
     // If this bit is set, the ucomisd operands have to be inverted.
     static const int DoubleConditionBitInvert = 0x10;
 
     // Bit set when a DoubleCondition does not map to a single x86 condition.
     // The macro assembler has to special-case these conditions.
     static const int DoubleConditionBitSpecial = 0x20;
@@ -678,17 +678,17 @@ class AssemblerX86Shared : public Assemb
             break;
           default:
             MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
         }
     }
 
   protected:
     JmpSrc jSrc(Condition cond, Label *label) {
-        JmpSrc j = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
+        JmpSrc j = masm.jCC(static_cast<X86Assembler::Condition>(cond));
         if (label->bound()) {
             // The jump can be immediately patched to the correct destination.
             masm.linkJump(j, JmpDst(label->offset()));
         } else {
             // Thread the jump list through the unpatched jump targets.
             JmpSrc prev = JmpSrc(label->use(j.offset()));
             masm.setNextJump(j, prev);
         }
@@ -717,17 +717,17 @@ class AssemblerX86Shared : public Assemb
             // Thread the jump list through the unpatched jump targets.
             JmpSrc prev = JmpSrc(label->use(j.offset()));
             masm.setNextJump(j, prev);
         }
         return j;
     }
 
     JmpSrc jSrc(Condition cond, RepatchLabel *label) {
-        JmpSrc j = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
+        JmpSrc j = masm.jCC(static_cast<X86Assembler::Condition>(cond));
         if (label->bound()) {
             // The jump can be immediately patched to the correct destination.
             masm.linkJump(j, JmpDst(label->offset()));
         } else {
             label->use(j.offset());
         }
         return j;
     }
@@ -763,48 +763,48 @@ class AssemblerX86Shared : public Assemb
             masm.jmp_r(op.reg());
             break;
           default:
             MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
         }
     }
     void cmpEAX(Label *label) { cmpSrc(label); }
     void bind(Label *label) {
-        JSC::X86Assembler::JmpDst dst(masm.label());
+        X86Assembler::JmpDst dst(masm.label());
         if (label->used()) {
             bool more;
-            JSC::X86Assembler::JmpSrc jmp(label->offset());
+            X86Assembler::JmpSrc jmp(label->offset());
             do {
-                JSC::X86Assembler::JmpSrc next;
+                X86Assembler::JmpSrc next;
                 more = masm.nextJump(jmp, &next);
                 masm.linkJump(jmp, dst);
                 jmp = next;
             } while (more);
         }
         label->bind(dst.offset());
     }
     void bind(RepatchLabel *label) {
-        JSC::X86Assembler::JmpDst dst(masm.label());
+        X86Assembler::JmpDst dst(masm.label());
         if (label->used()) {
-            JSC::X86Assembler::JmpSrc jmp(label->offset());
+            X86Assembler::JmpSrc jmp(label->offset());
             masm.linkJump(jmp, dst);
         }
         label->bind(dst.offset());
     }
     uint32_t currentOffset() {
         return masm.label().offset();
     }
 
     // Re-routes pending jumps to a new label.
     void retarget(Label *label, Label *target) {
         if (label->used()) {
             bool more;
-            JSC::X86Assembler::JmpSrc jmp(label->offset());
+            X86Assembler::JmpSrc jmp(label->offset());
             do {
-                JSC::X86Assembler::JmpSrc next;
+                X86Assembler::JmpSrc next;
                 more = masm.nextJump(jmp, &next);
 
                 if (target->bound()) {
                     // The jump can be immediately patched to the correct destination.
                     masm.linkJump(jmp, JmpDst(target->offset()));
                 } else {
                     // Thread the jump list through the unpatched jump targets.
                     JmpSrc prev = JmpSrc(target->use(jmp.offset()));
@@ -816,25 +816,25 @@ class AssemblerX86Shared : public Assemb
         }
         label->reset();
     }
 
     static void Bind(uint8_t *raw, AbsoluteLabel *label, const void *address) {
         if (label->used()) {
             intptr_t src = label->offset();
             do {
-                intptr_t next = reinterpret_cast<intptr_t>(JSC::X86Assembler::getPointer(raw + src));
-                JSC::X86Assembler::setPointer(raw + src, address);
+                intptr_t next = reinterpret_cast<intptr_t>(X86Assembler::getPointer(raw + src));
+                X86Assembler::setPointer(raw + src, address);
                 src = next;
             } while (src != AbsoluteLabel::INVALID_OFFSET);
         }
         label->bind();
     }
 
-    // See Bind and JSC::X86Assembler::setPointer.
+    // See Bind and X86Assembler::setPointer.
     size_t labelOffsetToPatchOffset(size_t offset) {
         return offset - sizeof(void*);
     }
 
     void ret() {
         masm.ret();
     }
     void retn(Imm32 n) {
@@ -957,17 +957,17 @@ class AssemblerX86Shared : public Assemb
     CodeOffsetLabel cmplWithPatch(Register lhs, Imm32 rhs) {
         masm.cmpl_ir_force32(rhs.value, lhs.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
     void cmpw(Register lhs, Register rhs) {
         masm.cmpw_rr(lhs.code(), rhs.code());
     }
     void setCC(Condition cond, Register r) {
-        masm.setCC_r(static_cast<JSC::X86Assembler::Condition>(cond), r.code());
+        masm.setCC_r(static_cast<X86Assembler::Condition>(cond), r.code());
     }
     void testb(Register lhs, Register rhs) {
         JS_ASSERT(GeneralRegisterSet(Registers::SingleByteRegs).has(lhs));
         JS_ASSERT(GeneralRegisterSet(Registers::SingleByteRegs).has(rhs));
         masm.testb_rr(rhs.code(), lhs.code());
     }
     void testw(Register lhs, Register rhs) {
         masm.testw_rr(rhs.code(), lhs.code());
@@ -1750,23 +1750,23 @@ class AssemblerX86Shared : public Assemb
         JS_ASSERT(HasSSE2());
         masm.sqrtsd_rr(src.code(), dest.code());
     }
     void sqrtss(FloatRegister src, FloatRegister dest) {
         JS_ASSERT(HasSSE2());
         masm.sqrtss_rr(src.code(), dest.code());
     }
     void roundsd(FloatRegister src, FloatRegister dest,
-                 JSC::X86Assembler::RoundingMode mode)
+                 X86Assembler::RoundingMode mode)
     {
         JS_ASSERT(HasSSE41());
         masm.roundsd_rr(src.code(), dest.code(), mode);
     }
     void roundss(FloatRegister src, FloatRegister dest,
-                 JSC::X86Assembler::RoundingMode mode)
+                 X86Assembler::RoundingMode mode)
     {
         JS_ASSERT(HasSSE41());
         masm.roundss_rr(src.code(), dest.code(), mode);
     }
     void minsd(FloatRegister src, FloatRegister dest) {
         JS_ASSERT(HasSSE2());
         masm.minsd_rr(src.code(), dest.code());
     }
rename from js/src/assembler/assembler/AssemblerBuffer.h
rename to js/src/jit/shared/AssemblerBuffer-x86-shared.h
--- a/js/src/assembler/assembler/AssemblerBuffer.h
+++ b/js/src/jit/shared/AssemblerBuffer-x86-shared.h
@@ -22,22 +22,22 @@
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * ***** END LICENSE BLOCK ***** */
 
-#ifndef assembler_assembler_AssemblerBuffer_h
-#define assembler_assembler_AssemblerBuffer_h
+#ifndef jit_shared_AssemblerBuffer_x86_shared_h
+#define jit_shared_AssemblerBuffer_x86_shared_h
 
-#include <string.h>
 #include <limits.h>
 #include <stdarg.h>
+#include <string.h>
 
 #include "jsfriendapi.h"
 #include "jsopcode.h"
 #include "jsutil.h"
 
 #include "jit/ExecutableAllocator.h"
 #include "jit/IonSpewer.h"
 #include "js/RootingAPI.h"
@@ -45,17 +45,18 @@
 #define PRETTY_PRINT_OFFSET(os) (((os)<0)?"-":""), (((os)<0)?-(os):(os))
 
 #define FIXME_INSN_PRINTING                                 \
     do {                                                    \
         spew("FIXME insn printing %s:%d",                   \
              __FILE__, __LINE__);                           \
     } while (0)
 
-namespace JSC {
+namespace js {
+namespace jit {
 
     class AssemblerBuffer {
         static const size_t inlineCapacity = 256;
     public:
         AssemblerBuffer()
             : m_buffer(m_inlineBuffer)
             , m_capacity(inlineCapacity)
             , m_size(0)
@@ -318,11 +319,12 @@ namespace JSC {
                 va_end(va);
 
                 if (i > -1)
                     js::jit::IonSpew(js::jit::IonSpew_Codegen, "%s", buf);
             }
         }
     };
 
-} // namespace JSC
+} // namespace jit
+} // namespace js
 
-#endif /* assembler_assembler_AssemblerBuffer_h */
+#endif /* jit_shared_AssemblerBuffer_x86_shared_h */
rename from js/src/assembler/assembler/X86Assembler.h
rename to js/src/jit/shared/BaseAssembler-x86-shared.h
--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/jit/shared/BaseAssembler-x86-shared.h
@@ -22,25 +22,27 @@
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * ***** END LICENSE BLOCK ***** */
 
-#ifndef assembler_assembler_X86Assembler_h
-#define assembler_assembler_X86Assembler_h
+#ifndef jit_shared_BaseAssembler_x86_shared_h
+#define jit_shared_BaseAssembler_x86_shared_h
 
 #include <stdarg.h>
 
-#include "assembler/assembler/AssemblerBuffer.h"
+#include "jit/shared/AssemblerBuffer-x86-shared.h"
+
 #include "js/Vector.h"
 
-namespace JSC {
+namespace js {
+namespace jit {
 
 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
 inline bool CAN_ZERO_EXTEND_8_32(int32_t value) { return value == (int32_t)(unsigned char)value; }
 inline bool CAN_ZERO_EXTEND_8H_32(int32_t value) { return value == (value & 0xff00); }
 inline bool CAN_ZERO_EXTEND_32_64(int32_t value) { return value >= 0; }
 
 namespace X86Registers {
     typedef enum {
@@ -4438,11 +4440,12 @@ private:
         {
             memoryModRM_disp32(reg, address);
         }
 
         AssemblerBuffer m_buffer;
     } m_formatter;
 };
 
-} // namespace JSC
-
-#endif /* assembler_assembler_X86Assembler_h */
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_BaseAssembler_x86_shared_h */
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -1579,17 +1579,17 @@ CodeGeneratorX86Shared::visitFloor(LFloo
 
     if (AssemblerX86Shared::HasSSE41()) {
         // Bail on negative-zero.
         masm.branchNegativeZero(input, output, &bailout);
         if (!bailoutFrom(&bailout, lir->snapshot()))
             return false;
 
         // Round toward -Infinity.
-        masm.roundsd(input, scratch, JSC::X86Assembler::RoundDown);
+        masm.roundsd(input, scratch, X86Assembler::RoundDown);
 
         if (!bailoutCvttsd2si(scratch, output, lir->snapshot()))
             return false;
     } else {
         Label negative, end;
 
         // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
         masm.xorpd(scratch, scratch);
@@ -1642,17 +1642,17 @@ CodeGeneratorX86Shared::visitFloorF(LFlo
 
     if (AssemblerX86Shared::HasSSE41()) {
         // Bail on negative-zero.
         masm.branchNegativeZeroFloat32(input, output, &bailout);
         if (!bailoutFrom(&bailout, lir->snapshot()))
             return false;
 
         // Round toward -Infinity.
-        masm.roundss(input, scratch, JSC::X86Assembler::RoundDown);
+        masm.roundss(input, scratch, X86Assembler::RoundDown);
 
         if (!bailoutCvttss2si(scratch, output, lir->snapshot()))
             return false;
     } else {
         Label negative, end;
 
         // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
         masm.xorps(scratch, scratch);
@@ -1713,17 +1713,17 @@ CodeGeneratorX86Shared::visitCeil(LCeil 
     masm.branchTest32(Assembler::NonZero, output, Imm32(1), &bailout);
     if (!bailoutFrom(&bailout, lir->snapshot()))
         return false;
 
     if (AssemblerX86Shared::HasSSE41()) {
         // x <= -1 or x > -0
         masm.bind(&lessThanMinusOne);
         // Round toward +Infinity.
-        masm.roundsd(input, scratch, JSC::X86Assembler::RoundUp);
+        masm.roundsd(input, scratch, X86Assembler::RoundUp);
         return bailoutCvttsd2si(scratch, output, lir->snapshot());
     }
 
     // No SSE4.1
     Label end;
 
     // x >= 0 and x is not -0.0, we can truncate (resp. truncate and add 1) for
     // integer (resp. non-integer) values.
@@ -1769,17 +1769,17 @@ CodeGeneratorX86Shared::visitCeilF(LCeil
     masm.branchTest32(Assembler::NonZero, output, Imm32(1), &bailout);
     if (!bailoutFrom(&bailout, lir->snapshot()))
         return false;
 
     if (AssemblerX86Shared::HasSSE41()) {
         // x <= -1 or x > -0
         masm.bind(&lessThanMinusOne);
         // Round toward +Infinity.
-        masm.roundss(input, scratch, JSC::X86Assembler::RoundUp);
+        masm.roundss(input, scratch, X86Assembler::RoundUp);
         return bailoutCvttss2si(scratch, output, lir->snapshot());
     }
 
     // No SSE4.1
     Label end;
 
     // x >= 0 and x is not -0.0, we can truncate (resp. truncate and add 1) for
     // integer (resp. non-integer) values.
@@ -1840,17 +1840,17 @@ CodeGeneratorX86Shared::visitRound(LRoun
 
     // Input is negative, but isn't -0.
     masm.bind(&negative);
 
     if (AssemblerX86Shared::HasSSE41()) {
         // Add 0.5 and round toward -Infinity. The result is stored in the temp
         // register (currently contains 0.5).
         masm.addsd(input, temp);
-        masm.roundsd(temp, scratch, JSC::X86Assembler::RoundDown);
+        masm.roundsd(temp, scratch, X86Assembler::RoundDown);
 
         // Truncate.
         if (!bailoutCvttsd2si(scratch, output, lir->snapshot()))
             return false;
 
         // If the result is positive zero, then the actual result is -0. Bail.
         // Otherwise, the truncation will have produced the correct negative integer.
         masm.testl(output, output);
@@ -1922,17 +1922,17 @@ CodeGeneratorX86Shared::visitRoundF(LRou
 
     // Input is negative, but isn't -0.
     masm.bind(&negative);
 
     if (AssemblerX86Shared::HasSSE41()) {
         // Add 0.5 and round toward -Infinity. The result is stored in the temp
         // register (currently contains 0.5).
         masm.addss(input, temp);
-        masm.roundss(temp, scratch, JSC::X86Assembler::RoundDown);
+        masm.roundss(temp, scratch, X86Assembler::RoundDown);
 
         // Truncate.
         if (!bailoutCvttss2si(scratch, output, lir->snapshot()))
             return false;
 
         // If the result is positive zero, then the actual result is -0. Bail.
         // Otherwise, the truncation will have produced the correct negative integer.
         masm.testl(output, output);
--- a/js/src/jit/shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/shared/MacroAssembler-x86-shared.h
@@ -190,17 +190,17 @@ class MacroAssemblerX86Shared : public A
     void atomic_inc32(const Operand &addr) {
         lock_incl(addr);
     }
     void atomic_dec32(const Operand &addr) {
         lock_decl(addr);
     }
     void atomic_cmpxchg32(Register src, const Operand &addr, Register dest) {
         // %eax must be explicitly provided for calling clarity.
-        MOZ_ASSERT(dest.code() == JSC::X86Registers::eax);
+        MOZ_ASSERT(dest.code() == X86Registers::eax);
         lock_cmpxchg32(src, addr);
     }
 
     void branch16(Condition cond, Register lhs, Register rhs, Label *label) {
         cmpw(lhs, rhs);
         j(cond, label);
     }
     void branch32(Condition cond, const Operand &lhs, Register rhs, Label *label) {
--- a/js/src/jit/x64/Architecture-x64.h
+++ b/js/src/jit/x64/Architecture-x64.h
@@ -2,17 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_x64_Architecture_x64_h
 #define jit_x64_Architecture_x64_h
 
-#include "assembler/assembler/X86Assembler.h"
+#include "jit/shared/BaseAssembler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 // In bytes: slots needed for potential memory->memory move spills.
 //   +8 for cycles
 //   +8 for gpr spills
 //   +8 for double spills
@@ -21,17 +21,17 @@ static const uint32_t ION_FRAME_SLACK_SI
 #ifdef _WIN64
 static const uint32_t ShadowStackSpace = 32;
 #else
 static const uint32_t ShadowStackSpace = 0;
 #endif
 
 class Registers {
   public:
-    typedef JSC::X86Registers::RegisterID Code;
+    typedef X86Registers::RegisterID Code;
     typedef uint32_t SetType;
     static uint32_t SetSize(SetType x) {
         static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
         return mozilla::CountPopulation32(x);
     }
     static uint32_t FirstBit(SetType x) {
         return mozilla::CountTrailingZeroes32(x);
     }
@@ -49,88 +49,88 @@ class Registers {
     static Code FromName(const char *name) {
         for (size_t i = 0; i < Total; i++) {
             if (strcmp(GetName(Code(i)), name) == 0)
                 return Code(i);
         }
         return Invalid;
     }
 
-    static const Code StackPointer = JSC::X86Registers::esp;
-    static const Code Invalid = JSC::X86Registers::invalid_reg;
+    static const Code StackPointer = X86Registers::esp;
+    static const Code Invalid = X86Registers::invalid_reg;
 
     static const uint32_t Total = 16;
     static const uint32_t TotalPhys = 16;
     static const uint32_t Allocatable = 14;
 
     static const uint32_t AllMask = (1 << Total) - 1;
 
     static const uint32_t ArgRegMask =
 # if !defined(_WIN64)
-        (1 << JSC::X86Registers::edi) |
-        (1 << JSC::X86Registers::esi) |
+        (1 << X86Registers::edi) |
+        (1 << X86Registers::esi) |
 # endif
-        (1 << JSC::X86Registers::edx) |
-        (1 << JSC::X86Registers::ecx) |
-        (1 << JSC::X86Registers::r8) |
-        (1 << JSC::X86Registers::r9);
+        (1 << X86Registers::edx) |
+        (1 << X86Registers::ecx) |
+        (1 << X86Registers::r8) |
+        (1 << X86Registers::r9);
 
     static const uint32_t VolatileMask =
-        (1 << JSC::X86Registers::eax) |
-        (1 << JSC::X86Registers::ecx) |
-        (1 << JSC::X86Registers::edx) |
+        (1 << X86Registers::eax) |
+        (1 << X86Registers::ecx) |
+        (1 << X86Registers::edx) |
 # if !defined(_WIN64)
-        (1 << JSC::X86Registers::esi) |
-        (1 << JSC::X86Registers::edi) |
+        (1 << X86Registers::esi) |
+        (1 << X86Registers::edi) |
 # endif
-        (1 << JSC::X86Registers::r8) |
-        (1 << JSC::X86Registers::r9) |
-        (1 << JSC::X86Registers::r10) |
-        (1 << JSC::X86Registers::r11);
+        (1 << X86Registers::r8) |
+        (1 << X86Registers::r9) |
+        (1 << X86Registers::r10) |
+        (1 << X86Registers::r11);
 
     static const uint32_t NonVolatileMask =
-        (1 << JSC::X86Registers::ebx) |
+        (1 << X86Registers::ebx) |
 #if defined(_WIN64)
-        (1 << JSC::X86Registers::esi) |
-        (1 << JSC::X86Registers::edi) |
+        (1 << X86Registers::esi) |
+        (1 << X86Registers::edi) |
 #endif
-        (1 << JSC::X86Registers::ebp) |
-        (1 << JSC::X86Registers::r12) |
-        (1 << JSC::X86Registers::r13) |
-        (1 << JSC::X86Registers::r14) |
-        (1 << JSC::X86Registers::r15);
+        (1 << X86Registers::ebp) |
+        (1 << X86Registers::r12) |
+        (1 << X86Registers::r13) |
+        (1 << X86Registers::r14) |
+        (1 << X86Registers::r15);
 
     static const uint32_t WrapperMask = VolatileMask;
 
     static const uint32_t SingleByteRegs = VolatileMask | NonVolatileMask;
 
     static const uint32_t NonAllocatableMask =
-        (1 << JSC::X86Registers::esp) |
-        (1 << JSC::X86Registers::r11);      // This is ScratchReg.
+        (1 << X86Registers::esp) |
+        (1 << X86Registers::r11);      // This is ScratchReg.
 
     // Registers that can be allocated without being saved, generally.
     static const uint32_t TempMask = VolatileMask & ~NonAllocatableMask;
 
     static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
 
     // Registers returned from a JS -> JS call.
     static const uint32_t JSCallMask =
-        (1 << JSC::X86Registers::ecx);
+        (1 << X86Registers::ecx);
 
     // Registers returned from a JS -> C call.
     static const uint32_t CallMask =
-        (1 << JSC::X86Registers::eax);
+        (1 << X86Registers::eax);
 };
 
 // Smallest integer type that can hold a register bitmask.
 typedef uint16_t PackedRegisterMask;
 
 class FloatRegisters {
   public:
-    typedef JSC::X86Registers::XMMRegisterID Code;
+    typedef X86Registers::XMMRegisterID Code;
     typedef uint32_t SetType;
     static const char *GetName(Code code) {
         static const char * const Names[] = { "xmm0",  "xmm1",  "xmm2",  "xmm3",
                                               "xmm4",  "xmm5",  "xmm6",  "xmm7",
                                               "xmm8",  "xmm9",  "xmm10", "xmm11",
                                               "xmm12", "xmm13", "xmm14", "xmm15" };
         return Names[code];
     }
@@ -138,43 +138,43 @@ class FloatRegisters {
     static Code FromName(const char *name) {
         for (size_t i = 0; i < Total; i++) {
             if (strcmp(GetName(Code(i)), name) == 0)
                 return Code(i);
         }
         return Invalid;
     }
 
-    static const Code Invalid = JSC::X86Registers::invalid_xmm;
+    static const Code Invalid = X86Registers::invalid_xmm;
 
     static const uint32_t Total = 16;
     static const uint32_t TotalPhys = 16;
 
     static const uint32_t Allocatable = 15;
 
     static const uint32_t AllMask = (1 << Total) - 1;
     static const uint32_t AllDoubleMask = AllMask;
     static const uint32_t VolatileMask =
 #if defined(_WIN64)
-        (1 << JSC::X86Registers::xmm0) |
-        (1 << JSC::X86Registers::xmm1) |
-        (1 << JSC::X86Registers::xmm2) |
-        (1 << JSC::X86Registers::xmm3) |
-        (1 << JSC::X86Registers::xmm4) |
-        (1 << JSC::X86Registers::xmm5);
+        (1 << X86Registers::xmm0) |
+        (1 << X86Registers::xmm1) |
+        (1 << X86Registers::xmm2) |
+        (1 << X86Registers::xmm3) |
+        (1 << X86Registers::xmm4) |
+        (1 << X86Registers::xmm5);
 #else
         AllMask;
 #endif
 
     static const uint32_t NonVolatileMask = AllMask & ~VolatileMask;
 
     static const uint32_t WrapperMask = VolatileMask;
 
     static const uint32_t NonAllocatableMask =
-        (1 << JSC::X86Registers::xmm15);    // This is ScratchFloatReg.
+        (1 << X86Registers::xmm15);    // This is ScratchFloatReg.
 
     static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
 
 };
 
 template <typename T>
 class TypedRegisterSet;
 
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -185,31 +185,31 @@ Assembler::executableCopy(uint8_t *buffe
         RelativePatch &rp = jumps_[i];
         uint8_t *src = buffer + rp.offset;
         if (!rp.target) {
             // The patch target is nullptr for jumps that have been linked to
             // a label within the same code block, but may be repatched later
             // to jump to a different code block.
             continue;
         }
-        if (JSC::X86Assembler::canRelinkJump(src, rp.target)) {
-            JSC::X86Assembler::setRel32(src, rp.target);
+        if (X86Assembler::canRelinkJump(src, rp.target)) {
+            X86Assembler::setRel32(src, rp.target);
         } else {
             // An extended jump table must exist, and its offset must be in
             // range.
             JS_ASSERT(extendedJumpTable_);
             JS_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <= size() - SizeOfJumpTableEntry);
 
             // Patch the jump to go to the extended jump entry.
             uint8_t *entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
-            JSC::X86Assembler::setRel32(src, entry);
+            X86Assembler::setRel32(src, entry);
 
             // Now patch the pointer, note that we need to align it to
             // *after* the extended jump, i.e. after the 64-bit immedate.
-            JSC::X86Assembler::repatchPointer(entry + SizeOfExtendedJump, rp.target);
+            X86Assembler::repatchPointer(entry + SizeOfExtendedJump, rp.target);
         }
     }
 }
 
 class RelocationIterator
 {
     CompactBufferReader reader_;
     uint32_t tableStart_;
@@ -237,23 +237,23 @@ class RelocationIterator
     uint32_t extendedOffset() const {
         return extOffset_;
     }
 };
 
 JitCode *
 Assembler::CodeFromJump(JitCode *code, uint8_t *jump)
 {
-    uint8_t *target = (uint8_t *)JSC::X86Assembler::getRel32Target(jump);
+    uint8_t *target = (uint8_t *)X86Assembler::getRel32Target(jump);
     if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
         // This jump is within the code buffer, so it has been redirected to
         // the extended jump table.
         JS_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());
 
-        target = (uint8_t *)JSC::X86Assembler::getPointer(target + SizeOfExtendedJump);
+        target = (uint8_t *)X86Assembler::getPointer(target + SizeOfExtendedJump);
     }
 
     return JitCode::FromExecutable(target);
 }
 
 void
 Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
 {
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -11,62 +11,62 @@
 
 #include "jit/IonCode.h"
 #include "jit/JitCompartment.h"
 #include "jit/shared/Assembler-shared.h"
 
 namespace js {
 namespace jit {
 
-static MOZ_CONSTEXPR_VAR Register rax = { JSC::X86Registers::eax };
-static MOZ_CONSTEXPR_VAR Register rbx = { JSC::X86Registers::ebx };
-static MOZ_CONSTEXPR_VAR Register rcx = { JSC::X86Registers::ecx };
-static MOZ_CONSTEXPR_VAR Register rdx = { JSC::X86Registers::edx };
-static MOZ_CONSTEXPR_VAR Register rsi = { JSC::X86Registers::esi };
-static MOZ_CONSTEXPR_VAR Register rdi = { JSC::X86Registers::edi };
-static MOZ_CONSTEXPR_VAR Register rbp = { JSC::X86Registers::ebp };
-static MOZ_CONSTEXPR_VAR Register r8  = { JSC::X86Registers::r8  };
-static MOZ_CONSTEXPR_VAR Register r9  = { JSC::X86Registers::r9  };
-static MOZ_CONSTEXPR_VAR Register r10 = { JSC::X86Registers::r10 };
-static MOZ_CONSTEXPR_VAR Register r11 = { JSC::X86Registers::r11 };
-static MOZ_CONSTEXPR_VAR Register r12 = { JSC::X86Registers::r12 };
-static MOZ_CONSTEXPR_VAR Register r13 = { JSC::X86Registers::r13 };
-static MOZ_CONSTEXPR_VAR Register r14 = { JSC::X86Registers::r14 };
-static MOZ_CONSTEXPR_VAR Register r15 = { JSC::X86Registers::r15 };
-static MOZ_CONSTEXPR_VAR Register rsp = { JSC::X86Registers::esp };
+static MOZ_CONSTEXPR_VAR Register rax = { X86Registers::eax };
+static MOZ_CONSTEXPR_VAR Register rbx = { X86Registers::ebx };
+static MOZ_CONSTEXPR_VAR Register rcx = { X86Registers::ecx };
+static MOZ_CONSTEXPR_VAR Register rdx = { X86Registers::edx };
+static MOZ_CONSTEXPR_VAR Register rsi = { X86Registers::esi };
+static MOZ_CONSTEXPR_VAR Register rdi = { X86Registers::edi };
+static MOZ_CONSTEXPR_VAR Register rbp = { X86Registers::ebp };
+static MOZ_CONSTEXPR_VAR Register r8  = { X86Registers::r8  };
+static MOZ_CONSTEXPR_VAR Register r9  = { X86Registers::r9  };
+static MOZ_CONSTEXPR_VAR Register r10 = { X86Registers::r10 };
+static MOZ_CONSTEXPR_VAR Register r11 = { X86Registers::r11 };
+static MOZ_CONSTEXPR_VAR Register r12 = { X86Registers::r12 };
+static MOZ_CONSTEXPR_VAR Register r13 = { X86Registers::r13 };
+static MOZ_CONSTEXPR_VAR Register r14 = { X86Registers::r14 };
+static MOZ_CONSTEXPR_VAR Register r15 = { X86Registers::r15 };
+static MOZ_CONSTEXPR_VAR Register rsp = { X86Registers::esp };
 
-static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { JSC::X86Registers::xmm0 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { JSC::X86Registers::xmm1 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { JSC::X86Registers::xmm2 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { JSC::X86Registers::xmm3 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { JSC::X86Registers::xmm4 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { JSC::X86Registers::xmm5 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { JSC::X86Registers::xmm6 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { JSC::X86Registers::xmm7 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm8 = { JSC::X86Registers::xmm8 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm9 = { JSC::X86Registers::xmm9 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm10 = { JSC::X86Registers::xmm10 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm11 = { JSC::X86Registers::xmm11 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm12 = { JSC::X86Registers::xmm12 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm13 = { JSC::X86Registers::xmm13 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm14 = { JSC::X86Registers::xmm14 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm15 = { JSC::X86Registers::xmm15 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { X86Registers::xmm0 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { X86Registers::xmm1 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { X86Registers::xmm2 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { X86Registers::xmm3 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { X86Registers::xmm4 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { X86Registers::xmm5 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { X86Registers::xmm6 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { X86Registers::xmm7 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm8 = { X86Registers::xmm8 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm9 = { X86Registers::xmm9 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm10 = { X86Registers::xmm10 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm11 = { X86Registers::xmm11 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm12 = { X86Registers::xmm12 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm13 = { X86Registers::xmm13 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm14 = { X86Registers::xmm14 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm15 = { X86Registers::xmm15 };
 
 // X86-common synonyms.
 static MOZ_CONSTEXPR_VAR Register eax = rax;
 static MOZ_CONSTEXPR_VAR Register ebx = rbx;
 static MOZ_CONSTEXPR_VAR Register ecx = rcx;
 static MOZ_CONSTEXPR_VAR Register edx = rdx;
 static MOZ_CONSTEXPR_VAR Register esi = rsi;
 static MOZ_CONSTEXPR_VAR Register edi = rdi;
 static MOZ_CONSTEXPR_VAR Register ebp = rbp;
 static MOZ_CONSTEXPR_VAR Register esp = rsp;
 
-static MOZ_CONSTEXPR_VAR Register InvalidReg = { JSC::X86Registers::invalid_reg };
-static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { JSC::X86Registers::invalid_xmm };
+static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Registers::invalid_reg };
+static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { X86Registers::invalid_xmm };
 
 static MOZ_CONSTEXPR_VAR Register StackPointer = rsp;
 static MOZ_CONSTEXPR_VAR Register FramePointer = rbp;
 static MOZ_CONSTEXPR_VAR Register JSReturnReg = rcx;
 // Avoid, except for assertions.
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = JSReturnReg;
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = JSReturnReg;
 
@@ -693,17 +693,17 @@ class Assembler : public AssemblerX86Sha
     }
 
     void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
     }
     void j(Condition cond, ImmPtr target,
            Relocation::Kind reloc = Relocation::HARDCODED) {
-        JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
+        JmpSrc src = masm.jCC(static_cast<X86Assembler::Condition>(cond));
         addPendingJump(src, target, reloc);
     }
 
     void jmp(JitCode *target) {
         jmp(ImmPtr(target->raw()), Relocation::JITCODE);
     }
     void j(Condition cond, JitCode *target) {
         j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
@@ -743,20 +743,20 @@ class Assembler : public AssemblerX86Sha
     void cvtsq2ss(Register src, FloatRegister dest) {
         masm.cvtsq2ss_rr(src.code(), dest.code());
     }
 };
 
 static inline void
 PatchJump(CodeLocationJump jump, CodeLocationLabel label)
 {
-    if (JSC::X86Assembler::canRelinkJump(jump.raw(), label.raw())) {
-        JSC::X86Assembler::setRel32(jump.raw(), label.raw());
+    if (X86Assembler::canRelinkJump(jump.raw(), label.raw())) {
+        X86Assembler::setRel32(jump.raw(), label.raw());
     } else {
-        JSC::X86Assembler::setRel32(jump.raw(), jump.jumpTableEntry());
+        X86Assembler::setRel32(jump.raw(), jump.jumpTableEntry());
         Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw());
     }
 }
 static inline void
 PatchBackedge(CodeLocationJump &jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     PatchJump(jump_, label);
 }
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -594,45 +594,45 @@ class MacroAssemblerX64 : public MacroAs
     void subPtr(Register src, const Address &dest) {
         subq(src, Operand(dest));
     }
     void mulBy3(const Register &src, const Register &dest) {
         lea(Operand(src, src, TimesTwo), dest);
     }
 
     void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label *label) {
-        if (JSC::X86Assembler::isAddressImmediate(lhs.addr)) {
+        if (X86Assembler::isAddressImmediate(lhs.addr)) {
             branch32(cond, Operand(lhs), rhs, label);
         } else {
             mov(ImmPtr(lhs.addr), ScratchReg);
             branch32(cond, Address(ScratchReg, 0), rhs, label);
         }
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label *label) {
-        if (JSC::X86Assembler::isAddressImmediate(lhs.addr)) {
+        if (X86Assembler::isAddressImmediate(lhs.addr)) {
             branch32(cond, Operand(lhs), rhs, label);
         } else {
             mov(ImmPtr(lhs.addr), ScratchReg);
             branch32(cond, Address(ScratchReg, 0), rhs, label);
         }
     }
     void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label *label) {
-        if (JSC::X86Assembler::isAddressImmediate(address.addr)) {
+        if (X86Assembler::isAddressImmediate(address.addr)) {
             testl(Operand(address), imm);
         } else {
             mov(ImmPtr(address.addr), ScratchReg);
             testl(Operand(ScratchReg, 0), imm);
         }
         j(cond, label);
     }
 
     // Specialization for AbsoluteAddress.
     void branchPtr(Condition cond, AbsoluteAddress addr, Register ptr, Label *label) {
         JS_ASSERT(ptr != ScratchReg);
-        if (JSC::X86Assembler::isAddressImmediate(addr.addr)) {
+        if (X86Assembler::isAddressImmediate(addr.addr)) {
             branchPtr(cond, Operand(addr), ptr, label);
         } else {
             mov(ImmPtr(addr.addr), ScratchReg);
             branchPtr(cond, Operand(ScratchReg, 0x0), ptr, label);
         }
     }
     void branchPtr(Condition cond, AsmJSAbsoluteAddress addr, Register ptr, Label *label) {
         JS_ASSERT(ptr != ScratchReg);
@@ -711,17 +711,17 @@ class MacroAssemblerX64 : public MacroAs
     }
     void movePtr(AsmJSImmPtr imm, Register dest) {
         mov(imm, dest);
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         movq(imm, dest);
     }
     void loadPtr(AbsoluteAddress address, Register dest) {
-        if (JSC::X86Assembler::isAddressImmediate(address.addr)) {
+        if (X86Assembler::isAddressImmediate(address.addr)) {
             movq(Operand(address), dest);
         } else {
             mov(ImmPtr(address.addr), ScratchReg);
             loadPtr(Address(ScratchReg, 0x0), dest);
         }
     }
     void loadPtr(const Address &address, Register dest) {
         movq(Operand(address), dest);
@@ -732,17 +732,17 @@ class MacroAssemblerX64 : public MacroAs
     void loadPtr(const BaseIndex &src, Register dest) {
         movq(Operand(src), dest);
     }
     void loadPrivate(const Address &src, Register dest) {
         loadPtr(src, dest);
         shlq(Imm32(1), dest);
     }
     void load32(AbsoluteAddress address, Register dest) {
-        if (JSC::X86Assembler::isAddressImmediate(address.addr)) {
+        if (X86Assembler::isAddressImmediate(address.addr)) {
             movl(Operand(address), dest);
         } else {
             mov(ImmPtr(address.addr), ScratchReg);
             load32(Address(ScratchReg, 0x0), dest);
         }
     }
     void storePtr(ImmWord imm, const Address &address) {
         if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
@@ -764,25 +764,25 @@ class MacroAssemblerX64 : public MacroAs
     }
     void storePtr(Register src, const BaseIndex &address) {
         movq(src, Operand(address));
     }
     void storePtr(Register src, const Operand &dest) {
         movq(src, dest);
     }
     void storePtr(Register src, AbsoluteAddress address) {
-        if (JSC::X86Assembler::isAddressImmediate(address.addr)) {
+        if (X86Assembler::isAddressImmediate(address.addr)) {
             movq(src, Operand(address));
         } else {
             mov(ImmPtr(address.addr), ScratchReg);
             storePtr(src, Address(ScratchReg, 0x0));
         }
     }
     void store32(Register src, AbsoluteAddress address) {
-        if (JSC::X86Assembler::isAddressImmediate(address.addr)) {
+        if (X86Assembler::isAddressImmediate(address.addr)) {
             movl(src, Operand(address));
         } else {
             mov(ImmPtr(address.addr), ScratchReg);
             store32(src, Address(ScratchReg, 0x0));
         }
     }
     void rshiftPtr(Imm32 imm, Register dest) {
         shrq(imm, dest);
@@ -1294,17 +1294,17 @@ class MacroAssemblerX64 : public MacroAs
         cvtsq2sd(src, dest);
     }
 
     void convertUInt32ToFloat32(Register src, FloatRegister dest) {
         cvtsq2ss(src, dest);
     }
 
     void inc64(AbsoluteAddress dest) {
-        if (JSC::X86Assembler::isAddressImmediate(dest.addr)) {
+        if (X86Assembler::isAddressImmediate(dest.addr)) {
             addPtr(Imm32(1), Operand(dest));
         } else {
             mov(ImmPtr(dest.addr), ScratchReg);
             addPtr(Imm32(1), Address(ScratchReg, 0));
         }
     }
 
     void incrementInt32Value(const Address &addr) {
--- a/js/src/jit/x86/Architecture-x86.h
+++ b/js/src/jit/x86/Architecture-x86.h
@@ -2,17 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_x86_Architecture_x86_h
 #define jit_x86_Architecture_x86_h
 
-#include "assembler/assembler/X86Assembler.h"
+#include "jit/shared/BaseAssembler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 // In bytes: slots needed for potential memory->memory move spills.
 //   +8 for cycles
 //   +4 for gpr spills
 //   +8 for double spills
@@ -30,17 +30,17 @@ static const int32_t NUNBOX32_PAYLOAD_OF
 // These offsets are related to bailouts.
 ////
 
 // Size of each bailout table entry. On x86 this is a 5-byte relative call.
 static const uint32_t BAILOUT_TABLE_ENTRY_SIZE    = 5;
 
 class Registers {
   public:
-    typedef JSC::X86Registers::RegisterID Code;
+    typedef X86Registers::RegisterID Code;
     typedef uint8_t SetType;
     static uint32_t SetSize(SetType x) {
         static_assert(sizeof(SetType) == 1, "SetType must be 8 bits");
         return mozilla::CountPopulation32(x);
     }
     static uint32_t FirstBit(SetType x) {
         return mozilla::CountTrailingZeroes32(x);
     }
@@ -56,102 +56,102 @@ class Registers {
     static Code FromName(const char *name) {
         for (size_t i = 0; i < Total; i++) {
             if (strcmp(GetName(Code(i)), name) == 0)
                 return Code(i);
         }
         return Invalid;
     }
 
-    static const Code StackPointer = JSC::X86Registers::esp;
-    static const Code Invalid = JSC::X86Registers::invalid_reg;
+    static const Code StackPointer = X86Registers::esp;
+    static const Code Invalid = X86Registers::invalid_reg;
 
     static const uint32_t Total = 8;
     static const uint32_t TotalPhys = 8;
     static const uint32_t Allocatable = 7;
 
     static const uint32_t AllMask = (1 << Total) - 1;
 
     static const uint32_t ArgRegMask = 0;
 
     static const uint32_t VolatileMask =
-        (1 << JSC::X86Registers::eax) |
-        (1 << JSC::X86Registers::ecx) |
-        (1 << JSC::X86Registers::edx);
+        (1 << X86Registers::eax) |
+        (1 << X86Registers::ecx) |
+        (1 << X86Registers::edx);
 
     static const uint32_t NonVolatileMask =
-        (1 << JSC::X86Registers::ebx) |
-        (1 << JSC::X86Registers::esi) |
-        (1 << JSC::X86Registers::edi) |
-        (1 << JSC::X86Registers::ebp);
+        (1 << X86Registers::ebx) |
+        (1 << X86Registers::esi) |
+        (1 << X86Registers::edi) |
+        (1 << X86Registers::ebp);
 
     static const uint32_t WrapperMask =
         VolatileMask |
-        (1 << JSC::X86Registers::ebx);
+        (1 << X86Registers::ebx);
 
     static const uint32_t SingleByteRegs =
-        (1 << JSC::X86Registers::eax) |
-        (1 << JSC::X86Registers::ecx) |
-        (1 << JSC::X86Registers::edx) |
-        (1 << JSC::X86Registers::ebx);
+        (1 << X86Registers::eax) |
+        (1 << X86Registers::ecx) |
+        (1 << X86Registers::edx) |
+        (1 << X86Registers::ebx);
 
     static const uint32_t NonAllocatableMask =
-        (1 << JSC::X86Registers::esp);
+        (1 << X86Registers::esp);
 
     static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
 
     // Registers that can be allocated without being saved, generally.
     static const uint32_t TempMask = VolatileMask & ~NonAllocatableMask;
 
     // Registers returned from a JS -> JS call.
     static const uint32_t JSCallMask =
-        (1 << JSC::X86Registers::ecx) |
-        (1 << JSC::X86Registers::edx);
+        (1 << X86Registers::ecx) |
+        (1 << X86Registers::edx);
 
     // Registers returned from a JS -> C call.
     static const uint32_t CallMask =
-        (1 << JSC::X86Registers::eax);
+        (1 << X86Registers::eax);
 };
 
 // Smallest integer type that can hold a register bitmask.
 typedef uint8_t PackedRegisterMask;
 
 class FloatRegisters {
   public:
-    typedef JSC::X86Registers::XMMRegisterID Code;
+    typedef X86Registers::XMMRegisterID Code;
     typedef uint32_t SetType;
     static const char *GetName(Code code) {
         static const char * const Names[] = { "xmm0", "xmm1", "xmm2", "xmm3",
                                               "xmm4", "xmm5", "xmm6", "xmm7" };
         return Names[code];
     }
 
     static Code FromName(const char *name) {
         for (size_t i = 0; i < Total; i++) {
             if (strcmp(GetName(Code(i)), name) == 0)
                 return Code(i);
         }
         return Invalid;
     }
 
-    static const Code Invalid = JSC::X86Registers::invalid_xmm;
+    static const Code Invalid = X86Registers::invalid_xmm;
 
     static const uint32_t Total = 8;
     static const uint32_t TotalPhys = 8;
     static const uint32_t Allocatable = 7;
 
     static const uint32_t AllMask = (1 << Total) - 1;
     static const uint32_t AllDoubleMask = AllMask;
     static const uint32_t VolatileMask = AllMask;
     static const uint32_t NonVolatileMask = 0;
 
     static const uint32_t WrapperMask = VolatileMask;
 
     static const uint32_t NonAllocatableMask =
-        (1 << JSC::X86Registers::xmm7);
+        (1 << X86Registers::xmm7);
 
     static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
 };
 
 template <typename T>
 class TypedRegisterSet;
 
 struct FloatRegister {
--- a/js/src/jit/x86/Assembler-x86.cpp
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -43,17 +43,17 @@ const Register ABIArgGenerator::NonRetur
 
 void
 Assembler::executableCopy(uint8_t *buffer)
 {
     AssemblerX86Shared::executableCopy(buffer);
 
     for (size_t i = 0; i < jumps_.length(); i++) {
         RelativePatch &rp = jumps_[i];
-        JSC::X86Assembler::setRel32(buffer + rp.offset, rp.target);
+        X86Assembler::setRel32(buffer + rp.offset, rp.target);
     }
 }
 
 class RelocationIterator
 {
     CompactBufferReader reader_;
     uint32_t offset_;
 
@@ -72,17 +72,17 @@ class RelocationIterator
     uint32_t offset() const {
         return offset_;
     }
 };
 
 static inline JitCode *
 CodeFromJump(uint8_t *jump)
 {
-    uint8_t *target = (uint8_t *)JSC::X86Assembler::getRel32Target(jump);
+    uint8_t *target = (uint8_t *)X86Assembler::getRel32Target(jump);
     return JitCode::FromExecutable(target);
 }
 
 void
 Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
 {
     RelocationIterator iter(reader);
     while (iter.read()) {
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -4,45 +4,45 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_x86_Assembler_x86_h
 #define jit_x86_Assembler_x86_h
 
 #include "mozilla/ArrayUtils.h"
 
-#include "assembler/assembler/X86Assembler.h"
 #include "jit/CompactBuffer.h"
 #include "jit/IonCode.h"
 #include "jit/JitCompartment.h"
 #include "jit/shared/Assembler-shared.h"
+#include "jit/shared/BaseAssembler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
-static MOZ_CONSTEXPR_VAR Register eax = { JSC::X86Registers::eax };
-static MOZ_CONSTEXPR_VAR Register ecx = { JSC::X86Registers::ecx };
-static MOZ_CONSTEXPR_VAR Register edx = { JSC::X86Registers::edx };
-static MOZ_CONSTEXPR_VAR Register ebx = { JSC::X86Registers::ebx };
-static MOZ_CONSTEXPR_VAR Register esp = { JSC::X86Registers::esp };
-static MOZ_CONSTEXPR_VAR Register ebp = { JSC::X86Registers::ebp };
-static MOZ_CONSTEXPR_VAR Register esi = { JSC::X86Registers::esi };
-static MOZ_CONSTEXPR_VAR Register edi = { JSC::X86Registers::edi };
+static MOZ_CONSTEXPR_VAR Register eax = { X86Registers::eax };
+static MOZ_CONSTEXPR_VAR Register ecx = { X86Registers::ecx };
+static MOZ_CONSTEXPR_VAR Register edx = { X86Registers::edx };
+static MOZ_CONSTEXPR_VAR Register ebx = { X86Registers::ebx };
+static MOZ_CONSTEXPR_VAR Register esp = { X86Registers::esp };
+static MOZ_CONSTEXPR_VAR Register ebp = { X86Registers::ebp };
+static MOZ_CONSTEXPR_VAR Register esi = { X86Registers::esi };
+static MOZ_CONSTEXPR_VAR Register edi = { X86Registers::edi };
 
-static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { JSC::X86Registers::xmm0 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { JSC::X86Registers::xmm1 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { JSC::X86Registers::xmm2 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { JSC::X86Registers::xmm3 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { JSC::X86Registers::xmm4 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { JSC::X86Registers::xmm5 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { JSC::X86Registers::xmm6 };
-static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { JSC::X86Registers::xmm7 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { X86Registers::xmm0 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { X86Registers::xmm1 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { X86Registers::xmm2 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { X86Registers::xmm3 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { X86Registers::xmm4 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { X86Registers::xmm5 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { X86Registers::xmm6 };
+static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { X86Registers::xmm7 };
 
-static MOZ_CONSTEXPR_VAR Register InvalidReg = { JSC::X86Registers::invalid_reg };
-static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { JSC::X86Registers::invalid_xmm };
+static MOZ_CONSTEXPR_VAR Register InvalidReg = { X86Registers::invalid_reg };
+static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { X86Registers::invalid_xmm };
 
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
 static MOZ_CONSTEXPR_VAR Register StackPointer = esp;
 static MOZ_CONSTEXPR_VAR Register FramePointer = ebp;
 static MOZ_CONSTEXPR_VAR Register ReturnReg = eax;
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = xmm0;
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = xmm7;
@@ -155,17 +155,17 @@ PatchJump(CodeLocationJump jump, CodeLoc
 #ifdef DEBUG
     // Assert that we're overwriting a jump instruction, either:
     //   0F 80+cc <imm32>, or
     //   E9 <imm32>
     unsigned char *x = (unsigned char *)jump.raw() - 5;
     JS_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
               (*x == 0xE9));
 #endif
-    JSC::X86Assembler::setRel32(jump.raw(), label.raw());
+    X86Assembler::setRel32(jump.raw(), label.raw());
 }
 static inline void
 PatchBackedge(CodeLocationJump &jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     PatchJump(jump_, label);
 }
 
 // Return operand from a JS -> JS call.
@@ -366,17 +366,17 @@ class Assembler : public AssemblerX86Sha
     }
 
     void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
     }
     void j(Condition cond, ImmPtr target,
            Relocation::Kind reloc = Relocation::HARDCODED) {
-        JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
+        JmpSrc src = masm.jCC(static_cast<X86Assembler::Condition>(cond));
         addPendingJump(src, target, reloc);
     }
 
     void jmp(JitCode *target) {
         jmp(ImmPtr(target->raw()), Relocation::JITCODE);
     }
     void j(Condition cond, JitCode *target) {
         j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
@@ -408,19 +408,19 @@ class Assembler : public AssemblerX86Sha
         return 5;
     }
 
     // Re-routes pending jumps to an external target, flushing the label in the
     // process.
     void retarget(Label *label, ImmPtr target, Relocation::Kind reloc) {
         if (label->used()) {
             bool more;
-            JSC::X86Assembler::JmpSrc jmp(label->offset());
+            X86Assembler::JmpSrc jmp(label->offset());
             do {
-                JSC::X86Assembler::JmpSrc next;
+                X86Assembler::JmpSrc next;
                 more = masm.nextJump(jmp, &next);
                 addPendingJump(jmp, target, reloc);
                 jmp = next;
             } while (more);
         }
         label->reset();
     }