[JAEGER] Somehow, the assembler was never checked in. WHOOOOOPSSSSSSS
author      David Anderson <danderson@mozilla.com>
date        Fri, 28 May 2010 00:29:52 -0700
changeset   52612 d907911777bbc7df41486d480540338477d96d78
parent      52611 4b73e56e7acba439f9119356c9d0e68862609546
child       52613 85d1995d0c5c44438d7034646461538c66ca6f60
push id     unknown
push user   unknown
push date   unknown
milestone   1.9.3a5pre
[JAEGER] Somehow, the assembler was never checked in. WHOOOOOPSSSSSSS
js/src/assembler/TestMain.cpp
js/src/assembler/assembler/ARMAssembler.cpp
js/src/assembler/assembler/ARMAssembler.h
js/src/assembler/assembler/ARMv7Assembler.h
js/src/assembler/assembler/AbstractMacroAssembler.h
js/src/assembler/assembler/AssemblerBuffer.h
js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
js/src/assembler/assembler/CodeLocation.h
js/src/assembler/assembler/LinkBuffer.h
js/src/assembler/assembler/MacroAssembler.h
js/src/assembler/assembler/MacroAssemblerARM.cpp
js/src/assembler/assembler/MacroAssemblerARM.h
js/src/assembler/assembler/MacroAssemblerARMv7.h
js/src/assembler/assembler/MacroAssemblerCodeRef.h
js/src/assembler/assembler/MacroAssemblerX86.h
js/src/assembler/assembler/MacroAssemblerX86Common.h
js/src/assembler/assembler/MacroAssemblerX86_64.h
js/src/assembler/assembler/RepatchBuffer.h
js/src/assembler/assembler/X86Assembler.h
js/src/assembler/jit/ExecutableAllocator.cpp
js/src/assembler/jit/ExecutableAllocator.h
js/src/assembler/jit/ExecutableAllocatorPosix.cpp
js/src/assembler/jit/ExecutableAllocatorWin.cpp
js/src/assembler/moco/MocoStubs.h
js/src/assembler/wtf/Assertions.cpp
js/src/assembler/wtf/Assertions.h
js/src/assembler/wtf/Platform.h
js/src/assembler/wtf/SegmentedVector.h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/TestMain.cpp
@@ -0,0 +1,929 @@
+
+// A short test program with which to experiment with the assembler.
+
+// satisfies  CPU(X86_64)
+//#define WTF_CPU_X86_64
+
+// satisfies  ENABLE(ASSEMBLER)
+#define ENABLE_ASSEMBLER 1
+
+// satisfies  ENABLE(JIT)
+#define ENABLE_JIT 1
+
+#define USE_SYSTEM_MALLOC 1
+// leads to FORCE_SYSTEM_MALLOC in wtf/FastMalloc.cpp
+
+#include <jit/ExecutableAllocator.h>
+#include <assembler/LinkBuffer.h>
+#include <assembler/CodeLocation.h>
+#include <assembler/RepatchBuffer.h>
+
+#include <assembler/MacroAssembler.h>
+
+#include <stdio.h>
+
+/////////////////////////////////////////////////////////////////
+// Temporary scaffolding for selecting the arch
+#undef ARCH_x86
+#undef ARCH_amd64
+#undef ARCH_arm
+
+#if defined(__APPLE__) && defined(__i386__)
+#  define ARCH_x86 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+#  define ARCH_amd64 1
+#elif defined(__linux__) && defined(__i386__)
+#  define ARCH_x86 1
+#elif defined(__linux__) && defined(__x86_64__)
+#  define ARCH_amd64 1
+#elif defined(__linux__) && defined(__arm__)
+#  define ARCH_arm 1
+#elif defined(_MSC_VER) && defined(_M_IX86)
+#  define ARCH_x86 1
+#endif
+/////////////////////////////////////////////////////////////////
+
+// Just somewhere convenient to put a breakpoint before running
+// under gdb.
+#if WTF_COMPILER_GCC
+__attribute__((noinline))
+#endif
+void pre_run ( void ) { }
+
+/////////////////////////////////////////////////////////////////
+//// test1 (simple straight line code) 
+#if WTF_COMPILER_GCC
+
+void test1 ( void )
+{
+  printf("\n------------ Test 1 (straight line code) ------------\n\n" );
+
+  // Create new assembler
+  JSC::MacroAssembler* am = new JSC::MacroAssembler();
+
+#if defined(ARCH_amd64)
+  JSC::X86Registers::RegisterID areg = JSC::X86Registers::r15;
+  // dump some instructions into it
+  //    xor    %r15,%r15
+  //    add    $0x7b,%r15
+  //    add    $0x141,%r15
+  //    retq
+  am->xorPtr(areg,areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
+  am->ret();
+#endif
+
+#if defined(ARCH_x86)
+  JSC::X86Registers::RegisterID areg = JSC::X86Registers::edi;
+  // dump some instructions into it
+  //    xor    %edi,%edi
+  //    add    $0x7b,%edi
+  //    add    $0x141,%edi
+  //    ret
+  am->xorPtr(areg,areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
+  am->ret();
+#endif
+
+#if defined(ARCH_arm)
+  JSC::ARMRegisters::RegisterID areg = JSC::ARMRegisters::r8;
+  //    eors    r8, r8, r8
+  //    adds    r8, r8, #123    ; 0x7b
+  //    mov     r3, #256        ; 0x100
+  //    orr     r3, r3, #65     ; 0x41
+  //    adds    r8, r8, r3
+  //    mov     pc, lr
+  am->xorPtr(areg,areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
+  am->ret();
+#endif
+
+  // prepare a link buffer, into which we can copy the completed insns
+  JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
+
+  // intermediate step .. get the pool suited for the size of code in 'am'
+  //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
+  JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
+
+  // constructor for LinkBuffer asks ep to allocate r-x memory,
+  // then copies it there.
+  JSC::LinkBuffer patchBuffer(am, ep);
+
+  // finalize
+  JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
+
+  // cr now holds a pointer to the final runnable code.
+  void* entry = cr.m_code.executableAddress();
+
+  printf("disas %p %p\n",
+         entry, (char*)entry + cr.m_size);
+  pre_run();
+
+  unsigned long result = 0x55555555;
+
+#if defined(ARCH_amd64)
+  // call the generated piece of code.  It puts its result in r15.
+  __asm__ __volatile__(
+     "callq *%1"           "\n\t"
+     "movq  %%r15, %0"     "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "r15","cc"
+  );
+#endif
+#if defined(ARCH_x86)
+  // call the generated piece of code.  It puts its result in edi.
+  __asm__ __volatile__(
+     "calll *%1"           "\n\t"
+     "movl  %%edi, %0"     "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "edi","cc"
+  );
+#endif
+#if defined(ARCH_arm)
+  // call the generated piece of code.  It puts its result in r8.
+  __asm__ __volatile__(
+     "blx   %1"            "\n\t"
+     "mov   %0, %%r8"      "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "r8","cc"
+  );
+#endif
+
+  printf("\n");
+  printf("value computed is %lu (expected 444)\n", result);
+  printf("\n");
+
+  delete eal;
+  delete am;
+}
+
+#endif /* WTF_COMPILER_GCC */
+
+/////////////////////////////////////////////////////////////////
+//// test2 (a simple counting-down loop) 
+#if WTF_COMPILER_GCC
+
+void test2 ( void )
+{
+  printf("\n------------ Test 2 (mini loop) ------------\n\n" );
+
+  // Create new assembler
+  JSC::MacroAssembler* am = new JSC::MacroAssembler();
+
+#if defined(ARCH_amd64)
+  JSC::X86Registers::RegisterID areg = JSC::X86Registers::r15;
+  //    xor    %r15,%r15
+  //    add    $0x7b,%r15
+  //    add    $0x141,%r15
+  //    sub    $0x1,%r15
+  //    mov    $0x0,%r11
+  //    cmp    %r11,%r15
+  //    jne    0x7ff6d3e6a00e
+  //    retq
+  // so r15 always winds up being zero
+  am->xorPtr(areg,areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
+
+  JSC::MacroAssembler::Label loopHeadLabel(am);
+  am->subPtr(JSC::MacroAssembler::Imm32(1), areg);
+
+  JSC::MacroAssembler::Jump j
+     = am->branchPtr(JSC::MacroAssembler::NotEqual,
+                     areg, JSC::MacroAssembler::ImmPtr(0));
+  j.linkTo(loopHeadLabel, am);
+
+  am->ret();
+#endif
+
+#if defined(ARCH_x86)
+  JSC::X86Registers::RegisterID areg = JSC::X86Registers::edi;
+  //    xor    %edi,%edi
+  //    add    $0x7b,%edi
+  //    add    $0x141,%edi
+  //    sub    $0x1,%edi
+  //    test   %edi,%edi
+  //    jne    0xf7f9700b
+  //    ret
+  // so edi always winds up being zero
+  am->xorPtr(areg,areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
+
+  JSC::MacroAssembler::Label loopHeadLabel(am);
+  am->subPtr(JSC::MacroAssembler::Imm32(1), areg);
+
+  JSC::MacroAssembler::Jump j
+     = am->branchPtr(JSC::MacroAssembler::NotEqual,
+                     areg, JSC::MacroAssembler::ImmPtr(0));
+  j.linkTo(loopHeadLabel, am);
+
+  am->ret();
+#endif
+
+#if defined(ARCH_arm)
+  JSC::ARMRegisters::RegisterID areg = JSC::ARMRegisters::r8;
+  //    eors    r8, r8, r8
+  //    adds    r8, r8, #123    ; 0x7b
+  //    mov     r3, #256        ; 0x100
+  //    orr     r3, r3, #65     ; 0x41
+  //    adds    r8, r8, r3
+  //    subs    r8, r8, #1      ; 0x1
+  //    ldr     r3, [pc, #8]    ; 0x40026028
+  //    cmp     r8, r3
+  //    bne     0x40026014
+  //    mov     pc, lr
+  //    andeq   r0, r0, r0         // DATA (0)
+  //    andeq   r0, r0, r4, lsl r0 // DATA (?? what's this for?)
+  // so r8 always winds up being zero
+  am->xorPtr(areg,areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), areg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), areg);
+
+  JSC::MacroAssembler::Label loopHeadLabel(am);
+  am->subPtr(JSC::MacroAssembler::Imm32(1), areg);
+
+  JSC::MacroAssembler::Jump j
+     = am->branchPtr(JSC::MacroAssembler::NotEqual,
+                     areg, JSC::MacroAssembler::ImmPtr(0));
+  j.linkTo(loopHeadLabel, am);
+
+  am->ret();
+#endif
+
+  // prepare a link buffer, into which we can copy the completed insns
+  JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
+
+  // intermediate step .. get the pool suited for the size of code in 'am'
+  //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
+  JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
+
+  // constructor for LinkBuffer asks ep to allocate r-x memory,
+  // then copies it there.
+  JSC::LinkBuffer patchBuffer(am, ep);
+
+  // finalize
+  JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
+
+  // cr now holds a pointer to the final runnable code.
+  void* entry = cr.m_code.executableAddress();
+
+  printf("disas %p %p\n",
+         entry, (char*)entry + cr.m_size);
+  pre_run();
+
+  unsigned long result = 0x55555555;
+
+#if defined(ARCH_amd64)
+  // call the generated piece of code.  It puts its result in r15.
+  __asm__ __volatile__(
+     "callq *%1"           "\n\t"
+     "movq  %%r15, %0"     "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "r15","cc"
+  );
+#endif
+#if defined(ARCH_x86)
+  // call the generated piece of code.  It puts its result in edi.
+  __asm__ __volatile__(
+     "calll *%1"           "\n\t"
+     "movl  %%edi, %0"     "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "edi","cc"
+  );
+#endif
+#if defined(ARCH_arm)
+  // call the generated piece of code.  It puts its result in r8.
+  __asm__ __volatile__(
+     "blx   %1"            "\n\t"
+     "mov   %0, %%r8"      "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "r8","cc"
+  );
+#endif
+
+  printf("\n");
+  printf("value computed is %lu (expected 0)\n", result);
+  printf("\n");
+
+  delete eal;
+  delete am;
+}
+
+#endif /* WTF_COMPILER_GCC */
+
+/////////////////////////////////////////////////////////////////
+//// test3 (if-then-else) 
+#if WTF_COMPILER_GCC
+
+void test3 ( void )
+{
+  printf("\n------------ Test 3 (if-then-else) ------------\n\n" );
+
+  // Create new assembler
+  JSC::MacroAssembler* am = new JSC::MacroAssembler();
+
+#if defined(ARCH_amd64)
+  JSC::X86Registers::RegisterID areg = JSC::X86Registers::r15;
+  //    mov    $0x64,%r15d
+  //    mov    $0x0,%r11
+  //    cmp    %r11,%r15
+  //    jne    0x7ff6d3e6a024
+  //    mov    $0x40,%r15d
+  //    jmpq   0x7ff6d3e6a02a
+  //    mov    $0x4,%r15d
+  //    retq
+  // so r15 ends up being 4
+
+  // put a value in reg
+  am->move(JSC::MacroAssembler::Imm32(100), areg);
+
+  // test, and conditionally jump to 'else' branch
+  JSC::MacroAssembler::Jump jToElse
+    = am->branchPtr(JSC::MacroAssembler::NotEqual,
+                    areg, JSC::MacroAssembler::ImmPtr(0));
+
+  // 'then' branch
+  am->move(JSC::MacroAssembler::Imm32(64), areg);
+  JSC::MacroAssembler::Jump jToAfter
+    = am->jump();
+
+  // 'else' branch
+  JSC::MacroAssembler::Label elseLbl(am);
+  am->move(JSC::MacroAssembler::Imm32(4), areg);
+
+  // after
+  JSC::MacroAssembler::Label afterLbl(am);
+
+  am->ret();
+#endif
+
+#if defined(ARCH_x86)
+  JSC::X86Registers::RegisterID areg = JSC::X86Registers::edi;
+  //    mov    $0x64,%edi
+  //    test   %edi,%edi
+  //    jne    0xf7f22017
+  //    mov    $0x40,%edi
+  //    jmp    0xf7f2201c
+  //    mov    $0x4,%edi
+  //    ret
+  // so edi ends up being 4
+
+  // put a value in reg
+  am->move(JSC::MacroAssembler::Imm32(100), areg);
+
+  // test, and conditionally jump to 'else' branch
+  JSC::MacroAssembler::Jump jToElse
+    = am->branchPtr(JSC::MacroAssembler::NotEqual,
+                    areg, JSC::MacroAssembler::ImmPtr(0));
+
+  // 'then' branch
+  am->move(JSC::MacroAssembler::Imm32(64), areg);
+  JSC::MacroAssembler::Jump jToAfter
+    = am->jump();
+
+  // 'else' branch
+  JSC::MacroAssembler::Label elseLbl(am);
+  am->move(JSC::MacroAssembler::Imm32(4), areg);
+
+  // after
+  JSC::MacroAssembler::Label afterLbl(am);
+
+  am->ret();
+#endif
+
+#if defined(ARCH_arm)
+  JSC::ARMRegisters::RegisterID areg = JSC::ARMRegisters::r8;
+  //    mov     r8, #100        ; 0x64
+  //    ldr     r3, [pc, #20]   ; 0x40026020
+  //    cmp     r8, r3
+  //    bne     0x40026018
+  //    mov     r8, #64 ; 0x40
+  //    b       0x4002601c
+  //    mov     r8, #4  ; 0x4
+  //    mov     pc, lr
+  //    andeq   r0, r0, r0           // DATA
+  //    andeq   r0, r0, r8, lsl r0   // DATA
+  //    andeq   r0, r0, r12, lsl r0  // DATA
+  //    ldr     r3, [r3, -r3]        // DATA
+  // so r8 ends up being 4
+
+  // put a value in reg
+  am->move(JSC::MacroAssembler::Imm32(100), areg);
+
+  // test, and conditionally jump to 'else' branch
+  JSC::MacroAssembler::Jump jToElse
+    = am->branchPtr(JSC::MacroAssembler::NotEqual,
+                    areg, JSC::MacroAssembler::ImmPtr(0));
+
+  // 'then' branch
+  am->move(JSC::MacroAssembler::Imm32(64), areg);
+  JSC::MacroAssembler::Jump jToAfter
+    = am->jump();
+
+  // 'else' branch
+  JSC::MacroAssembler::Label elseLbl(am);
+  am->move(JSC::MacroAssembler::Imm32(4), areg);
+
+  // after
+  JSC::MacroAssembler::Label afterLbl(am);
+
+  am->ret();
+#endif
+
+  // set branch targets appropriately
+  jToElse.linkTo(elseLbl, am);
+  jToAfter.linkTo(afterLbl, am);
+
+  // prepare a link buffer, into which we can copy the completed insns
+  JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
+
+  // intermediate step .. get the pool suited for the size of code in 'am'
+  //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
+  JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
+
+  // constructor for LinkBuffer asks ep to allocate r-x memory,
+  // then copies it there.
+  JSC::LinkBuffer patchBuffer(am, ep);
+
+  // finalize
+  JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
+
+  // cr now holds a pointer to the final runnable code.
+  void* entry = cr.m_code.executableAddress();
+
+  printf("disas %p %p\n",
+         entry, (char*)entry + cr.m_size);
+  pre_run();
+
+  unsigned long result = 0x55555555;
+
+#if defined(ARCH_amd64)
+  // call the generated piece of code.  It puts its result in r15.
+  __asm__ __volatile__(
+     "callq *%1"           "\n\t"
+     "movq  %%r15, %0"     "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "r15","cc"
+  );
+#endif
+#if defined(ARCH_x86)
+  // call the generated piece of code.  It puts its result in edi.
+  __asm__ __volatile__(
+     "calll *%1"           "\n\t"
+     "movl  %%edi, %0"     "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "edi","cc"
+  );
+#endif
+#if defined(ARCH_arm)
+  // call the generated piece of code.  It puts its result in r8.
+  __asm__ __volatile__(
+     "blx   %1"            "\n\t"
+     "mov   %0, %%r8"      "\n"
+     :/*out*/   "=r"(result)
+     :/*in*/    "r"(entry)
+     :/*trash*/ "r8","cc"
+  );
+#endif
+
+  printf("\n");
+  printf("value computed is %lu (expected 4)\n", result);
+  printf("\n");
+
+  delete eal;
+  delete am;
+}
+
+#endif /* WTF_COMPILER_GCC */
+
+/////////////////////////////////////////////////////////////////
+//// test4 (callable function) 
+
+void test4 ( void )
+{
+  printf("\n------------ Test 4 (callable fn) ------------\n\n" );
+
+  // Create new assembler
+  JSC::MacroAssembler* am = new JSC::MacroAssembler();
+
+#if defined(ARCH_amd64)
+  // ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
+  //    push   %rbp
+  //    mov    %rsp,%rbp
+  //    push   %rbx
+  //    push   %r12
+  //    push   %r13
+  //    push   %r14
+  //    push   %r15
+  //    xor    %rax,%rax
+  //    add    $0x7b,%rax
+  //    add    $0x141,%rax
+  //    pop    %r15
+  //    pop    %r14
+  //    pop    %r13
+  //    pop    %r12
+  //    pop    %rbx
+  //    mov    %rbp,%rsp
+  //    pop    %rbp
+  //    retq
+  // callable as a normal function, returns 444
+
+  JSC::X86Registers::RegisterID rreg = JSC::X86Registers::eax;
+  am->push(JSC::X86Registers::ebp);
+  am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
+  am->push(JSC::X86Registers::ebx);
+  am->push(JSC::X86Registers::r12);
+  am->push(JSC::X86Registers::r13);
+  am->push(JSC::X86Registers::r14);
+  am->push(JSC::X86Registers::r15);
+
+  am->xorPtr(rreg,rreg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), rreg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), rreg);
+
+  am->pop(JSC::X86Registers::r15);
+  am->pop(JSC::X86Registers::r14);
+  am->pop(JSC::X86Registers::r13);
+  am->pop(JSC::X86Registers::r12);
+  am->pop(JSC::X86Registers::ebx);
+  am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
+  am->pop(JSC::X86Registers::ebp);
+  am->ret();
+#endif
+
+#if defined(ARCH_x86)
+  // ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
+  //    push   %ebp
+  //    mov    %esp,%ebp
+  //    push   %ebx
+  //    push   %esi
+  //    push   %edi
+  //    xor    %eax,%eax
+  //    add    $0x7b,%eax
+  //    add    $0x141,%eax
+  //    pop    %edi
+  //    pop    %esi
+  //    pop    %ebx
+  //    mov    %ebp,%esp
+  //    pop    %ebp
+  //    ret
+  // callable as a normal function, returns 444
+
+  JSC::X86Registers::RegisterID rreg = JSC::X86Registers::eax;
+
+  am->push(JSC::X86Registers::ebp);
+  am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
+  am->push(JSC::X86Registers::ebx);
+  am->push(JSC::X86Registers::esi);
+  am->push(JSC::X86Registers::edi);
+
+  am->xorPtr(rreg,rreg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), rreg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), rreg);
+
+  am->pop(JSC::X86Registers::edi);
+  am->pop(JSC::X86Registers::esi);
+  am->pop(JSC::X86Registers::ebx);
+  am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
+  am->pop(JSC::X86Registers::ebp);
+  am->ret();
+#endif
+
+#if defined(ARCH_arm)
+  // ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
+  //    push    {r4}            ; (str r4, [sp, #-4]!)
+  //    push    {r5}            ; (str r5, [sp, #-4]!)
+  //    push    {r6}            ; (str r6, [sp, #-4]!)
+  //    push    {r7}            ; (str r7, [sp, #-4]!)
+  //    push    {r8}            ; (str r8, [sp, #-4]!)
+  //    push    {r9}            ; (str r9, [sp, #-4]!)
+  //    push    {r10}           ; (str r10, [sp, #-4]!)
+  //    push    {r11}           ; (str r11, [sp, #-4]!)
+  //    eors    r0, r0, r0
+  //    adds    r0, r0, #123    ; 0x7b
+  //    mov     r3, #256        ; 0x100
+  //    orr     r3, r3, #65     ; 0x41
+  //    adds    r0, r0, r3
+  //    pop     {r11}           ; (ldr r11, [sp], #4)
+  //    pop     {r10}           ; (ldr r10, [sp], #4)
+  //    pop     {r9}            ; (ldr r9, [sp], #4)
+  //    pop     {r8}            ; (ldr r8, [sp], #4)
+  //    pop     {r7}            ; (ldr r7, [sp], #4)
+  //    pop     {r6}            ; (ldr r6, [sp], #4)
+  //    pop     {r5}            ; (ldr r5, [sp], #4)
+  //    pop     {r4}            ; (ldr r4, [sp], #4)
+  //    mov     pc, lr
+  // callable as a normal function, returns 444
+
+  JSC::ARMRegisters::RegisterID rreg = JSC::ARMRegisters::r0;
+
+  am->push(JSC::ARMRegisters::r4);
+  am->push(JSC::ARMRegisters::r5);
+  am->push(JSC::ARMRegisters::r6);
+  am->push(JSC::ARMRegisters::r7);
+  am->push(JSC::ARMRegisters::r8);
+  am->push(JSC::ARMRegisters::r9);
+  am->push(JSC::ARMRegisters::r10);
+  am->push(JSC::ARMRegisters::r11);
+
+  am->xorPtr(rreg,rreg);
+  am->addPtr(JSC::MacroAssembler::Imm32(123), rreg);
+  am->addPtr(JSC::MacroAssembler::Imm32(321), rreg);
+
+  am->pop(JSC::ARMRegisters::r11);
+  am->pop(JSC::ARMRegisters::r10);
+  am->pop(JSC::ARMRegisters::r9);
+  am->pop(JSC::ARMRegisters::r8);
+  am->pop(JSC::ARMRegisters::r7);
+  am->pop(JSC::ARMRegisters::r6);
+  am->pop(JSC::ARMRegisters::r5);
+  am->pop(JSC::ARMRegisters::r4);
+
+  am->ret();
+#endif
+
+  // prepare a link buffer, into which we can copy the completed insns
+  JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
+
+  // intermediate step .. get the pool suited for the size of code in 'am'
+  //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
+  JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
+
+  // constructor for LinkBuffer asks ep to allocate r-x memory,
+  // then copies it there.
+  JSC::LinkBuffer patchBuffer(am, ep);
+
+  // now fix up any branches/calls
+  //JSC::FunctionPtr target = JSC::FunctionPtr::FunctionPtr( &cube );
+
+  // finalize
+  JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
+
+  // cr now holds a pointer to the final runnable code.
+  void* entry = cr.m_code.executableAddress();
+
+  printf("disas %p %p\n",
+         entry, (char*)entry + cr.m_size);
+  pre_run();
+
+  // call the function
+  unsigned long (*fn)(void) = (unsigned long (*)())entry;
+  unsigned long result = fn();
+
+  printf("\n");
+  printf("value computed is %lu (expected 444)\n", result);
+  printf("\n");
+
+  delete eal;
+  delete am;
+}
+
+
+/////////////////////////////////////////////////////////////////
+//// test5 (call in, out, repatch) 
+
+// a function which we will call from the JIT generated code
+unsigned long cube   ( unsigned long x ) { return x * x * x; }
+unsigned long square ( unsigned long x ) { return x * x; }
+
+void test5 ( void )
+{
+  printf("\n--------- Test 5 (call in, out, repatch) ---------\n\n" );
+
+  // Create new assembler
+  JSC::MacroAssembler* am = new JSC::MacroAssembler();
+  JSC::MacroAssembler::Call cl;
+  ptrdiff_t offset_of_call_insn;
+
+#if defined(ARCH_amd64)
+  // ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
+  // and then call a non-JIT-generated helper from within
+  // this code
+  //    push   %rbp
+  //    mov    %rsp,%rbp
+  //    push   %rbx
+  //    push   %r12
+  //    push   %r13
+  //    push   %r14
+  //    push   %r15
+  //    mov    $0x9,%edi
+  //    mov    $0x40187e,%r11
+  //    callq  *%r11
+  //    pop    %r15
+  //    pop    %r14
+  //    pop    %r13
+  //    pop    %r12
+  //    pop    %rbx
+  //    mov    %rbp,%rsp
+  //    pop    %rbp
+  //    retq
+  JSC::MacroAssembler::Label startOfFnLbl(am);
+  am->push(JSC::X86Registers::ebp);
+  am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
+  am->push(JSC::X86Registers::ebx);
+  am->push(JSC::X86Registers::r12);
+  am->push(JSC::X86Registers::r13);
+  am->push(JSC::X86Registers::r14);
+  am->push(JSC::X86Registers::r15);
+
+  // let's compute cube(9).  Move $9 to the first arg reg.
+  am->move(JSC::MacroAssembler::Imm32(9), JSC::X86Registers::edi);
+  cl = am->JSC::MacroAssembler::call();
+
+  // result is now in %rax.  Leave it there and just return.
+
+  am->pop(JSC::X86Registers::r15);
+  am->pop(JSC::X86Registers::r14);
+  am->pop(JSC::X86Registers::r13);
+  am->pop(JSC::X86Registers::r12);
+  am->pop(JSC::X86Registers::ebx);
+  am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
+  am->pop(JSC::X86Registers::ebp);
+  am->ret();
+
+  offset_of_call_insn
+     = am->JSC::MacroAssembler::differenceBetween(startOfFnLbl, cl);
+  if (0) printf("XXXXXXXX offset = %lu\n", (unsigned long)offset_of_call_insn);
+#endif
+
+#if defined(ARCH_x86)
+  // ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
+  // and then call a non-JIT-generated helper from within
+  // this code
+  //    push   %ebp
+  //    mov    %esp,%ebp
+  //    push   %ebx
+  //    push   %esi
+  //    push   %edi
+  //    push   $0x9
+  //    call   0x80490e9 <_Z4cubem>
+  //    add    $0x4,%esp
+  //    pop    %edi
+  //    pop    %esi
+  //    pop    %ebx
+  //    mov    %ebp,%esp
+  //    pop    %ebp
+  //    ret
+  JSC::MacroAssembler::Label startOfFnLbl(am);
+  am->push(JSC::X86Registers::ebp);
+  am->move(JSC::X86Registers::esp, JSC::X86Registers::ebp);
+  am->push(JSC::X86Registers::ebx);
+  am->push(JSC::X86Registers::esi);
+  am->push(JSC::X86Registers::edi);
+
+  // let's compute cube(9).  Push $9 on the stack.
+  am->push(JSC::MacroAssembler::Imm32(9));
+  cl = am->JSC::MacroAssembler::call();
+  am->addPtr(JSC::MacroAssembler::Imm32(4), JSC::X86Registers::esp);
+  // result is now in %eax.  Leave it there and just return.
+
+  am->pop(JSC::X86Registers::edi);
+  am->pop(JSC::X86Registers::esi);
+  am->pop(JSC::X86Registers::ebx);
+  am->move(JSC::X86Registers::ebp, JSC::X86Registers::esp);
+  am->pop(JSC::X86Registers::ebp);
+  am->ret();
+
+  offset_of_call_insn
+     = am->JSC::MacroAssembler::differenceBetween(startOfFnLbl, cl);
+  if (0) printf("XXXXXXXX offset = %lu\n",
+                (unsigned long)offset_of_call_insn);
+#endif
+
+#if defined(ARCH_arm)
+  // ADD FN PROLOGUE/EPILOGUE so as to make a mini-function
+  //    push    {r4}            ; (str r4, [sp, #-4]!)
+  //    push    {r5}            ; (str r5, [sp, #-4]!)
+  //    push    {r6}            ; (str r6, [sp, #-4]!)
+  //    push    {r7}            ; (str r7, [sp, #-4]!)
+  //    push    {r8}            ; (str r8, [sp, #-4]!)
+  //    push    {r9}            ; (str r9, [sp, #-4]!)
+  //    push    {r10}           ; (str r10, [sp, #-4]!)
+  //    push    {r11}           ; (str r11, [sp, #-4]!)
+  //    eors    r0, r0, r0
+  //    adds    r0, r0, #123    ; 0x7b
+  //    mov     r3, #256        ; 0x100
+  //    orr     r3, r3, #65     ; 0x41
+  //    adds    r0, r0, r3
+  //    pop     {r11}           ; (ldr r11, [sp], #4)
+  //    pop     {r10}           ; (ldr r10, [sp], #4)
+  //    pop     {r9}            ; (ldr r9, [sp], #4)
+  //    pop     {r8}            ; (ldr r8, [sp], #4)
+  //    pop     {r7}            ; (ldr r7, [sp], #4)
+  //    pop     {r6}            ; (ldr r6, [sp], #4)
+  //    pop     {r5}            ; (ldr r5, [sp], #4)
+  //    pop     {r4}            ; (ldr r4, [sp], #4)
+  //    mov     pc, lr
+  // callable as a normal function, returns 444
+  JSC::MacroAssembler::Label startOfFnLbl(am);
+  am->push(JSC::ARMRegisters::r4);
+  am->push(JSC::ARMRegisters::r5);
+  am->push(JSC::ARMRegisters::r6);
+  am->push(JSC::ARMRegisters::r7);
+  am->push(JSC::ARMRegisters::r8);
+  am->push(JSC::ARMRegisters::r9);
+  am->push(JSC::ARMRegisters::r10);
+  am->push(JSC::ARMRegisters::r11);
+  am->push(JSC::ARMRegisters::lr);
+
+  // let's compute cube(9).  Get $9 into r0.
+  am->move(JSC::MacroAssembler::Imm32(9), JSC::ARMRegisters::r0);
+  cl = am->JSC::MacroAssembler::call();
+  // result is now in r0.  Leave it there and just return.
+
+  am->pop(JSC::ARMRegisters::lr);
+  am->pop(JSC::ARMRegisters::r11);
+  am->pop(JSC::ARMRegisters::r10);
+  am->pop(JSC::ARMRegisters::r9);
+  am->pop(JSC::ARMRegisters::r8);
+  am->pop(JSC::ARMRegisters::r7);
+  am->pop(JSC::ARMRegisters::r6);
+  am->pop(JSC::ARMRegisters::r5);
+  am->pop(JSC::ARMRegisters::r4);
+  am->ret();
+
+  offset_of_call_insn
+     = am->JSC::MacroAssembler::differenceBetween(startOfFnLbl, cl);
+  if (0) printf("XXXXXXXX offset = %lu\n",
+                (unsigned long)offset_of_call_insn);
+#endif
+
+  // prepare a link buffer, into which we can copy the completed insns
+  JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
+
+  // intermediate step .. get the pool suited for the size of code in 'am'
+  //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
+  JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
+
+  // constructor for LinkBuffer asks ep to allocate r-x memory,
+  // then copies it there.
+  JSC::LinkBuffer patchBuffer(am, ep);
+
+  // now fix up any branches/calls
+  JSC::FunctionPtr target = JSC::FunctionPtr::FunctionPtr( &cube );
+  patchBuffer.link(cl, target);
+
+  JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
+
+  // cr now holds a pointer to the final runnable code.
+  void* entry = cr.m_code.executableAddress();
+
+  printf("disas %p %p\n",
+         entry, (char*)entry + cr.m_size);
+
+
+  pre_run();
+
+  printf("\n");
+
+  unsigned long (*fn)() = (unsigned long(*)())entry;
+  unsigned long result = fn();
+
+  printf("value computed is %lu (expected 729)\n", result);
+  printf("\n");
+
+  // now repatch the call in the JITted code to go elsewhere
+  JSC::JITCode jc = JSC::JITCode::JITCode(entry, cr.m_size);
+  JSC::CodeBlock cb = JSC::CodeBlock::CodeBlock(jc);
+
+  // the address of the call insn, that we want to prod
+  JSC::MacroAssemblerCodePtr cp
+     = JSC::MacroAssemblerCodePtr( ((char*)entry) + offset_of_call_insn );
+
+  JSC::RepatchBuffer repatchBuffer(&cb);
+  repatchBuffer.relink( JSC::CodeLocationCall(cp),
+                        JSC::FunctionPtr::FunctionPtr( &square ));
+ 
+  result = fn();
+  printf("value computed is %lu (expected 81)\n", result);
+  printf("\n\n");
+
+  delete eal;
+  delete am;
+}
+
+/////////////////////////////////////////////////////////////////
+
+int main ( void )
+{
+#if WTF_COMPILER_GCC
+  test1();
+  test2();
+  test3();
+#endif
+  test4();
+  test5();
+  return 0;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/ARMAssembler.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <wtf/Platform.h> // MOCO
+
+#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
+{
+    // Must be an ldr ..., [pc +/- imm]
+    ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+    if (constPool && (*insn & 0x1))
+        return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
+
+    ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
+    if (*insn & DT_UP)
+        return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
+    else
+        return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
+}
+
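+// Resolve the branch recorded at 'from' within 'code'.  If a constant
+// pool entry is not forced and the target is within B range, the branch
+// is rewritten in place as a PC-relative B (preserving its condition);
+// otherwise the absolute target is written into the branch's
+// constant-pool slot.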
+void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to, int useConstantPool)
+{
+    ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
+
+    if (!useConstantPool) {
+        int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
+
+        if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
+            *insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
+            ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+            return;
+        }
+    }
+    ARMWord* addr = getLdrImmAddress(insn);
+    *addr = reinterpret_cast<ARMWord>(to);
+    ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+}
+
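+// Fix up the pc-relative load at 'loadAddr' so that its offset addresses
+// its entry in the constant pool placed at 'constPoolAddr'.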
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+    ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+    ARMWord index = (*ldr & 0xfff) >> 1;
+
+    ASSERT(diff >= 1);
+    if (diff >= 2 || index > 0) {
+        diff = (diff + index - 2) * sizeof(ARMWord);
+        ASSERT(diff <= 0xfff);
+        *ldr = (*ldr & ~0xfff) | diff;
+    } else
+        *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
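+// Try to encode 'imm' as a single ARM operand-2 immediate: an 8-bit
+// value rotated right by an even amount.  Returns the encoded operand
+// (with OP2_IMM set) or INVALID_IMM if no such encoding exists.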
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+    int rol;
+
+    if (imm <= 0xff)
+        return OP2_IMM | imm;
+
+    if ((imm & 0xff000000) == 0) {
+        imm <<= 8;
+        rol = 8;
+    }
+    else {
+        imm = (imm << 24) | (imm >> 8);
+        rol = 0;
+    }
+
+    if ((imm & 0xff000000) == 0) {
+        imm <<= 8;
+        rol += 4;
+    }
+
+    if ((imm & 0xf0000000) == 0) {
+        imm <<= 4;
+        rol += 2;
+    }
+
+    if ((imm & 0xc0000000) == 0) {
+        imm <<= 2;
+        rol += 1;
+    }
+
+    if ((imm & 0x00ffffff) == 0)
+        return OP2_IMM | (imm >> 24) | (rol << 8);
+
+    return INVALID_IMM;
+}
+
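+// Build 'imm' in 'reg' using exactly two instructions: MOV+ORR when
+// 'positive' is true, otherwise MVN+BIC on the complement.  Returns 1 on
+// success, 0 if the value cannot be split into two operand-2 immediates.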
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+    // Step 1: search for a non-immediate part
+    ARMWord mask;
+    ARMWord imm1;
+    ARMWord imm2;
+    int rol;
+
+    mask = 0xff000000;
+    rol = 8;
+    while(1) {
+        if ((imm & mask) == 0) {
+            imm = (imm << rol) | (imm >> (32 - rol));
+            rol = 4 + (rol >> 1);
+            break;
+        }
+        rol += 2;
+        mask >>= 2;
+        if (mask & 0x3) {
+            // rol 8
+            imm = (imm << 8) | (imm >> 24);
+            mask = 0xff00;
+            rol = 24;
+            while (1) {
+                if ((imm & mask) == 0) {
+                    imm = (imm << rol) | (imm >> (32 - rol));
+                    rol = (rol >> 1) - 8;
+                    break;
+                }
+                rol += 2;
+                mask >>= 2;
+                if (mask & 0x3)
+                    return 0;
+            }
+            break;
+        }
+    }
+
+    ASSERT((imm & 0xff) == 0);
+
+    if ((imm & 0xff000000) == 0) {
+        imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+        imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+    } else if (imm & 0xc0000000) {
+        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+        imm <<= 8;
+        rol += 4;
+
+        if ((imm & 0xff000000) == 0) {
+            imm <<= 8;
+            rol += 4;
+        }
+
+        if ((imm & 0xf0000000) == 0) {
+            imm <<= 4;
+            rol += 2;
+        }
+
+        if ((imm & 0xc0000000) == 0) {
+            imm <<= 2;
+            rol += 1;
+        }
+
+        if ((imm & 0x00ffffff) == 0)
+            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+        else
+            return 0;
+    } else {
+        if ((imm & 0xf0000000) == 0) {
+            imm <<= 4;
+            rol += 2;
+        }
+
+        if ((imm & 0xc0000000) == 0) {
+            imm <<= 2;
+            rol += 1;
+        }
+
+        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+        imm <<= 8;
+        rol += 4;
+
+        if ((imm & 0xf0000000) == 0) {
+            imm <<= 4;
+            rol += 2;
+        }
+
+        if ((imm & 0xc0000000) == 0) {
+            imm <<= 2;
+            rol += 1;
+        }
+
+        if ((imm & 0x00ffffff) == 0)
+            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+        else
+            return 0;
+    }
+
+    if (positive) {
+        mov_r(reg, imm1);
+        orr_r(reg, reg, imm2);
+    } else {
+        mvn_r(reg, imm1);
+        bic_r(reg, reg, imm2);
+    }
+
+    return 1;
+}
+
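+// Return an operand-2 encoding of 'imm' if one exists (possibly the
+// inverted form when 'invert' is allowed); otherwise materialize the
+// value in 'tmpReg' and return that register.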
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+    ARMWord tmp;
+
+    // Do it with one instruction
+    tmp = getOp2(imm);
+    if (tmp != INVALID_IMM)
+        return tmp;
+
+    tmp = getOp2(~imm);
+    if (tmp != INVALID_IMM) {
+        if (invert)
+            return tmp | OP2_INV_IMM;
+        mvn_r(tmpReg, tmp);
+        return tmpReg;
+    }
+
+    return encodeComplexImm(imm, tmpReg);
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+    ARMWord tmp;
+
+    // Do it with one instruction
+    tmp = getOp2(imm);
+    if (tmp != INVALID_IMM) {
+        mov_r(dest, tmp);
+        return;
+    }
+
+    tmp = getOp2(~imm);
+    if (tmp != INVALID_IMM) {
+        mvn_r(dest, tmp);
+        return;
+    }
+
+    encodeComplexImm(imm, dest);
+}
+
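+// Load an immediate that has no single-instruction encoding: MOVW/MOVT
+// on ARMv7 and above, otherwise a two-instruction sequence or, failing
+// that, a constant-pool load.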
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+    ARMWord tmp;
+
+#if WTF_ARM_ARCH_VERSION >= 7
+    tmp = getImm16Op2(imm);
+    if (tmp != INVALID_IMM) {
+        movw_r(dest, tmp);
+        return dest;
+    }
+    movw_r(dest, getImm16Op2(imm & 0xffff));
+    movt_r(dest, getImm16Op2(imm >> 16));
+    return dest;
+#else
+    // Do it with two instructions
+    if (genInt(dest, imm, true))
+        return dest;
+    if (genInt(dest, ~imm, false))
+        return dest;
+
+    ldr_imm(dest, imm);
+    return dest;
+#endif
+}
+
+// Memory load/store helpers
+
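+// Emit a 32-bit load or store of 'srcDst' at [base +/- offset].  Offsets
+// of up to 12 bits are encoded directly; larger offsets are composed via
+// the scratch register S0.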
+void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+    if (offset >= 0) {
+        if (offset <= 0xfff)
+            dtr_u(isLoad, srcDst, base, offset);
+        else if (offset <= 0xfffff) {
+            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+            dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+        } else {
+            ARMWord reg = getImm(offset, ARMRegisters::S0);
+            dtr_ur(isLoad, srcDst, base, reg);
+        }
+    } else {
+        offset = -offset;
+        if (offset <= 0xfff)
+            dtr_d(isLoad, srcDst, base, offset);
+        else if (offset <= 0xfffff) {
+            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+            dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+        } else {
+            ARMWord reg = getImm(offset, ARMRegisters::S0);
+            dtr_dr(isLoad, srcDst, base, reg);
+        }
+    }
+}
+
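+// Emit a 32-bit load or store at [base + (index << scale) + offset],
+// composing the effective address in the scratch register S0.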
+void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+    ARMWord op2;
+
+    ASSERT(scale >= 0 && scale <= 3);
+    op2 = lsl(index, scale);
+
+    if (offset >= 0 && offset <= 0xfff) {
+        add_r(ARMRegisters::S0, base, op2);
+        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
+        return;
+    }
+    if (offset <= 0 && offset >= -0xfff) {
+        add_r(ARMRegisters::S0, base, op2);
+        dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
+        return;
+    }
+
+    ldr_un_imm(ARMRegisters::S0, offset);
+    add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
+    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+    if (offset & 0x3) {
+        if (offset <= 0x3ff && offset >= 0) {
+            fdtr_u(isLoad, srcDst, base, offset >> 2);
+            return;
+        }
+        if (offset <= 0x3ffff && offset >= 0) {
+            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
+            fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+            return;
+        }
+        offset = -offset;
+
+        if (offset <= 0x3ff && offset >= 0) {
+            fdtr_d(isLoad, srcDst, base, offset >> 2);
+            return;
+        }
+        if (offset <= 0x3ffff && offset >= 0) {
+            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
+            fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+            return;
+        }
+        offset = -offset;
+    }
+
+    ldr_un_imm(ARMRegisters::S0, offset);
+    add_r(ARMRegisters::S0, ARMRegisters::S0, base);
+    fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
+}
+
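+// Flush any pending constant pool, copy the finished code into memory
+// obtained from 'allocator', and resolve the recorded jumps whose
+// targets are already known.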
+void* ARMAssembler::executableCopy(ExecutablePool* allocator)
+{
+    // 64-bit alignment is required for next constant pool and JIT code as well
+    m_buffer.flushWithoutBarrier(true);
+    if (m_buffer.uncheckedSize() & 0x7)
+        bkpt(0);
+
+    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));
+
+    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+        // The last bit is set if the constant must be placed in the constant pool.
+        int pos = (*iter) & (~0x1);
+        ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
+        ARMWord offset = *getLdrImmAddress(ldrAddr);
+        if (offset != 0xffffffff) {
+            JmpSrc jmpSrc(pos);
+            linkBranch(data, jmpSrc, data + offset, ((*iter) & 1));
+        }
+    }
+
+    return data;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -0,0 +1,1119 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+// Some debug code uses s(n)printf for instruction logging.
+#include <stdio.h>
+
+#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
+
+#include "AssemblerBufferWithConstantPool.h"
+#include <wtf/Assertions.h>
+
+#include "methodjit/Logging.h"
+#define IPFX  "        "
+#ifdef JS_METHODJIT_SPEW
+#define FIXME_INSN_PRINTING                                \
+    do {                                                   \
+        js::JaegerSpew(js::JSpew_Insns,                    \
+                       IPFX "FIXME insn printing %s:%d\n", \
+                       __FILE__, __LINE__);                \
+    } while (0)
+#else
+#define FIXME_INSN_PRINTING ((void) 0)
+#endif
+
+namespace JSC {
+
+    typedef uint32_t ARMWord;
+
+    namespace ARMRegisters {
+        typedef enum {
+            r0 = 0,
+            r1,
+            r2,
+            r3,
+            S0 = r3,
+            r4,
+            r5,
+            r6,
+            r7,
+            r8,
+            S1 = r8,
+            r9,
+            r10,
+            r11,
+            r12,
+            ip = r12,
+            r13,
+            sp = r13,
+            r14,
+            lr = r14,
+            r15,
+            pc = r15
+        } RegisterID;
+
+        typedef enum {
+            d0,
+            d1,
+            d2,
+            d3,
+            SD0 = d3
+        } FPRegisterID;
+
+    } // namespace ARMRegisters
+
+    class ARMAssembler {
+    public:
+        typedef ARMRegisters::RegisterID RegisterID;
+        typedef ARMRegisters::FPRegisterID FPRegisterID;
+        typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+        typedef SegmentedVector<int, 64> Jumps;
+
+        ARMAssembler() { }
+
+        // ARM conditional constants
+        typedef enum {
+            EQ = 0x00000000, // Zero
+            NE = 0x10000000, // Non-zero
+            CS = 0x20000000, // Carry set (unsigned >=)
+            CC = 0x30000000, // Carry clear (unsigned <)
+            MI = 0x40000000, // Negative
+            PL = 0x50000000, // Positive or zero
+            VS = 0x60000000, // Overflow
+            VC = 0x70000000, // No overflow
+            HI = 0x80000000, // Unsigned >
+            LS = 0x90000000, // Unsigned <=
+            GE = 0xa0000000, // Signed >=
+            LT = 0xb0000000, // Signed <
+            GT = 0xc0000000, // Signed >
+            LE = 0xd0000000, // Signed <=
+            AL = 0xe0000000  // Always
+        } Condition;
+
+        // ARM instruction constants
+        enum {
+            AND = (0x0 << 21),
+            EOR = (0x1 << 21),
+            SUB = (0x2 << 21),
+            RSB = (0x3 << 21),
+            ADD = (0x4 << 21),
+            ADC = (0x5 << 21),
+            SBC = (0x6 << 21),
+            RSC = (0x7 << 21),
+            TST = (0x8 << 21),
+            TEQ = (0x9 << 21),
+            CMP = (0xa << 21),
+            CMN = (0xb << 21),
+            ORR = (0xc << 21),
+            MOV = (0xd << 21),
+            BIC = (0xe << 21),
+            MVN = (0xf << 21),
+            MUL = 0x00000090,
+            MULL = 0x00c00090,
+            FADDD = 0x0e300b00,
+            FDIVD = 0x0e800b00,
+            FSUBD = 0x0e300b40,
+            FMULD = 0x0e200b00,
+            FCMPD = 0x0eb40b40,
+            DTR = 0x05000000,
+            LDRH = 0x00100090,
+            STRH = 0x00000090,
+            STMDB = 0x09200000,
+            LDMIA = 0x08b00000,
+            FDTR = 0x0d000b00,
+            B = 0x0a000000,
+            BL = 0x0b000000,
+#ifndef __ARM_ARCH_4__
+            BX = 0x012fff10,    // Only on ARMv4T+!
+#endif
+            FMSR = 0x0e000a10,
+            FMRS = 0x0e100a10,
+            FSITOD = 0x0eb80bc0,
+            FTOSID = 0x0ebd0b40,
+            FMSTAT = 0x0ef1fa10
+#if WTF_ARM_ARCH_VERSION >= 5
+           ,CLZ = 0x016f0f10,
+            BKPT = 0xe120070,
+            BLX_R = 0x012fff30
+#endif
+#if WTF_ARM_ARCH_VERSION >= 7
+           ,MOVW = 0x03000000,
+            MOVT = 0x03400000
+#endif
+        };
+
+        enum {
+            OP2_IMM = (1 << 25),
+            OP2_IMMh = (1 << 22),
+            OP2_INV_IMM = (1 << 26),
+            SET_CC = (1 << 20),
+            OP2_OFSREG = (1 << 25),
+            DT_UP = (1 << 23),
+            DT_WB = (1 << 21),
+            // This flag is included in LDR and STR
+            DT_PRE = (1 << 24),
+            HDT_UH = (1 << 5),
+            DT_LOAD = (1 << 20)
+        };
+
+        // Masks of ARM instructions
+        enum {
+            BRANCH_MASK = 0x00ffffff,
+            NONARM = 0xf0000000,
+            SDT_MASK = 0x0c000000,
+            SDT_OFFSET_MASK = 0xfff
+        };
+
+        enum {
+            BOFFSET_MIN = -0x00800000,
+            BOFFSET_MAX = 0x007fffff,
+            SDT = 0x04000000
+        };
+
+        enum {
+            padForAlign8  = 0x00,
+            padForAlign16 = 0x0000,
+            padForAlign32 = 0xee120070
+        };
+
+        typedef enum {
+            LSL = 0,
+            LSR = 1,
+            ASR = 2,
+            ROR = 3
+        } Shift;
+
+        static const ARMWord INVALID_IMM = 0xf0000000;
+
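+        // A JmpSrc records the buffer offset of a branch whose target is
+        // fixed up later; a JmpDst records the offset of a branch target
+        // (a label).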
+        class JmpSrc {
+            friend class ARMAssembler;
+        public:
+            JmpSrc()
+                : m_offset(-1)
+            {
+            }
+
+        private:
+            JmpSrc(int offset)
+                : m_offset(offset)
+            {
+            }
+
+            int m_offset;
+        };
+
+        class JmpDst {
+            friend class ARMAssembler;
+        public:
+            JmpDst()
+                : m_offset(-1)
+                , m_used(false)
+            {
+            }
+
+            bool isUsed() const { return m_used; }
+            void used() { m_used = true; }
+            bool isValid() const { return m_offset != -1; }
+        private:
+            JmpDst(int offset)
+                : m_offset(offset)
+                , m_used(false)
+            {
+                ASSERT(m_offset == offset);
+            }
+
+            int m_offset : 31;
+            int m_used : 1;
+        };
+
+        // Instruction formatting
+
+        void emitInst(ARMWord op, int rd, int rn, ARMWord op2)
+        {
+            ASSERT ( ((op2 & ~OP2_IMM) <= 0xfff) || (((op2 & ~OP2_IMMh) <= 0xfff)) );
+            m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+        }
+
+        void and_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("and", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | AND, rd, rn, op2);
+        }
+
+        void ands_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("ands", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | AND | SET_CC, rd, rn, op2);
+        }
+
+        void eor_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("eor", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | EOR, rd, rn, op2);
+        }
+
+        void eors_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("eors", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | EOR | SET_CC, rd, rn, op2);
+        }
+
+        void sub_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("sub", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | SUB, rd, rn, op2);
+        }
+
+        void subs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("subs", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | SUB | SET_CC, rd, rn, op2);
+        }
+
+        void rsb_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("rsb", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | RSB, rd, rn, op2);
+        }
+
+        void rsbs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("rsbs", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | RSB | SET_CC, rd, rn, op2);
+        }
+
+        void add_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("add", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | ADD, rd, rn, op2);
+        }
+
+        void adds_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("adds", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | ADD | SET_CC, rd, rn, op2);
+        }
+
+        void adc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("adc", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | ADC, rd, rn, op2);
+        }
+
+        void adcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("adcs", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | ADC | SET_CC, rd, rn, op2);
+        }
+
+        void sbc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("sbc", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | SBC, rd, rn, op2);
+        }
+
+        void sbcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("sbcs", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | SBC | SET_CC, rd, rn, op2);
+        }
+
+        void rsc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("rsc", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | RSC, rd, rn, op2);
+        }
+
+        void rscs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("rscs", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | RSC | SET_CC, rd, rn, op2);
+        }
+
+        void tst_r(int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("tst", cc, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | TST | SET_CC, 0, rn, op2);
+        }
+
+        void teq_r(int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("teq", cc, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | TEQ | SET_CC, 0, rn, op2);
+        }
+
+        void cmp_r(int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("cmp", cc, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | CMP | SET_CC, 0, rn, op2);
+        }
+
+        void orr_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("orr", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | ORR, rd, rn, op2);
+        }
+
+        void orrs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("orrs", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | ORR | SET_CC, rd, rn, op2);
+        }
+
+        void mov_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("mov", cc, rd, op2);
+            emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2);
+        }
+
+#if WTF_ARM_ARCH_VERSION >= 7
+        void movw_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT((op2 | 0xf0fff) == 0xf0fff);
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX    "%-15s %s, 0x%04x\n", "movw", nameGpReg(rd), (op2 & 0xfff) | ((op2 >> 4) & 0xf000));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | MOVW | RD(rd) | op2);
+        }
+
+        void movt_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            ASSERT((op2 | 0xf0fff) == 0xf0fff);
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX    "%-15s %s, 0x%04x\n", "movt", nameGpReg(rd), (op2 & 0xfff) | ((op2 >> 4) & 0xf000));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | MOVT | RD(rd) | op2);
+        }
+#endif
+
+        void movs_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("movs", cc, rd, op2);
+            emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARMRegisters::r0, op2);
+        }
+
+        void bic_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("bic", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | BIC, rd, rn, op2);
+        }
+
+        void bics_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("bics", cc, rd, rn, op2);
+            emitInst(static_cast<ARMWord>(cc) | BIC | SET_CC, rd, rn, op2);
+        }
+
+        void mvn_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("mvn", cc, rd, op2);
+            emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARMRegisters::r0, op2);
+        }
+
+        void mvns_r(int rd, ARMWord op2, Condition cc = AL)
+        {
+            spewInsWithOp2("mvns", cc, rd, op2);
+            emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARMRegisters::r0, op2);
+        }
+
+        void mul_r(int rd, int rn, int rm, Condition cc = AL)
+        {
+            spewInsWithOp2("mul", cc, rd, rn, static_cast<ARMWord>(rm));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+        }
+
+        void muls_r(int rd, int rn, int rm, Condition cc = AL)
+        {
+            spewInsWithOp2("muls", cc, rd, rn, static_cast<ARMWord>(rm));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | SET_CC | RN(rd) | RS(rn) | RM(rm));
+        }
+
+        void mull_r(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, %s, %s, %s\n", "mull", nameGpReg(rdlo), nameGpReg(rdhi), nameGpReg(rn), nameGpReg(rm));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+        }
+
+        void faddd_r(int dd, int dn, int dm, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
+        }
+
+        void fdivd_r(int dd, int dn, int dm, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FDIVD, dd, dn, dm);
+        }
+
+        void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
+        }
+
+        void fmuld_r(int dd, int dn, int dm, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FMULD, dd, dn, dm);
+        }
+
+        void fcmpd_r(int dd, int dm, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
+        }
+
+        void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
+        {
+            char mnemonic[16];
+            snprintf(mnemonic, 16, "ldr%s", nameCC(cc));
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX    "%-15s %s, =0x%x @ (%d) (reusable pool entry)\n", mnemonic, nameGpReg(rd), imm, static_cast<int32_t>(imm));
+            m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true);
+        }
+
+        void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL)
+        {
+            char mnemonic[16];
+            snprintf(mnemonic, 16, "ldr%s", nameCC(cc));
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX    "%-15s %s, =0x%x @ (%d)\n", mnemonic, nameGpReg(rd), imm, static_cast<int32_t>(imm));
+            m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm);
+        }
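+
+        // Note: ldr_imm marks its literal as reusable, so identical constants may share a
+        // single pool slot; ldr_un_imm requests its own slot, presumably so the entry can
+        // be patched independently later (see loadBranchTarget below).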
+
+        // Data transfers like this:
+        //  LDR rd, [rb, +offset]
+        //  STR rd, [rb, +offset]
+        void dtr_u(bool isLoad, int rd, int rb, ARMWord offset, Condition cc = AL)
+        {
+            char const * mnemonic = (isLoad) ? ("ldr") : ("str");
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, #+%u]\n", mnemonic, nameGpReg(rd), nameGpReg(rb), offset);
+            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP, rd, rb, offset);
+        }
+
+        // Data transfers like this:
+        //  LDR rd, [rb, +rm]
+        //  STR rd, [rb, +rm]
+        void dtr_ur(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+        {
+            char const * mnemonic = (isLoad) ? ("ldr") : ("str");
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, +%s]\n", mnemonic, nameGpReg(rd), nameGpReg(rb), nameGpReg(rm));
+            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP | OP2_OFSREG, rd, rb, rm);
+        }
+
+        // Data transfers like this:
+        //  LDR rd, [rb, -offset]
+        //  STR rd, [rb, -offset]
+        void dtr_d(bool isLoad, int rd, int rb, ARMWord offset, Condition cc = AL)
+        {
+            char const * mnemonic = (isLoad) ? ("ldr") : ("str");
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, #-%u]\n", mnemonic, nameGpReg(rd), nameGpReg(rb), offset);
+            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0), rd, rb, offset);
+        }
+
+        // Data transfers like this:
+        //  LDR rd, [rb, -rm]
+        //  STR rd, [rb, -rm]
+        void dtr_dr(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+        {
+            char const * mnemonic = (isLoad) ? ("ldr") : ("str");
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, -%s]\n", mnemonic, nameGpReg(rd), nameGpReg(rb), nameGpReg(rm));
+            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | OP2_OFSREG, rd, rb, rm);
+        }
+
+        void ldrh_r(int rd, int rb, int rm, Condition cc = AL)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, +%s]\n", "ldrh", nameGpReg(rd), nameGpReg(rb), nameGpReg(rm));
+            emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rb, rm);
+        }
+
+        void ldrh_d(int rd, int rb, ARMWord offset, Condition cc = AL)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, #-%u]\n", "ldrh", nameGpReg(rd), nameGpReg(rb), offset);
+            emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_PRE, rd, rb, offset);
+        }
+
+        void ldrh_u(int rd, int rb, ARMWord offset, Condition cc = AL)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, #+%u]\n", "ldrh", nameGpReg(rd), nameGpReg(rb), offset);
+            emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rb, offset);
+        }
+
+        void strh_r(int rb, int rm, int rd, Condition cc = AL)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, [%s, +%s]\n", "strh", nameGpReg(rd), nameGpReg(rb), nameGpReg(rm));
+            emitInst(static_cast<ARMWord>(cc) | STRH | HDT_UH | DT_UP | DT_PRE, rd, rb, rm);
+        }
+
+        void fdtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            ASSERT(op2 <= 0xff);
+            emitInst(static_cast<ARMWord>(cc) | FDTR | DT_UP | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+        }
+
+        void fdtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            ASSERT(op2 <= 0xff);
+            emitInst(static_cast<ARMWord>(cc) | FDTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+        }
+
+        void push_r(int reg, Condition cc = AL)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s {%s}\n", "push", nameGpReg(reg));
+            ASSERT(ARMWord(reg) <= 0xf);
+            m_buffer.putInt(cc | DTR | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+        }
+
+        void pop_r(int reg, Condition cc = AL)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s {%s}\n", "pop", nameGpReg(reg));
+            ASSERT(ARMWord(reg) <= 0xf);
+            m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+        }
+
+        inline void poke_r(int reg, Condition cc = AL)
+        {
+            dtr_d(false, reg, ARMRegisters::sp, 0, cc);
+        }
+
+        inline void peek_r(int reg, Condition cc = AL)
+        {
+            dtr_u(true, reg, ARMRegisters::sp, 0, cc);
+        }
+
+        void fmsr_r(int dd, int rn, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
+        }
+
+        void fmrs_r(int rd, int dn, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FMRS, rd, dn, 0);
+        }
+
+        void fsitod_r(int dd, int dm, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
+        }
+
+        void ftosid_r(int fd, int dm, Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            emitInst(static_cast<ARMWord>(cc) | FTOSID, fd, 0, dm);
+        }
+
+        void fmstat(Condition cc = AL)
+        {
+            FIXME_INSN_PRINTING;
+            m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
+        }
+
+#if WTF_ARM_ARCH_VERSION >= 5
+        void clz_r(int rd, int rm, Condition cc = AL)
+        {
+            spewInsWithOp2("clz", cc, rd, static_cast<ARMWord>(rm));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
+        }
+#endif
+
+        void bkpt(ARMWord value)
+        {
+#if WTF_ARM_ARCH_VERSION >= 5
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s #0x%04x\n", "bkpt", value);
+            m_buffer.putInt(BKPT | ((value & 0xfff0) << 4) | (value & 0xf));
+#else
+            // No BKPT before ARMv5; loading from the inaccessible zero address faults instead.
+            dtr_dr(true, ARMRegisters::S0, ARMRegisters::S0, ARMRegisters::S0);
+#endif
+        }
+
+        // BX is emitted where possible, or an equivalent sequence on ARMv4.
+        void bx_r(int rm, Condition cc = AL)
+        {
+#if (WTF_ARM_ARCH_VERSION >= 5) || defined(__ARM_ARCH_4T__)
+            // ARMv4T+ has BX <reg>.
+            js::JaegerSpew(
+                    js::JSpew_Insns,
+                    IPFX    "bx%-13s %s\n", nameCC(cc), nameGpReg(rm));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | BX | RM(rm));
+#else   // defined(__ARM_ARCH_4__)
+            // ARMv4 has to do "MOV pc, rm". This works on newer architectures
+            // too, but breaks return stack prediction and doesn't interwork on
+            // ARMv4T, so this becomes a special case of ARMv4.
+            mov_r(ARMRegisters::pc, rm, cc);
+#endif
+        }
+
+        // BLX is emitted where possible, or an equivalent (slower) sequence on
+        // ARMv4 or ARMv4T.
+        void blx_r(int rm, Condition cc = AL)
+        {
+            ASSERT((rm >= 0) && (rm <= 14));
+#if WTF_ARM_ARCH_VERSION >= 5
+            // ARMv5+ is the ideal (fast) case, and can use a proper "BLX rm".
+            js::JaegerSpew(
+                    js::JSpew_Insns,
+                    IPFX    "blx%-12s %s\n", nameCC(cc), nameGpReg(rm));
+            m_buffer.putInt(static_cast<ARMWord>(cc) | BLX_R | RM(rm));
+#else   // defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
+            // ARMv4T must do "MOV lr, pc; BX rm".
+            // ARMv4 must do "MOV lr, pc; MOV pc, rm".
+            // Both cases are handled here and by bx_r.
+            ASSERT(rm != 14);
+            ensureSpace(2 * sizeof(ARMWord), 0);
+            mov_r(ARMRegisters::lr, ARMRegisters::pc, cc);
+            bx_r(rm, cc);
+#endif
+        }
+
+        static ARMWord lsl(int reg, ARMWord value)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(value <= 0x1f);
+            return reg | (value << 7) | (LSL << 5);
+        }
+
+        static ARMWord lsr(int reg, ARMWord value)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(value <= 0x1f);
+            return reg | (value << 7) | (LSR << 5);
+        }
+
+        static ARMWord asr(int reg, ARMWord value)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(value <= 0x1f);
+            return reg | (value << 7) | (ASR << 5);
+        }
+
+        static ARMWord lsl_r(int reg, int shiftReg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(shiftReg <= ARMRegisters::pc);
+            return reg | (shiftReg << 8) | (LSL << 5) | 0x10;
+        }
+
+        static ARMWord lsr_r(int reg, int shiftReg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(shiftReg <= ARMRegisters::pc);
+            return reg | (shiftReg << 8) | (LSR << 5) | 0x10;
+        }
+
+        static ARMWord asr_r(int reg, int shiftReg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            ASSERT(shiftReg <= ARMRegisters::pc);
+            return reg | (shiftReg << 8) | (ASR << 5) | 0x10;
+        }
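+
+        // Informal examples of these operand-2 helpers (they just pack the barrel-shifter
+        // fields into the op2 word):
+        //   mov_r(ARMRegisters::r0, lsl(ARMRegisters::r1, 2));        // mov r0, r1, LSL #2
+        //   orr_r(ARMRegisters::r0, ARMRegisters::r0,
+        //         lsr_r(ARMRegisters::r1, ARMRegisters::r2));         // orr r0, r0, r1, LSR r2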
+
+        // General helpers
+
+        int size()
+        {
+            return m_buffer.size();
+        }
+
+        void ensureSpace(int insnSpace, int constSpace)
+        {
+            m_buffer.ensureSpace(insnSpace, constSpace);
+        }
+
+        int sizeOfConstantPool()
+        {
+            return m_buffer.sizeOfConstantPool();
+        }
+
+        JmpDst label()
+        {
+            return JmpDst(m_buffer.size());
+        }
+
+        JmpDst align(int alignment)
+        {
+            while (!m_buffer.isAligned(alignment))
+                mov_r(ARMRegisters::r0, ARMRegisters::r0);
+
+            return label();
+        }
+
+        JmpSrc loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
+        {
+            ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
+            int s = m_buffer.uncheckedSize();
+            ldr_un_imm(rd, 0xffffffff, cc);
+            m_jumps.append(s | (useConstantPool & 0x1));
+            return JmpSrc(s);
+        }
+
+        JmpSrc jmp(Condition cc = AL, int useConstantPool = 0)
+        {
+            return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
+        }
+
+        void* executableCopy(ExecutablePool* allocator);
+
+        // Patching helpers
+
+        static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0);
+        static void linkBranch(void* code, JmpSrc from, void* to, int useConstantPool = 0);
+
+        static void patchPointerInternal(intptr_t from, void* to)
+        {
+            ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+            ARMWord* addr = getLdrImmAddress(insn);
+            *addr = reinterpret_cast<ARMWord>(to);
+            ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+        }
+
+        static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+        {
+            value = (value << 1) + 1;
+            ASSERT(!(value & ~0xfff));
+            return (load & ~0xfff) | value;
+        }
+
+        static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+        // Patch pointers
+
+        static void linkPointer(void* code, JmpDst from, void* to)
+        {
+            patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+        }
+
+        static void repatchInt32(void* from, int32_t to)
+        {
+            patchPointerInternal(reinterpret_cast<intptr_t>(from), reinterpret_cast<void*>(to));
+        }
+
+        static void repatchPointer(void* from, void* to)
+        {
+            patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+        }
+
+        static void repatchLoadPtrToLEA(void* from)
+        {
+            // On ARM this patches an LDR into an ADD. It is a restricted conversion,
+            // from one special case to another, although sufficient for its purpose.
+            ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+            ASSERT((*insn & 0x0ff00f00) == 0x05900000);
+
+            *insn = (*insn & 0xf00ff0ff) | 0x02800000;
+            ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+        }
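+
+        // For reference: 0x05900000 is the opcode pattern of "LDR rd, [rn, #+imm]" and
+        // 0x02800000 that of "ADD rd, rn, #imm"; the patch swaps the opcode bits while
+        // keeping the condition, registers and immediate.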
+
+        static void repatchLEAToLoadPtr(void* from)
+        {
+            // Like repatchLoadPtrToLEA, this is specialized for our purpose.
+            ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+            if ((*insn & 0x0ff00f00) == 0x05900000)
+                return;
+            ASSERT((*insn & 0x0ff00f00) == 0x02800000);
+
+            *insn = (*insn & 0xf00ff0ff) | 0x05900000;
+            ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+        }
+
+        // Linkers
+
+        void linkJump(JmpSrc from, JmpDst to)
+        {
+            ARMWord* insn = reinterpret_cast<ARMWord*>(m_buffer.data()) + (from.m_offset / sizeof(ARMWord));
+            *getLdrImmAddress(insn, m_buffer.poolAddress()) = static_cast<ARMWord>(to.m_offset);
+        }
+
+        static void linkJump(void* code, JmpSrc from, void* to)
+        {
+            linkBranch(code, from, to);
+        }
+
+        static void relinkJump(void* from, void* to)
+        {
+            patchPointerInternal(reinterpret_cast<intptr_t>(from) - sizeof(ARMWord), to);
+        }
+
+        static void linkCall(void* code, JmpSrc from, void* to)
+        {
+            linkBranch(code, from, to, true);
+        }
+
+        static void relinkCall(void* from, void* to)
+        {
+            relinkJump(from, to);
+        }
+
+        // Address operations
+
+        static void* getRelocatedAddress(void* code, JmpSrc jump)
+        {
+            return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + jump.m_offset / sizeof(ARMWord) + 1);
+        }
+
+        static void* getRelocatedAddress(void* code, JmpDst label)
+        {
+            return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + label.m_offset / sizeof(ARMWord));
+        }
+
+        // Address differences
+
+        static int getDifferenceBetweenLabels(JmpDst from, JmpSrc to)
+        {
+            return (to.m_offset + sizeof(ARMWord)) - from.m_offset;
+        }
+
+        static int getDifferenceBetweenLabels(JmpDst from, JmpDst to)
+        {
+            return to.m_offset - from.m_offset;
+        }
+
+        static unsigned getCallReturnOffset(JmpSrc call)
+        {
+            return call.m_offset + sizeof(ARMWord);
+        }
+
+        // Handle immediates
+
+        static ARMWord getOp2Byte(ARMWord imm)
+        {
+            ASSERT(imm <= 0xff);
+            return OP2_IMMh | (imm & 0x0f) | ((imm & 0xf0) << 4) ;
+        }
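+
+        // For illustration: getOp2Byte splits an 8-bit value into the halfword-transfer
+        // immediate form (low nibble in bits 0-3, high nibble in bits 8-11), e.g.
+        // getOp2Byte(0x3c) == (OP2_IMMh | 0x30c).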
+
+        static ARMWord getOp2(ARMWord imm);
+
+#if WTF_ARM_ARCH_VERSION >= 7
+        static ARMWord getImm16Op2(ARMWord imm)
+        {
+            if (imm <= 0xffff)
+                return (imm & 0xf000) << 4 | (imm & 0xfff);
+            return INVALID_IMM;
+        }
+#endif
+        ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+        void moveImm(ARMWord imm, int dest);
+        ARMWord encodeComplexImm(ARMWord imm, int dest);
+
+        // Memory load/store helpers
+
+        void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset);
+        void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+        void doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset);
+
+        // Constant pool handlers
+
+        static ARMWord placeConstantPoolBarrier(int offset)
+        {
+            offset = (offset - sizeof(ARMWord)) >> 2;
+            ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
+            return AL | B | (offset & BRANCH_MASK);
+        }
+
+    private:
+        static char const * nameGpReg(int reg)
+        {
+            ASSERT(reg <= 15);
+            ASSERT(reg >= 0);
+            static char const * names[] = {
+                "r0", "r1", "r2", "r3",
+                "r4", "r5", "r6", "r7",
+                "r8", "r9", "r10", "r11",
+                "ip", "sp", "lr", "pc"
+            };
+            return names[reg];
+        }
+
+        static char const * nameCC(Condition cc)
+        {
+            ASSERT(cc <= AL);
+            ASSERT(cc >= 0);
+            ASSERT((cc & 0x0fffffff) == 0);
+
+            uint32_t    ccIndex = cc >> 28;
+            static char const * names[] = {
+                "eq", "ne",
+                "cs", "cc",
+                "mi", "pl",
+                "vs", "vc",
+                "hi", "ls",
+                "ge", "lt",
+                "gt", "le",
+                "  "        // AL is the default, so don't show it.
+            };
+            return names[ccIndex];
+        }
+
+        // Decodes operand 2 immediate values (for debug output and assertions).
+        inline uint32_t decOp2Imm(uint32_t op2)
+        {
+            ASSERT((op2 & ~0xfff) == 0);
+
+            uint32_t    imm8 = op2 & 0xff;
+            uint32_t    rot = 32 - ((op2 >> 7) & 0x1e);
+
+            return imm8 << (rot & 0x1f);
+        }
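+
+        // Worked example (assuming the rotation does not wrap): for op2 = 0xbff the rotate
+        // field is 0xb (rotate right by 22) and imm8 is 0xff, so the decoded value is
+        // 0xff << 10 == 0x3fc00.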
+
+        // Format the operand 2 argument for debug spew. The operand can be
+        // either an immediate or a register specifier.
+        void fmtOp2(char * out, ARMWord op2)
+        {
+            static char const * const shifts[4] = {"LSL", "LSR", "ASR", "ROR"};
+
+            if ((op2 & OP2_IMM) || (op2 & OP2_IMMh)) {
+                // Immediate values.
+                
+                uint32_t    imm = decOp2Imm(op2 & ~(OP2_IMM | OP2_IMMh));
+                sprintf(out, "#0x%x @ (%d)", imm, static_cast<int32_t>(imm));
+            } else {
+                // Register values.
+
+                char const *    rm = nameGpReg(op2 & 0xf);
+                Shift           type = static_cast<Shift>((op2 >> 5) & 0x3);
+
+                // Bit 4 specifies barrel-shifter parameters in operand 2.
+                if (op2 & (1<<4)) {
+                    // Register-shifted register.
+                    // Example: "r0, LSL r6"
+                    char const *    rs = nameGpReg((op2 >> 8) & 0xf);
+                    sprintf(out, "%s, %s %s", rm, shifts[type], rs);
+                } else {
+                    // Immediate-shifted register.
+                    // Example: "r0, ASR #31"
+                    uint32_t        imm = (op2 >> 7) & 0x1f;
+                    
+                    // Deal with special encodings.
+                    if ((type == LSL) && (imm == 0)) {
+                        // "LSL #0" doesn't shift at all (and is the default).
+                        sprintf(out, "%s", rm);
+                        return;
+                    }
+
+                    if ((type == ROR) && (imm == 0)) {
+                        // "ROR #0" is a special case ("RRX").
+                        sprintf(out, "%s, RRX", rm);
+                        return;
+                    }
+
+                    if (((type == LSR) || (type == ASR)) && (imm == 0)) {
+                        // Both LSR and ASR have a range of 1-32, with 32
+                        // encoded as 0.                  
+                        imm = 32;
+                    }
+
+                    // Print the result.
+
+                    sprintf(out, "%s, %s #%u", rm, shifts[type], imm);
+                }
+            }
+        }
+
+        void spewInsWithOp2(char const * ins, Condition cc, int rd, int rn, ARMWord op2)
+        {
+            char    mnemonic[16];
+            snprintf(mnemonic, 16, "%s%s", ins, nameCC(cc));
+
+            char    op2_fmt[48];
+            fmtOp2(op2_fmt, op2);
+
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, %s, %s\n", mnemonic, nameGpReg(rd), nameGpReg(rn), op2_fmt);
+        }
+
+        void spewInsWithOp2(char const * ins, Condition cc, int r, ARMWord op2)
+        {
+            char    mnemonic[16];
+            snprintf(mnemonic, 16, "%s%s", ins, nameCC(cc));
+
+            char    op2_fmt[48];
+            fmtOp2(op2_fmt, op2);
+
+            js::JaegerSpew(js::JSpew_Insns,
+                    IPFX   "%-15s %s, %s\n", mnemonic, nameGpReg(r), op2_fmt);
+        }
+
+        ARMWord RM(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg;
+        }
+
+        ARMWord RS(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg << 8;
+        }
+
+        ARMWord RD(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg << 12;
+        }
+
+        ARMWord RN(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::pc);
+            return reg << 16;
+        }
+
+        static ARMWord getConditionalField(ARMWord i)
+        {
+            return i & 0xf0000000;
+        }
+
+        int genInt(int reg, ARMWord imm, bool positive);
+
+        ARMBuffer m_buffer;
+        Jumps m_jumps;
+    };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // ARMAssembler_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/ARMv7Assembler.h
@@ -0,0 +1,1851 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARMRegisters {
+    typedef enum {
+        r0,
+        r1,
+        r2,
+        r3,
+        r4,
+        r5,
+        r6,
+        r7, wr = r7,   // thumb work register
+        r8,
+        r9, sb = r9,   // static base
+        r10, sl = r10, // stack limit
+        r11, fp = r11, // frame pointer
+        r12, ip = r12,
+        r13, sp = r13,
+        r14, lr = r14,
+        r15, pc = r15,
+    } RegisterID;
+
+    // s0 == d0 == q0
+    // s4 == d2 == q1
+    // etc
+    typedef enum {
+        s0 = 0,
+        s1 = 1,
+        s2 = 2,
+        s3 = 3,
+        s4 = 4,
+        s5 = 5,
+        s6 = 6,
+        s7 = 7,
+        s8 = 8,
+        s9 = 9,
+        s10 = 10,
+        s11 = 11,
+        s12 = 12,
+        s13 = 13,
+        s14 = 14,
+        s15 = 15,
+        s16 = 16,
+        s17 = 17,
+        s18 = 18,
+        s19 = 19,
+        s20 = 20,
+        s21 = 21,
+        s22 = 22,
+        s23 = 23,
+        s24 = 24,
+        s25 = 25,
+        s26 = 26,
+        s27 = 27,
+        s28 = 28,
+        s29 = 29,
+        s30 = 30,
+        s31 = 31,
+        d0 = 0 << 1,
+        d1 = 1 << 1,
+        d2 = 2 << 1,
+        d3 = 3 << 1,
+        d4 = 4 << 1,
+        d5 = 5 << 1,
+        d6 = 6 << 1,
+        d7 = 7 << 1,
+        d8 = 8 << 1,
+        d9 = 9 << 1,
+        d10 = 10 << 1,
+        d11 = 11 << 1,
+        d12 = 12 << 1,
+        d13 = 13 << 1,
+        d14 = 14 << 1,
+        d15 = 15 << 1,
+        d16 = 16 << 1,
+        d17 = 17 << 1,
+        d18 = 18 << 1,
+        d19 = 19 << 1,
+        d20 = 20 << 1,
+        d21 = 21 << 1,
+        d22 = 22 << 1,
+        d23 = 23 << 1,
+        d24 = 24 << 1,
+        d25 = 25 << 1,
+        d26 = 26 << 1,
+        d27 = 27 << 1,
+        d28 = 28 << 1,
+        d29 = 29 << 1,
+        d30 = 30 << 1,
+        d31 = 31 << 1,
+        q0 = 0 << 2,
+        q1 = 1 << 2,
+        q2 = 2 << 2,
+        q3 = 3 << 2,
+        q4 = 4 << 2,
+        q5 = 5 << 2,
+        q6 = 6 << 2,
+        q7 = 7 << 2,
+        q8 = 8 << 2,
+        q9 = 9 << 2,
+        q10 = 10 << 2,
+        q11 = 11 << 2,
+        q12 = 12 << 2,
+        q13 = 13 << 2,
+        q14 = 14 << 2,
+        q15 = 15 << 2,
+        q16 = 16 << 2,
+        q17 = 17 << 2,
+        q18 = 18 << 2,
+        q19 = 19 << 2,
+        q20 = 20 << 2,
+        q21 = 21 << 2,
+        q22 = 22 << 2,
+        q23 = 23 << 2,
+        q24 = 24 << 2,
+        q25 = 25 << 2,
+        q26 = 26 << 2,
+        q27 = 27 << 2,
+        q28 = 28 << 2,
+        q29 = 29 << 2,
+        q30 = 30 << 2,
+        q31 = 31 << 2,
+    } FPRegisterID;
+}
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+    friend class ARMv7Assembler;
+
+    typedef uint8_t ThumbImmediateType;
+    static const ThumbImmediateType TypeInvalid = 0;
+    static const ThumbImmediateType TypeEncoded = 1;
+    static const ThumbImmediateType TypeUInt16 = 2;
+
+    typedef union {
+        int16_t asInt;
+        struct {
+            unsigned imm8 : 8;
+            unsigned imm3 : 3;
+            unsigned i    : 1;
+            unsigned imm4 : 4;
+        };
+        // If this is an encoded immediate, then it may describe a shift, or a pattern.
+        struct {
+            unsigned shiftValue7 : 7;
+            unsigned shiftAmount : 5;
+        };
+        struct {
+            unsigned immediate   : 8;
+            unsigned pattern     : 4;
+        };
+    } ThumbImmediateValue;
+
+    // byte0 contains the least significant byte; not using an array to make client code endian agnostic.
+    typedef union {
+        int32_t asInt;
+        struct {
+            uint8_t byte0;
+            uint8_t byte1;
+            uint8_t byte2;
+            uint8_t byte3;
+        };
+    } PatternBytes;
+
+    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+    {
+        if (value & ~((1<<N)-1)) /* if any of the top N bits (of 2N bits) are set */
+            value >>= N;         /* lose the bottom N */
+        else                     /* if none of the top N bits are set, */
+            zeros += N;          /* then we have identified N leading zeros */
+    }
+
+    static int32_t countLeadingZeros(uint32_t value)
+    {
+        if (!value)
+            return 32;
+
+        int32_t zeros = 0;
+        countLeadingZerosPartial(value, zeros, 16);
+        countLeadingZerosPartial(value, zeros, 8);
+        countLeadingZerosPartial(value, zeros, 4);
+        countLeadingZerosPartial(value, zeros, 2);
+        countLeadingZerosPartial(value, zeros, 1);
+        return zeros;
+    }
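+
+    // For example, countLeadingZeros(0x0000ffff) == 16 and countLeadingZeros(1) == 31.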
+
+    ARMThumbImmediate()
+        : m_type(TypeInvalid)
+    {
+        m_value.asInt = 0;
+    }
+        
+    ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+        : m_type(type)
+        , m_value(value)
+    {
+    }
+
+    ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+        : m_type(TypeUInt16)
+    {
+        // Make sure this constructor is only reached with type TypeUInt16;
+        // this extra parameter makes the code a little clearer by making it
+        // explicit at call sites which type is being constructed
+        ASSERT_UNUSED(type, type == TypeUInt16);
+
+        m_value.asInt = value;
+    }
+
+public:
+    static ARMThumbImmediate makeEncodedImm(uint32_t value)
+    {
+        ThumbImmediateValue encoding;
+        encoding.asInt = 0;
+
+        // okay, these are easy.
+        if (value < 256) {
+            encoding.immediate = value;
+            encoding.pattern = 0;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        int32_t leadingZeros = countLeadingZeros(value);
+        // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+        ASSERT(leadingZeros < 24);
+
+        // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+        // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+        // zero.  count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+        int32_t rightShiftAmount = 24 - leadingZeros;
+        if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+            // Shift the value down to the low byte position.  The assign to 
+            // shiftValue7 drops the implicit top bit.
+            encoding.shiftValue7 = value >> rightShiftAmount;
+            // The encoded shift amount is the magnitude of a right rotate.
+            encoding.shiftAmount = 8 + leadingZeros;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+        
+        PatternBytes bytes;
+        bytes.asInt = value;
+
+        if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+            encoding.immediate = bytes.byte0;
+            encoding.pattern = 3;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+            encoding.immediate = bytes.byte0;
+            encoding.pattern = 1;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+            encoding.immediate = bytes.byte1;
+            encoding.pattern = 2;
+            return ARMThumbImmediate(TypeEncoded, encoding);
+        }
+
+        return ARMThumbImmediate();
+    }
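+
+    // A few illustrative inputs: values below 256 encode directly; 0x0003fc00 encodes as a
+    // shifted byte (0xff with a rotation); 0x00ab00ab, 0xab00ab00 and 0xabababab hit the
+    // byte-pattern cases (patterns 1, 2 and 3). Anything else yields an invalid immediate.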
+
+    static ARMThumbImmediate makeUInt12(int32_t value)
+    {
+        return (!(value & 0xfffff000))
+            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+            : ARMThumbImmediate();
+    }
+
+    static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+    {
+        // If this is not a 12-bit unsigned int, try making an encoded immediate.
+        return (!(value & 0xfffff000))
+            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+            : makeEncodedImm(value);
+    }
+
+    // The 'make' methods, above, return a !isValid() value if the argument
+    // cannot be represented as the requested type.  This method always
+    // succeeds, since any uint16_t argument can be represented.
+    static ARMThumbImmediate makeUInt16(uint16_t value)
+    {
+        return ARMThumbImmediate(TypeUInt16, value);
+    }
+    
+    bool isValid()
+    {
+        return m_type != TypeInvalid;
+    }
+
+    // These methods rely on the format of encoded byte values.
+    bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+    bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+    bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+    bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+    bool isUInt7() { return !(m_value.asInt & 0xff80); }
+    bool isUInt8() { return !(m_value.asInt & 0xff00); }
+    bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+    bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+    bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+    bool isUInt16() { return m_type == TypeUInt16; }
+    uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+    uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+    uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+    uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+    uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+    uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+    uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+    uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+    uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+    uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+    bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+    ThumbImmediateType m_type;
+    ThumbImmediateValue m_value;
+};
+
+
+typedef enum {
+    SRType_LSL,
+    SRType_LSR,
+    SRType_ASR,
+    SRType_ROR,
+
+    SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ARMv7Assembler;
+class ShiftTypeAndAmount {
+    friend class ARMv7Assembler;
+
+public:
+    ShiftTypeAndAmount()
+    {
+        m_u.type = (ARMShiftType)0;
+        m_u.amount = 0;
+    }
+    
+    ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+    {
+        m_u.type = type;
+        m_u.amount = amount & 31;
+    }
+    
+    unsigned lo4() { return m_u.lo4; }
+    unsigned hi4() { return m_u.hi4; }
+    
+private:
+    union {
+        struct {
+            unsigned lo4 : 4;
+            unsigned hi4 : 4;
+        };
+        struct {
+            unsigned type   : 2;
+            unsigned amount : 5;
+        };
+    } m_u;
+};
+
+
+/*
+Some features of the Thumb instruction set are deprecated in ARMv7. Deprecated features affecting 
+instructions supported by ARMv7-M are as follows: 
+• use of the PC as <Rd> or <Rm> in a 16-bit ADD (SP plus register) instruction 
+• use of the SP as <Rm> in a 16-bit ADD (SP plus register) instruction 
+• use of the SP as <Rm> in a 16-bit CMP (register) instruction 
+• use of MOV (register) instructions in which <Rd> is the SP or PC and <Rm> is also the SP or PC. 
+• use of <Rn> as the lowest-numbered register in the register list of a 16-bit STM instruction with base 
+register writeback 
+*/
+
+class ARMv7Assembler {
+public:
+    ~ARMv7Assembler()
+    {
+        ASSERT(m_jumpsToLink.isEmpty());
+    }
+
+    typedef ARMRegisters::RegisterID RegisterID;
+    typedef ARMRegisters::FPRegisterID FPRegisterID;
+
+    // (HS, LO, HI, LS) -> (AE, B, A, BE)
+    // (VS, VC) -> (O, NO)
+    typedef enum {
+        ConditionEQ,
+        ConditionNE,
+        ConditionHS,
+        ConditionLO,
+        ConditionMI,
+        ConditionPL,
+        ConditionVS,
+        ConditionVC,
+        ConditionHI,
+        ConditionLS,
+        ConditionGE,
+        ConditionLT,
+        ConditionGT,
+        ConditionLE,
+        ConditionAL,
+
+        ConditionCS = ConditionHS,
+        ConditionCC = ConditionLO,
+    } Condition;
+
+    class JmpSrc {
+        friend class ARMv7Assembler;
+        friend class ARMInstructionFormatter;
+    public:
+        JmpSrc()
+            : m_offset(-1)
+        {
+        }
+
+    private:
+        JmpSrc(int offset)
+            : m_offset(offset)
+        {
+        }
+
+        int m_offset;
+    };
+    
+    class JmpDst {
+        friend class ARMv7Assembler;
+        friend class ARMInstructionFormatter;
+    public:
+        JmpDst()
+            : m_offset(-1)
+            , m_used(false)
+        {
+        }
+
+        bool isUsed() const { return m_used; }
+        void used() { m_used = true; }
+    private:
+        JmpDst(int offset)
+            : m_offset(offset)
+            , m_used(false)
+        {
+            ASSERT(m_offset == offset);
+        }
+
+        int m_offset : 31;
+        int m_used : 1;
+    };
+
+private:
+
+    struct LinkRecord {
+        LinkRecord(intptr_t from, intptr_t to)
+            : from(from)
+            , to(to)
+        {
+        }
+
+        intptr_t from;
+        intptr_t to;
+    };
+
+    // ARMv7, Appx-A.6.3
+    bool BadReg(RegisterID reg)
+    {
+        return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
+    }
+
+    bool isSingleRegister(FPRegisterID reg)
+    {
+        // Check that the register number is within the single-precision range (s0-s31).
+        return !(reg & ~31);
+    }
+
+    bool isDoubleRegister(FPRegisterID reg)
+    {
+        // Check that the high bit isn't set (q16+), and that the low bit isn't (s1, s3, etc).
+        return !(reg & ~(31 << 1));
+    }
+
+    bool isQuadRegister(FPRegisterID reg)
+    {
+        return !(reg & ~(31 << 2));
+    }
+
+    uint32_t singleRegisterNum(FPRegisterID reg)
+    {
+        ASSERT(isSingleRegister(reg));
+        return reg;
+    }
+
+    uint32_t doubleRegisterNum(FPRegisterID reg)
+    {
+        ASSERT(isDoubleRegister(reg));
+        return reg >> 1;
+    }
+
+    uint32_t quadRegisterNum(FPRegisterID reg)
+    {
+        ASSERT(isQuadRegister(reg));
+        return reg >> 2;
+    }
+
+    uint32_t singleRegisterMask(FPRegisterID rd, int highBitsShift, int lowBitShift)
+    {
+        uint32_t rdNum = singleRegisterNum(rd);
+        uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+        if (rdNum & 1)
+            rdMask |= 1 << lowBitShift;
+        return rdMask;
+    }
+
+    uint32_t doubleRegisterMask(FPRegisterID rd, int highBitShift, int lowBitsShift)
+    {
+        uint32_t rdNum = doubleRegisterNum(rd);
+        uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+        if (rdNum & 16)
+            rdMask |= 1 << highBitShift;
+        return rdMask;
+    }
+
+    typedef enum {
+        OP_ADD_reg_T1       = 0x1800,
+        OP_ADD_S_reg_T1     = 0x1800,
+        OP_SUB_reg_T1       = 0x1A00,
+        OP_SUB_S_reg_T1     = 0x1A00,
+        OP_ADD_imm_T1       = 0x1C00,
+        OP_ADD_S_imm_T1     = 0x1C00,
+        OP_SUB_imm_T1       = 0x1E00,
+        OP_SUB_S_imm_T1     = 0x1E00,
+        OP_MOV_imm_T1       = 0x2000,
+        OP_CMP_imm_T1       = 0x2800,
+        OP_ADD_imm_T2       = 0x3000,
+        OP_ADD_S_imm_T2     = 0x3000,
+        OP_SUB_imm_T2       = 0x3800,
+        OP_SUB_S_imm_T2     = 0x3800,
+        OP_AND_reg_T1       = 0x4000,
+        OP_EOR_reg_T1       = 0x4040,
+        OP_TST_reg_T1       = 0x4200,
+        OP_CMP_reg_T1       = 0x4280,
+        OP_ORR_reg_T1       = 0x4300,
+        OP_MVN_reg_T1       = 0x43C0,
+        OP_ADD_reg_T2       = 0x4400,
+        OP_MOV_reg_T1       = 0x4600,
+        OP_BLX              = 0x4700,
+        OP_BX               = 0x4700,
+        OP_LDRH_reg_T1      = 0x5A00,
+        OP_STR_reg_T1       = 0x5000,
+        OP_LDR_reg_T1       = 0x5800,
+        OP_STR_imm_T1       = 0x6000,
+        OP_LDR_imm_T1       = 0x6800,
+        OP_LDRH_imm_T1      = 0x8800,
+        OP_STR_imm_T2       = 0x9000,
+        OP_LDR_imm_T2       = 0x9800,
+        OP_ADD_SP_imm_T1    = 0xA800,
+        OP_ADD_SP_imm_T2    = 0xB000,
+        OP_SUB_SP_imm_T1    = 0xB080,
+        OP_BKPT             = 0xBE00,
+        OP_IT               = 0xBF00,
+        OP_NOP_T1           = 0xBF00,
+    } OpcodeID;
+
+    typedef enum {
+        OP_AND_reg_T2   = 0xEA00,
+        OP_TST_reg_T2   = 0xEA10,
+        OP_ORR_reg_T2   = 0xEA40,
+        OP_ASR_imm_T1   = 0xEA4F,
+        OP_LSL_imm_T1   = 0xEA4F,
+        OP_LSR_imm_T1   = 0xEA4F,
+        OP_ROR_imm_T1   = 0xEA4F,
+        OP_MVN_reg_T2   = 0xEA6F,
+        OP_EOR_reg_T2   = 0xEA80,
+        OP_ADD_reg_T3   = 0xEB00,
+        OP_ADD_S_reg_T3 = 0xEB10,
+        OP_SUB_reg_T2   = 0xEBA0,
+        OP_SUB_S_reg_T2 = 0xEBB0,
+        OP_CMP_reg_T2   = 0xEBB0,
+        OP_B_T4a        = 0xF000,
+        OP_AND_imm_T1   = 0xF000,
+        OP_TST_imm      = 0xF010,
+        OP_ORR_imm_T1   = 0xF040,
+        OP_MOV_imm_T2   = 0xF040,
+        OP_MVN_imm      = 0xF060,
+        OP_EOR_imm_T1   = 0xF080,
+        OP_ADD_imm_T3   = 0xF100,
+        OP_ADD_S_imm_T3 = 0xF110,
+        OP_CMN_imm      = 0xF110,
+        OP_SUB_imm_T3   = 0xF1A0,
+        OP_SUB_S_imm_T3 = 0xF1B0,
+        OP_CMP_imm_T2   = 0xF1B0,
+        OP_ADD_imm_T4   = 0xF200,
+        OP_MOV_imm_T3   = 0xF240,
+        OP_SUB_imm_T4   = 0xF2A0,
+        OP_MOVT         = 0xF2C0,
+        OP_NOP_T2a      = 0xF3AF,
+        OP_LDRH_reg_T2  = 0xF830,
+        OP_LDRH_imm_T3  = 0xF830,
+        OP_STR_imm_T4   = 0xF840,
+        OP_STR_reg_T2   = 0xF840,
+        OP_LDR_imm_T4   = 0xF850,
+        OP_LDR_reg_T2   = 0xF850,
+        OP_LDRH_imm_T2  = 0xF8B0,
+        OP_STR_imm_T3   = 0xF8C0,
+        OP_LDR_imm_T3   = 0xF8D0,
+        OP_LSL_reg_T2   = 0xFA00,
+        OP_LSR_reg_T2   = 0xFA20,
+        OP_ASR_reg_T2   = 0xFA40,
+        OP_ROR_reg_T2   = 0xFA60,
+        OP_SMULL_T1     = 0xFB80,
+    } OpcodeID1;
+
+    typedef enum {
+        OP_B_T4b        = 0x9000,
+        OP_NOP_T2b      = 0x8000,
+    } OpcodeID2;
+
+    struct FourFours {
+        FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+        {
+            m_u.f0 = f0;
+            m_u.f1 = f1;
+            m_u.f2 = f2;
+            m_u.f3 = f3;
+        }
+
+        union {
+            unsigned value;
+            struct {
+                unsigned f0 : 4;
+                unsigned f1 : 4;
+                unsigned f2 : 4;
+                unsigned f3 : 4;
+            };
+        } m_u;
+    };
+
+    class ARMInstructionFormatter;
+
+    // false means else!
+    bool ifThenElseConditionBit(Condition condition, bool isIf)
+    {
+        return isIf ? (condition & 1) : !(condition & 1);
+    }
+    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+    {
+        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+            | (ifThenElseConditionBit(condition, inst3if) << 2)
+            | (ifThenElseConditionBit(condition, inst4if) << 1)
+            | 1;
+        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+        return (condition << 4) | mask;
+    }
+    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+    {
+        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+            | (ifThenElseConditionBit(condition, inst3if) << 2)
+            | 2;
+        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+        return (condition << 4) | mask;
+    }
+    uint8_t ifThenElse(Condition condition, bool inst2if)
+    {
+        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+            | 4;
+        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+        return (condition << 4) | mask;
+    }
+
+    uint8_t ifThenElse(Condition condition)
+    {
+        int mask = 8;
+        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+        return (condition << 4) | mask;
+    }
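+
+    // Illustrative usage (see it() below): it(ConditionEQ) covers a single following
+    // instruction executed only if EQ; it(ConditionEQ, false) is an "ITE EQ", i.e. the next
+    // instruction runs if EQ and the one after it runs if NE.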
+
+public:
+
+    void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+
+        if (rn == ARMRegisters::sp) {
+            if (!(rd & 8) && imm.isUInt10()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2);
+                return;
+            } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
+                m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2);
+                return;
+            }
+        } else if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        if (imm.isEncodedImm())
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+        else {
+            ASSERT(imm.isUInt12());
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+        }
+    }
+
+    void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // NOTE: In an IT block, add doesn't modify the flags register.
+    void add(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (rd == rn)
+            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+        else if (rd == rm)
+            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+        else if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+        else
+            add(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    // Not allowed in an IT (if then) block.
+    void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isEncodedImm());
+
+        if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_S_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+    }
+
+    // Not allowed in an IT (if then) block?
+    void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // Not allowed in an IT (if then) block.
+    void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_reg_T1, rm, rn, rd);
+        else
+            add_S(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+    }
+
+    void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if ((rd == rn) && !((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+        else if ((rd == rm) && !((rd | rn) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+        else
+            ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+
+    // Only allowed in IT (if then) block if last instruction.
+    JmpSrc b()
+    {
+        m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+        return JmpSrc(m_formatter.size());
+    }
+    
+    // Only allowed in IT (if then) block if last instruction.
+    JmpSrc blx(RegisterID rm)
+    {
+        ASSERT(rm != ARMRegisters::pc);
+        m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+        return JmpSrc(m_formatter.size());
+    }
+
+    // Only allowed in IT (if then) block if last instruction.
+    JmpSrc bx(RegisterID rm)
+    {
+        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+        return JmpSrc(m_formatter.size());
+    }
+
+    void bkpt(uint8_t imm=0)
+    {
+        m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+    }
+
+    void cmn(RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isEncodedImm());
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+    }
+
+    void cmp(RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isEncodedImm());
+
+        if (!(rn & 8) && imm.isUInt8())
+            m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+        else
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+    }
+
+    void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+    }
+
+    void cmp(RegisterID rn, RegisterID rm)
+    {
+        if ((rn | rm) & 8)
+            cmp(rn, rm, ShiftTypeAndAmount());
+        else
+            m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+    }
+
+    // xor is not spelled with an 'e'. :-(
+    void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+    }
+
+    // xor is not spelled with an 'e'. :-(
+    void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // xor is not spelled with an 'e'. :-(
+    void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if ((rd == rn) && !((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+        else if ((rd == rm) && !((rd | rn) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+        else
+            eor(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    void it(Condition cond)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+    }
+
+    void it(Condition cond, bool inst2if)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+    }
+
+    void it(Condition cond, bool inst2if, bool inst3if)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+    }
+
+    void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+    {
+        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(imm.isUInt12());
+
+        if (!((rt | rn) & 8) && imm.isUInt7())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+            m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2);
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+    }
+
+    // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then this is a post-index load.
+    //
+    // If wback is set rn is updated - this is a pre or post index load,
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+    // if (wback) REG[rn] = _tmp
+    void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+    
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT((offset & ~0xff) == 0);
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+        ASSERT(imm.isUInt12());
+
+        if (!((rt | rn) & 8) && imm.isUInt6())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+    }
+
+    // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then it is a post-indexed load.
+    //
+    // If wback is set, rn is updated - this is a pre- or post-indexed load;
+    // if wback is not set, this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+    // if (wback) REG[rn] = _tmp
+    void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+    
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT((offset & ~0xff) == 0);
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+    }
+
+    void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+    {
+        ASSERT(!BadReg(rt));   // Memory hint
+        ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+
+    void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+
+    void movT3(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isValid());
+        ASSERT(!imm.isEncodedImm());
+        ASSERT(!BadReg(rd));
+        
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+    }
+
+    void mov(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isValid());
+        ASSERT(!BadReg(rd));
+        
+        if ((rd < 8) && imm.isUInt8())
+            m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+        else if (imm.isEncodedImm())
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+        else
+            movT3(rd, imm);
+    }
+
+    void mov(RegisterID rd, RegisterID rm)
+    {
+        m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+    }
+
+    void movt(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isUInt16());
+        ASSERT(!BadReg(rd));
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+    }
+
+    void mvn(RegisterID rd, ARMThumbImmediate imm)
+    {
+        ASSERT(imm.isEncodedImm());
+        ASSERT(!BadReg(rd));
+        
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+    }
+
+    void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void mvn(RegisterID rd, RegisterID rm)
+    {
+        if (!((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+        else
+            mvn(rd, rm, ShiftTypeAndAmount());
+    }
+
+    void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+    }
+
+    void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if ((rd == rn) && !((rd | rm) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+        else if ((rd == rm) && !((rd | rn) & 8))
+            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+        else
+            orr(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rm));
+        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rd));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+    }
+
+    void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+    {
+        ASSERT(!BadReg(rdLo));
+        ASSERT(!BadReg(rdHi));
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        ASSERT(rdLo != rdHi);
+        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isUInt12());
+
+        if (!((rt | rn) & 8) && imm.isUInt7())
+            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2);
+        else
+            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+    }
+
+    // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then it is a post-indexed store.
+    //
+    // If wback is set, rn is updated - this is a pre- or post-indexed store;
+    // if wback is not set, this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // MEM[index ? _tmp : _reg] = REG[rt]
+    // if (wback) REG[rn] = _tmp
+    void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+    {
+        ASSERT(rt != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(index || wback);
+        ASSERT(!wback | (rt != rn));
+    
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        ASSERT((offset & ~0xff) == 0);
+        
+        offset |= (wback << 8);
+        offset |= (add   << 9);
+        offset |= (index << 10);
+        offset |= (1 << 11);
+        
+        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+    }
+
+    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+    void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+    {
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        ASSERT(shift <= 3);
+
+        if (!shift && !((rt | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+        else
+            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+    }
+
+    void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+
+        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+            return;
+        } else if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        if (imm.isEncodedImm())
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+        else {
+            ASSERT(imm.isUInt12());
+            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+        }
+    }
+
+    void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // NOTE: In an IT block, sub doesn't modify the flags register.
+    void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+        else
+            sub(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    // Not allowed in an IT (if then) block.
+    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+    {
+        // Rd can only be SP if Rn is also SP.
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(imm.isValid());
+
+        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+            return;
+        } else if (!((rd | rn) & 8)) {
+            if (imm.isUInt3()) {
+                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+                return;
+            } else if ((rd == rn) && imm.isUInt8()) {
+                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_S_imm_T2, rd, imm.getUInt8());
+                return;
+            }
+        }
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+    }
+
+    // Not allowed in an IT (if then) block?
+    void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+        ASSERT(rd != ARMRegisters::pc);
+        ASSERT(rn != ARMRegisters::pc);
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+    }
+
+    // Not allowed in an IT (if then) block.
+    void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+    {
+        if (!((rd | rn | rm) & 8))
+            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_reg_T1, rm, rn, rd);
+        else
+            sub_S(rd, rn, rm, ShiftTypeAndAmount());
+    }
+
+    void tst(RegisterID rn, ARMThumbImmediate imm)
+    {
+        ASSERT(!BadReg(rn));
+        ASSERT(imm.isEncodedImm());
+
+        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+    }
+
+    void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+    {
+        ASSERT(!BadReg(rn));
+        ASSERT(!BadReg(rm));
+        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+    }
+
+    void tst(RegisterID rn, RegisterID rm)
+    {
+        if ((rn | rm) & 8)
+            tst(rn, rm, ShiftTypeAndAmount());
+        else
+            m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+    }
+
+    void vadd_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+    {
+        m_formatter.vfpOp(0x0b00ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+    }
+
+    void vcmp_F64(FPRegisterID rd, FPRegisterID rm)
+    {
+        m_formatter.vfpOp(0x0bc0eeb4 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rm, 21, 16));
+    }
+
+    void vcvt_F64_S32(FPRegisterID fd, FPRegisterID sm)
+    {
+        m_formatter.vfpOp(0x0bc0eeb8 | doubleRegisterMask(fd, 6, 28) | singleRegisterMask(sm, 16, 21));
+    }
+
+    void vcvt_S32_F64(FPRegisterID sd, FPRegisterID fm)
+    {
+        m_formatter.vfpOp(0x0bc0eebd | singleRegisterMask(sd, 28, 6) | doubleRegisterMask(fm, 21, 16));
+    }
+
+    void vldr(FPRegisterID rd, RegisterID rn, int32_t imm)
+    {
+        vmem(rd, rn, imm, true);
+    }
+
+    void vmov(RegisterID rd, FPRegisterID sn)
+    {
+        m_formatter.vfpOp(0x0a10ee10 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+    }
+
+    void vmov(FPRegisterID sn, RegisterID rd)
+    {
+        m_formatter.vfpOp(0x0a10ee00 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+    }
+
+    // move FPSCR flags to APSR.
+    void vmrs_APSR_nzcv_FPSCR()
+    {
+        m_formatter.vfpOp(0xfa10eef1);
+    }
+
+    void vmul_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+    {
+        m_formatter.vfpOp(0x0b00ee20 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+    }
+
+    void vstr(FPRegisterID rd, RegisterID rn, int32_t imm)
+    {
+        vmem(rd, rn, imm, false);
+    }
+
+    void vsub_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+    {
+        m_formatter.vfpOp(0x0b40ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+    }
+
+
+    JmpDst label()
+    {
+        return JmpDst(m_formatter.size());
+    }
+    
+    JmpDst align(int alignment)
+    {
+        while (!m_formatter.isAligned(alignment))
+            bkpt();
+
+        return label();
+    }
+    
+    static void* getRelocatedAddress(void* code, JmpSrc jump)
+    {
+        ASSERT(jump.m_offset != -1);
+
+        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+    }
+    
+    static void* getRelocatedAddress(void* code, JmpDst destination)
+    {
+        ASSERT(destination.m_offset != -1);
+
+        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+    }
+    
+    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+    {
+        return dst.m_offset - src.m_offset;
+    }
+    
+    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+    {
+        return dst.m_offset - src.m_offset;
+    }
+    
+    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+    {
+        return dst.m_offset - src.m_offset;
+    }
+    
+    // Assembler admin methods:
+
+    size_t size() const
+    {
+        return m_formatter.size();
+    }
+
+    void* executableCopy(ExecutablePool* allocator)
+    {
+        void* copy = m_formatter.executableCopy(allocator);
+
+        unsigned jumpCount = m_jumpsToLink.size();
+        for (unsigned i = 0; i < jumpCount; ++i) {
+            uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from);
+            uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to);
+            linkJumpAbsolute(location, target);
+        }
+        m_jumpsToLink.clear();
+
+        ASSERT(copy);
+        return copy;
+    }
+
+    static unsigned getCallReturnOffset(JmpSrc call)
+    {
+        ASSERT(call.m_offset >= 0);
+        return call.m_offset;
+    }
+
+    // Linking & patching:
+    //
+    // 'link' and 'patch' methods are for use on unprotected code - such as the code
+    // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
+    // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+    // pool the 'repatch' and 'relink' methods should be used.
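+    //
+    // Informal sketch of the intended flow (the names 'masm', 'pool', 'addr' and
+    // 'newTarget' are hypothetical):
+    //
+    //     masm.linkJump(src, dst);                      // while still in the AssemblerBuffer
+    //     void* code = masm.executableCopy(pool);       // links are resolved during the copy
+    //     ARMv7Assembler::relinkJump(addr, newTarget);  // later, on finalized executable code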
+
+    void linkJump(JmpSrc from, JmpDst to)
+    {
+        ASSERT(to.m_offset != -1);
+        ASSERT(from.m_offset != -1);
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset));
+    }
+
+    static void linkJump(void* code, JmpSrc from, void* to)
+    {
+        ASSERT(from.m_offset != -1);
+        
+        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+        linkJumpAbsolute(location, to);
+    }
+
+    // This method is static since it is used by the LinkBuffer.
+    // return a bool saying whether the link was successful?
+    static void linkCall(void* code, JmpSrc from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+        ASSERT(from.m_offset != -1);
+        ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
+    }
+
+    static void linkPointer(void* code, JmpDst where, void* value)
+    {
+        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+    }
+
+    static void relinkJump(void* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
+
+        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
+    }
+    
+    static void relinkCall(void* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+        ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+
+        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
+    }
+
+    static void repatchInt32(void* where, int32_t value)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+        
+        setInt32(where, value);
+
+        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+    }
+
+    static void repatchPointer(void* where, void* value)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+        
+        setPointer(where, value);
+
+        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+    }
+
+    static void repatchLoadPtrToLEA(void* where)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+        uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
+        ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
+
+        *loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
+        ExecutableAllocator::cacheFlush(loadOp, sizeof(uint16_t));
+    }
+
+    static void repatchLEAToLoadPtr(void* where)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+        uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
+        if ((*loadOp & 0xfff0) == OP_LDR_reg_T2)
+            return;
+
+        ASSERT((*loadOp & 0xfff0) == OP_ADD_reg_T3);
+
+        *loadOp = OP_LDR_reg_T2 | (*loadOp & 0xf);
+        ExecutableAllocator::cacheFlush(loadOp, sizeof(uint16_t));
+    }
+
+private:
+
+    // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+    // (i.e. +/-(0..255) 32-bit words)
+    void vmem(FPRegisterID rd, RegisterID rn, int32_t imm, bool isLoad)
+    {
+        bool up;
+        uint32_t offset;
+        if (imm < 0) {
+            offset = -imm;
+            up = false;
+        } else {
+            offset = imm;
+            up = true;
+        }
+
+        // offset is effectively left-shifted by 2 already (the bottom two bits are zero, and not
+        // represented in the instruction).  Left shift by 14 to move it into position 0x00AA0000.
+        ASSERT((offset & ~(0xff << 2)) == 0);
+        offset <<= 14;
+
+        m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
+    }
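+    // For example (illustrative only): vldr(d, rn, 8) loads a double from [rn, #8] and
+    // vstr(d, rn, -16) stores one to [rn, #-16]; the immediate must be a multiple of 4
+    // within +/-1020 bytes, as asserted above.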
+
+    static void setInt32(void* code, uint32_t value)
+    {
+        uint16_t* location = reinterpret_cast<uint16_t*>(code);
+        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
+        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+
+        ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
+    }
+
+    static void setPointer(void* code, void* value)
+    {
+        setInt32(code, reinterpret_cast<uint32_t>(value));
+    }
+
+    static bool isB(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
+    }
+
+    static bool isBX(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return (instruction[0] & 0xff87) == OP_BX;
+    }
+
+    static bool isMOV_imm_T3(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
+    }
+
+    static bool isMOVT(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
+    }
+
+    static bool isNOP_T1(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return instruction[0] == OP_NOP_T1;
+    }
+
+    static bool isNOP_T2(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
+    }
+
+    static void linkJumpAbsolute(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+            || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        if (((relative << 7) >> 7) == relative) {
+            // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+            if (relative >= 0)
+                relative ^= 0xC00000;
+
+            // All branch offsets should be an even distance.
+            ASSERT(!(relative & 1));
+            // There may be a better way to fix this, but right now put the NOPs first, since in the
+            // case of a conditional branch this will be coming after an ITTT predicating *three*
+            // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
+            // variable width encoding - the previous instruction might *look* like an ITTT but
+            // actually be the second half of a 2-word op.
+            instruction[-5] = OP_NOP_T1;
+            instruction[-4] = OP_NOP_T2a;
+            instruction[-3] = OP_NOP_T2b;
+            instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+            instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+        } else {
+            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+            instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+            instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+            instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+            instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+            instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+        }
+    }
+
+    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
+    {
+        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
+    }
+    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
+    {
+        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
+    }
+
+    class ARMInstructionFormatter {
+    public:
+        void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+        {
+            m_buffer.putShort(op | (rd << 8) | imm);
+        }
+        
+        void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+        {
+            m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+        }
+
+        void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+        {
+            m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+        }
+
+        void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+        {
+            m_buffer.putShort(op | imm);
+        }
+
+        void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+        {
+            m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+        }
+        void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+        {
+            m_buffer.putShort(op | imm);
+        }
+
+        void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+        {
+            m_buffer.putShort(op | (reg1 << 3) | reg2);
+        }
+
+        void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+        {
+            m_buffer.putShort(op | reg);
+            m_buffer.putShort(ff.m_u.value);
+        }
+        
+        void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+        {
+            m_buffer.putShort(op);
+            m_buffer.putShort(ff.m_u.value);
+        }
+        
+        void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+        {
+            m_buffer.putShort(op1);
+            m_buffer.putShort(op2);
+        }
+
+        void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+        {
+            ARMThumbImmediate newImm = imm;
+            newImm.m_value.imm4 = imm4;
+
+            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
+            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
+        }
+
+        void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+        {
+            m_buffer.putShort(op | reg1);
+            m_buffer.putShort((reg2 << 12) | imm);
+        }
+
+        void vfpOp(int32_t op)
+        {
+            m_buffer.putInt(op);
+        }
+
+
+        // Administrative methods:
+
+        size_t size() const { return m_buffer.size(); }
+        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+        void* data() const { return m_buffer.data(); }
+        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+    private:
+        AssemblerBuffer m_buffer;
+    } m_formatter;
+
+    Vector<LinkRecord> m_jumpsToLink;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#endif // ARMAssembler_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/AbstractMacroAssembler.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef AbstractMacroAssembler_h
+#define AbstractMacroAssembler_h
+
+#include <wtf/Platform.h>
+#include <assembler/MacroAssemblerCodeRef.h>
+#include <assembler/CodeLocation.h>
+#include "jsstdint.h"
+
+#if ENABLE_ASSEMBLER
+
+namespace JSC {
+
+class LinkBuffer;
+class RepatchBuffer;
+
+template <class AssemblerType>
+class AbstractMacroAssembler {
+public:
+    typedef AssemblerType AssemblerType_T;
+
+    typedef MacroAssemblerCodePtr CodePtr;
+    typedef MacroAssemblerCodeRef CodeRef;
+
+    class Jump;
+
+    typedef typename AssemblerType::RegisterID RegisterID;
+    typedef typename AssemblerType::FPRegisterID FPRegisterID;
+    typedef typename AssemblerType::JmpSrc JmpSrc;
+    typedef typename AssemblerType::JmpDst JmpDst;
+
+
+    // Section 1: MacroAssembler operand types
+    //
+    // The following types are used as operands to MacroAssembler operations,
+    // describing immediate and memory operands to the instructions to be planted.
+
+
+    enum Scale {
+        TimesOne,
+        TimesTwo,
+        TimesFour,
+        TimesEight
+    };
+
+    // Address:
+    //
+    // Describes a simple base-offset address.
+    struct Address {
+        explicit Address(RegisterID base, int32_t offset = 0)
+            : base(base)
+            , offset(offset)
+        {
+        }
+
+        RegisterID base;
+        int32_t offset;
+    };
+
+    // ImplicitAddress:
+    //
+    // This class is used for explicit 'load' and 'store' operations
+    // (as opposed to situations in which a memory operand is provided
+    // to a generic operation, such as an integer arithmetic instruction).
+    //
+    // In the case of a load (or store) operation we want to permit
+    // addresses to be implicitly constructed, e.g. the two calls:
+    //
+    //     load32(Address(addrReg), destReg);
+    //     load32(addrReg, destReg);
+    //
+    // Are equivalent, and the explicit wrapping of the Address in the former
+    // is unnecessary.
+    struct ImplicitAddress {
+        ImplicitAddress(RegisterID base)
+            : base(base)
+            , offset(0)
+        {
+        }
+
+        ImplicitAddress(Address address)
+            : base(address.base)
+            , offset(address.offset)
+        {
+        }
+
+        RegisterID base;
+        int32_t offset;
+    };
+
+    // BaseIndex:
+    //
+    // Describes a complex addressing mode.
+    struct BaseIndex {
+        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+            : base(base)
+            , index(index)
+            , scale(scale)
+            , offset(offset)
+        {
+        }
+
+        RegisterID base;
+        RegisterID index;
+        Scale scale;
+        int32_t offset;
+    };
+
+    // AbsoluteAddress:
+    //
+    // Describes a memory operand given by a pointer.  For regular load & store
+    // operations an unwrapped void* will be used, rather than using this.
+    struct AbsoluteAddress {
+        explicit AbsoluteAddress(void* ptr)
+            : m_ptr(ptr)
+        {
+        }
+
+        void* m_ptr;
+    };
+
+    // ImmPtr:
+    //
+    // A pointer-sized immediate operand to an instruction - this is wrapped
+    // in a class requiring explicit construction in order to differentiate it
+    // from pointers used as absolute addresses to memory operations.
+    struct ImmPtr {
+        explicit ImmPtr(void* value)
+            : m_value(value)
+        {
+        }
+
+        intptr_t asIntptr()
+        {
+            return reinterpret_cast<intptr_t>(m_value);
+        }
+
+        void* m_value;
+    };
+
+    // Imm32:
+    //
+    // A 32-bit immediate operand to an instruction - this is wrapped in a
+    // class requiring explicit construction in order to prevent RegisterIDs
+    // (which are implemented as an enum) from accidentally being passed as
+    // immediate values.
+    struct Imm32 {
+        explicit Imm32(int32_t value)
+            : m_value(value)
+#if WTF_CPU_ARM
+            , m_isPointer(false)
+#endif
+        {
+        }
+
+#if !WTF_CPU_X86_64
+        explicit Imm32(ImmPtr ptr)
+            : m_value(ptr.asIntptr())
+#if WTF_CPU_ARM
+            , m_isPointer(true)
+#endif
+        {
+        }
+#endif
+
+        int32_t m_value;
+#if WTF_CPU_ARM
+        // We rely on being able to regenerate code to recover exception handling
+        // information.  Since ARMv7 supports 16-bit immediates there is a danger
+        // that if pointer values change the layout of the generated code will change.
+        // To avoid this problem, always generate pointers (and thus Imm32s constructed
+        // from ImmPtrs) with a code sequence that is able to represent any pointer
+        // value - don't use a more compact form in these cases.
+        bool m_isPointer;
+#endif
+    };
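+
+    // Illustrative use of the wrappers above (store32/storePtr live in the concrete
+    // MacroAssembler subclasses; this is only a sketch, 'baseReg' and 'obj' are hypothetical):
+    //
+    //     masm.store32(Imm32(42), Address(baseReg, 8));   // 32-bit immediate operand
+    //     masm.storePtr(ImmPtr(obj), Address(baseReg));   // pointer-sized immediate operand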
+
+
+    // Section 2: MacroAssembler code buffer handles
+    //
+    // The following types are used to reference items in the code buffer
+    // during JIT code generation.  For example, the type Jump is used to
+    // track the location of a jump instruction so that it may later be
+    // linked to a label marking its destination.
+
+
+    // Label:
+    //
+    // A Label records a point in the generated instruction stream, typically such that
+    // it may be used as a destination for a jump.
+    class Label {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class Jump;
+        friend class MacroAssemblerCodeRef;
+        friend class LinkBuffer;
+
+    public:
+        Label()
+        {
+        }
+
+        Label(AbstractMacroAssembler<AssemblerType>* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+        
+        bool isUsed() const { return m_label.isUsed(); }
+        void used() { m_label.used(); }
+        bool isValid() const { return m_label.isValid(); }
+    private:
+        JmpDst m_label;
+    };
+
+    // DataLabelPtr:
+    //
+    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+    // patched after the code has been generated.
+    class DataLabelPtr {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+    public:
+        DataLabelPtr()
+        {
+        }
+
+        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+        
+    private:
+        JmpDst m_label;
+    };
+
+    // DataLabel32:
+    //
+    // A DataLabel32 is used to refer to a location in the code containing a 32-bit value to be
+    // patched after the code has been generated.
+    class DataLabel32 {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+    public:
+        DataLabel32()
+        {
+        }
+
+        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+
+    private:
+        JmpDst m_label;
+    };
+
+    // Call:
+    //
+    // A Call object is a reference to a call instruction that has been planted
+    // into the code buffer - it is typically used to link the call, setting the
+    // relative offset such that when executed it will call to the desired
+    // destination.
+    class Call {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+
+    public:
+        enum Flags {
+            None = 0x0,
+            Linkable = 0x1,
+            Near = 0x2,
+            LinkableNear = 0x3
+        };
+
+        Call()
+            : m_flags(None)
+        {
+        }
+        
+        Call(JmpSrc jmp, Flags flags)
+            : m_jmp(jmp)
+            , m_flags(flags)
+        {
+        }
+
+        bool isFlagSet(Flags flag)
+        {
+            return !!(m_flags & flag);
+        }
+
+        static Call fromTailJump(Jump jump)
+        {
+            return Call(jump.m_jmp, Linkable);
+        }
+
+        JmpSrc m_jmp;
+    private:
+        Flags m_flags;
+    };
+
+    // Jump:
+    //
+    // A jump object is a reference to a jump instruction that has been planted
+    // into the code buffer - it is typically used to link the jump, setting the
+    // relative offset such that when executed it will jump to the desired
+    // destination.
+    class Jump {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class Call;
+        friend class LinkBuffer;
+    public:
+        Jump()
+        {
+        }
+        
+        Jump(JmpSrc jmp)    
+            : m_jmp(jmp)
+        {
+        }
+        
+        void link(AbstractMacroAssembler<AssemblerType>* masm)
+        {
+            masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label());
+        }
+        
+        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+        {
+            masm->m_assembler.linkJump(m_jmp, label.m_label);
+        }
+
+    private:
+        JmpSrc m_jmp;
+    };
+
+    // JumpList:
+    //
+    // A JumpList is a set of Jump objects.
+    // All jumps in the set will be linked to the same destination.
+    class JumpList {
+        friend class LinkBuffer;
+
+    public:
+        typedef js::Vector<Jump, 16, js::SystemAllocPolicy> JumpVector;
+
+        void link(AbstractMacroAssembler<AssemblerType>* masm)
+        {
+            size_t size = m_jumps.size();
+            for (size_t i = 0; i < size; ++i)
+                m_jumps[i].link(masm);
+            m_jumps.clear();
+        }
+        
+        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+        {
+            size_t size = m_jumps.size();
+            for (size_t i = 0; i < size; ++i)
+                m_jumps[i].linkTo(label, masm);
+            m_jumps.clear();
+        }
+        
+        void append(Jump jump)
+        {
+            m_jumps.append(jump);
+        }
+        
+        void append(JumpList& other)
+        {
+            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+        }
+
+        bool empty()
+        {
+            return !m_jumps.size();
+        }
+        
+        const JumpVector& jumps() { return m_jumps; }
+
+    private:
+        JumpVector m_jumps;
+    };
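+
+    // Typical usage of the jump handles above (informal sketch; the branch producing
+    // 'j' would come from a concrete MacroAssembler and is hypothetical here):
+    //
+    //     JumpList failureCases;
+    //     failureCases.append(j);      // collect branches that should bail out
+    //     ...
+    //     failureCases.link(masm);     // bind them all to the current code position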
+
+
+    // Section 3: Misc admin methods
+
+    static CodePtr trampolineAt(CodeRef ref, Label label)
+    {
+        return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
+    }
+
+    size_t size()
+    {
+        return m_assembler.size();
+    }
+
+    unsigned char *buffer()
+    {
+        return m_assembler.buffer();
+    }
+
+    Label label()
+    {
+        return Label(this);
+    }
+    
+    Label align()
+    {
+        m_assembler.align(16);
+        return Label(this);
+    }
+
+    ptrdiff_t differenceBetween(Label from, Jump to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+    }
+
+    ptrdiff_t differenceBetween(Label from, Call to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+    }
+
+    ptrdiff_t differenceBetween(Label from, Label to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
+
+    ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
+
+    ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
+
+    ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+    }
+
+    ptrdiff_t differenceBetween(DataLabelPtr from, DataLabelPtr to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+    }
+
+    ptrdiff_t differenceBetween(DataLabelPtr from, Call to)
+    {
+        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+    }
+
+protected:
+    AssemblerType m_assembler;
+
+    friend class LinkBuffer;
+    friend class RepatchBuffer;
+
+    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+    {
+        AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
+    }
+
+    static void linkPointer(void* code, typename AssemblerType::JmpDst label, void* value)
+    {
+        AssemblerType::linkPointer(code, label, value);
+    }
+
+    static void* getLinkerAddress(void* code, typename AssemblerType::JmpSrc label)
+    {
+        return AssemblerType::getRelocatedAddress(code, label);
+    }
+
+    static void* getLinkerAddress(void* code, typename AssemblerType::JmpDst label)
+    {
+        return AssemblerType::getRelocatedAddress(code, label);
+    }
+
+    static unsigned getLinkerCallReturnOffset(Call call)
+    {
+        return AssemblerType::getCallReturnOffset(call.m_jmp);
+    }
+
+    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+    {
+        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+    }
+
+    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+    {
+        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+    {
+        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+    }
+
+    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+    {
+        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+    }
+
+    static void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+    {
+        AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
+    }
+
+    static void repatchLEAToLoadPtr(CodeLocationInstruction instruction)
+    {
+        AssemblerType::repatchLEAToLoadPtr(instruction.dataLocation());
+    }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AbstractMacroAssembler_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/AssemblerBuffer.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef AssemblerBuffer_h
+#define AssemblerBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER
+
+#include <string.h>
+#include <jit/ExecutableAllocator.h>
+#include <wtf/Assertions.h>
+#include "jsstdint.h"
+
+namespace JSC {
+
+    class AssemblerBuffer {
+        static const int inlineCapacity = 256;
+    public:
+        AssemblerBuffer()
+            : m_buffer(m_inlineBuffer)
+            , m_capacity(inlineCapacity)
+            , m_size(0)
+        {
+        }
+
+        ~AssemblerBuffer()
+        {
+            if (m_buffer != m_inlineBuffer)
+                //fastFree(m_buffer);
+                free(m_buffer);
+        }
+
+        void ensureSpace(int space)
+        {
+            if (m_size > m_capacity - space)
+                grow();
+        }
+
+        bool isAligned(int alignment) const
+        {
+            return !(m_size & (alignment - 1));
+        }
+
+        void putByteUnchecked(int value)
+        {
+            ASSERT(!(m_size > m_capacity - 4));
+            m_buffer[m_size] = char(value);
+            m_size++;
+        }
+
+        void putByte(int value)
+        {
+            if (m_size > m_capacity - 4)
+                grow();
+            putByteUnchecked(value);
+        }
+
+        void putShortUnchecked(int value)
+        {
+            ASSERT(!(m_size > m_capacity - 4));
+            *reinterpret_cast<short*>(&m_buffer[m_size]) = short(value);
+            m_size += 2;
+        }
+
+        void putShort(int value)
+        {
+            if (m_size > m_capacity - 4)
+                grow();
+            putShortUnchecked(value);
+        }
+
+        void putIntUnchecked(int value)
+        {
+            ASSERT(!(m_size > m_capacity - 4));
+            *reinterpret_cast<int*>(&m_buffer[m_size]) = value;
+            m_size += 4;
+        }
+
+        void putInt64Unchecked(int64_t value)
+        {
+            ASSERT(!(m_size > m_capacity - 8));
+            *reinterpret_cast<int64_t*>(&m_buffer[m_size]) = value;
+            m_size += 8;
+        }
+
+        void putInt(int value)
+        {
+            if (m_size > m_capacity - 4)
+                grow();
+            putIntUnchecked(value);
+        }
+
+        void* data() const
+        {
+            return m_buffer;
+        }
+
+        int size() const
+        {
+            return m_size;
+        }
+
+        void* executableCopy(ExecutablePool* allocator)
+        {
+            if (!m_size)
+                return 0;
+
+            void* result = allocator->alloc(m_size);
+
+            if (!result)
+                return 0;
+
+            ExecutableAllocator::makeWritable(result, m_size);
+
+            return memcpy(result, m_buffer, m_size);
+        }
+
+        unsigned char *buffer() const {
+            return reinterpret_cast<unsigned char *>(m_buffer);
+        }
+
+    protected:
+        void append(const char* data, int size)
+        {
+            if (m_size > m_capacity - size)
+                grow(size);
+
+            memcpy(m_buffer + m_size, data, size);
+            m_size += size;
+        }
+
+        void grow(int extraCapacity = 0)
+        {
+            m_capacity += m_capacity / 2 + extraCapacity;
+
+            if (m_buffer == m_inlineBuffer) {
+                //char* newBuffer = static_cast<char*>(fastMalloc(m_capacity));
+                char* newBuffer = static_cast<char*>(malloc(m_capacity));
+                m_buffer = static_cast<char*>(memcpy(newBuffer, m_buffer, m_size));
+            } else
+                //m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
+                m_buffer = static_cast<char*>(realloc(m_buffer, m_capacity));
+        }
+
+        char m_inlineBuffer[inlineCapacity];
+        char* m_buffer;
+        int m_capacity;
+        int m_size;
+    };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBuffer_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+#define ASSEMBLER_HAS_CONSTANT_POOL 1
+
+namespace JSC {
+
+/*
+    A constant pool can store 4- or 8-byte data items. The values can be
+    constants or addresses. The addresses should be 32 or 64 bits. The constants
+    should be double-precision floats or integer numbers which are hard to
+    encode in a few machine instructions.
+
+    TODO: The pool is designed to handle both 32- and 64-bit values, but
+    currently only the 4-byte constants are implemented and tested.
+
+    The AssemblerBuffer can contain multiple constant pools. Each pool is inserted
+    into the instruction stream - protected by a jump instruction from the
+    execution flow.
+
+    The flush mechanism is called when no space remains to insert the next instruction
+    into the pool. Three values are used to determine when the constant pool itself
+    has to be inserted into the instruction stream (Assembler Buffer):
+
+    - maxPoolSize: size of the constant pool in bytes, this value cannot be
+        larger than the maximum offset of a PC relative memory load
+
+    - barrierSize: size of jump instruction in bytes which protects the
+        constant pool from execution
+
+    - maxInstructionSize: maximum length of a machine instruction in bytes
+
+    There are some callbacks which solve the target architecture specific
+    address handling:
+
+    - TYPE patchConstantPoolLoad(TYPE load, int value):
+        patch the 'load' instruction with the index of the constant in the
+        constant pool and return the patched instruction.
+
+    - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+        patch the PC-relative load instruction at the 'loadAddr' address with the
+        final relative offset. The offset can be computed with the help of
+        'constPoolAddr' (the address of the constant pool) and the index of the
+        constant (which was stored previously in the load instruction itself).
+
+    - TYPE placeConstantPoolBarrier(int size):
+        return a constant pool barrier instruction which jumps over the
+        constant pool.
+
+    The 'put*WithConstant*' functions should be used to place data into the
+    constant pool.
+*/
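+
+/*
+    Illustrative use from a backend (informal sketch; 'loadInsn' is a hypothetical
+    PC-relative load encoding and m_buffer an AssemblerBufferWithConstantPool):
+
+        m_buffer.putIntWithConstantInt(loadInsn, 0xdeadbeef, true);  // constant goes to the pool
+        ...
+        m_buffer.flushWithoutBarrier();  // after an unconditional jump, dump the pool barrier-free
+*/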
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool: public AssemblerBuffer {
+    typedef SegmentedVector<uint32_t, 512> LoadOffsets;
+public:
+    enum {
+        UniqueConst,
+        ReusableConst,
+        UnusedEntry
+    };
+
+    AssemblerBufferWithConstantPool()
+        : AssemblerBuffer()
+        , m_numConsts(0)
+        , m_maxDistance(maxPoolSize)
+        , m_lastConstDelta(0)
+    {
+        //m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+        m_pool = static_cast<uint32_t*>(malloc(maxPoolSize));
+        //m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+        m_mask = static_cast<char*>(malloc(maxPoolSize / sizeof(uint32_t)));
+    }
+
+    ~AssemblerBufferWithConstantPool()
+    {
+        //fastFree(m_mask);
+        free(m_mask);
+        //fastFree(m_pool);
+        free(m_pool);
+    }
+
+    void ensureSpace(int space)
+    {
+        flushIfNoSpaceFor(space);
+        AssemblerBuffer::ensureSpace(space);
+    }
+
+    void ensureSpace(int insnSpace, int constSpace)
+    {
+        flushIfNoSpaceFor(insnSpace, constSpace);
+        AssemblerBuffer::ensureSpace(insnSpace);
+    }
+
+    bool isAligned(int alignment)
+    {
+        flushIfNoSpaceFor(alignment);
+        return AssemblerBuffer::isAligned(alignment);
+    }
+
+    void putByteUnchecked(int value)
+    {
+        AssemblerBuffer::putByteUnchecked(value);
+        correctDeltas(1);
+    }
+
+    void putByte(int value)
+    {
+        flushIfNoSpaceFor(1);
+        AssemblerBuffer::putByte(value);
+        correctDeltas(1);
+    }
+
+    void putShortUnchecked(int value)
+    {
+        AssemblerBuffer::putShortUnchecked(value);
+        correctDeltas(2);
+    }
+
+    void putShort(int value)
+    {
+        flushIfNoSpaceFor(2);
+        AssemblerBuffer::putShort(value);
+        correctDeltas(2);
+    }
+
+    void putIntUnchecked(int value)
+    {
+        AssemblerBuffer::putIntUnchecked(value);
+        correctDeltas(4);
+    }
+
+    void putInt(int value)
+    {
+        flushIfNoSpaceFor(4);
+        AssemblerBuffer::putInt(value);
+        correctDeltas(4);
+    }
+
+    void putInt64Unchecked(int64_t value)
+    {
+        AssemblerBuffer::putInt64Unchecked(value);
+        correctDeltas(8);
+    }
+
+    int size()
+    {
+        flushIfNoSpaceFor(maxInstructionSize, sizeof(uint64_t));
+        return AssemblerBuffer::size();
+    }
+
+    int uncheckedSize()
+    {
+        return AssemblerBuffer::size();
+    }
+
+    void* executableCopy(ExecutablePool* allocator)
+    {
+        flushConstantPool(false);
+        return AssemblerBuffer::executableCopy(allocator);
+    }
+
+    void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+    {
+        flushIfNoSpaceFor(4, 4);
+
+        m_loadOffsets.append(AssemblerBuffer::size());
+        if (isReusable)
+            for (int i = 0; i < m_numConsts; ++i) {
+                if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+                    AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, i));
+                    correctDeltas(4);
+                    return;
+                }
+            }
+
+        m_pool[m_numConsts] = constant;
+        m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+        AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, m_numConsts));
+        ++m_numConsts;
+
+        correctDeltas(4, 4);
+    }
+
+    // This flushing mechanism can be called after any unconditional jumps.
+    void flushWithoutBarrier(bool isForced = false)
+    {
+        // Flush if constant pool is more than 60% full to avoid overuse of this function.
+        if (isForced || 5 * m_numConsts > 3 * maxPoolSize / sizeof(uint32_t))
+            flushConstantPool(false);
+    }
+
+    uint32_t* poolAddress()
+    {
+        return m_pool;
+    }
+
+    int sizeOfConstantPool()
+    {
+        return m_numConsts;
+    }
+
+private:
+    void correctDeltas(int insnSize)
+    {
+        m_maxDistance -= insnSize;
+        m_lastConstDelta -= insnSize;
+        if (m_lastConstDelta < 0)
+            m_lastConstDelta = 0;
+    }
+
+    void correctDeltas(int insnSize, int constSize)
+    {
+        correctDeltas(insnSize);
+
+        m_maxDistance -= m_lastConstDelta;
+        m_lastConstDelta = constSize;
+    }
+
+    void flushConstantPool(bool useBarrier = true)
+    {
+        if (m_numConsts == 0)
+            return;
+        int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+        if (alignPool)
+            alignPool = sizeof(uint64_t) - alignPool;
+
+        // Callback to protect the constant pool from execution
+        if (useBarrier)
+            AssemblerBuffer::putInt(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+        if (alignPool) {
+            if (alignPool & 1)
+                AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+            if (alignPool & 2)
+                AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+            if (alignPool & 4)
+                AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+        }
+
+        int constPoolOffset = AssemblerBuffer::size();
+        append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+        // Patch each PC relative load
+        for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+            void* loadAddr = reinterpret_cast<void*>(m_buffer + *iter);
+            AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<void*>(m_buffer + constPoolOffset));
+        }
+
+        m_loadOffsets.clear();
+        m_numConsts = 0;
+        m_maxDistance = maxPoolSize;
+    }
+
+    void flushIfNoSpaceFor(int nextInsnSize)
+    {
+        if (m_numConsts == 0)
+            return;
+        int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
+        if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+            flushConstantPool();
+    }
+
+    void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+    {
+        if (m_numConsts == 0)
+            return;
+        if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
+            (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
+            flushConstantPool();
+    }
+
+    uint32_t* m_pool;
+    char* m_mask;
+    LoadOffsets m_loadOffsets;
+
+    int m_numConsts;
+    int m_maxDistance;
+    int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/CodeLocation.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include <wtf/Platform.h>
+#include <assembler/MacroAssemblerCodeRef.h>
+
+#if ENABLE_ASSEMBLER
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name).  These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know.  When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart.  To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
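+//
+// For example (an illustrative sketch; 'codePtr', 'callOffset' and
+// 'dataOffset' are hypothetical values a client would have recorded at code
+// generation time):
+//
+//     CodeLocationLabel start(codePtr);   // the one pointer we retain
+//     CodeLocationCall call = start.callAtOffset(callOffset);
+//     CodeLocationDataLabelPtr data = start.dataLabelPtrAtOffset(dataOffset);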
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+    CodeLocationInstruction instructionAtOffset(int offset);
+    CodeLocationLabel labelAtOffset(int offset);
+    CodeLocationJump jumpAtOffset(int offset);
+    CodeLocationCall callAtOffset(int offset);
+    CodeLocationNearCall nearCallAtOffset(int offset);
+    CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+    CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+
+protected:
+    CodeLocationCommon()
+    {
+    }
+
+    CodeLocationCommon(MacroAssemblerCodePtr location)
+        : MacroAssemblerCodePtr(location)
+    {
+    }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+    CodeLocationInstruction() {}
+    explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationInstruction(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+    CodeLocationLabel() {}
+    explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationLabel(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+    CodeLocationJump() {}
+    explicit CodeLocationJump(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationJump(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+    CodeLocationCall() {}
+    explicit CodeLocationCall(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationCall(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+    CodeLocationNearCall() {}
+    explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationNearCall(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+    CodeLocationDataLabel32() {}
+    explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationDataLabel32(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+    CodeLocationDataLabelPtr() {}
+    explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+        : CodeLocationCommon(location) {}
+    explicit CodeLocationDataLabelPtr(void* location)
+        : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+    ASSERT_VALID_CODE_OFFSET(offset);
+    return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/LinkBuffer.h
@@ -0,0 +1,206 @@
+/* vim: set ts=4 sw=4 tw=99 et:
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER
+
+#include <assembler/MacroAssembler.h>
+
+namespace JSC {
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed, and the code has been copied to its final location in memory.  At this
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+//   * Jump objects may be linked to external targets.
+//   * The address of Jump objects may be taken, such that it can later be relinked.
+//   * The return address of a Call may be acquired.
+//   * The address of a Label pointing into the code may be resolved.
+//   * The value referenced by a DataLabel may be set.
+//
+class LinkBuffer {
+    typedef MacroAssemblerCodeRef CodeRef;
+    typedef MacroAssembler::Label Label;
+    typedef MacroAssembler::Jump Jump;
+    typedef MacroAssembler::JumpList JumpList;
+    typedef MacroAssembler::Call Call;
+    typedef MacroAssembler::DataLabel32 DataLabel32;
+    typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+
+public:
+    // Note: The initialization sequence is significant when executablePool is a PassRefPtr
+    //       (as in the original, commented-out signature below).  First, executablePool is
+    //       copied into m_executablePool; the initialization of m_code then uses
+    //       m_executablePool, *not* executablePool, since the latter is no longer valid.
+    //LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
+    LinkBuffer(MacroAssembler* masm, ExecutablePool* executablePool)
+        : m_executablePool(executablePool)
+        //, m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
+        , m_code(masm->m_assembler.executableCopy(m_executablePool))
+        , m_size(masm->m_assembler.size())
+#ifndef NDEBUG
+        , m_completed(false)
+#endif
+    {
+    }
+
+    LinkBuffer(uint8* ncode, size_t size)
+        : m_executablePool(NULL)
+        , m_code(ncode)
+        , m_size(size)
+#ifndef NDEBUG
+        , m_completed(false)
+#endif
+    {
+    }
+
+    ~LinkBuffer()
+    {
+        ASSERT(!m_executablePool || m_completed);
+    }
+
+    // These methods are used to link or set values at code generation time.
+
+    void link(Call call, FunctionPtr function)
+    {
+        ASSERT(call.isFlagSet(Call::Linkable));
+        MacroAssembler::linkCall(code(), call, function);
+    }
+    
+    void link(Jump jump, CodeLocationLabel label)
+    {
+        MacroAssembler::linkJump(code(), jump, label);
+    }
+
+    void link(JumpList list, CodeLocationLabel label)
+    {
+        //for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+        for (unsigned i = 0; i < list.m_jumps.length(); ++i)
+            MacroAssembler::linkJump(code(), list.m_jumps[i], label);
+    }
+
+    void patch(DataLabelPtr label, void* value)
+    {
+        MacroAssembler::linkPointer(code(), label.m_label, value);
+    }
+
+    void patch(DataLabelPtr label, CodeLocationLabel value)
+    {
+        MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
+    }
+
+    // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+    CodeLocationCall locationOf(Call call)
+    {
+        ASSERT(call.isFlagSet(Call::Linkable));
+        ASSERT(!call.isFlagSet(Call::Near));
+        return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+    }
+
+    CodeLocationNearCall locationOfNearCall(Call call)
+    {
+        ASSERT(call.isFlagSet(Call::Linkable));
+        ASSERT(call.isFlagSet(Call::Near));
+        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+    }
+
+    CodeLocationLabel locationOf(Label label)
+    {
+        return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
+    }
+
+    CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+    {
+        return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
+    }
+
+    CodeLocationDataLabel32 locationOf(DataLabel32 label)
+    {
+        return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
+    }
+
+    // This method obtains the return address of the call, given as an offset from
+    // the start of the code.
+    unsigned returnAddressOffset(Call call)
+    {
+        return MacroAssembler::getLinkerCallReturnOffset(call);
+    }
+
+    // Upon completion of all patching, either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
+    // once to complete generation of the code.  'finalizeCode()' is suited to situations
+    // where the executable pool must also be retained; the lighter-weight 'finalizeCodeAddendum()' is
+    // suited to adding to an existing allocation.
+    CodeRef finalizeCode()
+    {
+        performFinalization();
+
+        return CodeRef(m_code, m_executablePool, m_size);
+    }
+    CodeLocationLabel finalizeCodeAddendum()
+    {
+        performFinalization();
+
+        return CodeLocationLabel(code());
+    }
+
+private:
+    // Keep this private! - the underlying code should only be obtained externally via 
+    // finalizeCode() or finalizeCodeAddendum().
+    void* code()
+    {
+        return m_code;
+    }
+
+    void performFinalization()
+    {
+#ifndef NDEBUG
+        ASSERT(!m_completed);
+        m_completed = true;
+#endif
+
+        ExecutableAllocator::makeExecutable(code(), m_size);
+        ExecutableAllocator::cacheFlush(code(), m_size);
+    }
+
+    //RefPtr<ExecutablePool> m_executablePool;
+    ExecutablePool* m_executablePool;
+    void* m_code;
+    size_t m_size;
+#ifndef NDEBUG
+    bool m_completed;
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssembler.h
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER
+
+#if WTF_CPU_ARM_THUMB2
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }
+
+#elif WTF_CPU_ARM_TRADITIONAL
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }
+
+#elif WTF_CPU_X86
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }
+
+#elif WTF_CPU_X86_64
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
+
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
+    using MacroAssemblerBase::pop;
+    using MacroAssemblerBase::jump;
+    using MacroAssemblerBase::branch32;
+    using MacroAssemblerBase::branch16;
+#if WTF_CPU_X86_64
+    using MacroAssemblerBase::branchPtr;
+    using MacroAssemblerBase::branchTestPtr;
+#endif
+
+
+    // Platform-agnostic convenience functions,
+    // described in terms of other macro assembly methods.
+    void pop()
+    {
+        addPtr(Imm32(sizeof(void*)), stackPointerRegister);
+    }
+    
+    void peek(RegisterID dest, int index = 0)
+    {
+        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+    }
+
+    void poke(RegisterID src, int index = 0)
+    {
+        storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
+    }
+
+    void poke(Imm32 value, int index = 0)
+    {
+        store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
+    }
+
+    void poke(ImmPtr imm, int index = 0)
+    {
+        storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
+    }
+
+
+    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
+    void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
+    {
+        branchPtr(cond, op1, imm).linkTo(target, this);
+    }
+
+    void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
+    {
+        branch32(cond, op1, op2).linkTo(target, this);
+    }
+
+    void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
+    {
+        branch32(cond, op1, imm).linkTo(target, this);
+    }
+
+    void branch32(Condition cond, RegisterID left, Address right, Label target)
+    {
+        branch32(cond, left, right).linkTo(target, this);
+    }
+
+    void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
+    {
+        branch16(cond, left, right).linkTo(target, this);
+    }
+    
+    void branchTestPtr(Condition cond, RegisterID reg, Label target)
+    {
+        branchTestPtr(cond, reg).linkTo(target, this);
+    }
+
+    void jump(Label target)
+    {
+        jump().linkTo(target, this);
+    }
+
+
+    // Ptr methods
+    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
+    // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !WTF_CPU_X86_64
+    void addPtr(RegisterID src, RegisterID dest)
+    {
+        add32(src, dest);
+    }
+
+    void addPtr(Imm32 imm32, Address address)
+    {
+        add32(imm32, address);
+    }
+
+    void addPtr(Imm32 imm, RegisterID srcDest)
+    {
+        add32(imm, srcDest);
+    }
+
+    void addPtr(ImmPtr imm, RegisterID dest)
+    {
+        add32(Imm32(imm), dest);
+    }
+
+    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        add32(imm, src, dest);
+    }
+
+    void andPtr(RegisterID src, RegisterID dest)
+    {
+        and32(src, dest);
+    }
+
+    void andPtr(Address address, RegisterID srcDest)
+    {
+        and32(address, srcDest);
+    }
+
+    void andPtr(Imm32 imm, RegisterID srcDest)
+    {
+        and32(imm, srcDest);
+    }
+
+    void andPtr(ImmPtr ptr, RegisterID srcDest)
+    {
+        and32(Imm32(ptr), srcDest);
+    }
+
+    void notPtr(RegisterID srcDest)
+    {
+        not32(srcDest);
+    }
+
+    void orPtr(RegisterID src, RegisterID dest)
+    {
+        or32(src, dest);
+    }
+
+    void orPtr(ImmPtr imm, RegisterID dest)
+    {
+        or32(Imm32(imm), dest);
+    }
+
+    void orPtr(Imm32 imm, RegisterID dest)
+    {
+        or32(imm, dest);
+    }
+
+    void orPtr(Address address, RegisterID srcDest)
+    {
+        or32(address, srcDest);
+    }
+
+    void subPtr(RegisterID src, RegisterID dest)
+    {
+        sub32(src, dest);
+    }
+    
+    void subPtr(Imm32 imm, RegisterID dest)
+    {
+        sub32(imm, dest);
+    }
+    
+    void subPtr(ImmPtr imm, RegisterID dest)
+    {
+        sub32(Imm32(imm), dest);
+    }
+
+    void xorPtr(RegisterID src, RegisterID dest)
+    {
+        xor32(src, dest);
+    }
+
+    void xorPtr(Imm32 imm, RegisterID srcDest)
+    {
+        xor32(imm, srcDest);
+    }
+
+
+    void loadPtr(ImplicitAddress address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    void loadPtr(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    void loadPtr(void* address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load32WithAddressOffsetPatch(address, dest);
+    }
+
+    void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        set32(cond, left, right, dest);
+    }
+
+    void storePtr(RegisterID src, ImplicitAddress address)
+    {
+        store32(src, address);
+    }
+
+    void storePtr(RegisterID src, BaseIndex address)
+    {
+        store32(src, address);
+    }
+
+    void storePtr(RegisterID src, void* address)
+    {
+        store32(src, address);
+    }
+
+    void storePtr(ImmPtr imm, ImplicitAddress address)
+    {
+        store32(Imm32(imm), address);
+    }
+
+    void storePtr(ImmPtr imm, BaseIndex address)
+    {
+        store32(Imm32(imm), address);
+    }
+
+    void storePtr(ImmPtr imm, void* address)
+    {
+        store32(Imm32(imm), address);
+    }
+
+    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        return store32WithAddressOffsetPatch(src, address);
+    }
+
+
+    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
+    {
+        return branch32(cond, left, Imm32(right));
+    }
+
+    Jump branchPtr(Condition cond, RegisterID left, Imm32 right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(Condition cond, RegisterID left, Address right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(Condition cond, Address left, RegisterID right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branchPtr(Condition cond, Address left, ImmPtr right)
+    {
+        return branch32(cond, left, Imm32(right));
+    }
+
+    Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
+    {
+        return branch32(cond, left, Imm32(right));
+    }
+
+    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
+    {
+        return branchTest32(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+    {
+        return branchTest32(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
+    {
+        return branchTest32(cond, address, mask);
+    }
+
+    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+    {
+        return branchTest32(cond, address, mask);
+    }
+
+
+    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd32(cond, src, dest);
+    }
+
+    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
+    {
+        return branchSub32(cond, imm, dest);
+    }
+
+    void rshiftPtr(Imm32 imm, RegisterID dest)
+    {
+        rshift32(imm, dest);
+    }
+
+    void lshiftPtr(Imm32 imm, RegisterID dest)
+    {
+        lshift32(imm, dest);
+    }
+#endif
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerARM.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
+
+#include "MacroAssemblerARM.h"
+
+#if WTF_PLATFORM_LINUX
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <asm/hwcap.h>
+#endif
+
+namespace JSC {
+
+static bool isVFPPresent()
+{
+#if WTF_PLATFORM_LINUX
+    int fd = open("/proc/self/auxv", O_RDONLY);
+    if (fd > 0) {
+        Elf32_auxv_t aux;
+        while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
+            if (aux.a_type == AT_HWCAP) {
+                close(fd);
+                return aux.a_un.a_val & HWCAP_VFP;
+            }
+        }
+        close(fd);
+    }
+#endif
+
+    return false;
+}
+
+const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
+
+#if WTF_CPU_ARMV5_OR_LOWER
+/* On ARMv5 and below, natural alignment is required. */
+void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+{
+    ARMWord op2;
+
+    ASSERT(address.scale >= 0 && address.scale <= 3);
+    op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
+
+    if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {
+        m_assembler.add_r(ARMRegisters::S0, address.base, op2);
+        m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
+        m_assembler.ldrh_u(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset + 0x2));
+    } else if (address.offset < 0 && address.offset >= -0xff) {
+        m_assembler.add_r(ARMRegisters::S0, address.base, op2);
+        m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
+        m_assembler.ldrh_d(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset - 0x2));
+    } else {
+        m_assembler.ldr_un_imm(ARMRegisters::S0, address.offset);
+        m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
+        m_assembler.ldrh_r(dest, address.base, ARMRegisters::S0);
+        m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::OP2_IMM | 0x2);
+        m_assembler.ldrh_r(ARMRegisters::S0, address.base, ARMRegisters::S0);
+    }
+    m_assembler.orr_r(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16));
+}
+#endif
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerARM.h
@@ -0,0 +1,1044 @@
+/*
+ * Copyright (C) 2008 Apple Inc.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM_h
+#define MacroAssemblerARM_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+    static const int DoubleConditionMask = 0x0f;
+    static const int DoubleConditionBitSpecial = 0x10;
+    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
+public:
+    enum Condition {
+        Equal = ARMAssembler::EQ,
+        NotEqual = ARMAssembler::NE,
+        Above = ARMAssembler::HI,
+        AboveOrEqual = ARMAssembler::CS,
+        Below = ARMAssembler::CC,
+        BelowOrEqual = ARMAssembler::LS,
+        GreaterThan = ARMAssembler::GT,
+        GreaterThanOrEqual = ARMAssembler::GE,
+        LessThan = ARMAssembler::LT,
+        LessThanOrEqual = ARMAssembler::LE,
+        Overflow = ARMAssembler::VS,
+        Signed = ARMAssembler::MI,
+        Zero = ARMAssembler::EQ,
+        NonZero = ARMAssembler::NE
+    };
+
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = ARMAssembler::EQ,
+        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
+        DoubleGreaterThan = ARMAssembler::GT,
+        DoubleGreaterThanOrEqual = ARMAssembler::GE,
+        DoubleLessThan = ARMAssembler::CC,
+        DoubleLessThanOrEqual = ARMAssembler::LS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
+        DoubleNotEqualOrUnordered = ARMAssembler::NE,
+        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
+        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
+        DoubleLessThanOrUnordered = ARMAssembler::LT,
+        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE
+    };
+
+    static const RegisterID stackPointerRegister = ARMRegisters::sp;
+    static const RegisterID linkRegister = ARMRegisters::lr;
+
+    static const Scale ScalePtr = TimesFour;
+    static const unsigned int TotalRegisters = 16;
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.adds_r(dest, dest, src);
+    }
+
+    void add32(Imm32 imm, Address address)
+    {
+        load32(address, ARMRegisters::S1);
+        add32(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void add32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        add32(ARMRegisters::S1, dest);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        and32(ARMRegisters::S1, dest);
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.ands_r(dest, dest, src);
+    }
+
+    void and32(Imm32 imm, RegisterID dest)
+    {
+        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+        if (w & ARMAssembler::OP2_INV_IMM)
+            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
+        else
+            m_assembler.ands_r(dest, dest, w);
+    }
+
+    void lshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2(0x1f);
+        ASSERT(w != ARMAssembler::INVALID_IMM);
+        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
+
+        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
+    }
+
+    void lshift32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        if (src == dest) {
+            move(src, ARMRegisters::S0);
+            src = ARMRegisters::S0;
+        }
+        m_assembler.muls_r(dest, dest, src);
+    }
+
+    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, ARMRegisters::S0);
+        m_assembler.muls_r(dest, src, ARMRegisters::S0);
+    }
+
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
+    }
+
+    void not32(RegisterID dest)
+    {
+        m_assembler.mvns_r(dest, dest);
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orrs_r(dest, dest, src);
+    }
+
+    void or32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void rshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2(0x1f);
+        ASSERT(w != ARMAssembler::INVALID_IMM);
+        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
+
+        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
+    }
+
+    void rshift32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.subs_r(dest, dest, src);
+    }
+
+    void sub32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void sub32(Imm32 imm, Address address)
+    {
+        load32(address, ARMRegisters::S1);
+        sub32(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        sub32(ARMRegisters::S1, dest);
+    }
+
+    void or32(Address address, RegisterID dest)
+    {
+        load32(address, ARMRegisters::S1);
+        or32(ARMRegisters::S1, dest);
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.eors_r(dest, dest, src);
+    }
+
+    void xor32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+#if WTF_CPU_ARMV5_OR_LOWER
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
+#else
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+#endif
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 dataLabel(this);
+        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
+        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
+        return dataLabel;
+    }
+
+    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+    {
+        Label label(this);
+        load32(address, dest);
+        return label;
+    }
+
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.add_r(ARMRegisters::S0, address.base, m_assembler.lsl(address.index, address.scale));
+        if (address.offset>=0)
+            m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
+        else
+            m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 dataLabel(this);
+        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
+        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
+        return dataLabel;
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.dataTransfer32(false, src, address.base, address.offset);
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+    }
+
+    void store32(Imm32 imm, BaseIndex address)
+    {
+        if (imm.m_isPointer)
+            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
+        else
+            move(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void store32(Imm32 imm, ImplicitAddress address)
+    {
+        if (imm.m_isPointer)
+            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
+        else
+            move(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void store32(RegisterID src, void* address)
+    {
+        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
+    }
+
+    void store32(Imm32 imm, void* address)
+    {
+        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+        if (imm.m_isPointer)
+            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
+        else
+            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
+        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
+    }
+
+    void pop(RegisterID dest)
+    {
+        m_assembler.pop_r(dest);
+    }
+
+    void push(RegisterID src)
+    {
+        m_assembler.push_r(src);
+    }
+
+    void push(Address address)
+    {
+        load32(address, ARMRegisters::S1);
+        push(ARMRegisters::S1);
+    }
+
+    void push(Imm32 imm)
+    {
+        move(imm, ARMRegisters::S0);
+        push(ARMRegisters::S0);
+    }
+
+    void move(Imm32 imm, RegisterID dest)
+    {
+        if (imm.m_isPointer)
+            m_assembler.ldr_un_imm(dest, imm.m_value);
+        else
+            m_assembler.moveImm(imm.m_value, dest);
+    }
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mov_r(dest, src);
+    }
+
+    void move(ImmPtr imm, RegisterID dest)
+    {
+        move(Imm32(imm), dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        m_assembler.mov_r(ARMRegisters::S0, reg1);
+        m_assembler.mov_r(reg1, reg2);
+        m_assembler.mov_r(reg2, ARMRegisters::S0);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+    }
+
+    Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
+    {
+        m_assembler.cmp_r(left, right);
+        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+    }
+
+    Jump branch32(Condition cond, RegisterID left, Imm32 right, int useConstantPool = 0)
+    {
+        if (right.m_isPointer) {
+            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
+            m_assembler.cmp_r(left, ARMRegisters::S0);
+        } else
+            m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+    }
+
+    Jump branch32(Condition cond, RegisterID left, Address right)
+    {
+        load32(right, ARMRegisters::S1);
+        return branch32(cond, left, ARMRegisters::S1);
+    }
+
+    Jump branch32(Condition cond, Address left, RegisterID right)
+    {
+        load32(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32(Condition cond, Address left, Imm32 right)
+    {
+        load32(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+    {
+        load32(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+    {
+        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+    {
+        (void)(cond);
+        (void)(left);
+        (void)(right);
+        ASSERT_NOT_REACHED();
+        return jump();
+    }
+
+    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+    {
+        load16(left, ARMRegisters::S0);
+        move(right, ARMRegisters::S1);
+        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
+        return m_assembler.jmp(ARMCondition(cond));
+    }
+
+    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        m_assembler.tst_r(reg, mask);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
+        if (w & ARMAssembler::OP2_INV_IMM)
+            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
+        else
+            m_assembler.tst_r(reg, w);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+    {
+        load32(address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask);
+    }
+
+    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+    {
+        load32(address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask);
+    }
+
+    Jump jump()
+    {
+        return Jump(m_assembler.jmp());
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.bx_r(target);
+    }
+
+    void jump(Address address)
+    {
+        load32(address, ARMRegisters::pc);
+    }
+
+    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        add32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        add32(imm, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        if (src1 == dest) {
+            move(src1, ARMRegisters::S0);
+            src1 = ARMRegisters::S0;
+        }
+        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
+        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
+    }
+
+    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            mull32(src, dest, dest);
+            cond = NonZero;
+        }
+        else
+            mul32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        if (cond == Overflow) {
+            move(imm, ARMRegisters::S0);
+            mull32(ARMRegisters::S0, src, dest);
+            cond = NonZero;
+        }
+        else
+            mul32(imm, src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        sub32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        sub32(imm, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+        or32(src, dest);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+    }
+
+    void breakpoint()
+    {
+        m_assembler.bkpt(0);
+    }
+
+    Call nearCall()
+    {
+#if WTF_ARM_ARCH_VERSION >= 5
+        Call    call(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true), Call::LinkableNear);
+        m_assembler.blx_r(ARMRegisters::S1);
+        return call;
+#else
+        prepareCall();
+        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
+#endif
+    }
+
+    Call call(RegisterID target)
+    {
+        m_assembler.blx_r(target);
+        JmpSrc jmpSrc;
+        return Call(jmpSrc, Call::None);
+    }
+
+    void call(Address address)
+    {
+        call32(address.base, address.offset);
+    }
+
+    void ret()
+    {
+        m_assembler.bx_r(linkRegister);
+    }
+
+    void set32(Condition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        load32(left, ARMRegisters::S1);
+        set32(cond, ARMRegisters::S1, right, dest);
+    }
+
+    void set32(Condition cond, RegisterID left, Address right, RegisterID dest)
+    {
+        load32(right, ARMRegisters::S1);
+        set32(cond, left, ARMRegisters::S1, dest);
+    }
+
+    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp_r(left, right);
+        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+    }
+
+    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+    }
+
+    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        set32(cond, left, right, dest);
+    }
+
+    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        load32(left, ARMRegisters::S1);
+        set32(cond, ARMRegisters::S1, right, dest);
+    }
+
+    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        set32(cond, left, right, dest);
+    }
+
+    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    {
+        load32(address, ARMRegisters::S1);
+        if (mask.m_value == -1)
+            m_assembler.cmp_r(0, ARMRegisters::S1);
+        else
+            m_assembler.tst_r(ARMRegisters::S1, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
+        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+    }
+
+    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    {
+        // ARM doesn't have byte registers
+        setTest32(cond, address, mask, dest);
+    }
+
+    void add32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void lea(Address address, RegisterID dest)
+    {
+        m_assembler.add_r(dest, address.base, m_assembler.getImm(address.offset, ARMRegisters::S0));
+    }
+
+    void lea(BaseIndex address, RegisterID dest)
+    {
+        /* This could be better? */
+        move(address.index, ARMRegisters::S1);
+        if (address.scale != 0)
+            lshift32(Imm32(address.scale), ARMRegisters::S1);
+        if (address.offset)
+            add32(Imm32(address.offset), ARMRegisters::S1);
+        add32(address.base, ARMRegisters::S1);
+        move(ARMRegisters::S1, dest);
+    }
+
+    void add32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
+        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
+        add32(imm, ARMRegisters::S1);
+        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
+        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
+    }
+
+    void sub32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
+        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
+        sub32(imm, ARMRegisters::S1);
+        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
+        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
+    }
+
+    void load32(void* address, RegisterID dest)
+    {
+        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
+    }
+
+    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+    {
+        load32(left.m_ptr, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right);
+    }
+
+    Call call()
+    {
+#if WTF_ARM_ARCH_VERSION >= 5
+        Call    call(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true), Call::Linkable);
+        m_assembler.blx_r(ARMRegisters::S1);
+        return call;
+#else
+        prepareCall();
+        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
+#endif
+    }
+
+    Call tailRecursiveCall()
+    {
+        return Call::fromTailJump(jump());
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        return Call::fromTailJump(oldJump);
+    }
+
+    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+    {
+        DataLabelPtr dataLabel(this);
+        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
+        return dataLabel;
+    }
+
+    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    {
+        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
+        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
+        return jump;
+    }
+
+    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    {
+        load32(left, ARMRegisters::S1);
+        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+        return jump;
+    }
+
+    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+    {
+        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+        return dataLabel;
+    }
+
+    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+    {
+        return storePtrWithPatch(ImmPtr(0), address);
+    }
+
+    // Floating point operators
+    bool supportsFloatingPoint() const
+    {
+        return s_isVFPPresent;
+    }
+
+    bool supportsFloatingPointTruncate() const
+    {
+        return false;
+    }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
+    }
+
+    void loadDouble(void* address, FPRegisterID dest)
+    {
+        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
+        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        m_assembler.doubleTransfer(false, src, address.base, address.offset);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.faddd_r(dest, dest, src);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, ARMRegisters::SD0);
+        addDouble(ARMRegisters::SD0, dest);
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fdivd_r(dest, dest, src);
+    }
+
+    void divDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT_NOT_REACHED(); // Untested
+        loadDouble(src, ARMRegisters::SD0);
+        divDouble(ARMRegisters::SD0, dest);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fsubd_r(dest, dest, src);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, ARMRegisters::SD0);
+        subDouble(ARMRegisters::SD0, dest);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmuld_r(dest, dest, src);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, ARMRegisters::SD0);
+        mulDouble(ARMRegisters::SD0, dest);
+    }
+
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.fmsr_r(dest, src);
+        m_assembler.fsitod_r(dest, dest);
+    }
+
+    void convertInt32ToDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT_NOT_REACHED(); // Untested
+        // flds is not worth the effort here
+        load32(src, ARMRegisters::S1);
+        convertInt32ToDouble(ARMRegisters::S1, dest);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    {
+        ASSERT_NOT_REACHED(); // Untested
+        // flds is not worth the effort here
+        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
+        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
+        convertInt32ToDouble(ARMRegisters::S1, dest);
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.fcmpd_r(left, right);
+        m_assembler.fmstat();
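+        // For conditions carrying DoubleConditionBitSpecial, fold the unordered case
+        // into the integer flags: CMP S0, S0 executes only when the V flag (set by
+        // fmstat for unordered comparisons) is on and forces Z=1, giving NaN operands
+        // the desired behaviour in the branch below.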
+        if (cond & DoubleConditionBitSpecial)
+            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
+        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
+    }
+
+    // Truncates 'src' to an integer, and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, INT_MIN).
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        (void)(src);
+        (void)(dest);
+        ASSERT_NOT_REACHED();
+        return jump();
+    }
+
+    // Converts 'src' to an integer, and places the result in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+    {
+        m_assembler.ftosid_r(ARMRegisters::SD0, src);
+        m_assembler.fmrs_r(dest, ARMRegisters::SD0);
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
+
+        // If the result is zero, it might have been -0.0, and 0.0 compares equal to -0.0
+        failureCases.append(branchTest32(Zero, dest));
+    }
+
+    void zeroDouble(FPRegisterID srcDest)
+    {
+        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
+        convertInt32ToDouble(ARMRegisters::S0, srcDest);
+    }
+
+protected:
+    ARMAssembler::Condition ARMCondition(Condition cond)
+    {
+        return static_cast<ARMAssembler::Condition>(cond);
+    }
+
+    void ensureSpace(int insnSpace, int constSpace)
+    {
+        m_assembler.ensureSpace(insnSpace, constSpace);
+    }
+
+    int sizeOfConstantPool()
+    {
+        return m_assembler.sizeOfConstantPool();
+    }
+
+#if WTF_ARM_ARCH_VERSION < 5
+    void prepareCall()
+    {
+        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
+
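+        // In ARM state PC reads as the current instruction's address + 8, so LR ends
+        // up pointing just past the following load-into-PC, i.e. at the return
+        // address, which turns that load into a call on pre-v5 cores.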
+        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
+    }
+#endif
+
+#if WTF_ARM_ARCH_VERSION < 5
+    void call32(RegisterID base, int32_t offset)
+    {
+        if (base == ARMRegisters::sp)
+            offset += 4;
+
+        if (offset >= 0) {
+            if (offset <= 0xfff) {
+                prepareCall();
+                m_assembler.dtr_u(true, ARMRegisters::pc, base, offset);
+            } else if (offset <= 0xfffff) {
+                m_assembler.add_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+                prepareCall();
+                m_assembler.dtr_u(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
+            } else {
+                ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+                prepareCall();
+                m_assembler.dtr_ur(true, ARMRegisters::pc, base, reg);
+            }
+        } else  {
+            offset = -offset;
+            if (offset <= 0xfff) {
+                prepareCall();
+                m_assembler.dtr_d(true, ARMRegisters::pc, base, offset);
+            } else if (offset <= 0xfffff) {
+                m_assembler.sub_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+                prepareCall();
+                m_assembler.dtr_d(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
+            } else {
+                ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+                prepareCall();
+                m_assembler.dtr_dr(true, ARMRegisters::pc, base, reg);
+            }
+        }
+    }
+#else
+    void call32(RegisterID base, int32_t offset)
+    {
+        // TODO: Why is SP special?
+        if (base == ARMRegisters::sp)
+            offset += 4;
+
+        // Branch to the address stored in base+offset, using one of the
+        // following sequences:
+        // ----
+        //  LDR     ip, [base, ±offset]
+        //  BLX     ip
+        // ----
+        //  ADD/SUB ip, base, #(offset & 0xff000)
+        //  LDR     ip, [ip, #(offset & 0xfff)]
+        //  BLX     ip
+        // ----
+        //  LDR     ip, =offset
+        //  LDR     ip, [base, ±ip]
+        //  BLX     ip
+
+        if (offset >= 0) {
+            if (offset <= 0xfff) {
+                m_assembler.dtr_u(true, ARMRegisters::S0, base, offset);
+            } else if (offset <= 0xfffff) {
+                m_assembler.add_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+                m_assembler.dtr_u(true, ARMRegisters::S0, ARMRegisters::S0, offset & 0xfff);
+            } else {
+                ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+                m_assembler.dtr_ur(true, ARMRegisters::S0, base, reg);
+            }
+        } else  {
+            offset = -offset;
+            if (offset <= 0xfff) {
+                m_assembler.dtr_d(true, ARMRegisters::S0, base, offset);
+            } else if (offset <= 0xfffff) {
+                m_assembler.sub_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+                m_assembler.dtr_d(true, ARMRegisters::S0, ARMRegisters::S0, offset & 0xfff);
+            } else {
+                ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+                m_assembler.dtr_dr(true, ARMRegisters::S0, base, reg);
+            }
+        }
+        m_assembler.blx_r(ARMRegisters::S0);
+    }
+#endif
+
+private:
+    friend class LinkBuffer;
+    friend class RepatchBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        ARMAssembler::linkCall(code, call.m_jmp, function.value());
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static const bool s_isVFPPresent;
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // MacroAssemblerARM_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,1133 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef MacroAssemblerARMv7_h
+#define MacroAssemblerARMv7_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
+    // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
+    //        - dTR is likely used more than aTR, and we'll get better instruction
+    //        encoding if it's in the low 8 registers.
+    static const ARMRegisters::RegisterID dataTempRegister = ARMRegisters::ip;
+    static const RegisterID addressTempRegister = ARMRegisters::r3;
+    static const FPRegisterID fpTempRegister = ARMRegisters::d7;
+    static const unsigned int TotalRegisters = 16;
+
+    struct ArmAddress {
+        enum AddressType {
+            HasOffset,
+            HasIndex,
+        } type;
+        RegisterID base;
+        union {
+            int32_t offset;
+            struct {
+                RegisterID index;
+                Scale scale;
+            };
+        } u;
+        
+        explicit ArmAddress(RegisterID base, int32_t offset = 0)
+            : type(HasOffset)
+            , base(base)
+        {
+            u.offset = offset;
+        }
+        
+        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+            : type(HasIndex)
+            , base(base)
+        {
+            u.index = index;
+            u.scale = scale;
+        }
+    };
+    
+public:
+
+    static const Scale ScalePtr = TimesFour;
+
+    enum Condition {
+        Equal = ARMv7Assembler::ConditionEQ,
+        NotEqual = ARMv7Assembler::ConditionNE,
+        Above = ARMv7Assembler::ConditionHI,
+        AboveOrEqual = ARMv7Assembler::ConditionHS,
+        Below = ARMv7Assembler::ConditionLO,
+        BelowOrEqual = ARMv7Assembler::ConditionLS,
+        GreaterThan = ARMv7Assembler::ConditionGT,
+        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+        LessThan = ARMv7Assembler::ConditionLT,
+        LessThanOrEqual = ARMv7Assembler::ConditionLE,
+        Overflow = ARMv7Assembler::ConditionVS,
+        Signed = ARMv7Assembler::ConditionMI,
+        Zero = ARMv7Assembler::ConditionEQ,
+        NonZero = ARMv7Assembler::ConditionNE
+    };
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = ARMv7Assembler::ConditionEQ,
+        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
+        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+        DoubleLessThan = ARMv7Assembler::ConditionLO,
+        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
+        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
+        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
+        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
+        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
+    };
+
+    static const RegisterID stackPointerRegister = ARMRegisters::sp;
+    static const RegisterID linkRegister = ARMRegisters::lr;
+
+    // Integer arithmetic operations:
+    //
+    // Operations are typically two operand - operation(source, srcDst)
+    // For many operations the source may be an Imm32, the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+    // object).
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.add(dest, dest, src);
+    }
+
+    void add32(Imm32 imm, RegisterID dest)
+    {
+        add32(imm, dest, dest);
+    }
+
+    void add32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add(dest, src, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.add(dest, src, dataTempRegister);
+        }
+    }
+
+    void add32(Imm32 imm, Address address)
+    {
+        load32(address, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        add32(dataTempRegister, dest);
+    }
+
+    void add32(Imm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.ARM_and(dest, dest, src);
+    }
+
+    void and32(Imm32 imm, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.ARM_and(dest, dest, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.ARM_and(dest, dest, dataTempRegister);
+        }
+    }
+
+    void lshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        // Clamp the shift to the range 0..31
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+        ASSERT(armImm.isValid());
+        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+
+        m_assembler.lsl(dest, dest, dataTempRegister);
+    }
+
+    void lshift32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.smull(dest, dataTempRegister, dest, src);
+    }
+
+    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, dataTempRegister);
+        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+    }
+
+    void not32(RegisterID srcDest)
+    {
+        m_assembler.mvn(srcDest, srcDest);
+    }
+
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orr(dest, dest, src);
+    }
+
+    void or32(Imm32 imm, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.orr(dest, dest, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.orr(dest, dest, dataTempRegister);
+        }
+    }
+
+    void rshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        // Clamp the shift to the range 0..31
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+        ASSERT(armImm.isValid());
+        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+
+        m_assembler.asr(dest, dest, dataTempRegister);
+    }
+
+    void rshift32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.asr(dest, dest, imm.m_value & 0x1f);
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.sub(dest, dest, src);
+    }
+
+    void sub32(Imm32 imm, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub(dest, dest, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.sub(dest, dest, dataTempRegister);
+        }
+    }
+
+    void sub32(Imm32 imm, Address address)
+    {
+        load32(address, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        load32(src, dataTempRegister);
+        sub32(dataTempRegister, dest);
+    }
+
+    void sub32(Imm32 imm, AbsoluteAddress address)
+    {
+        load32(address.m_ptr, dataTempRegister);
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+        else {
+            // Hrrrm, since dataTempRegister holds the data loaded,
+            // use addressTempRegister to hold the immediate.
+            move(imm, addressTempRegister);
+            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+        }
+
+        store32(dataTempRegister, address.m_ptr);
+    }
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.eor(dest, dest, src);
+    }
+
+    void xor32(Imm32 imm, RegisterID dest)
+    {
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.eor(dest, dest, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.eor(dest, dest, dataTempRegister);
+        }
+    }
+    
+
+    // Memory access operations:
+    //
+    // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address).  The source for a store may be an Imm32.  Address
+    // operand objects to loads and store will be implicitly constructed if a
+    // register is passed.
+
+private:
+    void load32(ArmAddress address, RegisterID dest)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.ldr(dest, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+        }
+    }
+
+    void load16(ArmAddress address, RegisterID dest)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.ldrh(dest, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+        }
+    }
+
+    void store32(RegisterID src, ArmAddress address)
+    {
+        if (address.type == ArmAddress::HasIndex)
+            m_assembler.str(src, address.base, address.u.index, address.u.scale);
+        else if (address.u.offset >= 0) {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+            ASSERT(armImm.isValid());
+            m_assembler.str(src, address.base, armImm);
+        } else {
+            ASSERT(address.u.offset >= -255);
+            m_assembler.str(src, address.base, address.u.offset, true, false);
+        }
+    }
+
+public:
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        load32(setupArmAddress(address), dest);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        load32(setupArmAddress(address), dest);
+    }
+
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(setupArmAddress(address), dest);
+    }
+
+    void load32(void* address, RegisterID dest)
+    {
+        move(ImmPtr(address), addressTempRegister);
+        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+        load32(ArmAddress(address.base, dataTempRegister), dest);
+        return label;
+    }
+
+    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+    {
+        Label label(this);
+        moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
+        load32(ArmAddress(address.base, dataTempRegister), dest);
+        return label;
+    }
+
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+        store32(src, ArmAddress(address.base, dataTempRegister));
+        return label;
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        store32(src, setupArmAddress(address));
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        store32(src, setupArmAddress(address));
+    }
+
+    void store32(Imm32 imm, ImplicitAddress address)
+    {
+        move(imm, dataTempRegister);
+        store32(dataTempRegister, setupArmAddress(address));
+    }
+
+    void store32(RegisterID src, void* address)
+    {
+        move(ImmPtr(address), addressTempRegister);
+        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    void store32(Imm32 imm, void* address)
+    {
+        move(imm, dataTempRegister);
+        store32(dataTempRegister, address);
+    }
+
+
+    // Floating-point operations:
+
+    bool supportsFloatingPoint() const { return true; }
+    // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
+    // If a value is not representable as an integer, and possibly for some values that are,
+    // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
+    // a branch will be taken.  It is not clear whether this interface will be well suited to
+    // other platforms.  On ARMv7 the hardware truncation operation produces multiple possible
+    // failure values (saturates to INT_MIN & INT_MAX, NaN results in a value of 0).  This is a
+    // temporary solution while we work out what this interface should be.  Either we need to
+    // decide to make this interface work on all platforms, rework the interface to make it more
+    // generic, or decide that the MacroAssembler cannot practically be used to abstract these
+    // operations, and make clients go directly to the m_assembler to plant truncation instructions.
+    // In short, FIXME:.
+    bool supportsFloatingPointTruncate() const { return false; }
+
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        RegisterID base = address.base;
+        int32_t offset = address.offset;
+
+        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+            add32(Imm32(offset), base, addressTempRegister);
+            base = addressTempRegister;
+            offset = 0;
+        }
+        
+        m_assembler.vldr(dest, base, offset);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        RegisterID base = address.base;
+        int32_t offset = address.offset;
+
+        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+            add32(Imm32(offset), base, addressTempRegister);
+            base = addressTempRegister;
+            offset = 0;
+        }
+        
+        m_assembler.vstr(src, base, offset);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vadd_F64(dest, dest, src);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        addDouble(fpTempRegister, dest);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vsub_F64(dest, dest, src);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        subDouble(fpTempRegister, dest);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vmul_F64(dest, dest, src);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        loadDouble(src, fpTempRegister);
+        mulDouble(fpTempRegister, dest);
+    }
+
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        m_assembler.vmov(fpTempRegister, src);
+        m_assembler.vcvt_F64_S32(dest, fpTempRegister);
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        m_assembler.vcmp_F64(left, right);
+        m_assembler.vmrs_APSR_nzcv_FPSCR();
+
+        if (cond == DoubleNotEqual) {
+            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+            unordered.link(this);
+            return result;
+        }
+        if (cond == DoubleEqualOrUnordered) {
+            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+            unordered.link(this);
+            // We get here if either unordered, or equal.
+            Jump result = makeJump();
+            notEqual.link(this);
+            return result;
+        }
+        return makeBranch(cond);
+    }
+
+    Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
+    {
+        ASSERT_NOT_REACHED();
+        return jump();
+    }
+
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data.  Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack.  Peek and poke operations read or write
+    // values on the stack, without moving the current stack position.
+    
+    void pop(RegisterID dest)
+    {
+        // load postindexed with writeback
+        m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+    }
+
+    void push(RegisterID src)
+    {
+        // store preindexed with writeback
+        m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+    }
+
+    void push(Address address)
+    {
+        load32(address, dataTempRegister);
+        push(dataTempRegister);
+    }
+
+    void push(Imm32 imm)
+    {
+        move(imm, dataTempRegister);
+        push(dataTempRegister);
+    }
+
+    // Register move operations:
+    //
+    // Move values in registers.
+
+    void move(Imm32 imm, RegisterID dest)
+    {
+        uint32_t value = imm.m_value;
+
+        if (imm.m_isPointer)
+            moveFixedWidthEncoding(imm, dest);
+        else {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+            if (armImm.isValid())
+                m_assembler.mov(dest, armImm);
+            else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+                m_assembler.mvn(dest, armImm);
+            else {
+                m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+                if (value & 0xffff0000)
+                    m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+            }
+        }
+    }
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        m_assembler.mov(dest, src);
+    }
+
+    void move(ImmPtr imm, RegisterID dest)
+    {
+        move(Imm32(imm), dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        move(reg1, dataTempRegister);
+        move(reg2, reg1);
+        move(dataTempRegister, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            move(src, dest);
+    }
+
+
+    // Forwards / external control flow operations:
+    //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively, for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+    //
+    // jz and jnz test whether the first operand is equal to zero, and take
+    // an optional second operand of a mask under which to perform the test.
+private:
+
+    // Should we be using TEQ for equal/not-equal?
+    void compare32(RegisterID left, Imm32 right)
+    {
+        int32_t imm = right.m_value;
+        if (!imm)
+            m_assembler.tst(left, left);
+        else {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+            if (armImm.isValid())
+                m_assembler.cmp(left, armImm);
+            else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+                m_assembler.cmn(left, armImm);
+            else {
+                move(Imm32(imm), dataTempRegister);
+                m_assembler.cmp(left, dataTempRegister);
+            }
+        }
+    }
+
+    void test32(RegisterID reg, Imm32 mask)
+    {
+        int32_t imm = mask.m_value;
+
+        if (imm == -1)
+            m_assembler.tst(reg, reg);
+        else {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+            if (armImm.isValid())
+                m_assembler.tst(reg, armImm);
+            else {
+                move(mask, dataTempRegister);
+                m_assembler.tst(reg, dataTempRegister);
+            }
+        }
+    }
+
+public:
+    Jump branch32(Condition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmp(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(Condition cond, RegisterID left, Imm32 right)
+    {
+        compare32(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(Condition cond, RegisterID left, Address right)
+    {
+        load32(right, dataTempRegister);
+        return branch32(cond, left, dataTempRegister);
+    }
+
+    Jump branch32(Condition cond, Address left, RegisterID right)
+    {
+        load32(left, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(Condition cond, Address left, Imm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32WithUnalignedHalfWords(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, dataTempRegister);
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left.m_ptr, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+    {
+        load16(left, dataTempRegister);
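+        // Shift both operands into the top halfword so that only the 16 bits of
+        // interest take part in the comparison.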
+        m_assembler.lsl(addressTempRegister, right, 16);
+        m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
+        return branch32(cond, dataTempRegister, addressTempRegister);
+    }
+
+    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load16(left, addressTempRegister);
+        m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
+        return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
+    }
+
+    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        m_assembler.tst(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        test32(reg, mask);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+        load32(address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask);
+    }
+
+    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+        load32(address, addressTempRegister);
+        return branchTest32(cond, addressTempRegister, mask);
+    }
+
+    Jump jump()
+    {
+        return Jump(makeJump());
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.bx(target);
+    }
+
+    // Address is a memory location containing the address to jump to
+    void jump(Address address)
+    {
+        load32(address, dataTempRegister);
+        m_assembler.bx(dataTempRegister);
+    }
+
+
+    // Arithmetic control flow operations:
+    //
+    // This set of conditional branch operations branch based
+    // on the result of an arithmetic operation.  The operation
+    // is performed as normal, storing the result.
+    //
+    // * jz operations branch if the result is zero.
+    // * jo operations branch if the (signed) arithmetic
+    //   operation caused an overflow to occur.
+    
+    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        m_assembler.add_S(dest, dest, src);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.add_S(dest, dest, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.add_S(dest, dest, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT(cond == Overflow);
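+        // smull produces a 64-bit result; the multiply overflowed 32 bits iff the
+        // high word is not simply the sign-extension of the low word.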
+        m_assembler.smull(dest, dataTempRegister, dest, src);
+        m_assembler.asr(addressTempRegister, dest, 31);
+        return branch32(NotEqual, addressTempRegister, dataTempRegister);
+    }
+
+    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        ASSERT(cond == Overflow);
+        move(imm, dataTempRegister);
+        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+        m_assembler.asr(addressTempRegister, dest, 31);
+        return branch32(NotEqual, addressTempRegister, dataTempRegister);
+    }
+
+    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        m_assembler.sub_S(dest, dest, src);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+        if (armImm.isValid())
+            m_assembler.sub_S(dest, dest, armImm);
+        else {
+            move(imm, dataTempRegister);
+            m_assembler.sub_S(dest, dest, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+    
+
+    // Miscellaneous operations:
+
+    void breakpoint()
+    {
+        m_assembler.bkpt();
+    }
+
+    Call nearCall()
+    {
+        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+    }
+
+    Call call()
+    {
+        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+    }
+
+    Call call(RegisterID target)
+    {
+        return Call(m_assembler.blx(target), Call::None);
+    }
+
+    Call call(Address address)
+    {
+        load32(address, dataTempRegister);
+        return Call(m_assembler.blx(dataTempRegister), Call::None);
+    }
+
+    void ret()
+    {
+        m_assembler.bx(linkRegister);
+    }
+
+    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmp(left, right);
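+        // ITE block: the first mov (dest = 1) executes when 'cond' holds, the
+        // second (dest = 0) executes otherwise.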
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        compare32(left, right);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+    // dest-src, operations always have a dest? ... possibly not true, considering
+    // asm ops like test, or pseudo ops like pop().
+    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    {
+        load32(address, dataTempRegister);
+        test32(dataTempRegister, mask);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+
+    DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
+    {
+        moveFixedWidthEncoding(imm, dst);
+        return DataLabel32(this);
+    }
+
+    DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
+    {
+        moveFixedWidthEncoding(Imm32(imm), dst);
+        return DataLabelPtr(this);
+    }
+
+    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    {
+        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+        return branch32(cond, left, dataTempRegister);
+    }
+
+    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    {
+        load32(left, addressTempRegister);
+        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+        return branch32(cond, addressTempRegister, dataTempRegister);
+    }
+
+    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+    {
+        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+        store32(dataTempRegister, address);
+        return label;
+    }
+    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
+
+
+    Call tailRecursiveCall()
+    {
+        // Like a normal call, but don't link.
+        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        oldJump.link(this);
+        return tailRecursiveCall();
+    }
+
+
+protected:
+    ARMv7Assembler::JmpSrc makeJump()
+    {
+        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+        return m_assembler.bx(dataTempRegister);
+    }
+
+    ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
+    {
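+        // Predicate the whole mov/movt/bx sequence with an IT block so the branch
+        // is only taken when 'cond' holds.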
+        m_assembler.it(cond, true, true);
+        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+        return m_assembler.bx(dataTempRegister);
+    }
+    ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
+    ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+    ArmAddress setupArmAddress(BaseIndex address)
+    {
+        if (address.offset) {
+            ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+            if (imm.isValid())
+                m_assembler.add(addressTempRegister, address.base, imm);
+            else {
+                move(Imm32(address.offset), addressTempRegister);
+                m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+            }
+
+            return ArmAddress(addressTempRegister, address.index, address.scale);
+        } else
+            return ArmAddress(address.base, address.index, address.scale);
+    }
+
+    ArmAddress setupArmAddress(Address address)
+    {
+        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+            return ArmAddress(address.base, address.offset);
+
+        move(Imm32(address.offset), addressTempRegister);
+        return ArmAddress(address.base, addressTempRegister);
+    }
+
+    ArmAddress setupArmAddress(ImplicitAddress address)
+    {
+        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+            return ArmAddress(address.base, address.offset);
+
+        move(Imm32(address.offset), addressTempRegister);
+        return ArmAddress(address.base, addressTempRegister);
+    }
+
+    RegisterID makeBaseIndexBase(BaseIndex address)
+    {
+        if (!address.offset)
+            return address.base;
+
+        ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+        if (imm.isValid())
+            m_assembler.add(addressTempRegister, address.base, imm);
+        else {
+            move(Imm32(address.offset), addressTempRegister);
+            m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+        }
+
+        return addressTempRegister;
+    }
+
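+    // Emit the constant as a fixed-width MOVW/MOVT pair so that the patching
+    // helpers (moveWithPatch, DataLabel32/DataLabelPtr) can later rewrite it in place.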
+    void moveFixedWidthEncoding(Imm32 imm, RegisterID dst)
+    {
+        uint32_t value = imm.m_value;
+        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+    }
+
+    ARMv7Assembler::Condition armV7Condition(Condition cond)
+    {
+        return static_cast<ARMv7Assembler::Condition>(cond);
+    }
+
+    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+    {
+        return static_cast<ARMv7Assembler::Condition>(cond);
+    }
+
+private:
+    friend class LinkBuffer;
+    friend class RepatchBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARMv7_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerCodeRef.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef MacroAssemblerCodeRef_h
+#define MacroAssemblerCodeRef_h
+
+#include <wtf/Platform.h>
+#include <jit/ExecutableAllocator.h>
+
+#if ENABLE_ASSEMBLER
+
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, check any alignment requirements).
+#if WTF_CPU_ARM_THUMB2
+// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
+// into the processor are decorated with the bottom bit set, indicating that this is
+// thumb code (as opposed to 32-bit traditional ARM).  The first test checks for both
+// decorated and undecorated null, and the second test ensures that the pointer is
+// decorated.
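+// For example, a Thumb routine whose first instruction is at 0x00008000 is referred
+// to by the code pointer 0x00008001; clearing the low bit recovers the data address.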
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+    ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
+    ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+    ASSERT(!(offset & 1)) // Must be multiple of 2.
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+    ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
+#endif
+
+namespace JSC {
+
+// FunctionPtr:
+//
+// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
+// (particularly, the stub functions).
+class FunctionPtr {
+public:
+    FunctionPtr()
+        : m_value(0)
+    {
+    }
+
+    template<typename FunctionType>
+    explicit FunctionPtr(FunctionType* value)
+#if WTF_COMPILER_RVCT
+     // The RVCT compiler needs a C-style cast as it fails with the following error
+     // Error:  #694: reinterpret_cast cannot cast away const or other type qualifiers
+        : m_value((void*)(value))
+#else
+        : m_value(reinterpret_cast<void*>(value))
+#endif
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    void* value() const { return m_value; }
+    void* executableAddress() const { return m_value; }
+
+
+private:
+    void* m_value;
+};
+
+// ReturnAddressPtr:
+//
+// ReturnAddressPtr should be used to wrap return addresses generated by processor
+// 'call' instructions executed in JIT code.  We use return addresses to look up
+// exception and optimization information, and to repatch the call instruction
+// that is the source of the return address.
+class ReturnAddressPtr {
+public:
+    ReturnAddressPtr()
+        : m_value(0)
+    {
+    }
+
+    explicit ReturnAddressPtr(void* value)
+        : m_value(value)
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    explicit ReturnAddressPtr(FunctionPtr function)
+        : m_value(function.value())
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    void* value() const { return m_value; }
+
+private:
+    void* m_value;
+};
+
+// MacroAssemblerCodePtr:
+//
+// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
+class MacroAssemblerCodePtr {
+public:
+    MacroAssemblerCodePtr()
+        : m_value(0)
+    {
+    }
+
+    explicit MacroAssemblerCodePtr(void* value)
+#if WTF_CPU_ARM_THUMB2
+        // Decorate the pointer as a thumb code pointer.
+        : m_value(reinterpret_cast<char*>(value) + 1)
+#else
+        : m_value(value)
+#endif
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
+        : m_value(ra.value())
+    {
+        ASSERT_VALID_CODE_POINTER(m_value);
+    }
+
+    void* executableAddress() const { return m_value; }
+#if WTF_CPU_ARM_THUMB2
+    // To use this pointer as a data address remove the decoration.
+    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+    void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
+
+    bool operator!()
+    {
+        return !m_value;
+    }
+
+private:
+    void* m_value;
+};
+
+// MacroAssemblerCodeRef:
+//
+// A reference to a section of JIT generated code.  A CodeRef consists of a
+// pointer to the code, and a ref pointer to the pool from within which it
+// was allocated.
+class MacroAssemblerCodeRef {
+public:
+    MacroAssemblerCodeRef()
+        : m_size(0)
+    {
+    }
+
+  //MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
+    MacroAssemblerCodeRef(void* code, ExecutablePool* executablePool, size_t size)
+        : m_code(code)
+        , m_executablePool(executablePool)
+        , m_size(size)
+    {
+    }
+
+    MacroAssemblerCodePtr m_code;
+    //RefPtr<ExecutablePool> m_executablePool;
+    ExecutablePool* m_executablePool;
+    size_t m_size;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerCodeRef_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerX86.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef MacroAssemblerX86_h
+#define MacroAssemblerX86_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER && WTF_CPU_X86
+
+#include "MacroAssemblerX86Common.h"
+
+namespace JSC {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Common {
+public:
+    MacroAssemblerX86()
+        : m_isSSE2Present(isSSE2Present())
+    {
+    }
+
+    static const Scale ScalePtr = TimesFour;
+    static const unsigned int TotalRegisters = 8;
+
+    using MacroAssemblerX86Common::add32;
+    using MacroAssemblerX86Common::and32;
+    using MacroAssemblerX86Common::sub32;
+    using MacroAssemblerX86Common::or32;
+    using MacroAssemblerX86Common::load32;
+    using MacroAssemblerX86Common::store32;
+    using MacroAssemblerX86Common::branch32;
+    using MacroAssemblerX86Common::call;
+    using MacroAssemblerX86Common::loadDouble;
+    using MacroAssemblerX86Common::convertInt32ToDouble;
+
+    void add32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.leal_mr(imm.m_value, src, dest);
+    }
+
+    void lea(Address address, RegisterID dest)
+    {
+        m_assembler.leal_mr(address.offset, address.base, dest);
+    }
+
+    void lea(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.leal_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void add32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.addl_im(imm.m_value, address.m_ptr);
+    }
+    
+    void addWithCarry32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.adcl_im(imm.m_value, address.m_ptr);
+    }
+    
+    void and32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.andl_im(imm.m_value, address.m_ptr);
+    }
+    
+    void or32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.orl_im(imm.m_value, address.m_ptr);
+    }
+
+    void sub32(Imm32 imm, AbsoluteAddress address)
+    {
+        m_assembler.subl_im(imm.m_value, address.m_ptr);
+    }
+
+    void load32(void* address, RegisterID dest)
+    {
+        m_assembler.movl_mr(address, dest);
+    }
+
+    void loadDouble(void* address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_mr(address, dest);
+    }
+
+    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    {
+        m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
+    }
+
+    void store32(Imm32 imm, void* address)
+    {
+        m_assembler.movl_i32m(imm.m_value, address);
+    }
+
+    void store32(RegisterID src, void* address)
+    {
+        m_assembler.movl_rm(src, address);
+    }
+
+    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+    {
+        m_assembler.cmpl_rm(right, left.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+    {
+        m_assembler.cmpl_im(right.m_value, left.m_ptr);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Call call()
+    {
+        return Call(m_assembler.call(), Call::Linkable);
+    }
+
+    Call tailRecursiveCall()
+    {
+        return Call::fromTailJump(jump());
+    }
+
+    Call makeTailRecursiveCall(Jump oldJump)
+    {
+        return Call::fromTailJump(oldJump);
+    }
+
+
+    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+    {
+        m_assembler.movl_i32r(initialValue.asIntptr(), dest);
+        return DataLabelPtr(this);
+    }
+
+    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    {
+        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
+        dataLabel = DataLabelPtr(this);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    {
+        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
+        dataLabel = DataLabelPtr(this);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+    {
+        m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
+        return DataLabelPtr(this);
+    }
+
+    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+    {
+        Label label(this);
+        load32(address, dest);
+        return label;
+    }
+
+    bool supportsFloatingPoint() const { return m_isSSE2Present; }
+    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+    bool supportsFloatingPointTruncate() const { return m_isSSE2Present; }
+
+private:
+    const bool m_isSSE2Present;
+
+    friend class LinkBuffer;
+    friend class RepatchBuffer;
+
+    static void linkCall(void* code, Call call, FunctionPtr function)
+    {
+        X86Assembler::linkCall(code, call.m_jmp, function.value());
+    }
+
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerX86Common.h
@@ -0,0 +1,1055 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef MacroAssemblerX86Common_h
+#define MacroAssemblerX86Common_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER
+
+#include "X86Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+    static const int DoubleConditionBitInvert = 0x10;
+    static const int DoubleConditionBitSpecial = 0x20;
+    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
+public:
+
+    enum Condition {
+        Equal = X86Assembler::ConditionE,
+        NotEqual = X86Assembler::ConditionNE,
+        Above = X86Assembler::ConditionA,
+        AboveOrEqual = X86Assembler::ConditionAE,
+        Below = X86Assembler::ConditionB,
+        BelowOrEqual = X86Assembler::ConditionBE,
+        GreaterThan = X86Assembler::ConditionG,
+        GreaterThanOrEqual = X86Assembler::ConditionGE,
+        LessThan = X86Assembler::ConditionL,
+        LessThanOrEqual = X86Assembler::ConditionLE,
+        Overflow = X86Assembler::ConditionO,
+        Signed = X86Assembler::ConditionS,
+        Zero = X86Assembler::ConditionE,
+        NonZero = X86Assembler::ConditionNE
+    };
+
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
+        DoubleNotEqual = X86Assembler::ConditionNE,
+        DoubleGreaterThan = X86Assembler::ConditionA,
+        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
+        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = X86Assembler::ConditionE,
+        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE
+    };
+    COMPILE_ASSERT(
+        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
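+
+    // For example (explanatory note): DoubleLessThan is encoded as
+    // ConditionA | DoubleConditionBitInvert; when branchDouble() below sees the
+    // invert bit it swaps the ucomisd operand order and then emits a plain 'ja',
+    // so "left < right" is tested as "right > left" on the swapped comparison.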
+
+    static const RegisterID stackPointerRegister = X86Registers::esp;
+
+    static inline bool CanUse8Bit(RegisterID reg) {
+        return !!((1 << reg) & ~((1 << X86Registers::esp) |
+                                 (1 << X86Registers::edi) |
+                                 (1 << X86Registers::esi) |
+                                 (1 << X86Registers::ebp)));
+    }
+
+    // Integer arithmetic operations:
+    //
+    // Operations are typically two operand - operation(source, srcDst).
+    // For many operations the source may be an Imm32, and the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+    // object).
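+    //
+    // A hypothetical usage sketch ('masm', 'regA' and 'base' are placeholder
+    // names):
+    //     masm.add32(Imm32(4), regA);              // regA += 4
+    //     masm.and32(regA, Address(base, 8));      // [base + 8] &= regA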
+
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.addl_rr(src, dest);
+    }
+
+    void add32(Imm32 imm, Address address)
+    {
+        m_assembler.addl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void add32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.addl_ir(imm.m_value, dest);
+    }
+    
+    void add32(Address src, RegisterID dest)
+    {
+        m_assembler.addl_mr(src.offset, src.base, dest);
+    }
+
+    void add32(RegisterID src, Address dest)
+    {
+        m_assembler.addl_rm(src, dest.offset, dest.base);
+    }
+    
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.andl_rr(src, dest);
+    }
+
+    void and32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.andl_ir(imm.m_value, dest);
+    }
+
+    void and32(RegisterID src, Address dest)
+    {
+        m_assembler.andl_rm(src, dest.offset, dest.base);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        m_assembler.andl_mr(src.offset, src.base, dest);
+    }
+
+    void and32(Imm32 imm, Address address)
+    {
+        m_assembler.andl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void lshift32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.shll_i8r(imm.m_value, dest);
+    }
+    
+    void lshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        // On x86 we can only shift by ecx; if asked to shift by another register we'll
+        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+        if (shift_amount != X86Registers::ecx) {
+            swap(shift_amount, X86Registers::ecx);
+
+            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
+            if (dest == shift_amount)
+                m_assembler.shll_CLr(X86Registers::ecx);
+            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
+            else if (dest == X86Registers::ecx)
+                m_assembler.shll_CLr(shift_amount);
+            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
+            else
+                m_assembler.shll_CLr(dest);
+        
+            swap(shift_amount, X86Registers::ecx);
+        } else
+            m_assembler.shll_CLr(dest);
+    }
+    
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.imull_rr(src, dest);
+    }
+
+    void mul32(Address src, RegisterID dest)
+    {
+        m_assembler.imull_mr(src.offset, src.base, dest);
+    }
+    
+    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.imull_i32r(src, imm.m_value, dest);
+    }
+
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.negl_r(srcDest);
+    }
+
+    void neg32(Address srcDest)
+    {
+        m_assembler.negl_m(srcDest.offset, srcDest.base);
+    }
+
+    void not32(RegisterID srcDest)
+    {
+        m_assembler.notl_r(srcDest);
+    }
+
+    void not32(Address srcDest)
+    {
+        m_assembler.notl_m(srcDest.offset, srcDest.base);
+    }
+    
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orl_rr(src, dest);
+    }
+
+    void or32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.orl_ir(imm.m_value, dest);
+    }
+
+    void or32(RegisterID src, Address dest)
+    {
+        m_assembler.orl_rm(src, dest.offset, dest.base);
+    }
+
+    void or32(Address src, RegisterID dest)
+    {
+        m_assembler.orl_mr(src.offset, src.base, dest);
+    }
+
+    void or32(Imm32 imm, Address address)
+    {
+        m_assembler.orl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void rshift32(RegisterID shift_amount, RegisterID dest)
+    {
+        // On x86 we can only shift by ecx; if asked to shift by another register we'll
+        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+        if (shift_amount != X86Registers::ecx) {
+            swap(shift_amount, X86Registers::ecx);
+
+            // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
+            if (dest == shift_amount)
+                m_assembler.sarl_CLr(X86Registers::ecx);
+            // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
+            else if (dest == X86Registers::ecx)
+                m_assembler.sarl_CLr(shift_amount);
+            // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
+            else
+                m_assembler.sarl_CLr(dest);
+        
+            swap(shift_amount, X86Registers::ecx);
+        } else
+            m_assembler.sarl_CLr(dest);
+    }
+
+    void rshift32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.sarl_i8r(imm.m_value, dest);
+    }
+
+    void sub32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.subl_rr(src, dest);
+    }
+    
+    void sub32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.subl_ir(imm.m_value, dest);
+    }
+    
+    void sub32(Imm32 imm, Address address)
+    {
+        m_assembler.subl_im(imm.m_value, address.offset, address.base);
+    }
+
+    void sub32(Address src, RegisterID dest)
+    {
+        m_assembler.subl_mr(src.offset, src.base, dest);
+    }
+
+    void sub32(RegisterID src, Address dest)
+    {
+        m_assembler.subl_rm(src, dest.offset, dest.base);
+    }
+
+
+    void xor32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.xorl_rr(src, dest);
+    }
+
+    void xor32(Imm32 imm, Address dest)
+    {
+        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+    }
+
+    void xor32(Imm32 imm, RegisterID dest)
+    {
+        m_assembler.xorl_ir(imm.m_value, dest);
+    }
+
+    void xor32(RegisterID src, Address dest)
+    {
+        m_assembler.xorl_rm(src, dest.offset, dest.base);
+    }
+
+    void xor32(Address src, RegisterID dest)
+    {
+        m_assembler.xorl_mr(src.offset, src.base, dest);
+    }
+    
+
+    // Memory access operations:
+    //
+    // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address).  The source for a store may be an Imm32.  Address
+    // operand objects to loads and stores will be implicitly constructed if a
+    // register is passed.
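+    //
+    // A hypothetical usage sketch (placeholder names):
+    //     masm.load32(Address(base, 0), regA);      // regA = *(int32_t*)(base + 0)
+    //     masm.store32(Imm32(7), Address(base, 4)); // *(int32_t*)(base + 4) = 7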
+
+    void load32(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.movl_mr(address.offset, address.base, dest);
+    }
+
+    void load32(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
+
+    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+        return DataLabel32(this);
+    }
+
+    void load16(Address address, RegisterID dest)
+    {
+        m_assembler.movzwl_mr(address.offset, address.base, dest);
+    }
+
+    void load16(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        m_assembler.movl_rm_disp32(src, address.offset, address.base);
+        return DataLabel32(this);
+    }
+
+    void store32(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.movl_rm(src, address.offset, address.base);
+    }
+
+    void store32(Imm32 imm, BaseIndex address)
+    {
+        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void store32(RegisterID src, BaseIndex address)
+    {
+        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+
+    void store32(Imm32 imm, ImplicitAddress address)
+    {
+        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+    }
+
+
+    // Floating-point operations:
+    //
+    // Presently only supports SSE, not x87 floating point.
+
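+    // A hypothetical usage sketch (placeholder names); only valid when
+    // isSSE2Present() returns true:
+    //     masm.loadDouble(Address(base, 0), fpA);  // fpA = *(double*)(base + 0)
+    //     masm.addDouble(fpB, fpA);                // fpA += fpB
+    //     masm.storeDouble(fpA, Address(base, 8)); // *(double*)(base + 8) = fpA
+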
+    void loadDouble(ImplicitAddress address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_mr(address.offset, address.base, dest);
+    }
+
+    void storeDouble(FPRegisterID src, ImplicitAddress address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_rm(src, address.offset, address.base);
+    }
+
+    void addDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.addsd_rr(src, dest);
+    }
+
+    void addDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.addsd_mr(src.offset, src.base, dest);
+    }
+
+    void divDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.divsd_rr(src, dest);
+    }
+
+    void divDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.divsd_mr(src.offset, src.base, dest);
+    }
+
+    void subDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.subsd_rr(src, dest);
+    }
+
+    void subDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.subsd_mr(src.offset, src.base, dest);
+    }
+
+    void mulDouble(FPRegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.mulsd_rr(src, dest);
+    }
+
+    void mulDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.mulsd_mr(src.offset, src.base, dest);
+    }
+
+    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2sd_rr(src, dest);
+    }
+
+    void convertInt32ToDouble(Address src, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
+    }
+
+    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            Jump isUnordered(m_assembler.jp());
+            Jump result = Jump(m_assembler.je());
+            isUnordered.link(this);
+            return result;
+        } else if (cond == DoubleNotEqualOrUnordered) {
+            Jump isUnordered(m_assembler.jp());
+            Jump isEqual(m_assembler.je());
+            isUnordered.link(this);
+            Jump result = jump();
+            isEqual.link(this);
+            return result;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
+    }
+
+    // Truncates 'src' to an integer, and places the resulting value in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, INT_MIN).
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+        return branch32(Equal, dest, Imm32(0x80000000));
+    }
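+
+    // A hypothetical usage sketch (placeholder names):
+    //     Jump notInt32 = masm.branchTruncateDoubleToInt32(fpA, regA);
+    //     /* fast path: regA now holds the truncated integer */
+    //     notInt32.link(&masm);                    // taken when cvttsd2si yields 0x80000000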
+
+    // Converts 'src' to an integer, and places the resulting value in 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+
+        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+        failureCases.append(branchTest32(Zero, dest));
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        convertInt32ToDouble(dest, fpTemp);
+        m_assembler.ucomisd_rr(fpTemp, src);
+        failureCases.append(m_assembler.jp());
+        failureCases.append(m_assembler.jne());
+    }
+
+    void zeroDouble(FPRegisterID srcDest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.xorpd_rr(srcDest, srcDest);
+    }
+
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data.  Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack.  Peek and poke operations read or write
+    // values on the stack, without moving the current stack position.
+    
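+    // A hypothetical usage sketch (placeholder names):
+    //     masm.push(regA);                         // spill regA to the stack
+    //     /* ... */
+    //     masm.pop(regA);                          // restore it
+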
+    void pop(RegisterID dest)
+    {
+        m_assembler.pop_r(dest);
+    }
+
+    void push(RegisterID src)
+    {
+        m_assembler.push_r(src);
+    }
+
+    void push(Address address)
+    {
+        m_assembler.push_m(address.offset, address.base);
+    }
+
+    void push(Imm32 imm)
+    {
+        m_assembler.push_i32(imm.m_value);
+    }
+
+
+    // Register move operations:
+    //
+    // Move values in registers.
+
+    void move(Imm32 imm, RegisterID dest)
+    {
+        // Note: on 64-bit the Imm32 value is zero extended into the register;
+        // it may be useful to have a separate version that sign extends the value?
+        if (!imm.m_value)
+            m_assembler.xorl_rr(dest, dest);
+        else
+            m_assembler.movl_i32r(imm.m_value, dest);
+    }
+
+#if WTF_CPU_X86_64
+    void move(RegisterID src, RegisterID dest)
+    {
+        // Note: on 64-bit this is a full register move; perhaps it would be
+        // useful to have separate move32 & movePtr, with move32 zero extending?
+        if (src != dest)
+            m_assembler.movq_rr(src, dest);
+    }
+
+    void move(ImmPtr imm, RegisterID dest)
+    {
+        m_assembler.movq_i64r(imm.asIntptr(), dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        if (reg1 != reg2)
+            m_assembler.xchgq_rr(reg1, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movsxd_rr(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        m_assembler.movl_rr(src, dest);
+    }
+#else
+    void move(RegisterID src, RegisterID dest)
+    {
+        if (src != dest)
+            m_assembler.movl_rr(src, dest);
+    }
+
+    void move(ImmPtr imm, RegisterID dest)
+    {
+        m_assembler.movl_i32r(imm.asIntptr(), dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        if (reg1 != reg2)
+            m_assembler.xchgl_rr(reg1, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+#endif
+
+
+    // Forwards / external control flow operations:
+    //
+    // This set of jump and conditional branch operations return a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value held
+    // in reg1, when treated as a signed 32bit value, is less than or equal to 5.
+    //
+    // The branchTest32 operations test whether the first operand is equal to
+    // zero, and take an optional second operand of a mask under which to
+    // perform the test.
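+    //
+    // A hypothetical usage sketch (placeholder names):
+    //     Jump skip = masm.branch32(Equal, regA, Imm32(0)); // forward branch
+    //     masm.add32(Imm32(1), regA);                       // run only when regA != 0
+    //     skip.link(&masm);                                  // bind the branch target here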
+
+public:
+    Jump branch32(Condition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmpl_rr(right, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(Condition cond, RegisterID left, Imm32 right)
+    {
+        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+            m_assembler.testl_rr(left, left);
+        else
+            m_assembler.cmpl_ir(right.m_value, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    // Branch based on a 32-bit comparison, forcing the size of the
+    // immediate operand to 32 bits in the native code stream.
+    Jump branch32_force32(Condition cond, RegisterID left, Imm32 right)
+    {
+        m_assembler.cmpl_ir_force32(right.m_value, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(Condition cond, RegisterID left, Address right)
+    {
+        m_assembler.cmpl_mr(right.offset, right.base, left);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branch32(Condition cond, Address left, RegisterID right)
+    {
+        m_assembler.cmpl_rm(right, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(Condition cond, Address left, Imm32 right)
+    {
+        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+    {
+        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+    {
+        return branch32(cond, left, right);
+    }
+
+    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+    {
+        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+    {
+        ASSERT(!(right.m_value & 0xFFFF0000));
+
+        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        m_assembler.testl_rr(reg, mask);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        // if we are only interested in the low seven bits, this can be tested with a testb
+        if (mask.m_value == -1)
+            m_assembler.testl_rr(reg, reg);
+        else if (CanUse8Bit(reg) && (mask.m_value & ~0x7f) == 0)
+            m_assembler.testb_i8r(mask.m_value, reg);
+        else
+            m_assembler.testl_i32r(mask.m_value, reg);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        if (mask.m_value == -1)
+            m_assembler.cmpl_im(0, address.offset, address.base);
+        else
+            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+    {
+        ASSERT((cond == Zero) || (cond == NonZero));
+        if (mask.m_value == -1)
+            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+        else
+            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump jump()
+    {
+        return Jump(m_assembler.jmp());
+    }
+
+    void jump(RegisterID target)
+    {
+        m_assembler.jmp_r(target);
+    }
+
+    // Address is a memory location containing the address to jump to
+    void jump(Address address)
+    {
+        m_assembler.jmp_m(address.offset, address.base);
+    }
+
+
+    // Arithmetic control flow operations:
+    //
+    // This set of conditional branch operations branch based
+    // on the result of an arithmetic operation.  The operation
+    // is performed as normal, storing the result.
+    //
+    // * branches with the Zero / NonZero conditions are taken based on
+    //   whether the result is zero.
+    // * branches with the Overflow condition are taken if the (signed)
+    //   arithmetic operation caused an overflow to occur.
+    
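+    // A hypothetical usage sketch (placeholder names):
+    //     Jump overflowed = masm.branchAdd32(Overflow, regB, regA); // regA += regB
+    //     /* non-overflow path */
+    //     overflowed.link(&masm);                  // handle signed overflow
+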
+    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        add32(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchAdd32(Condition cond, Imm32 src, Address dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd32(Condition cond, RegisterID src, Address dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd32(Condition cond, Address src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+        add32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT(cond == Overflow);
+        mul32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchMul32(Condition cond, Address src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+        mul32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        ASSERT(cond == Overflow);
+        mul32(imm, src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        sub32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+    
+    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+        sub32(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(Condition cond, Imm32 imm, Address dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+        sub32(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(Condition cond, RegisterID src, Address dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+        sub32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub32(Condition cond, Address src, RegisterID dest)
+    {
+        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+        sub32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+    {
+        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+        or32(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+
+    // Miscellaneous operations:
+
+    void breakpoint()
+    {
+        m_assembler.int3();
+    }
+
+    Call nearCall()
+    {
+        return Call(m_assembler.call(), Call::LinkableNear);
+    }
+
+    Call call(RegisterID target)
+    {
+        return Call(m_assembler.call(target), Call::None);
+    }
+
+    void call(Address address)
+    {
+        m_assembler.call_m(address.offset, address.base);
+    }
+
+    void ret()
+    {
+        m_assembler.ret();
+    }
+
+    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmpl_rr(right, left);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+    }
+
+    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmpl_mr(left.offset, left.base, right);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+    }
+
+    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+            m_assembler.testl_rr(left, left);
+        else
+            m_assembler.cmpl_ir(right.m_value, left);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+    }
+
+    void set32(Condition cond, Address left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmpl_rm(right, left.offset, left.base);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
+    void set32(Condition cond, RegisterID left, Address right, RegisterID dest)
+    {
+        m_assembler.cmpl_mr(right.offset, right.base, left);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
+    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmpl_rr(right, left);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
+    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    {
+        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+            m_assembler.testl_rr(left, left);
+        else
+            m_assembler.cmpl_ir(right.m_value, left);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
+    // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+    // dest-src, operations always have a dest? ... possibly not true, considering
+    // asm ops like test, or pseudo ops like pop().
+
+    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.cmpl_im(0, address.offset, address.base);
+        else
+            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+    }
+
+    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.cmpl_im(0, address.offset, address.base);
+        else
+            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
+protected:
+    X86Assembler::Condition x86Condition(Condition cond)
+    {
+        return static_cast<X86Assembler::Condition>(cond);
+    }
+
+private:
+    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
+    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
+    friend class MacroAssemblerX86;
+
+#if WTF_CPU_X86
+#if WTF_PLATFORM_MAC
+
+    // All X86 Macs are guaranteed to support at least SSE2.
+    static bool isSSE2Present()
+    {
+        return true;
+    }
+
+#else // PLATFORM(MAC)
+
+    enum SSE2CheckState {
+        NotCheckedSSE2,
+        HasSSE2,
+        NoSSE2
+    };
+
+    static bool isSSE2Present()
+    {
+        if (s_sse2CheckState == NotCheckedSSE2) {
+            // Default the flags value to zero; if the compiler is
+            // not MSVC or GCC we will read this as SSE2 not present.
+            int flags = 0;
+#if WTF_COMPILER_MSVC
+            _asm {
+                mov eax, 1 // cpuid function 1 gives us the standard feature set
+                cpuid;
+                mov flags, edx;
+            }
+#elif WTF_COMPILER_GCC
+            asm (
+                 "movl $0x1, %%eax;"
+                 "pushl %%ebx;"
+                 "cpuid;"
+                 "popl %%ebx;"
+                 "movl %%edx, %0;"
+                 : "=g" (flags)
+                 :
+                 : "%eax", "%ecx", "%edx"
+                 );
+#endif
+            static const int SSE2FeatureBit = 1 << 26;
+            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
+        }
+        // Only check once.
+        ASSERT(s_sse2CheckState != NotCheckedSSE2);
+
+        return s_sse2CheckState == HasSSE2;
+    }
+    
+    static SSE2CheckState s_sse2CheckState;
+
+#endif // PLATFORM(MAC)
+#elif !defined(NDEBUG) // CPU(X86)
+
+    // On x86-64 we should never be checking for SSE2 in a non-debug build,
+    // but debug builds add this method to keep the asserts above happy.
+    static bool isSSE2Present()
+    {
+        return true;
+    }
+
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86Common_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerX86_64.h
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the