Bug 1181612: Merge macro assemblers; r=luke
author      Benjamin Bouvier <benj@benj.me>
date        Tue, 24 Nov 2015 21:26:07 +0100
changeset   308336 4c1c5106ea3f9668e82f6b0586fdcbe6cd7cf990
parent      308335 55ab2a060b85d2a55359421e8d1ce3f0b39746f7
child       308337 a813fc2595b94533d41ac66c7e16a43e34f1876a
push id     5513
push user   raliiev@mozilla.com
push date   Mon, 25 Jan 2016 13:55:34 +0000
reviewers   luke
bugs        1181612
milestone   45.0a1
js/src/asmjs/AsmJSModule.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/Label.h
js/src/jit/MacroAssembler-inl.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips32/Assembler-mips32.cpp
js/src/jit/mips32/Assembler-mips32.h
js/src/jit/mips64/Assembler-mips64.cpp
js/src/jit/mips64/Assembler-mips64.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x64/Trampoline-x64.cpp
js/src/jit/x86-shared/Assembler-x86-shared.cpp
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
js/src/jit/x86-shared/BaseAssembler-x86-shared.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.h
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/jit/x86/MacroAssembler-x86.h
js/src/jit/x86/Trampoline-x86.cpp
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -336,42 +336,32 @@ AsmJSModule::finish(ExclusiveContext* cx
     }
 
     // Relative link metadata: absolute addresses that refer to another point within
     // the asm.js module.
 
     // CodeLabels are used for switch cases and loads from floating-point /
     // SIMD values in the constant pool.
     for (size_t i = 0; i < masm.numCodeLabels(); i++) {
-        CodeLabel src = masm.codeLabel(i);
-        int32_t labelOffset = src.dest()->offset();
-        int32_t targetOffset = src.src()->offset();
-        // The patched uses of a label embed a linked list where the
-        // to-be-patched immediate is the offset of the next to-be-patched
-        // instruction.
-        while (labelOffset != LabelBase::INVALID_OFFSET) {
-            size_t patchAtOffset = masm.labelOffsetToPatchOffset(labelOffset);
-            RelativeLink link(RelativeLink::CodeLabel);
-            link.patchAtOffset = patchAtOffset;
-            link.targetOffset = targetOffset;
-            if (!staticLinkData_.relativeLinks.append(link))
-                return false;
-
-            labelOffset = Assembler::ExtractCodeLabelOffset(code_ + patchAtOffset);
-        }
+        CodeLabel cl = masm.codeLabel(i);
+        RelativeLink link(RelativeLink::CodeLabel);
+        link.patchAtOffset = masm.labelToPatchOffset(*cl.patchAt());
+        link.targetOffset = cl.target()->offset();
+        if (!staticLinkData_.relativeLinks.append(link))
+            return false;
     }
 
 #if defined(JS_CODEGEN_X86)
     // Global data accesses in x86 need to be patched with the absolute
     // address of the global. Globals are allocated sequentially after the
    // code section so we can just use a RelativeLink.
     for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) {
         AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i);
         RelativeLink link(RelativeLink::InstructionImmediate);
-        link.patchAtOffset = masm.labelOffsetToPatchOffset(a.patchAt.offset());
+        link.patchAtOffset = masm.labelToPatchOffset(a.patchAt);
         link.targetOffset = offsetOfGlobalData() + a.globalDataOffset;
         if (!staticLinkData_.relativeLinks.append(link))
             return false;
     }
 #endif
 
 #if defined(JS_CODEGEN_MIPS32)
     // On MIPS we need to update all the long jumps because they contain an
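The deleted loop above walked a use chain threaded through the code itself: each to-be-patched word held the offset of the next use, terminated by INVALID_OFFSET. Under the new scheme a CodeLabel records exactly one patch point, so each label contributes a single RelativeLink. A minimal sketch of the two shapes, using stand-in types rather than the real jit:: classes:

    // Sketch only: simplified stand-ins for the real SpiderMonkey types.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    static const int32_t INVALID_OFFSET = -1;

    // Old scheme: follow the linked list embedded in the code, one link per use.
    void collectLinksOld(const uint8_t* code, int32_t firstUse,
                         std::vector<int32_t>& patchOffsets)
    {
        int32_t use = firstUse;
        while (use != INVALID_OFFSET) {
            patchOffsets.push_back(use);
            int32_t next;                            // the word to patch
            memcpy(&next, code + use, sizeof(next)); // currently holds the next use
            use = next;
        }
    }

    // New scheme: one CodeLabel per use, so there is nothing to follow.
    void collectLinkNew(int32_t patchAt, std::vector<int32_t>& patchOffsets)
    {
        patchOffsets.push_back(patchAt);
    }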
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -886,32 +886,32 @@ BaselineCompiler::emitProfilerEnterFrame
     // Store stack position to lastProfilingFrame variable, guarded by a toggled jump.
     // Starts off initially disabled.
     Label noInstrument;
     CodeOffsetLabel toggleOffset = masm.toggledJump(&noInstrument);
     masm.profilerEnterFrame(masm.getStackPointer(), R0.scratchReg());
     masm.bind(&noInstrument);
 
     // Store the start offset in the appropriate location.
-    MOZ_ASSERT(profilerEnterFrameToggleOffset_.offset() == 0);
+    MOZ_ASSERT(!profilerEnterFrameToggleOffset_.used());
     profilerEnterFrameToggleOffset_ = toggleOffset;
 }
 
 void
 BaselineCompiler::emitProfilerExitFrame()
 {
     // Store previous frame to lastProfilingFrame variable, guarded by a toggled jump.
     // Starts off initially disabled.
     Label noInstrument;
     CodeOffsetLabel toggleOffset = masm.toggledJump(&noInstrument);
     masm.profilerExitFrame();
     masm.bind(&noInstrument);
 
     // Store the start offset in the appropriate location.
-    MOZ_ASSERT(profilerExitFrameToggleOffset_.offset() == 0);
+    MOZ_ASSERT(!profilerExitFrameToggleOffset_.used());
     profilerExitFrameToggleOffset_ = toggleOffset;
 }
 
 MethodStatus
 BaselineCompiler::emitBody()
 {
     MOZ_ASSERT(pc == script->code());
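The old assertions relied on offset 0 meaning "never set", which is fragile because 0 is a legitimate code offset. The new used() predicate checks a dedicated sentinel instead; a hedged sketch of the difference (the real CodeOffsetLabel appears in Assembler-shared.h below):

    // Sketch: a dedicated "not used" sentinel instead of offset() == 0.
    #include <cassert>
    #include <cstddef>

    class OffsetLabelSketch {
        static const size_t NOT_USED = size_t(-1); // never a valid code offset
        size_t offset_ = NOT_USED;
      public:
        bool used() const { return offset_ != NOT_USED; }
        void use(size_t offset) { assert(!used()); offset_ = offset; }
    };

    int main() {
        OffsetLabelSketch toggle;
        assert(!toggle.used()); // still correct if the real offset will be 0
        toggle.use(0);          // offset 0 is a real position, not "unset"
        assert(toggle.used());
    }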
 
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -7800,16 +7800,17 @@ CodeGenerator::generateAsmJS(AsmJSFuncti
     // The only remaining work needed to compile this function is to patch the
     // switch-statement jump tables (the entries of the table need the absolute
    // address of the cases). These table entries are accumulated as CodeLabels
    // in the MacroAssembler's codeLabels_ list and processed all at once in
     // the "static-link" phase of module compilation. It is critical that there
     // is nothing else to do after this point since the LifoAlloc memory
     // holding the MIR graph is about to be popped and reused. In particular,
     // every step in CodeGenerator::link must be a nop, as asserted here:
+    MOZ_ASSERT(!masm.failureLabel()->used());
     MOZ_ASSERT(snapshots_.listSize() == 0);
     MOZ_ASSERT(snapshots_.RVATableSize() == 0);
     MOZ_ASSERT(recovers_.size() == 0);
     MOZ_ASSERT(bailouts_.empty());
     MOZ_ASSERT(graph.numConstants() == 0);
     MOZ_ASSERT(safepointIndices_.empty());
     MOZ_ASSERT(osiIndices_.empty());
     MOZ_ASSERT(cacheList_.empty());
--- a/js/src/jit/Label.h
+++ b/js/src/jit/Label.h
@@ -32,40 +32,47 @@ struct LabelBase
     // future incoming edges will be immediately patched.
     bool bound() const {
         return bound_;
     }
     int32_t offset() const {
         MOZ_ASSERT(bound() || used());
         return offset_;
     }
+    void offsetBy(int32_t delta) {
+        MOZ_ASSERT(bound() || used());
+        MOZ_ASSERT(offset() + delta >= offset(), "no overflow");
+        mozilla::DebugOnly<int32_t> oldOffset(offset());
+        offset_ += delta;
+        MOZ_ASSERT(offset_ == delta + oldOffset, "new offset fits in 31 bits");
+    }
     // Returns whether the label is not bound, but has incoming uses.
     bool used() const {
         return !bound() && offset_ > INVALID_OFFSET;
     }
     // Binds the label, fixing its final position in the code stream.
     void bind(int32_t offset) {
         MOZ_ASSERT(!bound());
         offset_ = offset;
         bound_ = true;
-        MOZ_ASSERT(offset_ == offset);
+        MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
     }
     // Marks the label as neither bound nor used.
     void reset() {
         offset_ = INVALID_OFFSET;
         bound_ = false;
     }
     // Sets the label's latest used position, returning the old use position in
     // the process.
     int32_t use(int32_t offset) {
         MOZ_ASSERT(!bound());
 
         int32_t old = offset_;
         offset_ = offset;
-        MOZ_ASSERT(offset_ == offset);
+        MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
 
         return old;
     }
 };
 
 // A label represents a position in an assembly buffer that may or may not have
 // already been generated. Labels can either be "bound" or "unbound", the
 // former meaning that its position is known and the latter that its position
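offsetBy() exists so that a label recorded in one buffer stays meaningful after that buffer is appended at some delta into another, which is what asmMergeWith does further down. A small sketch of the intended arithmetic, with made-up offsets:

    // Sketch: rebasing a label offset when one buffer is appended to another.
    #include <cassert>
    #include <cstdint>

    int32_t shiftOffset(int32_t offset, int32_t delta) {
        int64_t shifted = int64_t(offset) + delta;
        assert(shifted == int32_t(shifted) && "new offset fits in 31 bits");
        return int32_t(shifted);
    }

    int main() {
        const int32_t delta = 0x4000;  // size of the destination buffer
        const int32_t label = 0x120;   // offset within the appended buffer
        assert(shiftOffset(label, delta) == 0x4120);
    }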
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -305,17 +305,17 @@ void
 MacroAssembler::leaveExitFrame(size_t extraFrame)
 {
     freeStack(ExitFooterFrame::Size() + extraFrame);
 }
 
 bool
 MacroAssembler::hasSelfReference() const
 {
-    return selfReferencePatch_.offset() != 0;
+    return selfReferencePatch_.used();
 }
 
 //}}} check_macroassembler_style
 // ===============================================================
 
 void
 MacroAssembler::branchFunctionKind(Condition cond, JSFunction::FunctionKind kind, Register fun,
                                    Register scratch, Label* label)
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -1997,16 +1997,22 @@ MacroAssembler::convertTypedOrValueToInt
       case MIRType_Object:
         jump(fail);
         break;
       default:
         MOZ_CRASH("Bad MIRType");
     }
 }
 
+bool
+MacroAssembler::asmMergeWith(const MacroAssembler& other)
+{
+    return MacroAssemblerSpecific::asmMergeWith(other);
+}
+
 void
 MacroAssembler::finish()
 {
     if (failureLabel_.used()) {
         bind(&failureLabel_);
         handleFailure();
     }
 
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1331,16 +1331,17 @@ class MacroAssembler : public MacroAssem
         // Exceptions are currently handled the same way as sequential failures.
         return &failureLabel_;
     }
 
     Label* failureLabel() {
         return &failureLabel_;
     }
 
+    bool asmMergeWith(const MacroAssembler& masm);
     void finish();
     void link(JitCode* code);
 
     void assumeUnreachable(const char* output);
 
     template<typename T>
     void assertTestInt32(Condition cond, const T& value, const char* output);
 
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -628,16 +628,25 @@ jit::PatchJump(CodeLocationJump& jump_, 
 void
 Assembler::finish()
 {
     flush();
     MOZ_ASSERT(!isFinished);
     isFinished = true;
 }
 
+bool
+Assembler::asmMergeWith(const Assembler& other)
+{
+    flush();
+    if (!AssemblerShared::asmMergeWith(size(), other))
+        return false;
+    return m_buffer.appendBuffer(other.m_buffer);
+}
+
 void
 Assembler::executableCopy(uint8_t* buffer)
 {
     MOZ_ASSERT(isFinished);
     m_buffer.executableCopy(buffer);
     AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
 }
 
@@ -932,38 +941,29 @@ Assembler::trace(JSTracer* trc)
     }
 }
 
 void
 Assembler::processCodeLabels(uint8_t* rawCode)
 {
     for (size_t i = 0; i < codeLabels_.length(); i++) {
         CodeLabel label = codeLabels_[i];
-        Bind(rawCode, label.dest(), rawCode + label.src()->offset());
+        Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
     }
 }
 
 void
-Assembler::writeCodePointer(AbsoluteLabel* absoluteLabel) {
-    MOZ_ASSERT(!absoluteLabel->bound());
+Assembler::writeCodePointer(CodeOffsetLabel* label) {
     BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);
-
-    // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list
-    // of uses of an AbsoluteLabel through the assembly. ARM only uses labels
-    // for the case statements of switch jump tables. Thus, for simplicity, we
-    // simply treat the AbsoluteLabel as a label and bind it to the offset of
-    // the jump table entry that needs to be patched.
-    LabelBase* label = absoluteLabel;
-    label->bind(off.getOffset());
+    label->use(off.getOffset());
 }
 
 void
-Assembler::Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address)
+Assembler::Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address)
 {
-    // See writeCodePointer comment.
     *reinterpret_cast<const void**>(rawCode + label->offset()) = address;
 }
 
 Assembler::Condition
 Assembler::InvertCondition(Condition cond)
 {
     const uint32_t ConditionInversionBit = 0x10000000;
     return Condition(ConditionInversionBit ^ cond);
@@ -2871,16 +2871,50 @@ Assembler::retarget(Label* label, Label*
             DebugOnly<uint32_t> prev = target->use(label->offset());
             MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
         }
     }
     label->reset();
 
 }
 
+void
+Assembler::retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target)
+{
+    if (!label->used())
+        return;
+
+    MOZ_ASSERT(!target->bound());
+    bool more;
+    BufferOffset labelBranchOffset(label->offset() + baseOffset);
+    do {
+        BufferOffset next;
+        more = nextLink(labelBranchOffset, &next);
+
+        Instruction branch = *editSrc(labelBranchOffset);
+        Condition c = branch.extractCond();
+        int32_t prev = target->use(labelBranchOffset.getOffset());
+
+        MOZ_RELEASE_ASSERT(prev == Label::INVALID_OFFSET || unsigned(prev) < size());
+
+        BOffImm newOffset;
+        if (prev != Label::INVALID_OFFSET)
+            newOffset = BOffImm(prev);
+
+        if (branch.is<InstBImm>())
+            as_b(newOffset, c, labelBranchOffset);
+        else if (branch.is<InstBLImm>())
+            as_bl(newOffset, c, labelBranchOffset);
+        else
+            MOZ_CRASH("crazy fixup!");
+
+        labelBranchOffset = BufferOffset(next.getOffset() + baseOffset);
+    } while (more);
+}
+
 static int stopBKPT = -1;
 void
 Assembler::as_bkpt()
 {
     // This is a count of how many times a breakpoint instruction has been
     // generated. It is embedded into the instruction for debugging
    // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
     // breakpoint with the number xxx embedded into it. If this breakpoint is
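In asmMergeWith above, the ordering matters: the destination's pools are flushed first, the delta is the destination's size at that point, and only then is the other buffer appended. A sketch of that sequencing with stand-in types (not the real m_buffer or AssemblerShared):

    // Sketch of the merge sequencing only.
    #include <cstddef>

    struct BufferSketch {
        size_t length = 0;
        void flushPools() {} // pending constant pools go out first
        bool append(const BufferSketch& other) { length += other.length; return true; }
    };

    struct SharedSketch {
        bool mergeShifted(size_t /*delta*/, const SharedSketch&) { return true; }
    };

    struct AssemblerSketch {
        BufferSketch buffer;
        SharedSketch shared;

        bool asmMergeWith(const AssemblerSketch& other) {
            buffer.flushPools();          // flush() in the real code
            size_t delta = buffer.length; // size() after the flush, not before
            if (!shared.mergeShifted(delta, other.shared))
                return false;
            return buffer.append(other.buffer);
        }
    };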
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1392,16 +1392,17 @@ class Assembler : public AssemblerShared
     static const Register getStackPointer() {
         return StackPointer;
     }
 
   private:
     bool isFinished;
   public:
     void finish();
+    bool asmMergeWith(const Assembler& other);
     void executableCopy(void* buffer);
     void copyJumpRelocationTable(uint8_t* dest);
     void copyDataRelocationTable(uint8_t* dest);
     void copyPreBarrierTable(uint8_t* dest);
 
     // Size of the instruction stream, in bytes, after pools are flushed.
     size_t size() const;
     // Size of the jump relocation table, in bytes.
@@ -1424,17 +1425,17 @@ class Assembler : public AssemblerShared
     // be overwritten subsequently.
     BufferOffset allocBranchInst();
 
     // A static variant for the cases where we don't want to have an assembler
     // object.
     static void WriteInstStatic(uint32_t x, uint32_t* dest);
 
   public:
-    void writeCodePointer(AbsoluteLabel* label);
+    void writeCodePointer(CodeOffsetLabel* label);
 
     void haltingAlign(int alignment);
     void nopAlign(int alignment);
     BufferOffset as_nop();
     BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
                         ALUOp op, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_mov(Register dest,
                         Operand2 op2, SBit s = LeaveCC, Condition c = Always);
@@ -1685,25 +1686,26 @@ class Assembler : public AssemblerShared
 
     // Label operations.
     bool nextLink(BufferOffset b, BufferOffset* next);
     void bind(Label* label, BufferOffset boff = BufferOffset());
     void bind(RepatchLabel* label);
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
+    void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target);
     void retarget(Label* label, Label* target);
     // I'm going to pretend this doesn't exist for now.
     void retarget(Label* label, void* target, Relocation::Kind reloc);
 
-    void Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address);
+    void Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address);
 
     // See Bind
-    size_t labelOffsetToPatchOffset(size_t offset) {
-        return offset;
+    size_t labelToPatchOffset(CodeOffsetLabel label) {
+        return label.offset();
     }
 
     void as_bkpt();
 
   public:
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
@@ -1905,19 +1907,16 @@ class Assembler : public AssemblerShared
 
     static uint8_t* BailoutTableStart(uint8_t* code);
 
     static size_t ToggledCallSize(uint8_t* code);
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
     static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
     void processCodeLabels(uint8_t* rawCode);
-    static int32_t ExtractCodeLabelOffset(uint8_t* code) {
-        return *(uintptr_t*)code;
-    }
 
     bool bailed() {
         return m_buffer.bail();
     }
 
     void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                      const Disassembler::HeapAccess& heapAccess)
     {
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1026,17 +1026,17 @@ CodeGeneratorARM::visitOutOfLineTableSwi
     for (size_t i = 0; i < numCases; i++) {
         LBlock* caseblock = skipTrivialBlocks(mir->getCase(numCases - 1 - i))->lir();
         Label* caseheader = caseblock->label();
         uint32_t caseoffset = caseheader->offset();
 
         // The entries of the jump table need to be absolute addresses and thus
         // must be patched after codegen is finished.
         CodeLabel cl = ool->codeLabel(i);
-        cl.src()->bind(caseoffset);
+        cl.target()->use(caseoffset);
         masm.addCodeLabel(cl);
     }
 }
 
 void
 CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base)
 {
     // The code generated by this is utter hax.
@@ -1080,17 +1080,17 @@ CodeGeneratorARM::emitTableSwitchDispatc
     masm.ma_b(defaultcase);
 
     // To fill in the CodeLabels for the case entries, we need to first generate
     // the case entries (we don't yet know their offsets in the instruction
     // stream).
     OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
     for (int32_t i = 0; i < cases; i++) {
         CodeLabel cl;
-        masm.writeCodePointer(cl.dest());
+        masm.writeCodePointer(cl.patchAt());
         ool->addCodeLabel(cl);
     }
     addOutOfLineCode(ool, mir);
 }
 
 void
 CodeGeneratorARM::visitMathD(LMathD* math)
 {
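For the jump table, writeCodePointer() reserves a word and records its offset in patchAt, while target()->use(caseoffset) records where that word must eventually point; Bind() then writes the absolute case address at link time. A compact sketch of that final resolution step:

    // Sketch: resolving one jump-table CodeLabel at link time, as Bind() does.
    #include <cstdint>
    #include <cstring>

    void resolveCodeLabel(uint8_t* rawCode, size_t patchAt, size_t target) {
        void* absolute = rawCode + target; // absolute case address
        memcpy(rawCode + patchAt, &absolute, sizeof(absolute));
    }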
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -626,22 +626,16 @@ Assembler::FixupNurseryObjects(JSContext
         if (!hasNurseryPointers && IsInsideNursery(obj))
             hasNurseryPointers = true;
     }
 
     if (hasNurseryPointers)
         cx->runtime()->gc.storeBuffer.putWholeCell(code);
 }
 
-int32_t
-Assembler::ExtractCodeLabelOffset(uint8_t* code)
-{
-    return *(int32_t*)code;
-}
-
 void
 Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
 {
     MOZ_CRASH("PatchInstructionImmediate()");
 }
 
 void
 Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -172,16 +172,19 @@ class Assembler : public vixl::Assembler
   public:
     Assembler()
       : vixl::Assembler()
     { }
 
     typedef vixl::Condition Condition;
 
     void finish();
+    bool asmMergeWith(const Assembler& other) {
+        MOZ_CRASH("NYI");
+    }
     void trace(JSTracer* trc);
 
     // Emit the jump table, returning the BufferOffset to the first entry in the table.
     BufferOffset emitExtendedJumpTable();
     BufferOffset ExtendedJumpTable_;
     void executableCopy(uint8_t* buffer);
 
     BufferOffset immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op,
@@ -231,37 +234,42 @@ class Assembler : public vixl::Assembler
             jumpRelocationTableBytes() +
             dataRelocationTableBytes() +
             preBarrierTableBytes();
     }
 
     void processCodeLabels(uint8_t* rawCode) {
         for (size_t i = 0; i < codeLabels_.length(); i++) {
             CodeLabel label = codeLabels_[i];
-            Bind(rawCode, label.dest(), rawCode + label.src()->offset());
+            Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
         }
     }
 
-    void Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address) {
+    void Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address) {
         *reinterpret_cast<const void**>(rawCode + label->offset()) = address;
     }
 
     void retarget(Label* cur, Label* next);
+    void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
+        MOZ_CRASH("NYI");
+    }
 
     // The buffer is about to be linked. Ensure any constant pools or
     // excess bookkeeping has been flushed to the instruction stream.
     void flush() {
         armbuffer_.flushPool();
     }
 
     int actualIndex(int curOffset) {
         ARMBuffer::PoolEntry pe(curOffset);
         return armbuffer_.poolEntryOffset(pe);
     }
-    size_t labelOffsetToPatchOffset(size_t labelOff) { return labelOff; }
+    size_t labelToPatchOffset(CodeOffsetLabel label) {
+        return label.offset();
+    }
     static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index) {
         return code->raw() + index;
     }
     void setPrinter(Sprinter* sp) {
     }
 
     static bool SupportsFloatingPoint() { return true; }
     static bool SupportsSimd() { return js::jit::SupportsSimd; }
@@ -320,17 +328,16 @@ class Assembler : public vixl::Assembler
     // Toggle a jmp or cmp emitted by toggledJump().
     static void ToggleToJmp(CodeLocationLabel inst_);
     static void ToggleToCmp(CodeLocationLabel inst_);
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
-    static int32_t ExtractCodeLabelOffset(uint8_t* code);
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
 
     static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
                                     const ObjectVector& nurseryObjects);
 
   public:
     // A Jump table entry is 2 instructions, with 8 bytes of raw data
     static const size_t SizeOfJumpTableEntry = 16;
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -792,16 +792,19 @@ class AssemblerMIPSShared : public Assem
     static const Register getStackPointer() {
         return StackPointer;
     }
 
   protected:
     bool isFinished;
   public:
     void finish();
+    bool asmMergeWith(const AssemblerMIPSShared& other) {
+        MOZ_CRASH("NYI");
+    }
     void executableCopy(void* buffer);
     void copyJumpRelocationTable(uint8_t* dest);
     void copyDataRelocationTable(uint8_t* dest);
     void copyPreBarrierTable(uint8_t* dest);
 
     // Size of the instruction stream, in bytes.
     size_t size() const;
     // Size of the jump relocation table, in bytes.
@@ -1031,19 +1034,22 @@ class AssemblerMIPSShared : public Assem
     // label operations
     void bind(Label* label, BufferOffset boff = BufferOffset());
     virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
     virtual void Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address) = 0;
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
     void retarget(Label* label, Label* target);
+    void retargetWithOffset(size_t offset, const LabelBase* label, Label* target) {
+        MOZ_CRASH("NYI");
+    }
 
     // See Bind
-    size_t labelOffsetToPatchOffset(size_t offset) { return offset; }
+    size_t labelToPatchOffset(CodeOffsetLabel label) { return label.offset(); }
 
     void call(Label* label);
     void call(void* target);
 
     void as_break(uint32_t code);
     void as_sync(uint32_t stype = 0);
 
   public:
--- a/js/src/jit/mips32/Assembler-mips32.cpp
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -234,22 +234,16 @@ Assembler::trace(JSTracer* trc)
         }
     }
     if (dataRelocations_.length()) {
         CompactBufferReader reader(dataRelocations_);
         ::TraceDataRelocations(trc, &m_buffer, reader);
     }
 }
 
-int32_t
-Assembler::ExtractCodeLabelOffset(uint8_t* code) {
-    InstImm* inst = (InstImm*)code;
-    return Assembler::ExtractLuiOriValue(inst, inst->next());
-}
-
 void
 Assembler::Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address)
 {
     if (label->used()) {
         int32_t src = label->offset();
         do {
             Instruction* inst = (Instruction*) (rawCode + src);
             uint32_t next = Assembler::ExtractLuiOriValue(inst, inst->next());
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -148,17 +148,16 @@ class Assembler : public AssemblerMIPSSh
     static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
 
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
 
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
     static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
-    static int32_t ExtractCodeLabelOffset(uint8_t* code);
 }; // Assembler
 
 static const uint32_t NumIntArgRegs = 4;
 
 static inline bool
 GetIntArgReg(uint32_t usedArgSlots, Register* out)
 {
     if (usedArgSlots < NumIntArgRegs) {
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -228,23 +228,16 @@ Assembler::trace(JSTracer* trc)
         }
     }
     if (dataRelocations_.length()) {
         CompactBufferReader reader(dataRelocations_);
         ::TraceDataRelocations(trc, &m_buffer, reader);
     }
 }
 
-int64_t
-Assembler::ExtractCodeLabelOffset(uint8_t* code)
-{
-    Instruction* inst = (Instruction*)code;
-    return Assembler::ExtractLoad64Value(inst);
-}
-
 void
 Assembler::Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address)
 {
     if (label->used()) {
         int64_t src = label->offset();
         do {
             Instruction* inst = (Instruction*) (rawCode + src);
             uint64_t next = Assembler::ExtractLoad64Value(inst);
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -150,17 +150,16 @@ class Assembler : public AssemblerMIPSSh
     static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
 
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
 
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
     static void UpdateBoundsCheck(uint64_t logHeapSize, Instruction* inst);
-    static int64_t ExtractCodeLabelOffset(uint8_t* code);
 }; // Assembler
 
 static const uint32_t NumIntArgRegs = 8;
 static const uint32_t NumFloatArgRegs = NumIntArgRegs;
 
 static inline bool
 GetIntArgReg(uint32_t usedArgSlots, Register* out)
 {
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -119,17 +119,16 @@ class Assembler : public AssemblerShared
 
     template <typename T, typename S>
     static void PatchDataWithValueCheck(CodeLocationLabel, T, S) { MOZ_CRASH(); }
     static void PatchWrite_Imm32(CodeLocationLabel, Imm32) { MOZ_CRASH(); }
 
     static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) { MOZ_CRASH(); }
     static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); }
     static void PatchInstructionImmediate(uint8_t*, PatchedImmPtr) { MOZ_CRASH(); }
-    static int32_t ExtractCodeLabelOffset(uint8_t*) { MOZ_CRASH(); }
 
     static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); }
     static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
     static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); }
 
     static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); }
 
     void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
@@ -184,17 +183,17 @@ class MacroAssemblerNone : public Assemb
 
     template <typename T> void bind(T) { MOZ_CRASH(); }
     template <typename T> void j(Condition, T) { MOZ_CRASH(); }
     template <typename T> void jump(T) { MOZ_CRASH(); }
     void haltingAlign(size_t) { MOZ_CRASH(); }
     void nopAlign(size_t) { MOZ_CRASH(); }
     void checkStackAlignment() { MOZ_CRASH(); }
     uint32_t currentOffset() { MOZ_CRASH(); }
-    uint32_t labelOffsetToPatchOffset(uint32_t) { MOZ_CRASH(); }
+    uint32_t labelToPatchOffset(CodeOffsetLabel) { MOZ_CRASH(); }
     CodeOffsetLabel labelForPatch() { MOZ_CRASH(); }
 
     void nop() { MOZ_CRASH(); }
     void breakpoint() { MOZ_CRASH(); }
     void abiret() { MOZ_CRASH(); }
     void ret() { MOZ_CRASH(); }
 
     CodeOffsetLabel toggledJump(Label*) { MOZ_CRASH(); }
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -417,41 +417,80 @@ struct AbsoluteLabel : public LabelBase
     void bind() {
         bound_ = true;
 
         // These labels cannot be used after being bound.
         offset_ = -1;
     }
 };
 
+class CodeOffsetLabel
+{
+    size_t offset_;
+
+    static const size_t NOT_USED = size_t(-1);
+
+  public:
+    explicit CodeOffsetLabel(size_t offset) : offset_(offset) {}
+    CodeOffsetLabel() : offset_(NOT_USED) {}
+
+    size_t offset() const {
+        MOZ_ASSERT(used());
+        return offset_;
+    }
+
+    void use(size_t offset) {
+        MOZ_ASSERT(!used());
+        offset_ = offset;
+        MOZ_ASSERT(used());
+    }
+    bool used() const {
+        return offset_ != NOT_USED;
+    }
+
+    void offsetBy(size_t delta) {
+        MOZ_ASSERT(used());
+        MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
+        offset_ += delta;
+    }
+};
+
 // A code label contains an absolute reference to a point in the code. Thus, it
 // cannot be patched until after linking.
 // When the source label is resolved into a memory address, this address is
 // patched into the destination address.
 class CodeLabel
 {
     // The destination position, where the absolute reference should get
     // patched into.
-    AbsoluteLabel dest_;
+    CodeOffsetLabel patchAt_;
 
     // The source label (relative) in the code to which the destination should
     // get patched.
-    Label src_;
+    CodeOffsetLabel target_;
 
   public:
     CodeLabel()
     { }
-    explicit CodeLabel(const AbsoluteLabel& dest)
-       : dest_(dest)
+    explicit CodeLabel(const CodeOffsetLabel& patchAt)
+      : patchAt_(patchAt)
+    { }
+    CodeLabel(const CodeOffsetLabel& patchAt, const CodeOffsetLabel& target)
+      : patchAt_(patchAt),
+        target_(target)
     { }
-    AbsoluteLabel* dest() {
-        return &dest_;
+    CodeOffsetLabel* patchAt() {
+        return &patchAt_;
     }
-    Label* src() {
-        return &src_;
+    CodeOffsetLabel* target() {
+        return &target_;
+    }
+    void offsetBy(size_t delta) {
+        patchAt_.offsetBy(delta);
+        target_.offsetBy(delta);
     }
 };
 
 // Location of a jump or label in a generated JitCode block, relative to the
 // start of the block.
 
 class CodeOffsetJump
 {
@@ -479,29 +518,16 @@ class CodeOffsetJump
     }
 
     size_t offset() const {
         return offset_;
     }
     void fixup(MacroAssembler* masm);
 };
 
-class CodeOffsetLabel
-{
-    size_t offset_;
-
-  public:
-    explicit CodeOffsetLabel(size_t offset) : offset_(offset) {}
-    CodeOffsetLabel() : offset_(0) {}
-
-    size_t offset() const {
-        return offset_;
-    }
-};
-
 // Absolute location of a jump or a label in some generated JitCode block.
 // Can also encode a CodeOffset{Jump,Label}, such that the offset is initially
 // set and the absolute location later filled in after the final JitCode is
 // allocated.
 
 class CodeLocationJump
 {
     uint8_t* raw_;
@@ -678,16 +704,17 @@ class CallSite : public CallSiteDesc
 
     CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
       : CallSiteDesc(desc),
         returnAddressOffset_(returnAddressOffset),
         stackDepth_(stackDepth)
     { }
 
     void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
+    void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
     uint32_t returnAddressOffset() const { return returnAddressOffset_; }
 
     // The stackDepth measures the amount of stack space pushed since the
     // function was called. In particular, this includes the pushed return
     // address on all archs (whether or not the call instruction pushes the
     // return address (x86/x64) or the prologue does (ARM/MIPS)).
     uint32_t stackDepth() const { return stackDepth_; }
 };
@@ -804,16 +831,17 @@ class AsmJSHeapAccess
     {
         mozilla::PodZero(this);  // zero padding for Valgrind
         insnOffset_ = insnOffset;
     }
 #endif
 
     uint32_t insnOffset() const { return insnOffset_; }
     void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
+    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
 #if defined(JS_CODEGEN_X86)
     void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset_ + opLength_); }
 #endif
 #if defined(JS_CODEGEN_X64)
     bool throwOnOOB() const { return throwOnOOB_; }
     uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
 #endif
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
@@ -986,17 +1014,17 @@ class AssemblerShared
     void append(const CallSiteDesc& desc, CodeOffsetLabel label, size_t framePushed,
                 uint32_t targetIndex = CallSiteAndTarget::NOT_INTERNAL)
     {
         // framePushed does not include sizeof(AsmJSFrame), so add it in here (see
         // CallSite::stackDepth).
         CallSite callsite(desc, label.offset(), framePushed + sizeof(AsmJSFrame));
         enoughMemory_ &= callsites_.append(CallSiteAndTarget(callsite, targetIndex));
     }
-    const CallSiteAndTargetVector& callSites() const { return callsites_; }
+    CallSiteAndTargetVector& callSites() { return callsites_; }
 
     void append(AsmJSHeapAccess access) { enoughMemory_ &= asmJSHeapAccesses_.append(access); }
     AsmJSHeapAccessVector&& extractAsmJSHeapAccesses() { return Move(asmJSHeapAccesses_); }
 
     void append(AsmJSGlobalAccess access) { enoughMemory_ &= asmJSGlobalAccesses_.append(access); }
     size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
     AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
 
@@ -1010,14 +1038,45 @@ class AssemblerShared
         propagateOOM(codeLabels_.append(label));
     }
     size_t numCodeLabels() const {
         return codeLabels_.length();
     }
     CodeLabel codeLabel(size_t i) {
         return codeLabels_[i];
     }
+
+    // Merge the other assembler into this one, invalidating the other, by
+    // appending its metadata with all offsets shifted by a delta.
+    bool asmMergeWith(size_t delta, const AssemblerShared& other) {
+        size_t i = callsites_.length();
+        enoughMemory_ &= callsites_.appendAll(other.callsites_);
+        for (; i < callsites_.length(); i++)
+            callsites_[i].offsetReturnAddressBy(delta);
+
+        i = asmJSHeapAccesses_.length();
+        enoughMemory_ &= asmJSHeapAccesses_.appendAll(other.asmJSHeapAccesses_);
+        for (; i < asmJSHeapAccesses_.length(); i++)
+            asmJSHeapAccesses_[i].offsetInsnOffsetBy(delta);
+
+        i = asmJSGlobalAccesses_.length();
+        enoughMemory_ &= asmJSGlobalAccesses_.appendAll(other.asmJSGlobalAccesses_);
+        for (; i < asmJSGlobalAccesses_.length(); i++)
+            asmJSGlobalAccesses_[i].patchAt.offsetBy(delta);
+
+        i = asmJSAbsoluteLinks_.length();
+        enoughMemory_ &= asmJSAbsoluteLinks_.appendAll(other.asmJSAbsoluteLinks_);
+        for (; i < asmJSAbsoluteLinks_.length(); i++)
+            asmJSAbsoluteLinks_[i].patchAt.offsetBy(delta);
+
+        i = codeLabels_.length();
+        enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
+        for (; i < codeLabels_.length(); i++)
+            codeLabels_[i].offsetBy(delta);
+
+        return !oom();
+    }
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_shared_Assembler_shared_h */
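asmMergeWith repeats one pattern per metadata vector: remember the old length, append the other assembler's entries, then rebase only the appended tail. A generic sketch of that pattern (the real code tracks OOM through enoughMemory_ rather than a return value per append):

    // Sketch: the append-then-rebase pattern used for each metadata vector.
    #include <cstddef>
    #include <vector>

    template <typename T, typename ShiftFn>
    void appendShifted(std::vector<T>& dst, const std::vector<T>& src,
                       size_t delta, ShiftFn shift)
    {
        size_t firstNew = dst.size();
        dst.insert(dst.end(), src.begin(), src.end());
        for (size_t i = firstNew; i < dst.size(); i++)
            shift(dst[i], delta); // e.g. callsite.offsetReturnAddressBy(delta)
    }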
--- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -1094,16 +1094,29 @@ struct AssemblerBufferWithConstantPools 
         // The pools should have all been flushed, check.
         MOZ_ASSERT(pool_.numEntries() == 0);
         for (Slice* cur = getHead(); cur != nullptr; cur = cur->getNext()) {
             memcpy(dest, &cur->instructions[0], cur->length());
             dest += cur->length();
         }
     }
 
+    bool appendBuffer(const AssemblerBufferWithConstantPools& other) {
+        if (this->oom())
+            return false;
+        // The pools should have all been flushed, check.
+        MOZ_ASSERT(pool_.numEntries() == 0);
+        for (Slice* cur = other.getHead(); cur != nullptr; cur = cur->getNext()) {
+            this->putBytes(cur->length(), &cur->instructions[0]);
+            if (this->oom())
+                return false;
+        }
+        return true;
+    }
+
   public:
     size_t poolEntryOffset(PoolEntry pe) const {
         MOZ_ASSERT(pe.index() < poolEntryCount - pool_.numEntries(),
                    "Invalid pool entry, or not flushed yet.");
         // Find the pool containing pe.index().
         // The array is sorted, so we can use a binary search.
         auto b = poolInfo_.begin(), e = poolInfo_.end();
         // A note on asymmetric types in the upper_bound comparator:
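appendBuffer() can only run once the source's pools are flushed, at which point the other buffer is just a chain of raw slices to copy. A sketch of that slice walk, with putBytes standing in for the real buffer append:

    // Sketch: appending another buffer slice by slice.
    #include <cstddef>

    struct SliceSketch {
        const unsigned char* data;
        size_t length;
        const SliceSketch* next;
    };

    template <typename PutBytes>
    bool appendSlices(const SliceSketch* head, PutBytes putBytes)
    {
        for (const SliceSketch* cur = head; cur; cur = cur->next) {
            if (!putBytes(cur->data, cur->length)) // bail on OOM
                return false;
        }
        return true;
    }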
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -607,22 +607,19 @@ class Assembler : public AssemblerX86Sha
         movq(src, dest);
     }
     void mov(Imm32 imm32, const Operand& dest) {
         movq(imm32, dest);
     }
     void mov(Register src, Register dest) {
         movq(src, dest);
     }
-    void mov(AbsoluteLabel* label, Register dest) {
-        MOZ_ASSERT(!label->bound());
-        // Thread the patch list through the unpatched address word in the
-        // instruction stream.
-        masm.movq_i64r(label->prev(), dest.encoding());
-        label->setPrev(masm.size());
+    void mov(CodeOffsetLabel* label, Register dest) {
+        masm.movq_i64r(/* placeholder */ 0, dest.encoding());
+        label->use(masm.size());
     }
     void xchg(Register src, Register dest) {
         xchgq(src, dest);
     }
     void lea(const Operand& src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.leaq_mr(src.disp(), src.base(), dest.encoding());
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -27,89 +27,94 @@ MacroAssemblerX64::loadConstantDouble(do
     if (!dbl)
         return;
     // The constants will be stored in a pool appended to the text (see
     // finish()), so they will always be a fixed distance from the
     // instructions which reference them. This allows the instructions to use
     // PC-relative addressing. Use "jump" label support code, because we need
     // the same PC-relative address patching that jumps use.
     JmpSrc j = masm.vmovsd_ripr(dest.encoding());
-    JmpSrc prev = JmpSrc(dbl->uses.use(j.offset()));
-    masm.setNextJump(j, prev);
+    dbl->uses.append(CodeOffsetLabel(j.offset()));
 }
 
 void
 MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
 {
     if (maybeInlineFloat(f, dest))
         return;
     Float* flt = getFloat(f);
     if (!flt)
         return;
     // See comment in loadConstantDouble
     JmpSrc j = masm.vmovss_ripr(dest.encoding());
-    JmpSrc prev = JmpSrc(flt->uses.use(j.offset()));
-    masm.setNextJump(j, prev);
+    flt->uses.append(CodeOffsetLabel(j.offset()));
 }
 
 void
 MacroAssemblerX64::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
     if (maybeInlineInt32x4(v, dest))
         return;
     SimdData* val = getSimdData(v);
     if (!val)
         return;
     MOZ_ASSERT(val->type() == SimdConstant::Int32x4);
     JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
-    JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
-    masm.setNextJump(j, prev);
+    val->uses.append(CodeOffsetLabel(j.offset()));
 }
 
 void
 MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant&v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
     if (maybeInlineFloat32x4(v, dest))
         return;
     SimdData* val = getSimdData(v);
     if (!val)
         return;
     MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
     JmpSrc j = masm.vmovaps_ripr(dest.encoding());
-    JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
-    masm.setNextJump(j, prev);
+    val->uses.append(CodeOffsetLabel(j.offset()));
+}
+
+void
+MacroAssemblerX64::bindOffsets(const MacroAssemblerX86Shared::UsesVector& uses)
+{
+    for (CodeOffsetLabel use : uses) {
+        JmpDst dst(currentOffset());
+        JmpSrc src(use.offset());
+        // Using linkJump here is safe, as explained in the comment in
+        // loadConstantDouble.
+        masm.linkJump(src, dst);
+    }
 }
 
 void
 MacroAssemblerX64::finish()
 {
     if (!doubles_.empty())
         masm.haltingAlign(sizeof(double));
-    for (size_t i = 0; i < doubles_.length(); i++) {
-        Double& dbl = doubles_[i];
-        bind(&dbl.uses);
-        masm.doubleConstant(dbl.value);
+    for (const Double& d : doubles_) {
+        bindOffsets(d.uses);
+        masm.doubleConstant(d.value);
     }
 
     if (!floats_.empty())
         masm.haltingAlign(sizeof(float));
-    for (size_t i = 0; i < floats_.length(); i++) {
-        Float& flt = floats_[i];
-        bind(&flt.uses);
-        masm.floatConstant(flt.value);
+    for (const Float& f : floats_) {
+        bindOffsets(f.uses);
+        masm.floatConstant(f.value);
     }
 
     // SIMD memory values must be suitably aligned.
     if (!simds_.empty())
         masm.haltingAlign(SimdMemoryAlignment);
-    for (size_t i = 0; i < simds_.length(); i++) {
-        SimdData& v = simds_[i];
-        bind(&v.uses);
+    for (const SimdData& v : simds_) {
+        bindOffsets(v.uses);
         switch(v.type()) {
           case SimdConstant::Int32x4:   masm.int32x4Constant(v.value.asInt32x4());     break;
           case SimdConstant::Float32x4: masm.float32x4Constant(v.value.asFloat32x4()); break;
           default: MOZ_CRASH("unexpected SimdConstant type");
         }
     }
 
     MacroAssemblerX86Shared::finish();
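Each RIP-relative constant load now appends a single CodeOffsetLabel to a plain vector instead of threading a jump list through the emitted instructions; finish() emits the constant and bindOffsets() links every recorded use to it. A sketch of that two-phase shape with stand-in types:

    // Sketch: a vector of uses replacing the threaded jump list for constants.
    #include <cstdint>
    #include <vector>

    struct ConstantUsesSketch {
        double value;
        std::vector<uint32_t> uses; // offsets of the RIP-relative loads
    };

    // Emission phase: one append per load (was: setNextJump chaining).
    void recordUse(ConstantUsesSketch& c, uint32_t loadOffset) {
        c.uses.push_back(loadOffset);
    }

    // finish() phase: the constant lands at constantOffset; link each use.
    template <typename LinkFn>
    void bindOffsetsSketch(const ConstantUsesSketch& c, uint32_t constantOffset,
                           LinkFn linkJump)
    {
        for (uint32_t use : c.uses)
            linkJump(use, constantOffset);
    }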
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -27,36 +27,31 @@ struct ImmShiftedTag : public ImmWord
 
 struct ImmTag : public Imm32
 {
     explicit ImmTag(JSValueTag tag)
       : Imm32(tag)
     { }
 };
 
-struct MacroAssemblerX86Shared::PlatformSpecificLabel : public NonAssertingLabel
-{};
-
 class MacroAssemblerX64 : public MacroAssemblerX86Shared
 {
   private:
     // Perform a downcast. Should be removed by Bug 996602.
     MacroAssembler& asMasm();
     const MacroAssembler& asMasm() const;
 
+    void bindOffsets(const MacroAssemblerX86Shared::UsesVector&);
+
   public:
     using MacroAssemblerX86Shared::branch32;
     using MacroAssemblerX86Shared::branchTest32;
     using MacroAssemblerX86Shared::load32;
     using MacroAssemblerX86Shared::store32;
 
-    typedef MacroAssemblerX86Shared::Double<> Double;
-    typedef MacroAssemblerX86Shared::Float<> Float;
-    typedef MacroAssemblerX86Shared::SimdData<> SimdData;
-
     MacroAssemblerX64()
     {
     }
 
     // The buffer is about to be linked, make sure any constant pools or excess
     // bookkeeping has been flushed to the instruction stream.
     void finish();
 
--- a/js/src/jit/x64/Trampoline-x64.cpp
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -157,16 +157,17 @@ JitRuntime::generateEnterJIT(JSContext* 
     *****************************************************************/
     masm.subq(rsp, r14);
 
     // Create a frame descriptor.
     masm.makeFrameDescriptor(r14, JitFrame_Entry);
     masm.push(r14);
 
     CodeLabel returnLabel;
+    CodeLabel oomReturnLabel;
     if (type == EnterJitBaseline) {
         // Handle OSR.
         AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
         regs.takeUnchecked(OsrFrameReg);
         regs.take(rbp);
         regs.take(reg_code);
 
         // Ensure that |scratch| does not end up being JSReturnOperand.
@@ -177,17 +178,17 @@ JitRuntime::generateEnterJIT(JSContext* 
 
         Label notOsr;
         masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
 
         Register numStackValues = regs.takeAny();
         masm.movq(numStackValuesAddr, numStackValues);
 
         // Push return address
-        masm.mov(returnLabel.dest(), scratch);
+        masm.mov(returnLabel.patchAt(), scratch);
         masm.push(scratch);
 
         // Push previous frame pointer.
         masm.push(rbp);
 
         // Reserve frame.
         Register framePtr = rbp;
         masm.subPtr(Imm32(BaselineFrame::Size()), rsp);
@@ -265,34 +266,36 @@ JitRuntime::generateEnterJIT(JSContext* 
         masm.jump(reg_code);
 
         // OOM: load error value, discard return address and previous frame
         // pointer and return.
         masm.bind(&error);
         masm.mov(framePtr, rsp);
         masm.addPtr(Imm32(2 * sizeof(uintptr_t)), rsp);
         masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
-        masm.mov(returnLabel.dest(), scratch);
+        masm.mov(oomReturnLabel.patchAt(), scratch);
         masm.jump(scratch);
 
         masm.bind(&notOsr);
         masm.movq(scopeChain, R1.scratchReg());
     }
 
     // The call will push the return address on the stack, thus we check that
     // the stack would be aligned once the call is complete.
     masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
 
     // Call function.
     masm.callJitNoProfiler(reg_code);
 
     if (type == EnterJitBaseline) {
         // Baseline OSR will return here.
-        masm.bind(returnLabel.src());
+        masm.use(returnLabel.target());
         masm.addCodeLabel(returnLabel);
+        masm.use(oomReturnLabel.target());
+        masm.addCodeLabel(oomReturnLabel);
     }
 
     // Pop arguments and padding from stack.
     masm.pop(r14);              // Pop and decode descriptor.
     masm.shrq(Imm32(FRAMESIZE_SHIFT), r14);
     masm.addq(r14, rsp);        // Remove arguments.
 
     /*****************************************************************
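Because a CodeOffsetLabel records exactly one use, the OSR return and the OOM return can no longer share returnLabel's use chain; each path gets its own CodeLabel, and both end up targeting the same return point. Schematically (stand-in struct, not the real jit::CodeLabel):

    // Sketch: one CodeLabel per patchable reference under single-use labels.
    #include <cstddef>

    struct CodeLabelSketch { size_t patchAt; size_t target; };

    void markReturnPoint(size_t returnPoint,
                         CodeLabelSketch& ret, CodeLabelSketch& oomRet)
    {
        ret.target = returnPoint;    // masm.use(returnLabel.target())
        oomRet.target = returnPoint; // masm.use(oomReturnLabel.target())
    }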
--- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -104,17 +104,17 @@ AssemblerX86Shared::executableCopy(void*
     masm.executableCopy(buffer);
 }
 
 void
 AssemblerX86Shared::processCodeLabels(uint8_t* rawCode)
 {
     for (size_t i = 0; i < codeLabels_.length(); i++) {
         CodeLabel label = codeLabels_[i];
-        Bind(rawCode, label.dest(), rawCode + label.src()->offset());
+        Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
     }
 }
 
 AssemblerX86Shared::Condition
 AssemblerX86Shared::InvertCondition(Condition cond)
 {
     switch (cond) {
       case Zero:
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -393,20 +393,23 @@ class AssemblerX86Shared : public Assemb
         masm.setPrinter(sp);
     }
 
     static const Register getStackPointer() {
         return StackPointer;
     }
 
     void executableCopy(void* buffer);
+    bool asmMergeWith(const AssemblerX86Shared& other) {
+        MOZ_ASSERT(other.jumps_.length() == 0);
+        if (!AssemblerShared::asmMergeWith(masm.size(), other))
+            return false;
+        return masm.appendBuffer(other.masm);
+    }
     void processCodeLabels(uint8_t* rawCode);
-    static int32_t ExtractCodeLabelOffset(uint8_t* code) {
-        return *(uintptr_t*)code;
-    }
     void copyJumpRelocationTable(uint8_t* dest);
     void copyDataRelocationTable(uint8_t* dest);
     void copyPreBarrierTable(uint8_t* dest);
 
     // Size of the instruction stream, in bytes.
     size_t size() const {
         return masm.size();
     }
@@ -430,38 +433,20 @@ class AssemblerX86Shared : public Assemb
 
   public:
     void haltingAlign(int alignment) {
         masm.haltingAlign(alignment);
     }
     void nopAlign(int alignment) {
         masm.nopAlign(alignment);
     }
-    void writeCodePointer(AbsoluteLabel* label) {
-        MOZ_ASSERT(!label->bound());
-        // Thread the patch list through the unpatched address word in the
-        // instruction stream.
-        masm.jumpTablePointer(label->prev());
-        label->setPrev(masm.size());
-    }
-    void writeDoubleConstant(double d, Label* label) {
-        label->bind(masm.size());
-        masm.doubleConstant(d);
-    }
-    void writeFloatConstant(float f, Label* label) {
-        label->bind(masm.size());
-        masm.floatConstant(f);
-    }
-    void writeInt32x4Constant(const SimdConstant& v, Label* label) {
-        label->bind(masm.size());
-        masm.int32x4Constant(v.asInt32x4());
-    }
-    void writeFloat32x4Constant(const SimdConstant& v, Label* label) {
-        label->bind(masm.size());
-        masm.float32x4Constant(v.asFloat32x4());
+    void writeCodePointer(CodeOffsetLabel* label) {
+        // A CodeOffsetLabel has only one use, so bake in the "end of list" value.
+        masm.jumpTablePointer(LabelBase::INVALID_OFFSET);
+        label->use(masm.size());
     }
     void movl(Imm32 imm32, Register dest) {
         masm.movl_i32r(imm32.value, dest.encoding());
     }
     void movl(Register src, Register dest) {
         masm.movl_rr(src.encoding(), dest.encoding());
     }
     void movl(const Operand& src, Register dest) {
@@ -933,59 +918,58 @@ class AssemblerX86Shared : public Assemb
     void bind(RepatchLabel* label) {
         JmpDst dst(masm.label());
         if (label->used()) {
             JmpSrc jmp(label->offset());
             masm.linkJump(jmp, dst);
         }
         label->bind(dst.offset());
     }
+    void use(CodeOffsetLabel* label) {
+        label->use(currentOffset());
+    }
     uint32_t currentOffset() {
         return masm.label().offset();
     }
 
     // Re-routes pending jumps to a new label.
+    void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
+        if (!label->used())
+            return;
+        bool more;
+        JmpSrc jmp(label->offset() + baseOffset);
+        do {
+            JmpSrc next;
+            more = masm.nextJump(jmp, &next);
+            if (target->bound()) {
+                // The jump can be immediately patched to the correct destination.
+                masm.linkJump(jmp, JmpDst(target->offset()));
+            } else {
+                // Thread the jump list through the unpatched jump targets.
+                JmpSrc prev(target->use(jmp.offset()));
+                masm.setNextJump(jmp, prev);
+            }
+            jmp = JmpSrc(next.offset() + baseOffset);
+        } while (more);
+    }
     void retarget(Label* label, Label* target) {
-        if (label->used()) {
-            bool more;
-            JmpSrc jmp(label->offset());
-            do {
-                JmpSrc next;
-                more = masm.nextJump(jmp, &next);
-
-                if (target->bound()) {
-                    // The jump can be immediately patched to the correct destination.
-                    masm.linkJump(jmp, JmpDst(target->offset()));
-                } else {
-                    // Thread the jump list through the unpatched jump targets.
-                    JmpSrc prev = JmpSrc(target->use(jmp.offset()));
-                    masm.setNextJump(jmp, prev);
-                }
-
-                jmp = next;
-            } while (more);
-        }
+        retargetWithOffset(0, label, target);
         label->reset();
     }
 
-    static void Bind(uint8_t* raw, AbsoluteLabel* label, const void* address) {
+    static void Bind(uint8_t* raw, CodeOffsetLabel* label, const void* address) {
         if (label->used()) {
-            intptr_t src = label->offset();
-            do {
-                intptr_t next = reinterpret_cast<intptr_t>(X86Encoding::GetPointer(raw + src));
-                X86Encoding::SetPointer(raw + src, address);
-                src = next;
-            } while (src != AbsoluteLabel::INVALID_OFFSET);
+            intptr_t offset = label->offset();
+            X86Encoding::SetPointer(raw + offset, address);
         }
-        label->bind();
     }
 
     // See Bind and X86Encoding::setPointer.
-    size_t labelOffsetToPatchOffset(size_t offset) {
-        return offset - sizeof(void*);
+    size_t labelToPatchOffset(CodeOffsetLabel label) {
+        return label.offset() - sizeof(void*);
     }
 
     void ret() {
         masm.ret();
     }
     void retn(Imm32 n) {
         // Remove the size of the return address which is included in the frame.
         masm.ret_i(n.value - sizeof(void*));
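retarget() is now just the baseOffset == 0 case of retargetWithOffset(); the walk is the same, except every offset pulled out of a merged-in buffer's chain must be rebased before it is followed or re-threaded. A compact sketch of the shared walk, with next/visit standing in for nextJump() and the actual patching:

    // Sketch: walking a threaded jump chain, rebasing each offset by baseOffset.
    #include <cstddef>
    #include <cstdint>

    typedef bool (*NextFn)(uint32_t at, uint32_t* nextUse);
    typedef void (*VisitFn)(uint32_t use);

    void walkChain(NextFn next, VisitFn visit,
                   uint32_t firstUse, size_t baseOffset)
    {
        uint32_t use = uint32_t(firstUse + baseOffset);
        bool more;
        do {
            uint32_t nextUse = 0;
            more = next(use, &nextUse);
            visit(use); // link to a bound target or re-thread into the chain
            use = uint32_t(nextUse + baseOffset);
        } while (more);
    }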
--- a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
@@ -75,16 +75,25 @@ namespace jit {
         }
 
         void ensureSpace(size_t space)
         {
             if (MOZ_UNLIKELY(!m_buffer.reserve(m_buffer.length() + space)))
                 oomDetected();
         }
 
+        bool growByUninitialized(size_t space)
+        {
+            if (MOZ_UNLIKELY(!m_buffer.growByUninitialized(space))) {
+                oomDetected();
+                return false;
+            }
+            return true;
+        }
+
         bool isAligned(size_t alignment) const
         {
             return !(m_buffer.length() & (alignment - 1));
         }
 
         void putByteUnchecked(int value)
         {
             m_buffer.infallibleAppend(char(value));
--- a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
@@ -3407,16 +3407,25 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         unsigned char* code = m_formatter.data();
         SetRel32(code + from.offset(), code + to.offset());
     }
 
     void executableCopy(void* buffer)
     {
         memcpy(buffer, m_formatter.buffer(), size());
     }
+    bool appendBuffer(const BaseAssembler& other)
+    {
+        size_t otherSize = other.size();
+        size_t formerSize = size();
+        if (!m_formatter.growByUninitialized(otherSize))
+            return false;
+        memcpy((char*)m_formatter.buffer() + formerSize, other.m_formatter.buffer(), otherSize);
+        return true;
+    }
 
   protected:
     static bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(int8_t)value; }
     static bool CAN_SIGN_EXTEND_16_32(int32_t value) { return value == (int32_t)(int16_t)value; }
     static bool CAN_ZERO_EXTEND_8_32(int32_t value) { return value == (int32_t)(uint8_t)value; }
     static bool CAN_ZERO_EXTEND_8H_32(int32_t value) { return value == (value & 0xff00); }
     static bool CAN_ZERO_EXTEND_16_32(int32_t value) { return value == (int32_t)(uint16_t)value; }
     static bool CAN_ZERO_EXTEND_32_64(int32_t value) { return value >= 0; }
@@ -4657,16 +4666,17 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         {
             m_buffer.ensureSpace(sizeof(int32_t));
             m_buffer.putIntUnchecked(i);
         }
 
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
+        bool growByUninitialized(size_t size) { return m_buffer.growByUninitialized(size); }
         const unsigned char* buffer() const { return m_buffer.buffer(); }
         bool oom() const { return m_buffer.oom(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         unsigned char* data() { return m_buffer.data(); }
 
     private:
 
         // Internals; ModRm and REX formatters.
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -1614,29 +1614,29 @@ class OutOfLineTableSwitch : public OutO
 };
 
 void
 CodeGeneratorX86Shared::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
 {
     MTableSwitch* mir = ool->mir();
 
     masm.haltingAlign(sizeof(void*));
-    masm.bind(ool->jumpLabel()->src());
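+    // use() records the current offset as the label's target: the start
+    // of the jump table emitted below.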
+    masm.use(ool->jumpLabel()->target());
     masm.addCodeLabel(*ool->jumpLabel());
 
     for (size_t i = 0; i < mir->numCases(); i++) {
         LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
         Label* caseheader = caseblock->label();
         uint32_t caseoffset = caseheader->offset();
 
         // The entries of the jump table need to be absolute addresses and thus
         // must be patched after codegen is finished.
         CodeLabel cl;
-        masm.writeCodePointer(cl.dest());
-        cl.src()->bind(caseoffset);
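+        // patchAt() tracks where the placeholder pointer was written;
+        // target() is set to the case header's offset, which is patched
+        // to an absolute address after codegen.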
+        masm.writeCodePointer(cl.patchAt());
+        cl.target()->use(caseoffset);
         masm.addCodeLabel(cl);
     }
 }
 
 void
 CodeGeneratorX86Shared::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base)
 {
     Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
@@ -1652,17 +1652,17 @@ CodeGeneratorX86Shared::emitTableSwitchD
 
     // To fill in the CodeLabels for the case entries, we need to first
     // generate the case entries (we don't yet know their offsets in the
     // instruction stream).
     OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(mir);
     addOutOfLineCode(ool, mir);
 
     // Compute the position at which the pointer for the right case is stored.
-    masm.mov(ool->jumpLabel()->dest(), base);
+    masm.mov(ool->jumpLabel()->patchAt(), base);
     Operand pointer = Operand(base, index, ScalePointer);
 
     // Jump to the right case.
     masm.jmp(pointer);
 }
 
 void
 CodeGeneratorX86Shared::visitMathD(LMathD* math)
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -227,89 +227,152 @@ MacroAssemblerX86Shared::atomicExchangeT
 
 template void
 MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
                                                        Register value, Register temp, AnyRegister output);
 template void
 MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                        Register value, Register temp, AnyRegister output);
 
-MacroAssemblerX86Shared::Float<>*
+MacroAssemblerX86Shared::Float*
 MacroAssemblerX86Shared::getFloat(float f)
 {
     if (!floatMap_.initialized()) {
         enoughMemory_ &= floatMap_.init();
         if (!enoughMemory_)
             return nullptr;
     }
     size_t floatIndex;
     if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f)) {
         floatIndex = p->value();
     } else {
         floatIndex = floats_.length();
-        enoughMemory_ &= floats_.append(Float<>(f));
+        enoughMemory_ &= floats_.append(Float(f));
         if (!enoughMemory_)
             return nullptr;
         enoughMemory_ &= floatMap_.add(p, f, floatIndex);
         if (!enoughMemory_)
             return nullptr;
     }
-    Float<>& flt = floats_[floatIndex];
-    MOZ_ASSERT(!flt.uses.bound());
-    return &flt;
+    return &floats_[floatIndex];
 }
 
-MacroAssemblerX86Shared::Double<>*
+MacroAssemblerX86Shared::Double*
 MacroAssemblerX86Shared::getDouble(double d)
 {
     if (!doubleMap_.initialized()) {
         enoughMemory_ &= doubleMap_.init();
         if (!enoughMemory_)
             return nullptr;
     }
     size_t doubleIndex;
     if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d)) {
         doubleIndex = p->value();
     } else {
         doubleIndex = doubles_.length();
-        enoughMemory_ &= doubles_.append(Double<>(d));
+        enoughMemory_ &= doubles_.append(Double(d));
         if (!enoughMemory_)
             return nullptr;
         enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
         if (!enoughMemory_)
             return nullptr;
     }
-    Double<>& dbl = doubles_[doubleIndex];
-    MOZ_ASSERT(!dbl.uses.bound());
-    return &dbl;
+    return &doubles_[doubleIndex];
 }
 
-MacroAssemblerX86Shared::SimdData<>*
+MacroAssemblerX86Shared::SimdData*
 MacroAssemblerX86Shared::getSimdData(const SimdConstant& v)
 {
     if (!simdMap_.initialized()) {
         enoughMemory_ &= simdMap_.init();
         if (!enoughMemory_)
             return nullptr;
     }
     size_t index;
     if (SimdMap::AddPtr p = simdMap_.lookupForAdd(v)) {
         index = p->value();
     } else {
         index = simds_.length();
-        enoughMemory_ &= simds_.append(SimdData<>(v));
+        enoughMemory_ &= simds_.append(SimdData(v));
         if (!enoughMemory_)
             return nullptr;
         enoughMemory_ &= simdMap_.add(p, v, index);
         if (!enoughMemory_)
             return nullptr;
     }
-    SimdData<>& simd = simds_[index];
-    MOZ_ASSERT(!simd.uses.bound());
-    return &simd;
+    return &simds_[index];
+}
+
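+// Append the use sites recorded in `old` to `vec`, shifting each by
+// `delta`: after a merge, code copied from the other assembler starts
+// `delta` bytes into the merged buffer.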
+static bool
+AppendShiftedUses(const MacroAssemblerX86Shared::UsesVector& old, size_t delta,
+                  MacroAssemblerX86Shared::UsesVector* vec)
+{
+    for (CodeOffsetLabel use : old) {
+        use.offsetBy(delta);
+        if (!vec->append(use))
+            return false;
+    }
+    return true;
+}
+
+bool
+MacroAssemblerX86Shared::asmMergeWith(const MacroAssemblerX86Shared& other)
+{
+    size_t sizeBefore = masm.size();
+
+    if (!Assembler::asmMergeWith(other))
+        return false;
+
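+    // The constant maps are created lazily by getDouble/getFloat/
+    // getSimdData, so make sure they exist before merging into them.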
+    if (!doubleMap_.initialized() && !doubleMap_.init())
+        return false;
+    if (!floatMap_.initialized() && !floatMap_.init())
+        return false;
+    if (!simdMap_.initialized() && !simdMap_.init())
+        return false;
+
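+    // Constants from `other` are deduplicated into this assembler's
+    // pools; only their use sites, shifted by the amount of code that
+    // preceded the merge, are appended.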
+    for (const Double& d : other.doubles_) {
+        size_t index;
+        if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d.value)) {
+            index = p->value();
+        } else {
+            index = doubles_.length();
+            if (!doubles_.append(Double(d.value)) || !doubleMap_.add(p, d.value, index))
+                return false;
+        }
+        if (!AppendShiftedUses(d.uses, sizeBefore, &doubles_[index].uses))
+            return false;
+    }
+
+    for (const Float& f : other.floats_) {
+        size_t index;
+        if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f.value)) {
+            index = p->value();
+        } else {
+            index = floats_.length();
+            if (!floats_.append(Float(f.value)) || !floatMap_.add(p, f.value, index))
+                return false;
+        }
+        if (!AppendShiftedUses(f.uses, sizeBefore, &floats_[index].uses))
+            return false;
+    }
+
+    for (const SimdData& s : other.simds_) {
+        size_t index;
+        if (SimdMap::AddPtr p = simdMap_.lookupForAdd(s.value)) {
+            index = p->value();
+        } else {
+            index = simds_.length();
+            if (!simds_.append(SimdData(s.value)) || !simdMap_.add(p, s.value, index))
+                return false;
+        }
+        if (!AppendShiftedUses(s.uses, sizeBefore, &simds_[index].uses))
+            return false;
+    }
+
+    return true;
 }
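+// Usage sketch (hypothetical call site, from a derived masm): merge a
+// per-function assembler into the module-wide one:
+//
+//   if (!moduleMasm.asmMergeWith(functionMasm))
+//       return false;  // OOM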
 
 //{{{ check_macroassembler_style
 // ===============================================================
 // Stack manipulation functions.
 
 void
 MacroAssembler::PushRegsInMask(LiveRegisterSet set)
@@ -555,18 +618,18 @@ MacroAssembler::callAndPushReturnAddress
 // ===============================================================
 // Jit Frames.
 
 uint32_t
 MacroAssembler::pushFakeReturnAddress(Register scratch)
 {
     CodeLabel cl;
 
-    mov(cl.dest(), scratch);
+    mov(cl.patchAt(), scratch);
     Push(scratch);
-    bind(cl.src());
+    use(cl.target());
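+    // The label's target is the offset recorded here; the placeholder
+    // moved into scratch above is patched to this address at link time.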
     uint32_t retAddr = currentOffset();
 
     addCodeLabel(cl);
     return retAddr;
 }
 
 //}}} check_macroassembler_style
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -40,59 +40,67 @@ class MacroAssembler;
 
 class MacroAssemblerX86Shared : public Assembler
 {
   private:
     // Perform a downcast. Should be removed by Bug 996602.
     MacroAssembler& asMasm();
     const MacroAssembler& asMasm() const;
 
+  public:
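+    // Each CodeOffsetLabel is the buffer offset recorded just past an
+    // instruction's pointer-sized immediate that must be patched to
+    // point at a pooled constant (see labelToPatchOffset).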
+    typedef Vector<CodeOffsetLabel, 0, SystemAllocPolicy> UsesVector;
+
   protected:
-    struct PlatformSpecificLabel;
-
-    template<class LabelType = PlatformSpecificLabel>
+    // For Double, Float and SimdData, define the move constructors explicitly
+    // (and delete the copy constructors) so that MSVC moves these structures
+    // instead of trying to copy them.
     struct Double {
         double value;
-        LabelType uses;
+        UsesVector uses;
         explicit Double(double value) : value(value) {}
+        Double(Double&& other) : value(other.value), uses(mozilla::Move(other.uses)) {}
+        explicit Double(const Double&) = delete;
     };
 
     // These vectors use SystemAllocPolicy because asm.js releases memory after
     // each function is compiled, whereas the pooled constants must live until
     // all functions have been compiled.
-    Vector<Double<PlatformSpecificLabel>, 0, SystemAllocPolicy> doubles_;
+    Vector<Double, 0, SystemAllocPolicy> doubles_;
     typedef HashMap<double, size_t, DefaultHasher<double>, SystemAllocPolicy> DoubleMap;
     DoubleMap doubleMap_;
 
-    template<class LabelType = PlatformSpecificLabel>
     struct Float {
         float value;
-        LabelType uses;
+        UsesVector uses;
         explicit Float(float value) : value(value) {}
+        Float(Float&& other) : value(other.value), uses(mozilla::Move(other.uses)) {}
+        explicit Float(const Float&) = delete;
     };
 
-    Vector<Float<PlatformSpecificLabel>, 0, SystemAllocPolicy> floats_;
+    Vector<Float, 0, SystemAllocPolicy> floats_;
     typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy> FloatMap;
     FloatMap floatMap_;
 
-    template<class LabelType = PlatformSpecificLabel>
     struct SimdData {
         SimdConstant value;
-        LabelType uses;
+        UsesVector uses;
         explicit SimdData(const SimdConstant& v) : value(v) {}
-        SimdConstant::Type type() { return value.type(); }
+        SimdData(SimdData&& other) : value(other.value), uses(mozilla::Move(other.uses)) {}
+        explicit SimdData(const SimdData&) = delete;
+        SimdConstant::Type type() const { return value.type(); }
     };
 
-    Vector<SimdData<PlatformSpecificLabel>, 0, SystemAllocPolicy> simds_;
+    Vector<SimdData, 0, SystemAllocPolicy> simds_;
     typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
     SimdMap simdMap_;
 
-    Float<>* getFloat(float f);
-    Double<>* getDouble(double d);
-    SimdData<>* getSimdData(const SimdConstant& v);
+    Float* getFloat(float f);
+    Double* getDouble(double d);
+    SimdData* getSimdData(const SimdConstant& v);
+
+    bool asmMergeWith(const MacroAssemblerX86Shared& other);
 
   public:
     using Assembler::call;
 
     MacroAssemblerX86Shared()
     { }
 
     void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -294,22 +294,20 @@ class Assembler : public AssemblerX86Sha
         movl(src, dest);
     }
     void mov(Register src, const Operand& dest) {
         movl(src, dest);
     }
     void mov(Imm32 imm, const Operand& dest) {
         movl(imm, dest);
     }
-    void mov(AbsoluteLabel* label, Register dest) {
-        MOZ_ASSERT(!label->bound());
-        // Thread the patch list through the unpatched address word in the
-        // instruction stream.
-        masm.movl_i32r(label->prev(), dest.encoding());
-        label->setPrev(masm.size());
+    void mov(CodeOffsetLabel* label, Register dest) {
+        // Emit a placeholder value in the instruction stream and record
+        // its use; the placeholder is patched with the real address once
+        // the label's target is known.
+        masm.movl_i32r(0, dest.encoding());
+        label->use(masm.size());
     }
     void mov(Register src, Register dest) {
         movl(src, dest);
     }
     void xchg(Register src, Register dest) {
         xchgl(src, dest);
     }
     void lea(const Operand& src, Register dest) {
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -93,115 +93,117 @@ MacroAssemblerX86::convertUInt64ToDouble
 void
 MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
 {
     if (maybeInlineDouble(d, dest))
         return;
     Double* dbl = getDouble(d);
     if (!dbl)
         return;
-    masm.vmovsd_mr(reinterpret_cast<const void*>(dbl->uses.prev()), dest.encoding());
-    dbl->uses.setPrev(masm.size());
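+    // The address operand is just a placeholder; finish() emits the
+    // constant and registers a CodeLabel for each recorded use so the
+    // load can be patched with the constant's final address.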
+    masm.vmovsd_mr(nullptr, dest.encoding());
+    enoughMemory_ &= dbl->uses.append(CodeOffsetLabel(masm.size()));
 }
 
 void
 MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest)
 {
     Double* dbl = getDouble(d);
     if (!dbl)
         return;
-    masm.vaddsd_mr(reinterpret_cast<const void*>(dbl->uses.prev()), dest.encoding(), dest.encoding());
-    dbl->uses.setPrev(masm.size());
+    masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
+    enoughMemory_ &= dbl->uses.append(CodeOffsetLabel(masm.size()));
 }
 
 void
 MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
 {
     if (maybeInlineFloat(f, dest))
         return;
     Float* flt = getFloat(f);
     if (!flt)
         return;
-    masm.vmovss_mr(reinterpret_cast<const void*>(flt->uses.prev()), dest.encoding());
-    flt->uses.setPrev(masm.size());
+    masm.vmovss_mr(nullptr, dest.encoding());
+    enoughMemory_ &= flt->uses.append(CodeOffsetLabel(masm.size()));
 }
 
 void
 MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
 {
     Float* flt = getFloat(f);
     if (!flt)
         return;
-    masm.vaddss_mr(reinterpret_cast<const void*>(flt->uses.prev()), dest.encoding(), dest.encoding());
-    flt->uses.setPrev(masm.size());
+    masm.vaddss_mr(nullptr, dest.encoding(), dest.encoding());
+    enoughMemory_ &= flt->uses.append(CodeOffsetLabel(masm.size()));
 }
 
 void
 MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
     if (maybeInlineInt32x4(v, dest))
         return;
     SimdData* i4 = getSimdData(v);
     if (!i4)
         return;
     MOZ_ASSERT(i4->type() == SimdConstant::Int32x4);
-    masm.vmovdqa_mr(reinterpret_cast<const void*>(i4->uses.prev()), dest.encoding());
-    i4->uses.setPrev(masm.size());
+    masm.vmovdqa_mr(nullptr, dest.encoding());
+    enoughMemory_ &= i4->uses.append(CodeOffsetLabel(masm.size()));
 }
 
 void
 MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
     if (maybeInlineFloat32x4(v, dest))
         return;
     SimdData* f4 = getSimdData(v);
     if (!f4)
         return;
     MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
-    masm.vmovaps_mr(reinterpret_cast<const void*>(f4->uses.prev()), dest.encoding());
-    f4->uses.setPrev(masm.size());
+    masm.vmovaps_mr(nullptr, dest.encoding());
+    enoughMemory_ &= f4->uses.append(CodeOffsetLabel(masm.size()));
 }
 
 void
 MacroAssemblerX86::finish()
 {
     if (!doubles_.empty())
         masm.haltingAlign(sizeof(double));
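+    // For every pooled constant, register one CodeLabel per recorded use
+    // (all sharing the constant's offset as their target), then emit the
+    // constant's payload.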
-    for (size_t i = 0; i < doubles_.length(); i++) {
-        CodeLabel cl(doubles_[i].uses);
-        writeDoubleConstant(doubles_[i].value, cl.src());
-        addCodeLabel(cl);
+    for (const Double& d : doubles_) {
+        CodeOffsetLabel cst(masm.currentOffset());
+        for (CodeOffsetLabel use : d.uses)
+            addCodeLabel(CodeLabel(use, cst));
+        masm.doubleConstant(d.value);
         if (!enoughMemory_)
             return;
     }
 
     if (!floats_.empty())
         masm.haltingAlign(sizeof(float));
-    for (size_t i = 0; i < floats_.length(); i++) {
-        CodeLabel cl(floats_[i].uses);
-        writeFloatConstant(floats_[i].value, cl.src());
-        addCodeLabel(cl);
+    for (const Float& f : floats_) {
+        CodeOffsetLabel cst(masm.currentOffset());
+        for (CodeOffsetLabel use : f.uses)
+            addCodeLabel(CodeLabel(use, cst));
+        masm.floatConstant(f.value);
         if (!enoughMemory_)
             return;
     }
 
     // SIMD memory values must be suitably aligned.
     if (!simds_.empty())
         masm.haltingAlign(SimdMemoryAlignment);
-    for (size_t i = 0; i < simds_.length(); i++) {
-        CodeLabel cl(simds_[i].uses);
-        SimdData& v = simds_[i];
+    for (const SimdData& v : simds_) {
+        CodeOffsetLabel cst(masm.currentOffset());
+        for (CodeOffsetLabel use : v.uses)
+            addCodeLabel(CodeLabel(use, cst));
         switch (v.type()) {
-          case SimdConstant::Int32x4:   writeInt32x4Constant(v.value, cl.src());   break;
-          case SimdConstant::Float32x4: writeFloat32x4Constant(v.value, cl.src()); break;
+          case SimdConstant::Int32x4:   masm.int32x4Constant(v.value.asInt32x4());     break;
+          case SimdConstant::Float32x4: masm.float32x4Constant(v.value.asFloat32x4()); break;
           default: MOZ_CRASH("unexpected SimdConstant type");
         }
-        addCodeLabel(cl);
         if (!enoughMemory_)
             return;
     }
 }
 
 void
 MacroAssemblerX86::handleFailureWithHandlerTail(void* handler)
 {
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -11,30 +11,23 @@
 
 #include "jit/JitFrames.h"
 #include "jit/MoveResolver.h"
 #include "jit/x86-shared/MacroAssembler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
-struct MacroAssemblerX86Shared::PlatformSpecificLabel : public AbsoluteLabel
-{};
-
 class MacroAssemblerX86 : public MacroAssemblerX86Shared
 {
   private:
     // Perform a downcast. Should be removed by Bug 996602.
     MacroAssembler& asMasm();
     const MacroAssembler& asMasm() const;
 
-    typedef MacroAssemblerX86Shared::Double<> Double;
-    typedef MacroAssemblerX86Shared::Float<> Float;
-    typedef MacroAssemblerX86Shared::SimdData<> SimdData;
-
   protected:
     MoveResolver moveResolver_;
 
   private:
     Operand payloadOfAfterStackPush(const Address& address) {
         // If we are basing off %esp, the address will be invalid after the
         // first push.
         if (address.base == StackPointer)
--- a/js/src/jit/x86/Trampoline-x86.cpp
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -151,16 +151,17 @@ JitRuntime::generateEnterJIT(JSContext* 
     Push the number of bytes we've pushed so far on the stack and call
     *****************************************************************/
     // Create a frame descriptor.
     masm.subl(esp, esi);
     masm.makeFrameDescriptor(esi, JitFrame_Entry);
     masm.push(esi);
 
     CodeLabel returnLabel;
+    CodeLabel oomReturnLabel;
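+    // A CodeOffsetLabel records a single use site, so the normal OSR
+    // return path and the OOM path now need separate labels; the old
+    // AbsoluteLabel could chain several uses through the instruction
+    // stream.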
     if (type == EnterJitBaseline) {
         // Handle OSR.
         AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
         regs.take(JSReturnOperand);
         regs.takeUnchecked(OsrFrameReg);
         regs.take(ebp);
         regs.take(ReturnReg);
 
@@ -171,17 +172,17 @@ JitRuntime::generateEnterJIT(JSContext* 
 
         Register numStackValues = regs.takeAny();
         masm.loadPtr(Address(ebp, ARG_STACKVALUES), numStackValues);
 
         Register jitcode = regs.takeAny();
         masm.loadPtr(Address(ebp, ARG_JITCODE), jitcode);
 
         // Push return address.
-        masm.mov(returnLabel.dest(), scratch);
+        masm.mov(returnLabel.patchAt(), scratch);
         masm.push(scratch);
 
         // Push previous frame pointer.
         masm.push(ebp);
 
         // Reserve frame.
         Register framePtr = ebp;
         masm.subPtr(Imm32(BaselineFrame::Size()), esp);
@@ -256,17 +257,17 @@ JitRuntime::generateEnterJIT(JSContext* 
         masm.jump(jitcode);
 
         // OOM: load error value, discard return address and previous frame
         // pointer and return.
         masm.bind(&error);
         masm.mov(framePtr, esp);
         masm.addPtr(Imm32(2 * sizeof(uintptr_t)), esp);
         masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
-        masm.mov(returnLabel.dest(), scratch);
+        masm.mov(oomReturnLabel.patchAt(), scratch);
         masm.jump(scratch);
 
         masm.bind(&notOsr);
         masm.loadPtr(Address(ebp, ARG_SCOPECHAIN), R1.scratchReg());
     }
 
     // The call will push the return address on the stack, thus we check that
     // the stack would be aligned once the call is complete.
@@ -275,18 +276,20 @@ JitRuntime::generateEnterJIT(JSContext* 
     /***************************************************************
         Call passed-in code, get return value and fill in the
         passed in return value pointer
     ***************************************************************/
     masm.call(Address(ebp, ARG_JITCODE));
 
     if (type == EnterJitBaseline) {
         // Baseline OSR will return here.
-        masm.bind(returnLabel.src());
+        masm.use(returnLabel.target());
         masm.addCodeLabel(returnLabel);
+        masm.use(oomReturnLabel.target());
+        masm.addCodeLabel(oomReturnLabel);
     }
 
     // Pop arguments off the stack.
     // eax <- 8*argc (size of all arguments we pushed on the stack)
     masm.pop(eax);
     masm.shrl(Imm32(FRAMESIZE_SHIFT), eax); // Unmark EntryFrame.
     masm.addl(eax, esp);