Bug 1228369: Rename CodeOffsetLabel into CodeOffset; r=luke
author Benjamin Bouvier <benj@benj.me>
Thu, 26 Nov 2015 17:23:32 +0100
changeset 308873 630fb403d6703b373cd3966ede1753ff6a118205
parent 308872 6dea7f38baeb7cd4c6b7759850116c4a4c2ef43a
child 308874 8e82466eab91ecab76ed5c7edf5b47788eb29b70
push id 5513
push user raliiev@mozilla.com
push date Mon, 25 Jan 2016 13:55:34 +0000
treeherder mozilla-beta@5ee97dd05b5c [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers luke
bugs 1228369
milestone 45.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1228369: Rename CodeOffsetLabel into CodeOffset; r=luke
js/src/asmjs/AsmJSValidate.cpp
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/irregexp/NativeRegExpMacroAssembler.h
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineCompiler.h
js/src/jit/BaselineDebugModeOSR.cpp
js/src/jit/BaselineJIT.cpp
js/src/jit/BaselineJIT.h
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/Ion.cpp
js/src/jit/IonCaches.cpp
js/src/jit/IonCaches.h
js/src/jit/MacroAssembler-inl.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/Safepoints.cpp
js/src/jit/SharedIC.h
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm/SharedICHelpers-arm.h
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/arm64/MacroAssembler-arm64.h
js/src/jit/arm64/SharedICHelpers-arm64.h
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips32/MacroAssembler-mips32.h
js/src/jit/mips32/SharedICHelpers-mips32.h
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/mips64/MacroAssembler-mips64.h
js/src/jit/mips64/SharedICHelpers-mips64.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/none/SharedICHelpers-none.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/BaselineCompiler-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x64/SharedICHelpers-x64.h
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.h
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/jit/x86/SharedICHelpers-x86.h
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -7281,18 +7281,18 @@ GenerateCheckForHeapDetachment(ModuleVal
 {
     if (!m.module().hasArrayView())
         return;
 
     MacroAssembler& masm = m.masm();
     MOZ_ASSERT(int(masm.framePushed()) >= int(ShadowStackSpace));
     AssertStackAlignment(masm, ABIStackAlignment);
 #if defined(JS_CODEGEN_X86)
-    CodeOffsetLabel label = masm.movlWithPatch(PatchedAbsoluteAddress(), scratch);
-    masm.append(AsmJSGlobalAccess(label, AsmJSHeapGlobalDataOffset));
+    CodeOffset offset = masm.movlWithPatch(PatchedAbsoluteAddress(), scratch);
+    masm.append(AsmJSGlobalAccess(offset, AsmJSHeapGlobalDataOffset));
     masm.branchTestPtr(Assembler::Zero, scratch, scratch, &m.onDetachedLabel());
 #else
     masm.branchTestPtr(Assembler::Zero, HeapReg, HeapReg, &m.onDetachedLabel());
 #endif
 }
 
 static bool
 GenerateFFIInterpExit(ModuleValidator& m, const Signature& sig, unsigned exitIndex,
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -994,17 +994,17 @@ NativeRegExpMacroAssembler::PopRegister(
     masm.storePtr(temp0, register_location(register_index));
 }
 
 void
 NativeRegExpMacroAssembler::PushBacktrack(Label* label)
 {
     JitSpew(SPEW_PREFIX "PushBacktrack");
 
-    CodeOffsetLabel patchOffset = masm.movWithPatch(ImmPtr(nullptr), temp0);
+    CodeOffset patchOffset = masm.movWithPatch(ImmPtr(nullptr), temp0);
 
     MOZ_ASSERT(!label->bound());
 
     {
         AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!labelPatches.append(LabelPatch(label, patchOffset)))
             oomUnsafe.crash("NativeRegExpMacroAssembler::PushBacktrack");
     }
--- a/js/src/irregexp/NativeRegExpMacroAssembler.h
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.h
@@ -186,19 +186,19 @@ class MOZ_STACK_CLASS NativeRegExpMacroA
     jit::LiveGeneralRegisterSet savedNonVolatileRegisters;
 
     struct LabelPatch {
         // Once it is bound via BindBacktrack, |label| becomes null and
         // |labelOffset| is set.
         jit::Label* label;
         size_t labelOffset;
 
-        jit::CodeOffsetLabel patchOffset;
+        jit::CodeOffset patchOffset;
 
-        LabelPatch(jit::Label* label, jit::CodeOffsetLabel patchOffset)
+        LabelPatch(jit::Label* label, jit::CodeOffset patchOffset)
           : label(label), labelOffset(0), patchOffset(patchOffset)
         {}
     };
 
     Vector<LabelPatch, 4, SystemAllocPolicy> labelPatches;
 
     // See RegExpMacroAssembler.cpp for the meaning of these registers.
     jit::Register input_end_pointer;
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -243,17 +243,17 @@ BaselineCompiler::compile()
     // If profiler instrumentation is enabled, toggle instrumentation on.
     if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
         baselineScript->toggleProfilerInstrumentation(true);
 
     AutoWritableJitCode awjc(code);
 
     // Patch IC loads using IC entries.
     for (size_t i = 0; i < icLoadLabels_.length(); i++) {
-        CodeOffsetLabel label = icLoadLabels_[i].label;
+        CodeOffset label = icLoadLabels_[i].label;
         size_t icEntry = icLoadLabels_[i].icEntry;
         ICEntry* entryAddr = &(baselineScript->icEntry(icEntry));
         Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
                                            ImmPtr(entryAddr),
                                            ImmPtr((void*)-1));
     }
 
     if (modifiesArguments_)
@@ -411,17 +411,17 @@ BaselineCompiler::emitPrologue()
 
 #ifdef JS_TRACE_LOGGING
     if (!emitTraceLoggerEnter())
         return false;
 #endif
 
     // Record the offset of the prologue, because Ion can bailout before
     // the scope chain is initialized.
-    prologueOffset_ = CodeOffsetLabel(masm.currentOffset());
+    prologueOffset_ = CodeOffset(masm.currentOffset());
 
     // When compiling with Debugger instrumentation, set the debuggeeness of
     // the frame before any operation that can call into the VM.
     emitIsDebuggeeCheck();
 
     // Initialize the scope chain before any operation that may
     // call into the VM and trigger a GC.
     if (!initScopeChain())
@@ -442,17 +442,17 @@ BaselineCompiler::emitPrologue()
     return true;
 }
 
 bool
 BaselineCompiler::emitEpilogue()
 {
     // Record the offset of the epilogue, so we can do early return from
     // Debugger handlers during on-stack recompile.
-    epilogueOffset_ = CodeOffsetLabel(masm.currentOffset());
+    epilogueOffset_ = CodeOffset(masm.currentOffset());
 
     masm.bind(&return_);
 
 #ifdef JS_TRACE_LOGGING
     if (!emitTraceLoggerExit())
         return false;
 #endif
 
@@ -504,19 +504,19 @@ BaselineCompiler::emitOutOfLinePostBarri
 
 bool
 BaselineCompiler::emitIC(ICStub* stub, ICEntry::Kind kind)
 {
     ICEntry* entry = allocateICEntry(stub, kind);
     if (!entry)
         return false;
 
-    CodeOffsetLabel patchOffset;
+    CodeOffset patchOffset;
     EmitCallIC(&patchOffset, masm);
-    entry->setReturnOffset(CodeOffsetLabel(masm.currentOffset()));
+    entry->setReturnOffset(CodeOffset(masm.currentOffset()));
     if (!addICLoadLabel(patchOffset))
         return false;
 
     return true;
 }
 
 typedef bool (*CheckOverRecursedWithExtraFn)(JSContext*, BaselineFrame*, uint32_t, uint32_t);
 static const VMFunction CheckOverRecursedWithExtraInfo =
@@ -619,17 +619,17 @@ BaselineCompiler::emitDebugPrologue()
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
         {
             masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
             masm.jump(&return_);
         }
         masm.bind(&done);
     }
 
-    postDebugPrologueOffset_ = CodeOffsetLabel(masm.currentOffset());
+    postDebugPrologueOffset_ = CodeOffset(masm.currentOffset());
 
     return true;
 }
 
 typedef bool (*InitGlobalOrEvalScopeObjectsFn)(JSContext*, BaselineFrame*);
 static const VMFunction InitGlobalOrEvalScopeObjectsInfo =
     FunctionInfo<InitGlobalOrEvalScopeObjectsFn>(jit::InitGlobalOrEvalScopeObjects);
 
@@ -793,17 +793,17 @@ BaselineCompiler::emitDebugTrap()
     MOZ_ASSERT(frame.numUnsyncedSlots() == 0);
 
     bool enabled = script->stepModeEnabled() || script->hasBreakpointsAt(pc);
 
     // Emit patchable call to debug trap handler.
     JitCode* handler = cx->runtime()->jitRuntime()->debugTrapHandler(cx);
     if (!handler)
         return false;
-    mozilla::DebugOnly<CodeOffsetLabel> offset = masm.toggledCall(handler, enabled);
+    mozilla::DebugOnly<CodeOffset> offset = masm.toggledCall(handler, enabled);
 
 #ifdef DEBUG
     // Patchable call offset has to match the pc mapping offset.
     PCMappingEntry& entry = pcMappingEntries_.back();
     MOZ_ASSERT((&offset)->offset() == entry.nativeOffset);
 #endif
 
     // Add an IC entry for the return offset -> pc mapping.
@@ -881,32 +881,32 @@ BaselineCompiler::emitTraceLoggerExit()
 #endif
 
 void
 BaselineCompiler::emitProfilerEnterFrame()
 {
     // Store stack position to lastProfilingFrame variable, guarded by a toggled jump.
     // Starts off initially disabled.
     Label noInstrument;
-    CodeOffsetLabel toggleOffset = masm.toggledJump(&noInstrument);
+    CodeOffset toggleOffset = masm.toggledJump(&noInstrument);
     masm.profilerEnterFrame(masm.getStackPointer(), R0.scratchReg());
     masm.bind(&noInstrument);
 
     // Store the start offset in the appropriate location.
     MOZ_ASSERT(!profilerEnterFrameToggleOffset_.used());
     profilerEnterFrameToggleOffset_ = toggleOffset;
 }
 
 void
 BaselineCompiler::emitProfilerExitFrame()
 {
     // Store previous frame to lastProfilingFrame variable, guarded by a toggled jump.
     // Starts off initially disabled.
     Label noInstrument;
-    CodeOffsetLabel toggleOffset = masm.toggledJump(&noInstrument);
+    CodeOffset toggleOffset = masm.toggledJump(&noInstrument);
     masm.profilerExitFrame();
     masm.bind(&noInstrument);
 
     // Store the start offset in the appropriate location.
     MOZ_ASSERT(!profilerExitFrameToggleOffset_.used());
     profilerExitFrameToggleOffset_ = toggleOffset;
 }
 
--- a/js/src/jit/BaselineCompiler.h
+++ b/js/src/jit/BaselineCompiler.h
@@ -221,25 +221,25 @@ namespace jit {
 
 class BaselineCompiler : public BaselineCompilerSpecific
 {
     FixedList<Label>            labels_;
     NonAssertingLabel           return_;
     NonAssertingLabel           postBarrierSlot_;
 
     // Native code offset right before the scope chain is initialized.
-    CodeOffsetLabel prologueOffset_;
+    CodeOffset prologueOffset_;
 
     // Native code offset right before the frame is popped and the method
     // returned from.
-    CodeOffsetLabel epilogueOffset_;
+    CodeOffset epilogueOffset_;
 
     // Native code offset right after debug prologue and epilogue, or
     // equivalent positions when debug mode is off.
-    CodeOffsetLabel postDebugPrologueOffset_;
+    CodeOffset postDebugPrologueOffset_;
 
     // For each INITIALYIELD or YIELD op, this Vector maps the yield index
     // to the bytecode offset of the next op.
     Vector<uint32_t>            yieldOffsets_;
 
     // Whether any on stack arguments are modified.
     bool modifiesArguments_;
 
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -1110,17 +1110,17 @@ JitRuntime::generateBaselineDebugModeOSR
     Register temp = regs.takeAny();
     Register syncedStackStart = regs.takeAny();
 
     // Pop the frame reg.
     masm.pop(BaselineFrameReg);
 
     // Not all patched baseline frames are returning from a situation where
     // the frame reg is already fixed up.
-    CodeOffsetLabel noFrameRegPopOffset(masm.currentOffset());
+    CodeOffset noFrameRegPopOffset(masm.currentOffset());
 
     // Record the stack pointer for syncing.
     masm.moveStackPtrTo(syncedStackStart);
     masm.push(ReturnReg);
     masm.push(BaselineFrameReg);
 
     // Call a stub to fully initialize the info.
     masm.setupUnalignedABICall(temp);
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -570,17 +570,17 @@ BaselineScript::pcMappingReader(size_t i
     uint8_t* dataEnd = (indexEntry == numPCMappingIndexEntries() - 1)
         ? pcMappingData() + pcMappingSize_
         : pcMappingData() + pcMappingIndexEntry(indexEntry + 1).bufferOffset;
 
     return CompactBufferReader(dataStart, dataEnd);
 }
 
 ICEntry&
-BaselineScript::icEntryFromReturnOffset(CodeOffsetLabel returnOffset)
+BaselineScript::icEntryFromReturnOffset(CodeOffset returnOffset)
 {
     size_t bottom = 0;
     size_t top = numICEntries();
     size_t mid = bottom + (top - bottom) / 2;
     while (mid < top) {
         ICEntry& midEntry = icEntry(mid);
         if (midEntry.returnOffset().offset() < returnOffset.offset())
             bottom = mid + 1;
@@ -697,17 +697,17 @@ BaselineScript::stackCheckICEntry(bool e
     MOZ_CRASH("No stack check ICEntry found.");
 }
 
 ICEntry&
 BaselineScript::icEntryFromReturnAddress(uint8_t* returnAddr)
 {
     MOZ_ASSERT(returnAddr > method_->raw());
     MOZ_ASSERT(returnAddr < method_->raw() + method_->instructionsSize());
-    CodeOffsetLabel offset(returnAddr - method_->raw());
+    CodeOffset offset(returnAddr - method_->raw());
     return icEntryFromReturnOffset(offset);
 }
 
 void
 BaselineScript::copyYieldEntries(JSScript* script, Vector<uint32_t>& yieldOffsets)
 {
     uint8_t** entries = yieldEntryList();
 
@@ -907,17 +907,17 @@ BaselineScript::toggleDebugTraps(JSScrip
 
             scanner.advanceTo(script->pcToOffset(curPC));
 
             if (!pc || pc == curPC) {
                 bool enabled = (script->stepModeEnabled() && scanner.isLineHeader()) ||
                     script->hasBreakpointsAt(curPC);
 
                 // Patch the trap.
-                CodeLocationLabel label(method(), CodeOffsetLabel(nativeOffset));
+                CodeLocationLabel label(method(), CodeOffset(nativeOffset));
                 Assembler::ToggleCall(label, enabled);
             }
 
             curPC += GetBytecodeLength(curPC);
         }
     }
 }
 
@@ -929,18 +929,18 @@ BaselineScript::initTraceLogger(JSRuntim
     traceLoggerScriptsEnabled_ = TraceLogTextIdEnabled(TraceLogger_Scripts);
     traceLoggerEngineEnabled_ = TraceLogTextIdEnabled(TraceLogger_Engine);
 #endif
 
     TraceLoggerThread* logger = TraceLoggerForMainThread(runtime);
     traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts, script);
 
     if (TraceLogTextIdEnabled(TraceLogger_Engine) || TraceLogTextIdEnabled(TraceLogger_Scripts)) {
-        CodeLocationLabel enter(method_, CodeOffsetLabel(traceLoggerEnterToggleOffset_));
-        CodeLocationLabel exit(method_, CodeOffsetLabel(traceLoggerExitToggleOffset_));
+        CodeLocationLabel enter(method_, CodeOffset(traceLoggerEnterToggleOffset_));
+        CodeLocationLabel exit(method_, CodeOffset(traceLoggerExitToggleOffset_));
         Assembler::ToggleToCmp(enter);
         Assembler::ToggleToCmp(exit);
     }
 }
 
 void
 BaselineScript::toggleTraceLoggerScripts(JSRuntime* runtime, JSScript* script, bool enable)
 {
@@ -955,18 +955,18 @@ BaselineScript::toggleTraceLoggerScripts
     if (enable)
         traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts, script);
     else
         traceLoggerScriptEvent_ = TraceLoggerEvent(logger, TraceLogger_Scripts);
 
     AutoWritableJitCode awjc(method());
 
     // Enable/Disable the traceLogger prologue and epilogue.
-    CodeLocationLabel enter(method_, CodeOffsetLabel(traceLoggerEnterToggleOffset_));
-    CodeLocationLabel exit(method_, CodeOffsetLabel(traceLoggerExitToggleOffset_));
+    CodeLocationLabel enter(method_, CodeOffset(traceLoggerEnterToggleOffset_));
+    CodeLocationLabel exit(method_, CodeOffset(traceLoggerExitToggleOffset_));
     if (!engineEnabled) {
         if (enable) {
             Assembler::ToggleToCmp(enter);
             Assembler::ToggleToCmp(exit);
         } else {
             Assembler::ToggleToJmp(enter);
             Assembler::ToggleToJmp(exit);
         }
@@ -983,18 +983,18 @@ BaselineScript::toggleTraceLoggerEngine(
     bool scriptsEnabled = TraceLogTextIdEnabled(TraceLogger_Scripts);
 
     MOZ_ASSERT(enable == !traceLoggerEngineEnabled_);
     MOZ_ASSERT(scriptsEnabled == traceLoggerScriptsEnabled_);
 
     AutoWritableJitCode awjc(method());
 
     // Enable/Disable the traceLogger prologue and epilogue.
-    CodeLocationLabel enter(method_, CodeOffsetLabel(traceLoggerEnterToggleOffset_));
-    CodeLocationLabel exit(method_, CodeOffsetLabel(traceLoggerExitToggleOffset_));
+    CodeLocationLabel enter(method_, CodeOffset(traceLoggerEnterToggleOffset_));
+    CodeLocationLabel exit(method_, CodeOffset(traceLoggerExitToggleOffset_));
     if (!scriptsEnabled) {
         if (enable) {
             Assembler::ToggleToCmp(enter);
             Assembler::ToggleToCmp(exit);
         } else {
             Assembler::ToggleToJmp(enter);
             Assembler::ToggleToJmp(exit);
         }
@@ -1013,18 +1013,18 @@ BaselineScript::toggleProfilerInstrument
         return;
 
     JitSpew(JitSpew_BaselineIC, "  toggling profiling %s for BaselineScript %p",
             enable ? "on" : "off", this);
 
     AutoWritableJitCode awjc(method());
 
     // Toggle the jump
-    CodeLocationLabel enterToggleLocation(method_, CodeOffsetLabel(profilerEnterToggleOffset_));
-    CodeLocationLabel exitToggleLocation(method_, CodeOffsetLabel(profilerExitToggleOffset_));
+    CodeLocationLabel enterToggleLocation(method_, CodeOffset(profilerEnterToggleOffset_));
+    CodeLocationLabel exitToggleLocation(method_, CodeOffset(profilerExitToggleOffset_));
     if (enable) {
         Assembler::ToggleToCmp(enterToggleLocation);
         Assembler::ToggleToCmp(exitToggleLocation);
         flags_ |= uint32_t(PROFILER_INSTRUMENTATION_ON);
     } else {
         Assembler::ToggleToJmp(enterToggleLocation);
         Assembler::ToggleToJmp(exitToggleLocation);
         flags_ &= ~uint32_t(PROFILER_INSTRUMENTATION_ON);
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -360,17 +360,17 @@ struct BaselineScript
         method()->togglePreBarriers(enabled);
     }
 
     bool containsCodeAddress(uint8_t* addr) const {
         return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize();
     }
 
     ICEntry& icEntry(size_t index);
-    ICEntry& icEntryFromReturnOffset(CodeOffsetLabel returnOffset);
+    ICEntry& icEntryFromReturnOffset(CodeOffset returnOffset);
     ICEntry& icEntryFromPCOffset(uint32_t pcOffset);
     ICEntry& icEntryFromPCOffset(uint32_t pcOffset, ICEntry* prevLookedUpEntry);
     ICEntry& callVMEntryFromPCOffset(uint32_t pcOffset);
     ICEntry& stackCheckICEntry(bool earlyCheck);
     ICEntry& icEntryFromReturnAddress(uint8_t* returnAddr);
     uint8_t* returnAddressForIC(const ICEntry& ent);
 
     size_t numICEntries() const {
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -1696,20 +1696,20 @@ CodeGenerator::emitSharedStub(ICStub::Ki
     masm.Push(Imm32(0));
 #endif
 
     // Create descriptor signifying end of Ion frame.
     uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS);
     masm.Push(Imm32(descriptor));
 
     // Call into the stubcode.
-    CodeOffsetLabel patchOffset;
+    CodeOffset patchOffset;
     IonICEntry entry(script->pcToOffset(pc), ICEntry::Kind_Op, script);
     EmitCallIC(&patchOffset, masm);
-    entry.setReturnOffset(CodeOffsetLabel(masm.currentOffset()));
+    entry.setReturnOffset(CodeOffset(masm.currentOffset()));
 
     SharedStub sharedStub(kind, entry, patchOffset);
     masm.propagateOOM(sharedStubs_.append(sharedStub));
 
     // Fix up upon return.
     uint32_t callOffset = masm.currentOffset();
 #ifdef JS_USE_LINK_REGISTER
     masm.freeStack(sizeof(intptr_t) * 2);
@@ -3984,17 +3984,17 @@ struct ScriptCountBlockState
         if (!printer.hadOutOfMemory())
             block.setCode(printer.string());
     }
 };
 
 void
 CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated)
 {
-    CodeOffsetLabel label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
+    CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
     masm.propagateOOM(ionScriptLabels_.append(label));
 
     // If IonScript::invalidationCount_ != 0, the script has been invalidated.
     masm.branch32(Assembler::NotEqual,
                   Address(temp, IonScript::offsetOfInvalidationCount()),
                   Imm32(0),
                   invalidated);
 }
@@ -8269,17 +8269,17 @@ CodeGenerator::link(JSContext* cx, Compi
                 Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
                                                    ImmPtr((void*) uintptr_t(textId)),
                                                    ImmPtr((void*)0));
             }
         }
 #endif
         // Patch shared stub IC loads using IC entries
         for (size_t i = 0; i < sharedStubs_.length(); i++) {
-            CodeOffsetLabel label = sharedStubs_[i].label;
+            CodeOffset label = sharedStubs_[i].label;
 
             IonICEntry& entry = ionScript->sharedStubList()[i];
             entry = sharedStubs_[i].entry;
             Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
                                                ImmPtr(&entry),
                                                ImmPtr((void*)-1));
 
             MOZ_ASSERT(entry.hasStub());
@@ -10214,17 +10214,17 @@ CodeGenerator::visitRecompileCheck(LReco
         masm.store32(tmp, warmUpCount);
         masm.branch32(Assembler::BelowOrEqual, tmp, Imm32(ins->mir()->recompileThreshold()), &done);
     } else {
         masm.branch32(Assembler::BelowOrEqual, warmUpCount, Imm32(ins->mir()->recompileThreshold()),
                       &done);
     }
 
     // Check if not yet recompiling.
-    CodeOffsetLabel label = masm.movWithPatch(ImmWord(uintptr_t(-1)), tmp);
+    CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), tmp);
     masm.propagateOOM(ionScriptLabels_.append(label));
     masm.branch32(Assembler::Equal,
                   Address(tmp, IonScript::offsetOfRecompiling()),
                   Imm32(0),
                   ool->entry());
     masm.bind(ool->rejoin());
     masm.bind(&done);
 }
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -479,24 +479,24 @@ class CodeGenerator : public CodeGenerat
 
     // Bailout if an element about to be written to is a hole.
     void emitStoreHoleCheck(Register elements, const LAllocation* index, int32_t offsetAdjustment,
                             LSnapshot* snapshot);
 
     void emitAssertRangeI(const Range* r, Register input);
     void emitAssertRangeD(const Range* r, FloatRegister input, FloatRegister temp);
 
-    Vector<CodeOffsetLabel, 0, JitAllocPolicy> ionScriptLabels_;
+    Vector<CodeOffset, 0, JitAllocPolicy> ionScriptLabels_;
 
     struct SharedStub {
         ICStub::Kind kind;
         IonICEntry entry;
-        CodeOffsetLabel label;
+        CodeOffset label;
 
-        SharedStub(ICStub::Kind kind, IonICEntry entry, CodeOffsetLabel label)
+        SharedStub(ICStub::Kind kind, IonICEntry entry, CodeOffset label)
           : kind(kind), entry(entry), label(label)
         {}
     };
 
     Vector<SharedStub, 0, SystemAllocPolicy> sharedStubs_;
 
     void branchIfInvalidated(Register temp, Label* invalidated);
 
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -870,17 +870,17 @@ void
 JitCode::togglePreBarriers(bool enabled)
 {
     AutoWritableJitCode awjc(this);
     uint8_t* start = code_ + preBarrierTableOffset();
     CompactBufferReader reader(start, start + preBarrierTableBytes_);
 
     while (reader.more()) {
         size_t offset = reader.readUnsigned();
-        CodeLocationLabel loc(this, CodeOffsetLabel(offset));
+        CodeLocationLabel loc(this, CodeOffset(offset));
         if (enabled)
             Assembler::ToggleToCmp(loc);
         else
             Assembler::ToggleToJmp(loc);
     }
 }
 
 IonScript::IonScript()
@@ -1113,18 +1113,18 @@ IonScript::copyPatchableBackedges(JSCont
     JitRuntime::AutoMutateBackedges amb(jrt);
 
     for (size_t i = 0; i < backedgeEntries_; i++) {
         PatchableBackedgeInfo& info = backedges[i];
         PatchableBackedge* patchableBackedge = &backedgeList()[i];
 
         info.backedge.fixup(&masm);
         CodeLocationJump backedge(code, info.backedge);
-        CodeLocationLabel loopHeader(code, CodeOffsetLabel(info.loopHeader->offset()));
-        CodeLocationLabel interruptCheck(code, CodeOffsetLabel(info.interruptCheck->offset()));
+        CodeLocationLabel loopHeader(code, CodeOffset(info.loopHeader->offset()));
+        CodeLocationLabel interruptCheck(code, CodeOffset(info.interruptCheck->offset()));
         new(patchableBackedge) PatchableBackedge(backedge, loopHeader, interruptCheck);
 
         // Point the backedge to either of its possible targets, according to
         // whether an interrupt is currently desired, matching the targets
         // established by ensureIonCodeAccessible() above. We don't handle the
         // interrupt immediately as the interrupt lock is held here.
         if (cx->runtime()->hasPendingInterrupt())
             PatchBackedge(backedge, interruptCheck, JitRuntime::BackedgeInterruptCheck);
@@ -2979,17 +2979,17 @@ InvalidateActivation(FreeOp* fop, const 
         AutoWritableJitCode awjc(ionCode);
         const SafepointIndex* si = ionScript->getSafepointIndex(it.returnAddressToFp());
         CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
         ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
                           (it.returnAddressToFp() - ionCode->raw());
         Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta));
 
         CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
-        CodeLocationLabel invalidateEpilogue(ionCode, CodeOffsetLabel(ionScript->invalidateEpilogueOffset()));
+        CodeLocationLabel invalidateEpilogue(ionCode, CodeOffset(ionScript->invalidateEpilogueOffset()));
 
         JitSpew(JitSpew_IonInvalidate, "   ! Invalidate ionScript %p (inv count %u) -> patching osipoint %p",
                 ionScript, ionScript->invalidationCount(), (void*) osiPatchPoint.raw());
         Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
     }
 
     JitSpew(JitSpew_IonInvalidate, "END invalidating activation");
 }
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -163,17 +163,17 @@ class IonCache::StubAttacher
     bool hasNextStubOffset_ : 1;
     bool hasStubCodePatchOffset_ : 1;
 
     IonCache& cache_;
 
     CodeLocationLabel rejoinLabel_;
     CodeOffsetJump nextStubOffset_;
     CodeOffsetJump rejoinOffset_;
-    CodeOffsetLabel stubCodePatchOffset_;
+    CodeOffset stubCodePatchOffset_;
 
   public:
     explicit StubAttacher(IonCache& cache)
       : hasNextStubOffset_(false),
         hasStubCodePatchOffset_(false),
         cache_(cache),
         rejoinLabel_(cache.rejoinLabel_),
         nextStubOffset_(),
@@ -277,17 +277,17 @@ const ImmPtr IonCache::StubAttacher::STU
 
 void
 IonCache::emitInitialJump(MacroAssembler& masm, RepatchLabel& entry)
 {
     initialJump_ = masm.jumpWithPatch(&entry);
     lastJump_ = initialJump_;
     Label label;
     masm.bind(&label);
-    rejoinLabel_ = CodeOffsetLabel(label.offset());
+    rejoinLabel_ = CodeOffset(label.offset());
 }
 
 void
 IonCache::attachStub(MacroAssembler& masm, StubAttacher& attacher, Handle<JitCode*> code)
 {
     MOZ_ASSERT(canAttachStub());
     incrementStubCount();
 
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -251,17 +251,17 @@ class IonCache
     void disable();
     inline bool isDisabled() const {
         return disabled_;
     }
 
     // Set the initial 'out-of-line' jump state of the cache. The fallbackLabel is
     // the location of the out-of-line update (slow) path.  This location will
     // be set to the exitJump of the last generated stub.
-    void setFallbackLabel(CodeOffsetLabel fallbackLabel) {
+    void setFallbackLabel(CodeOffset fallbackLabel) {
         fallbackLabel_ = fallbackLabel;
     }
 
     void setProfilerLeavePC(jsbytecode* pc) {
         MOZ_ASSERT(pc != nullptr);
         profilerLeavePc_ = pc;
     }
 
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -57,50 +57,50 @@ MacroAssembler::implicitPop(uint32_t byt
     MOZ_ASSERT(bytes % sizeof(intptr_t) == 0);
     MOZ_ASSERT(bytes <= INT32_MAX);
     adjustFrame(-int32_t(bytes));
 }
 
 // ===============================================================
 // Stack manipulation functions.
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::PushWithPatch(ImmWord word)
 {
     framePushed_ += sizeof(word.value);
     return pushWithPatch(word);
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::PushWithPatch(ImmPtr imm)
 {
     return PushWithPatch(ImmWord(uintptr_t(imm.value)));
 }
 
 // ===============================================================
 // Simple call functions.
 
 void
 MacroAssembler::call(const CallSiteDesc& desc, const Register reg)
 {
-    CodeOffsetLabel l = call(reg);
+    CodeOffset l = call(reg);
     append(desc, l, framePushed());
 }
 
 void
 MacroAssembler::call(const CallSiteDesc& desc, Label* label)
 {
-    CodeOffsetLabel l = call(label);
+    CodeOffset l = call(label);
     append(desc, l, framePushed());
 }
 
 void
 MacroAssembler::call(const CallSiteDesc& desc, AsmJSInternalCallee callee)
 {
-    CodeOffsetLabel l = callWithPatch();
+    CodeOffset l = callWithPatch();
     append(desc, l, framePushed(), callee.index);
 }
 
 // ===============================================================
 // ABI function calls.
 
 void
 MacroAssembler::passABIArg(Register reg)
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -2098,31 +2098,31 @@ MacroAssembler::AutoProfilerCallInstrume
     Register reg = CallTempReg0;
     Register reg2 = CallTempReg1;
     masm.push(reg);
     masm.push(reg2);
 
     JitContext* icx = GetJitContext();
     AbsoluteAddress profilingActivation(icx->runtime->addressOfProfilingActivation());
 
-    CodeOffsetLabel label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
+    CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
     masm.loadPtr(profilingActivation, reg2);
     masm.storePtr(reg, Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));
 
     masm.appendProfilerCallSite(label);
 
     masm.pop(reg2);
     masm.pop(reg);
 }
 
 void
 MacroAssembler::linkProfilerCallSites(JitCode* code)
 {
     for (size_t i = 0; i < profilerCallSites_.length(); i++) {
-        CodeOffsetLabel offset = profilerCallSites_[i];
+        CodeOffset offset = profilerCallSites_[i];
         CodeLocationLabel location(code, offset);
         PatchDataWithValueCheck(location, ImmPtr(location.raw()), ImmPtr((void*)-1));
     }
 }
 
 void
 MacroAssembler::alignJitStackBasedOnNArgs(Register nargs)
 {
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -454,18 +454,18 @@ class MacroAssembler : public MacroAssem
     void Push(jsid id, Register scratchReg);
     void Push(TypedOrValueRegister v);
     void Push(ConstantOrRegister v);
     void Push(const ValueOperand& val);
     void Push(const Value& val);
     void Push(JSValueType type, Register reg);
     void PushValue(const Address& addr);
     void PushEmptyRooted(VMFunction::RootType rootType);
-    inline CodeOffsetLabel PushWithPatch(ImmWord word);
-    inline CodeOffsetLabel PushWithPatch(ImmPtr imm);
+    inline CodeOffset PushWithPatch(ImmWord word);
+    inline CodeOffset PushWithPatch(ImmPtr imm);
 
     void Pop(const Operand op) DEFINED_ON(x86_shared);
     void Pop(Register reg) PER_SHARED_ARCH;
     void Pop(FloatRegister t) DEFINED_ON(x86_shared);
     void Pop(const ValueOperand& val) PER_SHARED_ARCH;
     void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand& valueReg);
 
     // Move the stack pointer based on the requested amount.
@@ -486,31 +486,31 @@ class MacroAssembler : public MacroAssem
     // Manipulated by the AutoGenericRegisterScope class.
     AllocatableRegisterSet debugTrackedRegisters_;
 #endif // DEBUG
 
   public:
     // ===============================================================
     // Simple call functions.
 
-    CodeOffsetLabel call(Register reg) PER_SHARED_ARCH;
-    CodeOffsetLabel call(Label* label) PER_SHARED_ARCH;
+    CodeOffset call(Register reg) PER_SHARED_ARCH;
+    CodeOffset call(Label* label) PER_SHARED_ARCH;
     void call(const Address& addr) DEFINED_ON(x86_shared);
     void call(ImmWord imm) PER_SHARED_ARCH;
     // Call a target native function, which is neither traceable nor movable.
     void call(ImmPtr imm) PER_SHARED_ARCH;
     void call(AsmJSImmPtr imm) PER_SHARED_ARCH;
     // Call a target JitCode, which must be traceable, and may be movable.
     void call(JitCode* c) PER_SHARED_ARCH;
 
     inline void call(const CallSiteDesc& desc, const Register reg);
     inline void call(const CallSiteDesc& desc, Label* label);
     inline void call(const CallSiteDesc& desc, AsmJSInternalCallee callee);
 
-    CodeOffsetLabel callWithPatch() PER_SHARED_ARCH;
+    CodeOffset callWithPatch() PER_SHARED_ARCH;
     void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
 
     // Push the return address and make a call. On platforms where this function
     // is not defined, push the link register (pushReturnAddress) at the entry
     // point of the callee.
     void callAndPushReturnAddress(Register reg) DEFINED_ON(mips_shared, x86_shared);
     void callAndPushReturnAddress(Label* label) DEFINED_ON(mips_shared, x86_shared);
 
@@ -685,17 +685,17 @@ class MacroAssembler : public MacroAssem
     void linkExitFrame();
 
     // Patch the value of PushStubCode with the pointer to the finalized code.
     void linkSelfReference(JitCode* code);
 
     // If the JitCode that created this assembler needs to transition into the VM,
     // we want to store the JitCode on the stack in order to mark it during a GC.
     // This is a reference to a patch location where the JitCode* will be written.
-    CodeOffsetLabel selfReferencePatch_;
+    CodeOffset selfReferencePatch_;
 
   public:
     // ===============================================================
     // Logical instructions
 
     inline void not32(Register reg) PER_SHARED_ARCH;
 
     inline void and32(Register src, Register dest) PER_SHARED_ARCH;
@@ -1045,17 +1045,17 @@ class MacroAssembler : public MacroAssem
     }
 
     template <typename T>
     void patchableCallPreBarrier(const T& address, MIRType type) {
         Label done;
 
         // All barriers are off by default.
         // They are enabled if necessary at the end of CodeGenerator::generate().
-        CodeOffsetLabel nopJump = toggledJump(&done);
+        CodeOffset nopJump = toggledJump(&done);
         writePrebarrierOffset(nopJump);
 
         callPreBarrier(address, type);
         jump(&done);
 
         haltingAlign(8);
         bind(&done);
     }
@@ -1304,31 +1304,31 @@ class MacroAssembler : public MacroAssem
 
       public:
         explicit AutoProfilerCallInstrumentation(MacroAssembler& masm
                                                  MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
         ~AutoProfilerCallInstrumentation() {}
     };
     friend class AutoProfilerCallInstrumentation;
 
-    void appendProfilerCallSite(CodeOffsetLabel label) {
+    void appendProfilerCallSite(CodeOffset label) {
         propagateOOM(profilerCallSites_.append(label));
     }
 
     // Fix up the code pointers to be written for locations where profilerCallSite
     // emitted moves of RIP to a register.
     void linkProfilerCallSites(JitCode* code);
 
     // This field is used to manage profiling instrumentation output. If
     // provided and enabled, then instrumentation will be emitted around call
     // sites.
     bool emitProfilingInstrumentation_;
 
     // Record locations of the call sites.
-    Vector<CodeOffsetLabel, 0, SystemAllocPolicy> profilerCallSites_;
+    Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
 
   public:
     void loadBaselineOrIonRaw(Register script, Register dest, Label* failure);
     void loadBaselineOrIonNoArgCheck(Register callee, Register dest, Label* failure);
 
     void loadBaselineFramePtr(Register framePtr, Register dest);
 
     void pushBaselineFramePtr(Register framePtr, Register scratch) {
--- a/js/src/jit/Safepoints.cpp
+++ b/js/src/jit/Safepoints.cpp
@@ -423,17 +423,17 @@ SafepointReader::osiReturnPointOffset() 
     return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
 }
 
 CodeLocationLabel
 SafepointReader::InvalidationPatchPoint(IonScript* script, const SafepointIndex* si)
 {
     SafepointReader reader(script, si);
 
-    return CodeLocationLabel(script->method(), CodeOffsetLabel(reader.osiCallPointOffset()));
+    return CodeLocationLabel(script->method(), CodeOffset(reader.osiCallPointOffset()));
 }
 
 void
 SafepointReader::advanceFromGcRegs()
 {
     currentSlotChunk_ = 0;
     nextSlotChunkNumber_ = 0;
     currentSlotsAreStack_ = true;
--- a/js/src/jit/SharedIC.h
+++ b/js/src/jit/SharedIC.h
@@ -276,21 +276,21 @@ class ICEntry
         // The offset must fit in at least 28 bits, since we shave off 4 for
         // the Kind enum.
         MOZ_ASSERT(pcOffset_ == pcOffset);
         JS_STATIC_ASSERT(BaselineScript::MAX_JSSCRIPT_LENGTH <= (1u << 28) - 1);
         MOZ_ASSERT(pcOffset <= BaselineScript::MAX_JSSCRIPT_LENGTH);
         setKind(kind);
     }
 
-    CodeOffsetLabel returnOffset() const {
-        return CodeOffsetLabel(returnOffset_);
+    CodeOffset returnOffset() const {
+        return CodeOffset(returnOffset_);
     }
 
-    void setReturnOffset(CodeOffsetLabel offset) {
+    void setReturnOffset(CodeOffset offset) {
         MOZ_ASSERT(offset.offset() <= (size_t) UINT32_MAX);
         returnOffset_ = (uint32_t) offset.offset();
     }
 
     uint32_t pcOffset() const {
         return pcOffset_;
     }
 
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -946,23 +946,23 @@ Assembler::processCodeLabels(uint8_t* ra
 {
     for (size_t i = 0; i < codeLabels_.length(); i++) {
         CodeLabel label = codeLabels_[i];
         Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
     }
 }
 
 void
-Assembler::writeCodePointer(CodeOffsetLabel* label) {
+Assembler::writeCodePointer(CodeOffset* label) {
     BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);
     label->use(off.getOffset());
 }
 
 void
-Assembler::Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address)
+Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
 {
     *reinterpret_cast<const void**>(rawCode + label->offset()) = address;
 }
 
 Assembler::Condition
 Assembler::InvertCondition(Condition cond)
 {
     const uint32_t ConditionInversionBit = 0x10000000;
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1349,17 +1349,17 @@ class Assembler : public AssemblerShared
     void writeDataRelocation(ImmGCPtr ptr) {
         if (ptr.value) {
             if (gc::IsInsideNursery(ptr.value))
                 embedsNurseryPointers_ = true;
             if (ptr.value)
                 dataRelocations_.writeUnsigned(nextOffset().getOffset());
         }
     }
-    void writePrebarrierOffset(CodeOffsetLabel label) {
+    void writePrebarrierOffset(CodeOffset label) {
         preBarriers_.writeUnsigned(label.offset());
     }
 
     enum RelocBranchStyle {
         B_MOVWT,
         B_LDR_BX,
         B_LDR,
         B_MOVW_ADD
@@ -1425,17 +1425,17 @@ class Assembler : public AssemblerShared
     // be overwritten subsequently.
     BufferOffset allocBranchInst();
 
     // A static variant for the cases where we don't want to have an assembler
     // object.
     static void WriteInstStatic(uint32_t x, uint32_t* dest);
 
   public:
-    void writeCodePointer(CodeOffsetLabel* label);
+    void writeCodePointer(CodeOffset* label);
 
     void haltingAlign(int alignment);
     void nopAlign(int alignment);
     BufferOffset as_nop();
     BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
                         ALUOp op, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_mov(Register dest,
                         Operand2 op2, SBit s = LeaveCC, Condition c = Always);
@@ -1691,20 +1691,20 @@ class Assembler : public AssemblerShared
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
     void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target);
     void retarget(Label* label, Label* target);
     // I'm going to pretend this doesn't exist for now.
     void retarget(Label* label, void* target, Relocation::Kind reloc);
 
-    void Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address);
+    void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
 
     // See Bind
-    size_t labelToPatchOffset(CodeOffsetLabel label) {
+    size_t labelToPatchOffset(CodeOffset label) {
         return label.offset();
     }
 
     void as_bkpt();
 
   public:
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -2022,17 +2022,17 @@ void
 MacroAssemblerARMCompat::movePtr(AsmJSImmPtr imm, Register dest)
 {
     RelocStyle rs;
     if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
-    append(AsmJSAbsoluteLink(CodeOffsetLabel(currentOffset()), imm.kind()));
+    append(AsmJSAbsoluteLink(CodeOffset(currentOffset()), imm.kind()));
     ma_movPatchable(Imm32(-1), dest, Always, rs);
 }
 
 void
 MacroAssemblerARMCompat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
 }
@@ -4086,37 +4086,37 @@ MacroAssemblerARMCompat::ceilf(FloatRegi
         ma_mov(output, output, SetCC);
         ma_b(bail, Signed);
         ma_b(bail, Zero);
     }
 
     bind(&fin);
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssemblerARMCompat::toggledJump(Label* label)
 {
     // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
     BufferOffset b = ma_b(label, Always);
-    CodeOffsetLabel ret(b.getOffset());
+    CodeOffset ret(b.getOffset());
     return ret;
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssemblerARMCompat::toggledCall(JitCode* target, bool enabled)
 {
     BufferOffset bo = nextOffset();
     addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
     ScratchRegisterScope scratch(asMasm());
     ma_movPatchable(ImmPtr(target->raw()), scratch, Always, HasMOVWT() ? L_MOVWT : L_LDR);
     if (enabled)
         ma_blx(scratch);
     else
         ma_nop();
-    return CodeOffsetLabel(bo.getOffset());
+    return CodeOffset(bo.getOffset());
 }
 
 void
 MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label* bail, FloatRegister tmp)
 {
     Label handleZero;
     Label handleNeg;
     Label fin;
@@ -5081,29 +5081,29 @@ MacroAssembler::reserveStack(uint32_t am
     if (amount)
         ma_sub(Imm32(amount), sp);
     adjustFrame(amount);
 }
 
 // ===============================================================
 // Simple call functions.
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::call(Register reg)
 {
     as_blx(reg);
-    return CodeOffsetLabel(currentOffset());
-}
-
-CodeOffsetLabel
+    return CodeOffset(currentOffset());
+}
+
+CodeOffset
 MacroAssembler::call(Label* label)
 {
     // For now, assume that it'll be nearby.
     as_bl(label, Always);
-    return CodeOffsetLabel(currentOffset());
+    return CodeOffset(currentOffset());
 }
 
 void
 MacroAssembler::call(ImmWord imm)
 {
     call(ImmPtr((void*)imm.value));
 }
 
@@ -5133,22 +5133,22 @@ MacroAssembler::call(JitCode* c)
     else
         rs = L_LDR;
 
     ScratchRegisterScope scratch(*this);
     ma_movPatchable(ImmPtr(c->raw()), scratch, Always, rs);
     callJitNoProfiler(scratch);
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::callWithPatch()
 {
     // For now, assume that it'll be nearby.
     as_bl(BOffImm(), Always, /* documentation */ nullptr);
-    return CodeOffsetLabel(currentOffset());
+    return CodeOffset(currentOffset());
 }
 void
 MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
 {
     BufferOffset inst(callerOffset - 4);
     as_bl(BufferOffset(calleeOffset).diffB<BOffImm>(inst), Always, inst);
 }
 
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -597,35 +597,35 @@ class MacroAssemblerARMCompat : public M
         ma_vpop(VFPRegister(reg));
     }
 
     void popN(Register reg, Imm32 extraSpace) {
         Imm32 totSpace = Imm32(extraSpace.value + 4);
         ma_dtr(IsLoad, sp, totSpace, reg, PostIndex);
     }
 
-    CodeOffsetLabel toggledJump(Label* label);
+    CodeOffset toggledJump(Label* label);
 
     // Emit a BLX or NOP instruction. ToggleCall can be used to patch this
     // instruction.
-    CodeOffsetLabel toggledCall(JitCode* target, bool enabled);
+    CodeOffset toggledCall(JitCode* target, bool enabled);
 
-    CodeOffsetLabel pushWithPatch(ImmWord imm) {
+    CodeOffset pushWithPatch(ImmWord imm) {
         ScratchRegisterScope scratch(asMasm());
-        CodeOffsetLabel label = movWithPatch(imm, scratch);
+        CodeOffset label = movWithPatch(imm, scratch);
         ma_push(scratch);
         return label;
     }
 
-    CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
-        CodeOffsetLabel label = CodeOffsetLabel(currentOffset());
+    CodeOffset movWithPatch(ImmWord imm, Register dest) {
+        CodeOffset label = CodeOffset(currentOffset());
         ma_movPatchable(Imm32(imm.value), dest, Always, HasMOVWT() ? L_MOVWT : L_LDR);
         return label;
     }
-    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
         return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
     }
 
     void jump(Label* label) {
         as_b(label);
     }
     void jump(JitCode* code) {
         branch(code);
@@ -1759,18 +1759,18 @@ class MacroAssemblerARMCompat : public M
         cond = testUndefined(cond, value);
         emitSet(cond, dest);
     }
 
   protected:
     bool buildOOLFakeExitFrame(void* fakeReturnAddr);
 
   public:
-    CodeOffsetLabel labelForPatch() {
-        return CodeOffsetLabel(nextOffset().getOffset());
+    CodeOffset labelForPatch() {
+        return CodeOffset(nextOffset().getOffset());
     }
 
     void computeEffectiveAddress(const Address& address, Register dest) {
         ma_add(address.base, Imm32(address.offset), dest, LeaveCC);
     }
     void computeEffectiveAddress(const BaseIndex& address, Register dest) {
         ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, LeaveCC);
         if (address.offset)
--- a/js/src/jit/arm/SharedICHelpers-arm.h
+++ b/js/src/jit/arm/SharedICHelpers-arm.h
@@ -26,20 +26,20 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     // No-op on ARM because link register is always holding the return address.
 }
 
 inline void
-EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm)
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
 {
     // Move ICEntry offset into ICStubReg
-    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+    CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     MOZ_ASSERT(R2 == ValueOperand(r1, r0));
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -238,17 +238,17 @@ class Assembler : public vixl::Assembler
 
     void processCodeLabels(uint8_t* rawCode) {
         for (size_t i = 0; i < codeLabels_.length(); i++) {
             CodeLabel label = codeLabels_[i];
             Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
         }
     }
 
-    void Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address) {
+    void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) {
         *reinterpret_cast<const void**>(rawCode + label->offset()) = address;
     }
 
     void retarget(Label* cur, Label* next);
     void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
         MOZ_CRASH("NYI");
     }
 
@@ -257,17 +257,17 @@ class Assembler : public vixl::Assembler
     void flush() {
         armbuffer_.flushPool();
     }
 
     int actualIndex(int curOffset) {
         ARMBuffer::PoolEntry pe(curOffset);
         return armbuffer_.poolEntryOffset(pe);
     }
-    size_t labelToPatchOffset(CodeOffsetLabel label) {
+    size_t labelToPatchOffset(CodeOffset label) {
         return label.offset();
     }
     static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index) {
         return code->raw() + index;
     }
     void setPrinter(Sprinter* sp) {
     }
 
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -504,30 +504,30 @@ MacroAssembler::reserveStack(uint32_t am
     // It would save some instructions if we had a fixed frame size.
     vixl::MacroAssembler::Claim(Operand(amount));
     adjustFrame(amount);
 }
 
 // ===============================================================
 // Simple call functions.
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::call(Register reg)
 {
     syncStackPtr();
     Blr(ARMRegister(reg, 64));
-    return CodeOffsetLabel(currentOffset());
+    return CodeOffset(currentOffset());
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::call(Label* label)
 {
     syncStackPtr();
     Bl(label);
-    return CodeOffsetLabel(currentOffset());
+    return CodeOffset(currentOffset());
 }
 
 void
 MacroAssembler::call(ImmWord imm)
 {
     call(ImmPtr((void*)imm.value));
 }
 
@@ -555,21 +555,21 @@ MacroAssembler::call(JitCode* c)
     vixl::UseScratchRegisterScope temps(this);
     const ARMRegister scratch64 = temps.AcquireX();
     syncStackPtr();
     BufferOffset off = immPool64(scratch64, uint64_t(c->raw()));
     addPendingJump(off, ImmPtr(c->raw()), Relocation::JITCODE);
     blr(scratch64);
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::callWithPatch()
 {
     MOZ_CRASH("NYI");
-    return CodeOffsetLabel();
+    return CodeOffset();
 }
 void
 MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
 {
     MOZ_CRASH("NYI");
 }
 
 void
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -220,21 +220,21 @@ class MacroAssemblerCompat : public vixl
     }
     void Pop(ARMRegister r) {
         vixl::MacroAssembler::Pop(r);
         adjustFrame(- r.size() / 8);
     }
     // FIXME: This is the same on every arch.
     // FIXME: If we can share framePushed_, we can share this.
     // FIXME: Or just make it at the highest level.
-    CodeOffsetLabel PushWithPatch(ImmWord word) {
+    CodeOffset PushWithPatch(ImmWord word) {
         framePushed_ += sizeof(word.value);
         return pushWithPatch(word);
     }
-    CodeOffsetLabel PushWithPatch(ImmPtr ptr) {
+    CodeOffset PushWithPatch(ImmPtr ptr) {
         return PushWithPatch(ImmWord(uintptr_t(ptr.value)));
     }
 
     uint32_t framePushed() const {
         return framePushed_;
     }
     void adjustFrame(int32_t diff) {
         setFramePushed(framePushed_ + diff);
@@ -384,31 +384,31 @@ class MacroAssemblerCompat : public vixl
     void moveValue(const Value& src, const ValueOperand& dest) {
         moveValue(src, dest.valueReg());
     }
     void moveValue(const ValueOperand& src, const ValueOperand& dest) {
         if (src.valueReg() != dest.valueReg())
             movePtr(src.valueReg(), dest.valueReg());
     }
 
-    CodeOffsetLabel pushWithPatch(ImmWord imm) {
+    CodeOffset pushWithPatch(ImmWord imm) {
         vixl::UseScratchRegisterScope temps(this);
         const Register scratch = temps.AcquireX().asUnsized();
-        CodeOffsetLabel label = movWithPatch(imm, scratch);
+        CodeOffset label = movWithPatch(imm, scratch);
         push(scratch);
         return label;
     }
 
-    CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
+    CodeOffset movWithPatch(ImmWord imm, Register dest) {
         BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
-        return CodeOffsetLabel(off.getOffset());
-    }
-    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+        return CodeOffset(off.getOffset());
+    }
+    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
         BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
-        return CodeOffsetLabel(off.getOffset());
+        return CodeOffset(off.getOffset());
     }
 
     void boxValue(JSValueType type, Register src, Register dest) {
         Orr(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(ImmShiftedTag(type).value));
     }
     void splitTag(Register src, Register dest) {
         ubfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT, (64 - JSVAL_TAG_SHIFT));
     }
@@ -773,17 +773,17 @@ class MacroAssemblerCompat : public vixl
     void movePtr(ImmWord imm, Register dest) {
         Mov(ARMRegister(dest, 64), int64_t(imm.value));
     }
     void movePtr(ImmPtr imm, Register dest) {
         Mov(ARMRegister(dest, 64), int64_t(imm.value));
     }
     void movePtr(AsmJSImmPtr imm, Register dest) {
         BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
-        append(AsmJSAbsoluteLink(CodeOffsetLabel(off.getOffset()), imm.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm.kind()));
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
         writeDataRelocation(imm, load);
     }
     void move64(Register64 src, Register64 dest) {
         movePtr(src.reg, dest.reg);
     }
@@ -2511,19 +2511,19 @@ class MacroAssemblerCompat : public vixl
         }
     }
 
     void loadInstructionPointerAfterCall(Register dest) {
         MOZ_CRASH("loadInstructionPointerAfterCall");
     }
 
     // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
-    CodeOffsetLabel toggledJump(Label* label) {
+    CodeOffset toggledJump(Label* label) {
         BufferOffset offset = b(label, Always);
-        CodeOffsetLabel ret(offset.getOffset());
+        CodeOffset ret(offset.getOffset());
         return ret;
     }
 
     // load: offset to the load instruction obtained by movePatchablePtr().
     void writeDataRelocation(ImmGCPtr ptr, BufferOffset load) {
         if (ptr.value)
             dataRelocations_.writeUnsigned(load.getOffset());
     }
@@ -2531,17 +2531,17 @@ class MacroAssemblerCompat : public vixl
         if (val.isMarkable()) {
             gc::Cell* cell = reinterpret_cast<gc::Cell*>(val.toGCThing());
             if (cell && gc::IsInsideNursery(cell))
                 embedsNurseryPointers_ = true;
             dataRelocations_.writeUnsigned(load.getOffset());
         }
     }
 
-    void writePrebarrierOffset(CodeOffsetLabel label) {
+    void writePrebarrierOffset(CodeOffset label) {
         preBarriers_.writeUnsigned(label.offset());
     }
 
     void computeEffectiveAddress(const Address& address, Register dest) {
         Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset));
     }
     void computeEffectiveAddress(const BaseIndex& address, Register dest) {
         ARMRegister dest64(dest, 64);
@@ -2549,24 +2549,24 @@ class MacroAssemblerCompat : public vixl
         ARMRegister index64(address.index, 64);
 
         Add(dest64, base64, Operand(index64, vixl::LSL, address.scale));
         if (address.offset)
             Add(dest64, dest64, Operand(address.offset));
     }
 
   public:
-    CodeOffsetLabel labelForPatch() {
-        return CodeOffsetLabel(nextOffset().getOffset());
+    CodeOffset labelForPatch() {
+        return CodeOffset(nextOffset().getOffset());
     }
 
     void handleFailureWithHandlerTail(void* handler);
 
     // FIXME: See CodeGeneratorX64 calls to noteAsmJSGlobalAccess.
-    void patchAsmJSGlobalAccess(CodeOffsetLabel patchAt, uint8_t* code,
+    void patchAsmJSGlobalAccess(CodeOffset patchAt, uint8_t* code,
                                 uint8_t* globalData, unsigned globalDataOffset)
     {
         MOZ_CRASH("patchAsmJSGlobalAccess");
     }
 
     void memIntToValue(const Address& src, const Address& dest) {
         vixl::UseScratchRegisterScope temps(this);
         const Register scratch = temps.AcquireX().asUnsized();
@@ -2861,17 +2861,17 @@ class MacroAssemblerCompat : public vixl
                                         Register temp, AnyRegister output);
 
     template<typename T>
     void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
                                        Register temp, AnyRegister output);
 
     // Emit a BLR or NOP instruction. ToggleCall can be used to patch
     // this instruction.
-    CodeOffsetLabel toggledCall(JitCode* target, bool enabled) {
+    CodeOffset toggledCall(JitCode* target, bool enabled) {
         // The returned offset must be to the first instruction generated,
         // for the debugger to match offset with Baseline's pcMappingEntries_.
         BufferOffset offset = nextOffset();
 
         syncStackPtr();
 
         BufferOffset loadOffset;
         {
@@ -2887,17 +2887,17 @@ class MacroAssemblerCompat : public vixl
 
             if (enabled)
                 blr(ScratchReg2_64);
             else
                 nop();
         }
 
         addPendingJump(loadOffset, ImmPtr(target->raw()), Relocation::JITCODE);
-        CodeOffsetLabel ret(offset.getOffset());
+        CodeOffset ret(offset.getOffset());
         return ret;
     }
 
     static size_t ToggledCallSize(uint8_t* code) {
         static const uint32_t syncStackInstruction = 0x9100039f; // mov sp, r28
 
         // start it off as an 8 byte sequence
         int ret = 8;
--- a/js/src/jit/arm64/SharedICHelpers-arm64.h
+++ b/js/src/jit/arm64/SharedICHelpers-arm64.h
@@ -26,20 +26,20 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     // No-op on ARM because link register is always holding the return address.
 }
 
 inline void
-EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm)
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
 {
     // Move ICEntry offset into ICStubReg
-    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+    CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     MOZ_ASSERT(R2 == ValueOperand(r0));
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -774,17 +774,17 @@ class AssemblerMIPSShared : public Assem
     // before to recover the pointer, and not after.
     void writeDataRelocation(ImmGCPtr ptr) {
         if (ptr.value) {
             if (gc::IsInsideNursery(ptr.value))
                 embedsNurseryPointers_ = true;
             dataRelocations_.writeUnsigned(nextOffset().getOffset());
         }
     }
-    void writePrebarrierOffset(CodeOffsetLabel label) {
+    void writePrebarrierOffset(CodeOffset label) {
         preBarriers_.writeUnsigned(label.offset());
     }
 
   public:
     bool oom() const;
 
     void setPrinter(Sprinter* sp) {
     }
@@ -1039,17 +1039,17 @@ class AssemblerMIPSShared : public Assem
         return nextOffset().getOffset();
     }
     void retarget(Label* label, Label* target);
     void retargetWithOffset(size_t offset, const LabelBase* label, Label* target) {
         MOZ_CRASH("NYI");
     }
 
     // See Bind
-    size_t labelToPatchOffset(CodeOffsetLabel label) { return label.offset(); }
+    size_t labelToPatchOffset(CodeOffset label) { return label.offset(); }
 
     void call(Label* label);
     void call(void* target);
 
     void as_break(uint32_t code);
     void as_sync(uint32_t stype = 0);
 
   public:
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -1120,32 +1120,32 @@ MacroAssembler::Pop(const ValueOperand& 
     popValue(val);
     framePushed_ -= sizeof(Value);
 }
 
 
 // ===============================================================
 // Simple call functions.
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::call(Register reg)
 {
     as_jalr(reg);
     as_nop();
-    return CodeOffsetLabel(currentOffset());
+    return CodeOffset(currentOffset());
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::call(Label* label)
 {
     ma_bal(label);
-    return CodeOffsetLabel(currentOffset());
+    return CodeOffset(currentOffset());
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::callWithPatch()
 {
     addLongJump(nextOffset());
     ma_liPatchable(ScratchRegister, ImmWord(0));
     return call(ScratchRegister);
 }
 
 void
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -887,17 +887,17 @@ MacroAssemblerMIPSCompat::movePtr(ImmGCP
 void
 MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
 MacroAssemblerMIPSCompat::movePtr(AsmJSImmPtr imm, Register dest)
 {
-    append(AsmJSAbsoluteLink(CodeOffsetLabel(nextOffset().getOffset()), imm.kind()));
+    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm.kind()));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
 void
 MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeByte, ZeroExtend);
 }
@@ -2516,29 +2516,29 @@ MacroAssemblerMIPSCompat::atomicExchange
                                                         Register offsetTemp, Register maskTemp,
                                                         AnyRegister output);
 template void
 MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                         Register value, Register temp, Register valueTemp,
                                                         Register offsetTemp, Register maskTemp,
                                                         AnyRegister output);
 
-CodeOffsetLabel
+CodeOffset
 MacroAssemblerMIPSCompat::toggledJump(Label* label)
 {
-    CodeOffsetLabel ret(nextOffset().getOffset());
+    CodeOffset ret(nextOffset().getOffset());
     ma_b(label);
     return ret;
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssemblerMIPSCompat::toggledCall(JitCode* target, bool enabled)
 {
     BufferOffset bo = nextOffset();
-    CodeOffsetLabel offset(bo.getOffset());
+    CodeOffset offset(bo.getOffset());
     addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
     ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
     if (enabled) {
         as_jalr(ScratchRegister);
         as_nop();
     } else {
         as_nop();
         as_nop();
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -269,39 +269,39 @@ class MacroAssemblerMIPSCompat : public 
     }
     void pop(FloatRegister reg) {
         ma_pop(reg);
     }
 
     // Emit a branch that can be toggled to a non-operation. On MIPS we use
     // "andi" instruction to toggle the branch.
     // See ToggleToJmp(), ToggleToCmp().
-    CodeOffsetLabel toggledJump(Label* label);
+    CodeOffset toggledJump(Label* label);
 
     // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
     // this instruction.
-    CodeOffsetLabel toggledCall(JitCode* target, bool enabled);
+    CodeOffset toggledCall(JitCode* target, bool enabled);
 
     static size_t ToggledCallSize(uint8_t* code) {
         // Four instructions used in: MacroAssemblerMIPSCompat::toggledCall
         return 4 * sizeof(uint32_t);
     }
 
-    CodeOffsetLabel pushWithPatch(ImmWord imm) {
-        CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
+    CodeOffset pushWithPatch(ImmWord imm) {
+        CodeOffset label = movWithPatch(imm, ScratchRegister);
         ma_push(ScratchRegister);
         return label;
     }
 
-    CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
-        CodeOffsetLabel label = CodeOffsetLabel(currentOffset());
+    CodeOffset movWithPatch(ImmWord imm, Register dest) {
+        CodeOffset label = CodeOffset(currentOffset());
         ma_liPatchable(dest, imm);
         return label;
     }
-    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
         return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
     }
 
     void jump(Label* label) {
         ma_b(label);
     }
     void jump(Register reg) {
         as_jr(reg);
@@ -1309,18 +1309,18 @@ class MacroAssemblerMIPSCompat : public 
     {
         ma_cmp_set(dest, lhs, rhs, cond);
     }
 
   protected:
     bool buildOOLFakeExitFrame(void* fakeReturnAddr);
 
   public:
-    CodeOffsetLabel labelForPatch() {
-        return CodeOffsetLabel(nextOffset().getOffset());
+    CodeOffset labelForPatch() {
+        return CodeOffset(nextOffset().getOffset());
     }
 
     void memIntToValue(Address Source, Address Dest) {
         load32(Source, ScratchRegister);
         storeValue(JSVAL_TYPE_INT32, ScratchRegister, Dest);
     }
 
     void lea(Operand addr, Register dest) {
--- a/js/src/jit/mips32/SharedICHelpers-mips32.h
+++ b/js/src/jit/mips32/SharedICHelpers-mips32.h
@@ -27,20 +27,20 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     // No-op on MIPS because ra register is always holding the return address.
 }
 
 inline void
-EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm)
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
 {
     // Move ICEntry offset into ICStubReg.
-    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+    CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg.
     masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use it as scratch.
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -972,17 +972,17 @@ MacroAssemblerMIPS64Compat::movePtr(ImmG
 void
 MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
 MacroAssemblerMIPS64Compat::movePtr(AsmJSImmPtr imm, Register dest)
 {
-    append(AsmJSAbsoluteLink(CodeOffsetLabel(nextOffset().getOffset()), imm.kind()));
+    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm.kind()));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
 void
 MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeByte, ZeroExtend);
 }
@@ -2619,29 +2619,29 @@ MacroAssemblerMIPS64Compat::atomicExchan
                                                           Register offsetTemp, Register maskTemp,
                                                           AnyRegister output);
 template void
 MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                                           Register value, Register temp, Register valueTemp,
                                                           Register offsetTemp, Register maskTemp,
                                                           AnyRegister output);
 
-CodeOffsetLabel
+CodeOffset
 MacroAssemblerMIPS64Compat::toggledJump(Label* label)
 {
-    CodeOffsetLabel ret(nextOffset().getOffset());
+    CodeOffset ret(nextOffset().getOffset());
     ma_b(label);
     return ret;
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssemblerMIPS64Compat::toggledCall(JitCode* target, bool enabled)
 {
     BufferOffset bo = nextOffset();
-    CodeOffsetLabel offset(bo.getOffset());
+    CodeOffset offset(bo.getOffset());
     addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
     ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
     if (enabled) {
         as_jalr(ScratchRegister);
         as_nop();
     } else {
         as_nop();
         as_nop();
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -290,42 +290,42 @@ class MacroAssemblerMIPS64Compat : publi
     }
     void pop(FloatRegister reg) {
         ma_pop(reg);
     }
 
     // Emit a branch that can be toggled to a non-operation. On MIPS64 we use
     // "andi" instruction to toggle the branch.
     // See ToggleToJmp(), ToggleToCmp().
-    CodeOffsetLabel toggledJump(Label* label);
+    CodeOffset toggledJump(Label* label);
 
     // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
     // this instruction.
-    CodeOffsetLabel toggledCall(JitCode* target, bool enabled);
+    CodeOffset toggledCall(JitCode* target, bool enabled);
 
     static size_t ToggledCallSize(uint8_t* code) {
         // Six instructions used in: MacroAssemblerMIPS64Compat::toggledCall
         return 6 * sizeof(uint32_t);
     }
 
-    CodeOffsetLabel pushWithPatch(ImmWord imm) {
-        CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
+    CodeOffset pushWithPatch(ImmWord imm) {
+        CodeOffset offset = movWithPatch(imm, ScratchRegister);
         ma_push(ScratchRegister);
-        return label;
+        return offset;
     }
 
-    CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
-        CodeOffsetLabel label = CodeOffsetLabel(currentOffset());
+    CodeOffset movWithPatch(ImmWord imm, Register dest) {
+        CodeOffset offset = CodeOffset(currentOffset());
         ma_liPatchable(dest, imm, Li64);
-        return label;
+        return offset;
     }
-    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
-        CodeOffsetLabel label = CodeOffsetLabel(currentOffset());
+    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+        CodeOffset offset = CodeOffset(currentOffset());
         ma_liPatchable(dest, imm);
-        return label;
+        return offset;
     }
 
     void jump(Label* label) {
         ma_b(label);
     }
     void jump(Register reg) {
         as_jr(reg);
         as_nop();
@@ -1327,18 +1327,18 @@ class MacroAssemblerMIPS64Compat : publi
         ma_cmp_set(dest, lhs, rhs, cond);
     }
     void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs, Register dest);
 
   protected:
     bool buildOOLFakeExitFrame(void* fakeReturnAddr);
 
   public:
-    CodeOffsetLabel labelForPatch() {
-        return CodeOffsetLabel(nextOffset().getOffset());
+    CodeOffset labelForPatch() {
+        return CodeOffset(nextOffset().getOffset());
     }
 
     void memIntToValue(Address Source, Address Dest) {
         load32(Source, ScratchRegister);
         storeValue(JSVAL_TYPE_INT32, ScratchRegister, Dest);
     }
 
     void lea(Operand addr, Register dest) {
--- a/js/src/jit/mips64/SharedICHelpers-mips64.h
+++ b/js/src/jit/mips64/SharedICHelpers-mips64.h
@@ -27,20 +27,20 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     // No-op on MIPS because ra register is always holding the return address.
 }
 
 inline void
-EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm)
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
 {
     // Move ICEntry offset into ICStubReg.
-    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+    CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg.
     masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use it as scratch.
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -183,29 +183,29 @@ class MacroAssemblerNone : public Assemb
 
     template <typename T> void bind(T) { MOZ_CRASH(); }
     template <typename T> void j(Condition, T) { MOZ_CRASH(); }
     template <typename T> void jump(T) { MOZ_CRASH(); }
     void haltingAlign(size_t) { MOZ_CRASH(); }
     void nopAlign(size_t) { MOZ_CRASH(); }
     void checkStackAlignment() { MOZ_CRASH(); }
     uint32_t currentOffset() { MOZ_CRASH(); }
-    uint32_t labelToPatchOffset(CodeOffsetLabel) { MOZ_CRASH(); }
-    CodeOffsetLabel labelForPatch() { MOZ_CRASH(); }
+    uint32_t labelToPatchOffset(CodeOffset) { MOZ_CRASH(); }
+    CodeOffset labelForPatch() { MOZ_CRASH(); }
 
     void nop() { MOZ_CRASH(); }
     void breakpoint() { MOZ_CRASH(); }
     void abiret() { MOZ_CRASH(); }
     void ret() { MOZ_CRASH(); }
 
-    CodeOffsetLabel toggledJump(Label*) { MOZ_CRASH(); }
-    CodeOffsetLabel toggledCall(JitCode*, bool) { MOZ_CRASH(); }
+    CodeOffset toggledJump(Label*) { MOZ_CRASH(); }
+    CodeOffset toggledCall(JitCode*, bool) { MOZ_CRASH(); }
     static size_t ToggledCallSize(uint8_t*) { MOZ_CRASH(); }
 
-    void writePrebarrierOffset(CodeOffsetLabel) { MOZ_CRASH(); }
+    void writePrebarrierOffset(CodeOffset) { MOZ_CRASH(); }
 
     void finish() { MOZ_CRASH(); }
 
     template <typename T, typename S> void moveValue(T, S) { MOZ_CRASH(); }
     template <typename T, typename S, typename U> void moveValue(T, S, U) { MOZ_CRASH(); }
     template <typename T, typename S> void storeValue(T, S) { MOZ_CRASH(); }
     template <typename T, typename S, typename U> void storeValue(T, S, U) { MOZ_CRASH(); }
     template <typename T, typename S> void loadValue(T, S) { MOZ_CRASH(); }
@@ -213,17 +213,17 @@ class MacroAssemblerNone : public Assemb
     template <typename T, typename S> void pushValue(T, S) { MOZ_CRASH(); }
     void popValue(ValueOperand) { MOZ_CRASH(); }
     void tagValue(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
     void retn(Imm32 n) { MOZ_CRASH(); }
     template <typename T> void push(T) { MOZ_CRASH(); }
     template <typename T> void Push(T) { MOZ_CRASH(); }
     template <typename T> void pop(T) { MOZ_CRASH(); }
     template <typename T> void Pop(T) { MOZ_CRASH(); }
-    template <typename T> CodeOffsetLabel pushWithPatch(T) { MOZ_CRASH(); }
+    template <typename T> CodeOffset pushWithPatch(T) { MOZ_CRASH(); }
 
     CodeOffsetJump jumpWithPatch(RepatchLabel*, Label* doc = nullptr) { MOZ_CRASH(); }
     CodeOffsetJump jumpWithPatch(RepatchLabel*, Condition, Label* doc = nullptr) { MOZ_CRASH(); }
     CodeOffsetJump backedgeJump(RepatchLabel* label, Label* doc = nullptr) { MOZ_CRASH(); }
     template <typename T, typename S>
     CodeOffsetJump branchPtrWithPatch(Condition, T, S, RepatchLabel*) { MOZ_CRASH(); }
 
     template <typename T, typename S> void branchTestValue(Condition, T, S, Label*) { MOZ_CRASH(); }
@@ -261,17 +261,17 @@ class MacroAssemblerNone : public Assemb
     template <typename T, typename S> void branchTest64(Condition, T, T, S, Label*) { MOZ_CRASH(); }
     template <typename T, typename S> void mov(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void movq(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void movePtr(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void move32(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void moveFloat32(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void moveDouble(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void move64(T, S) { MOZ_CRASH(); }
-    template <typename T> CodeOffsetLabel movWithPatch(T, Register) { MOZ_CRASH(); }
+    template <typename T> CodeOffset movWithPatch(T, Register) { MOZ_CRASH(); }
 
     template <typename T> void loadInt32x1(T, FloatRegister dest) { MOZ_CRASH(); }
     template <typename T> void loadInt32x2(T, FloatRegister dest) { MOZ_CRASH(); }
     template <typename T> void loadInt32x3(T, FloatRegister dest) { MOZ_CRASH(); }
     template <typename T> void loadFloat32x3(T, FloatRegister dest) { MOZ_CRASH(); }
 
     template <typename T> void loadPtr(T, Register) { MOZ_CRASH(); }
     template <typename T> void load32(T, Register) { MOZ_CRASH(); }
--- a/js/src/jit/none/SharedICHelpers-none.h
+++ b/js/src/jit/none/SharedICHelpers-none.h
@@ -11,17 +11,17 @@ namespace js {
 namespace jit {
 
 static const size_t ICStackValueOffset = 0;
 static const uint32_t STUB_FRAME_SIZE = 0;
 static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = 0;
 
 inline void EmitRestoreTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
 inline void EmitRepushTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
-inline void EmitCallIC(CodeOffsetLabel*, MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitCallIC(CodeOffset*, MacroAssembler&) { MOZ_CRASH(); }
 inline void EmitEnterTypeMonitorIC(MacroAssembler&, size_t v = 0) { MOZ_CRASH(); }
 inline void EmitReturnFromIC(MacroAssembler&) { MOZ_CRASH(); }
 inline void EmitChangeICReturnAddress(MacroAssembler&, Register) { MOZ_CRASH(); }
 inline void EmitBaselineTailCallVM(JitCode*, MacroAssembler&, uint32_t) { MOZ_CRASH(); }
 inline void EmitIonTailCallVM(JitCode*, MacroAssembler&, uint32_t) { MOZ_CRASH(); }
 inline void EmitBaselineCreateStubFrameDescriptor(MacroAssembler&, Register) { MOZ_CRASH(); }
 inline void EmitBaselineCallVM(JitCode*, MacroAssembler&) { MOZ_CRASH(); }
 inline void EmitIonCallVM(JitCode*, size_t, MacroAssembler&) { MOZ_CRASH(); }
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -417,25 +417,25 @@ struct AbsoluteLabel : public LabelBase
     void bind() {
         bound_ = true;
 
         // These labels cannot be used after being bound.
         offset_ = -1;
     }
 };
 
-class CodeOffsetLabel
+class CodeOffset
 {
     size_t offset_;
 
     static const size_t NOT_USED = size_t(-1);
 
   public:
-    explicit CodeOffsetLabel(size_t offset) : offset_(offset) {}
-    CodeOffsetLabel() : offset_(NOT_USED) {}
+    explicit CodeOffset(size_t offset) : offset_(offset) {}
+    CodeOffset() : offset_(NOT_USED) {}
 
     size_t offset() const {
         MOZ_ASSERT(used());
         return offset_;
     }
 
     void use(size_t offset) {
         MOZ_ASSERT(!used());
@@ -456,36 +456,36 @@ class CodeOffsetLabel
 // A code label contains an absolute reference to a point in the code. Thus, it
 // cannot be patched until after linking.
 // When the source label is resolved into a memory address, this address is
 // patched into the destination address.
 class CodeLabel
 {
     // The destination position, where the absolute reference should get
     // patched into.
-    CodeOffsetLabel patchAt_;
+    CodeOffset patchAt_;
 
     // The source label (relative) in the code to where the destination should
     // get patched to.
-    CodeOffsetLabel target_;
+    CodeOffset target_;
 
   public:
     CodeLabel()
     { }
-    explicit CodeLabel(const CodeOffsetLabel& patchAt)
+    explicit CodeLabel(const CodeOffset& patchAt)
       : patchAt_(patchAt)
     { }
-    CodeLabel(const CodeOffsetLabel& patchAt, const CodeOffsetLabel& target)
+    CodeLabel(const CodeOffset& patchAt, const CodeOffset& target)
       : patchAt_(patchAt),
         target_(target)
     { }
-    CodeOffsetLabel* patchAt() {
+    CodeOffset* patchAt() {
         return &patchAt_;
     }
-    CodeOffsetLabel* target() {
+    CodeOffset* target() {
         return &target_;
     }
     void offsetBy(size_t delta) {
         patchAt_.offsetBy(delta);
         target_.offsetBy(delta);
     }
 };
 
@@ -620,30 +620,30 @@ class CodeLocationLabel
     }
 #endif
 
   public:
     CodeLocationLabel() {
         raw_ = nullptr;
         setUninitialized();
     }
-    CodeLocationLabel(JitCode* code, CodeOffsetLabel base) {
+    CodeLocationLabel(JitCode* code, CodeOffset base) {
         *this = base;
         repoint(code);
     }
     explicit CodeLocationLabel(JitCode* code) {
         raw_ = code->raw();
         setAbsolute();
     }
     explicit CodeLocationLabel(uint8_t* raw) {
         raw_ = raw;
         setAbsolute();
     }
 
-    void operator = (CodeOffsetLabel base) {
+    void operator = (CodeOffset base) {
         raw_ = (uint8_t*)base.offset();
         setRelative();
     }
     ptrdiff_t operator - (const CodeLocationLabel& other) {
         return raw_ - other.raw_;
     }
 
     void repoint(JitCode* code, MacroAssembler* masm = nullptr);
@@ -852,20 +852,20 @@ class AsmJSHeapAccess
     }
 #endif
 };
 
 typedef Vector<AsmJSHeapAccess, 0, SystemAllocPolicy> AsmJSHeapAccessVector;
 
 struct AsmJSGlobalAccess
 {
-    CodeOffsetLabel patchAt;
+    CodeOffset patchAt;
     unsigned globalDataOffset;
 
-    AsmJSGlobalAccess(CodeOffsetLabel patchAt, unsigned globalDataOffset)
+    AsmJSGlobalAccess(CodeOffset patchAt, unsigned globalDataOffset)
       : patchAt(patchAt), globalDataOffset(globalDataOffset)
     {}
 };
 
 // Describes the intended pointee of an immediate to be embedded in asm.js
 // code. By representing the pointee as a symbolic enum, the pointee can be
 // patched after deserialization when the address of global things has changed.
 enum AsmJSImmKind
@@ -950,19 +950,19 @@ class AsmJSAbsoluteAddress
     AsmJSAbsoluteAddress() {}
 };
 
 // Represents an instruction to be patched and the intended pointee. These
 // links are accumulated in the MacroAssembler, but patching is done outside
 // the MacroAssembler (in AsmJSModule::staticallyLink).
 struct AsmJSAbsoluteLink
 {
-    AsmJSAbsoluteLink(CodeOffsetLabel patchAt, AsmJSImmKind target)
+    AsmJSAbsoluteLink(CodeOffset patchAt, AsmJSImmKind target)
       : patchAt(patchAt), target(target) {}
-    CodeOffsetLabel patchAt;
+    CodeOffset patchAt;
     AsmJSImmKind target;
 };
 
 // Represents a call from an asm.js function to another asm.js function,
 // represented by the index of the callee in the Module Validator
 struct AsmJSInternalCallee
 {
     uint32_t index;
@@ -1006,17 +1006,17 @@ class AssemblerShared
     bool oom() const {
         return !enoughMemory_;
     }
 
     bool embedsNurseryPointers() const {
         return embedsNurseryPointers_;
     }
 
-    void append(const CallSiteDesc& desc, CodeOffsetLabel label, size_t framePushed,
+    void append(const CallSiteDesc& desc, CodeOffset label, size_t framePushed,
                 uint32_t targetIndex = CallSiteAndTarget::NOT_INTERNAL)
     {
         // framePushed does not include sizeof(AsmJSFrame), so add it in here (see
         // CallSite::stackDepth).
         CallSite callsite(desc, label.offset(), framePushed + sizeof(AsmJSFrame));
         enoughMemory_ &= callsites_.append(CallSiteAndTarget(callsite, targetIndex));
     }
     CallSiteAndTargetVector& callSites() { return callsites_; }
--- a/js/src/jit/shared/BaselineCompiler-shared.h
+++ b/js/src/jit/shared/BaselineCompiler-shared.h
@@ -48,29 +48,29 @@ class BaselineCompilerShared
     js::Vector<PCMappingEntry, 16, SystemAllocPolicy> pcMappingEntries_;
 
     // Labels for the 'movWithPatch' for loading IC entry pointers in
     // the generated IC-calling code in the main jitcode.  These need
     // to be patched with the actual icEntry offsets after the BaselineScript
     // has been allocated.
     struct ICLoadLabel {
         size_t icEntry;
-        CodeOffsetLabel label;
+        CodeOffset label;
     };
     js::Vector<ICLoadLabel, 16, SystemAllocPolicy> icLoadLabels_;
 
     uint32_t pushedBeforeCall_;
     mozilla::DebugOnly<bool> inCall_;
 
-    CodeOffsetLabel spsPushToggleOffset_;
-    CodeOffsetLabel profilerEnterFrameToggleOffset_;
-    CodeOffsetLabel profilerExitFrameToggleOffset_;
-    CodeOffsetLabel traceLoggerEnterToggleOffset_;
-    CodeOffsetLabel traceLoggerExitToggleOffset_;
-    CodeOffsetLabel traceLoggerScriptTextIdOffset_;
+    CodeOffset spsPushToggleOffset_;
+    CodeOffset profilerEnterFrameToggleOffset_;
+    CodeOffset profilerExitFrameToggleOffset_;
+    CodeOffset traceLoggerEnterToggleOffset_;
+    CodeOffset traceLoggerExitToggleOffset_;
+    CodeOffset traceLoggerScriptTextIdOffset_;
 
     BaselineCompilerShared(JSContext* cx, TempAllocator& alloc, JSScript* script);
 
     ICEntry* allocateICEntry(ICStub* stub, ICEntry::Kind kind) {
         if (!stub)
             return nullptr;
 
         // Create the entry and add it to the vector.
@@ -85,25 +85,25 @@ class BaselineCompilerShared
 
         // Return pointer to the IC entry
         return &vecEntry;
     }
 
     // Append an ICEntry without a stub.
     bool appendICEntry(ICEntry::Kind kind, uint32_t returnOffset) {
         ICEntry entry(script->pcToOffset(pc), kind);
-        entry.setReturnOffset(CodeOffsetLabel(returnOffset));
+        entry.setReturnOffset(CodeOffset(returnOffset));
         if (!icEntries_.append(entry)) {
             ReportOutOfMemory(cx);
             return false;
         }
         return true;
     }
 
-    bool addICLoadLabel(CodeOffsetLabel label) {
+    bool addICLoadLabel(CodeOffset label) {
         MOZ_ASSERT(!icEntries_.empty());
         ICLoadLabel loadLabel;
         loadLabel.label = label;
         loadLabel.icEntry = icEntries_.length() - 1;
         if (!icLoadLabels_.append(loadLabel)) {
             ReportOutOfMemory(cx);
             return false;
         }
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -259,17 +259,17 @@ CodeGeneratorShared::addNativeToBytecode
             dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
             return true;
         }
     }
 
     // Otherwise, some native code was generated for the previous bytecode site.
     // Add a new entry for code that is about to be generated.
     NativeToBytecode entry;
-    entry.nativeOffset = CodeOffsetLabel(nativeOffset);
+    entry.nativeOffset = CodeOffset(nativeOffset);
     entry.tree = tree;
     entry.pc = pc;
     if (!nativeToBytecodeList_.append(entry))
         return false;
 
     JitSpew(JitSpew_Profiling, " => Push new entry.");
     dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
     return true;
@@ -338,34 +338,34 @@ CodeGeneratorShared::addTrackedOptimizat
         // we are done.
         if (lastEntry.optimizations == optimizations)
             return true;
     }
 
     // If we're generating code for a new set of optimizations, add a new
     // entry.
     NativeToTrackedOptimizations entry;
-    entry.startOffset = CodeOffsetLabel(nativeOffset);
-    entry.endOffset = CodeOffsetLabel(nativeOffset);
+    entry.startOffset = CodeOffset(nativeOffset);
+    entry.endOffset = CodeOffset(nativeOffset);
     entry.optimizations = optimizations;
     return trackedOptimizations_.append(entry);
 }
 
 void
 CodeGeneratorShared::extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
 {
     if (!isOptimizationTrackingEnabled())
         return;
 
     uint32_t nativeOffset = masm.currentOffset();
     NativeToTrackedOptimizations& entry = trackedOptimizations_.back();
     MOZ_ASSERT(entry.optimizations == optimizations);
     MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= entry.endOffset.offset());
 
-    entry.endOffset = CodeOffsetLabel(nativeOffset);
+    entry.endOffset = CodeOffset(nativeOffset);
 
     // If we generated no code, remove the last entry.
     if (nativeOffset == entry.startOffset.offset())
         trackedOptimizations_.popBack();
 }
 
 // see OffsetOfFrameSlot
 static inline int32_t
@@ -1729,25 +1729,25 @@ CodeGeneratorShared::emitTracelogScript(
     Label done;
 
     AllocatableRegisterSet regs(RegisterSet::Volatile());
     Register logger = regs.takeAnyGeneral();
     Register script = regs.takeAnyGeneral();
 
     masm.Push(logger);
 
-    CodeOffsetLabel patchLogger = masm.movWithPatch(ImmPtr(nullptr), logger);
+    CodeOffset patchLogger = masm.movWithPatch(ImmPtr(nullptr), logger);
     masm.propagateOOM(patchableTraceLoggers_.append(patchLogger));
 
     Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
 
     masm.Push(script);
 
-    CodeOffsetLabel patchScript = masm.movWithPatch(ImmWord(0), script);
+    CodeOffset patchScript = masm.movWithPatch(ImmWord(0), script);
     masm.propagateOOM(patchableTLScripts_.append(patchScript));
 
     if (isStart)
         masm.tracelogStartId(logger, script);
     else
         masm.tracelogStopId(logger, script);
 
     masm.Pop(script);
@@ -1764,17 +1764,17 @@ CodeGeneratorShared::emitTracelogTree(bo
         return;
 
     Label done;
     AllocatableRegisterSet regs(RegisterSet::Volatile());
     Register logger = regs.takeAnyGeneral();
 
     masm.Push(logger);
 
-    CodeOffsetLabel patchLocation = masm.movWithPatch(ImmPtr(nullptr), logger);
+    CodeOffset patchLocation = masm.movWithPatch(ImmPtr(nullptr), logger);
     masm.propagateOOM(patchableTraceLoggers_.append(patchLocation));
 
     Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
 
     if (isStart)
         masm.tracelogStartId(logger, textId);
     else
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -50,18 +50,18 @@ struct ReciprocalMulConstants {
 };
 
 // This should be nested in CodeGeneratorShared, but it is used in
 // optimization tracking implementation and nested classes cannot be
 // forward-declared.
 struct NativeToTrackedOptimizations
 {
     // [startOffset, endOffset]
-    CodeOffsetLabel startOffset;
-    CodeOffsetLabel endOffset;
+    CodeOffset startOffset;
+    CodeOffset endOffset;
     const TrackedOptimizations* optimizations;
 };
 
 class CodeGeneratorShared : public LElementVisitor
 {
     js::Vector<OutOfLineCode*, 0, SystemAllocPolicy> outOfLineCode_;
 
     MacroAssembler& ensureMasm(MacroAssembler* masm);
@@ -78,17 +78,17 @@ class CodeGeneratorShared : public LElem
     RecoverWriter recovers_;
     JitCode* deoptTable_;
 #ifdef DEBUG
     uint32_t pushedArgs_;
 #endif
     uint32_t lastOsiPointOffset_;
     SafepointWriter safepoints_;
     Label invalidate_;
-    CodeOffsetLabel invalidateEpilogueData_;
+    CodeOffset invalidateEpilogueData_;
 
     // Label for the common return path.
     NonAssertingLabel returnLabel_;
 
     FallbackICStubSpace stubSpace_;
 
     js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
     js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
@@ -101,23 +101,23 @@ class CodeGeneratorShared : public LElem
 
     // Vector of information about generated polymorphic inline caches.
     js::Vector<uint32_t, 0, SystemAllocPolicy> cacheList_;
 
     // Patchable backedges generated for loops.
     Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;
 
 #ifdef JS_TRACE_LOGGING
-    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTraceLoggers_;
-    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTLScripts_;
+    js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTraceLoggers_;
+    js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTLScripts_;
 #endif
 
   public:
     struct NativeToBytecode {
-        CodeOffsetLabel nativeOffset;
+        CodeOffset nativeOffset;
         InlineScriptTree* tree;
         jsbytecode* pc;
     };
 
   protected:
     js::Vector<NativeToBytecode, 0, SystemAllocPolicy> nativeToBytecodeList_;
     uint8_t* nativeToBytecodeMap_;
     uint32_t nativeToBytecodeMapSize_;
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -310,32 +310,32 @@ class Assembler : public AssemblerX86Sha
     }
     void push(ImmPtr imm) {
         push(ImmWord(uintptr_t(imm.value)));
     }
     void push(FloatRegister src) {
         subq(Imm32(sizeof(double)), StackPointer);
         vmovsd(src, Address(StackPointer, 0));
     }
-    CodeOffsetLabel pushWithPatch(ImmWord word) {
-        CodeOffsetLabel label = movWithPatch(word, ScratchReg);
+    CodeOffset pushWithPatch(ImmWord word) {
+        CodeOffset label = movWithPatch(word, ScratchReg);
         push(ScratchReg);
         return label;
     }
 
     void pop(FloatRegister src) {
         vmovsd(Address(StackPointer, 0), src);
         addq(Imm32(sizeof(double)), StackPointer);
     }
 
-    CodeOffsetLabel movWithPatch(ImmWord word, Register dest) {
+    CodeOffset movWithPatch(ImmWord word, Register dest) {
         masm.movq_i64r(word.value, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
         return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
     }
 
     // Load an ImmWord value into a register. Note that this instruction will
     // attempt to optimize its immediate field size. When a full 64-bit
     // immediate is needed for a relocation, use movWithPatch.
     void movq(ImmWord word, Register dest) {
         // Load a 64-bit immediate into a register. If the value falls into
@@ -593,31 +593,31 @@ class Assembler : public AssemblerX86Sha
         else
             movq(word, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         movq(imm, dest);
     }
     void mov(AsmJSImmPtr imm, Register dest) {
         masm.movq_i64r(-1, dest.encoding());
-        append(AsmJSAbsoluteLink(CodeOffsetLabel(masm.currentOffset()), imm.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm.kind()));
     }
     void mov(const Operand& src, Register dest) {
         movq(src, dest);
     }
     void mov(Register src, const Operand& dest) {
         movq(src, dest);
     }
     void mov(Imm32 imm32, const Operand& dest) {
         movq(imm32, dest);
     }
     void mov(Register src, Register dest) {
         movq(src, dest);
     }
-    void mov(CodeOffsetLabel* label, Register dest) {
+    void mov(CodeOffset* label, Register dest) {
         masm.movq_i64r(/* placeholder */ 0, dest.encoding());
         label->use(masm.size());
     }
     void xchg(Register src, Register dest) {
         xchgq(src, dest);
     }
     void lea(const Operand& src, Register dest) {
         switch (src.kind()) {
@@ -627,59 +627,59 @@ class Assembler : public AssemblerX86Sha
           case Operand::MEM_SCALE:
             masm.leaq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexepcted operand kind");
         }
     }
 
-    CodeOffsetLabel loadRipRelativeInt32(Register dest) {
-        return CodeOffsetLabel(masm.movl_ripr(dest.encoding()).offset());
+    CodeOffset loadRipRelativeInt32(Register dest) {
+        return CodeOffset(masm.movl_ripr(dest.encoding()).offset());
     }
-    CodeOffsetLabel loadRipRelativeInt64(Register dest) {
-        return CodeOffsetLabel(masm.movq_ripr(dest.encoding()).offset());
+    CodeOffset loadRipRelativeInt64(Register dest) {
+        return CodeOffset(masm.movq_ripr(dest.encoding()).offset());
     }
-    CodeOffsetLabel loadRipRelativeDouble(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovsd_ripr(dest.encoding()).offset());
+    CodeOffset loadRipRelativeDouble(FloatRegister dest) {
+        return CodeOffset(masm.vmovsd_ripr(dest.encoding()).offset());
     }
-    CodeOffsetLabel loadRipRelativeFloat32(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovss_ripr(dest.encoding()).offset());
+    CodeOffset loadRipRelativeFloat32(FloatRegister dest) {
+        return CodeOffset(masm.vmovss_ripr(dest.encoding()).offset());
     }
-    CodeOffsetLabel loadRipRelativeInt32x4(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovdqa_ripr(dest.encoding()).offset());
+    CodeOffset loadRipRelativeInt32x4(FloatRegister dest) {
+        return CodeOffset(masm.vmovdqa_ripr(dest.encoding()).offset());
     }
-    CodeOffsetLabel loadRipRelativeFloat32x4(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovaps_ripr(dest.encoding()).offset());
+    CodeOffset loadRipRelativeFloat32x4(FloatRegister dest) {
+        return CodeOffset(masm.vmovaps_ripr(dest.encoding()).offset());
     }
-    CodeOffsetLabel storeRipRelativeInt32(Register dest) {
-        return CodeOffsetLabel(masm.movl_rrip(dest.encoding()).offset());
+    CodeOffset storeRipRelativeInt32(Register dest) {
+        return CodeOffset(masm.movl_rrip(dest.encoding()).offset());
     }
-    CodeOffsetLabel storeRipRelativeDouble(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovsd_rrip(dest.encoding()).offset());
+    CodeOffset storeRipRelativeDouble(FloatRegister dest) {
+        return CodeOffset(masm.vmovsd_rrip(dest.encoding()).offset());
     }
-    CodeOffsetLabel storeRipRelativeFloat32(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovss_rrip(dest.encoding()).offset());
+    CodeOffset storeRipRelativeFloat32(FloatRegister dest) {
+        return CodeOffset(masm.vmovss_rrip(dest.encoding()).offset());
     }
-    CodeOffsetLabel storeRipRelativeInt32x4(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovdqa_rrip(dest.encoding()).offset());
+    CodeOffset storeRipRelativeInt32x4(FloatRegister dest) {
+        return CodeOffset(masm.vmovdqa_rrip(dest.encoding()).offset());
     }
-    CodeOffsetLabel storeRipRelativeFloat32x4(FloatRegister dest) {
-        return CodeOffsetLabel(masm.vmovaps_rrip(dest.encoding()).offset());
+    CodeOffset storeRipRelativeFloat32x4(FloatRegister dest) {
+        return CodeOffset(masm.vmovaps_rrip(dest.encoding()).offset());
     }
-    CodeOffsetLabel leaRipRelative(Register dest) {
-        return CodeOffsetLabel(masm.leaq_rip(dest.encoding()).offset());
+    CodeOffset leaRipRelative(Register dest) {
+        return CodeOffset(masm.leaq_rip(dest.encoding()).offset());
     }
 
     void loadAsmJSActivation(Register dest) {
-        CodeOffsetLabel label = loadRipRelativeInt64(dest);
+        CodeOffset label = loadRipRelativeInt64(dest);
         append(AsmJSGlobalAccess(label, AsmJSActivationGlobalDataOffset));
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
-        CodeOffsetLabel label = loadRipRelativeInt64(HeapReg);
+        CodeOffset label = loadRipRelativeInt64(HeapReg);
         append(AsmJSGlobalAccess(label, AsmJSHeapGlobalDataOffset));
     }
 
     void cmpq(Register rhs, Register lhs) {
         masm.cmpq_rr(rhs.encoding(), lhs.encoding());
     }
     void cmpq(Register rhs, const Operand& lhs) {
         switch (lhs.kind()) {
@@ -765,18 +765,18 @@ class Assembler : public AssemblerX86Sha
     }
     void call(JitCode* target) {
         JmpSrc src = masm.call();
         addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
     }
 
     // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
     // this instruction.
-    CodeOffsetLabel toggledCall(JitCode* target, bool enabled) {
-        CodeOffsetLabel offset(size());
+    CodeOffset toggledCall(JitCode* target, bool enabled) {
+        CodeOffset offset(size());
         JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
         addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
         MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
         return offset;
     }
 
     static size_t ToggledCallSize(uint8_t* code) {
         // Size of a call instruction.
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -730,17 +730,17 @@ CodeGeneratorX64::visitAsmJSAtomicBinopH
 void
 CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
 {
     MAsmJSLoadGlobalVar* mir = ins->mir();
 
     MIRType type = mir->type();
     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
 
-    CodeOffsetLabel label;
+    CodeOffset label;
     switch (type) {
       case MIRType_Int32:
         label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
         break;
       case MIRType_Float32:
         label = masm.loadRipRelativeFloat32(ToFloatRegister(ins->output()));
         break;
       case MIRType_Double:
@@ -764,17 +764,17 @@ CodeGeneratorX64::visitAsmJSLoadGlobalVa
 void
 CodeGeneratorX64::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
 {
     MAsmJSStoreGlobalVar* mir = ins->mir();
 
     MIRType type = mir->value()->type();
     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
 
-    CodeOffsetLabel label;
+    CodeOffset label;
     switch (type) {
       case MIRType_Int32:
         label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
         break;
       case MIRType_Float32:
         label = masm.storeRipRelativeFloat32(ToFloatRegister(ins->value()));
         break;
       case MIRType_Double:
@@ -799,27 +799,27 @@ void
 CodeGeneratorX64::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr* ins)
 {
     MAsmJSLoadFuncPtr* mir = ins->mir();
 
     Register index = ToRegister(ins->index());
     Register tmp = ToRegister(ins->temp());
     Register out = ToRegister(ins->output());
 
-    CodeOffsetLabel label = masm.leaRipRelative(tmp);
+    CodeOffset label = masm.leaRipRelative(tmp);
     masm.loadPtr(Operand(tmp, index, TimesEight, 0), out);
     masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
 }
 
 void
 CodeGeneratorX64::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc* ins)
 {
     MAsmJSLoadFFIFunc* mir = ins->mir();
 
-    CodeOffsetLabel label = masm.loadRipRelativeInt64(ToRegister(ins->output()));
+    CodeOffset label = masm.loadRipRelativeInt64(ToRegister(ins->output()));
     masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
 }
 
 void
 CodeGeneratorX64::visitTruncateDToInt32(LTruncateDToInt32* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     Register output = ToRegister(ins->output());
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -27,64 +27,64 @@ MacroAssemblerX64::loadConstantDouble(do
     if (!dbl)
         return;
     // The constants will be stored in a pool appended to the text (see
     // finish()), so they will always be a fixed distance from the
     // instructions which reference them. This allows the instructions to use
     // PC-relative addressing. Use "jump" label support code, because we need
     // the same PC-relative address patching that jumps use.
     JmpSrc j = masm.vmovsd_ripr(dest.encoding());
-    dbl->uses.append(CodeOffsetLabel(j.offset()));
+    dbl->uses.append(CodeOffset(j.offset()));
 }
 
 void
 MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
 {
     if (maybeInlineFloat(f, dest))
         return;
     Float* flt = getFloat(f);
     if (!flt)
         return;
     // See comment in loadConstantDouble
     JmpSrc j = masm.vmovss_ripr(dest.encoding());
-    flt->uses.append(CodeOffsetLabel(j.offset()));
+    flt->uses.append(CodeOffset(j.offset()));
 }
 
 void
 MacroAssemblerX64::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
     if (maybeInlineInt32x4(v, dest))
         return;
     SimdData* val = getSimdData(v);
     if (!val)
         return;
     MOZ_ASSERT(val->type() == SimdConstant::Int32x4);
     JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
-    val->uses.append(CodeOffsetLabel(j.offset()));
+    val->uses.append(CodeOffset(j.offset()));
 }
 
 void
 MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant&v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
     if (maybeInlineFloat32x4(v, dest))
         return;
     SimdData* val = getSimdData(v);
     if (!val)
         return;
     MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
     JmpSrc j = masm.vmovaps_ripr(dest.encoding());
-    val->uses.append(CodeOffsetLabel(j.offset()));
+    val->uses.append(CodeOffset(j.offset()));
 }
 
 void
 MacroAssemblerX64::bindOffsets(const MacroAssemblerX86Shared::UsesVector& uses)
 {
-    for (CodeOffsetLabel use : uses) {
+    for (CodeOffset use : uses) {
         JmpDst dst(currentOffset());
         JmpSrc src(use.offset());
         // Using linkJump here is safe, as explaind in the comment in
         // loadConstantDouble.
         masm.linkJump(src, dst);
     }
 }
 
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -1383,17 +1383,17 @@ class MacroAssemblerX64 : public MacroAs
 
         bind(&done);
     }
 
   public:
     void handleFailureWithHandlerTail(void* handler);
 
     // See CodeGeneratorX64 calls to noteAsmJSGlobalAccess.
-    void patchAsmJSGlobalAccess(CodeOffsetLabel patchAt, uint8_t* code, uint8_t* globalData,
+    void patchAsmJSGlobalAccess(CodeOffset patchAt, uint8_t* code, uint8_t* globalData,
                                 unsigned globalDataOffset)
     {
         uint8_t* nextInsn = code + patchAt.offset();
         MOZ_ASSERT(nextInsn <= globalData);
         uint8_t* target = globalData + globalDataOffset;
         ((int32_t*)nextInsn)[-1] = target - nextInsn;
     }
     void memIntToValue(Address Source, Address Dest) {
--- a/js/src/jit/x64/SharedICHelpers-x64.h
+++ b/js/src/jit/x64/SharedICHelpers-x64.h
@@ -26,20 +26,20 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     masm.Push(ICTailCallReg);
 }
 
 inline void
-EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm)
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
 {
     // Move ICEntry offset into ICStubReg
-    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+    CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
                  ICStubReg);
 
     // Call the stubcode.
     masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -257,17 +257,17 @@ class AssemblerX86Shared : public Assemb
 
     void writeDataRelocation(ImmGCPtr ptr) {
         if (ptr.value) {
             if (gc::IsInsideNursery(ptr.value))
                 embedsNurseryPointers_ = true;
             dataRelocations_.writeUnsigned(masm.currentOffset());
         }
     }
-    void writePrebarrierOffset(CodeOffsetLabel label) {
+    void writePrebarrierOffset(CodeOffset label) {
         preBarriers_.writeUnsigned(label.offset());
     }
 
   protected:
     X86Encoding::BaseAssemblerSpecific masm;
 
     typedef X86Encoding::JmpSrc JmpSrc;
     typedef X86Encoding::JmpDst JmpDst;
@@ -433,18 +433,18 @@ class AssemblerX86Shared : public Assemb
 
   public:
     void haltingAlign(int alignment) {
         masm.haltingAlign(alignment);
     }
     void nopAlign(int alignment) {
         masm.nopAlign(alignment);
     }
-    void writeCodePointer(CodeOffsetLabel* label) {
-        // A CodeOffsetLabel only has one use, bake in the "end of list" value.
+    void writeCodePointer(CodeOffset* label) {
+        // A CodeOffset only has one use, bake in the "end of list" value.
         masm.jumpTablePointer(LabelBase::INVALID_OFFSET);
         label->use(masm.size());
     }
     void movl(Imm32 imm32, Register dest) {
         masm.movl_i32r(imm32.value, dest.encoding());
     }
     void movl(Register src, Register dest) {
         masm.movl_rr(src.encoding(), dest.encoding());
@@ -918,17 +918,17 @@ class AssemblerX86Shared : public Assemb
     void bind(RepatchLabel* label) {
         JmpDst dst(masm.label());
         if (label->used()) {
             JmpSrc jmp(label->offset());
             masm.linkJump(jmp, dst);
         }
         label->bind(dst.offset());
     }
-    void use(CodeOffsetLabel* label) {
+    void use(CodeOffset* label) {
         label->use(currentOffset());
     }
     uint32_t currentOffset() {
         return masm.label().offset();
     }
 
     // Re-routes pending jumps to a new label.
     void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
@@ -950,64 +950,64 @@ class AssemblerX86Shared : public Assemb
             jmp = JmpSrc(next.offset() + baseOffset);
         } while (more);
     }
     void retarget(Label* label, Label* target) {
         retargetWithOffset(0, label, target);
         label->reset();
     }
 
-    static void Bind(uint8_t* raw, CodeOffsetLabel* label, const void* address) {
+    static void Bind(uint8_t* raw, CodeOffset* label, const void* address) {
         if (label->used()) {
             intptr_t offset = label->offset();
             X86Encoding::SetPointer(raw + offset, address);
         }
     }
 
     // See Bind and X86Encoding::setPointer.
-    size_t labelToPatchOffset(CodeOffsetLabel label) {
+    size_t labelToPatchOffset(CodeOffset label) {
         return label.offset() - sizeof(void*);
     }
 
     void ret() {
         masm.ret();
     }
     void retn(Imm32 n) {
         // Remove the size of the return address which is included in the frame.
         masm.ret_i(n.value - sizeof(void*));
     }
-    CodeOffsetLabel call(Label* label) {
+    CodeOffset call(Label* label) {
         if (label->bound()) {
             masm.linkJump(masm.call(), JmpDst(label->offset()));
         } else {
             JmpSrc j = masm.call();
             JmpSrc prev = JmpSrc(label->use(j.offset()));
             masm.setNextJump(j, prev);
         }
-        return CodeOffsetLabel(masm.currentOffset());
-    }
-    CodeOffsetLabel call(Register reg) {
+        return CodeOffset(masm.currentOffset());
+    }
+    CodeOffset call(Register reg) {
         masm.call_r(reg.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
     void call(const Operand& op) {
         switch (op.kind()) {
           case Operand::REG:
             masm.call_r(op.reg());
             break;
           case Operand::MEM_REG_DISP:
             masm.call_m(op.disp(), op.base());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
 
-    CodeOffsetLabel callWithPatch() {
-        return CodeOffsetLabel(masm.call().offset());
+    CodeOffset callWithPatch() {
+        return CodeOffset(masm.call().offset());
     }
     void patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
         unsigned char* code = masm.data();
         X86Encoding::SetRel32(code + callerOffset, code + calleeOffset);
     }
 
     void breakpoint() {
         masm.int3();
@@ -1069,19 +1069,19 @@ class AssemblerX86Shared : public Assemb
             break;
           case Operand::MEM_ADDRESS32:
             masm.cmpl_im(rhs.value, lhs.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
-    CodeOffsetLabel cmplWithPatch(Imm32 rhs, Register lhs) {
+    CodeOffset cmplWithPatch(Imm32 rhs, Register lhs) {
         masm.cmpl_i32r(rhs.value, lhs.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
     void cmpw(Register rhs, Register lhs) {
         masm.cmpw_rr(rhs.encoding(), lhs.encoding());
     }
     void setCC(Condition cond, Register r) {
         masm.setCC_r(static_cast<X86Encoding::Condition>(cond), r.encoding());
     }
     void testb(Register rhs, Register lhs) {
@@ -1113,19 +1113,19 @@ class AssemblerX86Shared : public Assemb
             MOZ_CRASH("unexpected operand kind");
             break;
         }
     }
 
     void addl(Imm32 imm, Register dest) {
         masm.addl_ir(imm.value, dest.encoding());
     }
-    CodeOffsetLabel addlWithPatch(Imm32 imm, Register dest) {
+    CodeOffset addlWithPatch(Imm32 imm, Register dest) {
         masm.addl_i32r(imm.value, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
     void addl(Imm32 imm, const Operand& op) {
         switch (op.kind()) {
           case Operand::REG:
             masm.addl_ir(imm.value, op.reg());
             break;
           case Operand::MEM_REG_DISP:
             masm.addl_im(imm.value, op.disp(), op.base());
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -300,17 +300,17 @@ MacroAssemblerX86Shared::getSimdData(con
     }
     return &simds_[index];
 }
 
 static bool
 AppendShiftedUses(const MacroAssemblerX86Shared::UsesVector& old, size_t delta,
                   MacroAssemblerX86Shared::UsesVector* vec)
 {
-    for (CodeOffsetLabel use : old) {
+    for (CodeOffset use : old) {
         use.offsetBy(delta);
         if (!vec->append(use))
             return false;
     }
     return true;
 }
 
 bool
@@ -543,23 +543,23 @@ MacroAssembler::Pop(const ValueOperand& 
 {
     popValue(val);
     implicitPop(sizeof(Value));
 }
 
 // ===============================================================
 // Simple call functions.
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::call(Register reg)
 {
     return Assembler::call(reg);
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::call(Label* label)
 {
     return Assembler::call(label);
 }
 
 void
 MacroAssembler::call(const Address& addr)
 {
@@ -587,17 +587,17 @@ MacroAssembler::call(ImmPtr target)
 }
 
 void
 MacroAssembler::call(JitCode* target)
 {
     Assembler::call(target);
 }
 
-CodeOffsetLabel
+CodeOffset
 MacroAssembler::callWithPatch()
 {
     return Assembler::callWithPatch();
 }
 void
 MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
 {
     Assembler::patchCall(callerOffset, calleeOffset);
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -41,17 +41,17 @@ class MacroAssembler;
 class MacroAssemblerX86Shared : public Assembler
 {
   private:
     // Perform a downcast. Should be removed by Bug 996602.
     MacroAssembler& asMasm();
     const MacroAssembler& asMasm() const;
 
   public:
-    typedef Vector<CodeOffsetLabel, 0, SystemAllocPolicy> UsesVector;
+    typedef Vector<CodeOffset, 0, SystemAllocPolicy> UsesVector;
 
   protected:
     // For Double, Float and SimdData, make the move ctors explicit so that MSVC
     // knows what to use instead of copying these data structures.
     struct Double {
         double value;
         UsesVector uses;
         explicit Double(double value) : value(value) {}
@@ -200,17 +200,17 @@ class MacroAssemblerX86Shared : public A
         cmpl(rhs, lhs);
     }
     void cmp32(const Operand& lhs, Register rhs) {
         cmpl(rhs, lhs);
     }
     void cmp32(Register lhs, const Operand& rhs) {
         cmpl(rhs, lhs);
     }
-    CodeOffsetLabel cmp32WithPatch(Register lhs, Imm32 rhs) {
+    CodeOffset cmp32WithPatch(Register lhs, Imm32 rhs) {
         return cmplWithPatch(rhs, lhs);
     }
     void add32(Register src, Register dest) {
         addl(src, dest);
     }
     void add32(Imm32 imm, Register dest) {
         addl(imm, dest);
     }
@@ -1467,33 +1467,33 @@ class MacroAssemblerX86Shared : public A
     template <typename T1, typename T2>
     void cmp32Set(Assembler::Condition cond, T1 lhs, T2 rhs, Register dest)
     {
         cmp32(lhs, rhs);
         emitSet(cond, dest);
     }
 
     // Emit a JMP that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
-    CodeOffsetLabel toggledJump(Label* label) {
-        CodeOffsetLabel offset(size());
+    CodeOffset toggledJump(Label* label) {
+        CodeOffset offset(size());
         jump(label);
         return offset;
     }
 
     template <typename T>
     void computeEffectiveAddress(const T& address, Register dest) {
         lea(Operand(address), dest);
     }
 
     void checkStackAlignment() {
         // Exists for ARM compatibility.
     }
 
-    CodeOffsetLabel labelForPatch() {
-        return CodeOffsetLabel(size());
+    CodeOffset labelForPatch() {
+        return CodeOffset(size());
     }
 
     void abiret() {
         ret();
     }
 
     template<typename T>
     void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -223,31 +223,31 @@ class Assembler : public AssemblerX86Sha
     void push(const ImmPtr imm) {
         push(ImmWord(uintptr_t(imm.value)));
     }
     void push(FloatRegister src) {
         subl(Imm32(sizeof(double)), StackPointer);
         vmovsd(src, Address(StackPointer, 0));
     }
 
-    CodeOffsetLabel pushWithPatch(ImmWord word) {
+    CodeOffset pushWithPatch(ImmWord word) {
         masm.push_i32(int32_t(word.value));
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
 
     void pop(FloatRegister src) {
         vmovsd(Address(StackPointer, 0), src);
         addl(Imm32(sizeof(double)), StackPointer);
     }
 
-    CodeOffsetLabel movWithPatch(ImmWord word, Register dest) {
+    CodeOffset movWithPatch(ImmWord word, Register dest) {
         movl(Imm32(word.value), dest);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
         return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
     }
 
     void movl(ImmGCPtr ptr, Register dest) {
         masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
         writeDataRelocation(ptr);
     }
     void movl(ImmGCPtr ptr, const Operand& dest) {
@@ -283,28 +283,28 @@ class Assembler : public AssemblerX86Sha
         else
             movl(imm, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         mov(ImmWord(uintptr_t(imm.value)), dest);
     }
     void mov(AsmJSImmPtr imm, Register dest) {
         masm.movl_i32r(-1, dest.encoding());
-        append(AsmJSAbsoluteLink(CodeOffsetLabel(masm.currentOffset()), imm.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm.kind()));
     }
     void mov(const Operand& src, Register dest) {
         movl(src, dest);
     }
     void mov(Register src, const Operand& dest) {
         movl(src, dest);
     }
     void mov(Imm32 imm, const Operand& dest) {
         movl(imm, dest);
     }
-    void mov(CodeOffsetLabel* label, Register dest) {
+    void mov(CodeOffset* label, Register dest) {
         // Put a placeholder value in the instruction stream.
         masm.movl_i32r(0, dest.encoding());
         label->use(masm.size());
     }
     void mov(Register src, Register dest) {
         movl(src, dest);
     }
     void xchg(Register src, Register dest) {
@@ -362,21 +362,21 @@ class Assembler : public AssemblerX86Sha
             writeDataRelocation(rhs);
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
     void cmpl(Register rhs, AsmJSAbsoluteAddress lhs) {
         masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
-        append(AsmJSAbsoluteLink(CodeOffsetLabel(masm.currentOffset()), lhs.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), lhs.kind()));
     }
     void cmpl(Imm32 rhs, AsmJSAbsoluteAddress lhs) {
         JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
-        append(AsmJSAbsoluteLink(CodeOffsetLabel(src.offset()), lhs.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(src.offset()), lhs.kind()));
     }
 
     void adcl(Imm32 imm, Register dest) {
         masm.adcl_ir(imm.value, dest.encoding());
     }
 
     void mull(Register multiplier) {
         masm.mull_r(multiplier.encoding());
@@ -459,18 +459,18 @@ class Assembler : public AssemblerX86Sha
     }
     void call(ImmPtr target) {
         JmpSrc src = masm.call();
         addPendingJump(src, target, Relocation::HARDCODED);
     }
 
     // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
     // this instruction.
-    CodeOffsetLabel toggledCall(JitCode* target, bool enabled) {
-        CodeOffsetLabel offset(size());
+    CodeOffset toggledCall(JitCode* target, bool enabled) {
+        CodeOffset offset(size());
         JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
         addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
         MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
         return offset;
     }
 
     static size_t ToggledCallSize(uint8_t* code) {
         // Size of a call instruction.
@@ -490,423 +490,423 @@ class Assembler : public AssemblerX86Sha
                 jmp = next;
             } while (more);
         }
         label->reset();
     }
 
     // Move a 32-bit immediate into a register where the immediate can be
     // patched.
-    CodeOffsetLabel movlWithPatch(Imm32 imm, Register dest) {
+    CodeOffset movlWithPatch(Imm32 imm, Register dest) {
         masm.movl_i32r(imm.value, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
 
     // Load from *(base + disp32) where disp32 can be patched.
-    CodeOffsetLabel movsblWithPatch(const Operand& src, Register dest) {
+    CodeOffset movsblWithPatch(const Operand& src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movsbl_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movzblWithPatch(const Operand& src, Register dest) {
+    CodeOffset movzblWithPatch(const Operand& src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movzbl_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movswlWithPatch(const Operand& src, Register dest) {
+    CodeOffset movswlWithPatch(const Operand& src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movswl_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movzwlWithPatch(const Operand& src, Register dest) {
+    CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movzwl_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movlWithPatch(const Operand& src, Register dest) {
+    CodeOffset movlWithPatch(const Operand& src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movl_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovssWithPatch(const Operand& src, FloatRegister dest) {
+    CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovss_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdWithPatch(const Operand& src, FloatRegister dest) {
+    CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovd_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovqWithPatch(const Operand& src, FloatRegister dest) {
+    CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovq_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovsdWithPatch(const Operand& src, FloatRegister dest) {
+    CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovsd_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovupsWithPatch(const Operand& src, FloatRegister dest) {
+    CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovups_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdquWithPatch(const Operand& src, FloatRegister dest) {
+    CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovdqu_mr(src.address(), dest.encoding());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
 
     // Store to *(base + disp32) where disp32 can be patched.
-    CodeOffsetLabel movbWithPatch(Register src, const Operand& dest) {
+    CodeOffset movbWithPatch(Register src, const Operand& dest) {
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movb_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movwWithPatch(Register src, const Operand& dest) {
+    CodeOffset movwWithPatch(Register src, const Operand& dest) {
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movw_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movlWithPatch(Register src, const Operand& dest) {
+    CodeOffset movlWithPatch(Register src, const Operand& dest) {
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.movl_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdWithPatch(FloatRegister src, const Operand& dest) {
+    CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
         MOZ_ASSERT(HasSSE2());
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovd_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovqWithPatch(FloatRegister src, const Operand& dest) {
+    CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
         MOZ_ASSERT(HasSSE2());
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovq_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovssWithPatch(FloatRegister src, const Operand& dest) {
+    CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
         MOZ_ASSERT(HasSSE2());
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovss_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovsdWithPatch(FloatRegister src, const Operand& dest) {
+    CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
         MOZ_ASSERT(HasSSE2());
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovsd_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovupsWithPatch(FloatRegister src, const Operand& dest) {
+    CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
         MOZ_ASSERT(HasSSE2());
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovups_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdquWithPatch(FloatRegister src, const Operand& dest) {
+    CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
         MOZ_ASSERT(HasSSE2());
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
             break;
           case Operand::MEM_ADDRESS32:
             masm.vmovdqu_rm(src.encoding(), dest.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
 
     // Load from *(addr + index*scale) where addr can be patched.
-    CodeOffsetLabel movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
+    CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
                                   Register dest)
     {
         masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
 
     // Load from *src where src can be patched.
-    CodeOffsetLabel movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+    CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
         masm.movsbl_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+    CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
         masm.movzbl_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+    CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
         masm.movswl_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+    CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
         masm.movzwl_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+    CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
         masm.movl_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovss_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovd_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovq_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovsd_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovdqa_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovdqu_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovaps_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+    CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovups_mr(src.addr, dest.encoding());
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
 
     // Store to *dest where dest can be patched.
-    CodeOffsetLabel movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
+    CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
         masm.movb_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
+    CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
         masm.movw_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
+    CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
         masm.movl_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovss_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovd_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovq_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovsd_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovdqa_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovaps_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovdqu_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
-    CodeOffsetLabel vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+    CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovups_rm(src.encoding(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
+        return CodeOffset(masm.currentOffset());
     }
 
     void loadAsmJSActivation(Register dest) {
-        CodeOffsetLabel label = movlWithPatch(PatchedAbsoluteAddress(), dest);
+        CodeOffset label = movlWithPatch(PatchedAbsoluteAddress(), dest);
         append(AsmJSGlobalAccess(label, AsmJSActivationGlobalDataOffset));
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
         // x86 doesn't have a pinned heap register.
     }
 
     static bool canUseInSingleByteInstruction(Register reg) {
         return X86Encoding::HasSubregL(reg.encoding());
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -773,17 +773,17 @@ CodeGeneratorX86::visitAsmJSAtomicBinopH
 
 void
 CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
 {
     MAsmJSLoadGlobalVar* mir = ins->mir();
     MIRType type = mir->type();
     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
 
-    CodeOffsetLabel label;
+    CodeOffset label;
     switch (type) {
       case MIRType_Int32:
         label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
         break;
       case MIRType_Float32:
         label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
         break;
       case MIRType_Double:
@@ -806,17 +806,17 @@ CodeGeneratorX86::visitAsmJSLoadGlobalVa
 void
 CodeGeneratorX86::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
 {
     MAsmJSStoreGlobalVar* mir = ins->mir();
 
     MIRType type = mir->value()->type();
     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
 
-    CodeOffsetLabel label;
+    CodeOffset label;
     switch (type) {
       case MIRType_Int32:
         label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
         break;
       case MIRType_Float32:
         label = masm.vmovssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
         break;
       case MIRType_Double:
@@ -838,27 +838,27 @@ CodeGeneratorX86::visitAsmJSStoreGlobalV
 
 void
 CodeGeneratorX86::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr* ins)
 {
     MAsmJSLoadFuncPtr* mir = ins->mir();
 
     Register index = ToRegister(ins->index());
     Register out = ToRegister(ins->output());
-    CodeOffsetLabel label = masm.movlWithPatch(PatchedAbsoluteAddress(), index, TimesFour, out);
+    CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), index, TimesFour, out);
     masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
 }
 
 void
 CodeGeneratorX86::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc* ins)
 {
     MAsmJSLoadFFIFunc* mir = ins->mir();
 
     Register out = ToRegister(ins->output());
-    CodeOffsetLabel label = masm.movlWithPatch(PatchedAbsoluteAddress(), out);
+    CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), out);
     masm.append(AsmJSGlobalAccess(label, mir->globalDataOffset()));
 }
 
 namespace js {
 namespace jit {
 
 class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86>
 {
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -94,110 +94,110 @@ void
 MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
 {
     if (maybeInlineDouble(d, dest))
         return;
     Double* dbl = getDouble(d);
     if (!dbl)
         return;
     masm.vmovsd_mr(nullptr, dest.encoding());
-    dbl->uses.append(CodeOffsetLabel(masm.size()));
+    dbl->uses.append(CodeOffset(masm.size()));
 }
 
 void
 MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest)
 {
     Double* dbl = getDouble(d);
     if (!dbl)
         return;
     masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
-    dbl->uses.append(CodeOffsetLabel(masm.size()));
+    dbl->uses.append(CodeOffset(masm.size()));
 }
 
 void
 MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
 {
     if (maybeInlineFloat(f, dest))
         return;
     Float* flt = getFloat(f);
     if (!flt)
         return;
     masm.vmovss_mr(nullptr, dest.encoding());
-    flt->uses.append(CodeOffsetLabel(masm.size()));
+    flt->uses.append(CodeOffset(masm.size()));
 }
 
 void
 MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
 {
     Float* flt = getFloat(f);
     if (!flt)
         return;
     masm.vaddss_mr(nullptr, dest.encoding(), dest.encoding());
-    flt->uses.append(CodeOffsetLabel(masm.size()));
+    flt->uses.append(CodeOffset(masm.size()));
 }
 
 void
 MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
     if (maybeInlineInt32x4(v, dest))
         return;
     SimdData* i4 = getSimdData(v);
     if (!i4)
         return;
     MOZ_ASSERT(i4->type() == SimdConstant::Int32x4);
     masm.vmovdqa_mr(nullptr, dest.encoding());
-    i4->uses.append(CodeOffsetLabel(masm.size()));
+    i4->uses.append(CodeOffset(masm.size()));
 }
 
 void
 MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
     if (maybeInlineFloat32x4(v, dest))
         return;
     SimdData* f4 = getSimdData(v);
     if (!f4)
         return;
     MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
     masm.vmovaps_mr(nullptr, dest.encoding());
-    f4->uses.append(CodeOffsetLabel(masm.size()));
+    f4->uses.append(CodeOffset(masm.size()));
 }
 
 void
 MacroAssemblerX86::finish()
 {
     if (!doubles_.empty())
         masm.haltingAlign(sizeof(double));
     for (const Double& d : doubles_) {
-        CodeOffsetLabel cst(masm.currentOffset());
-        for (CodeOffsetLabel use : d.uses)
+        CodeOffset cst(masm.currentOffset());
+        for (CodeOffset use : d.uses)
             addCodeLabel(CodeLabel(use, cst));
         masm.doubleConstant(d.value);
         if (!enoughMemory_)
             return;
     }
 
     if (!floats_.empty())
         masm.haltingAlign(sizeof(float));
     for (const Float& f : floats_) {
-        CodeOffsetLabel cst(masm.currentOffset());
-        for (CodeOffsetLabel use : f.uses)
+        CodeOffset cst(masm.currentOffset());
+        for (CodeOffset use : f.uses)
             addCodeLabel(CodeLabel(use, cst));
         masm.floatConstant(f.value);
         if (!enoughMemory_)
             return;
     }
 
     // SIMD memory values must be suitably aligned.
     if (!simds_.empty())
         masm.haltingAlign(SimdMemoryAlignment);
     for (const SimdData& v : simds_) {
-        CodeOffsetLabel cst(masm.currentOffset());
-        for (CodeOffsetLabel use : v.uses)
+        CodeOffset cst(masm.currentOffset());
+        for (CodeOffset use : v.uses)
             addCodeLabel(CodeLabel(use, cst));
         switch (v.type()) {
           case SimdConstant::Int32x4:   masm.int32x4Constant(v.value.asInt32x4());     break;
           case SimdConstant::Float32x4: masm.float32x4Constant(v.value.asFloat32x4()); break;
           default: MOZ_CRASH("unexpected SimdConstant type");
         }
         if (!enoughMemory_)
             return;
--- a/js/src/jit/x86/SharedICHelpers-x86.h
+++ b/js/src/jit/x86/SharedICHelpers-x86.h
@@ -26,20 +26,20 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     masm.Push(ICTailCallReg);
 }
 
 inline void
-EmitCallIC(CodeOffsetLabel* patchOffset, MacroAssembler& masm)
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
 {
     // Move ICEntry offset into ICStubReg
-    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+    CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
                  ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry into ICTailCallReg
     // ICTailCallReg will always be unused in the contexts where ICs are called.