js/src/jit/CodeGenerator.cpp

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/CodeGenerator.h"

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/EnumeratedArray.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Unused.h"

#include <type_traits>

#include "jslibmath.h"
#include "jsmath.h"
#include "jsnum.h"

#include "builtin/Eval.h"
#include "builtin/RegExp.h"
#include "builtin/SelfHostingDefines.h"
#include "builtin/String.h"
#include "builtin/TypedObject.h"
#include "gc/Nursery.h"
#include "irregexp/NativeRegExpMacroAssembler.h"
#include "jit/AtomicOperations.h"
#include "jit/BaselineCompiler.h"
#include "jit/IonBuilder.h"
#include "jit/IonIC.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitcodeMap.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
#include "jit/Lowering.h"
#include "jit/MIRGenerator.h"
#include "jit/MoveEmitter.h"
#include "jit/RangeAnalysis.h"
#include "jit/SharedICHelpers.h"
#include "jit/StackSlotAllocator.h"
#include "jit/VMFunctions.h"
#include "js/RegExpFlags.h"  // JS::RegExpFlag
#include "util/Unicode.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/EqualityOperations.h"  // js::SameValue
#include "vm/MatchPairs.h"
#include "vm/RegExpObject.h"
#include "vm/RegExpStatics.h"
#include "vm/StringType.h"
#include "vm/TraceLogging.h"
#include "vm/TypedArrayObject.h"
#include "vtune/VTuneWrapper.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmStubs.h"

#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "jit/TemplateObject-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSScript-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using mozilla::AssertedCast;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;
using mozilla::Maybe;
using mozilla::NegativeInfinity;
using mozilla::PositiveInfinity;

namespace js {
namespace jit {

#ifdef CHECK_OSIPOINT_REGISTERS
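// When CHECK_OSIPOINT_REGISTERS is enabled, the values of all live registers
// are dumped into the JitActivation before each VM call (see StoreAllLiveRegs
// below), and compared against the current register values once the
// corresponding OsiPoint is reached (see verifyOsiPointRegs below). This
// catches instructions that clobber a live register between a VM call and its
// OsiPoint.
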
template <class Op>
static void HandleRegisterDump(Op op, MacroAssembler& masm,
                               LiveRegisterSet liveRegs, Register activation,
                               Register scratch) {
  const size_t baseOffset = JitActivation::offsetOfRegs();

  // Handle live GPRs.
  for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
    Register reg = *iter;
    Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));

    if (reg == activation) {
      // To use the original value of the activation register (that's
      // now on top of the stack), we need the scratch register.
      masm.push(scratch);
      masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
      op(scratch, dump);
      masm.pop(scratch);
    } else {
      op(reg, dump);
    }
  }

  // Handle live FPRs.
  for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
    FloatRegister reg = *iter;
    Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
    op(reg, dump);
  }
}

class StoreOp {
  MacroAssembler& masm;

 public:
  explicit StoreOp(MacroAssembler& masm) : masm(masm) {}

  void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
  void operator()(FloatRegister reg, Address dump) {
    if (reg.isDouble()) {
      masm.storeDouble(reg, dump);
    } else if (reg.isSingle()) {
      masm.storeFloat32(reg, dump);
#  if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    } else if (reg.isSimd128()) {
      masm.storeUnalignedSimd128Float(reg, dump);
#  endif
    } else {
      MOZ_CRASH("Unexpected register type.");
    }
  }
};

class VerifyOp {
  MacroAssembler& masm;
  Label* failure_;

 public:
  VerifyOp(MacroAssembler& masm, Label* failure)
      : masm(masm), failure_(failure) {}

  void operator()(Register reg, Address dump) {
    masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
  }
  void operator()(FloatRegister reg, Address dump) {
    if (reg.isDouble()) {
      ScratchDoubleScope scratch(masm);
      masm.loadDouble(dump, scratch);
      masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
    } else if (reg.isSingle()) {
      ScratchFloat32Scope scratch(masm);
      masm.loadFloat32(dump, scratch);
      masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
    }

    // :TODO: (Bug 1133745) Add support to verify SIMD registers.
  }
};

void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
  // Ensure the live registers stored by callVM did not change between
  // the call and this OsiPoint. Try-catch relies on this invariant.

  // Load pointer to the JitActivation in a scratch register.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  Register scratch = allRegs.takeAny();
  masm.push(scratch);
  masm.loadJitActivation(scratch);

  // If we should not check registers (because the instruction did not call
  // into the VM, or a GC happened), we're done.
  Label failure, done;
  Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
  masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);

  // Having more than one VM function call made in one visit function at
  // runtime is a sec-critical error, because if we conservatively assume that
  // one of the function calls can re-enter Ion, then the invalidation process
  // will potentially add a call at a random location, by patching the code
  // before the return address.
  masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);

  // Set checkRegs to 0, so that we don't try to verify registers after we
  // return from this script to the caller.
  masm.store32(Imm32(0), checkRegs);

  // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
  // temps after calling into the VM. This is fine because no other
  // instructions (including this OsiPoint) will depend on them. Also, the
  // backtracking register allocator can use the same register for an input
  // and an output. These are marked as clobbered and shouldn't get checked.
  LiveRegisterSet liveRegs;
  liveRegs.set() = RegisterSet::Intersect(
      safepoint->liveRegs().set(),
      RegisterSet::Not(safepoint->clobberedRegs().set()));

  VerifyOp op(masm, &failure);
  HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());

  masm.jump(&done);

  // Do not profile the callWithABI that occurs below.  This is to avoid a
  // rare corner case that occurs when profiling interacts with itself:
  //
  // When slow profiling assertions are turned on, FunctionBoundary ops
  // (which update the profiler pseudo-stack) may emit a callVM, which
  // forces them to have an osi point associated with them.  The
  // FunctionBoundary for inline function entry is added to the caller's
  // graph with a PC from the caller's code, but during codegen it modifies
  // Gecko Profiler instrumentation to add the callee as the current top-most
  // script. When codegen gets to the OSIPoint, and the callWithABI below is
  // emitted, the codegen thinks that the current frame is the callee, but
  // the PC it's using from the OSIPoint refers to the caller.  This causes
  // the profiler instrumentation of the callWithABI below to ASSERT, since
  // the script and pc are mismatched.  To avoid this, we simply omit
  // instrumentation for these callWithABIs.

  // Any live register captured by a safepoint (other than temp registers)
  // must remain unchanged between the call and the OsiPoint instruction.
  masm.bind(&failure);
  masm.assumeUnreachable("Modified registers between VM call and OsiPoint");

  masm.bind(&done);
  masm.pop(scratch);
}

bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
  if (!checkOsiPointRegisters) {
    return false;
  }

  if (safepoint->liveRegs().emptyGeneral() &&
      safepoint->liveRegs().emptyFloat()) {
    return false;  // No registers to check.
  }

  return true;
}

void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
  if (!shouldVerifyOsiPointRegs(safepoint)) {
    return;
  }

  // Set checkRegs to 0. If we perform a VM call, the instruction
  // will set it to 1.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  Register scratch = allRegs.takeAny();
  masm.push(scratch);
  masm.loadJitActivation(scratch);
  Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
  masm.store32(Imm32(0), checkRegs);
  masm.pop(scratch);
}

static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
  // Store a copy of all live registers before performing the call.
  // When we reach the OsiPoint, we can use this to check nothing
  // modified them in the meantime.

  // Load pointer to the JitActivation in a scratch register.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  Register scratch = allRegs.takeAny();
  masm.push(scratch);
  masm.loadJitActivation(scratch);

  Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
  masm.add32(Imm32(1), checkRegs);

  StoreOp op(masm);
  HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());

  masm.pop(scratch);
}
#endif  // CHECK_OSIPOINT_REGISTERS

// Before making any call into C++, you should ensure that volatile
// registers have been evicted by the register allocator.
void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins,
                                   const Register* dynStack) {
  TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
  const VMFunctionData& fun = GetVMFunction(id);

#ifdef DEBUG
  if (ins->mirRaw()) {
    MOZ_ASSERT(ins->mirRaw()->isInstruction());
    MInstruction* mir = ins->mirRaw()->toInstruction();
    MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
  }
#endif

  // Stack is:
  //    ... frame ...
  //    [args]
#ifdef DEBUG
  MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
  pushedArgs_ = 0;
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(ins->safepoint())) {
    StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
  }
#endif

  // Push an exit frame descriptor. If |dynStack| is a valid pointer to a
  // register, then its value is added to |framePushed()| to fill in the frame
  // descriptor.
  if (dynStack) {
    masm.addPtr(Imm32(masm.framePushed()), *dynStack);
    masm.makeFrameDescriptor(*dynStack, FrameType::IonJS,
                             ExitFrameLayout::Size());
    masm.Push(*dynStack);  // descriptor
  } else {
    masm.pushStaticFrameDescriptor(FrameType::IonJS, ExitFrameLayout::Size());
  }

  // Call the wrapper function. The wrapper is in charge of unwinding the
  // stack when returning from the call. Failures are handled with exceptions
  // based on the return value of the C function. To inspect the returned
  // value itself, use another LIR instruction.
  uint32_t callOffset = masm.callJit(code);
  markSafepointAt(callOffset, ins);

  // Remove the rest of the frame left on the stack. We remove the return
  // address, which is implicitly popped when returning.
  int framePop = sizeof(ExitFrameLayout) - sizeof(void*);

  // Pop arguments from framePushed.
  masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
  // Stack is:
  //    ... frame ...
}

template <typename Fn, Fn fn>
void CodeGenerator::callVM(LInstruction* ins, const Register* dynStack) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(id, ins, dynStack);
}

// ArgSeq stores arguments for OutOfLineCallVM.
//
// OutOfLineCallVM is created with the "oolCallVM" function. The third argument
// of this function is an instance of a class which provides a "generate"
// method in charge of pushing the arguments, with "pushArg", for a VMFunction.
//
// Such a list of arguments can be created by using the "ArgList" function,
// which creates one instance of "ArgSeq", where the types of the arguments are
// inferred from the types of the provided values.
//
// The list of arguments must be written in the same order as if you were
// calling the function in C++.
//
// Example:
//   ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
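//
// A complete oolCallVM use typically looks like the (purely illustrative)
// sketch below, mirroring callers such as visitIntToString further down:
//
//   using Fn = JSFlatString* (*)(JSContext*, int);
//   OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
//       lir, ArgList(input), StoreRegisterTo(output));
//   ...
//   masm.bind(ool->rejoin());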

template <typename... ArgTypes>
class ArgSeq;

template <>
class ArgSeq<> {
 public:
  ArgSeq() {}

  inline void generate(CodeGenerator* codegen) const {}

#ifdef DEBUG
  static constexpr size_t numArgs = 0;
#endif
};

template <typename HeadType, typename... TailTypes>
class ArgSeq<HeadType, TailTypes...> : public ArgSeq<TailTypes...> {
 private:
  using RawHeadType = typename mozilla::RemoveReference<HeadType>::Type;
  RawHeadType head_;

 public:
  template <typename ProvidedHead, typename... ProvidedTail>
  explicit ArgSeq(ProvidedHead&& head, ProvidedTail&&... tail)
      : ArgSeq<TailTypes...>(std::forward<ProvidedTail>(tail)...),
        head_(std::forward<ProvidedHead>(head)) {}

  // Arguments are pushed in reverse order, from last argument to first
  // argument.
  inline void generate(CodeGenerator* codegen) const {
    this->ArgSeq<TailTypes...>::generate(codegen);
    codegen->pushArg(head_);
  }

#ifdef DEBUG
  static constexpr size_t numArgs = sizeof...(TailTypes) + 1;
#endif
};

template <typename... ArgTypes>
inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
  return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
}

// Store wrappers, which generate the right move of data after the VM call.

struct StoreNothing {
  inline void generate(CodeGenerator* codegen) const {}
  inline LiveRegisterSet clobbered() const {
    return LiveRegisterSet();  // No register gets clobbered
  }
};

class StoreRegisterTo {
 private:
  Register out_;

 public:
  explicit StoreRegisterTo(Register out) : out_(out) {}

  inline void generate(CodeGenerator* codegen) const {
    // It's okay to use storePointerResultTo here - the VMFunction wrapper
    // ensures the upper bytes are zero for bool/int32 return values.
    codegen->storePointerResultTo(out_);
  }
  inline LiveRegisterSet clobbered() const {
    LiveRegisterSet set;
    set.add(out_);
    return set;
  }
};

class StoreFloatRegisterTo {
 private:
  FloatRegister out_;

 public:
  explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}

  inline void generate(CodeGenerator* codegen) const {
    codegen->storeFloatResultTo(out_);
  }
  inline LiveRegisterSet clobbered() const {
    LiveRegisterSet set;
    set.add(out_);
    return set;
  }
};

template <typename Output>
class StoreValueTo_ {
 private:
  Output out_;

 public:
  explicit StoreValueTo_(const Output& out) : out_(out) {}

  inline void generate(CodeGenerator* codegen) const {
    codegen->storeResultValueTo(out_);
  }
  inline LiveRegisterSet clobbered() const {
    LiveRegisterSet set;
    set.add(out_);
    return set;
  }
};

template <typename Output>
StoreValueTo_<Output> StoreValueTo(const Output& out) {
  return StoreValueTo_<Output>(out);
}
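
// The store wrapper is chosen to match the VMFunction's return kind:
// StoreNothing when the call only returns a success flag, StoreRegisterTo for
// pointer and bool/int32 data results, StoreFloatRegisterTo for double
// results, and StoreValueTo for Value results. oolCallVM asserts in DEBUG
// builds that a data-returning VMFunction is not paired with StoreNothing.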

template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
 private:
  LInstruction* lir_;
  ArgSeq args_;
  StoreOutputTo out_;

 public:
  OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
                  const StoreOutputTo& out)
      : lir_(lir), args_(args), out_(out) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallVM(this);
  }

  LInstruction* lir() const { return lir_; }
  const ArgSeq& args() const { return args_; }
  const StoreOutputTo& out() const { return out_; }
};

template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
                                        const StoreOutputTo& out) {
  MOZ_ASSERT(lir->mirRaw());
  MOZ_ASSERT(lir->mirRaw()->isInstruction());

#ifdef DEBUG
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  const VMFunctionData& fun = GetVMFunction(id);
  MOZ_ASSERT(fun.explicitArgs == args.numArgs);
  MOZ_ASSERT(fun.returnsData() !=
             (mozilla::IsSame<StoreOutputTo, StoreNothing>::value));
#endif

  OutOfLineCode* ool = new (alloc())
      OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
  addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
  return ool;
}

template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
void CodeGenerator::visitOutOfLineCallVM(
    OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
  LInstruction* lir = ool->lir();

  saveLive(lir);
  ool->args().generate(this);
  callVM<Fn, fn>(lir);
  ool->out().generate(this);
  restoreLiveIgnore(lir, ool->out().clobbered());
  masm.jump(ool->rejoin());
}

class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
 private:
  LInstruction* lir_;
  size_t cacheIndex_;
  size_t cacheInfoIndex_;

 public:
  OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
                      size_t cacheInfoIndex)
      : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}

  void bind(MacroAssembler* masm) override {
    // The binding of the initial jump is done in
    // CodeGenerator::visitOutOfLineICFallback.
  }

  size_t cacheIndex() const { return cacheIndex_; }
  size_t cacheInfoIndex() const { return cacheInfoIndex_; }
  LInstruction* lir() const { return lir_; }

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineICFallback(this);
  }
};

void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
  if (cacheIndex == SIZE_MAX) {
    masm.setOOM();
    return;
  }

  DataPtr<IonIC> cache(this, cacheIndex);
  MInstruction* mir = lir->mirRaw()->toInstruction();
  if (mir->resumePoint()) {
    cache->setScriptedLocation(mir->block()->info().script(),
                               mir->resumePoint()->pc());
  } else {
    cache->setIdempotent();
  }

  Register temp = cache->scratchRegisterForEntryJump();
  icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
  masm.jump(Address(temp, 0));

  MOZ_ASSERT(!icInfo_.empty());

  OutOfLineICFallback* ool =
      new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
  addOutOfLineCode(ool, mir);

  masm.bind(ool->rejoin());
  cache->setRejoinLabel(CodeOffset(ool->rejoin()->offset()));
}

void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
  LInstruction* lir = ool->lir();
  size_t cacheIndex = ool->cacheIndex();
  size_t cacheInfoIndex = ool->cacheInfoIndex();

  DataPtr<IonIC> ic(this, cacheIndex);

  // Register the location of the OOL path in the IC.
  ic->setFallbackLabel(masm.labelForPatch());

  switch (ic->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();

      saveLive(lir);

      pushArg(getPropIC->id());
      pushArg(getPropIC->value());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
                          HandleValue, HandleValue, MutableHandleValue);
      callVM<Fn, IonGetPropertyIC::update>(lir);

      StoreValueTo(getPropIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();

      saveLive(lir);

      pushArg(getPropSuperIC->id());
      pushArg(getPropSuperIC->receiver());
      pushArg(getPropSuperIC->object());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn =
          bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
                   HandleValue, HandleValue, MutableHandleValue);
      callVM<Fn, IonGetPropSuperIC::update>(lir);

      StoreValueTo(getPropSuperIC->output()).generate(this);
      restoreLiveIgnore(lir,
                        StoreValueTo(getPropSuperIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();

      saveLive(lir);

      pushArg(setPropIC->rhs());
      pushArg(setPropIC->id());
      pushArg(setPropIC->object());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
                          HandleObject, HandleValue, HandleValue);
      callVM<Fn, IonSetPropertyIC::update>(lir);

      restoreLive(lir);

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::GetName: {
      IonGetNameIC* getNameIC = ic->asGetNameIC();

      saveLive(lir);

      pushArg(getNameIC->environment());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
                          MutableHandleValue);
      callVM<Fn, IonGetNameIC::update>(lir);

      StoreValueTo(getNameIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::BindName: {
      IonBindNameIC* bindNameIC = ic->asBindNameIC();

      saveLive(lir);

      pushArg(bindNameIC->environment());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn =
          JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
      callVM<Fn, IonBindNameIC::update>(lir);

      StoreRegisterTo(bindNameIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();

      saveLive(lir);

      pushArg(getIteratorIC->value());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
                               HandleValue);
      callVM<Fn, IonGetIteratorIC::update>(lir);

      StoreRegisterTo(getIteratorIC->output()).generate(this);
      restoreLiveIgnore(lir,
                        StoreRegisterTo(getIteratorIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::In: {
      IonInIC* inIC = ic->asInIC();

      saveLive(lir);

      pushArg(inIC->object());
      pushArg(inIC->key());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
                          HandleObject, bool*);
      callVM<Fn, IonInIC::update>(lir);

      StoreRegisterTo(inIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();

      saveLive(lir);

      pushArg(hasOwnIC->id());
      pushArg(hasOwnIC->value());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
                          HandleValue, int32_t*);
      callVM<Fn, IonHasOwnIC::update>(lir);

      StoreRegisterTo(hasOwnIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();

      saveLive(lir);

      pushArg(hasInstanceOfIC->rhs());
      pushArg(hasInstanceOfIC->lhs());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
                          HandleValue lhs, HandleObject rhs, bool* res);
      callVM<Fn, IonInstanceOfIC::update>(lir);

      StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
      restoreLiveIgnore(lir,
                        StoreRegisterTo(hasInstanceOfIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();

      saveLive(lir);

      pushArg(unaryArithIC->input());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
                          IonUnaryArithIC * stub, HandleValue val,
                          MutableHandleValue res);
      callVM<Fn, IonUnaryArithIC::update>(lir);

      StoreValueTo(unaryArithIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();

      saveLive(lir);

      pushArg(binaryArithIC->rhs());
      pushArg(binaryArithIC->lhs());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
                          IonBinaryArithIC * stub, HandleValue lhs,
                          HandleValue rhs, MutableHandleValue res);
      callVM<Fn, IonBinaryArithIC::update>(lir);

      StoreValueTo(binaryArithIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::Compare: {
      IonCompareIC* compareIC = ic->asCompareIC();

      saveLive(lir);

      pushArg(compareIC->rhs());
      pushArg(compareIC->lhs());
      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
      pushArg(ImmGCPtr(gen->info().script()));

      using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
                          IonCompareIC * stub, HandleValue lhs, HandleValue rhs,
                          bool* res);
      callVM<Fn, IonCompareIC::update>(lir);

      StoreRegisterTo(compareIC->output()).generate(this);
      restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());

      masm.jump(ool->rejoin());
      return;
    }
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }
  MOZ_CRASH();
}

StringObject* MNewStringObject::templateObj() const {
  return &templateObj_->as<StringObject>();
}

CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
                             MacroAssembler* masm)
    : CodeGeneratorSpecific(gen, graph, masm),
      ionScriptLabels_(gen->alloc()),
      scriptCounts_(nullptr),
      realmStubsToReadBarrier_(0) {}

CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }

void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
  ValueOperand operand = ToValue(lir, LValueToInt32::Input);
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->tempFloat());

  MDefinition* input;
  if (lir->mode() == LValueToInt32::NORMAL) {
    input = lir->mirNormal()->input();
  } else {
    input = lir->mirTruncate()->input();
  }

  Label fails;
  if (lir->mode() == LValueToInt32::TRUNCATE) {
    OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());

    // We can only handle strings in truncation contexts, like bitwise
    // operations.
    Label* stringEntry;
    Label* stringRejoin;
    Register stringReg;
    if (input->mightBeType(MIRType::String)) {
      stringReg = ToRegister(lir->temp());
      using Fn = bool (*)(JSContext*, JSString*, double*);
      OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
          lir, ArgList(stringReg), StoreFloatRegisterTo(temp));
      stringEntry = oolString->entry();
      stringRejoin = oolString->rejoin();
    } else {
      stringReg = InvalidReg;
      stringEntry = nullptr;
      stringRejoin = nullptr;
    }

    masm.truncateValueToInt32(operand, input, stringEntry, stringRejoin,
                              oolDouble->entry(), stringReg, temp, output,
                              &fails);
    masm.bind(oolDouble->rejoin());
  } else {
    masm.convertValueToInt32(operand, input, temp, output, &fails,
                             lir->mirNormal()->canBeNegativeZero(),
                             lir->mirNormal()->conversion());
  }

  bailoutFrom(&fails, lir->snapshot());
}

void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
  MToDouble* mir = lir->mir();
  ValueOperand operand = ToValue(lir, LValueToDouble::Input);
  FloatRegister output = ToFloatRegister(lir->output());

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;
  bool hasBoolean = false, hasNull = false, hasUndefined = false;

  {
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (mir->conversion() != MToFPInstruction::NumbersOnly) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      hasBoolean = true;
      hasUndefined = true;
      if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
        masm.branchTestNull(Assembler::Equal, tag, &isNull);
        hasNull = true;
      }
    }
  }

  bailout(lir->snapshot());

  if (hasNull) {
    masm.bind(&isNull);
    masm.loadConstantDouble(0.0, output);
    masm.jump(&done);
  }

  if (hasUndefined) {
    masm.bind(&isUndefined);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.jump(&done);
  }

  if (hasBoolean) {
    masm.bind(&isBool);
    masm.boolValueToDouble(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToDouble(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  masm.unboxDouble(operand, output);
  masm.bind(&done);
}

void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
  MToFloat32* mir = lir->mir();
  ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
  FloatRegister output = ToFloatRegister(lir->output());

  Label isDouble, isInt32, isBool, isNull, isUndefined, done;
  bool hasBoolean = false, hasNull = false, hasUndefined = false;

  {
    ScratchTagScope tag(masm, operand);
    masm.splitTagForTest(operand, tag);

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (mir->conversion() != MToFPInstruction::NumbersOnly) {
      masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
      masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
      hasBoolean = true;
      hasUndefined = true;
      if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
        masm.branchTestNull(Assembler::Equal, tag, &isNull);
        hasNull = true;
      }
    }
  }

  bailout(lir->snapshot());

  if (hasNull) {
    masm.bind(&isNull);
    masm.loadConstantFloat32(0.0f, output);
    masm.jump(&done);
  }

  if (hasUndefined) {
    masm.bind(&isUndefined);
    masm.loadConstantFloat32(float(GenericNaN()), output);
    masm.jump(&done);
  }

  if (hasBoolean) {
    masm.bind(&isBool);
    masm.boolValueToFloat32(operand, output);
    masm.jump(&done);
  }

  masm.bind(&isInt32);
  masm.int32ValueToFloat32(operand, output);
  masm.jump(&done);

  masm.bind(&isDouble);
  // ARM and MIPS may not have a double register available if we've
  // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
  ScratchDoubleScope fpscratch(masm);
  masm.unboxDouble(operand, fpscratch);
  masm.convertDoubleToFloat32(fpscratch, output);
#else
  masm.unboxDouble(operand, output);
  masm.convertDoubleToFloat32(output, output);
#endif
  masm.bind(&done);
}

void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
  masm.convertInt32ToDouble(ToRegister(lir->input()),
                            ToFloatRegister(lir->output()));
}

void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
  masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
                              ToFloatRegister(lir->output()));
}

void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
  masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
                              ToFloatRegister(lir->output()));
}

void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
  masm.convertInt32ToFloat32(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));
}

void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
  Label fail;
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());
  masm.convertDoubleToInt32(input, output, &fail,
                            lir->mir()->canBeNegativeZero());
  bailoutFrom(&fail, lir->snapshot());
}

void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
  Label fail;
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());
  masm.convertFloat32ToInt32(input, output, &fail,
                             lir->mir()->canBeNegativeZero());
  bailoutFrom(&fail, lir->snapshot());
}

void CodeGenerator::emitOOLTestObject(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch) {
  saveVolatile(scratch);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(objreg);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
  masm.storeCallBoolResult(scratch);
  restoreVolatile(scratch);

  masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
  masm.jump(ifDoesntEmulateUndefined);
}

// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy.  (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.)  We check truthiness inline except
// when we're testing it on a proxy (or if TI guarantees us that the specified
// object will never emulate |undefined|), in which case out-of-line code will
// call EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
  Register objreg_;
  Register scratch_;

  Label* ifEmulatesUndefined_;
  Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
  bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

 public:
  OutOfLineTestObject()
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}

  void accept(CodeGenerator* codegen) final {
    MOZ_ASSERT(initialized());
    codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
                               ifDoesntEmulateUndefined_, scratch_);
  }

  // Specify the register where the object to be tested is found, labels to
  // jump to if the object is truthy or falsy, and a scratch register for
  // use in the out-of-line path.
  void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
                          Label* ifDoesntEmulateUndefined, Register scratch) {
    MOZ_ASSERT(!initialized());
    MOZ_ASSERT(ifEmulatesUndefined);
    objreg_ = objreg;
    scratch_ = scratch;
    ifEmulatesUndefined_ = ifEmulatesUndefined;
    ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
  }
};

// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code.  The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
  Label label1_;
  Label label2_;

 public:
  OutOfLineTestObjectWithLabels() {}

  Label* label1() { return &label1_; }
  Label* label2() { return &label2_; }
};
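
// The three helpers below wrap an OutOfLineTestObject. The *Kernel variant
// leaves the not-emulating-undefined case as the fallthrough path; the
// branchTest* variant additionally binds |ifDoesntEmulateUndefined| to that
// fallthrough, while testObjectEmulatesUndefined emits an explicit jump to it
// instead.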

void CodeGenerator::testObjectEmulatesUndefinedKernel(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                          scratch);

  // Perform a fast-path check of the object's class flags if the object's
  // not a proxy.  Let out-of-line code handle the slow cases that require
  // saving registers, making a function call, and restoring registers.
  masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
                                       ifEmulatesUndefined);
}

void CodeGenerator::branchTestObjectEmulatesUndefined(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
             "ifDoesntEmulateUndefined will be bound to the fallthrough path");

  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.bind(ifDoesntEmulateUndefined);
}

void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                                Label* ifEmulatesUndefined,
                                                Label* ifDoesntEmulateUndefined,
                                                Register scratch,
                                                OutOfLineTestObject* ool) {
  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.jump(ifDoesntEmulateUndefined);
}

void CodeGenerator::testValueTruthyKernel(
    const ValueOperand& value, const LDefinition* scratch1,
    const LDefinition* scratch2, FloatRegister fr, Label* ifTruthy,
    Label* ifFalsy, OutOfLineTestObject* ool, MDefinition* valueMIR) {
  // Count the number of possible type tags we might have, so we'll know when
  // we've checked them all and hence can avoid emitting a tag check for the
  // last one.  In particular, whenever tagCount is 1 that means we've tried
  // all but one of them already so we know exactly what's left based on the
  // mightBe* booleans.
  bool mightBeUndefined = valueMIR->mightBeType(MIRType::Undefined);
  bool mightBeNull = valueMIR->mightBeType(MIRType::Null);
  bool mightBeBoolean = valueMIR->mightBeType(MIRType::Boolean);
  bool mightBeInt32 = valueMIR->mightBeType(MIRType::Int32);
  bool mightBeObject = valueMIR->mightBeType(MIRType::Object);
  bool mightBeString = valueMIR->mightBeType(MIRType::String);
  bool mightBeSymbol = valueMIR->mightBeType(MIRType::Symbol);
  bool mightBeDouble = valueMIR->mightBeType(MIRType::Double);
  bool mightBeBigInt = valueMIR->mightBeType(MIRType::BigInt);
  int tagCount = int(mightBeUndefined) + int(mightBeNull) +
                 int(mightBeBoolean) + int(mightBeInt32) + int(mightBeObject) +
                 int(mightBeString) + int(mightBeSymbol) + int(mightBeDouble) +
                 int(mightBeBigInt);

  MOZ_ASSERT_IF(!valueMIR->emptyResultTypeSet(), tagCount > 0);

  // If we know we're null or undefined, we're definitely falsy, no
  // need to even check the tag.
  if (int(mightBeNull) + int(mightBeUndefined) == tagCount) {
    masm.jump(ifFalsy);
    return;
  }

  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  if (mightBeUndefined) {
    MOZ_ASSERT(tagCount > 1);
    masm.branchTestUndefined(Assembler::Equal, tag, ifFalsy);
    --tagCount;
  }

  if (mightBeNull) {
    MOZ_ASSERT(tagCount > 1);
    masm.branchTestNull(Assembler::Equal, tag, ifFalsy);
    --tagCount;
  }

  if (mightBeBoolean) {
    MOZ_ASSERT(tagCount != 0);
    Label notBoolean;
    if (tagCount != 1) {
      masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    }
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBooleanTruthy(false, value, ifFalsy);
    }
    if (tagCount != 1) {
      masm.jump(ifTruthy);
    }
    // Else just fall through to truthiness.
    masm.bind(&notBoolean);
    --tagCount;
  }

  if (mightBeInt32) {
    MOZ_ASSERT(tagCount != 0);
    Label notInt32;
    if (tagCount != 1) {
      masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    }
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestInt32Truthy(false, value, ifFalsy);
    }
    if (tagCount != 1) {
      masm.jump(ifTruthy);
    }
    // Else just fall through to truthiness.
    masm.bind(&notInt32);
    --tagCount;
  }

  if (mightBeObject) {
    MOZ_ASSERT(tagCount != 0);
    if (ool) {
      Label notObject;

      if (tagCount != 1) {
        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
      }

      {
        ScratchTagScopeRelease _(&tag);
        Register objreg = masm.extractObject(value, ToRegister(scratch1));
        testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy,
                                    ToRegister(scratch2), ool);
      }

      masm.bind(&notObject);
    } else {
      if (tagCount != 1) {
        masm.branchTestObject(Assembler::Equal, tag, ifTruthy);
      }
      // Else just fall through to truthiness.
    }
    --tagCount;
  } else {
    MOZ_ASSERT(!ool,
               "We better not have an unused OOL path, since the code "
               "generator will try to "
               "generate code for it but we never set up its labels, which "
               "will cause null "
               "derefs of those labels.");
  }

  if (mightBeString) {
    // Test if a string is non-empty.
    MOZ_ASSERT(tagCount != 0);
    Label notString;
    if (tagCount != 1) {
      masm.branchTestString(Assembler::NotEqual, tag, &notString);
    }
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestStringTruthy(false, value, ifFalsy);
    }
    if (tagCount != 1) {
      masm.jump(ifTruthy);
    }
    // Else just fall through to truthiness.
    masm.bind(&notString);
    --tagCount;
  }

  if (mightBeBigInt) {
    MOZ_ASSERT(tagCount != 0);
    Label notBigInt;
    if (tagCount != 1) {
      masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    }
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBigIntTruthy(false, value, ifFalsy);
    }
    if (tagCount != 1) {
      masm.jump(ifTruthy);
    }
    masm.bind(&notBigInt);
    --tagCount;
  }

  if (mightBeSymbol) {
    // All symbols are truthy.
    MOZ_ASSERT(tagCount != 0);
    if (tagCount != 1) {
      masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
    }
    // Else fall through to ifTruthy.
    --tagCount;
  }

  if (mightBeDouble) {
    MOZ_ASSERT(tagCount == 1);
    // If we reach here the value is a double.
    {
      ScratchTagScopeRelease _(&tag);
      masm.unboxDouble(value, fr);
      masm.branchTestDoubleTruthy(false, fr, ifFalsy);
    }
    --tagCount;
  }

  MOZ_ASSERT(tagCount == 0);

  // Fall through for truthy.
}

void CodeGenerator::testValueTruthy(const ValueOperand& value,
                                    const LDefinition* scratch1,
                                    const LDefinition* scratch2,
                                    FloatRegister fr, Label* ifTruthy,
                                    Label* ifFalsy, OutOfLineTestObject* ool,
                                    MDefinition* valueMIR) {
  testValueTruthyKernel(value, scratch1, scratch2, fr, ifTruthy, ifFalsy, ool,
                        valueMIR);
  masm.jump(ifTruthy);
}

void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
  MIRType inputType = lir->mir()->input()->type();
  MOZ_ASSERT(inputType == MIRType::ObjectOrNull ||
                 lir->mir()->operandMightEmulateUndefined(),
             "If the object couldn't emulate undefined, this should have been "
             "folded.");

  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
  Register input = ToRegister(lir->input());

  if (lir->mir()->operandMightEmulateUndefined()) {
    if (inputType == MIRType::ObjectOrNull) {
      masm.branchTestPtr(Assembler::Zero, input, input, falsy);
    }

    OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->mir());

    testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
                                ool);
  } else {
    MOZ_ASSERT(inputType == MIRType::ObjectOrNull);
    testZeroEmitBranch(Assembler::NotEqual, input, lir->ifTruthy(),
                       lir->ifFalsy());
  }
}

void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
  OutOfLineTestObject* ool = nullptr;
  MDefinition* input = lir->mir()->input();
  // Unfortunately, it's possible that someone (e.g. phi elimination) switched
  // out our input after we did cacheOperandMightEmulateUndefined.  So we
  // might think it can emulate undefined _and_ know that it can't be an
  // object.
  if (lir->mir()->operandMightEmulateUndefined() &&
      input->mightBeType(MIRType::Object)) {
    ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->mir());
  }

  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

  testValueTruthy(ToValue(lir, LTestVAndBranch::Input), lir->temp1(),
                  lir->temp2(), ToFloatRegister(lir->tempFloat()), truthy,
                  falsy, ool, input);
}

void CodeGenerator::visitFunctionDispatch(LFunctionDispatch* lir) {
  MFunctionDispatch* mir = lir->mir();
  Register input = ToRegister(lir->input());

  // Compare function pointers
  for (size_t i = 0; i < mir->numCases(); i++) {
    MOZ_ASSERT(i < mir->numCases());
    LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
    if (ObjectGroup* funcGroup = mir->getCaseObjectGroup(i)) {
      masm.branchTestObjGroupUnsafe(Assembler::Equal, input, funcGroup,
                                    target->label());
    } else {
      JSFunction* func = mir->getCase(i);
      masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
    }
  }

  // If we reach the end and have a fallback, jump to the fallback block.
  if (mir->hasFallback()) {
    masm.jump(skipTrivialBlocks(mir->getFallback())->lir()->label());
    return;
  }

  // Otherwise, crash.
  masm.assumeUnreachable("Did not match input function!");
}

void CodeGenerator::visitObjectGroupDispatch(LObjectGroupDispatch* lir) {
  MObjectGroupDispatch* mir = lir->mir();
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp());

  // Load the incoming ObjectGroup in temp.
  masm.loadObjGroupUnsafe(input, temp);

  // Compare ObjectGroups.
  MacroAssembler::BranchGCPtr lastBranch;
  LBlock* lastBlock = nullptr;
  InlinePropertyTable* propTable = mir->propTable();
  for (size_t i = 0; i < mir->numCases(); i++) {
    JSFunction* func = mir->getCase(i);
    LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();

    DebugOnly<bool> found = false;
    for (size_t j = 0; j < propTable->numEntries(); j++) {
      if (propTable->getFunction(j) != func) {
        continue;
      }

      if (lastBranch.isInitialized()) {
        lastBranch.emit(masm);
      }

      ObjectGroup* group = propTable->getObjectGroup(j);
      lastBranch = MacroAssembler::BranchGCPtr(
          Assembler::Equal, temp, ImmGCPtr(group), target->label());
      lastBlock = target;
      found = true;
    }
    MOZ_ASSERT(found);
  }

  // Jump to fallback block if we have an unknown ObjectGroup. If there's no
  // fallback block, we should have handled all cases.

  if (!mir->hasFallback()) {
    MOZ_ASSERT(lastBranch.isInitialized());

    Label ok;
    lastBranch.relink(&ok);
    lastBranch.emit(masm);
    masm.assumeUnreachable("Unexpected ObjectGroup");
    masm.bind(&ok);

    if (!isNextBlock(lastBlock)) {
      masm.jump(lastBlock->label());
    }
    return;
  }

  LBlock* fallback = skipTrivialBlocks(mir->getFallback())->lir();
  if (!lastBranch.isInitialized()) {
    if (!isNextBlock(fallback)) {
      masm.jump(fallback->label());
    }
    return;
  }

  lastBranch.invertCondition();
  lastBranch.relink(fallback->label());
  lastBranch.emit(masm);

  if (!isNextBlock(lastBlock)) {
    masm.jump(lastBlock->label());
  }
}

void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  const JSAtomState& names = gen->runtime->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, input, input, &true_);
  masm.movePtr(ImmGCPtr(names.false_), output);
  masm.jump(&done);

  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), output);

  masm.bind(&done);
}

void CodeGenerator::emitIntToString(Register input, Register output,
                                    Label* ool) {
  masm.boundsCheck32PowerOfTwo(input, StaticStrings::INT_STATIC_LIMIT, ool);

  // Fast path for small integers.
  masm.movePtr(ImmPtr(&gen->runtime->staticStrings().intStaticTable), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}

void CodeGenerator::visitIntToString(LIntToString* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  using Fn = JSFlatString* (*)(JSContext*, int);
  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  emitIntToString(input, output, ool->entry());

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register temp = ToRegister(lir->tempInt());
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, double);
  OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  // Try double to integer conversion and run integer to string code.
  masm.convertDoubleToInt32(input, temp, ool->entry(), true);
  emitIntToString(temp, output, ool->entry());

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitValueToString(LValueToString* lir) {
  ValueOperand input = ToValue(lir, LValueToString::Input);
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  Label done;
  Register tag = masm.extractTag(input, output);
  const JSAtomState& names = gen->runtime->names();

  // String
  if (lir->mir()->input()->mightBeType(MIRType::String)) {
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    masm.unboxString(input, output);
    masm.jump(&done);
    masm.bind(&notString);
  }

  // Integer
  if (lir->mir()->input()->mightBeType(MIRType::Int32)) {
    Label notInteger;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
    Register unboxed = ToTempUnboxRegister(lir->tempToUnbox());
    unboxed = masm.extractInt32(input, unboxed);
    emitIntToString(unboxed, output, ool->entry());
    masm.jump(&done);
    masm.bind(&notInteger);
  }

  // Double
  if (lir->mir()->input()->mightBeType(MIRType::Double)) {
    // Note: no fastpath. Need two extra registers and can only convert
    // doubles that fit in an integer and are smaller than
    // StaticStrings::INT_STATIC_LIMIT.
    masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
  }

  // Undefined
  if (lir->mir()->input()->mightBeType(MIRType::Undefined)) {
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    masm.movePtr(ImmGCPtr(names.undefined), output);
    masm.jump(&done);
    masm.bind(&notUndefined);
  }

  // Null
  if (lir->mir()->input()->mightBeType(MIRType::Null)) {
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    masm.movePtr(ImmGCPtr(names.null), output);
    masm.jump(&done);
    masm.bind(&notNull);
  }

  // Boolean
  if (lir->mir()->input()->mightBeType(MIRType::Boolean)) {
    Label notBoolean, true_;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(true, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);
    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);
    masm.jump(&done);
    masm.bind(&notBoolean);
  }

  // Object
  if (lir->mir()->input()->mightBeType(MIRType::Object)) {
    // Bail.
    MOZ_ASSERT(lir->mir()->fallible());
    Label bail;
    masm.branchTestObject(Assembler::Equal, tag, &bail);
    bailoutFrom(&bail, lir->snapshot());
  }

  // Symbol
  if (lir->mir()->input()->mightBeType(MIRType::Symbol)) {
    // Bail.
    MOZ_ASSERT(lir->mir()->fallible());
    Label bail;
    masm.branchTestSymbol(Assembler::Equal, tag, &bail);
    bailoutFrom(&bail, lir->snapshot());
  }

  // BigInt
  if (lir->mir()->input()->mightBeType(MIRType::BigInt)) {
    // No fastpath currently implemented.
    masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
  }

#ifdef DEBUG
  masm.assumeUnreachable("Unexpected type for MValueToString.");
#endif

  masm.bind(&done);
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitValueToObject(LValueToObject* lir) {
  ValueOperand input = ToValue(lir, LValueToObject::Input);
  Register output = ToRegister(lir->output());

  using Fn = JSObject* (*)(JSContext*, HandleValue, bool);
  OutOfLineCode* ool = oolCallVM<Fn, ToObjectSlow>(
      lir, ArgList(input, Imm32(0)), StoreRegisterTo(output));

  masm.branchTestObject(Assembler::NotEqual, input, ool->entry());
  masm.unboxObject(input, output);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitValueToObjectOrNull(LValueToObjectOrNull* lir) {
  ValueOperand input = ToValue(lir, LValueToObjectOrNull::Input);
  Register output = ToRegister(lir->output());

  using Fn = JSObject* (*)(JSContext*, HandleValue, bool);
  OutOfLineCode* ool = oolCallVM<Fn, ToObjectSlow>(
      lir, ArgList(input, Imm32(0)), StoreRegisterTo(output));

  Label isObject;
  masm.branchTestObject(Assembler::Equal, input, &isObject);
  masm.branchTestNull(Assembler::NotEqual, input, ool->entry());

  masm.movePtr(ImmWord(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isObject);
  masm.unboxObject(input, output);

  masm.bind(ool->rejoin());
}

static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
                                    size_t offset, Register buffer,
                                    LiveGeneralRegisterSet& liveVolatiles,
                                    void (*fun)(js::gc::StoreBuffer*,
                                                js::gc::Cell**)) {
  Label callVM;
  Label exit;

  // Call into the VM to barrier the write. The only registers that need to
  // be preserved are those in liveVolatiles, so once they are saved on the
  // stack all volatile registers are available for use.
  masm.bind(&callVM);
  masm.PushRegsInMask(liveVolatiles);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(buffer);
  regs.takeUnchecked(holder);
  Register addrReg = regs.takeAny();

  masm.computeEffectiveAddress(Address(holder, offset), addrReg);

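  // If no volatile register is left over for setupUnalignedABICall's scratch,
  // temporarily spill |holder| and reuse it; it is restored after the call.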
  bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
  if (needExtraReg) {
    masm.push(holder);
    masm.setupUnalignedABICall(holder);
  } else {
    masm.setupUnalignedABICall(regs.takeAny());
  }
  masm.passABIArg(buffer);
  masm.passABIArg(addrReg);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, fun), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckOther);

  if (needExtraReg) {
    masm.pop(holder);
  }
  masm.PopRegsInMask(liveVolatiles);
  masm.bind(&exit);
}

// Warning: this function modifies prev and next.
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
                                  size_t offset, Register prev, Register next,
                                  LiveGeneralRegisterSet& liveVolatiles) {
  Label exit;
  Label checkRemove, putCell;

  // if (next && (buffer = next->storeBuffer()))
  // but we never pass in nullptr for next.
  Register storebuffer = next;
  masm.loadStoreBuffer(next, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);

  // if (prev && prev->storeBuffer())
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
  masm.loadStoreBuffer(prev, prev);
  masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);

  // buffer->putCell(cellp)
  masm.bind(&putCell);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::addCellAddressToStoreBuffer);
  masm.jump(&exit);

  // if (prev && (buffer = prev->storeBuffer()))
  masm.bind(&checkRemove);
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
  masm.loadStoreBuffer(prev, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::removeCellAddressFromStoreBuffer);

  masm.bind(&exit);
}

void CodeGenerator::visitRegExp(LRegExp* lir) {
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp());
  JSObject* source = lir->mir()->source();

  using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
  OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
      lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
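  // When the source RegExpObject already has a RegExpShared, clone it inline
  // from the template object; otherwise call CloneRegExpObject in the VM.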
  if (lir->mir()->hasShared()) {
    TemplateObject templateObject(source);
    masm.createGCObject(output, temp, templateObject, gc::DefaultHeap,
                        ool->entry());
  } else {
    masm.jump(ool->entry());
  }
  masm.bind(ool->rejoin());
}

// Amount of space to reserve on the stack when executing RegExps inline.
static const size_t RegExpReservedStack =
    sizeof(irregexp::InputOutputData) + sizeof(MatchPairs) +
    RegExpObject::MaxPairCount * sizeof(MatchPair);

static size_t RegExpPairsVectorStartOffset(size_t inputOutputDataStartOffset) {
  return inputOutputDataStartOffset + sizeof(irregexp::InputOutputData) +
         sizeof(MatchPairs);
}

static Address RegExpPairCountAddress(MacroAssembler& masm,
                                      size_t inputOutputDataStartOffset) {
  return Address(masm.getStackPointer(), inputOutputDataStartOffset +
                                             sizeof(irregexp::InputOutputData) +
                                             MatchPairs::offsetOfPairCount());
}

// Prepare an InputOutputData and an optional MatchPairs, for which space has
// been allocated on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fall through;
// otherwise jump to notFound or failure.
static bool PrepareAndExecuteRegExp(
    JSContext* cx, MacroAssembler& masm, Register regexp, Register input,
    Register lastIndex, Register temp1, Register temp2, Register temp3,
    size_t inputOutputDataStartOffset, RegExpShared::CompilationMode mode,
    bool stringsCanBeInNursery, Label* notFound, Label* failure) {
  JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");

  // clang-format off
    /*
     * [SMDOC] Stack layout for PrepareAndExecuteRegExp
     *
     * inputOutputDataStartOffset +-----> +---------------+
     *                                    |InputOutputData|
     *          inputStartAddress +---------->  inputStart|
     *            inputEndAddress +---------->    inputEnd|
     *          startIndexAddress +---------->  startIndex|
     *            endIndexAddress +---------->    endIndex|
     *      matchesPointerAddress +---------->     matches|
     *         matchResultAddress +---------->      result|
     *                                    +---------------+
     *      matchPairsStartOffset +-----> +---------------+
     *                                    |  MatchPairs   |
     *            pairCountAddress +----------->  count   |
     *         pairsPointerAddress +----------->  pairs   |
     *                                    |               |
     *                                    +---------------+
     *     pairsVectorStartOffset +-----> +---------------+
     *                                    |   MatchPair   |
     *                                    |       start   |  <-------+
     *                                    |       limit   |          | Reserved space for
     *                                    +---------------+          | `RegExpObject::MaxPairCount`
     *                                           .                   | MatchPair objects.
     *                                           .                   |
     *                                           .                   |
     *                                    +---------------+          |
     *                                    |   MatchPair   |          |
     *                                    |       start   |  <-------+
     *                                    |       limit   |
     *                                    +---------------+
     */
  // clang-format on

  size_t matchPairsStartOffset =
      inputOutputDataStartOffset + sizeof(irregexp::InputOutputData);
  size_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);

  Address inputStartAddress(
      masm.getStackPointer(),
      inputOutputDataStartOffset +
          offsetof(irregexp::InputOutputData, inputStart));
  Address inputEndAddress(masm.getStackPointer(),
                          inputOutputDataStartOffset +
                              offsetof(irregexp::InputOutputData, inputEnd));
  Address matchesPointerAddress(
      masm.getStackPointer(), inputOutputDataStartOffset +
                                  offsetof(irregexp::InputOutputData, matches));
  Address startIndexAddress(
      masm.getStackPointer(),
      inputOutputDataStartOffset +
          offsetof(irregexp::InputOutputData, startIndex));
  Address endIndexAddress(masm.getStackPointer(),
                          inputOutputDataStartOffset +
                              offsetof(irregexp::InputOutputData, endIndex));
  Address matchResultAddress(
      masm.getStackPointer(),
      inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, result));

  Address pairCountAddress =
      RegExpPairCountAddress(masm, inputOutputDataStartOffset);
  Address pairsPointerAddress(
      masm.getStackPointer(),
      matchPairsStartOffset + MatchPairs::offsetOfPairs());

  RegExpStatics* res = GlobalObject::getRegExpStatics(cx, cx->global());
  if (!res) {
    return false;
  }
#ifdef JS_USE_LINK_REGISTER
  if (mode != RegExpShared::MatchOnly) {
    masm.pushReturnAddress();
  }
#endif
  if (mode == RegExpShared::Normal) {
    // First, fill in a skeletal MatchPairs instance on the stack. This will be
    // passed to the OOL stub in the caller if we aren't able to execute the
    // RegExp inline, and that stub needs to be able to determine whether the
    // execution finished successfully.

    // Initialize MatchPairs::pairCount to 1; the correct value can only
    // be determined after loading the RegExpShared.
    masm.store32(Imm32(1), pairCountAddress);

    // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch.
    Address firstMatchPairStartAddress(
        masm.getStackPointer(),
        pairsVectorStartOffset + offsetof(MatchPair, start));
    masm.store32(Imm32(MatchPair::NoMatch), firstMatchPairStartAddress);

    // Assign the MatchPairs::pairs pointer to the first MatchPair object.
    Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset);
    masm.computeEffectiveAddress(pairsVectorAddress, temp1);
    masm.storePtr(temp1, pairsPointerAddress);
  }

  // Check for a linear, non-external input string.
  masm.branchIfRopeOrExternal(input, temp1, failure);

  // Get the RegExpShared for the RegExp.
  masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(
                                   RegExpObject::PRIVATE_SLOT)),
               temp1);
  masm.branchPtr(Assembler::Equal, temp1, ImmWord(0), failure);

  // ES6 21.2.2.2 step 2.
  // See RegExp.cpp ExecuteRegExp for more detail.
  {
    Label done;

    masm.branchTest32(Assembler::Zero,
                      Address(temp1, RegExpShared::offsetOfFlags()),
                      Imm32(int32_t(JS::RegExpFlag::Unicode)), &done);

    // If the input is a Latin-1 string, there cannot be any surrogate pairs.
    masm.branchLatin1String(input, &done);

    // Check if |lastIndex > 0 && lastIndex < input->length()|.
    // lastIndex is already known to be non-negative at this point.
    masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
    masm.loadStringLength(input, temp2);
    masm.branch32(Assembler::AboveOrEqual, lastIndex, temp2, &done);

    // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
    // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
    // equations hold.
    //
    //    SurrogateMin ≤ x ≤ SurrogateMax
    // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
    // <> ((x - SurrogateMin) >>> 10) = 0    where >>> is an unsigned-shift
    // See Hacker's Delight, section 4-1 for details.
    //
    //    ((x - SurrogateMin) >>> 10) = 0
    // <> floor((x - SurrogateMin) / 1024) = 0
    // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
    // <> floor(x / 1024) = SurrogateMin / 1024
    // <> floor(x / 1024) * 1024 = SurrogateMin
    // <> (x >>> 10) << 10 = SurrogateMin
    // <> x & ~(2^10 - 1) = SurrogateMin
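    //
    // For example, 0xDC37 & 0xFC00 == 0xDC00 == TrailSurrogateMin, so masking
    // with 0xFC00 and comparing against TrailSurrogateMin (resp.
    // LeadSurrogateMin) tests for a trail (resp. lead) surrogate code unit.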

    constexpr char16_t SurrogateMask = 0xFC00;

    // Check if input[lastIndex] is trail surrogate.
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.loadChar(temp2, lastIndex, temp3, CharEncoding::TwoByte);

    masm.and32(Imm32(SurrogateMask), temp3);
    masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::TrailSurrogateMin),
                  &done);

    // Check if input[lastIndex-1] is lead surrogate.
    masm.loadChar(temp2, lastIndex, temp3, CharEncoding::TwoByte,
                  -int32_t(sizeof(char16_t)));

    masm.and32(Imm32(SurrogateMask), temp3);
    masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::LeadSurrogateMin),
                  &done);

    // Move lastIndex to lead surrogate.
    masm.sub32(Imm32(1), lastIndex);

    masm.bind(&done);
  }

  if (mode == RegExpShared::Normal) {
    // Don't handle RegExps with excessive parens.
    masm.load32(Address(temp1, RegExpShared::offsetOfParenCount()), temp2);
    masm.branch32(Assembler::AboveOrEqual, temp2,
                  Imm32(RegExpObject::MaxPairCount), failure);

    // Fill in the paren count in the MatchPairs on the stack.
    masm.add32(Imm32(1), temp2);
    masm.store32(temp2, pairCountAddress);
  }

  // Load the code pointer for the type of input string we have, and compute
  // the input start/end pointers in the InputOutputData.
  Register codePointer = temp1;
  {
    masm.loadStringLength(input, temp3);

    Label isLatin1, done;
    masm.branchLatin1String(input, &isLatin1);
    {
      masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
      masm.storePtr(temp2, inputStartAddress);
      masm.lshiftPtr(Imm32(1), temp3);
      masm.loadPtr(Address(temp1, RegExpShared::offsetOfTwoByteJitCode(mode)),
                   codePointer);
      masm.jump(&done);
    }
    masm.bind(&isLatin1);
    {
      masm.loadStringChars(input, temp2, CharEncoding::Latin1);
      masm.storePtr(temp2, inputStartAddress);
      masm.loadPtr(Address(temp1, RegExpShared::offsetOfLatin1JitCode(mode)),
                   codePointer);
    }
    masm.bind(&done);

    masm.addPtr(temp3, temp2);
    masm.storePtr(temp2, inputEndAddress);
  }

  // Check that the RegExpShared has been compiled for this type of input.
  masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
  masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);

  // Finish filling in the InputOutputData instance on the stack.
  if (mode == RegExpShared::Normal) {
    masm.computeEffectiveAddress(
        Address(masm.getStackPointer(), matchPairsStartOffset), temp2);
    masm.storePtr(temp2, matchesPointerAddress);
  } else {
    // Use InputOutputData.endIndex itself for output.
    masm.computeEffectiveAddress(endIndexAddress, temp2);
    masm.storePtr(temp2, endIndexAddress);
  }
  masm.storePtr(lastIndex, startIndexAddress);
  masm.store32(Imm32(RegExpRunStatus_Error), matchResultAddress);

  // Save any volatile inputs.
  LiveGeneralRegisterSet volatileRegs;
  if (lastIndex.volatile_()) {
    volatileRegs.add(lastIndex);
  }
  if (input.volatile_()) {
    volatileRegs.add(input);
  }
  if (regexp.volatile_()) {
    volatileRegs.add(regexp);
  }

#ifdef JS_TRACE_LOGGING
  if (TraceLogTextIdEnabled(TraceLogger_IrregexpExecute)) {
    masm.push(temp1);
    masm.loadTraceLogger(temp1);
    masm.tracelogStartId(temp1, TraceLogger_IrregexpExecute);
    masm.pop(temp1);
  }
#endif

  // Execute the RegExp.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), inputOutputDataStartOffset), temp2);
  masm.PushRegsInMask(volatileRegs);
  masm.setupUnalignedABICall(temp3);
  masm.passABIArg(temp2);
  masm.callWithABI(codePointer);
  masm.PopRegsInMask(volatileRegs);

#ifdef JS_TRACE_LOGGING
  if (TraceLogTextIdEnabled(TraceLogger_IrregexpExecute)) {
    masm.loadTraceLogger(temp1);
    masm.tracelogStopId(temp1, TraceLogger_IrregexpExecute);
  }
#endif

  Label success;
  masm.branch32(Assembler::Equal, matchResultAddress,
                Imm32(RegExpRunStatus_Success_NotFound), notFound);
  masm.branch32(Assembler::Equal, matchResultAddress,
                Imm32(RegExpRunStatus_Error), failure);

  // Lazily update the RegExpStatics.
  masm.movePtr(ImmPtr(res), temp1);

  Address pendingInputAddress(temp1, RegExpStatics::offsetOfPendingInput());
  Address matchesInputAddress(temp1, RegExpStatics::offsetOfMatchesInput());
  Address lazySourceAddress(temp1, RegExpStatics::offsetOfLazySource());
  Address lazyIndexAddress(temp1, RegExpStatics::offsetOfLazyIndex());

  masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

  if (stringsCanBeInNursery) {
    // Writing into RegExpStatics tenured memory; must post-barrier.
    if (temp1.volatile_()) {
      volatileRegs.add(temp1);
    }

    masm.loadPtr(pendingInputAddress, temp2);
    masm.storePtr(input, pendingInputAddress);
    masm.movePtr(input, temp3);
    EmitPostWriteBarrierS(masm, temp1, RegExpStatics::offsetOfPendingInput(),
                          temp2 /* prev */, temp3 /* next */, volatileRegs);

    masm.loadPtr(matchesInputAddress, temp2);
    masm.storePtr(input, matchesInputAddress);
    masm.movePtr(input, temp3);
    EmitPostWriteBarrierS(masm, temp1, RegExpStatics::offsetOfMatchesInput(),
                          temp2 /* prev */, temp3 /* next */, volatileRegs);
  } else {
    masm.storePtr(input, pendingInputAddress);
    masm.storePtr(input, matchesInputAddress);
  }

  masm.storePtr(lastIndex, Address(temp1, RegExpStatics::offsetOfLazyIndex()));
  masm.store32(Imm32(1),
               Address(temp1, RegExpStatics::offsetOfPendingLazyEvaluation()));

  masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(
                                   RegExpObject::PRIVATE_SLOT)),
               temp2);
  masm.loadPtr(Address(temp2, RegExpShared::offsetOfSource()), temp3);
  masm.storePtr(temp3, lazySourceAddress);
  masm.load32(Address(temp2, RegExpShared::offsetOfFlags()), temp3);
  masm.store32(temp3, Address(temp1, RegExpStatics::offsetOfLazyFlags()));

  if (mode == RegExpShared::MatchOnly) {
    // endIndex is passed via temp3.
    masm.load32(endIndexAddress, temp3);
  }

  return true;
}

static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding encoding);

class CreateDependentString {
  CharEncoding encoding_;
  Register string_;
  Register temp1_;
  Register temp2_;
  Label* failure_;

  enum class FallbackKind : uint8_t {
    InlineString,
    FatInlineString,
    NotInlineString,
    Count
  };
  mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label> fallbacks_,
      joins_;

 public:
  CreateDependentString(CharEncoding encoding, Register string, Register temp1,
                        Register temp2, Label* failure)
      : encoding_(encoding),
        string_(string),
        temp1_(temp1),
        temp2_(temp2),
        failure_(failure) {}

  Register string() const { return string_; }
  CharEncoding encoding() const { return encoding_; }

  // Generate code that creates a DependentString.
  // The caller should call generateFallback after masm.ret() to generate the
  // fallback path.
  void generate(MacroAssembler& masm, const JSAtomState& names,
                CompileRuntime* runtime, Register base,
                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                bool stringsCanBeInNursery);

  // Generate fallback path for creating DependentString.
  void generateFallback(MacroAssembler& masm);
};

void CreateDependentString::generate(MacroAssembler& masm,
                                     const JSAtomState& names,
                                     CompileRuntime* runtime, Register base,
                                     BaseIndex startIndexAddress,
                                     BaseIndex limitIndexAddress,
                                     bool stringsCanBeInNursery) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

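  // Allocate a string of the requested kind and initialize its flags.
  // Allocation failures jump to the per-kind fallback label; generateFallback
  // emits the slow paths that allocate tenured strings and jump back to the
  // matching join label.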
  auto newGCString = [&](FallbackKind kind) {
    uint32_t flags = kind == FallbackKind::InlineString
                         ? JSString::INIT_THIN_INLINE_FLAGS
                         : kind == FallbackKind::FatInlineString
                               ? JSString::INIT_FAT_INLINE_FLAGS
                               : JSString::DEPENDENT_FLAGS;
    if (encoding_ == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }

    if (kind != FallbackKind::FatInlineString) {
      masm.newGCString(string_, temp2_, &fallbacks_[kind],
                       stringsCanBeInNursery);
    } else {
      masm.newGCFatInlineString(string_, temp2_, &fallbacks_[kind],
                                stringsCanBeInNursery);
    }
    masm.bind(&joins_[kind]);
    masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  };

  // Compute the string length.
  masm.load32(startIndexAddress, temp2_);
  masm.load32(limitIndexAddress, temp1_);
  masm.sub32(temp2_, temp1_);

  Label done, nonEmpty;

  // Zero-length matches use the empty string.
  masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
  masm.movePtr(ImmGCPtr(names.empty), string_);
  masm.jump(&done);

  masm.bind(&nonEmpty);

  // Complete matches use the base string.
  Label nonBaseStringMatch;
  masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
  masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
                temp1_, &nonBaseStringMatch);
  masm.movePtr(base, string_);
  masm.jump(&done);

  masm.bind(&nonBaseStringMatch);

  Label notInline;

  int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                                ? JSFatInlineString::MAX_LENGTH_LATIN1
                                : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
  {
    // Make a thin or fat inline string.
    Label stringAllocated, fatInline;

    int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
                  &fatInline);
    if (encoding_ == CharEncoding::Latin1) {
      // One-character Latin-1 strings can be loaded directly from the
      // static strings table.
      Label thinInline;
      masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
      {
        static_assert(
            StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
            "Latin-1 strings can be loaded from static strings");

        masm.loadStringChars(base, temp1_, encoding_);
        masm.loadChar(temp1_, temp2_, temp1_, encoding_);

        masm.movePtr(ImmPtr(&runtime->staticStrings().unitStaticTable),
                     string_);
        masm.loadPtr(BaseIndex(string_, temp1_, ScalePointer), string_);

        masm.jump(&done);
      }
      masm.bind(&thinInline);
    }
    {
      newGCString(FallbackKind::InlineString);
      masm.jump(&stringAllocated);
    }
    masm.bind(&fatInline);
    { newGCString(FallbackKind::FatInlineString); }
    masm.bind(&stringAllocated);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

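    // Save |string_| and |base|; both registers are clobbered below while
    // copying the characters.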
    masm.push(string_);
    masm.push(base);

    // Adjust the start index address for the above pushes.
    MOZ_ASSERT(startIndexAddress.base == masm.getStackPointer());
    BaseIndex newStartIndexAddress = startIndexAddress;
    newStartIndexAddress.offset += 2 * sizeof(void*);

    // Load chars pointer for the new string.
    masm.loadInlineStringCharsForStore(string_, string_);

    // Load the source characters pointer.
    masm.loadStringChars(base, temp2_, encoding_);
    masm.load32(newStartIndexAddress, base);
    masm.addToCharPtr(temp2_, base, encoding_);

    CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

    // Null-terminate.
    masm.storeChar(Imm32(0), Address(string_, 0), encoding_);

    masm.pop(base);
    masm.pop(string_);

    masm.jump(&done);
  }

  masm.bind(&notInline);

  {
    // Make a dependent string.
    // Warning: string may be tenured (if the fallback case is hit), so
    // stores into it must be post barriered.
    newGCString(FallbackKind::NotInlineString);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    masm.loadNonInlineStringChars(base, temp1_, encoding_);
    masm.load32(startIndexAddress, temp2_);
    masm.addToCharPtr(temp1_, temp2_, encoding_);
    masm.storeNonInlineStringChars(temp1_, string_);
    masm.storeDependentStringBase(base, string_);
    masm.movePtr(base, temp1_);

    // Follow any base pointer if the input is itself a dependent string.
    // Watch for undepended strings, which have a base pointer but don't
    // actually share their characters with it.
    Label noBase;
    masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
    masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
    masm.branch32(Assembler::NotEqual, temp2_, Imm32(JSString::DEPENDENT_FLAGS),
                  &noBase);
    masm.loadDependentStringBase(base, temp1_);
    masm.storeDependentStringBase(temp1_, string_);
    masm.bind(&noBase);

    // Post-barrier the base store, whether it was the direct or indirect
    // base (both will end up in temp1 here).
    masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(temp1_);
    regsToSave.takeUnchecked(temp2_);

    masm.PushRegsInMask(regsToSave);

    masm.mov(ImmPtr(runtime), temp1_);

    masm.setupUnalignedABICall(temp2_);
    masm.passABIArg(temp1_);
    masm.passABIArg(string_);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));

    masm.PopRegsInMask(regsToSave);
  }

  masm.bind(&done);
}

static void* AllocateString(JSContext* cx) {
  AutoUnsafeCallWithABI unsafe;
  return js::AllocateString<JSString, NoGC>(cx, js::gc::TenuredHeap);
}

static void* AllocateFatInlineString(JSContext* cx) {
  AutoUnsafeCallWithABI unsafe;
  return js::AllocateString<JSFatInlineString, NoGC>(cx, js::gc::TenuredHeap);
}

void CreateDependentString::generateFallback(MacroAssembler& masm) {
  JitSpew(JitSpew_Codegen,
          "# Emitting CreateDependentString fallback (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(string_);
  regsToSave.takeUnchecked(temp2_);

  for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
    masm.bind(&fallbacks_[kind]);

    masm.PushRegsInMask(regsToSave);

    masm.setupUnalignedABICall(string_);
    masm.loadJSContext(string_);
    masm.passABIArg(string_);
    masm.callWithABI(kind == FallbackKind::FatInlineString
                         ? JS_FUNC_TO_DATA_PTR(void*, AllocateFatInlineString)
                         : JS_FUNC_TO_DATA_PTR(void*, AllocateString));
    masm.storeCallPointerResult(string_);

    masm.PopRegsInMask(regsToSave);

    masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

    masm.jump(&joins_[kind]);
  }
}

static void* CreateMatchResultFallbackFunc(JSContext* cx, gc::AllocKind kind,
                                           size_t nDynamicSlots) {
  AutoUnsafeCallWithABI unsafe;
  return js::AllocateObject<NoGC>(cx, kind, nDynamicSlots, gc::DefaultHeap,
                                  &ArrayObject::class_);
}

static void CreateMatchResultFallback(MacroAssembler& masm, Register object,
                                      Register temp1, Register temp2,
                                      const TemplateObject& templateObject,
                                      Label* fail) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateMatchResult fallback");

  MOZ_ASSERT(templateObject.isArrayObject());

  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(object);
  regsToSave.takeUnchecked(temp1);
  regsToSave.takeUnchecked(temp2);

  masm.PushRegsInMask(regsToSave);

  masm.setupUnalignedABICall(object);

  masm.loadJSContext(object);
  masm.passABIArg(object);
  masm.move32(Imm32(int32_t(templateObject.getAllocKind())), temp1);
  masm.passABIArg(temp1);
  masm.move32(
      Imm32(int32_t(templateObject.asNativeTemplateObject().numDynamicSlots())),
      temp2);
  masm.passABIArg(temp2);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, CreateMatchResultFallbackFunc));
  masm.storeCallPointerResult(object);

  masm.PopRegsInMask(regsToSave);

  masm.branchPtr(Assembler::Equal, object, ImmWord(0), fail);

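  // Initialize the newly allocated object from the template object.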
  masm.initGCThing(object, temp1, templateObject, true);
}

JitCode* JitRealm::generateRegExpMatcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");

  Register regexp = RegExpMatcherRegExpReg;
  Register input = RegExpMatcherStringReg;
  Register lastIndex = RegExpMatcherLastIndexReg;
  ValueOperand result = JSReturnOperand;

  // We are free to clobber all registers, as LRegExpMatcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();
  Register temp4 = regs.takeAny();
  Register maybeTemp5 = InvalidReg;
  if (!regs.empty()) {
    // There are not enough registers on x86.
    maybeTemp5 = regs.takeAny();
  }

  ArrayObject* templateObject =
      cx->realm()->regExps.getOrCreateMatchResultTemplateObject(cx);
  if (!templateObject) {
    return nullptr;
  }
  TemplateObject templateObj(templateObject);
  const NativeTemplateObject& nativeTemplateObj =
      templateObj.asNativeTemplateObject();

  // The template object should have enough space for the maximum number of
  // pairs this stub can handle.
  MOZ_ASSERT(ObjectElements::VALUES_PER_HEADER + RegExpObject::MaxPairCount ==
             gc::GetGCKindSlots(templateObj.getAllocKind()));

  StackMacroAssembler masm(cx);

  // The InputOutputData is placed above the return address on the stack.
  size_t inputOutputDataStartOffset = sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               RegExpShared::Normal, stringsCanBeInNursery,
                               &notFound, &oolEntry)) {
    return nullptr;
  }

  // Construct the result.
  Register object = temp1;
  Label matchResultFallback, matchResultJoin;
  masm.createGCObject(object, temp2, templateObj, gc::DefaultHeap,
                      &matchResultFallback);
  masm.bind(&matchResultJoin);

  // Initialize slots of result object.
  MOZ_ASSERT(nativeTemplateObj.numFixedSlots() == 0);
  MOZ_ASSERT(nativeTemplateObj.numDynamicSlots() == 2);
  static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
                "First slot holds the 'index' property");
  static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
                "Second slot holds the 'input' property");

  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
  masm.storeValue(
      nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectIndexSlot),
      Address(temp2, 0));
  masm.storeValue(
      nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectInputSlot),
      Address(temp2, sizeof(Value)));

  // clang-format off
   /*
    * [SMDOC] Stack layout for the RegExpMatcher stub
    *
    *                                    +---------------+
    *                                    |Return-Address |
    *                                    +---------------+
    * inputOutputDataStartOffset +-----> +---------------+
    *                                    |InputOutputData|
    *                                    +---------------+
    *                                    +---------------+
    *                                    |  MatchPairs   |
    *           pairsCountAddress +----------->  count   |
    *                                    |       pairs   |
    *                                    |               |
    *                                    +---------------+
    *     pairsVectorStartOffset +-----> +---------------+
    *                                    |   MatchPair   |
    *             matchPairStart +------------>  start   |  <-------+
    *             matchPairLimit +------------>  limit   |          | Reserved space for
    *                                    +---------------+          | `RegExpObject::MaxPairCount`
    *                                           .                   | MatchPair objects.
    *                                           .                   |
    *                                           .                   | `count` objects will be
    *                                    +---------------+          | initialized and can be
    *                                    |   MatchPair   |          | accessed below.
    *                                    |       start   |  <-------+
    *                                    |       limit   |
    *                                    +---------------+
    */
  // clang-format on

  static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
                "MatchPair consists of two int32 values representing the start"
                " and the end offset of the match");

  Address pairCountAddress =
      RegExpPairCountAddress(masm, inputOutputDataStartOffset);

  size_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address firstMatchPairStartAddress(
      masm.getStackPointer(),
      pairsVectorStartOffset + offsetof(MatchPair, start));

  // Incremented by one below for each match pair.
  Register matchIndex = temp2;
  masm.move32(Imm32(0), matchIndex);

  // The element in which to store the result of the current match.
  size_t elementsOffset = NativeObject::offsetOfFixedElements();
  BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);

  // The current match pair's "start" and "limit" member.
  BaseIndex matchPairStart(masm.getStackPointer(), matchIndex, TimesEight,
                           pairsVectorStartOffset + offsetof(MatchPair, start));
  BaseIndex matchPairLimit(masm.getStackPointer(), matchIndex, TimesEight,
                           pairsVectorStartOffset + offsetof(MatchPair, limit));

  Register temp5;
  if (maybeTemp5 == InvalidReg) {
    // We don't have enough registers for a fifth temporary. Reuse
    // |lastIndex| as a temporary. We don't need to restore its value,
    // because |lastIndex| is no longer used after a successful match.
    // (Neither here nor in the OOL path, cf. js::RegExpMatcherRaw.)
    temp5 = lastIndex;
  } else {
    temp5 = maybeTemp5;
  }

  // Loop to construct the match strings. There are two different loops,
  // depending on whether the input is a Two-Byte or a Latin-1 string.
  CreateDependentString depStrs[]{
      {CharEncoding::TwoByte, temp3, temp4, temp5, &oolEntry},
      {CharEncoding::Latin1, temp3, temp4, temp5, &oolEntry},
  };

  {
    Label isLatin1, done;
    masm.branchLatin1String(input, &isLatin1);

    for (auto& depStr : depStrs) {
      if (depStr.encoding() == CharEncoding::Latin1) {
        masm.bind(&isLatin1);
      }

      Label matchLoop;
      masm.bind(&matchLoop);

      static_assert(MatchPair::NoMatch == -1,
                    "MatchPair::start is negative if no match was found");

      Label isUndefined, storeDone;
      masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
                    &isUndefined);
      {
        depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
                        input, matchPairStart, matchPairLimit,
                        stringsCanBeInNursery);

        // Storing into nursery-allocated results object's elements; no post
        // barrier.
        masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
        masm.jump(&storeDone);
      }
      masm.bind(&isUndefined);
      { masm.storeValue(UndefinedValue(), objectMatchElement); }
      masm.bind(&storeDone);

      masm.add32(Imm32(1), matchIndex);
      masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
                    &done);
      masm.jump(&matchLoop);
    }

#ifdef DEBUG
    masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif

    masm.bind(&done);
  }

  // Fill in the rest of the output object.
  masm.store32(
      matchIndex,
      Address(object,
              elementsOffset + ObjectElements::offsetOfInitializedLength()));
  masm.store32(
      matchIndex,
      Address(object, elementsOffset + ObjectElements::offsetOfLength()));

  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

  masm.load32(firstMatchPairStartAddress, temp3);
  masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));

  // No post barrier needed (address is within a nursery object).
  masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

  // All done!
  masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
  masm.ret();

  masm.bind(&notFound);
  masm.moveValue(NullValue(), result);
  masm.ret();

  // Fallback paths for CreateDependentString.
  for (auto& depStr : depStrs) {
    depStr.generateFallback(masm);
  }

  // Fallback path for createGCObject.
  masm.bind(&matchResultFallback);
  CreateMatchResultFallback(masm, object, temp2, temp3, templateObj, &oolEntry);
  masm.jump(&matchResultJoin);

  // Use an undefined value to signal to the caller that the OOL stub needs to
  // be called.
  masm.bind(&oolEntry);
  masm.moveValue(UndefinedValue(), result);
  masm.ret();

  Linker linker(masm, "RegExpMatcherStub");
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

#ifdef JS_ION_PERF
  writePerfSpewerJitCodeProfile(code, "RegExpMatcherStub");
#endif
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpMatcherStub");
#endif

  return code;
}

class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpMatcher* lir_;

 public:
  explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpMatcher(this);
  }

  LRegExpMatcher* lir() const { return lir_; }
};

void CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool) {
  LRegExpMatcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

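  // Pass a pointer to the MatchPairs that the stub initialized on the stack,
  // directly after the InputOutputData.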
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), sizeof(irregexp::InputOutputData)), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and the live
  // registers have already been saved by the register allocator.
  using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs * pairs,
                      MutableHandleValue output);
  callVM<Fn, RegExpMatcherRaw>(lir);

  masm.jump(ool->rejoin());
}

void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

#if defined(JS_NUNBOX32)
  MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Type);
  MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Data);
  MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Type);
  MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Data);
  MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Type);
  MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg);
  MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg);
  MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg);
#endif

  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpMatcher* ool = new (alloc()) OutOfLineRegExpMatcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitRealm* jitRealm = gen->realm->jitRealm();
  JitCode* regExpMatcherStub =
      jitRealm->regExpMatcherStubNoBarrier(&realmStubsToReadBarrier_);
  masm.call(regExpMatcherStub);
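  // The stub returns UndefinedValue to signal that the out-of-line VM call
  // must be used.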
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}

static const int32_t RegExpSearcherResultNotFound = -1;
static const int32_t RegExpSearcherResultFailed = -2;

JitCode* JitRealm::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpTesterRegExpReg;
  Register input = RegExpTesterStringReg;
  Register lastIndex = RegExpTesterLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  StackMacroAssembler masm(cx);

  // The InputOutputData is placed above the return address on the stack.
  size_t inputOutputDataStartOffset = sizeof(void*);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, inputOutputDataStartOffset,
                               RegExpShared::Normal, stringsCanBeInNursery,
                               &notFound, &oolEntry)) {
    return nullptr;
  }

  // clang-format off
    /*
     * [SMDOC] Stack layout for the RegExpSearcher stub
     *
     *                                    +---------------+
     *                                    |Return-Address |
     *                                    +---------------+
     * inputOutputDataStartOffset +-----> +---------------+
     *                                    |InputOutputData|
     *                                    +---------------+
     *                                    +---------------+
     *                                    |  MatchPairs   |
     *                                    |       count   |
     *                                    |       pairs   |
     *                                    |               |
     *                                    +---------------+
     *     pairsVectorStartOffset +-----> +---------------+
     *                                    |   MatchPair   |
     *             matchPairStart +------------>  start   |  <-------+
     *             matchPairLimit +------------>  limit   |          | Reserved space for
     *                                    +---------------+          | `RegExpObject::MaxPairCount`
     *                                           .                   | MatchPair objects.
     *                                           .                   |
     *                                           .                   | Only a single object will
     *                                    +---------------+          | be initialized and can be
     *                                    |   MatchPair   |          | accessed below.
     *                                    |       start   |  <-------+
     *                                    |       limit   |
     *                                    +---------------+
     */
  // clang-format on

  size_t pairsVectorStartOffset =
      RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
  Address matchPairStart(masm.getStackPointer(),
                         pairsVectorStartOffset + offsetof(MatchPair, start));
  Address matchPairLimit(masm.getStackPointer(),
                         pairsVectorStartOffset + offsetof(MatchPair, limit));

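  // Pack the match's limit (shifted left by 15 bits) and its start index into
  // a single int32 result.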
  masm.load32(matchPairStart, result);
  masm.load32(matchPairLimit, input);
  masm.lshiftPtr(Imm32(15), input);
  masm.or32(input, result);
  masm.ret();

  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.ret();

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.ret();

  Linker linker(masm, "RegExpSearcherStub");
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

#ifdef JS_ION_PERF
  writePerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#endif
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}

class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpSearcher* lir_;

 public:
  explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpSearcher(this);
  }

  LRegExpSearcher* lir() const { return lir_; }
};

void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) {
  LRegExpSearcher* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(lastIndex);
  regs.take(input);
  regs.take(regexp);
  Register temp = regs.takeAny();

  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), sizeof(irregexp::InputOutputData)), temp);

  pushArg(temp);
  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and the live
  // registers have already been saved by the register allocator.
  using Fn = bool (*)(JSContext * cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, MatchPairs * pairs, int32_t * result);
  callVM<Fn, RegExpSearcherRaw>(lir);

  masm.jump(ool->rejoin());
}

void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg);
  MOZ_ASSERT(RegExpTesterStringReg != ReturnReg);
  MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg);

  masm.reserveStack(RegExpReservedStack);

  OutOfLineRegExpSearcher* ool = new (alloc()) OutOfLineRegExpSearcher(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitRealm* jitRealm = gen->realm->jitRealm();
  JitCode* regExpSearcherStub =
      jitRealm->regExpSearcherStubNoBarrier(&realmStubsToReadBarrier_);
  masm.call(regExpSearcherStub);
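  // RegExpSearcherResultFailed signals that the stub could not handle this
  // case and the out-of-line VM call must be used.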
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}

static const int32_t RegExpTesterResultNotFound = -1;
static const int32_t RegExpTesterResultFailed = -2;

JitCode* JitRealm::generateRegExpTesterStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpTester stub");

  Register regexp = RegExpTesterRegExpReg;
  Register input = RegExpTesterStringReg;
  Register lastIndex = RegExpTesterLastIndexReg;
  Register result = ReturnReg;

  StackMacroAssembler masm(cx);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif

  // We are free to clobber all registers, as LRegExpTester is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  masm.reserveStack(sizeof(irregexp::InputOutputData));

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, 0, RegExpShared::MatchOnly,
                               stringsCanBeInNursery, &notFound, &oolEntry)) {
    return nullptr;
  }

  Label done;

  // temp3 contains endIndex.
  masm.move32(temp3, result);
  masm.jump(&done);

  masm.bind(&notFound);
  masm.move32(Imm32(RegExpTesterResultNotFound), result);
  masm.jump(&done);

  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpTesterResultFailed), result);

  masm.bind(&done);
  masm.freeStack(sizeof(irregexp::InputOutputData));
  masm.ret();

  Linker linker(masm, "RegExpTesterStub");
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

#ifdef JS_ION_PERF
  writePerfSpewerJitCodeProfile(code, "RegExpTesterStub");
#endif
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpTesterStub");
#endif

  return code;
}

class OutOfLineRegExpTester : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpTester* lir_;

 public:
  explicit OutOfLineRegExpTester(LRegExpTester* lir) : lir_(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpTester(this);
  }

  LRegExpTester* lir() const { return lir_; }
};

void CodeGenerator::visitOutOfLineRegExpTester(OutOfLineRegExpTester* ool) {
  LRegExpTester* lir = ool->lir();
  Register lastIndex = ToRegister(lir->lastIndex());
  Register input = ToRegister(lir->string());
  Register regexp = ToRegister(lir->regexp());

  pushArg(lastIndex);
  pushArg(input);
  pushArg(regexp);

  // We are not using oolCallVM because we are in a Call and the live
  // registers have already been saved by the register allocator.
  using Fn = bool (*)(JSContext * cx, HandleObject regexp, HandleString input,
                      int32_t lastIndex, int32_t * result);
  callVM<Fn, RegExpTesterRaw>(lir);

  masm.jump(ool->rejoin());
}

void CodeGenerator::visitRegExpTester(LRegExpTester* lir) {
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg);
  MOZ_ASSERT(RegExpTesterStringReg != ReturnReg);
  MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg);

  OutOfLineRegExpTester* ool = new (alloc()) OutOfLineRegExpTester(lir);
  addOutOfLineCode(ool, lir->mir());

  const JitRealm* jitRealm = gen->realm->jitRealm();
  JitCode* regExpTesterStub =
      jitRealm->regExpTesterStubNoBarrier(&realmStubsToReadBarrier_);
  masm.call(regExpTesterStub);

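  // RegExpTesterResultFailed signals that the stub could not handle this
  // case and the out-of-line VM call must be used.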
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpTesterResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());
}

class OutOfLineRegExpPrototypeOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpPrototypeOptimizable* ins_;

 public:
  explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpPrototypeOptimizable(this);
  }
  LRegExpPrototypeOptimizable* ins() const { return ins_; }
};

void CodeGenerator::visitRegExpPrototypeOptimizable(
    LRegExpPrototypeOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp());

  OutOfLineRegExpPrototypeOptimizable* ool =
      new (alloc()) OutOfLineRegExpPrototypeOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.loadJSContext(temp);
  masm.loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
  size_t offset = Realm::offsetOfRegExps() +
                  RegExpRealm::offsetOfOptimizableRegExpPrototypeShape();
  masm.loadPtr(Address(temp, offset), temp);

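  // |object| is optimizable when its shape matches the realm's cached
  // optimizable RegExp.prototype shape; otherwise take the out-of-line path.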
  masm.branchTestObjShapeUnsafe(Assembler::NotEqual, object, temp,
                                ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(
    OutOfLineRegExpPrototypeOptimizable* ool) {
  LRegExpPrototypeOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  masm.setupUnalignedABICall(output);
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpPrototypeOptimizableRaw));
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}

class OutOfLineRegExpInstanceOptimizable
    : public OutOfLineCodeBase<CodeGenerator> {
  LRegExpInstanceOptimizable* ins_;

 public:
  explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins)
      : ins_(ins) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineRegExpInstanceOptimizable(this);
  }
  LRegExpInstanceOptimizable* ins() const { return ins_; }
};

void CodeGenerator::visitRegExpInstanceOptimizable(
    LRegExpInstanceOptimizable* ins) {
  Register object = ToRegister(ins->object());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp());

  OutOfLineRegExpInstanceOptimizable* ool =
      new (alloc()) OutOfLineRegExpInstanceOptimizable(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.loadJSContext(temp);
  masm.loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
  size_t offset = Realm::offsetOfRegExps() +
                  RegExpRealm::offsetOfOptimizableRegExpInstanceShape();
  masm.loadPtr(Address(temp, offset), temp);

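  // Likewise, the instance is optimizable when its shape matches the realm's
  // cached optimizable RegExp instance shape; otherwise take the out-of-line
  // path.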
  masm.branchTestObjShapeUnsafe(Assembler::NotEqual, object, temp,
                                ool->entry());
  masm.move32(Imm32(0x1), output);

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(
    OutOfLineRegExpInstanceOptimizable* ool) {
  LRegExpInstanceOptimizable* ins = ool->ins();
  Register object = ToRegister(ins->object());
  Register proto = ToRegister(ins->proto());
  Register output = ToRegister(ins->output());

  saveVolatile(output);

  masm.setupUnalignedABICall(output);
  masm.loadJSContext(output);
  masm.passABIArg(output);
  masm.passABIArg(object);
  masm.passABIArg(proto);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpInstanceOptimizableRaw));
  masm.storeCallBoolResult(output);

  restoreVolatile(output);

  masm.jump(ool->rejoin());
}

static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
                                 Register len, Register temp0, Register temp1,
                                 Register output, CharEncoding encoding) {
#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);
#endif

  Register chars = temp0;
  masm.loadStringChars(str, chars, encoding);

  masm.move32(Imm32(0), output);

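  // Scan the characters for a '$'. |output| doubles as the loop index and is
  // set to -1 if no '$' is found.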
  Label start, done;
  masm.bind(&start);

  Register currentChar = temp1;
  masm.loadChar(chars, output, currentChar, encoding);
  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);

  masm.add32(Imm32(1), output);
  masm.branch32(Assembler::NotEqual, output, len, &start);

  masm.move32(Imm32(-1), output);

  masm.bind(&done);
}

void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
  Register str = ToRegister(ins->str());
  Register output = ToRegister(ins->output());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());
  Register len = ToRegister(ins->temp2());

  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
      ins, ArgList(str), StoreRegisterTo(output));

  masm.branchIfRope(str, ool->entry());
  masm.loadStringLength(str, len);

  Label isLatin1, done;
  masm.branchLatin1String(str, &isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
                         CharEncoding::Latin1);
  }
  masm.bind(&done);
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitStringReplace(LStringReplace* lir) {
  if (lir->replacement()->isConstant()) {
    pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->replacement()));
  }

  if (lir->pattern()->isConstant()) {
    pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->pattern()));
  }

  if (lir->string()->isConstant()) {
    pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
  } else {
    pushArg(ToRegister(lir->string()));
  }

  using Fn =
      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  if (lir->mir()->isFlatReplacement()) {
    callVM<Fn, StringFlatReplaceString>(lir);
  } else {
    callVM<Fn, StringReplace>(lir);
  }
}

void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::LhsInput));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryValueCache::RhsInput));
  ValueOperand output = ToOutValue(lir);

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOP_ADD:
    case JSOP_SUB:
    case JSOP_MUL:
    case JSOP_DIV:
    case JSOP_MOD:
    case JSOP_POW: {
      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
  }
}

void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister lhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::LhsInput));
  TypedOrValueRegister rhs =
      TypedOrValueRegister(ToValue(lir, LBinaryBoolCache::RhsInput));
  Register output = ToRegister(lir->output());

  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

  switch (jsop) {
    case JSOP_LT:
    case JSOP_LE:
    case JSOP_GT:
    case JSOP_GE:
    case JSOP_EQ:
    case JSOP_NE:
    case JSOP_STRICTEQ:
    case JSOP_STRICTNE: {
      IonCompareIC ic(liveRegs, lhs, rhs, output);
      addIC(lir, allocateIC(ic));
      return;
    }
    default:
      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
  }
}

void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister input =
      TypedOrValueRegister(ToValue(lir, LUnaryCache::Input));
  ValueOperand output = ToOutValue(lir);

  IonUnaryArithIC ic(liveRegs, input, output);
  addIC(lir, allocateIC(ic));
}

void CodeGenerator::visitClassConstructor(LClassConstructor* lir) {
  pushArg(ImmPtr(nullptr));
  pushArg(ImmPtr(lir->mir()->pc()));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn =
      JSFunction* (*)(JSContext*, HandleScript, jsbytecode*, HandleObject);
  callVM<Fn, js::MakeDefaultConstructor>(lir);
}

void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
  pushArg(ImmPtr(lir->mir()->module()));

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
}

void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
  pushArg(ToValue(lir, LDynamicImport::SpecifierIndex));
  pushArg(ImmGCPtr(current->mir()->info().script()));

  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue);
  callVM<Fn, js::StartDynamicModuleImport>(lir);
}

void CodeGenerator::visitLambdaForSingleton(LLambdaForSingleton* lir) {
  pushArg(ToRegister(lir->environmentChain()));
  pushArg(ImmGCPtr(lir->mir()->info().funUnsafe()));

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  callVM<Fn, js::Lambda>(lir);
}

void CodeGenerator::visitLambda(LLambda* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp());
  const LambdaFunctionInfo& info = lir->mir()->info();

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, js::Lambda>(
      lir, ArgList(ImmGCPtr(info.funUnsafe()), envChain),
      StoreRegisterTo(output));

  MOZ_ASSERT(!info.singletonType);

  TemplateObject templateObject(info.funUnsafe());
  masm.createGCObject(output, tempReg, templateObject, gc::DefaultHeap,
                      ool->entry());

  emitLambdaInit(output, envChain, info);

  if (info.flags & JSFunction::EXTENDED) {
    static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2,
                  "All slots must be initialized");
    masm.storeValue(UndefinedValue(),
                    Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
    masm.storeValue(UndefinedValue(),
                    Address(output, FunctionExtended::offsetOfExtendedSlot(1)));
  }

  masm.bind(ool->rejoin());
}

class OutOfLineLambdaArrow : public OutOfLineCodeBase<CodeGenerator> {
 public:
  LLambdaArrow* lir;
  Label entryNoPop_;

  explicit OutOfLineLambdaArrow(LLambdaArrow* lir) : lir(lir) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineLambdaArrow(this);
  }

  Label* entryNoPop() { return &entryNoPop_; }
};

void CodeGenerator::visitOutOfLineLambdaArrow(OutOfLineLambdaArrow* ool) {
  Register envChain = ToRegister(ool->lir->environmentChain());
  ValueOperand newTarget = ToValue(ool->lir, LLambdaArrow::NewTargetValue);
  Register output = ToRegister(ool->lir->output());
  const LambdaFunctionInfo& info = ool->lir->mir()->info();

  // When we get here, we may need to restore part of the newTarget,
  // which has been conscripted into service as a temp register.
  masm.pop(newTarget.scratchReg());

  masm.bind(ool->entryNoPop());

  saveLive(ool->lir);

  pushArg(newTarget);
  pushArg(envChain);
  pushArg(ImmGCPtr(info.funUnsafe()));

  using Fn =
      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleValue);
  callVM<Fn, js::LambdaArrow>(ool->lir);
  StoreRegisterTo(output).generate(this);

  restoreLiveIgnore(ool->lir, StoreRegisterTo(output).clobbered());

  masm.jump(ool->rejoin());
}

void CodeGenerator::visitLambdaArrow(LLambdaArrow* lir) {
  Register envChain = ToRegister(lir->environmentChain());
  ValueOperand newTarget = ToValue(lir, LLambdaArrow::NewTargetValue);
  Register output = ToRegister(lir->output());
  const LambdaFunctionInfo& info = lir->mir()->info();

  OutOfLineLambdaArrow* ool = new (alloc()) OutOfLineLambdaArrow(lir);
  addOutOfLineCode(ool, lir->mir());

  MOZ_ASSERT(!info.useSingletonForClone);

  if (info.singletonType) {
    // If the function has a singleton type, this instruction will only be
    // executed once so we don't bother inlining it.
    masm.jump(ool->entryNoPop());
    masm.bind(ool->rejoin());
    return;
  }

  // There aren't enough registers on x86 with the profiler enabled to request
  // a temp. Instead, spill part of one of the values and be prepared to
  // restore it if necessary on the out-of-line path.
  Register tempReg = newTarget.scratchReg();
  masm.push(newTarget.scratchReg());

  TemplateObject templateObject(info.funUnsafe());
  masm.createGCObject(output, tempReg, templateObject, gc::DefaultHeap,
                      ool->entry());

  masm.pop(newTarget.scratchReg());

  emitLambdaInit(output, envChain, info);

  // Initialize extended slots. |new.target| is stored in the first one.
  MOZ_ASSERT(info.flags & JSFunction::EXTENDED);
  static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2,
                "All slots must be initialized");
  static_assert(FunctionExtended::ARROW_NEWTARGET_SLOT == 0,
                "|new.target| must be stored in first slot");
  masm.storeValue(newTarget,
                  Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
  masm.storeValue(UndefinedValue(),
                  Address(output, FunctionExtended::offsetOfExtendedSlot(1)));

  masm.bind(ool->rejoin());
}

void CodeGenerator::emitLambdaInit(Register output, Register envChain,
                                   const LambdaFunctionInfo& info) {
  // Initialize nargs and flags. We do this with a single uint32 to avoid
  // 16-bit writes.
  union {
    struct S {
      uint16_t nargs;
      uint16_t flags;
    } s;
    uint32_t word;
  } u;
  u.s.nargs = info.nargs;
  u.s.flags = info.flags;

  static_assert(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2,
                "the code below needs to be adapted");
  masm.store32(Imm32(u.word), Address(output, JSFunction::offsetOfNargs()));
  masm.storePtr(ImmGCPtr(info.scriptOrLazyScript),
                Address(output, JSFunction::offsetOfScriptOrLazyScript()));
  masm.storePtr(envChain, Address(output, JSFunction::offsetOfEnvironment()));
  // No post barrier needed because output is guaranteed to be allocated in
  // the nursery.
  masm.storePtr(ImmGCPtr(info.funUnsafe()->displayAtom()),
                Address(output, JSFunction::offsetOfAtom()));
}

void CodeGenerator::visitSetFunName(LSetFunName* lir) {
  pushArg(Imm32(lir->mir()->prefixKind()));
  pushArg(ToValue(lir, LSetFunName::NameValue));
  pushArg(ToRegister(lir->fun()));

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
  callVM<Fn, js::SetFunctionName>(lir);
}

void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}

void CodeGenerator::visitPhi(LPhi* lir) {
  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}

void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }

void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
  const LAllocation* temp;

  if (mir->getOperand(0)->type() != MIRType::Int32) {
    temp = ins->tempInt()->output();

    // The input is a double, so try to convert it to an integer.
    // If it does not fit in an integer, take the default case.
    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp),
                              defaultcase, false);
  } else {
    temp = ins->index();
  }

  emitTableSwitchDispatch(mir, ToRegister(temp),
                          ToRegisterOrInvalid(ins->tempPointer()));
}

void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->tempInt());
  ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

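  // Unbox int32 indexes directly; convert doubles to int32, taking the
  // default case when the value does not fit in an int32.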
  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}

void CodeGenerator::visitCloneLiteral(LCloneLiteral* lir) {
  pushArg(ImmWord(TenuredObject));
  pushArg(ToRegister(lir->getObjectLiteral()));

  using Fn = JSObject* (*)(JSContext*, HandleObject, NewObjectKind);
  callVM<Fn, DeepCloneObjectLiteral>(lir);
}

void CodeGenerator::visitParameter(LParameter* lir) {}

void CodeGenerator::visitCallee(LCallee* lir) {
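  // Load the JSFunction* from the callee token in this frame's header.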
  Register callee = ToRegister(lir->output());
  Address ptr(masm.getStackPointer(),
              frameSize() + JitFrameLayout::offsetOfCalleeToken());

  masm.loadFunctionFromCalleeToken(ptr, callee);
}

void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  Register output = ToRegister(lir->output());
  Address calleeToken(masm.getStackPointer(),
                      frameSize() + JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->functionNonDelazifying());

  // The low bit indicates whether this call is constructing; just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}

void CodeGenerator::visitStart(LStart* lir) {}

void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block.
  if (current->mir() != *gen->graph().poBegin()) {
    masm.jump(&returnLabel_);
  }
}

void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

#ifdef JS_TRACE_LOGGING
  if (JS::TraceLoggerSupported()) {
    emitTracelogStopEvent(TraceLogger_Baseline);
    emitTracelogStartEvent(TraceLogger_IonMonkey);
  }
#endif

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(masm.getStackPointer(), temp);
  }

  // Allocate the full frame for this function. Note that we have a new entry
  // here, so we reset MacroAssembler::framePushed() to 0 before reserving the
  // stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // Ensure that the Ion frame is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);

  masm.reserveStack(frameSize());
}

void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const LDefinition* object = lir->getDef(0);

  const ptrdiff_t frameOffset =
      BaselineFrame::reverseOffsetOfEnvironmentChain();

  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}

void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const LDefinition* object = lir->getDef(0);

  const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();

  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}

void CodeGenerator::visitOsrValue(LOsrValue* value) {
  const LAllocation* frame = value->getOperand(0);
  const ValueOperand out = ToOutValue(value);

  const ptrdiff_t frameOffset = value->mir()->frameOffset();

  masm.loadValue(Address(ToRegister(frame), frameOffset), out);
}

void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
  const LAllocation* frame = lir->getOperand(0);
  const ValueOperand out = ToOutValue(lir);

  Address flags =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
  Address retval =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());

  masm.moveValue(UndefinedValue(), out);

  Label done;
  masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
                    &done);
  masm.loadValue(retval, out);
  masm.bind(&done);
}

void CodeGenerator::visitStackArgT(LStackArgT* lir) {
  const LAllocation* arg = lir->getArgument();
  MIRType argType = lir->type();
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  int32_t stack_offset = StackOffsetOfPassedArg(argslot);
  Address dest(masm.getStackPointer(), stack_offset);

  if (arg->isFloatReg()) {
    masm.storeDouble(ToFloatRegister(arg), dest);
  } else if (arg->isRegister()) {
    masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
  } else {
    masm.storeValue(arg->toConstant()->toJSValue(), dest);
  }
}

void CodeGenerator::visitStackArgV(LStackArgV* lir) {
  ValueOperand val = ToValue(lir, 0);
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  int32_t stack_offset = StackOffsetOfPassedArg(argslot);

  masm.storeValue(val, Address(masm.getStackPointer(), stack_offset));
}

void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
  if (!group->numMoves()) {
    return;
  }

  MoveResolver& resolver = masm.moveResolver();

  for (size_t i = 0; i < group->numMoves(); i++) {
    const LMove& move = group->getMove(i);

    LAllocation from = move.from();
    LAllocation to = move.to();
    LDefinition::Type type = move.type();

    // No bogus moves.
    MOZ_ASSERT(from != to);
    MOZ_ASSERT(!from.isConstant());
    MoveOp::Type moveType;
    switch (type) {
      case LDefinition::OBJECT:
      case LDefinition::SLOTS:
#ifdef JS_NUNBOX32
      case LDefinition::TYPE:
      case LDefinition::PAYLOAD:
#else
      case LDefinition::BOX:
#endif
      case LDefinition::GENERAL:
        moveType = MoveOp::GENERAL;
        break;
      case LDefinition::INT32:
        moveType = MoveOp::INT32;
        break;
      case LDefinition::FLOAT32:
        moveType = MoveOp::FLOAT32;
        break;
      case LDefinition::DOUBLE:
        moveType = MoveOp::DOUBLE;
        break;
      case LDefinition::SIMD128INT:
        moveType = MoveOp::SIMD128INT;
        break;
      case LDefinition::SIMD128FLOAT:
        moveType = MoveOp::SIMD128FLOAT;
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }

    masm.propagateOOM(
        resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
  }

  masm.propagateOOM(resolver.resolve());
  if (masm.oom()) {
    return;
  }

  MoveEmitter emitter(masm);

#ifdef JS_CODEGEN_X86
  if (group->maybeScratchRegister().isGeneralReg()) {
    emitter.setScratchRegister(
        group->maybeScratchRegister().toGeneralReg()->reg());
  } else {
    resolver.sortMemoryToMemoryMoves();
  }
#endif

  emitter.emit(resolver);
  emitter.finish();
}

void CodeGenerator::visitInteger(LInteger* lir) {
  masm.move32(Imm32(lir->getValue()), ToRegister(lir->output()));
}

void CodeGenerator::visitInteger64(LInteger64* lir) {
  masm.move64(Imm64(lir->getValue()), ToOutRegister64(lir));
}

void CodeGenerator::visitPointer(LPointer* lir) {
  if (lir->kind() == LPointer::GC_THING) {
    masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
  } else {
    masm.movePtr(ImmPtr(lir->ptr()), ToRegister(lir->output()));
  }
}

void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
  // No-op.
}

void CodeGenerator::visitSlots(LSlots* lir) {
  Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
  masm.loadPtr(slots, ToRegister(lir->output()));
}

void CodeGenerator::visitLoadSlotT(LLoadSlotT* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
  AnyRegister result = ToAnyRegister(lir->output());

  masm.loadUnboxedValue(Address(base, offset), lir->mir()->type(), result);
}

void CodeGenerator::visitLoadSlotV(LLoadSlotV* lir) {
  ValueOperand dest = ToOutValue(lir);
  Register base = ToRegister(lir->input());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);

  masm.loadValue(Address(base, offset), dest);
}

void CodeGenerator::visitStoreSlotT(LStoreSlotT* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
  Address dest(base, offset);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(dest);
  }

  MIRType valueType = lir->mir()->value()->type();

  if (valueType == MIRType::ObjectOrNull) {
    masm.storeObjectOrNull(ToRegister(lir->value()), dest);
  } else {
    mozilla::Maybe<ConstantOrRegister> value;
    if (lir->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(lir->value()->toConstant()->toJSValue()));
    } else {
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(lir->value())));
    }
    masm.storeUnboxedValue(value.ref(), valueType, dest,
                           lir->mir()->slotType());
  }
}

void CodeGenerator::visitStoreSlotV(LStoreSlotV* lir) {
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(Value);

  const ValueOperand value = ToValue(lir, LStoreSlotV::Value);

  if (lir->mir()->needsBarrier()) {
    emitPreBarrier(Address(base, offset));
  }

  masm.storeValue(value, Address(base, offset));
}

static void GuardReceiver(MacroAssembler& masm, const ReceiverGuard& guard,
                          Register obj, Register scratch, Label* miss) {
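  // Branch to |miss| unless |obj| matches the receiver guard: test the object
  // group when the guard specifies one, otherwise test the shape.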
  if (guard.getGroup()) {
    masm.branchTestObjGroup(Assembler::NotEqual, obj, guard.getGroup(), scratch,
                            obj, miss);
  } else {
    masm.branchTestObjShape(Assembler::NotEqual, obj, guard.getShape(), scratch,
                            obj, miss);
  }
}

void CodeGenerator::emitGetPropertyPolymorphic(
    LInstruction* ins, Register obj, Register scratch,
    const TypedOrValueRegister& output) {
  MGetPropertyPolymorphic* mir = ins->mirRaw()->toGetPropertyPolymorphic();

  Label done;

  for (size_t i = 0; i < mir->numReceivers(); i++) {
    ReceiverGuard receiver = mir->receiver(i);

    Label next;
    masm.comment("GuardReceiver");
    GuardReceiver(masm, receiver, obj, scratch, &next);

    if (receiver.getShape()) {
      masm.comment("loadTypedOrValue");
      Register target = obj;

      Shape* shape = mir->shape(i);
      if (shape->slot() < shape->numFixedSlots()) {
        // Fixed slot.
        masm.loadTypedOrValue(
            Address(target, NativeObject::getFixedSlotOffset(shape->slot())),
            output);
      } else {
        // Dynamic slot.
        uint32_t offset =
            (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value);
        masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch);
        masm.loadTypedOrValue(Address(scratch, offset), output);
      }
    }

    if (i == mir->numReceivers() - 1) {
      bailoutFrom(&next, ins->snapshot());
    } else {
      masm.jump(&done);
      masm.bind(&next);
    }
  }

  masm.bind(&done);
}

void CodeGenerator::visitGetPropertyPolymorphicV(
    LGetPropertyPolymorphicV* ins) {
  Register obj = ToRegister(ins->obj());
  ValueOperand output = ToOutValue(ins);
  Register temp = ToRegister(ins->temp());
  emitGetPropertyPolymorphic(ins, obj, temp, output);
}

void CodeGenerator::visitGetPropertyPolymorphicT(
    LGetPropertyPolymorphicT* ins) {
  Register obj = ToRegister(ins->obj());
  TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output()));
  Register temp = ToRegister(ins->temp());
  emitGetPropertyPolymorphic(ins, obj, temp, output);
}

template <typename T>
static void EmitUnboxedPreBarrier(MacroAssembler& masm, T address,
                                  JSValueType type) {
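  // Only object and string values stored in unboxed slots are GC pointers
  // that need a pre-write barrier; other value types are skipped.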
  if (type == JSVAL_TYPE_OBJECT) {
    masm.guardedCallPreBarrier(address, MIRType::Object);
  } else if (type == JSVAL_TYPE_STRING) {
    masm.guardedCallPreBarrier(address, MIRType::String);
  }
}

void CodeGenerator::emitSetPropertyPolymorphic(
    LInstruction* ins, Register obj, Register scratch,
    const ConstantOrRegister& value) {
  MSetPropertyPolymorphic* mir = ins->mirRaw()->toSetPropertyPolymorphic();

  Label done;
  for (size_t i = 0; i < mir->numReceivers(); i++) {
    ReceiverGuard receiver = mir->receiver(i);

    Label next;
    GuardReceiver(masm, receiver, obj, scratch, &next);

    if (receiver.getShape()) {
      Register target = obj;

      Shape* shape = mir->shape(i);
      if (shape->slot() < shape->numFixedSlots()) {
        // Fixed slot.
        Address addr(target, NativeObject::getFixedSlotOffset(shape->slot()));
        if (mir->needsBarrier()) {
          emitPreBarrier(addr);
        }
        masm.storeConstantOrRegister(value, addr);
      } else {
        // Dynamic slot.
        masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch);
        Address addr(scratch, (shape->slot() - shape->numFixedSlots()) *
                                  sizeof(js::Value));
        if (mir->needsBarrier()) {
          emitPreBarrier(addr);
        }
        masm.storeConstantOrRegister(value, addr);
      }
    }

    if (i == mir->numReceivers() - 1) {
      bailoutFrom(&next, ins->snapshot());
    } else {
      masm.jump(&done);
      masm.bind(&next);
    }
  }

  masm.bind(&done);
}

void CodeGenerator::visitSetPropertyPolymorphicV(
    LSetPropertyPolymorphicV* ins) {
  Register obj = ToRegister(ins->obj());
  Register temp1 = ToRegister(ins->temp());
  ValueOperand value = ToValue(ins, LSetPropertyPolymorphicV::Value);
  emitSetPropertyPolymorphic(ins, obj, temp1, TypedOrValueRegister(value));
}

void CodeGenerator::visitSetPropertyPolymorphicT(
    LSetPropertyPolymorphicT* ins) {
  Register obj = ToRegister(ins->obj());
  Register temp1 = ToRegister(ins->temp());

  mozilla::Maybe<ConstantOrRegister> value;
  if (ins->mir()->value()->isConstant()) {
    value.emplace(
        ConstantOrRegister(ins->mir()->value()->toConstant()->toJSValue()));
  } else {
    value.emplace(TypedOrValueRegister(ins->mir()->value()->type(),
                                       ToAnyRegister(ins->value())));
  }

  emitSetPropertyPolymorphic(ins, obj, temp1, value.ref());
}

void CodeGenerator::visitElements(LElements* lir) {
  Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
  masm.loadPtr(elements, ToRegister(lir->output()));
}

void CodeGenerator::visitConvertElementsToDoubles(
    LConvertElementsToDoubles* lir) {
  Register elements = ToRegister(lir->elements());

  using Fn = void (*)(JSContext*, uintptr_t);
  OutOfLineCode* ool = oolCallVM<Fn, ObjectElements::ConvertElementsToDoubles>(
      lir, ArgList(elements), StoreNothing());

  Address convertedAddress(elements, ObjectElements::offsetOfFlags());
  Imm32 bit(ObjectElements::CONVERT_DOUBLE_ELEMENTS);
  masm.branchTest32(Assembler::Zero, convertedAddress, bit, ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitMaybeToDoubleElement(LMaybeToDoubleElement* lir) {
  Register elements = ToRegister(lir->elements());
  Register value = ToRegister(lir->value());
  ValueOperand out = ToOutValue(lir);

  FloatRegister temp = ToFloatRegister(lir->tempFloat());
  Label convert, done;

  // If the CONVERT_DOUBLE_ELEMENTS flag is set, convert the int32
  // value to double. Else, just box it.
  masm.branchTest32(Assembler::NonZero,
                    Address(elements, ObjectElements::offsetOfFlags()),
                    Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS), &convert);

  masm.tagValue(JSVAL_TYPE_INT32, value, out);
  masm.jump(&done);

  masm.bind(&convert);
  masm.convertInt32ToDouble(value, temp);
  masm.boxDouble(temp, out, temp);

  masm.bind(&done);
}

void CodeGenerator::visitMaybeCopyElementsForWrite(
    LMaybeCopyElementsForWrite* lir) {
  Register object = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());

  using Fn = bool (*)(JSContext*, NativeObject*);
  OutOfLineCode* ool = oolCallVM<Fn, NativeObject::CopyElementsForWrite>(
      lir, ArgList(object), StoreNothing());

  if (lir->mir()->checkNative()) {
    masm.branchIfNonNativeObj(object, temp, ool->rejoin());
  }

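  // Copy-on-write elements must be copied before they can be written to; call
  // into the VM when the COPY_ON_WRITE flag is set.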
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);
  masm.branchTest32(Assembler::NonZero,
                    Address(temp, ObjectElements::offsetOfFlags()),
                    Imm32(ObjectElements::COPY_ON_WRITE), ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
  Address environment(ToRegister(lir->function()),
                      JSFunction::offsetOfEnvironment());
  masm.loadPtr(environment, ToRegister(lir->output()));
}

void CodeGenerator::visitHomeObject(LHomeObject* lir) {
  Address homeObject(ToRegister(lir->function()),
                     FunctionExtended::offsetOfMethodHomeObjectSlot());
#ifdef DEBUG
  Label isObject;
  masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
  masm.assumeUnreachable("[[HomeObject]] must be Object");
  masm.bind(&isObject);
#endif
  masm.unboxObject(homeObject, ToRegister(lir->output()));
}

void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
  Register homeObject = ToRegister(lir->homeObject());
  Register output = ToRegister(lir->output());

  using Fn = JSObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = oolCallVM<Fn, HomeObjectSuperBase>(
      lir, ArgList(homeObject), StoreRegisterTo(output));

  masm.loadObjProto(homeObject, output);
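  // A tagged proto of 0 (null) or 1 (lazy) cannot be used as the super base;
  // take the VM path.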
  masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), ool->entry());
  masm.bind(ool->rejoin());
}

void CodeGenerator::visitNewLexicalEnvironmentObject(
    LNewLexicalEnvironmentObject* lir) {
  pushArg(Imm32(gc::DefaultHeap));
  pushArg(ToRegister(lir->enclosing()));
  pushArg(ImmGCPtr(lir->mir()->scope()));

  using Fn = LexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>,
                                           HandleObject, gc::InitialHeap);
  callVM<Fn, LexicalEnvironmentObject::create>(lir);
}

void CodeGenerator::visitCopyLexicalEnvironmentObject(
    LCopyLexicalEnvironmentObject* lir) {
  pushArg(Imm32(lir->mir()->copySlots()));
  pushArg(ToRegister(lir->env()));

  using Fn = JSObject* (*)(JSContext*, HandleObject, bool);
  callVM<Fn, jit::CopyLexicalEnvironmentObject>(lir);
}

void CodeGenerator::visitGuardShape(LGuardShape* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToTempRegisterOrInvalid(guard->temp());
  Label bail;
  masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
                          obj, &bail);
  bailoutFrom(&bail, guard->snapshot());
}

void CodeGenerator::visitGuardObjectGroup(LGuardObjectGroup* guard) {
  Register obj = ToRegister(guard->input());
  Register temp = ToTempRegisterOrInvalid(guard->temp());
  Assembler::Condition cond =
      guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
  Label bail;
  masm.branchTestObjGroup(cond, obj, guard->mir()->group(), temp, obj, &bail);
  bailoutFrom(&bail, guard->snapshot());
}

void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
  Register input = ToRegister(guard->input());
  Register expected = ToRegister(guard->expected());

  Assembler::Condition cond =
      guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
  bailoutCmpPtr(cond, input, expected, guard->snapshot());
}

void CodeGenerator::visitGuardReceiverPolymorphic(
    LGuardReceiverPolymorphic* lir) {
  const MGuardReceiverPolymorphic* mir = lir->mir();
  Register obj = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp());

  Label done;

  for (size_t i = 0; i < mir->numReceivers(); i++) {
    const ReceiverGuard& receiver = mir->receiver(i);

    Label next;
    GuardReceiver(masm, receiver, obj, temp, &next);

    if (i == mir->numReceivers() - 1) {
      bailoutFrom(&next, lir->snapshot());
    } else {
      masm.jump(&done);
      masm.bind(&next);
    }
  }

  masm.bind(&done);
}

void CodeGenerator::visitToNumeric(LToNumeric* lir) {
  ValueOperand operand = ToValue(lir, LToNumeric::Input);
  ValueOperand output = ToOutValue(lir);
  bool maybeInt32 = lir->mir()->mightBeType(MIRType::Int32);
  bool maybeDouble = lir->mir()->mightBeType(MIRType::Double);
  bool maybeNumber = maybeInt32 || maybeDouble;
  bool maybeBigInt = lir->mir()->mightBeType(MIRType::BigInt);
  int checks = int(maybeNumber) + int(maybeBigInt);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  OutOfLineCode* ool =
      oolCallVM<Fn, DoToNumeric>(lir, ArgList(operand), StoreValueTo(output));

  if (checks == 0) {
    masm.jump(ool->entry());
  } else {
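    // Emit one inline test per possible numeric type. Earlier tests branch to
    // |done| on success; only the last remaining test branches to the OOL
    // path on failure.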
    Label done;
    using Condition = Assembler::Condition;
    constexpr Condition Equal = Assembler::Equal;
    constexpr Condition NotEqual = Assembler::NotEqual;

    if (maybeNumber) {
      checks--;
      Condition cond = checks ? Equal : NotEqual;
      Label* target = checks ? &done : ool->entry();
      masm.branchTestNumber(cond, operand, target);
    }
    if (maybeBigInt) {
      checks--;
      Condition cond = checks ? Equal : NotEqual;
      Label* target = checks ? &done : ool->entry();
      masm.branchTestBigInt(cond, operand, target);
    }

    MOZ_ASSERT(checks == 0);
    masm.bind(&done);
    masm.moveValue(operand, output);
  }

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitTypeBarrierV(LTypeBarrierV* lir) {
  ValueOperand operand = ToValue(lir, LTypeBarrierV::Input);
  Register unboxScratch = ToTempRegisterOrInvalid(lir->unboxTemp());
  Register objScratch = ToTempRegisterOrInvalid(lir->objTemp());

  // guardObjectType may zero the payload/Value register on speculative paths
  // (we should have a defineReuseInput allocation in this case).
  Register spectreRegToZero = operand.payloadOrValueReg();

  Label miss;
  masm.guardTypeSet(operand, lir->mir()->resultTypeSet(),
                    lir->mir()->barrierKind(), unboxScratch, objScratch,
                    spectreRegToZero, &miss);
  bailoutFrom(&miss, lir->snapshot());
}

void CodeGenerator::visitTypeBarrierO(LTypeBarrierO* lir) {
  Register obj = ToRegister(lir->object());
  Register scratch = ToTempRegisterOrInvalid(lir->temp());
  Label miss, ok;

  if (lir->mir()->type() == MIRType::ObjectOrNull) {
    masm.comment("Object or Null");
    Label* nullTarget =
        lir->mir()->resultTypeSet()->mightBeMIRType(MIRType::Null) ? &ok
                                                                   : &miss;
    masm.branchTestPtr(Assembler::Zero, obj, obj, nullTarget);
  } else {
    MOZ_ASSERT(lir->mir()->type() == MIRType::Object);
    MOZ_ASSERT(lir->mir()->barrierKind() != BarrierKind::TypeTagOnly);
  }

  if (lir->mir()->barrierKind() != BarrierKind::TypeTagOnly) {
    masm.comment("Type tag only");
    // guardObjectType may zero the object register on speculative paths
    // (we should have a defineReuseInput allocation in this case).
    Register spectreRegToZero = obj;
    masm.guardObjectType(obj, lir->mir()->resultTypeSet(), scratch,
                         spectreRegToZero, &miss);
  }

  bailoutFrom(&miss, lir->snapshot());
  masm.bind(&ok);
}

// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  const LAllocation* object_;

 public:
  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
      : lir_(lir), object_(object) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteBarrier(this);
  }

  LInstruction* lir() const { return lir_; }
  const LAllocation* object() const { return object_; }
};

static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
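  // Fast path for post-barriering a write of a tenured constant cell: if the
  // cell is already in its arena's buffered-cell set, jump to |exit|; if the
  // arena still has the sentinel set, branch to |callVM| to allocate a real
  // one; otherwise mark the cell in the set and jump to |exit|.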
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  regs.add(temp);
}

static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal && maybeConstant) {
    EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                    &exit, &callVM);
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = regs.takeAny();
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupUnalignedABICall(regs.takeAny());
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  if (isGlobal) {
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostGlobalWriteBarrier));
  } else {
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
  }

  masm.bind(&exit);
}

void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());

  Register objreg;
  JSObject* object = nullptr;
  bool isGlobal = false;
  if (obj->isConstant()) {
    object = &obj->toConstant()->toObject();
    isGlobal = isGlobalObject(object);
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(object), objreg);
  } else {
    objreg = ToRegister(obj);
    regs.takeUnchecked(objreg);
  }

  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}

void CodeGenerator::emitPostWriteBarrier(Register objreg) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(objreg);
  EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
}

void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
    OutOfLineCallPostWriteBarrier* ool) {
  saveLiveVolatile(ool->lir());
  const LAllocation* obj = ool->object();
  emitPostWriteBarrier(obj);
  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}

void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
                                                OutOfLineCode* ool) {
  // Check whether an object is a global that we have already barriered before
  // calling into the VM.
  //
  // We only check for the script's global, not other globals within the same
  // compartment, because we bake in a pointer to realm->globalWriteBarriered
  // and doing that would be invalid for other realms because they could be
  // collected before the Ion code is discarded.

  if (!maybeGlobal->isConstant()) {
    return;
  }

  JSObject* obj = &maybeGlobal->toConstant()->toObject();
  if (gen->realm->maybeGlobal() != obj) {
    return;
  }

  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
                ool->rejoin());
}

template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  if (nurseryType == MIRType::Object) {
    if (lir->mir()->value()->type() == MIRType::ObjectOrNull) {
      masm.branchTestPtr(Assembler::Zero, value, value, ool->rejoin());
    } else {
      MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
    }
  } else {
    MOZ_ASSERT(nurseryType == MIRType::String);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}

template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp());

  if (lir->object()->isConstant()) {
    // Constant nursery objects cannot appear here, see
    // LIRGenerator::visitPostWriteElementBarrier.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir, LPostBarrierType::Input);
  // Bug 1386094 - most callers only need to check for object or string, not
  // both.
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}

void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
}

void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
}

void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
  visitPostWriteBarrierCommonV(lir, ool);
}

// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteElementBarrier
    : public OutOfLineCodeBase<CodeGenerator> {
  LInstruction* lir_;
  const LAllocation* object_;
  const LAllocation* index_;

 public:
  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
                                       const LAllocation* object,
                                       const LAllocation* index)
      : lir_(lir), object_(object), index_(index) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
  }

  LInstruction* lir() const { return lir_; }

  const LAllocation* object() const { return object_; }

  const LAllocation* index() const { return index_; }
};

void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  if (obj->isConstant()) {
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  masm.setupUnalignedABICall(runtimereg);
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(
      void*, (PostWriteElementBarrier<IndexInBounds::Maybe>)));

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}

void CodeGenerator::visitPostWriteElementBarrierO(
    LPostWriteElementBarrierO* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
                                                                          ool);
}

void CodeGenerator::visitPostWriteElementBarrierS(
    LPostWriteElementBarrierS* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
                                                                          ool);
}

void CodeGenerator::visitPostWriteElementBarrierV(
    LPostWriteElementBarrierV* lir) {
  auto ool = new (alloc())
      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
  visitPostWriteBarrierCommonV(lir, ool);
}

void CodeGenerator::visitCallNative(LCallNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithCppEntry());

  int callargslot = call->argslot();
  int unusedStack = StackOffsetOfPassedArg(callargslot);

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  masm.Push(ObjectValue(*target->rawJSFunction()));

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.move32(Imm32(call->numActualArgs()), argUintNReg);
  masm.moveStackPtrTo(argVpReg);

  masm.Push(argUintNReg);

  if (call->mir()->maybeCrossRealm()) {
    masm.movePtr(ImmGCPtr(target->rawJSFunction()), tempReg);
    masm.switchToObjectRealm(tempReg, tempReg);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  if (JS::TraceLoggerSupported()) {
    emitTracelogStartEvent(TraceLogger_Call);
  }

  // Construct and execute call.
  masm.setupUnalignedABICall(tempReg);
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);
  JSNative native = target->native();
  if (call->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, native), MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (JS::TraceLoggerSupported()) {
    emitTracelogStopEvent(TraceLogger_Call);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}

static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  // Check if it's a proxy.
  Label isProxy, done;
  if (kind == DOMObjectKind::Unknown) {
    masm.branchTestObjectIsProxy(true, obj, priv, &isProxy);
  }

  if (kind != DOMObjectKind::Proxy) {
    // If it's a native object, the value must be in a fixed slot.
    masm.debugAssertObjHasFixedSlots(obj, priv);
    masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
    if (kind == DOMObjectKind::Unknown) {
      masm.jump(&done);
    }
  }

  if (kind != DOMObjectKind::Native) {
    masm.bind(&isProxy);
#ifdef DEBUG
    // Sanity check: it must be a DOM proxy.
    Label isDOMProxy;
    masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, priv,
                                      GetDOMProxyHandlerFamily(), &isDOMProxy);
    masm.assumeUnreachable("Expected a DOM proxy");
    masm.bind(&isDOMProxy);
#endif
    masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
    masm.loadPrivate(Address(priv, detail::ProxyReservedSlots::offsetOfSlot(0)),
                     priv);
  }

  masm.bind(&done);
}

void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNative());
  MOZ_ASSERT(target->hasJitInfo());
  MOZ_ASSERT(call->mir()->isCallDOMNative());

  int callargslot = call->argslot();
  int unusedStack = StackOffsetOfPassedArg(callargslot);

  // Registers used for callWithABI() argument-passing.
  const Register argJSContext = ToRegister(call->getArgJSContext());
  const Register argObj = ToRegister(call->getArgObj());
  const Register argPrivate = ToRegister(call->getArgPrivate());
  const Register argArgs = ToRegister(call->getArgArgs());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // DOM methods have the signature:
  //  bool (*)(JSContext*, HandleObject, void* private, const
  //  JSJitMethodCallArgs& args)
  // Where args is initialized from an argc and a vp, vp[0] is space for an
  // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
  // function arguments.  Note that args stores the argv, not the vp, and
  // argv == vp + 2.

  // Nestle the stack up against the pushed arguments, leaving StackPointer at
  // &vp[1].
  masm.adjustStack(unusedStack);
  // argObj is filled with the extracted object, then returned.
  Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
  MOZ_ASSERT(obj == argObj);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. After this the StackPointer
  // points to &vp[0].
  masm.Push(ObjectValue(*target->rawJSFunction()));

  // Now compute the argv value. Since StackPointer is pointing to &vp[0] and
  // argv is &vp[2], we just need to add 2 * sizeof(Value) to the current
  // StackPointer.
  JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
  JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgc ==
                   IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

  LoadDOMPrivate(masm, obj, argPrivate,
                 static_cast<MCallDOMNative*>(call->mir())->objectKind());

  // Push argc from the call instruction into what will become the IonExitFrame.
  masm.Push(Imm32(call->numActualArgs()));

  // Push our argv onto the stack.
  masm.Push(argArgs);
  // And store our JSJitMethodCallArgs* in argArgs.
  masm.moveStackPtrTo(argArgs);

  // Push the |this| object so it can be passed as a HandleObject. We push it
  // after argc to keep the object pointer at the same sp-relative location as
  // in other DOMExitFrames.
  masm.Push(argObj);
  masm.moveStackPtrTo(argObj);

  if (call->mir()->maybeCrossRealm()) {
    // We use argJSContext as scratch register here.
    masm.movePtr(ImmGCPtr(target->rawJSFunction()), argJSContext);
    masm.switchToObjectRealm(argJSContext, argJSContext);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
  masm.loadJSContext(argJSContext);
  masm.enterFakeExitFrame(argJSContext, argJSContext,
                          ExitFrameType::IonDOMMethod);

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupUnalignedABICall(argJSContext);
  masm.loadJSContext(argJSContext);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argObj);
  masm.passABIArg(argPrivate);
  masm.passABIArg(argArgs);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->jitInfo()->method),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (target->jitInfo()->isInfallible) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the DOM method threw
  // an exception, the exception handler will do this.
  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}

void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));

  using Fn = bool (*)(JSContext * cx, HandlePropertyName, MutableHandleValue);
  callVM<Fn, GetIntrinsicValue>(lir);
}

void CodeGenerator::emitCallInvokeFunction(
    LInstruction* call, Register calleereg, bool constructing,
    bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
  // Nestle %esp up to the argument vector.
  // Each path must account for framePushed_ separately, for callVM to be valid.
  masm.freeStack(unusedStack);

  pushArg(masm.getStackPointer());  // argv.
  pushArg(Imm32(argc));             // argc.
  pushArg(Imm32(ignoresReturnValue));
  pushArg(Imm32(constructing));  // constructing.
  pushArg(calleereg);            // JSFunction*.

  using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                      MutableHandleValue);
  callVM<Fn, jit::InvokeFunction>(call);

  // Un-nestle %esp from the argument vector. No prefix was pushed.
  masm.reserveStack(unusedStack);
}

void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
  Register calleereg = ToRegister(call->getFunction());
  Register objreg = ToRegister(call->getTempObject());
  Register nargsreg = ToRegister(call->getNargsReg());
  uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
  Label invoke, thunk, makeCall, end;

  // Known-target case is handled by LCallKnown.
  MOZ_ASSERT(!call->hasSingleTarget());

  masm.checkStackAlignment();

  // Guard that calleereg is actually a function object.
  if (call->mir()->needsClassCheck()) {
    masm.branchTestObjClass(Assembler::NotEqual, calleereg, &JSFunction::class_,
                            nargsreg, calleereg, &invoke);
  }

  // Guard that calleereg is an interpreted function with a JSScript or a
  // wasm function.
  // If we are constructing, also ensure the callee is a constructor.
  if (call->mir()->isConstructing()) {
    masm.branchIfNotInterpretedConstructor(calleereg, nargsreg, &invoke);
  } else {
    masm.branchIfFunctionHasNoJitEntry(calleereg, /* isConstructing */ false,
                                       &invoke);
    masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor,
                            calleereg, objreg, &invoke);
  }

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToObjectRealm(calleereg, objreg);
  }

  if (call->mir()->needsArgCheck()) {
    masm.loadJitCodeRaw(calleereg, objreg);
  } else {
    masm.loadJitCodeNoArgCheck(calleereg, objreg);
  }

  // Nestle the StackPointer up to the argument vector.
  masm.freeStack(unusedStack);

  // Construct the IonFramePrefix.
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, JitFrameLayout::Size());
  masm.Push(Imm32(call->numActualArgs()));
  masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
  masm.Push(Imm32(descriptor));
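  // Together with the return address stored by the call below, the words
  // pushed here (actual argc, callee token, frame descriptor) complete the
  // JitFrameLayout expected by the callee.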

  // Check whether the provided arguments satisfy target argc.
  // We cannot have lowered to LCallGeneric with a known target. Assert that we
  // didn't add any undefineds in IonBuilder. NB: MCall::numStackArgs includes
  // |this|.
  DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
  MOZ_ASSERT(call->numActualArgs() ==
             call->mir()->numStackArgs() - numNonArgsOnStack);
  masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()),
                        nargsreg);
  masm.branch32(Assembler::Above, nargsreg, Imm32(call->numActualArgs()),
                &thunk);
  masm.jump(&makeCall);

  // Argument fixup needed. Load the ArgumentsRectifier.
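  // The rectifier pushes |undefined| for the missing formals before calling
  // the target, so the callee always sees at least |nargs| arguments.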
  masm.bind(&thunk);
  {
    TrampolinePtr argumentsRectifier =
        gen->jitRuntime()->getArgumentsRectifier();
    masm.movePtr(argumentsRectifier, objreg);
  }

  // Finally call the function in objreg.
  masm.bind(&makeCall);
  uint32_t callOffset = masm.callJit(objreg);
  markSafepointAt(callOffset, call);

  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
  // The return address has already been removed from the Ion frame.
  int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
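  // prefixGarbage covers the descriptor, callee token and argc words; the
  // return address slot is excluded since it was already consumed by the
  // callee's return.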
  masm.adjustStack(prefixGarbage - unusedStack);
  masm.jump(&end);

  // Handle uncompiled or native functions.
  masm.bind(&invoke);
  emitCallInvokeFunction(call, calleereg, call->isConstructing(),
                         call->ignoresReturnValue(), call->numActualArgs(),
                         unusedStack);

  masm.bind(&end);

  // If the return value of the constructing function is Primitive,
  // replace the return value with the Object from CreateThis.
  if (call->mir()->isConstructing()) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
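    // The first argument slot (|unusedStack| bytes above the StackPointer)
    // still holds the |this| object created by CreateThis.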
    masm.loadValue(Address(masm.getStackPointer(), unusedStack),
                   JSReturnOperand);
    masm.bind(&notPrimitive);
  }
}

void CodeGenerator::emitCallInvokeFunctionShuffleNewTarget(
    LCallKnown* call, Register calleeReg, uint32_t numFormals,
    uint32_t unusedStack) {
  masm.freeStack(unusedStack);

  pushArg(masm.getStackPointer());        // argv.
  pushArg(Imm32(numFormals));             // numFormals.
  pushArg(Imm32(call->numActualArgs()));  // numActualArgs.
  pushArg(calleeReg);                     // JSFunction*.

  using Fn = bool (*)(JSContext*, HandleObject, uint32_t, uint32_t, Value*,
                      MutableHandleValue);
  callVM<Fn, InvokeFunctionShuffleNewTarget>(call);

  masm.reserveStack(unusedStack);
}

void CodeGenerator::visitCallKnown(LCallKnown* call) {
  Register calleereg = ToRegister(call->getFunction());
  Register objreg = ToRegister(call->getTempObject());
  uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
  WrappedFunction* target = call->getSingleTarget();

  // Native single targets (except wasm) are handled by LCallNative.
  MOZ_ASSERT(!target->isNativeWithCppEntry());
  // Missing arguments must have been explicitly appended by the IonBuilder.
  DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
  MOZ_ASSERT(target->nargs() <=
             call->mir()->numStackArgs() - numNonArgsOnStack);

  MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());

  masm.checkStackAlignment();

  if (target->isClassConstructor() && !call->isConstructing()) {
    emitCallInvokeFunction(call, calleereg, call->isConstructing(),
                           call->ignoresReturnValue(), call->numActualArgs(),
                           unusedStack);
    return;
  }

  MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());

  Label uncompiled;
  if (!target->isNativeWithJitEntry()) {
    // The calleereg is known to be a non-native function, but might point
    // to a LazyScript instead of a JSScript.
    masm.branchIfFunctionHasNoJitEntry(calleereg, call->isConstructing(),
                                       &uncompiled);
  }

  if (call->mir()->maybeCrossRealm()) {
    masm.switchToObjectRealm(calleereg, objreg);
  }

  if (call->mir()->needsArgCheck()) {
    masm.loadJitCodeRaw(calleereg, objreg);
  } else {
    masm.loadJitCodeNoArgCheck(calleereg, objreg);
  }

  // Nestle the StackPointer up to the argument vector.
  masm.freeStack(unusedStack);

  // Construct the IonFramePrefix.
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, JitFrameLayout::Size());
  masm.Push(Imm32(call->numActualArgs()));
  masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
  masm.Push(Imm32(descriptor));

  // Finally call the function in objreg.
  uint32_t callOffset = masm.callJit(objreg);
  markSafepointAt(callOffset, call);

  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
  // The return address has already been removed from the Ion frame.
  int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
  masm.adjustStack(prefixGarbage - unusedStack);

  if (uncompiled.used()) {
    Label end;
    masm.jump(&end);

    // Handle uncompiled functions.
    masm.bind(&uncompiled);
    if (call->isConstructing() && target->nargs() > call->numActualArgs()) {
      emitCallInvokeFunctionShuffleNewTarget(call, calleereg, target->nargs(),
                                             unusedStack);
    } else {
      emitCallInvokeFunction(call, calleereg, call->isConstructing(),
                             call->ignoresReturnValue(), call->numActualArgs(),
                             unusedStack);
    }

    masm.bind(&end);
  }

  // If the return value of the constructing function is Primitive,
  // replace the return value with the Object from CreateThis.
  if (call->mir()->isConstructing()) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.loadValue(Address(masm.getStackPointer(), unusedStack),
                   JSReturnOperand);
    masm.bind(&notPrimitive);
  }
}

template <typename T>
void CodeGenerator::emitCallInvokeFunction(T* apply, Register extraStackSize) {
  Register objreg = ToRegister(apply->getTempObject());
  MOZ_ASSERT(objreg != extraStackSize);

  // Capture argv (the current StackPointer), then push the space used by the
  // arguments so it can be restored after the call.
  masm.moveStackPtrTo(objreg);
  masm.Push(extraStackSize);

  pushArg(objreg);                            // argv.
  pushArg(ToRegister(apply->getArgc()));      // argc.
  pushArg(Imm32(false));                      // ignoresReturnValue.
  pushArg(Imm32(false));                      // isConstructing.
  pushArg(ToRegister(apply->getFunction()));  // JSFunction*.

  // This specialization of callVM restores the extraStackSize after the call.
  using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                      MutableHandleValue);
  callVM<Fn, jit::InvokeFunction>(apply, &extraStackSize);

  masm.Pop(extraStackSize);
}

// Do not bail out after the execution of this function, since the stack no
// longer corresponds to what is expected by the snapshots.
void CodeGenerator::emitAllocateSpaceForApply(Register argcreg,
                                              Register extraStackSpace,
                                              Label* end) {
  // Compute the stack usage: start from argc (which also serves as the loop
  // counter for the copy below).
  masm.movePtr(argcreg, extraStackSpace);

  // Align the JitFrameLayout on the JitStackAlignment.
  if (JitStackValueAlignment > 1) {
    MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
               "Stack padding assumes that the frameSize is correct");
    MOZ_ASSERT(JitStackValueAlignment == 2);
    Label noPaddingNeeded;
    // If the number of arguments is odd, then we do not need any padding.
    masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
    masm.addPtr(Imm32(1), extraStackSpace);
    masm.bind(&noPaddingNeeded);
  }
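  // For example, with JitStackValueAlignment == 2, an odd argc plus the
  // |this| Value pushed later gives an even number of Values, so no padding
  // is needed; an even argc needs one extra Value of padding.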

  // Reserve space for copying the arguments.
  NativeObject::elementsSizeMustNotOverflow();
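  // ValueShift scales the Value count into a byte count before it is
  // subtracted from the StackPointer.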
  masm.lshiftPtr(Imm32(ValueShift), extraStackSpace);
  masm.subFromStackPtr(extraStackSpace);

#ifdef DEBUG
  // Put a magic value in the space reserved for padding. Note, this code
  // cannot be merged with the previous test, as not all architectures can
  // write below their stack pointers.
  if (JitStackValueAlignment > 1) {
    MOZ_ASSERT(JitStackValueAlignment == 2);
    Label noPaddingNeeded;
    // If the number of arguments is odd, then we do not need any padding.
    masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
    BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
    masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
    masm.bind(&noPaddingNeeded);
  }
#endif

  // Skip the copy of arguments if there are none.
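  // (The copy loop in emitCopyValuesForApply copies one element before
  // testing its counter, so it must not be entered with argc == 0.)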
  masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, end);
}

// Destroys argvIndex and copyreg.
void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
                                           Register argvIndex, Register copyreg,
                                           size_t argvSrcOffset,
                                           size_t argvDstOffset) {
  Label loop;
  masm.bind(&loop);
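  // Each iteration copies one Value; argvIndex counts the remaining Values
  // and runs from the initial count down to 1.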

  // As argvIndex is off by 1, and we use the decBranchPtr instruction to loop
  // back, we have to subtract the size of the word that is copied.
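  // For example, on 64-bit targets, when argvIndex is 1 the source address is
  // argvSrcBase + argvSrcOffset, i.e. argument 0.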
  BaseValueIndex srcPtr(argvSrcBase, argvIndex, argvSrcOffset - sizeof(void*));
  BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
                        argvDstOffset - sizeof(void*));
  masm.loadPtr(srcPtr, copyreg);
  masm.storePtr(copyreg, dstPtr);

  // Handle 32-bit architectures.
  if (sizeof(Value) == 2 * sizeof(void*)) {
    BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
                             argvSrcOffset - 2 * sizeof(void*));
    BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
                             argvDstOffset - 2 * sizeof(void*));
    masm.loadPtr(srcPtrLow, copyreg);
    masm.storePtr(copyreg, dstPtrLow);
  }

  masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
}

void CodeGenerator::emitPopArguments(Register extraStackSpace) {
  // Pop |this| and Arguments.
  masm.freeStack(extraStackSpace);
}

void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply,
                                      Register extraStackSpace) {
  // Holds the function nargs. Initially the number of args to the caller.
  Register argcreg = ToRegister(apply->getArgc());
  Register copyreg = ToRegister(apply->getTempObject());

  Label end;
  emitAllocateSpaceForApply(argcreg, extraStackSpace, &end);

  // We are making a copy of the arguments which are above the JitFrameLayout
  // of the current Ion frame.
  //
  // [arg1] [arg0] <- src
  // [this] [JitFrameLayout] [.. frameSize ..]
  // [pad] [arg1] [arg0] <- dst

  // Compute the source and destination offsets into the stack.
  size_t argvSrcOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
  size_t argvDstOffset = 0;

  // Save the extra stack space, and re-use the register as a base.
  masm.push(extraStackSpace);
  Register argvSrcBase = extraStackSpace;
  argvSrcOffset += sizeof(void*);
  argvDstOffset += sizeof(void*);

  // Save the actual number of arguments, and re-use the register as an index
  // register.
  masm.push(argcreg);
  Register argvIndex = argcreg;
  argvSrcOffset += sizeof(void*);
  argvDstOffset += sizeof(void*);
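  // Each push above moves the StackPointer down by one word, so both offsets
  // are bumped to keep addressing the same stack slots.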

  // srcPtr = (StackPointer + extraStackSpace) + argvSrcOffset
  // dstPtr = (StackPointer                  ) + argvDstOffset
  masm.addStackPtrTo(argvSrcBase);

  // Copy arguments.
  emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
                         argvDstOffset);

  // Restore argcreg and the extra stack space counter.
  masm.pop(argcreg);
  masm.pop(extraStackSpace);