js/src/jit/IonBuilder.cpp
author Ciure Andrei <aciure@mozilla.com>
Wed, 01 Apr 2020 21:09:54 +0300
changeset 521671 011ad9ff26088e341b0aebecd4424523b7759e88
parent 521668 9514a3c5e611a24d1dacd19b9daeb5e1cc1b2827
child 521679 45b2d77ee119da12f4efa8c9d57b11d19bbf8cb0
permissions -rw-r--r--
Backed out changeset 9514a3c5e611 (bug 1617851) for causing bustages CLOSED TREE

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonBuilder.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/ScopeExit.h"

#include <algorithm>

#include "builtin/Eval.h"
#include "builtin/TypedObject.h"
#include "frontend/SourceNotes.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineInspector.h"
#include "jit/CacheIR.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitSpewer.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"
#include "util/CheckedArithmetic.h"
#include "vm/ArgumentsObject.h"
#include "vm/BytecodeIterator.h"
#include "vm/BytecodeLocation.h"
#include "vm/BytecodeUtil.h"
#include "vm/EnvironmentObject.h"
#include "vm/Instrumentation.h"
#include "vm/Opcodes.h"
#include "vm/RegExpStatics.h"
#include "vm/SelfHosting.h"
#include "vm/TraceLogging.h"

#include "gc/Nursery-inl.h"
#include "jit/CompileInfo-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/BytecodeIterator-inl.h"
#include "vm/BytecodeLocation-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/EnvironmentObject-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/NativeObject-inl.h"
#include "vm/ObjectGroup-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::AssertedCast;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;

// Snapshot of the type information observed in a BaselineFrame, captured for
// use during Ion compilation. Actual values are deliberately not stored (see
// NewBaselineFrameInspector: they could be nursery pointers); only their
// TypeSet::Types are recorded.
class jit::BaselineFrameInspector {
 public:
  // Type of the frame's |this| argument; UndefinedType for non-function
  // frames.
  TypeSet::Type thisType;
  // The frame's environment chain, recorded only when it is a singleton
  // object; nullptr otherwise.
  JSObject* singletonEnvChain;

  // Types of the formal arguments and of the frame's value slots.
  Vector<TypeSet::Type, 4, JitAllocPolicy> argTypes;
  Vector<TypeSet::Type, 4, JitAllocPolicy> varTypes;

  explicit BaselineFrameInspector(TempAllocator* temp)
      : thisType(TypeSet::UndefinedType()),
        singletonEnvChain(nullptr),
        argTypes(*temp),
        varTypes(*temp) {}
};

// Builds a BaselineFrameInspector describing |frame|'s current types. The
// inspector is allocated from |temp|'s LifoAlloc; returns nullptr on
// allocation failure.
BaselineFrameInspector* jit::NewBaselineFrameInspector(TempAllocator* temp,
                                                       BaselineFrame* frame,
                                                       uint32_t frameSize) {
  MOZ_ASSERT(frame);

  BaselineFrameInspector* inspector =
      temp->lifoAlloc()->new_<BaselineFrameInspector>(temp);
  if (!inspector) {
    return nullptr;
  }

  // Note: copying the actual values into a temporary structure for use
  // during compilation could capture nursery pointers, so the values' types
  // are recorded instead.

  if (frame->isFunctionFrame()) {
    inspector->thisType =
        TypeSet::GetMaybeUntrackedValueType(frame->thisArgument());
  }

  // Only a singleton environment chain is remembered.
  if (frame->environmentChain()->isSingleton()) {
    inspector->singletonEnvChain = frame->environmentChain();
  }

  JSScript* script = frame->script();

  if (script->function()) {
    // Record one type per formal argument.
    if (!inspector->argTypes.reserve(frame->numFormalArgs())) {
      return nullptr;
    }
    for (size_t i = 0; i < frame->numFormalArgs(); i++) {
      if (script->formalIsAliased(i)) {
        // Aliased formals don't live in the frame's slots; record a
        // conservative placeholder type instead.
        inspector->argTypes.infallibleAppend(TypeSet::UndefinedType());
      } else if (!script->argsObjAliasesFormals()) {
        // The frame slot holds the canonical value for this formal.
        TypeSet::Type type =
            TypeSet::GetMaybeUntrackedValueType(frame->unaliasedFormal(i));
        inspector->argTypes.infallibleAppend(type);
      } else if (frame->hasArgsObj()) {
        // Formals are aliased by an existing arguments object; read the
        // value from there.
        TypeSet::Type type =
            TypeSet::GetMaybeUntrackedValueType(frame->argsObj().arg(i));
        inspector->argTypes.infallibleAppend(type);
      } else {
        // argsObjAliasesFormals() but no arguments object has been created
        // yet; fall back to the placeholder type.
        inspector->argTypes.infallibleAppend(TypeSet::UndefinedType());
      }
    }
  }

  // Record one type per live value slot in the frame.
  uint32_t numValueSlots = frame->numValueSlots(frameSize);
  if (!inspector->varTypes.reserve(numValueSlots)) {
    return nullptr;
  }
  for (size_t i = 0; i < numValueSlots; i++) {
    TypeSet::Type type =
        TypeSet::GetMaybeUntrackedValueType(*frame->valueSlot(i));
    inspector->varTypes.infallibleAppend(type);
  }

  return inspector;
}

// Constructs an IonBuilder for compiling or analyzing |info|'s script.
// |analysisContext| is non-null exactly when running the definite-properties
// analysis (asserted below). |inliningDepth| and |loopDepth| are non-zero
// when this builder is created for an inlined callee / inside a loop of the
// caller.
IonBuilder::IonBuilder(JSContext* analysisContext, MIRGenerator& mirGen,
                       CompileInfo* info, CompilerConstraintList* constraints,
                       BaselineInspector* inspector,
                       BaselineFrameInspector* baselineFrame,
                       size_t inliningDepth, uint32_t loopDepth)
    : actionableAbortScript_(nullptr),
      actionableAbortPc_(nullptr),
      actionableAbortMessage_(nullptr),
      analysisContext(analysisContext),
      baselineFrame_(baselineFrame),
      constraints_(constraints),
      mirGen_(mirGen),
      tiOracle_(this, constraints),
      realm(mirGen.realm),
      info_(info),
      optimizationInfo_(&mirGen.optimizationInfo()),
      alloc_(&mirGen.alloc()),
      graph_(&mirGen.graph()),
      thisTypes(nullptr),
      argTypes(nullptr),
      typeArray(nullptr),
      typeArrayHint(0),
      bytecodeTypeMap(nullptr),
      loopDepth_(loopDepth),
      loopStack_(*alloc_),
      trackedOptimizationSites_(*alloc_),
      abortedPreliminaryGroups_(*alloc_),
      callerResumePoint_(nullptr),
      callerBuilder_(nullptr),
      iterators_(*alloc_),
      loopHeaders_(*alloc_),
      inspector(inspector),
      inliningDepth_(inliningDepth),
      inlinedBytecodeLength_(0),
      numLoopRestarts_(0),
      failedBoundsCheck_(info_->script()->failedBoundsCheck()),
      failedShapeGuard_(info_->script()->failedShapeGuard()),
      failedLexicalCheck_(info_->script()->failedLexicalCheck()),
#ifdef DEBUG
      hasLazyArguments_(false),
#endif
      inlineCallInfo_(nullptr),
      maybeFallbackFunctionGetter_(nullptr) {
  // Start building at the script's first bytecode op.
  script_ = info_->script();
  pc = info_->startPC();

  // The script must have a JitScript. Compilation requires a BaselineScript
  // too.
  MOZ_ASSERT(script_->hasJitScript());
  MOZ_ASSERT_IF(!info_->isAnalysis(), script_->hasBaselineScript());

  // An analysis context is supplied iff this is the definite-properties
  // analysis.
  MOZ_ASSERT(!!analysisContext ==
             (info_->analysisMode() == Analysis_DefiniteProperties));
  MOZ_ASSERT(script_->numBytecodeTypeSets() < JSScript::MaxBytecodeTypeSets);

  if (!info_->isAnalysis()) {
    script()->jitScript()->setIonCompiledOrInlined();
  }
}

// Aborts the current compilation with reason |r|, spewing the script's
// filename (plus the line for the current pc in DEBUG builds).
mozilla::GenericErrorResult<AbortReason> IonBuilder::abort(AbortReason r) {
  auto res = mirGen_.abort(r);
  // Don't call PCToLineNumber in release builds.
#ifdef DEBUG
  JitSpew(JitSpew_IonAbort, "aborted @ %s:%d", script()->filename(),
          PCToLineNumber(script(), pc));
#else
  JitSpew(JitSpew_IonAbort, "aborted @ %s", script()->filename());
#endif
  return res;
}

// Aborts the current compilation with reason |r| and a printf-style message,
// forwarded to MIRGenerator::abortFmt. Spews the script's filename (plus the
// line for the current pc in DEBUG builds).
mozilla::GenericErrorResult<AbortReason> IonBuilder::abort(AbortReason r,
                                                           const char* message,
                                                           ...) {
  // Don't call PCToLineNumber in release builds.
  va_list ap;
  va_start(ap, message);
  auto res = mirGen_.abortFmt(r, message, ap);
  va_end(ap);
#ifdef DEBUG
  JitSpew(JitSpew_IonAbort, "aborted @ %s:%d", script()->filename(),
          PCToLineNumber(script(), pc));
#else
  JitSpew(JitSpew_IonAbort, "aborted @ %s", script()->filename());
#endif
  return res;
}

// Returns the builder of the outermost (non-inlined) script by walking the
// chain of caller builders until one has no caller.
IonBuilder* IonBuilder::outermostBuilder() {
  IonBuilder* outer = this;
  while (IonBuilder* caller = outer->callerBuilder_) {
    outer = caller;
  }
  return outer;
}

// Spews |message| with the current script location. DEBUG-only, because
// PCToLineNumber is too expensive to call in release builds.
void IonBuilder::spew(const char* message) {
  // Don't call PCToLineNumber in release builds.
#ifdef DEBUG
  JitSpew(JitSpew_IonMIR, "%s @ %s:%d", message, script()->filename(),
          PCToLineNumber(script(), pc));
#endif
}

// Returns the unique JSFunction callee described by |calleeTypes|, or nullptr
// when no single function target can be determined.
JSFunction* IonBuilder::getSingleCallTarget(TemporaryTypeSet* calleeTypes) {
  // No type information means no known target.
  if (calleeTypes == nullptr) {
    return nullptr;
  }

  // There must be exactly one object in the set, and it must be a function.
  TemporaryTypeSet::ObjectKey* key = calleeTypes->maybeSingleObject();
  if (key == nullptr || key->clasp() != &JSFunction::class_) {
    return nullptr;
  }

  // A singleton key names the function directly; otherwise fall back to the
  // group's interpreted function, which may be null.
  if (key->isSingleton()) {
    return &key->singleton()->as<JSFunction>();
  }
  return key->group()->maybeInterpretedFunction();
}

// Collects up to |maxTargets| potential call targets from |calleeTypes| into
// |targets|. Leaves |targets| empty (and returns Ok) whenever the callee set
// is unknown, empty, too large, includes non-object types, or contains an
// object that is not callable/constructable as invoked; only allocation
// failure aborts.
AbortReasonOr<Ok> IonBuilder::getPolyCallTargets(TemporaryTypeSet* calleeTypes,
                                                 bool constructing,
                                                 InliningTargets& targets,
                                                 uint32_t maxTargets) {
  MOZ_ASSERT(targets.empty());

  if (!calleeTypes) {
    return Ok();
  }

  // Non-zero base flags mean the set also contains non-object types, so the
  // callees are not fully known.
  if (calleeTypes->baseFlags() != 0) {
    return Ok();
  }

  unsigned objCount = calleeTypes->getObjectCount();

  if (objCount == 0 || objCount > maxTargets) {
    return Ok();
  }

  if (!targets.reserve(objCount)) {
    return abort(AbortReason::Alloc);
  }
  for (unsigned i = 0; i < objCount; i++) {
    JSObject* obj = calleeTypes->getSingleton(i);
    ObjectGroup* group = nullptr;
    if (obj) {
      MOZ_ASSERT(obj->isSingleton());
    } else {
      // Entry is a group, not a singleton object. A cleared entry is simply
      // skipped, so |targets| may end up with fewer than objCount entries.
      group = calleeTypes->getGroup(i);
      if (!group) {
        continue;
      }

      // Without a known interpreted function for the group we cannot name
      // the target; give up on the whole set.
      obj = group->maybeInterpretedFunction();
      if (!obj) {
        targets.clear();
        return Ok();
      }

      MOZ_ASSERT(!obj->isSingleton());
    }

    // Don't optimize if the callee is not callable or constructable per
    // the manner it is being invoked, so that CallKnown does not have to
    // handle these cases (they will always throw).
    if (constructing ? !obj->isConstructor() : !obj->isCallable()) {
      targets.clear();
      return Ok();
    }

    targets.infallibleAppend(InliningTarget(obj, group));
  }

  return Ok();
}

// Spews |reason| for not inlining — with the script's location when
// |targetScript| is known — and returns InliningDecision_DontInline.
IonBuilder::InliningDecision IonBuilder::DontInline(JSScript* targetScript,
                                                    const char* reason) {
  if (!targetScript) {
    JitSpew(JitSpew_Inlining, "Cannot inline: %s", reason);
    return InliningDecision_DontInline;
  }

  JitSpew(JitSpew_Inlining, "Cannot inline %s:%u:%u %s",
          targetScript->filename(), targetScript->lineno(),
          targetScript->column(), reason);
  return InliningDecision_DontInline;
}

/*
 * |hasCommonInliningPath| determines whether the current inlining path has been
 * seen before based on the sequence of scripts in the chain of |IonBuilder|s.
 *
 * An inlining path for a function |f| is the sequence of functions whose
 * inlinings precede |f| up to any previous occurrences of |f|.
 * So, if we have the chain of inlinings
 *
 *          f1 -> f2 -> f -> f3 -> f4 -> f5 -> f
 *          --------         --------------
 *
 * the inlining paths for |f| are [f2, f1] and [f5, f4, f3].
 * When attempting to inline |f|, we find all existing inlining paths for |f|
 * and check whether they share a common prefix with the path created were |f|
 * inlined.
 *
 * For example, given mutually recursive functions |f| and |g|, a possible
 * inlining is
 *
 *                           +---- Inlining stopped here...
 *                           |
 *                           v
 *          a -> f -> g -> f \ -> g -> f -> g -> ...
 *
 * where the vertical bar denotes the termination of inlining.
 * Inlining is terminated because we have already observed the inlining path
 * [f] when inlining function |g|. Note that this will inline recursive
 * functions such as |fib| only one level, as |fib| has a zero length inlining
 * path which trivially prefixes all inlining paths.
 *
 */
bool IonBuilder::hasCommonInliningPath(const JSScript* scriptToInline) {
  // Find all previous inlinings of the |scriptToInline| and check for common
  // inlining paths with the top of the inlining stack.
  for (IonBuilder* it = this->callerBuilder_; it; it = it->callerBuilder_) {
    if (it->script() != scriptToInline) {
      continue;
    }

    // This only needs to check the top of each stack for a match,
    // as a match of length one ensures a common prefix.
    IonBuilder* path = it->callerBuilder_;
    if (!path || this->script() == path->script()) {
      return true;
    }
  }

  return false;
}

// Decides whether |target|, invoked as described by |callInfo|, may be
// inlined into the current script. Returns InliningDecision_Inline when all
// checks pass, InliningDecision_DontInline (with a spewed reason) when
// inlining is impossible or unprofitable, and InliningDecision_Error when the
// definite-properties analysis fails to materialize the target's script or
// JitScript.
IonBuilder::InliningDecision IonBuilder::canInlineTarget(JSFunction* target,
                                                         CallInfo& callInfo) {
  if (!optimizationInfo().inlineInterpreted()) {
    return InliningDecision_DontInline;
  }

  if (TraceLogTextIdEnabled(TraceLogger_InlinedScripts)) {
    // Note: the space before "but" keeps the concatenated literals readable
    // in the spew output.
    return DontInline(nullptr,
                      "Tracelogging of inlined scripts is enabled "
                      "but Tracelogger cannot do that yet.");
  }

  if (!target->isInterpreted()) {
    return DontInline(nullptr, "Non-interpreted target");
  }

  // Never inline scripted cross-realm calls.
  if (target->realm() != script()->realm()) {
    return DontInline(nullptr, "Cross-realm call");
  }

  if (info().analysisMode() != Analysis_DefiniteProperties) {
    // If |this| or an argument has an empty resultTypeSet, don't bother
    // inlining, as the call is currently unreachable due to incomplete type
    // information. This does not apply to the definite properties analysis,
    // in that case we want to inline anyway.

    if (callInfo.thisArg()->emptyResultTypeSet()) {
      return DontInline(nullptr, "Empty TypeSet for |this|");
    }

    for (size_t i = 0; i < callInfo.argc(); i++) {
      if (callInfo.getArg(i)->emptyResultTypeSet()) {
        return DontInline(nullptr, "Empty TypeSet for argument");
      }
    }
  }

  // Allow constructing lazy scripts when performing the definite properties
  // analysis, as baseline has not been used to warm the caller up yet.
  if (target->isInterpreted() &&
      info().analysisMode() == Analysis_DefiniteProperties) {
    RootedFunction fun(analysisContext, target);
    RootedScript script(analysisContext,
                        JSFunction::getOrCreateScript(analysisContext, fun));
    if (!script) {
      return InliningDecision_Error;
    }

    if (CanBaselineInterpretScript(script)) {
      AutoKeepJitScripts keepJitScript(analysisContext);
      if (!script->ensureHasJitScript(analysisContext, keepJitScript)) {
        return InliningDecision_Error;
      }
    }
  }

  if (!target->hasBytecode()) {
    return DontInline(nullptr, "Lazy script");
  }

  JSScript* inlineScript = target->nonLazyScript();
  if (callInfo.constructing()) {
    if (!target->isConstructor()) {
      return DontInline(inlineScript, "Callee is not a constructor");
    }

    // Don't inline if creating |this| for this target is complicated, for
    // example when the newTarget.prototype lookup may be effectful.
    if (!target->constructorNeedsUninitializedThis() &&
        callInfo.getNewTarget() != callInfo.fun()) {
      JSFunction* newTargetFun =
          getSingleCallTarget(callInfo.getNewTarget()->resultTypeSet());
      if (!newTargetFun) {
        return DontInline(inlineScript, "Constructing with unknown newTarget");
      }
      if (!newTargetFun->hasNonConfigurablePrototypeDataProperty()) {
        return DontInline(inlineScript,
                          "Constructing with effectful newTarget.prototype");
      }
    } else {
      // At this point, the target is either a function that requires an
      // uninitialized-this (bound function or derived class constructor) or a
      // scripted constructor with a non-configurable .prototype data property
      // (self-hosted built-in constructor, non-self-hosted scripted function).
      MOZ_ASSERT(target->constructorNeedsUninitializedThis() ||
                 target->hasNonConfigurablePrototypeDataProperty());
    }
  }

  // Calling a class constructor without |new| always throws.
  if (!callInfo.constructing() && target->isClassConstructor()) {
    return DontInline(inlineScript, "Not constructing class constructor");
  }

  if (!CanIonInlineScript(inlineScript)) {
    return DontInline(inlineScript, "Disabled Ion compilation");
  }

  if (info().isAnalysis()) {
    // Analysis requires only a JitScript.
    if (!inlineScript->hasJitScript()) {
      return DontInline(inlineScript, "No JitScript");
    }
  } else {
    // Compilation requires a BaselineScript.
    if (!inlineScript->hasBaselineScript()) {
      return DontInline(inlineScript, "No baseline jitcode");
    }
  }

  // Don't inline functions with a higher optimization level.
  if (!isHighestOptimizationLevel()) {
    OptimizationLevel level = optimizationLevel();
    if (inlineScript->hasIonScript() &&
        (inlineScript->ionScript()->isRecompiling() ||
         inlineScript->ionScript()->optimizationLevel() > level)) {
      return DontInline(inlineScript, "More optimized");
    }
    if (IonOptimizations.levelForScript(inlineScript, nullptr) > level) {
      return DontInline(inlineScript, "Should be more optimized");
    }
  }

  if (TooManyFormalArguments(target->nargs())) {
    return DontInline(inlineScript, "Too many args");
  }

  // We check the number of actual arguments against the maximum number of
  // formal arguments as we do not want to encode all actual arguments in the
  // callerResumePoint.
  if (TooManyFormalArguments(callInfo.argc())) {
    return DontInline(inlineScript, "Too many actual args");
  }

  if (hasCommonInliningPath(inlineScript)) {
    return DontInline(inlineScript, "Common inlining path");
  }

  if (inlineScript->uninlineable()) {
    return DontInline(inlineScript, "Uninlineable script");
  }

  if (inlineScript->needsArgsObj()) {
    return DontInline(inlineScript, "Script that needs an arguments object");
  }

  if (inlineScript->isDebuggee()) {
    return DontInline(inlineScript, "Script is debuggee");
  }

  return InliningDecision_Inline;
}

// Seeds the loop-header phis at |entry| with types that may flow around the
// loop's backedge, so they are known before the loop body is built. Aborts
// only on allocation failure.
AbortReasonOr<Ok> IonBuilder::analyzeNewLoopTypes(MBasicBlock* entry) {
  MOZ_ASSERT(!entry->isDead());
  MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead);

  // The phi inputs at the loop head only reflect types for variables that
  // were present at the start of the loop. If the variable changes to a new
  // type within the loop body, and that type is carried around to the loop
  // head, then we need to know about the new type up front.
  //
  // Since SSA information hasn't been constructed for the loop body yet, we
  // need a separate analysis to pick out the types that might flow around
  // the loop header. This is a best-effort analysis that may either over-
  // or under-approximate the set of such types.
  //
  // Over-approximating the types may lead to inefficient generated code, and
  // under-approximating the types will cause the loop body to be analyzed
  // multiple times as the correct types are deduced (see finishLoop).

  // If we restarted processing of an outer loop then get loop header types
  // directly from the last time we have previously processed this loop. This
  // both avoids repeated work from the bytecode traverse below, and will
  // also pick up types discovered while previously building the loop body.
  bool foundEntry = false;
  for (size_t i = 0; i < loopHeaders_.length(); i++) {
    if (loopHeaders_[i].pc == pc) {
      MBasicBlock* oldEntry = loopHeaders_[i].header;

      // If this block has been discarded, its resume points will have
      // already discarded their operands.
      if (oldEntry->isDead()) {
        loopHeaders_[i].header = entry;
        foundEntry = true;
        break;
      }

      // Copy each old phi's observed type to the corresponding new phi as a
      // backedge type.
      MResumePoint* oldEntryRp = oldEntry->entryResumePoint();
      size_t stackDepth = oldEntryRp->stackDepth();
      for (size_t slot = 0; slot < stackDepth; slot++) {
        MDefinition* oldDef = oldEntryRp->getOperand(slot);
        if (!oldDef->isPhi()) {
          // Non-phi operands are defined before the loop and must match the
          // new entry's slot directly.
          MOZ_ASSERT(oldDef->block()->id() < oldEntry->id());
          MOZ_ASSERT(oldDef == entry->getSlot(slot));
          continue;
        }
        MPhi* oldPhi = oldDef->toPhi();
        MPhi* newPhi = entry->getSlot(slot)->toPhi();
        if (!newPhi->addBackedgeType(alloc(), oldPhi->type(),
                                     oldPhi->resultTypeSet())) {
          return abort(AbortReason::Alloc);
        }
      }

      // Update the most recent header for this loop encountered, in case
      // new types flow to the phis and the loop is processed at least
      // three times.
      loopHeaders_[i].header = entry;
      return Ok();
    }
  }
  if (!foundEntry) {
    // First time we see this loop head: remember it for future restarts.
    if (!loopHeaders_.append(LoopHeader(pc, entry))) {
      return abort(AbortReason::Alloc);
    }
  }

  // Get the start and end bytecode locations.
  BytecodeLocation start(script_, pc);
  BytecodeLocation end(script_, script_->codeEnd());

  // Iterate the bytecode quickly to seed possible types in the loopheader.
  Maybe<BytecodeLocation> last;
  Maybe<BytecodeLocation> earlier;

  for (auto it : BytecodeLocationRange(start, end)) {
    // Stop at this loop's backedge; ops beyond it are outside the loop body.
    if (IsBackedgeForLoopHead(it.toRawBytecode(), pc)) {
      break;
    }
    MOZ_TRY(analyzeNewLoopTypesForLocation(entry, it, last, earlier));
    earlier = last;
    last = mozilla::Some(it);
  }

  return Ok();
}

// Examines a single bytecode location inside a loop body: when |loc| writes a
// local or argument slot, tries to infer the written value's type from the
// preceding instruction(s) (|last_|, |earlier|) and records it as a backedge
// type on the corresponding loop-header phi in |entry|. Aborts only on
// allocation failure.
AbortReasonOr<Ok> IonBuilder::analyzeNewLoopTypesForLocation(
    MBasicBlock* entry, const BytecodeLocation loc,
    const Maybe<BytecodeLocation>& last_,
    const Maybe<BytecodeLocation>& earlier) {
  // Unfortunately Maybe<> cannot be passed as by-value argument so make a copy
  // here.
  Maybe<BytecodeLocation> last = last_;

  // We're only interested in JSOp::SetLocal and JSOp::SetArg.
  uint32_t slot;
  if (loc.is(JSOp::SetLocal)) {
    slot = info().localSlot(loc.local());
  } else if (loc.is(JSOp::SetArg)) {
    slot = info().argSlotUnchecked(loc.arg());
  } else {
    return Ok();
  }
  if (slot >= info().firstStackSlot()) {
    return Ok();
  }

  // Ensure there is a |last| instruction.
  if (!last) {
    return Ok();
  }
  MOZ_ASSERT(last->isValid(script_));

  // Analyze the |last| bytecode instruction to try to determine the type of
  // this local/argument.

  MPhi* phi = entry->getSlot(slot)->toPhi();

  // Helper to record an inferred type on the phi, aborting on OOM.
  auto addPhiBackedgeType =
      [&](MIRType type, TemporaryTypeSet* typeSet) -> AbortReasonOr<Ok> {
    if (!phi->addBackedgeType(alloc(), type, typeSet)) {
      return abort(AbortReason::Alloc);
    }
    return Ok();
  };

  // If it's a JSOp::Pos or JSOp::ToNumeric, use its operand instead.
  if (last->is(JSOp::Pos) || last->is(JSOp::ToNumeric)) {
    MOZ_ASSERT(earlier);
    last = earlier;
  }

  // If the |last| op had a TypeSet, use it.
  if (last->opHasTypeSet()) {
    TemporaryTypeSet* typeSet = bytecodeTypes(last->toRawBytecode());
    if (typeSet->empty()) {
      return Ok();
    }
    return addPhiBackedgeType(typeSet->getKnownMIRType(), typeSet);
  }

  // If the |last| op was a JSOp::GetLocal or JSOp::GetArg, use that slot's
  // type.
  if (last->is(JSOp::GetLocal) || last->is(JSOp::GetArg)) {
    uint32_t slot = (last->is(JSOp::GetLocal))
                        ? info().localSlot(last->local())
                        : info().argSlotUnchecked(last->arg());
    if (slot >= info().firstStackSlot()) {
      return Ok();
    }
    MPhi* otherPhi = entry->getSlot(slot)->toPhi();
    if (!otherPhi->hasBackedgeType()) {
      return Ok();
    }
    return addPhiBackedgeType(otherPhi->type(), otherPhi->resultTypeSet());
  }

  // If the |last| op has a known type (determined statically or from
  // BaselineInspector), use that.
  MIRType type = MIRType::None;
  switch (last->getOp()) {
    case JSOp::Void:
    case JSOp::Undefined:
      type = MIRType::Undefined;
      break;
    case JSOp::GImplicitThis:
      // Implicit |this| is undefined unless the script has a non-syntactic
      // scope.
      if (!script()->hasNonSyntacticScope()) {
        type = MIRType::Undefined;
      }
      break;
    case JSOp::Null:
      type = MIRType::Null;
      break;
    case JSOp::Zero:
    case JSOp::One:
    case JSOp::Int8:
    case JSOp::Int32:
    case JSOp::Uint16:
    case JSOp::Uint24:
    case JSOp::ResumeIndex:
      type = MIRType::Int32;
      break;
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::BitNot:
    case JSOp::Rsh:
    case JSOp::Lsh:
      // Bitwise ops may produce Int32 or BigInt; ask the baseline inspector
      // what was observed.
      type = inspector->expectedResultType(last->toRawBytecode());
      break;
    case JSOp::Ursh:
      // Unsigned right shift is not applicable to BigInts, so we don't need
      // to query the baseline inspector for the possible result types.
      type = MIRType::Int32;
      break;
    case JSOp::False:
    case JSOp::True:
    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
    case JSOp::Not:
    case JSOp::StrictEq:
    case JSOp::StrictNe:
    case JSOp::In:
    case JSOp::Instanceof:
    case JSOp::HasOwn:
      type = MIRType::Boolean;
      break;
    case JSOp::Double:
      type = MIRType::Double;
      break;
    case JSOp::IterNext:
    case JSOp::String:
    case JSOp::ToString:
    case JSOp::Typeof:
    case JSOp::TypeofExpr:
      type = MIRType::String;
      break;
    case JSOp::Symbol:
      type = MIRType::Symbol;
      break;
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::Neg:
    case JSOp::Inc:
    case JSOp::Dec:
      // Arithmetic result types depend on the operands; ask the baseline
      // inspector what was observed.
      type = inspector->expectedResultType(last->toRawBytecode());
      break;
    case JSOp::BigInt:
      type = MIRType::BigInt;
      break;
    default:
      break;
  }

  // No usable type information for this op.
  if (type == MIRType::None) {
    return Ok();
  }

  return addPhiBackedgeType(type, /* typeSet = */ nullptr);
}

// One-time setup before building MIR: freezes the script's type sets,
// ensures allocator ballast and cached Ion data, and redirects this/argument
// types for inlined scripts.
AbortReasonOr<Ok> IonBuilder::init() {
  {
    // Freezing type sets can allocate; do it under a fallible scope so OOM is
    // reported as an abort rather than crashing.
    LifoAlloc::AutoFallibleScope fallibleAllocator(alloc().lifoAlloc());
    if (!JitScript::FreezeTypeSets(constraints(), script(), &thisTypes,
                                   &argTypes, &typeArray)) {
      return abort(AbortReason::Alloc);
    }
  }

  if (!alloc().ensureBallast()) {
    return abort(AbortReason::Alloc);
  }

  {
    JSContext* cx = TlsContext.get();
    RootedScript rootedScript(cx, script());
    if (!rootedScript->jitScript()->ensureHasCachedIonData(cx, rootedScript)) {
      return abort(AbortReason::Error);
    }
  }

  if (inlineCallInfo_) {
    // If we're inlining, the actual this/argument types are not necessarily
    // a subset of the script's observed types. |argTypes| is never accessed
    // for inlined scripts, so we just null it.
    thisTypes = inlineCallInfo_->thisArg()->resultTypeSet();
    argTypes = nullptr;
  }

  bytecodeTypeMap = script()->jitScript()->bytecodeTypeMap();

  return Ok();
}

// Builds the MIR graph for the outermost (non-inlined) script: creates the
// entry block, initializes parameters/locals/env-chain slots, then traverses
// the bytecode and performs post-traversal bookkeeping. Aborts on OOM or
// unsupported constructs.
AbortReasonOr<Ok> IonBuilder::build() {
  // Spew IC info for inlined script, but only when actually compiling,
  // not when analyzing it.
#ifdef JS_STRUCTURED_SPEW
  if (!info().isAnalysis()) {
    JitSpewBaselineICStats(script(), "To-Be-Compiled");
  }
#endif

  MOZ_TRY(init());

  // The JitScript-based inlining heuristics only affect the highest
  // optimization level. Other levels do almost no inlining and we don't want
  // to
  // overwrite data from the highest optimization tier.
  if (isHighestOptimizationLevel()) {
    script()->jitScript()->resetMaxInliningDepth();
  }

  MBasicBlock* entry;
  MOZ_TRY_VAR(entry, newBlock(info().firstStackSlot(), pc));
  MOZ_TRY(setCurrentAndSpecializePhis(entry));

#ifdef JS_JITSPEW
  if (info().isAnalysis()) {
    JitSpew(JitSpew_IonScripts, "Analyzing script %s:%u:%u (%p) %s",
            script()->filename(), script()->lineno(), script()->column(),
            (void*)script(), AnalysisModeString(info().analysisMode()));
  } else {
    JitSpew(JitSpew_IonScripts,
            "%sompiling script %s:%u:%u (%p) (warmup-counter=%" PRIu32
            ", level=%s)",
            (script()->hasIonScript() ? "Rec" : "C"), script()->filename(),
            script()->lineno(), script()->column(), (void*)script(),
            script()->getWarmUpCount(),
            OptimizationLevelString(optimizationLevel()));
  }
#endif

  MOZ_TRY(initParameters());
  initLocals();

  // Initialize something for the env chain. We can bail out before the
  // start instruction, but the snapshot is encoded *at* the start
  // instruction, which means generating any code that could load into
  // registers is illegal.
  MInstruction* env = MConstant::New(alloc(), UndefinedValue());
  current->add(env);
  current->initSlot(info().environmentChainSlot(), env);

  // Initialize the return value.
  MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
  current->add(returnValue);
  current->initSlot(info().returnValueSlot(), returnValue);

  // Initialize the arguments object slot to undefined if necessary.
  if (info().hasArguments()) {
    MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
    current->add(argsObj);
    current->initSlot(info().argsObjSlot(), argsObj);
  }

  // Emit the start instruction, so we can begin real instructions.
  current->add(MStart::New(alloc()));

  // Guard against over-recursion. Do this before we start unboxing, since
  // this will create an OSI point that will read the incoming argument
  // values, which is nice to do before their last real use, to minimize
  // register/stack pressure.
  MCheckOverRecursed* check = MCheckOverRecursed::New(alloc());
  current->add(check);
  MResumePoint* entryRpCopy =
      MResumePoint::Copy(alloc(), current->entryResumePoint());
  if (!entryRpCopy) {
    return abort(AbortReason::Alloc);
  }
  check->setResumePoint(entryRpCopy);

  // Parameters have been checked to correspond to the typeset, now we unbox
  // what we can in an infallible manner.
  MOZ_TRY(rewriteParameters());

  // It's safe to start emitting actual IR, so now build the env chain.
  MOZ_TRY(initEnvironmentChain());
  if (info().needsArgsObj()) {
    initArgumentsObject();
  }

  // The type analysis phase attempts to insert unbox operations near
  // definitions of values. It also attempts to replace uses in resume points
  // with the narrower, unboxed variants. However, we must prevent this
  // replacement from happening on values in the entry snapshot. Otherwise we
  // could get this:
  //
  //       v0 = MParameter(0)
  //       v1 = MParameter(1)
  //       --   ResumePoint(v2, v3)
  //       v2 = Unbox(v0, INT32)
  //       v3 = Unbox(v1, INT32)
  //
  // So we attach the initial resume point to each parameter, which the type
  // analysis explicitly checks (this is the same mechanism used for
  // effectful operations).
  for (uint32_t i = 0; i < info().endArgSlot(); i++) {
    MInstruction* ins = current->getEntrySlot(i)->toInstruction();
    if (ins->type() != MIRType::Value) {
      continue;
    }

    MResumePoint* entryRpCopy =
        MResumePoint::Copy(alloc(), current->entryResumePoint());
    if (!entryRpCopy) {
      return abort(AbortReason::Alloc);
    }
    ins->setResumePoint(entryRpCopy);
  }

#ifdef DEBUG
  // lazyArguments should never be accessed in |argsObjAliasesFormals|
  // scripts.
  if (info().hasArguments() && !info().argsObjAliasesFormals()) {
    hasLazyArguments_ = true;
  }
#endif

  insertRecompileCheck(pc);

  auto clearLastPriorResumePoint = mozilla::MakeScopeExit([&] {
    // Discard unreferenced & pre-allocated resume points.
    replaceMaybeFallbackFunctionGetter(nullptr);
  });

  // Main loop: translate every bytecode op into MIR.
  MOZ_TRY(traverseBytecode());

  // Record the total inlined bytecode for the inlining heuristics, but only
  // at the highest optimization level (see the reset above).
  if (isHighestOptimizationLevel() &&
      inlinedBytecodeLength_ > script_->jitScript()->inlinedBytecodeLength()) {
    script_->jitScript()->setInlinedBytecodeLength(inlinedBytecodeLength_);
  }

  MOZ_TRY(maybeAddOsrTypeBarriers());

  if (!MPhi::markIteratorPhis(iterators_)) {
    return abort(AbortReason::Alloc);
  }

  if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
    return abort(AbortReason::PreliminaryObjects);
  }

  MOZ_ASSERT(loopDepth_ == 0);
  MOZ_ASSERT(loopStack_.empty());
  return Ok();
}

// Build MIR for a script being inlined at a call site.
//
// |callerBuilder| is the IonBuilder compiling the calling script,
// |callerResumePoint| is the resume point at the call site, and |callInfo|
// provides the callee function, |this| value, and argument definitions.
// On success, the inlined graph's entry block has been wired up as a
// successor of the caller's current block.
AbortReasonOr<Ok> IonBuilder::buildInline(IonBuilder* callerBuilder,
                                          MResumePoint* callerResumePoint,
                                          CallInfo& callInfo) {
  inlineCallInfo_ = &callInfo;

  // Spew IC info for inlined script, but only when actually compiling,
  // not when analyzing it.
#ifdef JS_STRUCTURED_SPEW
  if (!info().isAnalysis()) {
    JitSpewBaselineICStats(script(), "To-Be-Inlined");
  }
#endif

  MOZ_TRY(init());

  JitSpew(JitSpew_IonScripts, "Inlining script %s:%u:%u (%p)",
          script()->filename(), script()->lineno(), script()->column(),
          (void*)script());

  // Record the caller so resume points and bailouts can walk back out to
  // the calling frame.
  callerBuilder_ = callerBuilder;
  callerResumePoint_ = callerResumePoint;

  // Propagate the caller's failure flags, so the inlined code does not
  // re-attempt optimizations that already failed in the caller.
  if (callerBuilder->failedBoundsCheck_) {
    failedBoundsCheck_ = true;
  }

  if (callerBuilder->failedShapeGuard_) {
    failedShapeGuard_ = true;
  }

  if (callerBuilder->failedLexicalCheck_) {
    failedLexicalCheck_ = true;
  }

  // Generate single entrance block.
  MBasicBlock* entry;
  MOZ_TRY_VAR(entry, newBlock(info().firstStackSlot(), pc));
  MOZ_TRY(setCurrentAndSpecializePhis(entry));

  current->setCallerResumePoint(callerResumePoint);

  // Connect the entrance block to the last block in the caller's graph.
  MBasicBlock* predecessor = callerBuilder->current;
  MOZ_ASSERT(predecessor == callerResumePoint->block());

  predecessor->end(MGoto::New(alloc(), current));
  if (!current->addPredecessorWithoutPhis(predecessor)) {
    return abort(AbortReason::Alloc);
  }

  // Initialize env chain slot to Undefined.  It's set later by
  // |initEnvironmentChain|.
  MInstruction* env = MConstant::New(alloc(), UndefinedValue());
  current->add(env);
  current->initSlot(info().environmentChainSlot(), env);

  // Initialize |return value| slot.
  MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
  current->add(returnValue);
  current->initSlot(info().returnValueSlot(), returnValue);

  // Initialize |arguments| slot.
  if (info().hasArguments()) {
    MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
    current->add(argsObj);
    current->initSlot(info().argsObjSlot(), argsObj);
  }

  // Initialize |this| slot with the value supplied by the call site.
  current->initSlot(info().thisSlot(), callInfo.thisArg());

  JitSpew(JitSpew_Inlining, "Initializing %u arg slots", info().nargs());

  // NB: Ion does not inline functions which |needsArgsObj|.  So using argSlot()
  // instead of argSlotUnchecked() below is OK
  MOZ_ASSERT(!info().needsArgsObj());

  // Initialize actually set arguments.
  uint32_t existing_args = std::min<uint32_t>(callInfo.argc(), info().nargs());
  for (size_t i = 0; i < existing_args; ++i) {
    MDefinition* arg = callInfo.getArg(i);
    current->initSlot(info().argSlot(i), arg);
  }

  // Pass Undefined for missing arguments
  for (size_t i = callInfo.argc(); i < info().nargs(); ++i) {
    MConstant* arg = MConstant::New(alloc(), UndefinedValue());
    current->add(arg);
    current->initSlot(info().argSlot(i), arg);
  }

  JitSpew(JitSpew_Inlining, "Initializing %u locals", info().nlocals());

  initLocals();

  JitSpew(JitSpew_Inlining,
          "Inline entry block MResumePoint %p, %u stack slots",
          (void*)current->entryResumePoint(),
          current->entryResumePoint()->stackDepth());

  // +2 for the env chain and |this|, maybe another +1 for arguments object
  // slot.
  MOZ_ASSERT(current->entryResumePoint()->stackDepth() == info().totalSlots());

#ifdef DEBUG
  if (script_->argumentsHasVarBinding()) {
    hasLazyArguments_ = true;
  }
#endif

  insertRecompileCheck(pc);

  // Initialize the env chain now that all resume points operands are
  // initialized.
  MOZ_TRY(initEnvironmentChain(callInfo.fun()));

  auto clearLastPriorResumePoint = mozilla::MakeScopeExit([&] {
    // Discard unreferenced & pre-allocated resume points.
    replaceMaybeFallbackFunctionGetter(nullptr);
  });

  MOZ_TRY(traverseBytecode());

  MOZ_ASSERT(iterators_.empty(), "Iterators should be added to outer builder");

  if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
    return abort(AbortReason::PreliminaryObjects);
  }

  return Ok();
}

// Replace the boxed parameter stored at |slotIdx| with a definitely-typed
// definition when type inference provides a known MIRType.
void IonBuilder::rewriteParameter(uint32_t slotIdx, MDefinition* param) {
  MOZ_ASSERT(param->isParameter() || param->isGetArgumentsObjectArg());

  TemporaryTypeSet* paramTypes = param->resultTypeSet();
  MDefinition* replacement =
      ensureDefiniteType(param, paramTypes->getKnownMIRType());

  // Note that the original MParameter is deliberately kept in the entry
  // resume point. The argument checks (which can bail out) still apply
  // unless the call site proves otherwise, so we might end up with:
  //   v0 = Parameter(0)
  //   v1 = Unbox(v0, INT32)
  //   --   ResumePoint(v0)
  //
  // Capturing v1 instead of v0 in that initial resume point would be
  // invalid, as usual.
  if (replacement != param) {
    current->rewriteSlot(slotIdx, replacement);
  }
}

// Apply Type Inference information to parameters early on, unboxing them if
// they have a definitive type. The actual guards are emitted explicitly by
// the code generator as part of the function prologue.
AbortReasonOr<Ok> IonBuilder::rewriteParameters() {
  MOZ_ASSERT(info().environmentChainSlot() == 0);

  // Only function scripts have parameters; skip everything else.
  if (!info().funMaybeLazy()) {
    return Ok();
  }

  const uint32_t firstSlot = info().startArgSlot();
  const uint32_t limitSlot = info().endArgSlot();
  for (uint32_t slot = firstSlot; slot < limitSlot; slot++) {
    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }
    rewriteParameter(slot, current->getSlot(slot));
  }

  return Ok();
}

// Create MParameter instructions for |this| and each formal argument, and
// store them in the corresponding frame slots of the entry block.
AbortReasonOr<Ok> IonBuilder::initParameters() {
  // Non-function scripts have no |this| or formals to set up.
  if (!info().funMaybeLazy()) {
    return Ok();
  }

  // When doing OSR from a frame that initially ran in the interpreter
  // without accumulating type information, seed the |this| type set from
  // the value observed in that baseline frame.
  if (thisTypes->empty() && baselineFrame_) {
    TypeSet::Type observedThis = baselineFrame_->thisType;
    if (observedThis.isSingletonUnchecked()) {
      checkNurseryObject(observedThis.singleton());
    }
    thisTypes->addType(observedThis, alloc_->lifoAlloc());
  }

  MParameter* thisParam =
      MParameter::New(alloc(), MParameter::THIS_SLOT, thisTypes);
  current->add(thisParam);
  current->initSlot(info().thisSlot(), thisParam);

  for (uint32_t argIndex = 0; argIndex < info().nargs(); argIndex++) {
    TemporaryTypeSet* argTypeSet = &argTypes[argIndex];

    // Likewise seed argument types from the baseline frame, but only when
    // the script never writes to its arguments.
    if (argTypeSet->empty() && baselineFrame_ &&
        !script_->jitScript()->modifiesArguments()) {
      TypeSet::Type observedArg = baselineFrame_->argTypes[argIndex];
      if (observedArg.isSingletonUnchecked()) {
        checkNurseryObject(observedArg.singleton());
      }
      argTypeSet->addType(observedArg, alloc_->lifoAlloc());
    }

    MParameter* argParam =
        MParameter::New(alloc().fallible(), argIndex, argTypeSet);
    if (!argParam) {
      return abort(AbortReason::Alloc);
    }
    current->add(argParam);
    current->initSlot(info().argSlotUnchecked(argIndex), argParam);
  }

  return Ok();
}

void IonBuilder::initLocals() {
  // Initialize all frame slots to undefined. Lexical bindings are temporal
  // dead zoned in bytecode.

  if (info().nlocals() == 0) {
    return;
  }

  MConstant* undef = MConstant::New(alloc(), UndefinedValue());
  current->add(undef);

  for (uint32_t i = 0; i < info().nlocals(); i++) {
    current->initSlot(info().localSlot(i), undef);
  }
}

bool IonBuilder::usesEnvironmentChain() {
  // Delegate to the flag stored on the script's JitScript.
  auto* jitScript = script()->jitScript();
  return jitScript->usesEnvironmentChain();
}

// Emit MIR computing the environment chain for the current script and store
// it in the environment chain slot. |callee| is the callee definition when
// inlining; when nullptr, an MCallee instruction is created for function
// scripts. Can abort (e.g. extra body var environments are unsupported).
AbortReasonOr<Ok> IonBuilder::initEnvironmentChain(MDefinition* callee) {
  MInstruction* env = nullptr;

  // If the script doesn't use the envchain, then it's already initialized
  // from earlier.  However, always make a env chain when |needsArgsObj| is true
  // for the script, since arguments object construction requires the env chain
  // to be passed in.
  if (!info().needsArgsObj() && !usesEnvironmentChain()) {
    return Ok();
  }

  // The env chain is only tracked in scripts that have NAME opcodes which
  // will try to access the env. For other scripts, the env instructions
  // will be held live by resume points and code will still be generated for
  // them, so just use a constant undefined value.

  if (JSFunction* fun = info().funMaybeLazy()) {
    // Function script: derive the env from the callee's stored environment.
    if (!callee) {
      MCallee* calleeIns = MCallee::New(alloc());
      current->add(calleeIns);
      callee = calleeIns;
    }
    env = MFunctionEnvironment::New(alloc(), callee);
    current->add(env);

    // This reproduce what is done in CallObject::createForFunction. Skip
    // this for the arguments analysis, as the script might not have a
    // baseline script with template objects yet.
    if (fun->needsSomeEnvironmentObject() &&
        info().analysisMode() != Analysis_ArgumentsUsage) {
      if (fun->needsNamedLambdaEnvironment()) {
        env = createNamedLambdaObject(callee, env);
      }

      // TODO: Parameter expression-induced extra var environment not
      // yet handled.
      if (fun->needsExtraBodyVarEnvironment()) {
        return abort(AbortReason::Disable, "Extra var environment unsupported");
      }

      if (fun->needsCallObject()) {
        MOZ_TRY_VAR(env, createCallObject(callee, env));
      }
    }
  } else if (ModuleObject* module = info().module()) {
    // Modules use a pre-created env object.
    env = constant(ObjectValue(module->initialEnvironment()));
  } else {
    // For global scripts without a non-syntactic global scope, the env
    // chain is the global lexical env.
    MOZ_ASSERT(!script()->isForEval());
    MOZ_ASSERT(!script()->hasNonSyntacticScope());
    env = constant(ObjectValue(script()->global().lexicalEnvironment()));
  }

  // Update the environment slot from UndefinedValue only after initial
  // environment is created so that bailout doesn't see a partial env.
  // See: |InitFromBailout|
  current->setEnvironmentChain(env);
  return Ok();
}

void IonBuilder::initArgumentsObject() {
  JitSpew(JitSpew_IonMIR,
          "%s:%u:%u - Emitting code to initialize arguments object! block=%p",
          script()->filename(), script()->lineno(), script()->column(),
          current);
  MOZ_ASSERT(info().needsArgsObj());

  bool mapped = script()->hasMappedArgsObj();
  ArgumentsObject* templateObj =
      script()->realm()->maybeArgumentsTemplateObject(mapped);

  MCreateArgumentsObject* argsObj = MCreateArgumentsObject::New(
      alloc(), current->environmentChain(), templateObj);
  current->add(argsObj);
  current->setArgumentsObject(argsObj);
}

// Insert type barriers and/or unboxes in the OSR block so the value stored
// at |slot| conforms to the loop-header phi's |type| and |typeSet|.
// |*def_| is updated in place to the rewritten definition; on return it is
// the definition stored in the OSR block's |slot|.
AbortReasonOr<Ok> IonBuilder::addOsrValueTypeBarrier(
    uint32_t slot, MInstruction** def_, MIRType type,
    TemporaryTypeSet* typeSet) {
  MInstruction*& def = *def_;
  MBasicBlock* osrBlock = def->block();

  // Clear bogus type information added in newOsrPreheader().
  def->setResultType(MIRType::Value);
  def->setResultTypeSet(nullptr);

  if (typeSet && !typeSet->unknown()) {
    // A known (non-unknown) type set: guard the incoming value with a
    // type barrier and use the barrier as the slot's definition.
    MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
    osrBlock->insertBefore(osrBlock->lastIns(), barrier);
    osrBlock->rewriteSlot(slot, barrier);
    def = barrier;

    // If the TypeSet is more precise than |type|, adjust |type| for the
    // code below.
    if (type == MIRType::Value) {
      type = barrier->type();
    }
  } else if (type == MIRType::Null || type == MIRType::Undefined ||
             type == MIRType::MagicOptimizedArguments) {
    // No unbox instruction will be added below, so check the type by
    // adding a type barrier for a singleton type set.
    TypeSet::Type ntype = TypeSet::PrimitiveType(type);
    LifoAlloc* lifoAlloc = alloc().lifoAlloc();
    typeSet = lifoAlloc->new_<TemporaryTypeSet>(lifoAlloc, ntype);
    if (!typeSet) {
      return abort(AbortReason::Alloc);
    }
    MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
    osrBlock->insertBefore(osrBlock->lastIns(), barrier);
    osrBlock->rewriteSlot(slot, barrier);
    def = barrier;
  }

  // The following guards aren't directly linked into the usedef chain,
  // however in the OSR block we need to ensure they're not optimized out, so we
  // mark them as implicitly used.
  switch (type) {
    case MIRType::Null:
    case MIRType::Undefined:
    case MIRType::MagicOptimizedArguments:
      def->setImplicitlyUsed();
      break;
    default:
      break;
  }

  // Unbox the OSR value to the type expected by the loop header.
  //
  // The only specialized types that can show up here are MIRTypes with a
  // corresponding TypeSet::Type because NewBaselineFrameInspector and
  // newPendingLoopHeader use TypeSet::Type for Values from the BaselineFrame.
  // This means magic values other than MagicOptimizedArguments are represented
  // as UnknownType() and MIRType::Value. See also TypeSet::IsUntrackedValue.
  switch (type) {
    case MIRType::Boolean:
    case MIRType::Int32:
    case MIRType::Double:
    case MIRType::String:
    case MIRType::Symbol:
    case MIRType::BigInt:
    case MIRType::Object:
      // Concrete types: add a fallible unbox unless the value already has
      // the expected type.
      if (type != def->type()) {
        MUnbox* unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
        osrBlock->insertBefore(osrBlock->lastIns(), unbox);
        osrBlock->rewriteSlot(slot, unbox);
        def = unbox;
      }
      break;

    case MIRType::Value:
      // Nothing to do.
      break;

    case MIRType::Null: {
      // Value was checked above via the singleton type barrier; replace it
      // with the corresponding constant.
      MConstant* c = MConstant::New(alloc(), NullValue());
      osrBlock->insertBefore(osrBlock->lastIns(), c);
      osrBlock->rewriteSlot(slot, c);
      def = c;
      break;
    }

    case MIRType::Undefined: {
      MConstant* c = MConstant::New(alloc(), UndefinedValue());
      osrBlock->insertBefore(osrBlock->lastIns(), c);
      osrBlock->rewriteSlot(slot, c);
      def = c;
      break;
    }

    case MIRType::MagicOptimizedArguments: {
      MOZ_ASSERT(hasLazyArguments_);
      MConstant* lazyArg =
          MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
      osrBlock->insertBefore(osrBlock->lastIns(), lazyArg);
      osrBlock->rewriteSlot(slot, lazyArg);
      def = lazyArg;
      break;
    }

    default:
      MOZ_CRASH("Unexpected type");
  }

  MOZ_ASSERT(def == osrBlock->getSlot(slot));
  return Ok();
}

// After the whole graph has been built, check every OSR value against the
// final type of the corresponding loop-header phi by inserting unboxes and
// type barriers in the OSR block, and update the preheader phis to match.
// No-op when this compilation has no OSR entry point.
AbortReasonOr<Ok> IonBuilder::maybeAddOsrTypeBarriers() {
  if (!info().osrPc()) {
    return Ok();
  }

  // The loop has successfully been processed, and the loop header phis
  // have their final type. Add unboxes and type barriers in the OSR
  // block to check that the values have the appropriate type, and update
  // the types in the preheader.

  MBasicBlock* osrBlock = graph().osrBlock();
  if (!osrBlock) {
    // Because IonBuilder does not compile catch blocks, it's possible to
    // end up without an OSR block if the OSR pc is only reachable via a
    // break-statement inside the catch block. For instance:
    //
    //   for (;;) {
    //       try {
    //           throw 3;
    //       } catch(e) {
    //           break;
    //       }
    //   }
    //   while (..) { } // <= OSR here, only reachable via catch block.
    //
    // For now we just abort in this case.
    MOZ_ASSERT(graph().hasTryBlock());
    return abort(AbortReason::Disable,
                 "OSR block only reachable through catch block");
  }

  // Graph shape: osrBlock -> preheader -> header, with the OSR block being
  // the preheader's predecessor at OSR_PHI_POSITION.
  MBasicBlock* preheader = osrBlock->getSuccessor(0);
  MBasicBlock* header = preheader->getSuccessor(0);
  static const size_t OSR_PHI_POSITION = 1;
  MOZ_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);

  MResumePoint* headerRp = header->entryResumePoint();
  size_t stackDepth = headerRp->stackDepth();
  MOZ_ASSERT(stackDepth == osrBlock->stackDepth());
  for (uint32_t slot = info().startArgSlot(); slot < stackDepth; slot++) {
    // Aliased slots are never accessed, since they need to go through
    // the callobject. The typebarriers are added there and can be
    // discarded here.
    if (info().isSlotAliased(slot)) {
      continue;
    }

    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

    MInstruction* def = osrBlock->getSlot(slot)->toInstruction();
    MPhi* preheaderPhi = preheader->getSlot(slot)->toPhi();
    MPhi* headerPhi = headerRp->getOperand(slot)->toPhi();

    // The header phi's final type is the contract the OSR value must meet.
    MIRType type = headerPhi->type();
    TemporaryTypeSet* typeSet = headerPhi->resultTypeSet();

    MOZ_TRY(addOsrValueTypeBarrier(slot, &def, type, typeSet));

    // Feed the (possibly rewritten) OSR definition into the preheader phi
    // and propagate the header phi's type information to it.
    preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
    preheaderPhi->setResultType(type);
    preheaderPhi->setResultTypeSet(typeSet);
  }

  return Ok();
}

#ifdef DEBUG
// In debug builds, after compiling a bytecode op, this class is used to check
// that all values popped by this opcode either:
//
//   (1) Have the ImplicitlyUsed flag set on them.
//   (2) Have more uses than before compiling this op (the value is
//       used as operand of a new MIR instruction).
//
// This is used to catch problems where IonBuilder pops a value without
// adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
class MOZ_RAII PoppedValueUseChecker {
  // Values the op at |pc_| will pop, captured by init() before the op is
  // compiled.
  Vector<MDefinition*, 4, SystemAllocPolicy> popped_;
  // Use counts of those values at init() time, used to detect whether the
  // compiled op added any new SSA uses.
  Vector<size_t, 4, SystemAllocPolicy> poppedUses_;
  MBasicBlock* current_;
  jsbytecode* pc_;

 public:
  PoppedValueUseChecker(MBasicBlock* current, jsbytecode* pc)
      : current_(current), pc_(pc) {}

  // Snapshot the to-be-popped stack values and their current use counts.
  // Returns false on OOM.
  MOZ_MUST_USE bool init() {
    unsigned nuses = GetUseCount(pc_);

    for (unsigned i = 0; i < nuses; i++) {
      MDefinition* def = current_->peek(-int32_t(i + 1));
      if (!popped_.append(def) || !poppedUses_.append(def->defUseCount())) {
        return false;
      }
    }

    return true;
  }

  // Called after the op at |pc_| has been compiled; asserts that every
  // popped value is either implicitly used or gained at least one SSA use.
  void checkAfterOp() {
    JSOp op = JSOp(*pc_);

    // Don't require SSA uses for values popped by these ops.
    switch (op) {
      case JSOp::Pop:
      case JSOp::PopN:
      case JSOp::DupAt:
      case JSOp::Dup:
      case JSOp::Dup2:
      case JSOp::Pick:
      case JSOp::Unpick:
      case JSOp::Swap:
      case JSOp::SetArg:
      case JSOp::SetLocal:
      case JSOp::InitLexical:
      case JSOp::SetRval:
      case JSOp::Void:
        // Basic stack/local/argument management opcodes.
        return;

      case JSOp::Case:
      case JSOp::Default:
        // These ops have to pop the switch value when branching but don't
        // actually use it.
        return;

      default:
        break;
    }

    for (size_t i = 0; i < popped_.length(); i++) {
      switch (op) {
        case JSOp::Pos:
        case JSOp::ToNumeric:
        case JSOp::ToId:
        case JSOp::ToString:
          // These ops may leave their input on the stack without setting
          // the ImplicitlyUsed flag. If this value will be popped immediately,
          // we may replace it with |undefined|, but the difference is
          // not observable.
          MOZ_ASSERT(i == 0);
          if (current_->peek(-1) == popped_[0]) {
            break;
          }
          [[fallthrough]];

        default:
          MOZ_ASSERT(popped_[i]->isImplicitlyUsed() ||
                     // First value popped by JSOp::EndIter is not used at all,
                     // it's similar to JSOp::Pop above.
                     (op == JSOp::EndIter && i == 0) ||
                     popped_[i]->defUseCount() > poppedUses_[i]);
          break;
      }
    }
  }
};
#endif

// Main translation loop: walk the script's bytecode from |pc| to the end,
// dispatching each op through inspectOpcode(). Returns when the last op has
// been processed or an abort occurs.
AbortReasonOr<Ok> IonBuilder::traverseBytecode() {
  // See the "Control Flow handling in IonBuilder" comment in IonBuilder.h for
  // more information.

  MOZ_TRY(startTraversingBlock(current));

  const jsbytecode* const codeEnd = script()->codeEnd();

  while (true) {
    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

    // Skip unreachable ops (for example code after a 'return' or 'throw') until
    // we get to the next jump target.
    if (hasTerminatedBlock()) {
      while (!BytecodeIsJumpTarget(JSOp(*pc))) {
        // Finish any "broken" loops with an unreachable backedge. For example:
        //
        //   do {
        //     ...
        //     return;
        //     ...
        //   } while (x);
        //
        // This loop never actually loops.
        if (!loopStack_.empty() &&
            IsBackedgeForLoopHead(pc, loopStack_.back().header()->pc())) {
          MOZ_ASSERT(loopDepth_ > 0);
          loopDepth_--;
          loopStack_.popBack();
        }

        pc = GetNextPc(pc);
        if (pc == codeEnd) {
          return Ok();
        }
      }
    }

#ifdef DEBUG
    // In debug builds, verify that popped values are actually consumed
    // (see PoppedValueUseChecker above).
    PoppedValueUseChecker useChecker(current, pc);
    if (!useChecker.init()) {
      return abort(AbortReason::Alloc);
    }
#endif

    MOZ_ASSERT(script()->containsPC(pc));
    nextpc = GetNextPc(pc);

    // Nothing in inspectOpcode() is allowed to advance the pc.
    JSOp op = JSOp(*pc);
    bool restarted = false;
    MOZ_TRY(inspectOpcode(op, &restarted));

#ifdef DEBUG
    if (!restarted) {
      useChecker.checkAfterOp();
    }
#endif

    if (nextpc == codeEnd) {
      return Ok();
    }

    pc = nextpc;
    MOZ_ASSERT(script()->containsPC(pc));

    if (!hasTerminatedBlock()) {
      current->updateTrackedSite(bytecodeSite(pc));
    }
  }

  // The iloop above never breaks, so this point is unreachable.  Don't add code
  // here, or you'll trigger compile errors about unreachable code with some
  // compilers!
}

// Prepare |block| for bytecode traversal: set its loop depth and hit count,
// specialize its phis, and append it to the graph's block list.
AbortReasonOr<Ok> IonBuilder::startTraversingBlock(MBasicBlock* block) {
  block->setLoopDepth(loopDepth_);

  // Record the script's hit count for this pc if counts are available.
  if (block->pc() && script()->hasScriptCounts()) {
    block->setHitCount(script()->getHitCount(block->pc()));
  }

  // If our sole predecessor has this block as its only successor, move the
  // predecessor directly in front of this block. Skip this optimization
  // when that predecessor carries an outer resume point (i.e. it is not
  // part of the same function), since inlining failures may require
  // backtracking.
  if (block->numPredecessors() == 1) {
    MBasicBlock* solePred = block->getPredecessor(0);
    if (solePred->numSuccessors() == 1 && !solePred->outerResumePoint()) {
      graph().removeBlockFromList(solePred);
      graph().addBlock(solePred);
    }
  }

  MOZ_TRY(setCurrentAndSpecializePhis(block));

  graph().addBlock(block);

  return Ok();
}

// Compile JSOp::Goto: either a loop backedge or a plain forward jump.
AbortReasonOr<Ok> IonBuilder::jsop_goto(bool* restarted) {
  MOZ_ASSERT(JSOp(*pc) == JSOp::Goto);

  if (IsBackedgePC(pc)) {
    return visitBackEdge(restarted);
  }

  return visitGoto(pc + GET_JUMP_OFFSET(pc));
}

// Record |edge| as a pending control-flow edge to bytecode |target|, to be
// resolved when the target's block is created.
AbortReasonOr<Ok> IonBuilder::addPendingEdge(const PendingEdge& edge,
                                             jsbytecode* target) {
  // If we already track edges for this target, just append to the list.
  PendingEdgesMap::AddPtr ptr = pendingEdges_.lookupForAdd(target);
  if (ptr) {
    if (!ptr->value().append(edge)) {
      return abort(AbortReason::Alloc);
    }
    return Ok();
  }

  // Otherwise start a fresh list; the first append lands in inline storage
  // and cannot fail.
  PendingEdges newEdges;
  static_assert(PendingEdges::InlineLength >= 1,
                "Appending one element should be infallible");
  MOZ_ALWAYS_TRUE(newEdges.append(edge));

  if (!pendingEdges_.add(ptr, target, std::move(newEdges))) {
    return abort(AbortReason::Alloc);
  }
  return Ok();
}

// Compile an unconditional forward jump to |target|. The MGoto's successor
// is left null and is patched once the target block exists.
AbortReasonOr<Ok> IonBuilder::visitGoto(jsbytecode* target) {
  MGoto* jump = MGoto::New(alloc(), nullptr);
  current->end(jump);
  MOZ_TRY(addPendingEdge(PendingEdge::NewGoto(current), target));
  setTerminatedBlock();
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::jsop_loophead() {
  // All loops have the following bytecode structure:
  //
  //    LoopHead
  //    ...
  //    IfNe/Goto to LoopHead

  MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead);

  if (hasTerminatedBlock()) {
    // The whole loop is unreachable.
    return Ok();
  }

  // If this is the OSR entry point, splice in a preheader block that
  // receives the values coming in from the baseline frame.
  bool osr = pc == info().osrPc();
  if (osr) {
    MBasicBlock* preheader;
    MOZ_TRY_VAR(preheader, newOsrPreheader(current, pc));
    current->end(MGoto::New(alloc(), preheader));
    MOZ_TRY(setCurrentAndSpecializePhis(preheader));
  }

  // The header is "pending" because its backedge is only hooked up later,
  // in visitBackEdge.
  loopDepth_++;
  MBasicBlock* header;
  MOZ_TRY_VAR(header, newPendingLoopHeader(current, pc, osr));
  current->end(MGoto::New(alloc(), header));

  // Track the loop so back edges and broken loops can find their header.
  if (!loopStack_.emplaceBack(header)) {
    return abort(AbortReason::Alloc);
  }

  // NOTE(review): presumably this pre-computes types for the new loop's
  // header phis; confirm against analyzeNewLoopTypes.
  MOZ_TRY(analyzeNewLoopTypes(header));

  MOZ_TRY(startTraversingBlock(header));
  return emitLoopHeadInstructions(pc);
}

// Handle a JSOp::Goto that jumps backwards to the current loop's header.
// Hooks up the backedge, which computes the header phis and propagates
// their types through the loop. If new types appeared on the backedge the
// loop body is discarded and rebuilt, and |*restarted| is set to true.
AbortReasonOr<Ok> IonBuilder::visitBackEdge(bool* restarted) {
  MOZ_ASSERT(loopDepth_ > 0);
  loopDepth_--;

  MBasicBlock* header = loopStack_.back().header();
  current->end(MGoto::New(alloc(), header));

  // Compute phis in the loop header and propagate them throughout the loop,
  // including the successor.
  AbortReason r = header->setBackedge(alloc(), current);
  switch (r) {
    case AbortReason::NoAbort:
      // Loop successfully closed; pop it off the loop stack.
      loopStack_.popBack();
      setTerminatedBlock();
      return Ok();

    case AbortReason::Disable:
      // If there are types for variables on the backedge that were not
      // present at the original loop header, then uses of the variables'
      // phis may have generated incorrect nodes. The new types have been
      // incorporated into the header phis, so remove all blocks for the
      // loop body and restart with the new types.
      *restarted = true;
      MOZ_TRY(restartLoop(header));
      return Ok();

    default:
      return abort(r);
  }
}

// Emit the per-iteration instructions for a JSOp::LoopHead: an interrupt
// check plus a recompile check for this pc.
AbortReasonOr<Ok> IonBuilder::emitLoopHeadInstructions(jsbytecode* pc) {
  MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead);

  current->add(MInterruptCheck::New(alloc()));
  insertRecompileCheck(pc);

  return Ok();
}

AbortReasonOr<Ok> IonBuilder::inspectOpcode(JSOp op, bool* restarted) {
  // Add not yet implemented opcodes at the bottom of the switch!
  switch (op) {
    case JSOp::NopDestructuring:
    case JSOp::TryDestructuring:
    case JSOp::Lineno:
    case JSOp::Nop:
      return Ok();

    case JSOp::LoopHead:
      return jsop_loophead();

    case JSOp::Undefined:
      // If this ever changes, change what JSOp::GImplicitThis does too.
      pushConstant(UndefinedValue());
      return Ok();

    case JSOp::Try:
      return visitTry();

    case JSOp::Default:
      current->pop();
      return visitGoto(pc + GET_JUMP_OFFSET(pc));

    case JSOp::Goto:
      return jsop_goto(restarted);

    case JSOp::IfNe:
    case JSOp::IfEq:
    case JSOp::And:
    case JSOp::Or:
    case JSOp::Case:
      return visitTest(op, restarted);

    case JSOp::Coalesce:
      return jsop_coalesce();

    case JSOp::Return:
    case JSOp::RetRval:
      return visitReturn(op);

    case JSOp::Throw:
      return visitThrow();

    case JSOp::JumpTarget:
      return visitJumpTarget(op);

    case JSOp::TableSwitch:
      return visitTableSwitch();

    case JSOp::BitNot:
      return jsop_bitnot();

    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh:
      return jsop_bitop(op);

    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
      return jsop_binary_arith(op);

    case JSOp::Pow:
      return jsop_pow();

    case JSOp::Pos:
      return jsop_pos();

    case JSOp::ToNumeric:
      return jsop_tonumeric();

    case JSOp::Neg:
      return jsop_neg();

    case JSOp::Inc:
    case JSOp::Dec:
      return jsop_inc_or_dec(op);

    case JSOp::ToString:
      return jsop_tostring();

    case JSOp::DefVar:
      return jsop_defvar();

    case JSOp::DefLet:
    case JSOp::DefConst:
      return jsop_deflexical();

    case JSOp::DefFun:
      return jsop_deffun();

    case JSOp::CheckGlobalOrEvalDecl:
      return jsop_checkGlobalOrEvalDecl();

    case JSOp::Eq:
    case JSOp::Ne:
    case JSOp::StrictEq:
    case JSOp::StrictNe:
    case JSOp::Lt:
    case JSOp::Le:
    case JSOp::Gt:
    case JSOp::Ge:
      return jsop_compare(op);

    case JSOp::Double:
      pushConstant(GET_INLINE_VALUE(pc));
      return Ok();

    case JSOp::BigInt:
      pushConstant(BigIntValue(info().getBigInt(pc)));
      return Ok();

    case JSOp::String:
      pushConstant(StringValue(info().getAtom(pc)));
      return Ok();

    case JSOp::Symbol: {
      unsigned which = GET_UINT8(pc);
      JS::Symbol* sym = realm->runtime()->wellKnownSymbols().get(which);
      pushConstant(SymbolValue(sym));
      return Ok();
    }

    case JSOp::Zero:
      pushConstant(Int32Value(0));
      return Ok();

    case JSOp::One:
      pushConstant(Int32Value(1));
      return Ok();

    case JSOp::Null:
      pushConstant(NullValue());
      return Ok();

    case JSOp::Void:
      current->pop();
      pushConstant(UndefinedValue());
      return Ok();

    case JSOp::Hole:
      pushConstant(MagicValue(JS_ELEMENTS_HOLE));
      return Ok();

    case JSOp::False:
      pushConstant(BooleanValue(false));
      return Ok();

    case JSOp::True:
      pushConstant(BooleanValue(true));
      return Ok();

    case JSOp::Arguments:
      return jsop_arguments();

    case JSOp::Rest:
      return jsop_rest();

    case JSOp::GetArg:
      return jsop_getarg(GET_ARGNO(pc));

    case JSOp::SetArg:
      return jsop_setarg(GET_ARGNO(pc));

    case JSOp::GetLocal:
      current->pushLocal(GET_LOCALNO(pc));
      return Ok();

    case JSOp::SetLocal:
      current->setLocal(GET_LOCALNO(pc));
      return Ok();

    case JSOp::ThrowSetConst:
      return jsop_throwsetconst();

    case JSOp::CheckLexical:
    case JSOp::CheckAliasedLexical:
      return jsop_checklexical();

    case JSOp::InitLexical:
      current->setLocal(GET_LOCALNO(pc));
      return Ok();

    case JSOp::InitGLexical: {
      MOZ_ASSERT(!script()->hasNonSyntacticScope());
      MDefinition* value = current->pop();
      current->push(
          constant(ObjectValue(script()->global().lexicalEnvironment())));
      current->push(value);
      return jsop_setprop(info().getAtom(pc)->asPropertyName());
    }

    case JSOp::InitAliasedLexical:
      return jsop_setaliasedvar(EnvironmentCoordinate(pc));

    case JSOp::Uninitialized:
      pushConstant(MagicValue(JS_UNINITIALIZED_LEXICAL));
      return Ok();

    case JSOp::Pop: {
      MDefinition* def = current->pop();

      // Pop opcodes frequently appear where values are killed, e.g. after
      // SET* opcodes. Place a resume point afterwards to avoid capturing
      // the dead value in later snapshots, except in places where that
      // resume point is obviously unnecessary.
      if (JSOp(pc[JSOpLength_Pop]) == JSOp::Pop) {
        return Ok();
      }
      if (def->isConstant()) {
        return Ok();
      }
      return maybeInsertResume();
    }

    case JSOp::PopN:
      for (uint32_t i = 0, n = GET_UINT16(pc); i < n; i++) {
        current->pop();
      }
      return Ok();

    case JSOp::DupAt:
      current->pushSlot(current->stackDepth() - 1 - GET_UINT24(pc));
      return Ok();

    case JSOp::NewArray:
      return jsop_newarray(GET_UINT32(pc));

    case JSOp::NewArrayCopyOnWrite:
      return jsop_newarray_copyonwrite();

    case JSOp::NewInit:
    case JSOp::NewObject:
    case JSOp::NewObjectWithGroup:
      return jsop_newobject();

    case JSOp::InitElem:
    case JSOp::InitHiddenElem:
      return jsop_initelem();

    case JSOp::InitElemInc:
      return jsop_initelem_inc();

    case JSOp::InitElemArray:
      return jsop_initelem_array();

    case JSOp::InitProp:
    case JSOp::InitLockedProp:
    case JSOp::InitHiddenProp: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_initprop(name);
    }

    case JSOp::MutateProto: {
      return jsop_mutateproto();
    }

    case JSOp::InitPropGetter:
    case JSOp::InitHiddenPropGetter:
    case JSOp::InitPropSetter:
    case JSOp::InitHiddenPropSetter: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_initprop_getter_setter(name);
    }

    case JSOp::InitElemGetter:
    case JSOp::InitHiddenElemGetter:
    case JSOp::InitElemSetter:
    case JSOp::InitHiddenElemSetter:
      return jsop_initelem_getter_setter();

    case JSOp::FunCall:
      return jsop_funcall(GET_ARGC(pc));

    case JSOp::FunApply:
      return jsop_funapply(GET_ARGC(pc));

    case JSOp::SpreadCall:
      return jsop_spreadcall();

    case JSOp::SpreadNew:
    case JSOp::SpreadSuperCall:
      return jsop_spreadnew();

    case JSOp::Call:
    case JSOp::CallIgnoresRv:
    case JSOp::CallIter:
    case JSOp::New:
    case JSOp::SuperCall:
      MOZ_TRY(jsop_call(GET_ARGC(pc),
                        JSOp(*pc) == JSOp::New || JSOp(*pc) == JSOp::SuperCall,
                        JSOp(*pc) == JSOp::CallIgnoresRv));
      return Ok();

    case JSOp::Eval:
    case JSOp::StrictEval:
      return jsop_eval(GET_ARGC(pc));

    case JSOp::Int8:
      pushConstant(Int32Value(GET_INT8(pc)));
      return Ok();

    case JSOp::Uint16:
      pushConstant(Int32Value(GET_UINT16(pc)));
      return Ok();

    case JSOp::GetGName: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      if (!script()->hasNonSyntacticScope()) {
        return jsop_getgname(name);
      }
      return jsop_getname(name);
    }

    case JSOp::SetGName:
    case JSOp::StrictSetGName: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      JSObject* obj = nullptr;
      if (!script()->hasNonSyntacticScope()) {
        obj = testGlobalLexicalBinding(name);
      }
      if (obj) {
        return setStaticName(obj, name);
      }
      return jsop_setprop(name);
    }

    case JSOp::GetName: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_getname(name);
    }

    case JSOp::GetIntrinsic: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_intrinsic(name);
    }

    case JSOp::GetImport: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_getimport(name);
    }

    case JSOp::BindGName:
      if (!script()->hasNonSyntacticScope()) {
        if (JSObject* env = testGlobalLexicalBinding(info().getName(pc))) {
          pushConstant(ObjectValue(*env));
          return Ok();
        }
      }
      // Fall through to JSOp::BindName
      [[fallthrough]];
    case JSOp::BindName:
      return jsop_bindname(info().getName(pc));

    case JSOp::BindVar:
      return jsop_bindvar();

    case JSOp::Dup:
      current->pushSlot(current->stackDepth() - 1);
      return Ok();

    case JSOp::Dup2:
      return jsop_dup2();

    case JSOp::Swap:
      current->swapAt(-1);
      return Ok();

    case JSOp::Pick:
      current->pick(-GET_INT8(pc));
      return Ok();

    case JSOp::Unpick:
      current->unpick(-GET_INT8(pc));
      return Ok();

    case JSOp::GetAliasedVar:
      return jsop_getaliasedvar(EnvironmentCoordinate(pc));

    case JSOp::SetAliasedVar:
      return jsop_setaliasedvar(EnvironmentCoordinate(pc));

    case JSOp::Uint24:
    case JSOp::ResumeIndex:
      pushConstant(Int32Value(GET_UINT24(pc)));
      return Ok();

    case JSOp::Int32:
      pushConstant(Int32Value(GET_INT32(pc)));
      return Ok();

    case JSOp::GetElem:
    case JSOp::CallElem:
      MOZ_TRY(jsop_getelem());
      if (op == JSOp::CallElem) {
        MOZ_TRY(improveThisTypesForCall());
      }
      return Ok();

    case JSOp::SetElem:
    case JSOp::StrictSetElem:
      return jsop_setelem();

    case JSOp::Length:
      return jsop_length();

    case JSOp::Not:
      return jsop_not();

    case JSOp::FunctionThis:
      return jsop_functionthis();

    case JSOp::GlobalThis:
      return jsop_globalthis();

    case JSOp::Callee: {
      MDefinition* callee = getCallee();
      current->push(callee);
      return Ok();
    }

    case JSOp::EnvCallee:
      return jsop_envcallee();

    case JSOp::SuperBase:
      return jsop_superbase();

    case JSOp::GetPropSuper: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_getprop_super(name);
    }

    case JSOp::GetElemSuper:
      return jsop_getelem_super();

    case JSOp::GetProp:
    case JSOp::CallProp: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      MOZ_TRY(jsop_getprop(name));
      if (op == JSOp::CallProp) {
        MOZ_TRY(improveThisTypesForCall());
      }
      return Ok();
    }

    case JSOp::SetProp:
    case JSOp::StrictSetProp:
    case JSOp::SetName:
    case JSOp::StrictSetName: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_setprop(name);
    }

    case JSOp::DelProp:
    case JSOp::StrictDelProp: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_delprop(name);
    }

    case JSOp::DelElem:
    case JSOp::StrictDelElem:
      return jsop_delelem();

    case JSOp::RegExp:
      return jsop_regexp(info().getRegExp(pc));

    case JSOp::CallSiteObj:
      pushConstant(ObjectValue(*info().getObject(pc)));
      return Ok();

    case JSOp::Object:
      return jsop_object(info().getObject(pc));

    case JSOp::ClassConstructor:
      return jsop_classconstructor();

    case JSOp::DerivedConstructor:
      return jsop_derivedclassconstructor();

    case JSOp::Typeof:
    case JSOp::TypeofExpr:
      return jsop_typeof();

    case JSOp::ToAsyncIter:
      return jsop_toasynciter();

    case JSOp::ToId:
      return jsop_toid();

    case JSOp::IterNext:
      return jsop_iternext();

    case JSOp::Lambda:
      return jsop_lambda(info().getFunction(pc));

    case JSOp::LambdaArrow:
      return jsop_lambda_arrow(info().getFunction(pc));

    case JSOp::SetFunName:
      return jsop_setfunname(GET_UINT8(pc));

    case JSOp::PushLexicalEnv:
      return jsop_pushlexicalenv(GET_UINT32_INDEX(pc));

    case JSOp::PopLexicalEnv:
      current->setEnvironmentChain(walkEnvironmentChain(1));
      return Ok();

    case JSOp::FreshenLexicalEnv:
      return jsop_copylexicalenv(true);

    case JSOp::RecreateLexicalEnv:
      return jsop_copylexicalenv(false);

    case JSOp::Iter:
      return jsop_iter();

    case JSOp::MoreIter:
      return jsop_itermore();

    case JSOp::IsNoIter:
      return jsop_isnoiter();

    case JSOp::EndIter:
      return jsop_iterend();

    case JSOp::In:
      return jsop_in();

    case JSOp::HasOwn:
      return jsop_hasown();

    case JSOp::SetRval:
      MOZ_ASSERT(!script()->noScriptRval());
      current->setSlot(info().returnValueSlot(), current->pop());
      return Ok();

    case JSOp::Instanceof:
      return jsop_instanceof();

    case JSOp::DebugLeaveLexicalEnv:
      return Ok();

    case JSOp::Debugger:
      return jsop_debugger();

    case JSOp::GImplicitThis:
      if (!script()->hasNonSyntacticScope()) {
        pushConstant(UndefinedValue());
        return Ok();
      }
      // Fallthrough to ImplicitThis in non-syntactic scope case
      [[fallthrough]];
    case JSOp::ImplicitThis: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_implicitthis(name);
    }

    case JSOp::NewTarget:
      return jsop_newtarget();

    case JSOp::CheckIsObj:
      return jsop_checkisobj(GET_UINT8(pc));

    case JSOp::CheckIsCallable:
      return jsop_checkiscallable(GET_UINT8(pc));

    case JSOp::CheckObjCoercible:
      return jsop_checkobjcoercible();

    case JSOp::DebugCheckSelfHosted: {
#ifdef DEBUG
      MDebugCheckSelfHosted* check =
          MDebugCheckSelfHosted::New(alloc(), current->pop());
      current->add(check);
      current->push(check);
      MOZ_TRY(resumeAfter(check));
#endif
      return Ok();
    }

    case JSOp::IsConstructing:
      pushConstant(MagicValue(JS_IS_CONSTRUCTING));
      return Ok();

    case JSOp::OptimizeSpreadCall:
      return jsop_optimize_spreadcall();

    case JSOp::ImportMeta:
      return jsop_importmeta();

    case JSOp::DynamicImport:
      return jsop_dynamic_import();

    case JSOp::InstrumentationActive:
      return jsop_instrumentation_active();

    case JSOp::InstrumentationCallback:
      return jsop_instrumentation_callback();

    case JSOp::InstrumentationScriptId:
      return jsop_instrumentation_scriptid();

    case JSOp::CheckClassHeritage:
      return jsop_checkclassheritage();

    case JSOp::FunWithProto:
      return jsop_funwithproto(info().getFunction(pc));

    case JSOp::ObjWithProto:
      return jsop_objwithproto();

    case JSOp::FunctionProto:
      return jsop_functionproto();

    case JSOp::CheckReturn:
      return jsop_checkreturn();

    case JSOp::CheckThis:
      return jsop_checkthis();

    case JSOp::CheckThisReinit:
      return jsop_checkthisreinit();

    case JSOp::SuperFun:
      return jsop_superfun();

    case JSOp::InitHomeObject:
      return jsop_inithomeobject();

    // ===== NOT Yet Implemented =====
    // Read below!

    // With
    case JSOp::EnterWith:
    case JSOp::LeaveWith:

    // Spread
    case JSOp::SpreadEval:
    case JSOp::StrictSpreadEval:

    // Super
    case JSOp::SetPropSuper:
    case JSOp::SetElemSuper:
    case JSOp::StrictSetPropSuper:
    case JSOp::StrictSetElemSuper:

    // Environments (bug 1366470)
    case JSOp::PushVarEnv:

    // Compound assignment
    case JSOp::GetBoundName:

    // Generators / Async (bug 1317690)
    case JSOp::Exception:
    case JSOp::IsGenClosing:
    case JSOp::InitialYield:
    case JSOp::Yield:
    case JSOp::FinalYieldRval:
    case JSOp::Resume:
    case JSOp::ResumeKind:
    case JSOp::CheckResumeKind:
    case JSOp::AfterYield:
    case JSOp::Await:
    case JSOp::TrySkipAwait:
    case JSOp::Generator:
    case JSOp::AsyncAwait:
    case JSOp::AsyncResolve:

    // Misc
    case JSOp::DelName:
    case JSOp::Finally:
    case JSOp::GetRval:
    case JSOp::Gosub:
    case JSOp::Retsub:
    case JSOp::SetIntrinsic:
    case JSOp::ThrowMsg:
      // === !! WARNING WARNING WARNING !! ===
      // Do you really want to sacrifice performance by not implementing this
      // operation in the optimizing compiler?
      break;

    case JSOp::ForceInterpreter:
      // Intentionally not implemented.
      break;
  }

#ifdef DEBUG
  return abort(AbortReason::Disable, "Unsupported opcode: %s", CodeName(op));
#else
  return abort(AbortReason::Disable, "Unsupported opcode: %d",
               int(uint8_t(op)));
#endif
}

// Discard the partially-built loop body and restart building at the loop
// header. Called when new types were observed flowing into the header's
// phis: blocks already built for the body were specialized for the old
// types, so they are thrown away and the body is compiled again.
AbortReasonOr<Ok> IonBuilder::restartLoop(MBasicBlock* header) {
  AutoTraceLog logCompile(TraceLoggerForCurrentThread(),
                          TraceLogger_IonBuilderRestartLoop);

  spew("New types at loop header, restarting loop body");

  // Bound the number of restarts so a pathological script cannot keep us
  // rebuilding the same loop indefinitely.
  if (JitOptions.limitScriptSize) {
    if (++numLoopRestarts_ >= MAX_LOOP_RESTARTS) {
      return abort(AbortReason::Disable,
                   "Aborted while processing control flow");
    }
  }

  // Restore slots to entry state, taken from the header's entry resume point.
  size_t stackDepth = header->entryResumePoint()->stackDepth();
  for (size_t slot = 0; slot < stackDepth; slot++) {
    MDefinition* loopDef = header->entryResumePoint()->getOperand(slot);
    header->setSlot(slot, loopDef);
  }

  // Remove the last phi operand; it was added for the (now discarded)
  // backedge.
  for (MPhiIterator phi = header->phisBegin(); phi != header->phisEnd();
       phi++) {
    phi->removeOperand(phi->numOperands() - 1);
  }

  // Discard unreferenced & pre-allocated resume points.
  replaceMaybeFallbackFunctionGetter(nullptr);

  // Remove all blocks in the loop body other than the header, which has phis
  // of the appropriate type and incoming edges to preserve.
  if (!graph().removeSuccessorBlocks(header)) {
    return abort(AbortReason::Alloc);
  }
  graph().removeBlockFromList(header);

  // Remove all instructions from the header itself, and all resume points
  // except the entry resume point.
  header->discardAllInstructions();
  header->discardAllResumePoints(/* discardEntry = */ false);
  header->setStackDepth(header->getPredecessor(0)->stackDepth());

  loopDepth_ = header->loopDepth();

  // Don't specializePhis(), as the header has been visited before and the
  // phis have already had their type set.
  setCurrent(header);
  graph().addBlock(current);

  jsbytecode* loopHead = header->pc();
  MOZ_ASSERT(JSOp(*loopHead) == JSOp::LoopHead);

  // Since we discarded the header's instructions above, emit them again. This
  // includes the interrupt check.
  MOZ_TRY(emitLoopHeadInstructions(loopHead));
  nextpc = GetNextPc(loopHead);

  // Remove loop header and dead blocks from pendingBlocks. Iterate each edge
  // list backwards so erasing an entry does not skip the next candidate.
  for (PendingEdgesMap::Range r = pendingEdges_.all(); !r.empty();
       r.popFront()) {
    PendingEdges& blocks = r.front().value();
    for (size_t i = blocks.length(); i > 0; i--) {
      PendingEdge& block = blocks[i - 1];
      if (block.block() == header || block.block()->isDead()) {
        blocks.erase(&block);
      }
    }
  }

  return Ok();
}

// After |test| established that |subject| can be narrowed to the type set
// |type|, rewrite every current stack slot holding |subject| so that code in
// the branch sees the improved type information. Returns without changes
// when |type| would not actually be an improvement.
AbortReasonOr<Ok> IonBuilder::replaceTypeSet(MDefinition* subject,
                                             TemporaryTypeSet* type,
                                             MTest* test) {
  // An unknown set carries no information worth propagating.
  if (type->unknown()) {
    return Ok();
  }

  // Don't emit MFilterTypeSet if it doesn't improve the typeset.
  if (subject->resultTypeSet()) {
    if (subject->resultTypeSet()->equals(type)) {
      return Ok();
    }
  } else {
    // No result type set on |subject|: compare against a set synthesized
    // from its MIR type instead.
    TemporaryTypeSet oldTypes(alloc_->lifoAlloc(), subject->type());
    if (oldTypes.equals(type)) {
      return Ok();
    }
  }

  // Filter instruction created lazily and shared by every slot that holds
  // |subject| directly.
  MInstruction* replace = nullptr;
  MDefinition* ins;

  for (uint32_t i = 0; i < current->stackDepth(); i++) {
    ins = current->getSlot(i);

    // Instead of creating a new MFilterTypeSet, try to update the old one.
    if (ins->isFilterTypeSet() && ins->getOperand(0) == subject &&
        ins->dependency() == test) {
      TemporaryTypeSet* intersect = TypeSet::intersectSets(
          ins->resultTypeSet(), type, alloc_->lifoAlloc());
      if (!intersect) {
        return abort(AbortReason::Alloc);
      }

      ins->toFilterTypeSet()->setResultType(intersect->getKnownMIRType());
      ins->toFilterTypeSet()->setResultTypeSet(intersect);

      // If the filter narrowed to a singleton value type, fold the slot to
      // the corresponding constant.
      if (ins->type() == MIRType::Undefined) {
        current->setSlot(i, constant(UndefinedValue()));
      } else if (ins->type() == MIRType::Null) {
        current->setSlot(i, constant(NullValue()));
      } else if (ins->type() == MIRType::MagicOptimizedArguments) {
        current->setSlot(i, constant(MagicValue(JS_OPTIMIZED_ARGUMENTS)));
      } else {
        MOZ_ASSERT(!IsMagicType(ins->type()));
      }
      continue;
    }

    if (ins == subject) {
      if (!replace) {
        replace = MFilterTypeSet::New(alloc(), subject, type);
        current->add(replace);

        // Make sure we don't hoist it above the MTest, we can use the
        // 'dependency' of an MInstruction. This is normally used by
        // Alias Analysis, but won't get overwritten, since this
        // instruction doesn't have an AliasSet.
        replace->setDependency(test);

        // As above, fold singleton value types to constants.
        if (replace->type() == MIRType::Undefined) {
          replace = constant(UndefinedValue());
        } else if (replace->type() == MIRType::Null) {
          replace = constant(NullValue());
        } else if (replace->type() == MIRType::MagicOptimizedArguments) {
          replace = constant(MagicValue(JS_OPTIMIZED_ARGUMENTS));
        } else {
          MOZ_ASSERT(!IsMagicType(ins->type()));
        }
      }
      current->setSlot(i, replace);
    }
  }
  return Ok();
}

// Dispatch type-refinement for a comparison used as a branch condition.
// Only two shapes are handled: comparisons against undefined/null, and
// `typeof x` compared with a constant; everything else learns nothing.
AbortReasonOr<Ok> IonBuilder::improveTypesAtCompare(MCompare* ins,
                                                    bool trueBranch,
                                                    MTest* test) {
  MCompare::CompareType compareType = ins->compareType();
  if (compareType == MCompare::Compare_Null ||
      compareType == MCompare::Compare_Undefined) {
    return improveTypesAtNullOrUndefinedCompare(ins, trueBranch, test);
  }

  bool hasTypeOfOperand = ins->lhs()->isTypeOf() || ins->rhs()->isTypeOf();
  bool hasConstantOperand =
      ins->lhs()->isConstant() || ins->rhs()->isConstant();
  if (hasTypeOfOperand && hasConstantOperand) {
    return improveTypesAtTypeOfCompare(ins, trueBranch, test);
  }

  return Ok();
}

// Refine the type of x for a branch guarded by `typeof x <op> "string"`.
// Builds the set of types matching the typeof string and intersects it with
// (true branch) or removes it from (false branch) x's current type set.
AbortReasonOr<Ok> IonBuilder::improveTypesAtTypeOfCompare(MCompare* ins,
                                                          bool trueBranch,
                                                          MTest* test) {
  // One operand is the typeof, the other the constant; find each.
  MTypeOf* typeOf =
      ins->lhs()->isTypeOf() ? ins->lhs()->toTypeOf() : ins->rhs()->toTypeOf();
  MConstant* constOperand = ins->lhs()->isConstant()
                                ? ins->lhs()->toConstant()
                                : ins->rhs()->toConstant();

  // Only comparisons against a string constant carry type information.
  if (constOperand->type() != MIRType::String) {
    return Ok();
  }

  switch (ins->jsop()) {
    case JSOp::Eq:
    case JSOp::StrictEq:
      break;
    case JSOp::Ne:
    case JSOp::StrictNe:
      // An inequality inverts which branch the filter applies to.
      trueBranch = !trueBranch;
      break;
    default:
      // Relational compares not supported.
      return Ok();
  }

  MDefinition* subject = typeOf->input();
  TemporaryTypeSet* inputTypes = subject->resultTypeSet();

  LifoAlloc* lifoAlloc = alloc_->lifoAlloc();

  // Without a result type set, synthesize one from the MIR type so the set
  // operations below still apply.
  TemporaryTypeSet tmp;
  if (!inputTypes) {
    if (subject->type() == MIRType::Value) {
      return Ok();
    }
    inputTypes = &tmp;
    tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()), lifoAlloc);
  }

  // An unknown set cannot be narrowed.
  if (inputTypes->unknown()) {
    return Ok();
  }

  // Build the set of types matching the typeof string. Note: we cannot
  // remove the AnyObject type in the false branch, since there are multiple
  // ways to get an object. That is the reason for the 'trueBranch' test.
  TemporaryTypeSet filter;
  const JSAtomState& names = mirGen_.runtime->names();
  JSString* typeName = constOperand->toString();
  if (typeName == TypeName(JSTYPE_UNDEFINED, names)) {
    filter.addType(TypeSet::UndefinedType(), lifoAlloc);
    if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch) {
      filter.addType(TypeSet::AnyObjectType(), lifoAlloc);
    }
  } else if (typeName == TypeName(JSTYPE_BOOLEAN, names)) {
    filter.addType(TypeSet::BooleanType(), lifoAlloc);
  } else if (typeName == TypeName(JSTYPE_NUMBER, names)) {
    filter.addType(TypeSet::Int32Type(), lifoAlloc);
    filter.addType(TypeSet::DoubleType(), lifoAlloc);
  } else if (typeName == TypeName(JSTYPE_STRING, names)) {
    filter.addType(TypeSet::StringType(), lifoAlloc);
  } else if (typeName == TypeName(JSTYPE_SYMBOL, names)) {
    filter.addType(TypeSet::SymbolType(), lifoAlloc);
  } else if (typeName == TypeName(JSTYPE_BIGINT, names)) {
    filter.addType(TypeSet::BigIntType(), lifoAlloc);
  } else if (typeName == TypeName(JSTYPE_OBJECT, names)) {
    filter.addType(TypeSet::NullType(), lifoAlloc);
    if (trueBranch) {
      filter.addType(TypeSet::AnyObjectType(), lifoAlloc);
    }
  } else if (typeName == TypeName(JSTYPE_FUNCTION, names)) {
    if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch) {
      filter.addType(TypeSet::AnyObjectType(), lifoAlloc);
    }
  } else {
    // Unrecognized typeof result string: nothing to learn.
    return Ok();
  }

  TemporaryTypeSet* improved =
      trueBranch ? TypeSet::intersectSets(&filter, inputTypes, lifoAlloc)
                 : TypeSet::removeSet(inputTypes, &filter, lifoAlloc);

  if (!improved) {
    return abort(AbortReason::Alloc);
  }

  return replaceTypeSet(subject, improved, test);
}

// Refine the type of the lhs for a branch guarded by a comparison against
// undefined or null: depending on operator and branch sense, either set the
// lhs's type to (a subset of) undefined/null or remove those types.
AbortReasonOr<Ok> IonBuilder::improveTypesAtNullOrUndefinedCompare(
    MCompare* ins, bool trueBranch, MTest* test) {
  MOZ_ASSERT(ins->compareType() == MCompare::Compare_Undefined ||
             ins->compareType() == MCompare::Compare_Null);

  JSOp op = ins->jsop();

  // Which of undefined/null this comparison tells us something about.
  bool altersUndefined;
  bool altersNull;
  if (op == JSOp::StrictEq || op == JSOp::StrictNe) {
    // Strict (in)equality only constrains the exact compared value.
    altersUndefined = ins->compareType() == MCompare::Compare_Undefined;
    altersNull = ins->compareType() == MCompare::Compare_Null;
  } else if (op == JSOp::Eq || op == JSOp::Ne) {
    // Loose (in)equality conflates undefined and null.
    altersUndefined = true;
    altersNull = true;
  } else {
    MOZ_CRASH("Relational compares not supported");
  }

  MDefinition* subject = ins->lhs();
  TemporaryTypeSet* inputTypes = subject->resultTypeSet();

  MOZ_ASSERT(IsNullOrUndefined(ins->rhs()->type()));

  LifoAlloc* lifoAlloc = alloc_->lifoAlloc();

  // Without a result type set, synthesize one from the MIR type so the set
  // operations below still apply.
  TemporaryTypeSet tmp;
  if (!inputTypes) {
    if (subject->type() == MIRType::Value) {
      return Ok();
    }
    inputTypes = &tmp;
    tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()), lifoAlloc);
  }

  // An unknown set cannot be narrowed.
  if (inputTypes->unknown()) {
    return Ok();
  }

  bool isEquality = op == JSOp::StrictEq || op == JSOp::Eq;

  TemporaryTypeSet* improved;
  if (isEquality != trueBranch) {
    // Equality failed (or inequality succeeded): the subject is known not to
    // be the compared value(s), so remove undefined/null.
    TemporaryTypeSet remove;
    if (altersUndefined) {
      remove.addType(TypeSet::UndefinedType(), lifoAlloc);
    }
    if (altersNull) {
      remove.addType(TypeSet::NullType(), lifoAlloc);
    }

    improved = TypeSet::removeSet(inputTypes, &remove, lifoAlloc);
  } else {
    // The subject is known to be the compared value(s): intersect with them.
    TemporaryTypeSet base;
    if (altersUndefined) {
      base.addType(TypeSet::UndefinedType(), lifoAlloc);
      // If TypeSet emulates undefined, then we cannot filter the objects.
      if (inputTypes->maybeEmulatesUndefined(constraints())) {
        base.addType(TypeSet::AnyObjectType(), lifoAlloc);
      }
    }

    if (altersNull) {
      base.addType(TypeSet::NullType(), lifoAlloc);
    }

    improved = TypeSet::intersectSets(&base, inputTypes, lifoAlloc);
  }

  if (!improved) {
    return abort(AbortReason::Alloc);
  }

  return replaceTypeSet(subject, improved, test);
}

// Refine type information in |successor|, which must be reached only through
// |test|; the branch sense is derived from which successor it is.
AbortReasonOr<Ok> IonBuilder::improveTypesAtTestSuccessor(
    MTest* test, MBasicBlock* successor) {
  MOZ_ASSERT(successor->numPredecessors() == 1);
  MOZ_ASSERT(test->block() == successor->getPredecessor(0));

  bool isTrueBranch = successor == test->ifTrue();
  MOZ_ASSERT(isTrueBranch || successor == test->ifFalse());

  return improveTypesAtTest(test->getOperand(0), isTrueBranch, test);
}

// Improve type information in the branch successor being built by analyzing
// the condition |ins| of |test|. Recognized condition shapes (negation,
// IsObject, IsNullOrUndefined, compare) get dedicated handling; anything
// else falls through to the generic ToBoolean-based narrowing at the bottom.
AbortReasonOr<Ok> IonBuilder::improveTypesAtTest(MDefinition* ins,
                                                 bool trueBranch, MTest* test) {
  // We explore the test condition to try and deduce as much type information
  // as possible.

  // All branches of this switch that don't want to fall through to the
  // default behavior must return.  The default behavior assumes that a true
  // test means the incoming ins is not null or undefined and that a false
  // tests means it's one of null, undefined, false, 0, "", and objects
  // emulating undefined
  switch (ins->op()) {
    case MDefinition::Opcode::Not:
      // !x: recurse on x with the branch sense flipped.
      return improveTypesAtTest(ins->toNot()->getOperand(0), !trueBranch, test);
    case MDefinition::Opcode::IsObject: {
      MDefinition* subject = ins->getOperand(0);
      TemporaryTypeSet* oldType = subject->resultTypeSet();

      // Create temporary typeset equal to the type if there is no
      // resultTypeSet.
      TemporaryTypeSet tmp;
      if (!oldType) {
        if (subject->type() == MIRType::Value) {
          return Ok();
        }
        oldType = &tmp;
        tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()),
                    alloc_->lifoAlloc());
      }

      if (oldType->unknown()) {
        return Ok();
      }

      // True branch: the subject is an object; false branch: it is not.
      TemporaryTypeSet* type = nullptr;
      if (trueBranch) {
        type = oldType->cloneObjectsOnly(alloc_->lifoAlloc());
      } else {
        type = oldType->cloneWithoutObjects(alloc_->lifoAlloc());
      }

      if (!type) {
        return abort(AbortReason::Alloc);
      }

      return replaceTypeSet(subject, type, test);
    }
    case MDefinition::Opcode::IsNullOrUndefined: {
      MDefinition* subject = ins->getOperand(0);
      TemporaryTypeSet* oldType = subject->resultTypeSet();

      // Create temporary typeset equal to the type if there is no
      // resultTypeSet.
      TemporaryTypeSet tmp;
      if (!oldType) {
        if (subject->type() == MIRType::Value) {
          return Ok();
        }
        oldType = &tmp;
        tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()),
                    alloc_->lifoAlloc());
      }

      // If ins does not have a typeset we return as we cannot optimize.
      if (oldType->unknown()) {
        return Ok();
      }

      // Decide either to set or remove.
      TemporaryTypeSet filter;
      filter.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
      filter.addType(TypeSet::NullType(), alloc_->lifoAlloc());

      // True branch: only undefined/null remain; false branch: remove them.
      TemporaryTypeSet* type;
      if (trueBranch) {
        type = TypeSet::intersectSets(&filter, oldType, alloc_->lifoAlloc());
      } else {
        type = TypeSet::removeSet(oldType, &filter, alloc_->lifoAlloc());
      }

      if (!type) {
        return abort(AbortReason::Alloc);
      }

      return replaceTypeSet(subject, type, test);
    }

    case MDefinition::Opcode::Compare:
      return improveTypesAtCompare(ins->toCompare(), trueBranch, test);

    default:
      break;
  }

  // By default MTest tests ToBoolean(input). As a result in the true branch we
  // can filter undefined and null. In false branch we can only encounter
  // undefined, null, false, 0, "" and objects that emulate undefined.

  TemporaryTypeSet* oldType = ins->resultTypeSet();
  TemporaryTypeSet* type;

  // Create temporary typeset equal to the type if there is no resultTypeSet.
  TemporaryTypeSet tmp;
  if (!oldType) {
    if (ins->type() == MIRType::Value) {
      return Ok();
    }
    oldType = &tmp;
    tmp.addType(TypeSet::PrimitiveOrAnyObjectType(ins->type()),
                alloc_->lifoAlloc());
  }

  // If ins does not have a typeset we return as we cannot optimize.
  if (oldType->unknown()) {
    return Ok();
  }

  // Decide either to set or remove.
  if (trueBranch) {
    TemporaryTypeSet remove;
    remove.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
    remove.addType(TypeSet::NullType(), alloc_->lifoAlloc());
    type = TypeSet::removeSet(oldType, &remove, alloc_->lifoAlloc());
  } else {
    // False branch: intersect with every type whose ToBoolean can be false.
    TemporaryTypeSet base;
    base.addType(TypeSet::UndefinedType(),
                 alloc_->lifoAlloc());  // ToBoolean(undefined) == false
    base.addType(TypeSet::NullType(),
                 alloc_->lifoAlloc());  // ToBoolean(null) == false
    base.addType(TypeSet::BooleanType(),
                 alloc_->lifoAlloc());  // ToBoolean(false) == false
    base.addType(TypeSet::Int32Type(),
                 alloc_->lifoAlloc());  // ToBoolean(0) == false
    base.addType(TypeSet::DoubleType(),
                 alloc_->lifoAlloc());  // ToBoolean(0.0) == false
    base.addType(TypeSet::StringType(),
                 alloc_->lifoAlloc());  // ToBoolean("") == false
    base.addType(TypeSet::BigIntType(),
                 alloc_->lifoAlloc());  // ToBoolean(0n) == false

    // If the typeset does emulate undefined, then we cannot filter out
    // objects.
    if (oldType->maybeEmulatesUndefined(constraints())) {
      base.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
    }

    type = TypeSet::intersectSets(&base, oldType, alloc_->lifoAlloc());
  }

  if (!type) {
    return abort(AbortReason::Alloc);
  }

  return replaceTypeSet(ins, type, test);
}

// Duplicate the two topmost stack values, preserving their relative order.
AbortReasonOr<Ok> IonBuilder::jsop_dup2() {
  uint32_t depth = current->stackDepth();
  current->pushSlot(depth - 2);
  current->pushSlot(depth - 1);
  return Ok();
}

// Handle a conditional branch whose jump target is the loop head, i.e. the
// loop's backedge test: end the current block with a test whose true
// successor is the backedge block, then close the loop.
AbortReasonOr<Ok> IonBuilder::visitTestBackedge(JSOp op, bool* restarted) {
  MOZ_ASSERT(op == JSOp::IfNe);
  MOZ_ASSERT(loopDepth_ > 0);

  MDefinition* condition = current->pop();

  jsbytecode* loopHeadPC = pc + GET_JUMP_OFFSET(pc);
  MOZ_ASSERT(JSOp(*loopHeadPC) == JSOp::LoopHead);

  jsbytecode* fallthroughPC = GetNextPc(pc);

  // We can finish the loop now. Use the loophead pc instead of the current pc
  // because the stack depth at the start of that op matches the current stack
  // depth (after popping our operand).
  MBasicBlock* backedgeBlock;
  MOZ_TRY_VAR(backedgeBlock, newBlock(current, loopHeadPC));

  current->end(newTest(condition, backedgeBlock, nullptr));
  MOZ_TRY(
      addPendingEdge(PendingEdge::NewTestFalse(current, op), fallthroughPC));

  MOZ_TRY(startTraversingBlock(backedgeBlock));
  return visitBackEdge(restarted);
}

// Compile a conditional branch opcode: end the current block with an MTest
// and record pending edges for both successors.
AbortReasonOr<Ok> IonBuilder::visitTest(JSOp op, bool* restarted) {
  MOZ_ASSERT(op == JSOp::IfEq || op == JSOp::IfNe || op == JSOp::And ||
             op == JSOp::Or || op == JSOp::Case);

  // A branch back to the loop head is handled separately.
  if (IsBackedgePC(pc)) {
    return visitTestBackedge(op, restarted);
  }

  jsbytecode* fallthroughTarget = GetNextPc(pc);
  jsbytecode* jumpTarget = pc + GET_JUMP_OFFSET(pc);

  // JSOp::And and JSOp::Or inspect the top stack value but don't pop it.
  // Also note that JSOp::Case must pop a second value on the true-branch (the
  // input to the switch-statement). This conditional pop happens in
  // visitJumpTarget.
  bool keepCondition = op == JSOp::And || op == JSOp::Or;
  MDefinition* condition =
      keepCondition ? current->peek(-1) : current->pop();

  // If this op always branches to the same pc we treat this as a JSOp::Goto.
  if (fallthroughTarget == jumpTarget) {
    condition->setImplicitlyUsedUnchecked();
    return visitGoto(fallthroughTarget);
  }

  MTest* test = newTest(condition, nullptr, nullptr);
  current->end(test);

  if (TestTrueTargetIsJoinPoint(op)) {
    std::swap(fallthroughTarget, jumpTarget);
  }

  MOZ_TRY(addPendingEdge(PendingEdge::NewTestTrue(current, op),
                         fallthroughTarget));
  MOZ_TRY(addPendingEdge(PendingEdge::NewTestFalse(current, op), jumpTarget));
  setTerminatedBlock();

  return Ok();
}

// Compile JSOp::Coalesce (`??`): branch on whether the (kept-on-stack)
// operand is null or undefined. Fall through when it is; jump otherwise.
AbortReasonOr<Ok> IonBuilder::jsop_coalesce() {
  jsbytecode* fallthroughPC = GetNextPc(pc);
  jsbytecode* jumpPC = pc + GET_JUMP_OFFSET(pc);
  MOZ_ASSERT(jumpPC > fallthroughPC);

  // The operand stays on the stack for whichever branch consumes it.
  MDefinition* value = current->peek(-1);

  auto* isNullOrUndefined = MIsNullOrUndefined::New(alloc(), value);
  current->add(isNullOrUndefined);

  MTest* test = newTest(isNullOrUndefined, nullptr, nullptr);
  current->end(test);

  MOZ_TRY(addPendingEdge(PendingEdge::NewTestTrue(current, JSOp::Coalesce),
                         fallthroughPC));
  MOZ_TRY(addPendingEdge(PendingEdge::NewTestFalse(current, JSOp::Coalesce),
                         jumpPC));
  setTerminatedBlock();

  return Ok();
}

// Compile the start of a try statement. Catch blocks themselves are never
// compiled; we only build the try body plus a block for the code after the
// whole try-catch, connected through an MGotoWithFake.
AbortReasonOr<Ok> IonBuilder::visitTry() {
  // We don't support try-finally.
  if (script()->jitScript()->hasTryFinally()) {
    return abort(AbortReason::Disable, "Try-finally not supported");
  }

  // Try-catch within inline frames is not yet supported.
  if (isInlineBuilder()) {
    return abort(AbortReason::Disable, "Try-catch during inlining");
  }

  // Try-catch during analyses is not yet supported. Code within the 'catch'
  // block is not accounted for.
  if (info().isAnalysis()) {
    return abort(AbortReason::Disable, "Try-catch during analysis");
  }

  // The last instruction in the try body is a JSOp::Goto jumping over the
  // catch block.
  jsbytecode* tryEndPC = pc + GET_CODE_OFFSET(pc);
  MOZ_ASSERT(JSOp(*tryEndPC) == JSOp::Goto);
  MOZ_ASSERT(GET_JUMP_OFFSET(tryEndPC) > 0);

  jsbytecode* afterTryPC = tryEndPC + GET_JUMP_OFFSET(tryEndPC);

  // The baseline compiler should not attempt to enter the catch block
  // via OSR.
  MOZ_ASSERT(info().osrPc() < tryEndPC || info().osrPc() >= afterTryPC);

  // If controlflow in the try body is terminated (by a return or throw
  // statement), the code after the try-statement may still be reachable
  // via the catch block (which we don't compile) and OSR can enter it.
  // For example:
  //
  //     try {
  //         throw 3;
  //     } catch(e) { }
  //
  //     for (var i=0; i<1000; i++) {}
  //
  // To handle this, we create two blocks: one for the try block and one
  // for the code following the try-catch statement.

  graph().setHasTryBlock();

  MBasicBlock* tryBody;
  MOZ_TRY_VAR(tryBody, newBlock(current, GetNextPc(pc)));

  current->end(MGotoWithFake::New(alloc(), tryBody, nullptr));
  MOZ_TRY(addPendingEdge(PendingEdge::NewGotoWithFake(current), afterTryPC));

  return startTraversingBlock(tryBody);
}

AbortReasonOr<Ok> IonBuilder::visitJumpTarget(JSOp op) {
  // Resolve all pending edges targeting the current pc: connect each source
  // block to a common join block and continue building from there.
  PendingEdgesMap::Ptr p = pendingEdges_.lookup(pc);
  if (!p) {
    // No (reachable) jumps so this is just a no-op.
    return Ok();
  }

  PendingEdges edges(std::move(p->value()));
  pendingEdges_.remove(p);

  // Loop-restarts may clear the list rather than remove the map entry entirely.
  // This is to reduce allocator churn since it is likely the list will be
  // filled in again in the general case.
  if (edges.empty()) {
    return Ok();
  }

  MBasicBlock* joinBlock = nullptr;

  // Create join block if there's fall-through from the previous bytecode op.
  if (!hasTerminatedBlock()) {
    MOZ_TRY_VAR(joinBlock, newBlock(current, pc));
    current->end(MGoto::New(alloc(), joinBlock));
    setTerminatedBlock();
  }

  // Add |pred| as a predecessor of the join block, popping |numToPop| values
  // from its stack. The first predecessor creates the join block when it
  // doesn't exist yet.
  auto addEdge = [&](MBasicBlock* pred, size_t numToPop) -> AbortReasonOr<Ok> {
    if (joinBlock) {
      MOZ_ASSERT(pred->stackDepth() - numToPop == joinBlock->stackDepth());
      if (!joinBlock->addPredecessorPopN(alloc(), pred, numToPop)) {
        return abort(AbortReason::Alloc);
      }
      return Ok();
    }
    MOZ_TRY_VAR(joinBlock, newBlockPopN(pred, pc, numToPop));
    return Ok();
  };

  // When a block is terminated with an MTest instruction we can end up with the
  // following triangle structure:
  //
  //        testBlock
  //         /    |
  //     block    |
  //         \    |
  //        joinBlock
  //
  // Although this is fine for correctness, it has the following issues:
  //
  // 1) The FoldTests pass is unable to optimize this pattern. This matters for
  //    short-circuit operations (JSOp::And, JSOp::Coalesce, etc).
  //
  // 2) We can't easily use improveTypesAtTest to improve type information in
  //    this case:
  //
  //        var obj = ...;
  //        if (obj === null) {
  //          obj = {};
  //        }
  //        ... obj must be non-null ...
  //
  // To fix these issues, we create an empty block to get a diamond structure:
  //
  //        testBlock
  //         /    |
  //     block  emptyBlock
  //         \    |
  //        joinBlock
  auto createEmptyBlockForTest =
      [&](MBasicBlock* pred, size_t successor,
          size_t numToPop) -> AbortReasonOr<MBasicBlock*> {
    MOZ_ASSERT(joinBlock);

    MBasicBlock* emptyBlock;
    MOZ_TRY_VAR(emptyBlock, newBlockPopN(pred, pc, numToPop));
    MOZ_ASSERT(emptyBlock->stackDepth() == joinBlock->stackDepth());

    MTest* test = pred->lastIns()->toTest();
    test->initSuccessor(successor, emptyBlock);

    MOZ_TRY(startTraversingBlock(emptyBlock));
    MOZ_TRY(improveTypesAtTestSuccessor(test, emptyBlock));

    emptyBlock->end(MGoto::New(alloc(), joinBlock));
    setTerminatedBlock();

    return emptyBlock;
  };

  // Hook each pending edge up to the join block, patching the source block's
  // last (control) instruction, whose target successor was left unset.
  for (const PendingEdge& edge : edges) {
    MBasicBlock* source = edge.block();
    MControlInstruction* lastIns = source->lastIns();
    switch (edge.kind()) {
      case PendingEdge::Kind::TestTrue: {
        // JSOp::Case must pop the value when branching to the true-target.
        // If we create an empty block, we have to pop the value there instead
        // of as part of the emptyBlock -> joinBlock edge so stack depths match
        // the current depth.
        const size_t numToPop = (edge.testOp() == JSOp::Case) ? 1 : 0;

        const size_t successor = 0;  // true-branch
        if (joinBlock && TestTrueTargetIsJoinPoint(edge.testOp())) {
          MBasicBlock* pred;
          MOZ_TRY_VAR(pred,
                      createEmptyBlockForTest(source, successor, numToPop));
          MOZ_TRY(addEdge(pred, 0));
        } else {
          MOZ_TRY(addEdge(source, numToPop));
          lastIns->toTest()->initSuccessor(successor, joinBlock);
        }
        continue;
      }

      case PendingEdge::Kind::TestFalse: {
        const size_t numToPop = 0;
        const size_t successor = 1;  // false-branch
        if (joinBlock && !TestTrueTargetIsJoinPoint(edge.testOp())) {
          MBasicBlock* pred;
          MOZ_TRY_VAR(pred,
                      createEmptyBlockForTest(source, successor, numToPop));
          MOZ_TRY(addEdge(pred, 0));
        } else {
          MOZ_TRY(addEdge(source, numToPop));
          lastIns->toTest()->initSuccessor(successor, joinBlock);
        }
        continue;
      }

      case PendingEdge::Kind::Goto:
        MOZ_TRY(addEdge(source, 0));
        lastIns->toGoto()->initSuccessor(0, joinBlock);
        continue;

      case PendingEdge::Kind::GotoWithFake:
        MOZ_TRY(addEdge(source, 0));
        lastIns->toGotoWithFake()->initSuccessor(1, joinBlock);
        continue;
    }
    MOZ_CRASH("Invalid kind");
  }

  // There was at least one pending edge, so the join block must exist now.
  MOZ_ASSERT(joinBlock);
  MOZ_TRY(startTraversingBlock(joinBlock));

  // If the join block has just one predecessor with an MTest, try to improve
  // type information.
  if (joinBlock->numPredecessors() == 1) {
    MBasicBlock* pred = joinBlock->getPredecessor(0);
    if (pred->lastIns()->isTest()) {
      MTest* test = pred->lastIns()->toTest();
      MOZ_TRY(improveTypesAtTestSuccessor(test, joinBlock));
    }
  }

  return Ok();
}

AbortReasonOr<Ok> IonBuilder::visitReturn(JSOp op) {
  // Determine the value this return-style opcode produces.
  MDefinition* returnValue;
  if (op == JSOp::Return) {
    // JSOp::Return returns the value on top of the stack.
    returnValue = current->pop();
  } else if (op == JSOp::RetRval) {
    if (script()->noScriptRval()) {
      // The caller ignores the script's return value, so eagerly return
      // undefined.
      MInstruction* undef = MConstant::New(alloc(), UndefinedValue());
      current->add(undef);
      returnValue = undef;
    } else {
      // Return whatever is currently stored in the return-value slot.
      returnValue = current->getSlot(info().returnValueSlot());
    }
  } else {
    MOZ_CRASH("unknown return op");
  }

  // Terminate the block with the return and record it on the graph.
  MReturn* ret = MReturn::New(alloc(), returnValue);
  current->end(ret);

  if (!graph().addReturn(current)) {
    return abort(AbortReason::Alloc);
  }

  setTerminatedBlock();

  return Ok();
}

AbortReasonOr<Ok> IonBuilder::visitThrow() {
  // Pop the thrown value and emit the (effectful) throw instruction.
  MDefinition* thrownValue = current->pop();
  MThrow* throwIns = MThrow::New(alloc(), thrownValue);
  current->add(throwIns);
  MOZ_TRY(resumeAfter(throwIns));

  // Control never falls through a throw, so end the block as unreachable.
  current->end(MUnreachable::New(alloc()));
  setTerminatedBlock();

  return Ok();
}

AbortReasonOr<Ok> IonBuilder::visitTableSwitch() {
  // Bytecode layout: the first jump offset is the default target, followed by
  // the low and high bounds of the (inclusive) case range.
  jsbytecode* defaultpc = pc + GET_JUMP_OFFSET(pc);

  int32_t low = GET_JUMP_OFFSET(pc + JUMP_OFFSET_LEN * 1);
  int32_t high = GET_JUMP_OFFSET(pc + JUMP_OFFSET_LEN * 2);
  size_t numCases = high - low + 1;

  // Create MIR instruction.
  MDefinition* ins = current->pop();
  MTableSwitch* tableswitch = MTableSwitch::New(alloc(), ins, low, high);
  current->end(tableswitch);

  MBasicBlock* switchBlock = current;

  // Create |default| block.
  {
    MBasicBlock* defaultBlock;
    MOZ_TRY_VAR(defaultBlock, newBlock(switchBlock, defaultpc));

    size_t index;
    if (!tableswitch->addDefault(defaultBlock, &index)) {
      return abort(AbortReason::Alloc);
    }
    MOZ_ASSERT(index == 0);

    MOZ_TRY(startTraversingBlock(defaultBlock));

    // End with a goto whose successor is left null; the pending edge is
    // patched when we reach the jump target at |defaultpc|.
    defaultBlock->end(MGoto::New(alloc(), nullptr));
    MOZ_TRY(addPendingEdge(PendingEdge::NewGoto(defaultBlock), defaultpc));
  }

  // Create blocks for all cases.
  for (size_t i = 0; i < numCases; i++) {
    jsbytecode* casepc = script()->tableSwitchCasePC(pc, i);

    MBasicBlock* caseBlock;
    MOZ_TRY_VAR(caseBlock, newBlock(switchBlock, casepc));

    size_t index;
    if (!tableswitch->addSuccessor(caseBlock, &index)) {
      return abort(AbortReason::Alloc);
    }
    if (!tableswitch->addCase(index)) {
      return abort(AbortReason::Alloc);
    }

    MOZ_TRY(startTraversingBlock(caseBlock));

    // If this is an actual case statement, optimize by replacing the
    // input to the switch case with the actual number of the case.
    MConstant* constant = MConstant::New(alloc(), Int32Value(low + int32_t(i)));
    caseBlock->add(constant);
    // Replace every stack slot holding the switch input with the constant.
    for (uint32_t j = 0; j < caseBlock->stackDepth(); j++) {
      if (ins != caseBlock->getSlot(j)) {
        continue;
      }

      constant->setDependency(ins);
      caseBlock->setSlot(j, constant);
    }

    // As for the default block, the goto successor is filled in via the
    // pending edge when we reach |casepc|.
    caseBlock->end(MGoto::New(alloc(), nullptr));
    MOZ_TRY(addPendingEdge(PendingEdge::NewGoto(caseBlock), casepc));
  }

  setTerminatedBlock();

  return Ok();
}

void IonBuilder::pushConstant(const Value& v) { current->push(constant(v)); }

AbortReasonOr<Ok> IonBuilder::bitnotTrySpecialized(bool* emitted,
                                                   MDefinition* input) {
  MOZ_ASSERT(*emitted == false);

  // Emit an int32-specialized bitnot when the operand cannot be an object,
  // symbol, or bigint; those types require the generic path.
  const bool mightBeComplex = input->mightBeType(MIRType::Object) ||
                              input->mightBeType(MIRType::Symbol) ||
                              input->mightBeType(MIRType::BigInt);
  if (mightBeComplex) {
    return Ok();
  }

  MBitNot* bitNot = MBitNot::New(alloc(), input);
  bitNot->setSpecialization(MIRType::Int32);

  current->add(bitNot);
  current->push(bitNot);

  *emitted = true;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::jsop_bitnot() {
  // Pop the operand and try progressively more generic lowerings: a
  // specialized int32 MBitNot, then an inline-cache stub, then a slow VM call.
  bool emitted = false;

  MDefinition* input = current->pop();

  if (!forceInlineCaches()) {
    MOZ_TRY(bitnotTrySpecialized(&emitted, input));
    // Fix: braces added to match the brace-always style used everywhere else
    // in this file.
    if (emitted) {
      return Ok();
    }
  }

  MOZ_TRY(arithTryBinaryStub(&emitted, JSOp::BitNot, nullptr, input));
  if (emitted) {
    return Ok();
  }

  // Not possible to optimize. Do a slow vm call.
  MBitNot* ins = MBitNot::New(alloc(), input);

  current->add(ins);
  current->push(ins);
  MOZ_ASSERT(ins->isEffectful());
  return resumeAfter(ins);
}

AbortReasonOr<MBinaryBitwiseInstruction*> IonBuilder::binaryBitOpEmit(
    JSOp op, MIRType specialization, MDefinition* left, MDefinition* right) {
  // Emit the MIR instruction for a binary bitwise/shift op. |specialization|
  // is Int32 for the specialized path or None for the generic path.
  MOZ_ASSERT(specialization == MIRType::Int32 ||
             specialization == MIRType::None);

  MBinaryBitwiseInstruction* ins;
  switch (op) {
    case JSOp::BitAnd:
      ins = MBitAnd::New(alloc(), left, right);
      break;

    case JSOp::BitOr:
      ins = MBitOr::New(alloc(), left, right);
      break;

    case JSOp::BitXor:
      ins = MBitXor::New(alloc(), left, right);
      break;

    case JSOp::Lsh:
      ins = MLsh::New(alloc(), left, right);
      break;

    case JSOp::Rsh:
      ins = MRsh::New(alloc(), left, right);
      break;

    case JSOp::Ursh:
      ins = MUrsh::New(alloc(), left, right);
      break;

    default:
      MOZ_CRASH("unexpected bitop");
  }

  current->add(ins);
  // Let the instruction pick its specialization using the baseline inspector
  // for this pc.
  ins->infer(inspector, pc);

  // The expected specialization should match the inferred specialization.
  MOZ_ASSERT_IF(specialization == MIRType::None,
                ins->specialization() == MIRType::None);
  MOZ_ASSERT_IF(
      specialization == MIRType::Int32,
      ins->specialization() == MIRType::Int32 ||
          (op == JSOp::Ursh && ins->specialization() == MIRType::Double));

  current->push(ins);
  // An effectful (unspecialized) instruction needs a resume point.
  if (ins->isEffectful()) {
    MOZ_TRY(resumeAfter(ins));
  }

  return ins;
}

// An operand is "simple" for a bit-op when it cannot be an object, symbol, or
// bigint; those types need the generic path.
static inline bool SimpleBitOpOperand(MDefinition* op) {
  if (op->mightBeType(MIRType::Object) || op->mightBeType(MIRType::Symbol) ||
      op->mightBeType(MIRType::BigInt)) {
    return false;
  }
  return true;
}

AbortReasonOr<Ok> IonBuilder::binaryBitOpTrySpecialized(bool* emitted, JSOp op,
                                                        MDefinition* left,
                                                        MDefinition* right) {
  MOZ_ASSERT(*emitted == false);

  // Emit an int32-specialized bitwise instruction when both operands are
  // simple (i.e. not objects, symbols, or bigints).
  const bool bothSimple =
      SimpleBitOpOperand(left) && SimpleBitOpOperand(right);
  if (!bothSimple) {
    return Ok();
  }

  MOZ_TRY(binaryBitOpEmit(op, MIRType::Int32, left, right));

  *emitted = true;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::jsop_bitop(JSOp op) {
  // Pop operands; the right-hand side is on top of the stack.
  MDefinition* rhs = current->pop();
  MDefinition* lhs = current->pop();

  bool emitted = false;

  // First choice: an int32-specialized instruction.
  if (!forceInlineCaches()) {
    MOZ_TRY(binaryBitOpTrySpecialized(&emitted, op, lhs, rhs));
    if (emitted) {
      return Ok();
    }
  }

  // Second choice: an inline-cache stub.
  MOZ_TRY(arithTryBinaryStub(&emitted, op, lhs, rhs));
  if (emitted) {
    return Ok();
  }

  // Fallback: an unspecialized instruction doing a slow VM call.
  MOZ_TRY(binaryBitOpEmit(op, MIRType::None, lhs, rhs));
  return Ok();
}

// Map an arithmetic JSOp to the corresponding MIR binary opcode.
MDefinition::Opcode BinaryJSOpToMDefinition(JSOp op) {
  if (op == JSOp::Add) {
    return MDefinition::Opcode::Add;
  }
  if (op == JSOp::Sub) {
    return MDefinition::Opcode::Sub;
  }
  if (op == JSOp::Mul) {
    return MDefinition::Opcode::Mul;
  }
  if (op == JSOp::Div) {
    return MDefinition::Opcode::Div;
  }
  if (op == JSOp::Mod) {
    return MDefinition::Opcode::Mod;
  }
  MOZ_CRASH("unexpected binary opcode");
}

AbortReasonOr<Ok> IonBuilder::binaryArithTryConcat(bool* emitted, JSOp op,
                                                   MDefinition* left,
                                                   MDefinition* right) {
  MOZ_ASSERT(*emitted == false);

  // Replace an addition with a string concatenation when the inputs suggest
  // one. Only JSOp::Add can concatenate.
  if (op != JSOp::Add) {
    return Ok();
  }

  // At least one side must already be a string.
  if (left->type() != MIRType::String && right->type() != MIRType::String) {
    return Ok();
  }

  // Any non-string side must be easily coercible to a string: symbols,
  // objects, and magic values disqualify the operand.
  auto coercibleToString = [](MDefinition* def) {
    if (def->type() == MIRType::String) {
      return true;
    }
    return !def->mightBeType(MIRType::Symbol) &&
           !def->mightBeType(MIRType::Object) && !def->mightBeMagicType();
  };
  if (!coercibleToString(left) || !coercibleToString(right)) {
    return Ok();
  }

  MConcat* concat = MConcat::New(alloc(), left, right);
  current->add(concat);
  current->push(concat);

  MOZ_TRY(maybeInsertResume());

  *emitted = true;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::powTrySpecialized(bool* emitted,
                                                MDefinition* base,
                                                MDefinition* power,
                                                MIRType outputType) {
  // Only int32 and double outputs are supported, and both inputs must
  // already be numbers.
  if (outputType != MIRType::Int32 && outputType != MIRType::Double) {
    return Ok();
  }
  MIRType baseType = base->type();
  if (!IsNumberType(baseType)) {
    return Ok();
  }
  MIRType powerType = power->type();
  if (!IsNumberType(powerType)) {
    return Ok();
  }

  // Widen a float32 power to double for the MPow instruction.
  if (powerType == MIRType::Float32) {
    powerType = MIRType::Double;
  }

  MPow* pow = MPow::New(alloc(), base, power, powerType);
  current->add(pow);
  MDefinition* result = pow;

  // Coerce the result to the requested output type when necessary.
  if (outputType == MIRType::Int32 && result->type() != MIRType::Int32) {
    auto* toInt = MToNumberInt32::New(alloc(), result);
    current->add(toInt);
    result = toInt;
  } else if (outputType == MIRType::Double &&
             result->type() != MIRType::Double) {
    MToDouble* toDouble = MToDouble::New(alloc(), result);
    current->add(toDouble);
    result = toDouble;
  }

  current->push(result);
  *emitted = true;
  return Ok();
}

MIRType IonBuilder::binaryArithNumberSpecialization(MDefinition* left,
                                                    MDefinition* right) {
  // Specialize as int32 only when both operands are int32 and baseline never
  // observed a double result at this pc; otherwise use double.
  const bool bothInt32 =
      left->type() == MIRType::Int32 && right->type() == MIRType::Int32;
  if (!bothInt32 || inspector->hasSeenDoubleResult(pc)) {
    return MIRType::Double;
  }
  return MIRType::Int32;
}

AbortReasonOr<MBinaryArithInstruction*> IonBuilder::binaryArithEmitSpecialized(
    MDefinition::Opcode op, MIRType specialization, MDefinition* left,
    MDefinition* right) {
  // Create the arithmetic instruction with the requested specialization and
  // push it on the virtual stack.
  auto* arith = MBinaryArithInstruction::New(alloc(), op, left, right);
  arith->setSpecialization(specialization);

  // Addition and multiplication commute.
  const bool commutative =
      op == MDefinition::Opcode::Add || op == MDefinition::Opcode::Mul;
  if (commutative) {
    arith->setCommutative();
  }

  current->add(arith);
  current->push(arith);

  MOZ_ASSERT(!arith->isEffectful());
  MOZ_TRY(maybeInsertResume());

  return arith;
}

AbortReasonOr<Ok> IonBuilder::binaryArithTrySpecialized(bool* emitted, JSOp op,
                                                        MDefinition* left,
                                                        MDefinition* right) {
  MOZ_ASSERT(*emitted == false);

  // Specialize based on the operand types alone. Strings, symbols, and
  // objects need the generic path.
  if (!SimpleArithOperand(left) || !SimpleArithOperand(right)) {
    return Ok();
  }

  // Require at least one numeric input.
  if (!IsNumberType(left->type()) && !IsNumberType(right->type())) {
    return Ok();
  }

  MIRType specialization = binaryArithNumberSpecialization(left, right);
  MBinaryArithInstruction* arith;
  MOZ_TRY_VAR(arith, binaryArithEmitSpecialized(BinaryJSOpToMDefinition(op),
                                                specialization, left, right));

  // Relax int32 to double if, despite the fact that we have int32 operands and
  // we've never seen a double result, we know the result may overflow or be a
  // double.
  if (specialization == MIRType::Int32 &&
      arith->constantDoubleResult(alloc())) {
    arith->setSpecialization(MIRType::Double);
  }

  *emitted = true;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::binaryArithTrySpecializedOnBaselineInspector(
    bool* emitted, JSOp op, MDefinition* left, MDefinition* right) {
  MOZ_ASSERT(*emitted == false);

  // Speculate on the specialization the baseline caches observed at this pc.
  // Strings, symbols, and objects are never specialized.
  if (!SimpleArithOperand(left) || !SimpleArithOperand(right)) {
    return Ok();
  }

  MIRType specialization = inspector->expectedBinaryArithSpecialization(pc);
  if (specialization == MIRType::None) {
    return Ok();
  }

  MOZ_TRY(binaryArithEmitSpecialized(BinaryJSOpToMDefinition(op),
                                     specialization, left, right));

  *emitted = true;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::arithTryBinaryStub(bool* emitted, JSOp op,
                                                 MDefinition* left,
                                                 MDefinition* right) {
  MOZ_ASSERT(*emitted == false);
  // |op| may differ from the op at the current pc: jsop_neg compiles
  // JSOp::Neg as JSOp::Mul with a constant -1 lhs, and jsop_bitnot passes a
  // null |left|. Dispatch on the actual bytecode op.
  JSOp actualOp = JSOp(*pc);

  // The actual jsop 'jsop_pos' is not supported yet.
  // There's no IC support for JSOp::Pow either.
  if (actualOp == JSOp::Pos || actualOp == JSOp::Pow) {
    return Ok();
  }

  MInstruction* stub = nullptr;
  switch (actualOp) {
    case JSOp::Neg:
    case JSOp::BitNot:
      // Unary at the bytecode level: verify the caller's encoding (see the
      // note on |actualOp| above) and cache only the real operand.
      MOZ_ASSERT_IF(op == JSOp::Mul,
                    left->maybeConstantValue() &&
                        left->maybeConstantValue()->toInt32() == -1);
      MOZ_ASSERT_IF(op != JSOp::Mul, !left);
      stub = MUnaryCache::New(alloc(), right);
      break;
    case JSOp::Add:
    case JSOp::Sub:
    case JSOp::Mul:
    case JSOp::Div:
    case JSOp::Mod:
    case JSOp::BitAnd:
    case JSOp::BitOr:
    case JSOp::BitXor:
    case JSOp::Lsh:
    case JSOp::Rsh:
    case JSOp::Ursh:
      stub = MBinaryCache::New(alloc(), left, right, MIRType::Value);
      break;
    default:
      MOZ_CRASH("unsupported arith");
  }

  current->add(stub);
  current->push(stub);

  // Decrease type from 'any type' to 'empty type' when one of the operands
  // is 'empty typed'.
  maybeMarkEmpty(stub);

  MOZ_TRY(resumeAfter(stub));

  *emitted = true;
  return Ok();
}

MDefinition* IonBuilder::maybeConvertToNumber(MDefinition* def) {
  // Fold constant strings to numbers eagerly here, because MIR foldsTo
  // folding runs off the main thread.
  if (def->type() != MIRType::String || !def->isConstant()) {
    return def;
  }

  JSContext* cx = TlsContext.get();
  double num;
  if (!StringToNumberPure(cx, def->toConstant()->toString(), &num)) {
    return def;
  }

  def->setImplicitlyUsedUnchecked();
  return constant(NumberValue(num));
}

AbortReasonOr<Ok> IonBuilder::jsop_binary_arith(JSOp op, MDefinition* left,
                                                MDefinition* right) {
  // Lowering strategy, tried in order: string concatenation (Add only), a
  // type-specialized instruction, a baseline-IC-speculated instruction, an
  // inline-cache stub, and finally a generic effectful instruction.
  bool emitted = false;

  if (!forceInlineCaches()) {
    MOZ_TRY(binaryArithTryConcat(&emitted, op, left, right));
    if (emitted) {
      return Ok();
    }

    // For non-Add ops, constant strings can be folded to numbers up front.
    if (op != JSOp::Add) {
      left = maybeConvertToNumber(left);
      right = maybeConvertToNumber(right);
    }

    MOZ_TRY(binaryArithTrySpecialized(&emitted, op, left, right));
    if (emitted) {
      return Ok();
    }

    MOZ_TRY(binaryArithTrySpecializedOnBaselineInspector(&emitted, op, left,
                                                         right));
    if (emitted) {
      return Ok();
    }
  }

  MOZ_TRY(arithTryBinaryStub(&emitted, op, left, right));
  if (emitted) {
    return Ok();
  }

  // Fallback: a generic, effectful arithmetic instruction.
  MDefinition::Opcode defOp = BinaryJSOpToMDefinition(op);
  MBinaryArithInstruction* ins =
      MBinaryArithInstruction::New(alloc(), defOp, left, right);

  // Decrease type from 'any type' to 'empty type' when one of the operands
  // is 'empty typed'.
  maybeMarkEmpty(ins);

  current->add(ins);
  current->push(ins);
  MOZ_ASSERT(ins->isEffectful());
  return resumeAfter(ins);
}

AbortReasonOr<Ok> IonBuilder::jsop_binary_arith(JSOp op) {
  // Pop both operands (rhs is on top) and defer to the main helper.
  MDefinition* rhs = current->pop();
  MDefinition* lhs = current->pop();
  return jsop_binary_arith(op, lhs, rhs);
}

AbortReasonOr<Ok> IonBuilder::jsop_pow() {
  MDefinition* exponent = current->pop();
  MDefinition* base = current->pop();

  // Prefer a specialized MPow producing a double.
  if (!forceInlineCaches()) {
    bool specialized = false;
    MOZ_TRY(powTrySpecialized(&specialized, base, exponent, MIRType::Double));
    if (specialized) {
      return Ok();
    }
  }

  // Next, try an inline-cache stub.
  bool usedStub = false;
  MOZ_TRY(arithTryBinaryStub(&usedStub, JSOp::Pow, base, exponent));
  if (usedStub) {
    return Ok();
  }

  // For now, use MIRType::None as a safe cover-all. See bug 1188079.
  MPow* pow = MPow::New(alloc(), base, exponent, MIRType::None);
  current->add(pow);
  current->push(pow);
  MOZ_ASSERT(pow->isEffectful());
  return resumeAfter(pow);
}

AbortReasonOr<Ok> IonBuilder::jsop_pos() {
  MDefinition* top = current->peek(-1);
  if (IsNumberType(top->type())) {
    // Already int32 or double: unary plus is a no-op. Mark the operand
    // implicitly used so it isn't optimized out (we could bail out).
    top->setImplicitlyUsedUnchecked();
    return Ok();
  }

  // Compile +x as x * 1.
  MDefinition* value = current->pop();
  MConstant* one = MConstant::New(alloc(), Int32Value(1));
  current->add(one);

  MBinaryArithInstruction* mul = MBinaryArithInstruction::New(
      alloc(), MDefinition::Opcode::Mul, value, one);

  // Decrease type from 'any type' to 'empty type' when one of the operands
  // is 'empty typed'.
  maybeMarkEmpty(mul);

  current->add(mul);
  current->push(mul);
  return resumeAfter(mul);
}

AbortReasonOr<Ok> IonBuilder::jsop_neg() {
  // Compile -x as (-1) * x. JSOp::Neg does not use a stack slot, so the
  // constant is handed to the multiply without any slot traffic.
  MConstant* minusOne = MConstant::New(alloc(), Int32Value(-1));
  current->add(minusOne);

  MDefinition* operand = current->pop();
  return jsop_binary_arith(JSOp::Mul, minusOne, operand);
}

AbortReasonOr<Ok> IonBuilder::jsop_tonumeric() {
  MDefinition* peeked = current->peek(-1);

  if (IsNumericType(peeked->type())) {
    // Elide the ToNumeric as we already unboxed the value.
    peeked->setImplicitlyUsedUnchecked();
    return Ok();
  }

  // Build the set of types a ToNumeric result can have: int32, double, or
  // bigint.
  LifoAlloc* lifoAlloc = alloc().lifoAlloc();
  TemporaryTypeSet* types = lifoAlloc->new_<TemporaryTypeSet>();
  if (!types) {
    return abort(AbortReason::Alloc);
  }

  types->addType(TypeSet::Int32Type(), lifoAlloc);
  types->addType(TypeSet::DoubleType(), lifoAlloc);
  types->addType(TypeSet::BigIntType(), lifoAlloc);

  if (peeked->type() == MIRType::Value && peeked->resultTypeSet() &&
      peeked->resultTypeSet()->isSubset(types)) {
    // Elide the ToNumeric because the arg is already a boxed numeric.
    peeked->setImplicitlyUsedUnchecked();
    return Ok();
  }

  // Otherwise, pop the value and add an MToNumeric.
  MDefinition* popped = current->pop();
  MToNumeric* ins = MToNumeric::New(alloc(), popped, types);
  current->add(ins);
  current->push(ins);

  // toValue() is effectful, so add a resume point.
  return resumeAfter(ins);
}

MDefinition* IonBuilder::unaryArithConvertToBinary(JSOp op,
                                                   MDefinition::Opcode* defOp) {
  // Map JSOp::Inc/JSOp::Dec to the equivalent binary MIR opcode (Add/Sub)
  // and return the constant 1 to use as the right-hand operand. The constant
  // is added to the current block but not pushed on the virtual stack.
  switch (op) {
    case JSOp::Inc:
      *defOp = MDefinition::Opcode::Add;
      break;
    case JSOp::Dec:
      *defOp = MDefinition::Opcode::Sub;
      break;
    default:
      MOZ_CRASH("unexpected unary opcode");
  }

  // Both cases use the same int32 constant 1 operand; create it once rather
  // than duplicating the creation in each case.
  MConstant* right = MConstant::New(alloc(), Int32Value(1));
  current->add(right);
  return right;
}

AbortReasonOr<Ok> IonBuilder::unaryArithTrySpecialized(bool* emitted, JSOp op,
                                                       MDefinition* value) {
  MOZ_ASSERT(*emitted == false);

  // Lower Inc(x)/Dec(x) to Add(x, 1)/Sub(x, 1) when the operand is already a
  // number.
  if (!IsNumberType(value->type())) {
    return Ok();
  }

  MDefinition::Opcode defOp;
  MDefinition* one = unaryArithConvertToBinary(op, &defOp);
  MOZ_TRY(binaryArithEmitSpecialized(
      defOp, binaryArithNumberSpecialization(value, one), value, one));

  *emitted = true;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::unaryArithTrySpecializedOnBaselineInspector(
    bool* emitted, JSOp op, MDefinition* value) {
  MOZ_ASSERT(*emitted == false);

  // Speculate on the arithmetic specialization the baseline caches observed
  // at this pc.
  MIRType specialization = inspector->expectedBinaryArithSpecialization(pc);
  if (specialization == MIRType::None) {
    return Ok();
  }

  MDefinition::Opcode defOp;
  MDefinition* one = unaryArithConvertToBinary(op, &defOp);
  MOZ_TRY(binaryArithEmitSpecialized(defOp, specialization, value, one));

  *emitted = true;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::jsop_inc_or_dec(JSOp op) {
  MDefinition* value = current->pop();

  // Try the type-based specialization first, then the baseline-IC-based one.
  bool specialized = false;
  MOZ_TRY(unaryArithTrySpecialized(&specialized, op, value));
  if (!specialized) {
    MOZ_TRY(
        unaryArithTrySpecializedOnBaselineInspector(&specialized, op, value));
  }
  if (specialized) {
    return Ok();
  }

  // Fall back to an inline-cache stub.
  MInstruction* cache = MUnaryCache::New(alloc(), value);
  current->add(cache);
  current->push(cache);

  // Decrease type from 'any type' to 'empty type' when one of the operands
  // is 'empty typed'.
  maybeMarkEmpty(cache);

  return resumeAfter(cache);
}

AbortReasonOr<Ok> IonBuilder::jsop_tostring() {
  // Already a string: nothing to do.
  if (current->peek(-1)->type() == MIRType::String) {
    return Ok();
  }

  MDefinition* input = current->pop();
  auto* toString =
      MToString::New(alloc(), input, MToString::SideEffectHandling::Supported);
  current->add(toString);
  current->push(toString);

  // With side effects supported, the conversion may be effectful; add a
  // resume point in that case.
  if (!toString->isEffectful()) {
    return Ok();
  }
  return resumeAfter(toString);
}

// RAII helper: while alive, directs the MIRGraph to accumulate MReturn blocks
// into the given vector; the previous accumulator is restored on destruction.
class AutoAccumulateReturns {
  MIRGraph& graph_;
  MIRGraphReturns* prev_;

 public:
  AutoAccumulateReturns(MIRGraph& graph, MIRGraphReturns& returns)
      : graph_(graph), prev_(graph.returnAccumulator()) {
    graph_.setReturnAccumulator(&returns);
  }
  ~AutoAccumulateReturns() { graph_.setReturnAccumulator(prev_); }
};

// Inline the scripted function |target| at the current call site: build the
// callee's MIR directly into this graph and join all of its returns into a
// fresh return block. On recoverable failure the graph is restored from a
// backup point and InliningStatus_NotInlined is returned.
IonBuilder::InliningResult IonBuilder::inlineScriptedCall(CallInfo& callInfo,
                                                          JSFunction* target) {
  MOZ_ASSERT(target->hasBytecode());
  MOZ_ASSERT(IsIonInlinableOp(JSOp(*pc)));

  // Snapshot |current| so the graph can be rolled back if inlining is
  // abandoned below (backtracking).
  MBasicBlock::BackupPoint backup(current);
  if (!backup.init(alloc())) {
    return abort(AbortReason::Alloc);
  }

  callInfo.setImplicitlyUsedUnchecked();

  // Create new |this| on the caller-side for inlined constructors.
  if (callInfo.constructing()) {
    MDefinition* thisDefn =
        createThis(target, callInfo.fun(), callInfo.getNewTarget(),
                   /* inlining = */ true);
    callInfo.setThis(thisDefn);
  }

  // Capture formals in the outer resume point.
  MOZ_TRY(callInfo.pushCallStack(&mirGen_, current));

  MResumePoint* outerResumePoint =
      MResumePoint::New(alloc(), current, pc, MResumePoint::Outer);
  if (!outerResumePoint) {
    return abort(AbortReason::Alloc);
  }
  current->setOuterResumePoint(outerResumePoint);

  // Pop formals again, except leave |fun| on stack for duration of call.
  callInfo.popCallStack(current);
  current->push(callInfo.fun());

  JSScript* calleeScript = target->nonLazyScript();
  BaselineInspector inspector(calleeScript);

  // Improve type information of |this| when not set.
  if (callInfo.constructing() && !callInfo.thisArg()->resultTypeSet()) {
    AutoSweepJitScript sweep(calleeScript);
    StackTypeSet* types =
        calleeScript->jitScript()->thisTypes(sweep, calleeScript);
    if (!types->unknown()) {
      // Clone the callee's observed |this| types into compiler-lifetime
      // storage and narrow |this| with a type barrier.
      TemporaryTypeSet* clonedTypes = types->clone(alloc_->lifoAlloc());
      if (!clonedTypes) {
        return abort(AbortReason::Alloc);
      }
      MTypeBarrier* barrier =
          MTypeBarrier::New(alloc(), callInfo.thisArg(), clonedTypes);
      current->add(barrier);
      // When the barrier narrows |this| to a singleton type, fold it to the
      // corresponding constant.
      if (barrier->type() == MIRType::Undefined) {
        callInfo.setThis(constant(UndefinedValue()));
      } else if (barrier->type() == MIRType::Null) {
        callInfo.setThis(constant(NullValue()));
      } else {
        callInfo.setThis(barrier);
      }
    }
  }

  // Start inlining.
  LifoAlloc* lifoAlloc = alloc_->lifoAlloc();
  InlineScriptTree* inlineScriptTree =
      info().inlineScriptTree()->addCallee(alloc_, pc, calleeScript);
  if (!inlineScriptTree) {
    return abort(AbortReason::Alloc);
  }
  CompileInfo* info = lifoAlloc->new_<CompileInfo>(
      mirGen_.runtime, calleeScript, target, (jsbytecode*)nullptr,
      this->info().analysisMode(),
      /* needsArgsObj = */ false, inlineScriptTree);
  if (!info) {
    return abort(AbortReason::Alloc);
  }

  // Accumulate the callee's MReturn blocks while its graph is being built.
  MIRGraphReturns returns(alloc());
  AutoAccumulateReturns aar(graph(), returns);

  // Build the graph.
  IonBuilder inlineBuilder(analysisContext, mirGen_, info, constraints(),
                           &inspector, nullptr, inliningDepth_ + 1, loopDepth_);
  AbortReasonOr<Ok> result =
      inlineBuilder.buildInline(this, outerResumePoint, callInfo);
  if (result.isErr()) {
    if (analysisContext && analysisContext->isExceptionPending()) {
      JitSpew(JitSpew_IonAbort, "Inline builder raised exception.");
      MOZ_ASSERT(result.unwrapErr() == AbortReason::Error);
      return Err(result.unwrapErr());
    }

    // Inlining the callee failed. Mark the callee as uninlineable only if
    // the inlining was aborted for a non-exception reason.
    switch (result.unwrapErr()) {
      case AbortReason::Disable:
        calleeScript->setUninlineable();
        if (!JitOptions.disableInlineBacktracking) {
          // Roll the graph back to the pre-inlining state; the caller will
          // emit a regular call instead.
          MBasicBlock* block = backup.restore();
          if (!block) {
            return abort(AbortReason::Alloc);
          }
          setCurrent(block);
          return InliningStatus_NotInlined;
        }
        return abort(AbortReason::Inlining);

      case AbortReason::PreliminaryObjects: {
        // Propagate groups with preliminary objects to this builder so the
        // compilation can be retried after analysis.
        const ObjectGroupVector& groups =
            inlineBuilder.abortedPreliminaryGroups();
        MOZ_ASSERT(!groups.empty());
        for (size_t i = 0; i < groups.length(); i++) {
          addAbortedPreliminaryGroup(groups[i]);
        }
        return Err(result.unwrapErr());
      }

      case AbortReason::Alloc:
      case AbortReason::Inlining:
      case AbortReason::Error:
        return Err(result.unwrapErr());

      case AbortReason::NoAbort:
        MOZ_CRASH("Abort with AbortReason::NoAbort");
        return abort(AbortReason::Error);
    }
  }

  if (returns.empty()) {
    // Inlining of functions that have no exit is not supported.
    calleeScript->setUninlineable();
    if (!JitOptions.disableInlineBacktracking) {
      MBasicBlock* block = backup.restore();
      if (!block) {
        return abort(AbortReason::Alloc);
      }
      setCurrent(block);
      return InliningStatus_NotInlined;
    }
    return abort(AbortReason::Inlining);
  }

  // Create return block.
  jsbytecode* postCall = GetNextPc(pc);
  MBasicBlock* returnBlock;
  MOZ_TRY_VAR(returnBlock, newBlock(current->stackDepth(), postCall));
  graph().addBlock(returnBlock);
  returnBlock->setCallerResumePoint(callerResumePoint_);

  // Inherit the slots from current and pop |fun|.
  returnBlock->inheritSlots(current);
  returnBlock->pop();

  // Accumulate return values.
  MDefinition* retvalDefn =
      patchInlinedReturns(target, callInfo, returns, returnBlock);
  if (!retvalDefn) {
    return abort(AbortReason::Alloc);
  }
  returnBlock->push(retvalDefn);

  // Initialize entry slots now that the stack has been fixed up.
  if (!returnBlock->initEntrySlots(alloc())) {
    return abort(AbortReason::Alloc);
  }

  MOZ_TRY(setCurrentAndSpecializePhis(returnBlock));

  return InliningStatus_Inlined;
}

// Patch one inlined exit block: replace its terminating MReturn with an MGoto
// to |bottom| and return the definition carrying the call's return value.
// Returns nullptr on allocation failure.
MDefinition* IonBuilder::patchInlinedReturn(JSFunction* target,
                                            CallInfo& callInfo,
                                            MBasicBlock* exit,
                                            MBasicBlock* bottom) {
  // Replaces the MReturn in the exit block with an MGoto.
  MDefinition* rdef = exit->lastIns()->toReturn()->input();
  exit->discardLastIns();

  // Constructors must be patched by the caller to always return an object.
  if (callInfo.constructing()) {
    if (target->isDerivedClassConstructor()) {
      // Derived class constructors contain extra bytecode to ensure an object
      // is always returned, so no additional patching is needed.
    } else if (rdef->type() == MIRType::Value) {
      // Unknown return: dynamically detect objects.
      MReturnFromCtor* filter =
          MReturnFromCtor::New(alloc(), rdef, callInfo.thisArg());
      exit->add(filter);
      rdef = filter;
    } else if (rdef->type() != MIRType::Object) {
      // Known non-object return: force |this|.
      rdef = callInfo.thisArg();
    }
  } else if (callInfo.isSetter()) {
    // Setters return their argument, not whatever value is returned.
    rdef = callInfo.getArg(0);
  }

  // Narrow the return value with observed type information, except for
  // setters whose result is the argument itself.
  if (!callInfo.isSetter()) {
    rdef = specializeInlinedReturn(rdef, exit);
  }

  MGoto* replacement = MGoto::New(alloc(), bottom);
  exit->end(replacement);
  // Phis in |bottom| are managed separately by patchInlinedReturns().
  if (!bottom->addPredecessorWithoutPhis(exit)) {
    return nullptr;
  }

  return rdef;
}

// Narrow the inlined return definition |rdef| using the type set observed at
// this call site's pc. Returns |rdef| unchanged when the observed types add
// no information; otherwise appends a non-movable type barrier to |exit| and
// returns that barrier's result.
MDefinition* IonBuilder::specializeInlinedReturn(MDefinition* rdef,
                                                 MBasicBlock* exit) {
  // Remove types from the return definition that weren't observed.
  TemporaryTypeSet* types = bytecodeTypes(pc);

  // The observed typeset doesn't contain extra information.
  if (types->empty() || types->unknown()) {
    return rdef;
  }

  // Decide if specializing is needed using the result typeset if available,
  // else use the result type.

  if (rdef->resultTypeSet()) {
    // Don't specialize if return typeset is a subset of the
    // observed typeset. The return typeset is already more specific.
    if (rdef->resultTypeSet()->isSubset(types)) {
      return rdef;
    }
  } else {
    MIRType observedType = types->getKnownMIRType();

    // Don't specialize if type is MIRType::Float32 and TI reports
    // MIRType::Double. Float is more specific than double.
    if (observedType == MIRType::Double && rdef->type() == MIRType::Float32) {
      return rdef;
    }

    // Don't specialize if the types agree, except for MIRType::Value
    // and MIRType::Object (when not unknown object), since the typeset
    // contains more specific information.
    if (observedType == rdef->type() && observedType != MIRType::Value &&
        (observedType != MIRType::Object || types->unknownObject())) {
      return rdef;
    }
  }

  // addTypeBarrier() appends to the |current| block, so point it at |exit|.
  setCurrent(exit);

  MTypeBarrier* barrier = nullptr;
  rdef = addTypeBarrier(rdef, types, BarrierKind::TypeSet, &barrier);
  if (barrier) {
    // Pin the barrier in the exit block so the narrowed type holds at the
    // join point.
    barrier->setNotMovable();
  }

  return rdef;
}

MDefinition* IonBuilder::patchInlinedReturns(JSFunction* target,
                                             CallInfo& callInfo,
                                             MIRGraphReturns& returns,
                                             MBasicBlock* bottom) {
  // Rewrite every accumulated MReturn into an MGoto targeting |bottom| and
  // produce the definition holding the call's return value, or nullptr on
  // allocation failure.
  MOZ_ASSERT(returns.length() > 0);

  // A single exit needs no phi.
  if (returns.length() == 1) {
    return patchInlinedReturn(target, callInfo, returns[0], bottom);
  }

  // Several exits: join their return values with a phi in |bottom|.
  MPhi* retPhi = MPhi::New(alloc());
  if (!retPhi->reserveLength(returns.length())) {
    return nullptr;
  }

  for (size_t i = 0; i < returns.length(); i++) {
    MDefinition* patched =
        patchInlinedReturn(target, callInfo, returns[i], bottom);
    if (!patched) {
      return nullptr;
    }
    retPhi->addInput(patched);
  }

  bottom->addPhi(retPhi);
  return retPhi;
}

// Decide whether |targetArg| may be inlined at the current call site.
// Correctness checks (canInlineTarget) run first, then a sequence of size,
// hotness, and depth heuristics; any veto short-circuits with a reason.
IonBuilder::InliningDecision IonBuilder::makeInliningDecision(
    JSObject* targetArg, CallInfo& callInfo) {
  // When there is no target, inlining is impossible.
  if (targetArg == nullptr) {
    return InliningDecision_DontInline;
  }

  // Inlining non-function targets is handled by inlineNonFunctionCall().
  if (!targetArg->is<JSFunction>()) {
    return InliningDecision_Inline;
  }

  JSFunction* target = &targetArg->as<JSFunction>();

  // Never inline during the arguments usage analysis.
  if (info().analysisMode() == Analysis_ArgumentsUsage) {
    return InliningDecision_DontInline;
  }

  // Native functions provide their own detection in inlineNativeCall().
  if (target->isNative()) {
    return InliningDecision_Inline;
  }

  // Determine whether inlining is possible at callee site.
  InliningDecision decision = canInlineTarget(target, callInfo);
  if (decision != InliningDecision_Inline) {
    return decision;
  }

  // Heuristics!
  JSScript* targetScript = target->nonLazyScript();

  // Callee must not be excessively large.
  // This heuristic also applies to the callsite as a whole.
  bool offThread = mirGen_.options.offThreadCompilationAvailable();
  if (targetScript->length() >
      optimizationInfo().inlineMaxBytecodePerCallSite(offThread)) {
    return DontInline(targetScript, "Vetoed: callee excessively large");
  }

  // Callee must have been called a few times to have somewhat stable
  // type information, except for definite properties analysis,
  // as the caller has not run yet.
  if (targetScript->getWarmUpCount() <
          optimizationInfo().inliningWarmUpThreshold() &&
      !targetScript->jitScript()->ionCompiledOrInlined() &&
      info().analysisMode() != Analysis_DefiniteProperties) {
    JitSpew(JitSpew_Inlining,
            "Cannot inline %s:%u:%u: callee is insufficiently hot.",
            targetScript->filename(), targetScript->lineno(),
            targetScript->column());
    return InliningDecision_WarmUpCountTooLow;
  }

  // Don't inline if the callee is known to inline a lot of code, to avoid
  // huge MIR graphs.
  uint32_t inlinedBytecodeLength =
      targetScript->jitScript()->inlinedBytecodeLength();
  if (inlinedBytecodeLength >
      optimizationInfo().inlineMaxCalleeInlinedBytecodeLength()) {
    return DontInline(targetScript,
                      "Vetoed: callee inlinedBytecodeLength is too big");
  }

  IonBuilder* outerBuilder = outermostBuilder();

  // Cap the total bytecode length we inline under a single script, to avoid
  // excessive inlining in pathological cases.
  size_t totalBytecodeLength =
      outerBuilder->inlinedBytecodeLength_ + targetScript->length();
  if (totalBytecodeLength > optimizationInfo().inlineMaxTotalBytecodeLength()) {
    return DontInline(targetScript,
                      "Vetoed: exceeding max total bytecode length");
  }

  // Cap the inlining depth.

  uint32_t maxInlineDepth;
  if (JitOptions.isSmallFunction(targetScript)) {
    maxInlineDepth = optimizationInfo().smallFunctionMaxInlineDepth();
  } else {
    maxInlineDepth = optimizationInfo().maxInlineDepth();

    // Caller must not be excessively large.
    if (script()->length() >=
        optimizationInfo().inliningMaxCallerBytecodeLength()) {
      return DontInline(targetScript, "Vetoed: caller excessively large");
    }
  }

  // Reuse |outerBuilder| computed above instead of calling
  // outermostBuilder() a second time.
  JitScript* outerJitScript = outerBuilder->script()->jitScript();
  if (inliningDepth_ >= maxInlineDepth) {
    // We hit the depth limit and won't inline this function. Give the
    // outermost script a max inlining depth of 0, so that it won't be
    // inlined in other scripts. This heuristic is currently only used
    // when we're inlining scripts with loops, see the comment below.
    // These heuristics only apply to the highest optimization level.
    if (isHighestOptimizationLevel()) {
      outerJitScript->setMaxInliningDepth(0);
    }

    return DontInline(targetScript, "Vetoed: exceeding allowed inline depth");
  }

  // Inlining functions with loops can be complicated. For instance, if we're
  // close to the inlining depth limit and we inline the function f below, we
  // can no longer inline the call to g:
  //
  //   function f() {
  //      while (cond) {
  //          g();
  //      }
  //   }
  //
  // If the loop has many iterations, it's more efficient to call f and inline
  // g in f.
  //
  // To avoid this problem, we record a separate max inlining depth for each
  // script, indicating at which depth we won't be able to inline all functions
  // we inlined this time. This solves the issue above, because we will only
  // inline f if it means we can also inline g.
  //
  // These heuristics only apply to the highest optimization level: other tiers
  // do very little inlining and performance is not as much of a concern there.
  if (isHighestOptimizationLevel() && targetScript->hasLoops() &&
      inliningDepth_ >= targetScript->jitScript()->maxInliningDepth()) {
    return DontInline(targetScript,
                      "Vetoed: exceeding allowed script inline depth");
  }

  // Update the max depth at which we can inline the outer script.
  MOZ_ASSERT(maxInlineDepth > inliningDepth_);
  uint32_t scriptInlineDepth = maxInlineDepth - inliningDepth_ - 1;
  if (scriptInlineDepth < outerJitScript->maxInliningDepth() &&
      isHighestOptimizationLevel()) {
    outerJitScript->setMaxInliningDepth(scriptInlineDepth);
  }

  // End of heuristics, we will inline this function.

  outerBuilder->inlinedBytecodeLength_ += targetScript->length();

  return InliningDecision_Inline;
}

AbortReasonOr<Ok> IonBuilder::selectInliningTargets(
    const InliningTargets& targets, CallInfo& callInfo, BoolVector& choiceSet,
    uint32_t* numInlineable) {
  *numInlineable = 0;

  // Reserve up front so the loop below can append infallibly.
  if (!choiceSet.reserve(targets.length())) {
    return abort(AbortReason::Alloc);
  }

  // Don't inline polymorphic sites during the definite properties analysis.
  // AddClearDefiniteFunctionUsesInScript depends on this for correctness.
  if (info().analysisMode() == Analysis_DefiniteProperties &&
      targets.length() > 1) {
    return Ok();
  }

  // Ask, per target, whether it may be inlined here.
  uint32_t accumulatedSize = 0;
  for (size_t i = 0; i < targets.length(); i++) {
    JSObject* target = targets[i].target;

    bool inlineable;
    switch (makeInliningDecision(target, callInfo)) {
      case InliningDecision_Error:
        return abort(AbortReason::Error);
      case InliningDecision_DontInline:
      case InliningDecision_WarmUpCountTooLow:
        inlineable = false;
        break;
      case InliningDecision_Inline:
        inlineable = true;
        break;
      default:
        MOZ_CRASH("Unhandled InliningDecision value!");
    }

    if (!target->is<JSFunction>()) {
      // Non-function targets are not supported by polymorphic inlining.
      inlineable = false;
    } else if (inlineable && target->as<JSFunction>().isInterpreted()) {
      // Enforce a maximum inlined bytecode limit at the callsite.
      accumulatedSize += target->as<JSFunction>().nonLazyScript()->length();
      bool offThread = mirGen_.options.offThreadCompilationAvailable();
      if (accumulatedSize >
          optimizationInfo().inlineMaxBytecodePerCallSite(offThread)) {
        inlineable = false;
      }
    }

    // Only use a group guard and inline the target if we will recompile when
    // the target function gets a new group.
    if (inlineable && targets[i].group) {
      TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(targets[i].group);
      if (!key->hasStableClassAndProto(constraints())) {
        inlineable = false;
      }
    }

    choiceSet.infallibleAppend(inlineable);
    if (inlineable) {
      (*numInlineable)++;
    }
  }

  MOZ_ASSERT(choiceSet.length() == targets.length());
  return Ok();
}

// A GetPropertyCache is usable for inlined dispatch only if it is keyed on
// the call's |this| object and carries a non-empty inline property table.
static bool CanInlineGetPropertyCache(MGetPropertyCache* cache,
                                      MDefinition* thisDef) {
  if (cache->value()->type() != MIRType::Object ||
      cache->value() != thisDef) {
    return false;
  }

  InlinePropertyTable* table = cache->propTable();
  return table != nullptr && table->numEntries() > 0;
}

// Ownership wrapper around an MGetPropertyCache: while the wrapper still owns
// the cache at destruction time, it releases the pre-allocated prior resume
// point (if one exists) back to the cache's block.
class WrapMGetPropertyCache {
  MGetPropertyCache* cache_;

 private:
  // Release the resume point pre-allocated for the fallback path, if any.
  void discardPriorResumePoint() {
    if (!cache_) {
      return;
    }

    InlinePropertyTable* propTable = cache_->propTable();
    if (!propTable) {
      return;
    }
    if (MResumePoint* rp = propTable->takePriorResumePoint()) {
      cache_->block()->discardPreAllocatedResumePoint(rp);
    }
  }

 public:
  explicit WrapMGetPropertyCache(MGetPropertyCache* cache) : cache_(cache) {}

  ~WrapMGetPropertyCache() { discardPriorResumePoint(); }

  MGetPropertyCache* get() { return cache_; }
  MGetPropertyCache* operator->() { return get(); }

  // Returns the wrapped cache (transferring ownership out of the wrapper)
  // if the GetPropertyCache can be moved into the ObjectGroup fallback
  // path; otherwise returns nullptr and the wrapper keeps ownership.
  MGetPropertyCache* moveableCache(bool hasTypeBarrier, MDefinition* thisDef) {
    // The cache is only movable when nothing else consumes it: with no type
    // barrier it must have no uses at all; with a barrier, that barrier must
    // be the single consumer.
    if (!hasTypeBarrier) {
      if (cache_->hasUses()) {
        return nullptr;
      }
    } else {
      MOZ_ASSERT(cache_->hasUses());
      if (!cache_->hasOneUse()) {
        return nullptr;
      }
    }

    // The |this| object must feed the cache, and its property table must
    // carry enough ObjectGroup information to dispatch on.
    if (!CanInlineGetPropertyCache(cache_, thisDef)) {
      return nullptr;
    }

    MGetPropertyCache* moved = cache_;
    cache_ = nullptr;
    return moved;
  }
};

MGetPropertyCache* IonBuilder::getInlineableGetPropertyCache(
    CallInfo& callInfo) {
  // Constructor calls never derive their callee from a movable cache.
  if (callInfo.constructing()) {
    return nullptr;
  }

  // Both |this| and the callee must be known objects.
  MDefinition* thisDef = callInfo.thisArg();
  MDefinition* funcDef = callInfo.fun();
  if (thisDef->type() != MIRType::Object ||
      funcDef->type() != MIRType::Object) {
    return nullptr;
  }

  // Pattern 1: the callee is directly an MGetPropertyCache; one with no
  // uses may be optimized away.
  if (funcDef->isGetPropertyCache()) {
    WrapMGetPropertyCache wrapper(funcDef->toGetPropertyCache());
    return wrapper.moveableCache(/* hasTypeBarrier = */ false, thisDef);
  }

  // Pattern 2 (common): MTypeBarrier[MIRType::Object] <- MGetPropertyCache.
  if (funcDef->isTypeBarrier()) {
    MTypeBarrier* barrier = funcDef->toTypeBarrier();
    if (barrier->hasUses() || barrier->type() != MIRType::Object ||
        !barrier->input()->isGetPropertyCache()) {
      return nullptr;
    }

    WrapMGetPropertyCache wrapper(barrier->input()->toGetPropertyCache());
    return wrapper.moveableCache(/* hasTypeBarrier = */ true, thisDef);
  }

  return nullptr;
}

// Inline a call with a single known target. Non-function targets are not
// inlined here (inlineNonFunctionCall handles those). Natives go through
// their dedicated inlining table; scripted functions are built inline.
IonBuilder::InliningResult IonBuilder::inlineSingleCall(CallInfo& callInfo,
                                                        JSObject* targetArg) {
  if (!targetArg->is<JSFunction>()) {
    return InliningStatus_NotInlined;
  }

  JSFunction* target = &targetArg->as<JSFunction>();
  if (target->isNative()) {
    // Declare |status| at the point of initialization: the original left it
    // uninitialized at function scope, dead on both early-return paths.
    InliningStatus status;
    MOZ_TRY_VAR(status, inlineNativeCall(callInfo, target));
    return status;
  }

  // Scripted function: splice the callee's MIR graph into ours.
  return inlineScriptedCall(callInfo, target);
}

// Try to inline a call site given the set of possible |targets|. Handles the
// monomorphic (single-target, no cache) case directly and delegates
// polymorphic dispatch to inlineCalls().
IonBuilder::InliningResult IonBuilder::inlineCallsite(
    const InliningTargets& targets, CallInfo& callInfo) {
  if (targets.empty()) {
    return InliningStatus_NotInlined;
  }

  // Is the function provided by an MGetPropertyCache?
  // If so, the cache may be movable to a fallback path, with a dispatch
  // instruction guarding on the incoming ObjectGroup.
  WrapMGetPropertyCache propCache(getInlineableGetPropertyCache(callInfo));
  keepFallbackFunctionGetter(propCache.get());

  // Inline single targets -- unless they derive from a cache, in which case
  // avoiding the cache and guarding is still faster.
  if (!propCache.get() && targets.length() == 1) {
    JSObject* target = targets[0].target;

    InliningDecision decision = makeInliningDecision(target, callInfo);
    switch (decision) {
      case InliningDecision_Error:
        return abort(AbortReason::Error);
      case InliningDecision_DontInline:
        return InliningStatus_NotInlined;
      case InliningDecision_WarmUpCountTooLow:
        return InliningStatus_WarmUpCountTooLow;
      case InliningDecision_Inline:
        break;
    }

    // Inlining will eliminate uses of the original callee, but it needs to
    // be preserved in phis if we bail out.  Mark the old callee definition as
    // implicitly used to ensure this happens.
    callInfo.fun()->setImplicitlyUsedUnchecked();

    // If the callee is not going to be a lambda (which may vary across
    // different invocations), then the callee definition can be replaced by a
    // constant.
    if (target->isSingleton()) {
      // Replace the function with an MConstant.
      MConstant* constFun = constant(ObjectValue(*target));
      if (callInfo.constructing() &&
          callInfo.getNewTarget() == callInfo.fun()) {
        callInfo.setNewTarget(constFun);
      }
      callInfo.setFun(constFun);
    }

    return inlineSingleCall(callInfo, target);
  }

  // Choose a subset of the targets for polymorphic inlining.
  BoolVector choiceSet(alloc());
  uint32_t numInlined;
  MOZ_TRY(selectInliningTargets(targets, callInfo, choiceSet, &numInlined));
  if (numInlined == 0) {
    return InliningStatus_NotInlined;
  }

  // Perform a polymorphic dispatch.
  MOZ_TRY(inlineCalls(callInfo, targets, choiceSet, propCache.get()));

  return InliningStatus_Inlined;
}

AbortReasonOr<Ok> IonBuilder::inlineGenericFallback(
    const Maybe<CallTargets>& targets, CallInfo& callInfo,
    MBasicBlock* dispatchBlock) {
  // Emit the non-inlined path: a block branching off |dispatchBlock| that
  // pops the call arguments and performs a generic MCall. On success the
  // call's return block is left as |current| for the caller.
  MBasicBlock* genericBlock;
  MOZ_TRY_VAR(genericBlock, newBlock(dispatchBlock, pc));
  graph().addBlock(genericBlock);

  // Track the stack state of this path with its own CallInfo.
  CallInfo genericInfo(alloc(), pc, callInfo.constructing(),
                       callInfo.ignoresReturnValue());
  if (!genericInfo.init(callInfo)) {
    return abort(AbortReason::Alloc);
  }
  genericInfo.popCallStack(genericBlock);

  // makeCall() operates on the stateful |current| block.
  MOZ_TRY(setCurrentAndSpecializePhis(genericBlock));
  MOZ_TRY(makeCall(targets, genericInfo));

  return Ok();
}

// Move an MGetPropertyCache (and its optional MTypeBarrier) into the fallback
// path of an MObjectGroupDispatch, then generate a generic call there. On
// success |*fallbackTarget| is the block the dispatch should jump to for the
// fallback case, and |current| is the generic call's return block.
AbortReasonOr<Ok> IonBuilder::inlineObjectGroupFallback(
    const Maybe<CallTargets>& targets, CallInfo& callInfo,
    MBasicBlock* dispatchBlock, MObjectGroupDispatch* dispatch,
    MGetPropertyCache* cache, MBasicBlock** fallbackTarget) {
  // Getting here implies the following:
  // 1. The call function is an MGetPropertyCache, or an MGetPropertyCache
  //    followed by an MTypeBarrier.
  MOZ_ASSERT(callInfo.fun()->isGetPropertyCache() ||
             callInfo.fun()->isTypeBarrier());

  // 2. The MGetPropertyCache has inlineable cases by guarding on the
  // ObjectGroup.
  MOZ_ASSERT(dispatch->numCases() > 0);

  // 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only
  //    have at most a single use.
  MOZ_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses());
  MOZ_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse());

  // This means that no resume points yet capture the MGetPropertyCache,
  // so everything from the MGetPropertyCache up until the call is movable.
  // We now move the MGetPropertyCache and friends into a fallback path.
  MOZ_ASSERT(cache->idempotent());

  // Create a new CallInfo to track modified state within the fallback path.
  CallInfo fallbackInfo(alloc(), pc, callInfo.constructing(),
                        callInfo.ignoresReturnValue());
  if (!fallbackInfo.init(callInfo)) {
    return abort(AbortReason::Alloc);
  }

  // Capture stack prior to the call operation. This captures the function.
  MResumePoint* preCallResumePoint =
      MResumePoint::New(alloc(), dispatchBlock, pc, MResumePoint::ResumeAt);
  if (!preCallResumePoint) {
    return abort(AbortReason::Alloc);
  }

  // Sanity check: the function being called sits at the expected stack slot
  // of the pre-call resume point.
  DebugOnly<size_t> preCallFuncIndex =
      preCallResumePoint->stackDepth() - callInfo.numFormals();
  MOZ_ASSERT(preCallResumePoint->getOperand(preCallFuncIndex) ==
             fallbackInfo.fun());

  // In the dispatch block, replace the function's slot entry with Undefined.
  MConstant* undefined = MConstant::New(alloc(), UndefinedValue());
  dispatchBlock->add(undefined);
  dispatchBlock->rewriteAtDepth(-int(callInfo.numFormals()), undefined);

  // Construct a block that does nothing but remove formals from the stack.
  // This is effectively changing the entry resume point of the later fallback
  // block.
  MBasicBlock* prepBlock;
  MOZ_TRY_VAR(prepBlock, newBlock(dispatchBlock, pc));
  graph().addBlock(prepBlock);
  fallbackInfo.popCallStack(prepBlock);

  // Construct a block into which the MGetPropertyCache can be moved.
  // This is subtle: the pc and resume point are those of the MGetPropertyCache!
  InlinePropertyTable* propTable = cache->propTable();
  MResumePoint* priorResumePoint = propTable->takePriorResumePoint();
  MOZ_ASSERT(propTable->pc() != nullptr);
  MOZ_ASSERT(priorResumePoint != nullptr);
  MBasicBlock* getPropBlock;
  MOZ_TRY_VAR(getPropBlock,
              newBlock(prepBlock, propTable->pc(), priorResumePoint));
  graph().addBlock(getPropBlock);

  prepBlock->end(MGoto::New(alloc(), getPropBlock));

  // Since the getPropBlock inherited the stack from right before the
  // MGetPropertyCache, the target of the MGetPropertyCache is still on the
  // stack.
  DebugOnly<MDefinition*> checkObject = getPropBlock->pop();
  MOZ_ASSERT(checkObject == cache->value());

  // Move the MGetPropertyCache and friends into the getPropBlock.
  if (fallbackInfo.fun()->isGetPropertyCache()) {
    MOZ_ASSERT(fallbackInfo.fun()->toGetPropertyCache() == cache);
    getPropBlock->addFromElsewhere(cache);
    getPropBlock->push(cache);
  } else {
    // The cache is consumed by a type barrier; move both, pushing the
    // barrier's (object-typed) result as the callee.
    MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
    MOZ_ASSERT(barrier->type() == MIRType::Object);
    MOZ_ASSERT(barrier->input()->isGetPropertyCache());
    MOZ_ASSERT(barrier->input()->toGetPropertyCache() == cache);

    getPropBlock->addFromElsewhere(cache);
    getPropBlock->addFromElsewhere(barrier);
    getPropBlock->push(barrier);
  }

  // Construct an end block with the correct resume point.
  MBasicBlock* preCallBlock;
  MOZ_TRY_VAR(preCallBlock, newBlock(getPropBlock, pc, preCallResumePoint));
  graph().addBlock(preCallBlock);
  getPropBlock->end(MGoto::New(alloc(), preCallBlock));

  // Now inline the MCallGeneric, using preCallBlock as the dispatch point.
  MOZ_TRY(inlineGenericFallback(targets, fallbackInfo, preCallBlock));

  // inlineGenericFallback() set the return block as |current|.
  preCallBlock->end(MGoto::New(alloc(), current));
  *fallbackTarget = prepBlock;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::inlineCalls(CallInfo& callInfo,
                                          const InliningTargets& targets,
                                          BoolVector& choiceSet,
                                          MGetPropertyCache* maybeCache) {
  // Only handle polymorphic inlining.
  MOZ_ASSERT(IsIonInlinableOp(JSOp(*pc)));
  MOZ_ASSERT(choiceSet.length() == targets.length());
  MOZ_ASSERT_IF(!maybeCache, targets.length() >= 2);
  MOZ_ASSERT_IF(maybeCache, targets.length() >= 1);
  MOZ_ASSERT_IF(maybeCache, maybeCache->value()->type() == MIRType::Object);

  MBasicBlock* dispatchBlock = current;
  callInfo.setImplicitlyUsedUnchecked();
  MOZ_TRY(callInfo.pushCallStack(&mirGen_, dispatchBlock));

  // Patch any InlinePropertyTable to only contain functions that are
  // inlineable. The InlinePropertyTable will also be patched at the end to
  // exclude native functions that vetoed inlining.
  if (maybeCache) {
    InlinePropertyTable* propTable = maybeCache->propTable();
    propTable->trimToTargets(targets);
    if (propTable->numEntries() == 0) {
      maybeCache = nullptr;
    }
  }

  // Generate a dispatch based on guard kind.
  MDispatchInstruction* dispatch;
  if (maybeCache) {
    dispatch = MObjectGroupDispatch::New(alloc(), maybeCache->value(),
                                         maybeCache->propTable());
    callInfo.fun()->setImplicitlyUsedUnchecked();
  } else {
    dispatch = MFunctionDispatch::New(alloc(), callInfo.fun());
  }

  MOZ_ASSERT(dispatchBlock->stackDepth() >= callInfo.numFormals());
  uint32_t stackDepth = dispatchBlock->stackDepth() - callInfo.numFormals() + 1;

  // Generate a return block to host the rval-collecting MPhi.
  jsbytecode* postCall = GetNextPc(pc);
  MBasicBlock* returnBlock;
  MOZ_TRY_VAR(returnBlock, newBlock(stackDepth, postCall));
  graph().addBlock(returnBlock);
  returnBlock->setCallerResumePoint(callerResumePoint_);

  // Set up stack, used to manually create a post-call resume point.
  returnBlock->inheritSlots(dispatchBlock);
  callInfo.popCallStack(returnBlock);

  MPhi* retPhi = MPhi::New(alloc());
  returnBlock->addPhi(retPhi);
  returnBlock->push(retPhi);

  // Create a resume point from current stack state.
  if (!returnBlock->initEntrySlots(alloc())) {
    return abort(AbortReason::Alloc);
  }

  // Reserve the capacity for the phi.
  // Note: this is an upperbound. Unreachable targets and uninlineable natives
  // are also counted.
  uint32_t count = 1;  // Possible fallback block.
  for (uint32_t i = 0; i < targets.length(); i++) {
    if (choiceSet[i]) {
      count++;
    }
  }
  if (!retPhi->reserveLength(count)) {
    return abort(AbortReason::Alloc);
  }

  // Inline each of the inlineable targets.
  for (uint32_t i = 0; i < targets.length(); i++) {
    // Target must be inlineable.
    if (!choiceSet[i]) {
      continue;
    }

    // Target must be reachable by the MDispatchInstruction.
    JSFunction* target = &targets[i].target->as<JSFunction>();
    if (maybeCache && !maybeCache->propTable()->hasFunction(target)) {
      choiceSet[i] = false;
      continue;
    }

    MBasicBlock* inlineBlock;
    MOZ_TRY_VAR(inlineBlock, newBlock(dispatchBlock, pc));
    graph().addBlock(inlineBlock);

    // Create a function MConstant to use in the entry ResumePoint. If we
    // can't use a constant, add a no-op MPolyInlineGuard, to prevent
    // hoisting env chain gets above the dispatch instruction.
    MInstruction* funcDef;
    if (target->isSingleton()) {
      funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints());
    } else {
      funcDef = MPolyInlineGuard::New(alloc(), callInfo.fun());
    }

    funcDef->setImplicitlyUsedUnchecked();
    dispatchBlock->add(funcDef);

    // Use the inlined callee in the inline resume point and on stack.
    int funIndex =
        inlineBlock->entryResumePoint()->stackDepth() - callInfo.numFormals();
    inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef);
    inlineBlock->rewriteSlot(funIndex, funcDef);

    // Create a new CallInfo to track modified state within the inline block.
    CallInfo inlineInfo(alloc(), pc, callInfo.constructing(),
                        callInfo.ignoresReturnValue());
    if (!inlineInfo.init(callInfo)) {
      return abort(AbortReason::Alloc);
    }
    inlineInfo.popCallStack(inlineBlock);
    inlineInfo.setFun(funcDef);

    if (callInfo.constructing() && callInfo.getNewTarget() == callInfo.fun()) {
      inlineInfo.setNewTarget(funcDef);
    }

    if (maybeCache) {
      // Assign the 'this' value a TypeSet specialized to the groups that
      // can generate this inlining target.
      MOZ_ASSERT(callInfo.thisArg() == maybeCache->value());
      TemporaryTypeSet* thisTypes =
          maybeCache->propTable()->buildTypeSetForFunction(alloc(), target);
      if (!thisTypes) {
        return abort(AbortReason::Alloc);
      }

      MFilterTypeSet* filter =
          MFilterTypeSet::New(alloc(), inlineInfo.thisArg(), thisTypes);
      inlineBlock->add(filter);
      inlineInfo.setThis(filter);
    }

    // Inline the call into the inlineBlock.
    MOZ_TRY(setCurrentAndSpecializePhis(inlineBlock));
    InliningStatus status;
    MOZ_TRY_VAR(status, inlineSingleCall(inlineInfo, target));

    // Natives may veto inlining.
    if (status == InliningStatus_NotInlined) {
      MOZ_ASSERT(current == inlineBlock);
      graph().removeBlock(inlineBlock);
      choiceSet[i] = false;
      continue;
    }

    // inlineSingleCall() changed |current| to the inline return block.
    MBasicBlock* inlineReturnBlock = current;
    setCurrent(dispatchBlock);

    // Connect the inline path to the returnBlock.
    if (!dispatch->addCase(target, targets[i].group, inlineBlock)) {
      return abort(AbortReason::Alloc);
    }

    MDefinition* retVal = inlineReturnBlock->peek(-1);
    retPhi->addInput(retVal);
    inlineReturnBlock->end(MGoto::New(alloc(), returnBlock));
    if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock)) {
      return abort(AbortReason::Alloc);
    }
  }

  // Patch the InlinePropertyTable to not dispatch to vetoed paths.
  bool useFallback;
  if (maybeCache) {
    InlinePropertyTable* propTable = maybeCache->propTable();
    propTable->trimTo(targets, choiceSet);

    if (propTable->numEntries() == 0 || !propTable->hasPriorResumePoint()) {
      // Output a generic fallback path.
      MOZ_ASSERT_IF(propTable->numEntries() == 0, dispatch->numCases() == 0);
      maybeCache = nullptr;
      useFallback = true;
    } else {
      // We need a fallback path if the ObjectGroup dispatch does not
      // handle all incoming objects.
      useFallback = false;
      TemporaryTypeSet* objectTypes = maybeCache->value()->resultTypeSet();
      for (uint32_t i = 0; i < objectTypes->getObjectCount(); i++) {
        TypeSet::ObjectKey* obj = objectTypes->getObject(i);
        if (!obj) {
          continue;
        }

        if (!obj->isGroup()) {
          useFallback = true;
          break;
        }

        if (!propTable->hasObjectGroup(obj->group())) {
          useFallback = true;
          break;
        }
      }

      if (!useFallback) {
        // The object group dispatch handles all possible incoming
        // objects, so the cache and barrier will not be reached and
        // can be eliminated.
        if (callInfo.fun()->isGetPropertyCache()) {
          MOZ_ASSERT(callInfo.fun() == maybeCache);
        } else {
          MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
          MOZ_ASSERT(!barrier->hasUses());
          MOZ_ASSERT(barrier->type() == MIRType::Object);
          MOZ_ASSERT(barrier->input()->isGetPropertyCache());
          MOZ_ASSERT(barrier->input()->toGetPropertyCache() == maybeCache);
          barrier->block()->discard(barrier);
        }

        MOZ_ASSERT(!maybeCache->hasUses());
        maybeCache->block()->discard(maybeCache);
      }
    }
  } else {
    useFallback = dispatch->numCases() < targets.length();
  }

  // If necessary, generate a fallback path.
  if (useFallback) {
    // Annotate the fallback call with the target information.
    Maybe<CallTargets> remainingTargets;
    remainingTargets.emplace(alloc());
    for (uint32_t i = 0; i < targets.length(); i++) {
      if (!maybeCache && choiceSet[i]) {
        continue;
      }

      JSObject* target = targets[i].target;
      if (!target->is<JSFunction>()) {
        remainingTargets = Nothing();
        break;
      }
      if (!remainingTargets->append(&target->as<JSFunction>())) {
        return abort(AbortReason::Alloc);
      }
    }

    // Generate fallback blocks, and set |current| to the fallback return block.
    if (maybeCache) {
      MBasicBlock* fallbackTarget;
      MOZ_TRY(inlineObjectGroupFallback(
          remainingTargets, callInfo, dispatchBlock,
          dispatch->toObjectGroupDispatch(), maybeCache, &fallbackTarget));
      dispatch->addFallback(fallbackTarget);
    } else {
      MOZ_TRY(inlineGenericFallback(remainingTargets, callInfo, dispatchBlock));
      dispatch->addFallback(current);
    }

    MBasicBlock* fallbackReturnBlock = current;

    // Connect fallback case to return infrastructure.
    MDefinition* retVal = fallbackReturnBlock->peek(-1);
    retPhi->addInput(retVal);
    fallbackReturnBlock->end(MGoto::New(alloc(), returnBlock));
    if (!returnBlock->addPredecessorWithoutPhis(fallbackReturnBlock)) {
      return abort(AbortReason::Alloc);
    }
  }

  // Finally add the dispatch instruction.
  // This must be done at the end so that add() may be called above.
  dispatchBlock->end(dispatch);

  // Check the depth change: +1 for retval
  MOZ_ASSERT(returnBlock->stackDepth() ==
             dispatchBlock->stackDepth() - callInfo.numFormals() + 1);

  graph().moveBlockToEnd(returnBlock);
  return setCurrentAndSpecializePhis(returnBlock);
}

MInstruction* IonBuilder::createNamedLambdaObject(MDefinition* callee,
                                                  MDefinition* env) {
  // Fetch the template object Baseline recorded for this named-lambda
  // environment; it drives the inline object creation below.
  LexicalEnvironmentObject* templateObj =
      inspector->templateNamedLambdaObject();

  // The lambda's name lives in a fixed slot: NamedLambda objects always
  // have enough inline room, so dynamic slots never occur here.
  MOZ_ASSERT(!templateObj->hasDynamicSlots());

  // Allocate the object. Nothing between this allocation and the slot
  // stores below may bail out, or the dynamic slots pointer could leak.
  MInstruction* namedLambdaObj =
      MNewNamedLambdaObject::New(alloc(), templateObj);
  current->add(namedLambdaObj);

  // Fill in the reserved slots. No post barrier is needed: either the new
  // object is in the nursery, or a tenured allocation implies a minor GC
  // already moved env/callee out of the nursery.
  MStoreFixedSlot* storeEnv = MStoreFixedSlot::New(
      alloc(), namedLambdaObj, NamedLambdaObject::enclosingEnvironmentSlot(),
      env);
  current->add(storeEnv);
  MStoreFixedSlot* storeCallee = MStoreFixedSlot::New(
      alloc(), namedLambdaObj, NamedLambdaObject::lambdaSlot(), callee);
  current->add(storeCallee);

  return namedLambdaObj;
}

AbortReasonOr<MInstruction*> IonBuilder::createCallObject(MDefinition* callee,
                                                          MDefinition* env) {
  // Baseline's template CallObject drives the inline object creation below.
  CallObject* templateObj = inspector->templateCallObject();
  MConstant* templateCst =
      MConstant::NewConstraintlessObject(alloc(), templateObj);
  current->add(templateCst);

  // Allocate the call object itself.
  MNewCallObject* callObj = MNewCallObject::New(alloc(), templateCst);
  current->add(callObj);

  // Initialize the reserved slots. No post barrier is needed, for the same
  // reason as in createNamedLambdaObject.
  MStoreFixedSlot* storeEnv = MStoreFixedSlot::New(
      alloc(), callObj, CallObject::enclosingEnvironmentSlot(), env);
  current->add(storeEnv);
  MStoreFixedSlot* storeCallee =
      MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee);
  current->add(storeCallee);

  // Copy each closed-over formal into its slot. When the function has
  // parameter expressions, the slot is instead seeded with the
  // uninitialized-lexical magic value.
  MSlots* dynamicSlots = nullptr;
  for (PositionalFormalParameterIter formalIter(script()); formalIter;
       formalIter++) {
    if (!formalIter.closedOver()) {
      continue;
    }

    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

    unsigned slotIndex = formalIter.location().slot();
    unsigned argIndex = formalIter.argumentSlot();
    unsigned fixedCount = templateObj->numFixedSlots();

    MDefinition* initValue;
    if (script()->functionHasParameterExprs()) {
      initValue = constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
    } else {
      initValue = current->getSlot(info().argSlotUnchecked(argIndex));
    }

    if (slotIndex < fixedCount) {
      current->add(
          MStoreFixedSlot::New(alloc(), callObj, slotIndex, initValue));
    } else {
      // Lazily materialize the MSlots the first time a dynamic slot is
      // written.
      if (!dynamicSlots) {
        dynamicSlots = MSlots::New(alloc(), callObj);
        current->add(dynamicSlots);
      }
      current->add(MStoreSlot::New(alloc(), dynamicSlots,
                                   slotIndex - fixedCount, initValue));
    }
  }

  return AbortReasonOr<MInstruction*>(callObj);
}

MDefinition* IonBuilder::createThisScripted(MDefinition* callee,
                                            MDefinition* newTarget) {
  // Load newTarget.prototype.
  //
  // Because this load has no corresponding explicit bytecode op, we cannot
  // use resumeAfter(), so the operation MUST be idempotent. Getters may not
  // override |prototype| fetching, which makes that hold. Strategy:
  //   - prefer an idempotent property cache;
  //   - once such a cache has been invalidated, fall back to
  //     MCallGetProperty (a non-idempotent cache is not an option).
  //
  // Note: both forms can trigger a GC, and thus invalidation.
  MInstruction* getProto;
  if (invalidatedIdempotentCache()) {
    MCallGetProperty* callGetProp =
        MCallGetProperty::New(alloc(), newTarget, names().prototype);
    callGetProp->setIdempotent();
    getProto = callGetProp;
  } else {
    MConstant* id = constant(StringValue(names().prototype));
    MGetPropertyCache* getPropCache =
        MGetPropertyCache::New(alloc(), newTarget, id,
                               /* monitored = */ false);
    getPropCache->setIdempotent();
    getProto = getPropCache;
  }
  current->add(getProto);

  // Construct |this| from the fetched prototype.
  MCreateThisWithProto* createThis =
      MCreateThisWithProto::New(alloc(), callee, newTarget, getProto);
  current->add(createThis);
  return createThis;
}

JSObject* IonBuilder::getSingletonPrototype(JSFunction* target) {
  // Use type information to find the unique object stored in
  // target.prototype; returns nullptr when no singleton is known.
  TypeSet::ObjectKey* targetKey = TypeSet::ObjectKey::get(target);
  if (targetKey->unknownProperties()) {
    return nullptr;
  }

  HeapTypeSetKey protoProperty =
      targetKey->property(NameToId(names().prototype));
  return protoProperty.singleton(constraints());
}

MDefinition* IonBuilder::createThisScriptedSingleton(JSFunction* target) {
  // Emit an inline |this| allocation when target.prototype is a known
  // singleton and Baseline observed a compatible template object.
  if (!target->hasBytecode()) {
    return nullptr;
  }

  // The prototype must be a known singleton object.
  JSObject* proto = getSingletonPrototype(target);
  if (!proto) {
    return nullptr;
  }

  // Baseline must have recorded a plain template object matching both the
  // prototype and the target's realm.
  JSObject* templateObject = inspector->getTemplateObject(pc);
  if (!templateObject || !templateObject->is<PlainObject>()) {
    return nullptr;
  }
  if (templateObject->staticPrototype() != proto ||
      templateObject->nonCCWRealm() != target->realm()) {
    return nullptr;
  }

  TypeSet::ObjectKey* templateKey =
      TypeSet::ObjectKey::get(templateObject->group());
  if (templateKey->hasFlags(constraints(),
                            OBJECT_FLAG_NEW_SCRIPT_CLEARED)) {
    return nullptr;
  }

  // The target's observed |this| types must include the template object.
  JSScript* targetScript = target->nonLazyScript();
  JitScript* jitScript = targetScript->maybeJitScript();
  if (!jitScript) {
    return nullptr;
  }

  AutoSweepJitScript sweep(targetScript);
  StackTypeSet* thisTypes = jitScript->thisTypes(sweep, targetScript);
  if (!thisTypes->hasType(TypeSet::ObjectType(templateObject))) {
    return nullptr;
  }

  // All checks passed: create |this| inline from the template object with
  // the singleton prototype.
  MConstant* templateConst =
      MConstant::NewConstraintlessObject(alloc(), templateObject);
  MCreateThisWithTemplate* createThis = MCreateThisWithTemplate::New(
      alloc(), constraints(), templateConst,
      templateObject->group()->initialHeap(constraints()));
  current->add(templateConst);
  current->add(createThis);
  return createThis;
}

MDefinition* IonBuilder::createThisScriptedBaseline(MDefinition* callee) {
  // Inline |this| creation based on Baseline feedback, guarded on the
  // callee's shape and its current .prototype value.

  JSFunction* target = inspector->getSingleCallee(pc);
  if (!target || !target->hasBytecode()) {
    return nullptr;
  }
  if (target->constructorNeedsUninitializedThis()) {
    return nullptr;
  }

  // Baseline must have recorded a plain template object in the target's
  // realm.
  JSObject* templateObject = inspector->getTemplateObject(pc);
  if (!templateObject || !templateObject->is<PlainObject>()) {
    return nullptr;
  }
  if (templateObject->nonCCWRealm() != target->realm()) {
    return nullptr;
  }

  // .prototype must be a plain data property on the target...
  Shape* shape = target->lookupPure(realm->runtime()->names().prototype);
  if (!shape || !shape->isDataProperty()) {
    return nullptr;
  }

  // ...whose current value is an object matching the template object's
  // prototype.
  Value protov = target->getSlot(shape->slot());
  if (!protov.isObject()) {
    return nullptr;
  }
  JSObject* proto = checkNurseryObject(&protov.toObject());
  if (proto != templateObject->staticPrototype()) {
    return nullptr;
  }

  TypeSet::ObjectKey* templateKey =
      TypeSet::ObjectKey::get(templateObject->group());
  if (templateKey->hasFlags(constraints(),
                            OBJECT_FLAG_NEW_SCRIPT_CLEARED)) {
    return nullptr;
  }

  // The target's observed |this| types must include the template object.
  JSScript* targetScript = target->nonLazyScript();
  JitScript* jitScript = targetScript->maybeJitScript();
  if (!jitScript) {
    return nullptr;
  }

  AutoSweepJitScript sweep(targetScript);
  StackTypeSet* thisTypes = jitScript->thisTypes(sweep, targetScript);
  if (!thisTypes->hasType(TypeSet::ObjectType(templateObject))) {
    return nullptr;
  }

  // Guard the callee's shape so the .prototype slot stays where we expect.
  callee = addShapeGuard(callee, target->lastProperty(), Bailout_ShapeGuard);

  // Guard that callee.prototype is still |proto|.
  MOZ_ASSERT(shape->numFixedSlots() == 0, "Must be a dynamic slot");
  MSlots* calleeSlots = MSlots::New(alloc(), callee);
  current->add(calleeSlots);
  MLoadSlot* protoLoad = MLoadSlot::New(alloc(), calleeSlots, shape->slot());
  current->add(protoLoad);
  MDefinition* protoConst = constant(ObjectValue(*proto));
  MGuardObjectIdentity* guard =
      MGuardObjectIdentity::New(alloc(), protoLoad, protoConst,
                                /* bailOnEquality = */ false);
  current->add(guard);

  // Emit the inline allocation from the template object.
  MConstant* templateConst =
      MConstant::NewConstraintlessObject(alloc(), templateObject);
  MCreateThisWithTemplate* createThis = MCreateThisWithTemplate::New(
      alloc(), constraints(), templateConst,
      templateObject->group()->initialHeap(constraints()));
  current->add(templateConst);
  current->add(createThis);
  return createThis;
}

MDefinition* IonBuilder::createThisSlow(MDefinition* callee,
                                        MDefinition* newTarget, bool inlining) {
  // Emit a call to jit::CreateThisFromIon. When |this| can't be created
  // inline, the result may be NullValue, which LCallGeneric must check for.
  // Inlined constructor calls never take this slow path.
  MOZ_ASSERT(!inlining);
  auto* createThis = MCreateThis::New(alloc(), callee, newTarget);
  current->add(createThis);
  return createThis;
}

MDefinition* IonBuilder::createThis(JSFunction* target, MDefinition* callee,
                                    MDefinition* newTarget, bool inlining) {
  // When known, |target| was vetted by getPolyCallTargets to be a
  // constructor.
  MOZ_ASSERT_IF(target, target->isConstructor());

  // Constructible natives are asm.js only, and asm.js natives have no JIT
  // entry.
  MOZ_ASSERT_IF(target, !target->isNativeWithJitEntry());

  // Inlining requires a known target function.
  MOZ_ASSERT_IF(inlining, target);

  if (!target) {
    // Unknown target: when new.target is the callee itself, try the
    // Baseline-feedback fast path; otherwise fall through to the generic
    // slow path.
    if (callee == newTarget) {
      if (MDefinition* def = createThisScriptedBaseline(callee)) {
        return def;
      }
    }
    return createThisSlow(callee, newTarget, inlining);
  }

  // Known native functions and constructors that must not create |this|
  // here get magic sentinel values instead. Proxies are already excluded
  // because |target| has type JSFunction.
  if (target->isNative()) {
    return constant(MagicValue(JS_IS_CONSTRUCTING));
  }
  if (target->constructorNeedsUninitializedThis()) {
    return constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
  }

  if (callee == newTarget) {
    // An effectful .prototype lookup must have been ruled out before
    // inlining.
    MOZ_ASSERT_IF(inlining, target->hasNonConfigurablePrototypeDataProperty());

    // Try baking in the prototype.
    if (MDefinition* def = createThisScriptedSingleton(target)) {
      return def;
    }
    if (MDefinition* def = createThisScriptedBaseline(callee)) {
      return def;
    }
  }

  // createThisScripted applies when newTarget is known to be a function
  // with a (builtin, getter-free) .prototype property, given that the
  // isNative/constructorNeedsUninitializedThis cases were handled above.
  JSFunction* newTargetFun =
      callee == newTarget ? target
                          : getSingleCallTarget(newTarget->resultTypeSet());
  if (newTargetFun && newTargetFun->hasNonConfigurablePrototypeDataProperty()) {
    return createThisScripted(callee, newTarget);
  }

  // The .prototype lookup may be effectful, so the call can't be inlined.
  MOZ_ASSERT(!inlining);
  return createThisSlow(callee, newTarget, inlining);
}

AbortReasonOr<Ok> IonBuilder::jsop_funcall(uint32_t argc) {
  // Optimize |f.call(thisv, ...args)| into a direct call of |f| when the
  // callee is definitely the native Function.prototype.call.
  //
  // Stack for JSOp::FunCall:
  // 1:      arg0
  // ...
  // argc:   argN
  // argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position.
  // argc+2: The native 'call' function.

  int calleeDepth = -((int)argc + 2);
  int funcDepth = -((int)argc + 1);

  // If |Function.prototype.call| may be overridden, don't optimize callsite.
  TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
  JSFunction* native = getSingleCallTarget(calleeTypes);
  if (!native || !native->isNative() || native->native() != &fun_call) {
    // Not definitely fun_call: emit a plain call of the on-stack callee.
    CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                      /* ignoresReturnValue = */ BytecodeIsPopped(pc));
    if (!callInfo.init(current, argc)) {
      return abort(AbortReason::Alloc);
    }
    return makeCall(native, callInfo);
  }
  // The fun_call native itself is optimized away below; mark its definition
  // as implicitly used.
  current->peek(calleeDepth)->setImplicitlyUsedUnchecked();

  // Extract the real call target, the 'f' in |f.call()|.
  TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
  JSFunction* target = getSingleCallTarget(funTypes);

  CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                    /* ignoresReturnValue = */ BytecodeIsPopped(pc));

  // Save prior call stack in case we need to resolve during bailout
  // recovery of inner inlined function. This includes the JSFunction and the
  // 'call' native function.
  MOZ_TRY(callInfo.savePriorCallStack(&mirGen_, current, argc + 2));

  // Shimmy the slots down to remove the native 'call' function.
  current->shimmySlots(funcDepth - 1);

  bool zeroArguments = (argc == 0);

  // If no |this| argument was provided, explicitly pass Undefined.
  // Pushing is safe here, since one stack slot has been removed.
  if (zeroArguments) {
    pushConstant(UndefinedValue());
  } else {
    // |this| becomes implicit in the call.
    argc -= 1;
  }

  if (!callInfo.init(current, argc)) {
    return abort(AbortReason::Alloc);
  }

  // Try to inline the call. Zero-argument |f.call()| is never inlined here.
  if (!zeroArguments) {
    InliningDecision decision = makeInliningDecision(target, callInfo);
    switch (decision) {
      case InliningDecision_Error:
        return abort(AbortReason::Error);
      case InliningDecision_DontInline:
      case InliningDecision_WarmUpCountTooLow:
        break;
      case InliningDecision_Inline: {
        InliningStatus status;
        MOZ_TRY_VAR(status, inlineSingleCall(callInfo, target));
        if (status == InliningStatus_Inlined) {
          return Ok();
        }
        break;
      }
    }
  }

  // Call without inlining (also reached when the native veto'd inlining).
  return makeCall(target, callInfo);
}

AbortReasonOr<Ok> IonBuilder::jsop_funapply(uint32_t argc) {
  // Optimize |f.apply(thisv, args)|. Three outcomes are possible below:
  // a plain unoptimized call, the packed-array fast path
  // (jsop_funapplyarray), or the |arguments| fast path
  // (jsop_funapplyarguments).
  int calleeDepth = -((int)argc + 2);

  TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
  JSFunction* native = getSingleCallTarget(calleeTypes);
  // Only the two-argument form is optimized; also don't optimize while in
  // arguments-usage analysis mode.
  if (argc != 2 || info().analysisMode() == Analysis_ArgumentsUsage) {
    CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                      /* ignoresReturnValue = */ BytecodeIsPopped(pc));
    if (!callInfo.init(current, argc)) {
      return abort(AbortReason::Alloc);
    }
    return makeCall(native, callInfo);
  }

  // Disable compilation if the second argument to |apply| cannot be guaranteed
  // to be either definitely |arguments| or definitely not |arguments|.
  MDefinition* argument = current->peek(-1);
  if (script()->argumentsHasVarBinding() &&
      argument->mightBeType(MIRType::MagicOptimizedArguments) &&
      argument->type() != MIRType::MagicOptimizedArguments) {
    return abort(AbortReason::Disable, "fun.apply with MaybeArguments");
  }

  // Fallback to regular call if arg 2 is not definitely |arguments|.
  if (argument->type() != MIRType::MagicOptimizedArguments) {
    // Optimize fun.apply(self, array) if the length is sane and there are no
    // holes.
    TemporaryTypeSet* objTypes = argument->resultTypeSet();
    if (native && native->isNative() && native->native() == fun_apply &&
        objTypes &&
        objTypes->getKnownClass(constraints()) == &ArrayObject::class_ &&
        !objTypes->hasObjectFlags(constraints(), OBJECT_FLAG_LENGTH_OVERFLOW) &&
        ElementAccessIsPacked(constraints(), argument)) {
      return jsop_funapplyarray(argc);
    }

    CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                      /* ignoresReturnValue = */ BytecodeIsPopped(pc));
    if (!callInfo.init(current, argc)) {
      return abort(AbortReason::Alloc);
    }
    return makeCall(native, callInfo);
  }

  // The argument is definitely |arguments|; unless we're doing
  // definite-properties analysis, the callee must then definitely be
  // fun_apply or this script can't be compiled.
  if ((!native || !native->isNative() || native->native() != fun_apply) &&
      info().analysisMode() != Analysis_DefiniteProperties) {
    return abort(AbortReason::Disable, "fun.apply speculation failed");
  }

  // Use funapply that definitely uses |arguments|
  return jsop_funapplyarguments(argc);
}

static void AssertSpreadArgIsArray(MDefinition* argument,
                                   CompilerConstraintList* constraints) {
#ifdef DEBUG
  // When type information pins down the argument's class, it must be Array.
  TemporaryTypeSet* objTypes = argument->resultTypeSet();
  if (!objTypes) {
    return;
  }
  const JSClass* clasp = objTypes->getKnownClass(constraints);
  if (clasp) {
    MOZ_ASSERT(clasp == &ArrayObject::class_);
  }
#endif
}

AbortReasonOr<Ok> IonBuilder::jsop_spreadcall() {
  MDefinition* arrayArg = current->pop();
  MDefinition* thisArg = current->pop();
  MDefinition* funcArg = current->pop();

  // The spread argument is a hidden array built by JSOp::NewArray and never
  // leaked to the user; the complications of spread-call iterator behaviour
  // were handled when user objects were expanded and copied into it.
  AssertSpreadArgIsArray(arrayArg, constraints());

  // Try to determine a unique call target from type information.
  JSFunction* target = getSingleCallTarget(funcArg->resultTypeSet());
  WrappedFunction* wrappedTarget =
      target ? new (alloc()) WrappedFunction(target) : nullptr;

  // Feed the argument array's dense elements to the apply instruction.
  MElements* elements = MElements::New(alloc(), arrayArg);
  current->add(elements);

  MApplyArray* apply =
      MApplyArray::New(alloc(), wrappedTarget, funcArg, elements, thisArg);
  current->add(apply);
  current->push(apply);
  MOZ_TRY(resumeAfter(apply));

  if (target && target->realm() == script()->realm()) {
    apply->setNotCrossRealm();
  }
  if (BytecodeIsPopped(pc)) {
    apply->setIgnoresReturnValue();
  }

  // TypeBarrier the call result.
  TemporaryTypeSet* types = bytecodeTypes(pc);
  return pushTypeBarrier(apply, types, BarrierKind::TypeSet);
}

AbortReasonOr<Ok> IonBuilder::jsop_spreadnew() {
  MDefinition* newTarget = current->pop();
  MDefinition* arrayArg = current->pop();
  MDefinition* thisValue = current->pop();
  MDefinition* callee = current->pop();

  // The spread argument is a hidden array built by JSOp::NewArray and never
  // leaked to the user; the complications of spread-call iterator behaviour
  // were handled when user objects were expanded and copied into it.
  AssertSpreadArgIsArray(arrayArg, constraints());

  // Only a unique target known to support construct calls is usable.
  JSFunction* target = getSingleCallTarget(callee->resultTypeSet());
  if (target && !target->isConstructor()) {
    target = nullptr;
  }
  WrappedFunction* wrappedTarget =
      target ? new (alloc()) WrappedFunction(target) : nullptr;

  // Create |this| on the caller side; the popped placeholder is unused.
  MDefinition* create = createThis(target, callee, newTarget, false);
  thisValue->setImplicitlyUsedUnchecked();

  // Feed the argument array's dense elements to the construct instruction.
  MElements* elements = MElements::New(alloc(), arrayArg);
  current->add(elements);

  auto* construct = MConstructArray::New(alloc(), wrappedTarget, callee,
                                         elements, create, newTarget);
  current->add(construct);
  current->push(construct);
  MOZ_TRY(resumeAfter(construct));

  if (target && target->realm() == script()->realm()) {
    construct->setNotCrossRealm();
  }

  // TypeBarrier the call result.
  TemporaryTypeSet* types = bytecodeTypes(pc);
  return pushTypeBarrier(construct, types, BarrierKind::TypeSet);
}

bool IonBuilder::propertyIsConstantFunction(NativeObject* nobj, jsid id,
                                            bool (*test)(IonBuilder* builder,
                                                         JSFunction* fun)) {
  // Determine whether nobj[id] is known through type constraints to be a
  // constant JSFunction satisfying |test|. Only singleton objects can have
  // constant properties.
  if (!nobj->isSingleton()) {
    return false;
  }

  TypeSet::ObjectKey* objKey = TypeSet::ObjectKey::get(nobj);
  if (analysisContext) {
    objKey->ensureTrackedProperty(analysisContext, id);
  }
  if (objKey->unknownProperties()) {
    return false;
  }

  Value value = UndefinedValue();
  HeapTypeSetKey property = objKey->property(id);
  if (!property.constant(constraints(), &value)) {
    return false;
  }

  if (!value.isObject() || !value.toObject().is<JSFunction>()) {
    return false;
  }
  return test(this, &value.toObject().as<JSFunction>());
}

bool IonBuilder::ensureArrayPrototypeIteratorNotModified() {
  NativeObject* obj = script()->global().maybeGetArrayPrototype();
  if (!obj) {
    return false;
  }

  jsid id = SYMBOL_TO_JSID(realm->runtime()->wellKnownSymbols().iterator);
  return propertyIsConstantFunction(obj, id, [](auto* builder, auto* fun) {
    CompileRuntime* runtime = builder->mirGen().runtime;
    return IsSelfHostedFunctionWithName(fun, runtime->names().ArrayValues);
  });
}

bool IonBuilder::ensureArrayIteratorPrototypeNextNotModified() {
  NativeObject* obj = script()->global().maybeGetArrayIteratorPrototype();
  if (!obj) {
    return false;
  }

  jsid id = NameToId(mirGen_.runtime->names().next);
  return propertyIsConstantFunction(obj, id, [](auto* builder, auto* fun) {
    return IsSelfHostedFunctionWithName(
        fun, builder->mirGen().runtime->names().ArrayIteratorNext);
  });
}

AbortReasonOr<Ok> IonBuilder::jsop_optimize_spreadcall() {
  MDefinition* arr = current->peek(-1);

  // Assuming optimization isn't available doesn't affect correctness.
  // TODO: Investigate dynamic checks.
  bool result = false;
  do {
    // Inline with MIsPackedArray if the conditions described in
    // js::OptimizeSpreadCall() are all met or can be expressed through
    // compiler constraints.

    // The argument is an array.
    TemporaryTypeSet* types = arr->resultTypeSet();
    if (!types || types->getKnownClass(constraints()) != &ArrayObject::class_) {
      break;
    }

    // The array has no hole.
    if (types->hasObjectFlags(constraints(), OBJECT_FLAG_NON_PACKED)) {
      break;
    }

    // The array's prototype is Array.prototype.
    JSObject* proto;
    if (!types->getCommonPrototype(constraints(), &proto)) {
      break;
    }
    NativeObject* arrayProto = script()->global().maybeGetArrayPrototype();
    if (!arrayProto || arrayProto != proto) {
      break;
    }

    // The array doesn't define an own @@iterator property.
    jsid id = SYMBOL_TO_JSID(