js/src/jit/IonBuilder.cpp
author André Bargull <andre.bargull@gmail.com>
Mon, 14 Jan 2019 01:52:12 -0800
changeset 511109 1d0d0fed3608b845bde92823eb157cd8ce611d21
parent 510563 745cbd9ae616029f2f146b3ab810709755e4b692
child 511172 17f1c985c5bc0340dea8d5d3105a07c93b20d93c
permissions -rw-r--r--
Bug 1519795: Adjust CallInfo::newTarget to allow inlining polymorphic construct-calls of native functions. r=jandem

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonBuilder.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/ScopeExit.h"

#include "builtin/Eval.h"
#include "builtin/TypedObject.h"
#include "frontend/SourceNotes.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineInspector.h"
#include "jit/CacheIR.h"
#include "jit/Ion.h"
#include "jit/IonControlFlow.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitSpewer.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"
#include "vm/ArgumentsObject.h"
#include "vm/EnvironmentObject.h"
#include "vm/Opcodes.h"
#include "vm/RegExpStatics.h"
#include "vm/TraceLogging.h"

#include "gc/Nursery-inl.h"
#include "jit/CompileInfo-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/EnvironmentObject-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/NativeObject-inl.h"
#include "vm/ObjectGroup-inl.h"
#include "vm/UnboxedObject-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::AssertedCast;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;

using JS::TrackedOutcome;
using JS::TrackedStrategy;
using JS::TrackedTypeSite;

// Snapshot of type information taken from a BaselineFrame before Ion
// compilation starts. Actual Values are not stored (they could be nursery
// pointers that move under GC); only their TypeSet::Types are recorded.
// Populated by jit::NewBaselineFrameInspector below.
class jit::BaselineFrameInspector {
 public:
  // Type of the frame's |this| argument; UndefinedType for non-function
  // frames.
  TypeSet::Type thisType;
  // The frame's environment chain object when it is a singleton, else
  // nullptr.
  JSObject* singletonEnvChain;

  // Per-formal-argument types (UndefinedType for aliased formals).
  Vector<TypeSet::Type, 4, JitAllocPolicy> argTypes;
  // Types of each of the frame's value slots.
  Vector<TypeSet::Type, 4, JitAllocPolicy> varTypes;

  explicit BaselineFrameInspector(TempAllocator* temp)
      : thisType(TypeSet::UndefinedType()),
        singletonEnvChain(nullptr),
        argTypes(*temp),
        varTypes(*temp) {}
};

// Build a BaselineFrameInspector recording the types currently observed in
// |frame|. Returns nullptr on allocation failure.
BaselineFrameInspector* jit::NewBaselineFrameInspector(TempAllocator* temp,
                                                       BaselineFrame* frame) {
  MOZ_ASSERT(frame);

  auto* fi = temp->lifoAlloc()->new_<BaselineFrameInspector>(temp);
  if (!fi) {
    return nullptr;
  }

  // Copying the actual values into a temporary structure for use during
  // compilation could capture nursery pointers, so only the values' types
  // are recorded.

  if (frame->isFunctionFrame()) {
    fi->thisType = TypeSet::GetMaybeUntrackedValueType(frame->thisArgument());
  }

  JSObject* env = frame->environmentChain();
  if (env->isSingleton()) {
    fi->singletonEnvChain = env;
  }

  JSScript* script = frame->script();

  if (script->functionNonDelazifying()) {
    size_t numFormals = frame->numFormalArgs();
    if (!fi->argTypes.reserve(numFormals)) {
      return nullptr;
    }
    for (size_t i = 0; i < numFormals; i++) {
      // Aliased formals (and formals only reachable through an args object
      // that hasn't been created) are recorded as Undefined.
      TypeSet::Type type = TypeSet::UndefinedType();
      if (script->formalIsAliased(i)) {
        // Keep Undefined.
      } else if (!script->argsObjAliasesFormals()) {
        type = TypeSet::GetMaybeUntrackedValueType(frame->unaliasedFormal(i));
      } else if (frame->hasArgsObj()) {
        type = TypeSet::GetMaybeUntrackedValueType(frame->argsObj().arg(i));
      }
      fi->argTypes.infallibleAppend(type);
    }
  }

  size_t numSlots = frame->numValueSlots();
  if (!fi->varTypes.reserve(numSlots)) {
    return nullptr;
  }
  for (size_t i = 0; i < numSlots; i++) {
    fi->varTypes.infallibleAppend(
        TypeSet::GetMaybeUntrackedValueType(*frame->valueSlot(i)));
  }

  return fi;
}

// Construct a builder that translates |info->script()|'s bytecode into MIR.
// |analysisContext| is non-null only for the definite-properties analysis
// (asserted below). |baselineFrame| optionally carries types observed in
// Baseline. |inliningDepth| and |loopDepth| are non-zero when this builder
// compiles an inlined script.
IonBuilder::IonBuilder(JSContext* analysisContext, CompileRealm* realm,
                       const JitCompileOptions& options, TempAllocator* temp,
                       MIRGraph* graph, CompilerConstraintList* constraints,
                       BaselineInspector* inspector, CompileInfo* info,
                       const OptimizationInfo* optimizationInfo,
                       BaselineFrameInspector* baselineFrame,
                       size_t inliningDepth, uint32_t loopDepth)
    : MIRGenerator(realm, options, temp, graph, info, optimizationInfo),
      backgroundCodegen_(nullptr),
      actionableAbortScript_(nullptr),
      actionableAbortPc_(nullptr),
      actionableAbortMessage_(nullptr),
      rootList_(nullptr),
      analysisContext(analysisContext),
      baselineFrame_(baselineFrame),
      constraints_(constraints),
      thisTypes(nullptr),
      argTypes(nullptr),
      typeArray(nullptr),
      typeArrayHint(0),
      bytecodeTypeMap(nullptr),
      current(nullptr),
      loopDepth_(loopDepth),
      blockWorklist(*temp),
      cfgCurrent(nullptr),
      cfg(nullptr),
      trackedOptimizationSites_(*temp),
      lexicalCheck_(nullptr),
      callerResumePoint_(nullptr),
      callerBuilder_(nullptr),
      iterators_(*temp),
      loopHeaders_(*temp),
      loopHeaderStack_(*temp),
#ifdef DEBUG
      cfgLoopHeaderStack_(*temp),
#endif
      inspector(inspector),
      inliningDepth_(inliningDepth),
      inlinedBytecodeLength_(0),
      numLoopRestarts_(0),
      // Carry over per-script deoptimization flags from earlier bailouts.
      failedBoundsCheck_(info->script()->failedBoundsCheck()),
      failedShapeGuard_(info->script()->failedShapeGuard()),
      failedLexicalCheck_(info->script()->failedLexicalCheck()),
#ifdef DEBUG
      hasLazyArguments_(false),
#endif
      inlineCallInfo_(nullptr),
      maybeFallbackFunctionGetter_(nullptr) {
  script_ = info->script();
  scriptHasIonScript_ = script_->hasIonScript();
  pc = info->startPC();

  // Sanity-check the analysis-mode / context pairing: only the
  // arguments-usage analysis runs without a baseline script, and only the
  // definite-properties analysis carries a JSContext.
  MOZ_ASSERT(script()->hasBaselineScript() ==
             (info->analysisMode() != Analysis_ArgumentsUsage));
  MOZ_ASSERT(!!analysisContext ==
             (info->analysisMode() == Analysis_DefiniteProperties));
  MOZ_ASSERT(script_->nTypeSets() < UINT16_MAX);

  if (!info->isAnalysis()) {
    script()->baselineScript()->setIonCompiledOrInlined();
  }
}

// Drop main-thread-only state before this builder is handed to the
// background code generator.
void IonBuilder::clearForBackEnd() {
  MOZ_ASSERT(!analysisContext);

  // Release malloc-heap cache memory now: the top-level IonBuilder is
  // never explicitly destroyed, so anything still held here would leak.
  // (Builders for inner scripts live on the stack and release this in
  // their destructors.)
  envCoordinateNameCache.purge();

  baselineFrame_ = nullptr;
}

// Record an abort with reason |r| and spew the current script location.
mozilla::GenericErrorResult<AbortReason> IonBuilder::abort(AbortReason r) {
  auto result = this->MIRGenerator::abort(r);
#ifdef DEBUG
  // PCToLineNumber is too expensive to call in release builds.
  JitSpew(JitSpew_IonAbort, "aborted @ %s:%d", script()->filename(),
          PCToLineNumber(script(), pc));
#else
  JitSpew(JitSpew_IonAbort, "aborted @ %s", script()->filename());
#endif
  return result;
}

// Record an abort with reason |r| and a printf-style message, spew the
// current script location, and remember the message for optimization
// tracking.
mozilla::GenericErrorResult<AbortReason> IonBuilder::abort(AbortReason r,
                                                           const char* message,
                                                           ...) {
  va_list args;
  va_start(args, message);
  auto result = this->MIRGenerator::abortFmt(r, message, args);
  va_end(args);
#ifdef DEBUG
  // PCToLineNumber is too expensive to call in release builds.
  JitSpew(JitSpew_IonAbort, "aborted @ %s:%d", script()->filename(),
          PCToLineNumber(script(), pc));
#else
  JitSpew(JitSpew_IonAbort, "aborted @ %s", script()->filename());
#endif
  trackActionableAbort(message);
  return result;
}

// Walk the caller chain up to the top-level (non-inlined) builder.
IonBuilder* IonBuilder::outermostBuilder() {
  IonBuilder* top = this;
  for (IonBuilder* caller = callerBuilder_; caller;
       caller = caller->callerBuilder_) {
    top = caller;
  }
  return top;
}

void IonBuilder::trackActionableAbort(const char* message) {
  if (!isOptimizationTrackingEnabled()) {
    return;
  }

  IonBuilder* topBuilder = outermostBuilder();
  if (topBuilder->hadActionableAbort()) {
    return;
  }

  topBuilder->actionableAbortScript_ = script();
  topBuilder->actionableAbortPc_ = pc;
  topBuilder->actionableAbortMessage_ = message;
}

// Spew |message| tagged with the current script location (debug builds
// only: PCToLineNumber is too expensive for release builds).
void IonBuilder::spew(const char* message) {
#ifdef DEBUG
  JitSpew(JitSpew_IonMIR, "%s @ %s:%d", message, script()->filename(),
          PCToLineNumber(script(), pc));
#endif
}

// If |calleeTypes| identifies exactly one function, return it; otherwise
// return nullptr.
JSFunction* IonBuilder::getSingleCallTarget(TemporaryTypeSet* calleeTypes) {
  if (!calleeTypes) {
    return nullptr;
  }

  TemporaryTypeSet::ObjectKey* key = calleeTypes->maybeSingleObject();
  if (!key) {
    return nullptr;
  }
  if (key->clasp() != &JSFunction::class_) {
    return nullptr;
  }

  if (key->isSingleton()) {
    return &key->singleton()->as<JSFunction>();
  }

  // Non-singleton: the group may still pin down a unique interpreted
  // function (or nullptr if not).
  return key->group()->maybeInterpretedFunction();
}

// Collect up to |maxTargets| potential call targets from |calleeTypes| into
// |targets|. Leaves |targets| empty (without aborting) whenever the type
// information is unusable for optimization.
AbortReasonOr<Ok> IonBuilder::getPolyCallTargets(TemporaryTypeSet* calleeTypes,
                                                 bool constructing,
                                                 InliningTargets& targets,
                                                 uint32_t maxTargets) {
  MOZ_ASSERT(targets.empty());

  // Missing type info, or non-object types present: nothing to collect.
  if (!calleeTypes || calleeTypes->baseFlags() != 0) {
    return Ok();
  }

  unsigned count = calleeTypes->getObjectCount();
  if (count == 0 || count > maxTargets) {
    return Ok();
  }

  if (!targets.reserve(count)) {
    return abort(AbortReason::Alloc);
  }

  for (unsigned i = 0; i < count; i++) {
    ObjectGroup* group = nullptr;
    JSObject* obj = calleeTypes->getSingleton(i);
    if (obj) {
      MOZ_ASSERT(obj->isSingleton());
    } else {
      group = calleeTypes->getGroup(i);
      if (!group) {
        // Empty entry in the type set: skip it.
        continue;
      }

      obj = group->maybeInterpretedFunction();
      if (!obj) {
        // A group without a unique interpreted function cannot be used;
        // give up on all targets.
        targets.clear();
        return Ok();
      }

      MOZ_ASSERT(!obj->isSingleton());
    }

    // Don't optimize if the callee is not callable or constructable per
    // the manner it is being invoked, so that CallKnown does not have to
    // handle these cases (they will always throw).
    bool invocable = constructing ? obj->isConstructor() : obj->isCallable();
    if (!invocable) {
      targets.clear();
      return Ok();
    }

    targets.infallibleAppend(InliningTarget(obj, group));
  }

  return Ok();
}

// Spew the reason a target is not inlined (with its location when
// |targetScript| is known) and return InliningDecision_DontInline.
IonBuilder::InliningDecision IonBuilder::DontInline(JSScript* targetScript,
                                                    const char* reason) {
  if (!targetScript) {
    JitSpew(JitSpew_Inlining, "Cannot inline: %s", reason);
  } else {
    JitSpew(JitSpew_Inlining, "Cannot inline %s:%u:%u %s",
            targetScript->filename(), targetScript->lineno(),
            targetScript->column(), reason);
  }

  return InliningDecision_DontInline;
}

/*
 * |hasCommonInliningPath| determines whether the current inlining path has been
 * seen before based on the sequence of scripts in the chain of |IonBuilder|s.
 *
 * An inlining path for a function |f| is the sequence of functions whose
 * inlinings precede |f| up to any previous occurrences of |f|.
 * So, if we have the chain of inlinings
 *
 *          f1 -> f2 -> f -> f3 -> f4 -> f5 -> f
 *          --------         --------------
 *
 * the inlining paths for |f| are [f2, f1] and [f5, f4, f3].
 * When attempting to inline |f|, we find all existing inlining paths for |f|
 * and check whether they share a common prefix with the path created were |f|
 * inlined.
 *
 * For example, given mutually recursive functions |f| and |g|, a possible
 * inlining is
 *
 *                           +---- Inlining stopped here...
 *                           |
 *                           v
 *          a -> f -> g -> f \ -> g -> f -> g -> ...
 *
 * where the vertical bar denotes the termination of inlining.
 * Inlining is terminated because we have already observed the inlining path
 * [f] when inlining function |g|. Note that this will inline recursive
 * functions such as |fib| only one level, as |fib| has a zero length inlining
 * path which trivially prefixes all inlining paths.
 *
 */
bool IonBuilder::hasCommonInliningPath(const JSScript* scriptToInline) {
  // Find all previous inlinings of the |scriptToInline| and check for common
  // inlining paths with the top of the inlining stack.
  for (IonBuilder* it = this->callerBuilder_; it; it = it->callerBuilder_) {
    if (it->script() != scriptToInline) {
      continue;
    }

    // This only needs to check the top of each stack for a match,
    // as a match of length one ensures a common prefix.
    IonBuilder* path = it->callerBuilder_;
    if (!path || this->script() == path->script()) {
      return true;
    }
  }

  return false;
}

// Decide whether |target| can be inlined at the call site described by
// |callInfo|. Returns InliningDecision_Inline when inlining is possible,
// InliningDecision_DontInline (after spewing the reason) when it is not,
// and InliningDecision_Error on OOM or compilation error. Size-based
// inlining heuristics are not applied here.
IonBuilder::InliningDecision IonBuilder::canInlineTarget(JSFunction* target,
                                                         CallInfo& callInfo) {
  if (!optimizationInfo().inlineInterpreted()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
    return InliningDecision_DontInline;
  }

  if (TraceLogTextIdEnabled(TraceLogger_InlinedScripts)) {
    return DontInline(nullptr,
                      "Tracelogging of inlined scripts is enabled "
                      "but Tracelogger cannot do that yet.");
  }

  if (!target->isInterpreted()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineNotInterpreted);
    return DontInline(nullptr, "Non-interpreted target");
  }

  if (info().analysisMode() != Analysis_DefiniteProperties) {
    // If |this| or an argument has an empty resultTypeSet, don't bother
    // inlining, as the call is currently unreachable due to incomplete type
    // information. This does not apply to the definite properties analysis,
    // in that case we want to inline anyway.

    if (callInfo.thisArg()->emptyResultTypeSet()) {
      trackOptimizationOutcome(TrackedOutcome::CantInlineUnreachable);
      return DontInline(nullptr, "Empty TypeSet for |this|");
    }

    for (size_t i = 0; i < callInfo.argc(); i++) {
      if (callInfo.getArg(i)->emptyResultTypeSet()) {
        trackOptimizationOutcome(TrackedOutcome::CantInlineUnreachable);
        return DontInline(nullptr, "Empty TypeSet for argument");
      }
    }
  }

  // Allow constructing lazy scripts when performing the definite properties
  // analysis, as baseline has not been used to warm the caller up yet.
  // (|target| is known to be interpreted at this point; see the early
  // return above.)
  if (info().analysisMode() == Analysis_DefiniteProperties) {
    RootedFunction fun(analysisContext, target);
    RootedScript script(analysisContext,
                        JSFunction::getOrCreateScript(analysisContext, fun));
    if (!script) {
      return InliningDecision_Error;
    }

    if (!script->hasBaselineScript() && script->canBaselineCompile()) {
      MethodStatus status = BaselineCompile(analysisContext, script);
      if (status == Method_Error) {
        return InliningDecision_Error;
      }
      if (status != Method_Compiled) {
        trackOptimizationOutcome(TrackedOutcome::CantInlineNoBaseline);
        return InliningDecision_DontInline;
      }
    }
  }

  if (!target->hasScript()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineLazy);
    return DontInline(nullptr, "Lazy script");
  }

  JSScript* inlineScript = target->nonLazyScript();
  if (callInfo.constructing() && !target->isConstructor()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineNotConstructor);
    return DontInline(inlineScript, "Callee is not a constructor");
  }

  // Class constructors throw when called without |new|.
  if (!callInfo.constructing() && target->isClassConstructor()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineClassConstructor);
    return DontInline(inlineScript, "Not constructing class constructor");
  }

  if (!CanIonInlineScript(inlineScript)) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineDisabledIon);
    return DontInline(inlineScript, "Disabled Ion compilation");
  }

  // Don't inline functions which don't have baseline scripts.
  if (!inlineScript->hasBaselineScript()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineNoBaseline);
    return DontInline(inlineScript, "No baseline jitcode");
  }

  if (TooManyFormalArguments(target->nargs())) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineTooManyArgs);
    return DontInline(inlineScript, "Too many args");
  }

  // We check the number of actual arguments against the maximum number of
  // formal arguments as we do not want to encode all actual arguments in the
  // callerResumePoint.
  if (TooManyFormalArguments(callInfo.argc())) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineTooManyArgs);
    return DontInline(inlineScript, "Too many actual args");
  }

  // Refuse to inline recursively along an already-seen path; see
  // hasCommonInliningPath.
  if (hasCommonInliningPath(inlineScript)) {
    trackOptimizationOutcome(TrackedOutcome::HasCommonInliningPath);
    return DontInline(inlineScript, "Common inlining path");
  }

  if (inlineScript->uninlineable()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
    return DontInline(inlineScript, "Uninlineable script");
  }

  if (inlineScript->needsArgsObj()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineNeedsArgsObj);
    return DontInline(inlineScript, "Script that needs an arguments object");
  }

  if (inlineScript->isDebuggee()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineDebuggee);
    return DontInline(inlineScript, "Script is debuggee");
  }

  return InliningDecision_Inline;
}

// Seed the loop-header phis with types that may flow around the loop's
// backedge, before the loop body has been built. See the long comment below
// for the rationale; this is a best-effort, quick bytecode scan.
AbortReasonOr<Ok> IonBuilder::analyzeNewLoopTypes(
    const CFGBlock* loopEntryBlock) {
  CFGLoopEntry* loopEntry = loopEntryBlock->stopIns()->toLoopEntry();
  CFGBlock* cfgBlock = loopEntry->successor();
  MBasicBlock* entry = blockWorklist[cfgBlock->id()];
  MOZ_ASSERT(!entry->isDead());

  // The phi inputs at the loop head only reflect types for variables that
  // were present at the start of the loop. If the variable changes to a new
  // type within the loop body, and that type is carried around to the loop
  // head, then we need to know about the new type up front.
  //
  // Since SSA information hasn't been constructed for the loop body yet, we
  // need a separate analysis to pick out the types that might flow around
  // the loop header. This is a best-effort analysis that may either over-
  // or under-approximate the set of such types.
  //
  // Over-approximating the types may lead to inefficient generated code, and
  // under-approximating the types will cause the loop body to be analyzed
  // multiple times as the correct types are deduced (see finishLoop).

  // If we restarted processing of an outer loop then get loop header types
  // directly from the last time we have previously processed this loop. This
  // both avoids repeated work from the bytecode traverse below, and will
  // also pick up types discovered while previously building the loop body.
  bool foundEntry = false;
  for (size_t i = 0; i < loopHeaders_.length(); i++) {
    if (loopHeaders_[i].pc == cfgBlock->startPc()) {
      MBasicBlock* oldEntry = loopHeaders_[i].header;

      // If this block has been discarded, its resume points will have
      // already discarded their operands.
      if (oldEntry->isDead()) {
        loopHeaders_[i].header = entry;
        foundEntry = true;
        break;
      }

      // Copy every phi's type from the previous incarnation of this loop
      // header into the new header's phis.
      MResumePoint* oldEntryRp = oldEntry->entryResumePoint();
      size_t stackDepth = oldEntryRp->stackDepth();
      for (size_t slot = 0; slot < stackDepth; slot++) {
        MDefinition* oldDef = oldEntryRp->getOperand(slot);
        if (!oldDef->isPhi()) {
          // Non-phi operands are defined before the loop and must be the
          // same definition in the new header.
          MOZ_ASSERT(oldDef->block()->id() < oldEntry->id());
          MOZ_ASSERT(oldDef == entry->getSlot(slot));
          continue;
        }
        MPhi* oldPhi = oldDef->toPhi();
        MPhi* newPhi = entry->getSlot(slot)->toPhi();
        if (!newPhi->addBackedgeType(alloc(), oldPhi->type(),
                                     oldPhi->resultTypeSet())) {
          return abort(AbortReason::Alloc);
        }
      }

      // Update the most recent header for this loop encountered, in case
      // new types flow to the phis and the loop is processed at least
      // three times.
      loopHeaders_[i].header = entry;
      return Ok();
    }
  }
  if (!foundEntry) {
    if (!loopHeaders_.append(LoopHeader(cfgBlock->startPc(), entry))) {
      return abort(AbortReason::Alloc);
    }
  }

  if (loopEntry->isForIn()) {
    // The backedge will have MIteratorMore with MIRType::Value. This slot
    // is initialized to MIRType::Undefined before the loop. Add
    // MIRType::Value to avoid unnecessary loop restarts.

    MPhi* phi = entry->getSlot(entry->stackDepth() - 1)->toPhi();
    MOZ_ASSERT(phi->getOperand(0)->type() == MIRType::Undefined);

    if (!phi->addBackedgeType(alloc(), MIRType::Value, nullptr)) {
      return abort(AbortReason::Alloc);
    }
  }

  // Get the start and end pc of this loop.
  jsbytecode* start = loopEntryBlock->stopPc();
  start += GetBytecodeLength(start);
  jsbytecode* end = loopEntry->loopStopPc();

  // Iterate the bytecode quickly to seed possible types in the loopheader.
  // |last| tracks the previous opcode and |earlier| the one before that, so
  // we can inspect the instruction that produced the stored value.
  jsbytecode* last = nullptr;
  jsbytecode* earlier = nullptr;
  for (jsbytecode* pc = start; pc != end;
       earlier = last, last = pc, pc += GetBytecodeLength(pc)) {
    // Only stores to locals/args that are represented by header phis are
    // interesting.
    uint32_t slot;
    if (*pc == JSOP_SETLOCAL) {
      slot = info().localSlot(GET_LOCALNO(pc));
    } else if (*pc == JSOP_SETARG) {
      slot = info().argSlotUnchecked(GET_ARGNO(pc));
    } else {
      continue;
    }
    if (slot >= info().firstStackSlot()) {
      continue;
    }
    // Without a preceding op we cannot tell what value is being stored.
    if (!last) {
      continue;
    }

    MPhi* phi = entry->getSlot(slot)->toPhi();

    // Look through JSOP_POS at the op that produced its operand.
    if (*last == JSOP_POS) {
      last = earlier;
    }

    if (CodeSpec[*last].format & JOF_TYPESET) {
      // The producing op has an observed type set; use its known MIR type.
      TemporaryTypeSet* typeSet = bytecodeTypes(last);
      if (!typeSet->empty()) {
        MIRType type = typeSet->getKnownMIRType();
        if (!phi->addBackedgeType(alloc(), type, typeSet)) {
          return abort(AbortReason::Alloc);
        }
      }
    } else if (*last == JSOP_GETLOCAL || *last == JSOP_GETARG) {
      // Copying from another local/arg: propagate that slot's backedge
      // type, if it has one.
      uint32_t slot = (*last == JSOP_GETLOCAL)
                          ? info().localSlot(GET_LOCALNO(last))
                          : info().argSlotUnchecked(GET_ARGNO(last));
      if (slot < info().firstStackSlot()) {
        MPhi* otherPhi = entry->getSlot(slot)->toPhi();
        if (otherPhi->hasBackedgeType()) {
          if (!phi->addBackedgeType(alloc(), otherPhi->type(),
                                    otherPhi->resultTypeSet())) {
            return abort(AbortReason::Alloc);
          }
        }
      }
    } else {
      // Ops with a statically-known result type.
      MIRType type = MIRType::None;
      switch (*last) {
        case JSOP_VOID:
        case JSOP_UNDEFINED:
          type = MIRType::Undefined;
          break;
        case JSOP_GIMPLICITTHIS:
          if (!script()->hasNonSyntacticScope()) {
            type = MIRType::Undefined;
          }
          break;
        case JSOP_NULL:
          type = MIRType::Null;
          break;
        case JSOP_ZERO:
        case JSOP_ONE:
        case JSOP_INT8:
        case JSOP_INT32:
        case JSOP_UINT16:
        case JSOP_UINT24:
        case JSOP_RESUMEINDEX:
        case JSOP_BITAND:
        case JSOP_BITOR:
        case JSOP_BITXOR:
        case JSOP_BITNOT:
        case JSOP_RSH:
        case JSOP_LSH:
        case JSOP_URSH:
          type = MIRType::Int32;
          break;
        case JSOP_FALSE:
        case JSOP_TRUE:
        case JSOP_EQ:
        case JSOP_NE:
        case JSOP_LT:
        case JSOP_LE:
        case JSOP_GT:
        case JSOP_GE:
        case JSOP_NOT:
        case JSOP_STRICTEQ:
        case JSOP_STRICTNE:
        case JSOP_IN:
        case JSOP_INSTANCEOF:
        case JSOP_HASOWN:
          type = MIRType::Boolean;
          break;
        case JSOP_DOUBLE:
          type = MIRType::Double;
          break;
        case JSOP_ITERNEXT:
        case JSOP_STRING:
        case JSOP_TOSTRING:
        case JSOP_TYPEOF:
        case JSOP_TYPEOFEXPR:
          type = MIRType::String;
          break;
        case JSOP_SYMBOL:
          type = MIRType::Symbol;
          break;
        case JSOP_ADD:
        case JSOP_SUB:
        case JSOP_MUL:
        case JSOP_DIV:
        case JSOP_MOD:
        case JSOP_NEG:
          // Arithmetic result types depend on operands; ask the baseline
          // inspector what was observed.
          type = inspector->expectedResultType(last);
          break;
        default:
          break;
      }
      if (type != MIRType::None) {
        if (!phi->addBackedgeType(alloc(), type, nullptr)) {
          return abort(AbortReason::Alloc);
        }
      }
    }
  }
  return Ok();
}

// One-time builder setup: freeze the script's type sets, ensure ballast,
// and set up this/argument types and the bytecode type map.
AbortReasonOr<Ok> IonBuilder::init() {
  {
    // Freezing must use fallible allocation.
    LifoAlloc::AutoFallibleScope fallibleAllocator(alloc().lifoAlloc());
    if (!TypeScript::FreezeTypeSets(constraints(), script(), &thisTypes,
                                    &argTypes, &typeArray)) {
      return abort(AbortReason::Alloc);
    }
  }

  if (!alloc().ensureBallast()) {
    return abort(AbortReason::Alloc);
  }

  if (inlineCallInfo_) {
    // If we're inlining, the actual this/argument types are not necessarily
    // a subset of the script's observed types. |argTypes| is never accessed
    // for inlined scripts, so we just null it.
    thisTypes = inlineCallInfo_->thisArg()->resultTypeSet();
    argTypes = nullptr;
  }

  // The baseline script normally has the bytecode type map; compute it
  // ourselves only when there is no baseline script.
  if (script()->hasBaselineScript()) {
    bytecodeTypeMap = script()->baselineScript()->bytecodeTypeMap();
    return Ok();
  }

  uint32_t* map = alloc_->lifoAlloc()->newArrayUninitialized<uint32_t>(
      script()->nTypeSets());
  if (!map) {
    return abort(AbortReason::Alloc);
  }
  FillBytecodeTypeMap(script(), map);
  bytecodeTypeMap = map;

  return Ok();
}

// Build the MIR graph for a top-level (non-inlined) compilation of this
// builder's script; buildInline handles the inlined case. Returns Ok() on
// success or an abort reason on failure.
AbortReasonOr<Ok> IonBuilder::build() {
  // Spew IC info for inlined script, but only when actually compiling,
  // not when analyzing it.
#ifdef JS_STRUCTURED_SPEW
  if (!info().isAnalysis()) {
    JitSpewBaselineICStats(script(), "To-Be-Compiled");
  }
#endif

  MOZ_TRY(init());

  if (script()->hasBaselineScript()) {
    script()->baselineScript()->resetMaxInliningDepth();
  }

  // Create and enter the entry block.
  MBasicBlock* entry;
  MOZ_TRY_VAR(entry, newBlock(info().firstStackSlot(), pc));
  MOZ_TRY(setCurrentAndSpecializePhis(entry));

#ifdef JS_JITSPEW
  if (info().isAnalysis()) {
    JitSpew(JitSpew_IonScripts, "Analyzing script %s:%u:%u (%p) %s",
            script()->filename(), script()->lineno(), script()->column(),
            (void*)script(), AnalysisModeString(info().analysisMode()));
  } else {
    JitSpew(JitSpew_IonScripts,
            "%sompiling script %s:%u:%u (%p) (warmup-counter=%" PRIu32
            ", level=%s)",
            (script()->hasIonScript() ? "Rec" : "C"), script()->filename(),
            script()->lineno(), script()->column(), (void*)script(),
            script()->getWarmUpCount(),
            OptimizationLevelString(optimizationInfo().level()));
  }
#endif

  MOZ_TRY(initParameters());
  initLocals();

  // Initialize something for the env chain. We can bail out before the
  // start instruction, but the snapshot is encoded *at* the start
  // instruction, which means generating any code that could load into
  // registers is illegal.
  MInstruction* env = MConstant::New(alloc(), UndefinedValue());
  current->add(env);
  current->initSlot(info().environmentChainSlot(), env);

  // Initialize the return value.
  MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
  current->add(returnValue);
  current->initSlot(info().returnValueSlot(), returnValue);

  // Initialize the arguments object slot to undefined if necessary.
  if (info().hasArguments()) {
    MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
    current->add(argsObj);
    current->initSlot(info().argsObjSlot(), argsObj);
  }

  // Emit the start instruction, so we can begin real instructions.
  current->add(MStart::New(alloc()));

  // Guard against over-recursion. Do this before we start unboxing, since
  // this will create an OSI point that will read the incoming argument
  // values, which is nice to do before their last real use, to minimize
  // register/stack pressure.
  MCheckOverRecursed* check = MCheckOverRecursed::New(alloc());
  current->add(check);
  MResumePoint* entryRpCopy =
      MResumePoint::Copy(alloc(), current->entryResumePoint());
  if (!entryRpCopy) {
    return abort(AbortReason::Alloc);
  }
  check->setResumePoint(entryRpCopy);

  // Parameters have been checked to correspond to the typeset, now we unbox
  // what we can in an infallible manner.
  MOZ_TRY(rewriteParameters());

  // Check for redeclaration errors for global scripts.
  if (!info().funMaybeLazy() && !info().module() &&
      script()->bodyScope()->is<GlobalScope>() &&
      script()->bodyScope()->as<GlobalScope>().hasBindings()) {
    MGlobalNameConflictsCheck* redeclCheck =
        MGlobalNameConflictsCheck::New(alloc());
    current->add(redeclCheck);
    MResumePoint* entryRpCopy =
        MResumePoint::Copy(alloc(), current->entryResumePoint());
    if (!entryRpCopy) {
      return abort(AbortReason::Alloc);
    }
    redeclCheck->setResumePoint(entryRpCopy);
  }

  // It's safe to start emitting actual IR, so now build the env chain.
  MOZ_TRY(initEnvironmentChain());
  if (info().needsArgsObj()) {
    initArgumentsObject();
  }

  // The type analysis phase attempts to insert unbox operations near
  // definitions of values. It also attempts to replace uses in resume points
  // with the narrower, unboxed variants. However, we must prevent this
  // replacement from happening on values in the entry snapshot. Otherwise we
  // could get this:
  //
  //       v0 = MParameter(0)
  //       v1 = MParameter(1)
  //       --   ResumePoint(v2, v3)
  //       v2 = Unbox(v0, INT32)
  //       v3 = Unbox(v1, INT32)
  //
  // So we attach the initial resume point to each parameter, which the type
  // analysis explicitly checks (this is the same mechanism used for
  // effectful operations).
  for (uint32_t i = 0; i < info().endArgSlot(); i++) {
    MInstruction* ins = current->getEntrySlot(i)->toInstruction();
    if (ins->type() != MIRType::Value) {
      continue;
    }

    MResumePoint* entryRpCopy =
        MResumePoint::Copy(alloc(), current->entryResumePoint());
    if (!entryRpCopy) {
      return abort(AbortReason::Alloc);
    }
    ins->setResumePoint(entryRpCopy);
  }

#ifdef DEBUG
  // lazyArguments should never be accessed in |argsObjAliasesFormals| scripts.
  if (info().hasArguments() && !info().argsObjAliasesFormals()) {
    hasLazyArguments_ = true;
  }
#endif

  insertRecompileCheck();

  auto clearLastPriorResumePoint = mozilla::MakeScopeExit([&] {
    // Discard unreferenced & pre-allocated resume points.
    replaceMaybeFallbackFunctionGetter(nullptr);
  });

  // Main pass: translate the script's bytecode into MIR.
  MOZ_TRY(traverseBytecode());

  // Record the largest inlined-bytecode total seen for this script.
  if (script_->hasBaselineScript() &&
      inlinedBytecodeLength_ >
          script_->baselineScript()->inlinedBytecodeLength()) {
    script_->baselineScript()->setInlinedBytecodeLength(inlinedBytecodeLength_);
  }

  MOZ_TRY(maybeAddOsrTypeBarriers());
  MOZ_TRY(processIterators());

  if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
    return abort(AbortReason::PreliminaryObjects);
  }

  MOZ_ASSERT(loopDepth_ == 0);
  return Ok();
}

// Find and mark phis that must transitively hold an iterator live.
AbortReasonOr<Ok> IonBuilder::processIterators() {
  Vector<MDefinition*, 8, SystemAllocPolicy> pending;

  // Push |def| onto the worklist and flag it as enqueued; false on OOM.
  auto enqueue = [&](MDefinition* def) -> bool {
    if (!pending.append(def)) {
      return false;
    }
    def->setInWorklist();
    return true;
  };

  // Seed the worklist with every recorded iterator definition.
  for (size_t i = 0; i < iterators_.length(); i++) {
    MDefinition* iter = iterators_[i];
    if (iter->isInWorklist()) {
      continue;
    }
    if (!enqueue(iter)) {
      return abort(AbortReason::Alloc);
    }
  }

  // Propagate the iterator flag through all transitive uses.
  while (!pending.empty()) {
    MDefinition* def = pending.popCopy();
    def->setNotInWorklist();

    if (def->isPhi()) {
      MPhi* phi = def->toPhi();
      phi->setIterator();
      phi->setImplicitlyUsedUnchecked();
    }

    for (MUseDefIterator uses(def); uses; uses++) {
      MDefinition* consumer = uses.def();
      bool alreadyHandled =
          consumer->isInWorklist() ||
          (consumer->isPhi() && consumer->toPhi()->isIterator());
      if (alreadyHandled) {
        continue;
      }
      if (!enqueue(consumer)) {
        return abort(AbortReason::Alloc);
      }
    }
  }

  return Ok();
}

// Set up MIR construction for a script being inlined at a call site in
// |callerBuilder|'s graph: create an entry block, splice it onto the
// caller's current block, seed the frame slots (|this|, arguments, locals)
// from |callInfo|, then traverse the inlinee's bytecode.
AbortReasonOr<Ok> IonBuilder::buildInline(IonBuilder* callerBuilder,
                                          MResumePoint* callerResumePoint,
                                          CallInfo& callInfo) {
  inlineCallInfo_ = &callInfo;

  // Spew IC info for inlined script, but only when actually compiling,
  // not when analyzing it.
#ifdef JS_STRUCTURED_SPEW
  if (!info().isAnalysis()) {
    JitSpewBaselineICStats(script(), "To-Be-Inlined");
  }
#endif

  MOZ_TRY(init());

  JitSpew(JitSpew_IonScripts, "Inlining script %s:%u:%u (%p)",
          script()->filename(), script()->lineno(), script()->column(),
          (void*)script());

  callerBuilder_ = callerBuilder;
  callerResumePoint_ = callerResumePoint;

  // Inherit the caller's failure flags so optimizations that already caused
  // invalidation in the caller are not re-attempted here.
  if (callerBuilder->failedBoundsCheck_) {
    failedBoundsCheck_ = true;
  }

  if (callerBuilder->failedShapeGuard_) {
    failedShapeGuard_ = true;
  }

  if (callerBuilder->failedLexicalCheck_) {
    failedLexicalCheck_ = true;
  }

  safeForMinorGC_ = callerBuilder->safeForMinorGC_;

  // Generate single entrance block.
  MBasicBlock* entry;
  MOZ_TRY_VAR(entry, newBlock(info().firstStackSlot(), pc));
  MOZ_TRY(setCurrentAndSpecializePhis(entry));

  current->setCallerResumePoint(callerResumePoint);

  // Connect the entrance block to the last block in the caller's graph.
  MBasicBlock* predecessor = callerBuilder->current;
  MOZ_ASSERT(predecessor == callerResumePoint->block());

  predecessor->end(MGoto::New(alloc(), current));
  if (!current->addPredecessorWithoutPhis(predecessor)) {
    return abort(AbortReason::Alloc);
  }

  // Initialize env chain slot to Undefined.  It's set later by
  // |initEnvironmentChain|.
  MInstruction* env = MConstant::New(alloc(), UndefinedValue());
  current->add(env);
  current->initSlot(info().environmentChainSlot(), env);

  // Initialize |return value| slot.
  MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
  current->add(returnValue);
  current->initSlot(info().returnValueSlot(), returnValue);

  // Initialize |arguments| slot.
  if (info().hasArguments()) {
    MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
    current->add(argsObj);
    current->initSlot(info().argsObjSlot(), argsObj);
  }

  // Initialize |this| slot directly from the call site's |this| value.
  current->initSlot(info().thisSlot(), callInfo.thisArg());

  JitSpew(JitSpew_Inlining, "Initializing %u arg slots", info().nargs());

  // NB: Ion does not inline functions which |needsArgsObj|.  So using argSlot()
  // instead of argSlotUnchecked() below is OK
  MOZ_ASSERT(!info().needsArgsObj());

  // Initialize actually set arguments.
  uint32_t existing_args = Min<uint32_t>(callInfo.argc(), info().nargs());
  for (size_t i = 0; i < existing_args; ++i) {
    MDefinition* arg = callInfo.getArg(i);
    current->initSlot(info().argSlot(i), arg);
  }

  // Pass Undefined for missing arguments
  for (size_t i = callInfo.argc(); i < info().nargs(); ++i) {
    MConstant* arg = MConstant::New(alloc(), UndefinedValue());
    current->add(arg);
    current->initSlot(info().argSlot(i), arg);
  }

  JitSpew(JitSpew_Inlining, "Initializing %u locals", info().nlocals());

  initLocals();

  JitSpew(JitSpew_Inlining,
          "Inline entry block MResumePoint %p, %u stack slots",
          (void*)current->entryResumePoint(),
          current->entryResumePoint()->stackDepth());

  // +2 for the env chain and |this|, maybe another +1 for arguments object
  // slot.
  MOZ_ASSERT(current->entryResumePoint()->stackDepth() == info().totalSlots());

#ifdef DEBUG
  if (script_->argumentsHasVarBinding()) {
    hasLazyArguments_ = true;
  }
#endif

  insertRecompileCheck();

  // Insert an interrupt check when recording or replaying, which will bump
  // the record/replay system's progress counter.
  if (script()->trackRecordReplayProgress()) {
    MInterruptCheck* check = MInterruptCheck::New(alloc());
    check->setTrackRecordReplayProgress();
    current->add(check);
  }

  // Initialize the env chain now that all resume points operands are
  // initialized.
  MOZ_TRY(initEnvironmentChain(callInfo.fun()));

  auto clearLastPriorResumePoint = mozilla::MakeScopeExit([&] {
    // Discard unreferenced & pre-allocated resume points.
    replaceMaybeFallbackFunctionGetter(nullptr);
  });

  MOZ_TRY(traverseBytecode());

  MOZ_ASSERT(iterators_.empty(), "Iterators should be added to outer builder");

  if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
    return abort(AbortReason::PreliminaryObjects);
  }

  return Ok();
}

void IonBuilder::rewriteParameter(uint32_t slotIdx, MDefinition* param) {
  MOZ_ASSERT(param->isParameter() || param->isGetArgumentsObjectArg());

  // Unbox the parameter when type inference proved a definite MIR type.
  MIRType knownType = param->resultTypeSet()->getKnownMIRType();
  MDefinition* replacement = ensureDefiniteType(param, knownType);
  if (replacement != param) {
    // Careful! The original MParameter stays in the entry resume point.
    // Unless the call site proved otherwise, the argument check can still
    // bail out, so we can end up with:
    //   v0 = Parameter(0)
    //   v1 = Unbox(v0, INT32)
    //   --   ResumePoint(v0)
    //
    // Capturing v1 instead of v0 in the initial resume point would be
    // invalid, as usual.
    current->rewriteSlot(slotIdx, replacement);
  }
}

// Apply Type Inference information to parameters early on, unboxing them if
// they have a definitive type. The actual guards will be emitted by the code
// generator, explicitly, as part of the function prologue.
AbortReasonOr<Ok> IonBuilder::rewriteParameters() {
  MOZ_ASSERT(info().environmentChainSlot() == 0);

  // Only function scripts have parameters to rewrite.
  if (!info().funMaybeLazy()) {
    return Ok();
  }

  // Walk every argument slot and unbox where TI allows it.
  for (uint32_t slot = info().startArgSlot(); slot < info().endArgSlot();
       slot++) {
    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }
    rewriteParameter(slot, current->getSlot(slot));
  }

  return Ok();
}

// Create MParameter instructions for |this| and each formal argument, and
// install them in the corresponding frame slots of the entry block.
AbortReasonOr<Ok> IonBuilder::initParameters() {
  // If this JSScript is not the code of a function, then skip the
  // initialization of function parameters.
  if (!info().funMaybeLazy()) {
    return Ok();
  }

  // If we are doing OSR on a frame which initially executed in the
  // interpreter and didn't accumulate type information, try to use that OSR
  // frame to determine possible initial types for 'this' and parameters.

  if (thisTypes->empty() && baselineFrame_) {
    TypeSet::Type type = baselineFrame_->thisType;
    if (type.isSingletonUnchecked()) {
      // Nursery objects can move; record them so the type stays valid.
      checkNurseryObject(type.singleton());
    }
    thisTypes->addType(type, alloc_->lifoAlloc());
  }

  MParameter* param =
      MParameter::New(alloc(), MParameter::THIS_SLOT, thisTypes);
  current->add(param);
  current->initSlot(info().thisSlot(), param);

  for (uint32_t i = 0; i < info().nargs(); i++) {
    TemporaryTypeSet* types = &argTypes[i];
    // Only trust the baseline frame's argument types if the script never
    // writes to its arguments; otherwise the frame values may be stale.
    if (types->empty() && baselineFrame_ &&
        !script_->baselineScript()->modifiesArguments()) {
      TypeSet::Type type = baselineFrame_->argTypes[i];
      if (type.isSingletonUnchecked()) {
        checkNurseryObject(type.singleton());
      }
      types->addType(type, alloc_->lifoAlloc());
    }

    // NOTE: fallible allocation here (unlike the |this| parameter above),
    // with an explicit abort on failure.
    param = MParameter::New(alloc().fallible(), i, types);
    if (!param) {
      return abort(AbortReason::Alloc);
    }
    current->add(param);
    current->initSlot(info().argSlotUnchecked(i), param);
  }

  return Ok();
}

void IonBuilder::initLocals() {
  // Initialize all frame slots to undefined. Lexical bindings are temporal
  // dead zoned in bytecode.

  if (info().nlocals() == 0) {
    return;
  }

  MConstant* undef = MConstant::New(alloc(), UndefinedValue());
  current->add(undef);

  for (uint32_t i = 0; i < info().nlocals(); i++) {
    current->initSlot(info().localSlot(i), undef);
  }
}

bool IonBuilder::usesEnvironmentChain() {
  // The arguments-usage analysis can run without a BaselineScript, so in
  // that mode conservatively report that the environment chain is used;
  // otherwise defer to the baseline script's answer.
  bool analyzingArgsUsage = info().analysisMode() == Analysis_ArgumentsUsage;
  return analyzingArgsUsage ||
         script()->baselineScript()->usesEnvironmentChain();
}

// Build the environment chain for the current script and install it in the
// environment-chain slot. |callee| may be null, in which case an MCallee is
// created on demand for function scripts.
AbortReasonOr<Ok> IonBuilder::initEnvironmentChain(MDefinition* callee) {
  MInstruction* env = nullptr;

  // If the script doesn't use the envchain, then it's already initialized
  // from earlier.  However, always make an env chain when |needsArgsObj| is
  // true for the script, since arguments object construction requires the env
  // chain to be passed in.
  if (!info().needsArgsObj() && !usesEnvironmentChain()) {
    return Ok();
  }

  // The env chain is only tracked in scripts that have NAME opcodes which
  // will try to access the env. For other scripts, the env instructions
  // will be held live by resume points and code will still be generated for
  // them, so just use a constant undefined value.

  if (JSFunction* fun = info().funMaybeLazy()) {
    if (!callee) {
      MCallee* calleeIns = MCallee::New(alloc());
      current->add(calleeIns);
      callee = calleeIns;
    }
    env = MFunctionEnvironment::New(alloc(), callee);
    current->add(env);

    // This reproduces what is done in CallObject::createForFunction. Skip
    // this for the arguments analysis, as the script might not have a
    // baseline script with template objects yet.
    if (fun->needsSomeEnvironmentObject() &&
        info().analysisMode() != Analysis_ArgumentsUsage) {
      if (fun->needsNamedLambdaEnvironment()) {
        env = createNamedLambdaObject(callee, env);
      }

      // TODO: Parameter expression-induced extra var environment not
      // yet handled.
      if (fun->needsExtraBodyVarEnvironment()) {
        return abort(AbortReason::Disable, "Extra var environment unsupported");
      }

      if (fun->needsCallObject()) {
        MOZ_TRY_VAR(env, createCallObject(callee, env));
      }
    }
  } else if (ModuleObject* module = info().module()) {
    // Modules use a pre-created env object.
    env = constant(ObjectValue(module->initialEnvironment()));
  } else {
    // For global scripts without a non-syntactic global scope, the env
    // chain is the global lexical env.
    MOZ_ASSERT(!script()->isForEval());
    MOZ_ASSERT(!script()->hasNonSyntacticScope());
    env = constant(ObjectValue(script()->global().lexicalEnvironment()));
  }

  // Update the environment slot from UndefinedValue only after initial
  // environment is created so that bailout doesn't see a partial env.
  // See: |InitFromBailout|
  current->setEnvironmentChain(env);
  return Ok();
}

void IonBuilder::initArgumentsObject() {
  JitSpew(JitSpew_IonMIR,
          "%s:%u:%u - Emitting code to initialize arguments object! block=%p",
          script()->filename(), script()->lineno(), script()->column(),
          current);
  MOZ_ASSERT(info().needsArgsObj());

  bool mapped = script()->hasMappedArgsObj();
  ArgumentsObject* templateObj =
      script()->realm()->maybeArgumentsTemplateObject(mapped);

  MCreateArgumentsObject* argsObj = MCreateArgumentsObject::New(
      alloc(), current->environmentChain(), templateObj);
  current->add(argsObj);
  current->setArgumentsObject(argsObj);
}

// Insert a type check in the OSR block so the value entering at |slot|
// matches |type|/|typeSet| (the types the loop header phi settled on).
// |*def_| is updated to point at the checked (and possibly unboxed)
// definition.
AbortReasonOr<Ok> IonBuilder::addOsrValueTypeBarrier(
    uint32_t slot, MInstruction** def_, MIRType type,
    TemporaryTypeSet* typeSet) {
  MInstruction*& def = *def_;
  MBasicBlock* osrBlock = def->block();

  // Clear bogus type information added in newOsrPreheader().
  def->setResultType(MIRType::Value);
  def->setResultTypeSet(nullptr);

  if (typeSet && !typeSet->unknown()) {
    // Guard the incoming value against the known type set.
    MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
    osrBlock->insertBefore(osrBlock->lastIns(), barrier);
    osrBlock->rewriteSlot(slot, barrier);
    def = barrier;

    // If the TypeSet is more precise than |type|, adjust |type| for the
    // code below.
    if (type == MIRType::Value) {
      type = barrier->type();
    }
  } else if (type == MIRType::Null || type == MIRType::Undefined ||
             type == MIRType::MagicOptimizedArguments) {
    // No unbox instruction will be added below, so check the type by
    // adding a type barrier for a singleton type set.
    TypeSet::Type ntype = TypeSet::PrimitiveType(ValueTypeFromMIRType(type));
    LifoAlloc* lifoAlloc = alloc().lifoAlloc();
    typeSet = lifoAlloc->new_<TemporaryTypeSet>(lifoAlloc, ntype);
    if (!typeSet) {
      return abort(AbortReason::Alloc);
    }
    MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
    osrBlock->insertBefore(osrBlock->lastIns(), barrier);
    osrBlock->rewriteSlot(slot, barrier);
    def = barrier;
  }

  switch (type) {
    case MIRType::Boolean:
    case MIRType::Int32:
    case MIRType::Double:
    case MIRType::String:
    case MIRType::Symbol:
    case MIRType::Object:
      // Unbox so downstream code sees the specialized type; the unbox is
      // fallible and bails out on mismatch.
      if (type != def->type()) {
        MUnbox* unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
        osrBlock->insertBefore(osrBlock->lastIns(), unbox);
        osrBlock->rewriteSlot(slot, unbox);
        def = unbox;
      }
      break;

    case MIRType::Null: {
      // The singleton barrier above proved the value; use a constant.
      MConstant* c = MConstant::New(alloc(), NullValue());
      osrBlock->insertBefore(osrBlock->lastIns(), c);
      osrBlock->rewriteSlot(slot, c);
      def = c;
      break;
    }

    case MIRType::Undefined: {
      // Likewise for undefined.
      MConstant* c = MConstant::New(alloc(), UndefinedValue());
      osrBlock->insertBefore(osrBlock->lastIns(), c);
      osrBlock->rewriteSlot(slot, c);
      def = c;
      break;
    }

    case MIRType::MagicOptimizedArguments: {
      MOZ_ASSERT(hasLazyArguments_);
      MConstant* lazyArg =
          MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
      osrBlock->insertBefore(osrBlock->lastIns(), lazyArg);
      osrBlock->rewriteSlot(slot, lazyArg);
      def = lazyArg;
      break;
    }

    default:
      break;
  }

  MOZ_ASSERT(def == osrBlock->getSlot(slot));
  return Ok();
}

// After the OSR loop has been fully built, add type barriers/unboxes in the
// OSR block for every live slot and update the preheader phis to the final
// header-phi types. No-op for non-OSR compilations.
AbortReasonOr<Ok> IonBuilder::maybeAddOsrTypeBarriers() {
  if (!info().osrPc()) {
    return Ok();
  }

  // The loop has successfully been processed, and the loop header phis
  // have their final type. Add unboxes and type barriers in the OSR
  // block to check that the values have the appropriate type, and update
  // the types in the preheader.

  MBasicBlock* osrBlock = graph().osrBlock();
  if (!osrBlock) {
    // Because IonBuilder does not compile catch blocks, it's possible to
    // end up without an OSR block if the OSR pc is only reachable via a
    // break-statement inside the catch block. For instance:
    //
    //   for (;;) {
    //       try {
    //           throw 3;
    //       } catch(e) {
    //           break;
    //       }
    //   }
    //   while (..) { } // <= OSR here, only reachable via catch block.
    //
    // For now we just abort in this case.
    MOZ_ASSERT(graph().hasTryBlock());
    return abort(AbortReason::Disable,
                 "OSR block only reachable through catch block");
  }

  MBasicBlock* preheader = osrBlock->getSuccessor(0);
  MBasicBlock* header = preheader->getSuccessor(0);
  // The OSR block is the preheader's second predecessor (index 1).
  static const size_t OSR_PHI_POSITION = 1;
  MOZ_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);

  MResumePoint* headerRp = header->entryResumePoint();
  size_t stackDepth = headerRp->stackDepth();
  MOZ_ASSERT(stackDepth == osrBlock->stackDepth());
  for (uint32_t slot = info().startArgSlot(); slot < stackDepth; slot++) {
    // Aliased slots are never accessed, since they need to go through
    // the callobject. The typebarriers are added there and can be
    // discarded here.
    if (info().isSlotAliased(slot)) {
      continue;
    }

    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

    MInstruction* def = osrBlock->getSlot(slot)->toInstruction();
    MPhi* preheaderPhi = preheader->getSlot(slot)->toPhi();
    MPhi* headerPhi = headerRp->getOperand(slot)->toPhi();

    // The header phi carries the final type for this slot.
    MIRType type = headerPhi->type();
    TemporaryTypeSet* typeSet = headerPhi->resultTypeSet();

    MOZ_TRY(addOsrValueTypeBarrier(slot, &def, type, typeSet));

    // Rewire the preheader phi's OSR input to the checked value and give
    // the phi the header's final type.
    preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
    preheaderPhi->setResultType(type);
    preheaderPhi->setResultTypeSet(typeSet);
  }

  return Ok();
}

// Outcome of control-flow-graph creation: out of memory, generation aborted,
// or a usable graph was produced.
enum class CFGState : uint32_t { Alloc = 0, Abort = 1, Success = 2 };

// Return the ControlFlowGraph for |script| via |cfgOut|, reusing the graph
// cached on the baseline script when available; otherwise generate it and,
// if possible, cache it on the baseline script.
static CFGState GetOrCreateControlFlowGraph(TempAllocator& tempAlloc,
                                            JSScript* script,
                                            const ControlFlowGraph** cfgOut) {
  // Fast path: a cached graph already exists on the baseline script.
  if (script->hasBaselineScript() &&
      script->baselineScript()->controlFlowGraph()) {
    *cfgOut = script->baselineScript()->controlFlowGraph();
    return CFGState::Success;
  }

  ControlFlowGenerator cfgenerator(tempAlloc, script);
  if (!cfgenerator.traverseBytecode()) {
    if (cfgenerator.aborted()) {
      return CFGState::Abort;
    }
    return CFGState::Alloc;
  }

  // If possible cache the control flow graph on the baseline script.
  // A cached graph must outlive this compilation, so it gets its own
  // allocator in the zone's CFG LifoAlloc; otherwise the caller's
  // (compilation-lifetime) allocator is used.
  TempAllocator* graphAlloc = nullptr;
  if (script->hasBaselineScript()) {
    LifoAlloc& lifoAlloc = script->zone()->jitZone()->cfgSpace()->lifoAlloc();
    LifoAlloc::AutoFallibleScope fallibleAllocator(&lifoAlloc);
    graphAlloc = lifoAlloc.new_<TempAllocator>(&lifoAlloc);
    if (!graphAlloc) {
      return CFGState::Alloc;
    }
  } else {
    graphAlloc = &tempAlloc;
  }

  ControlFlowGraph* cfg = cfgenerator.getGraph(*graphAlloc);
  if (!cfg) {
    return CFGState::Alloc;
  }

  if (script->hasBaselineScript()) {
    MOZ_ASSERT(!script->baselineScript()->controlFlowGraph());
    script->baselineScript()->setControlFlowGraph(cfg);
  }

  if (JitSpewEnabled(JitSpew_CFG)) {
    JitSpew(JitSpew_CFG, "Generating graph for %s:%u:%u", script->filename(),
            script->lineno(), script->column());
    Fprinter& print = JitSpewPrinter();
    cfg->dump(print, script);
  }

  *cfgOut = cfg;
  return CFGState::Success;
}

// We traverse the bytecode using the control flow graph. This structure
// contains a graph of CFGBlocks in RPO order.
//
// Per CFGBlock we take the corresponding MBasicBlock and start iterating the
// bytecode of that CFGBlock. Each basic block has a mapping of local slots to
// instructions, as well as a stack depth. As we encounter instructions we
// mutate this mapping in the current block.
//
// Afterwards we visit the control flow instruction. There we add the ending ins
// of the MBasicBlock and create new MBasicBlocks for the successors. That means
// adding phi nodes for diamond join points, making sure to propagate types
// around loops ...
//
// We keep a link between a CFGBlock and the entry MBasicBlock (in
// blockWorklist). That vector only contains the MBasicBlocks that correspond
// with a CFGBlock. We can create new MBasicBlocks that don't correspond to a
// CFGBlock.
AbortReasonOr<Ok> IonBuilder::traverseBytecode() {
  // Obtain (or build) the script's control flow graph; all MIR construction
  // below walks its blocks in RPO.
  CFGState state = GetOrCreateControlFlowGraph(alloc(), info().script(), &cfg);
  MOZ_ASSERT_IF(cfg && info().script()->hasBaselineScript(),
                info().script()->baselineScript()->controlFlowGraph() == cfg);
  if (state == CFGState::Alloc) {
    return abort(AbortReason::Alloc);
  }
  if (state == CFGState::Abort) {
    return abort(AbortReason::Disable, "Couldn't create the CFG of script");
  }

  // blockWorklist maps CFGBlock ids to their entry MBasicBlocks. The first
  // CFG block corresponds to the already-created entry block.
  if (!blockWorklist.growBy(cfg->numBlocks())) {
    return abort(AbortReason::Alloc);
  }
  blockWorklist[0] = current;

  size_t i = 0;
  while (i < cfg->numBlocks()) {
    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

    bool restarted = false;
    const CFGBlock* cfgblock = cfg->block(i);
    MBasicBlock* mblock = blockWorklist[i];
    MOZ_ASSERT(mblock && !mblock->isDead());

    MOZ_TRY(visitBlock(cfgblock, mblock));
    MOZ_TRY(visitControlInstruction(cfgblock->stopIns(), &restarted));

    if (restarted) {
      // A loop was restarted (see visitBackEdge): its body blocks were
      // discarded, so walk |i| back to the loop header and rebuild from
      // there.
      while (!blockWorklist[i] || blockWorklist[i]->isDead()) {
        MOZ_ASSERT(i > 0);
        i--;
      }
      MOZ_ASSERT(cfgblock->stopIns()->isBackEdge());
      MOZ_ASSERT(loopHeaderStack_.back() == blockWorklist[i]);
    } else {
      i++;
    }
  }

#ifdef DEBUG
  MOZ_ASSERT(graph().numBlocks() >= blockWorklist.length());
  for (i = 0; i < cfg->numBlocks(); i++) {
    MOZ_ASSERT(blockWorklist[i]);
    MOZ_ASSERT(!blockWorklist[i]->isDead());
    MOZ_ASSERT_IF(i != 0, blockWorklist[i]->id() != 0);
  }
#endif

  cfg = nullptr;

  blockWorklist.clear();
  return Ok();
}

// Translate the straight-line bytecode of |cfgblock| into MIR appended to
// |mblock|. The block's terminating control instruction is handled
// separately by visitControlInstruction.
AbortReasonOr<Ok> IonBuilder::visitBlock(const CFGBlock* cfgblock,
                                         MBasicBlock* mblock) {
  mblock->setLoopDepth(loopDepth_);

  cfgCurrent = cfgblock;
  pc = cfgblock->startPc();

  if (mblock->pc() && script()->hasScriptCounts()) {
    mblock->setHitCount(script()->getHitCount(mblock->pc()));
  }

  // Optimization to move a predecessor that only has this block as successor
  // just before this block.  Skip this optimization if the previous block is
  // not part of the same function, as we might have to backtrack on inlining
  // failures.
  if (mblock->numPredecessors() == 1 &&
      mblock->getPredecessor(0)->numSuccessors() == 1 &&
      !mblock->getPredecessor(0)->outerResumePoint()) {
    graph().removeBlockFromList(mblock->getPredecessor(0));
    graph().addBlock(mblock->getPredecessor(0));
  }

  MOZ_TRY(setCurrentAndSpecializePhis(mblock));
  graph().addBlock(mblock);

  while (pc < cfgblock->stopPc()) {
    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

#ifdef DEBUG
    // In debug builds, after compiling this op, check that all values
    // popped by this opcode either:
    //
    //   (1) Have the ImplicitlyUsed flag set on them.
    //   (2) Have more uses than before compiling this op (the value is
    //       used as operand of a new MIR instruction).
    //
    // This is used to catch problems where IonBuilder pops a value without
    // adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
    Vector<MDefinition*, 4, JitAllocPolicy> popped(alloc());
    Vector<size_t, 4, JitAllocPolicy> poppedUses(alloc());
    unsigned nuses = GetUseCount(pc);

    // Snapshot the to-be-popped values and their current use counts.
    for (unsigned i = 0; i < nuses; i++) {
      MDefinition* def = current->peek(-int32_t(i + 1));
      if (!popped.append(def) || !poppedUses.append(def->defUseCount())) {
        return abort(AbortReason::Alloc);
      }
    }
#endif

    // Nothing in inspectOpcode() is allowed to advance the pc.
    JSOp op = JSOp(*pc);
    MOZ_TRY(inspectOpcode(op));

#ifdef DEBUG
    for (size_t i = 0; i < popped.length(); i++) {
      switch (op) {
        case JSOP_POP:
        case JSOP_POPN:
        case JSOP_DUPAT:
        case JSOP_DUP:
        case JSOP_DUP2:
        case JSOP_PICK:
        case JSOP_UNPICK:
        case JSOP_SWAP:
        case JSOP_SETARG:
        case JSOP_SETLOCAL:
        case JSOP_INITLEXICAL:
        case JSOP_SETRVAL:
        case JSOP_VOID:
          // Don't require SSA uses for values popped by these ops.
          break;

        case JSOP_POS:
        case JSOP_TOID:
        case JSOP_TOSTRING:
          // These ops may leave their input on the stack without setting
          // the ImplicitlyUsed flag. If this value will be popped immediately,
          // we may replace it with |undefined|, but the difference is
          // not observable.
          MOZ_ASSERT(i == 0);
          if (current->peek(-1) == popped[0]) {
            break;
          }
          MOZ_FALLTHROUGH;

        default:
          MOZ_ASSERT(popped[i]->isImplicitlyUsed() ||

                     // MNewDerivedTypedObject instances are
                     // often dead unless they escape from the
                     // fn. See IonBuilder::loadTypedObjectData()
                     // for more details.
                     popped[i]->isNewDerivedTypedObject() ||

                     popped[i]->defUseCount() > poppedUses[i]);
          break;
      }
    }
#endif

    // Advance to the next opcode and keep the tracked bytecode site in sync.
    pc += CodeSpec[op].length;
    current->updateTrackedSite(bytecodeSite(pc));
  }
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::visitGoto(CFGGoto* ins) {
  // Unconditional jump: pop whatever the goto consumes, then wire up the
  // successor block.
  CFGBlock* target = ins->successor();
  size_t valuesToPop = ins->popAmount();
  return emitGoto(target, valuesToPop);
}

AbortReasonOr<Ok> IonBuilder::emitGoto(CFGBlock* successor, size_t popAmount) {
  size_t succId = successor->id();
  MBasicBlock* existing = blockWorklist[succId];
  bool haveBlock = existing && !existing->isDead();

  // Drop the consumed values before the successor snapshots our stack.
  current->popn(popAmount);

  if (!haveBlock) {
    // First edge reaching the successor: materialize its MBasicBlock now.
    MOZ_TRY_VAR(blockWorklist[succId], newBlock(current, successor->startPc()));
  }

  MBasicBlock* target = blockWorklist[succId];
  current->end(MGoto::New(alloc(), target));

  if (haveBlock) {
    // The block already existed, so register this additional incoming edge.
    if (!target->addPredecessor(alloc(), current)) {
      return abort(AbortReason::Alloc);
    }
  }

  return Ok();
}

AbortReasonOr<Ok> IonBuilder::visitBackEdge(CFGBackEdge* ins, bool* restarted) {
  loopDepth_--;

  MBasicBlock* header = blockWorklist[ins->getSuccessor(0)->id()];
  current->end(MGoto::New(alloc(), header));

  MOZ_ASSERT(ins->getSuccessor(0) == cfgLoopHeaderStack_.back());

  // Fold this backedge's state into the header phis and propagate the
  // result through the loop, including the successor.
  AbortReason result = header->setBackedge(alloc(), current);

  if (result == AbortReason::NoAbort) {
    loopHeaderStack_.popBack();
#ifdef DEBUG
    cfgLoopHeaderStack_.popBack();
#endif
    return Ok();
  }

  if (result == AbortReason::Disable) {
    // The backedge carried types for variables that the original loop
    // header lacked, so MIR built from the old phi types may be wrong.
    // The widened types are now in the header phis; discard the loop
    // body's blocks and rebuild with the new types.
    *restarted = true;
    MOZ_TRY(restartLoop(ins->getSuccessor(0)));
    return Ok();
  }

  return abort(result);
}

// Begin building a loop: optionally create the OSR preheader, then create
// the (pending) loop header with its phis and push it on the loop stacks.
AbortReasonOr<Ok> IonBuilder::visitLoopEntry(CFGLoopEntry* loopEntry) {
  unsigned stackPhiCount = loopEntry->stackPhiCount();
  const CFGBlock* successor = loopEntry->getSuccessor(0);
  bool osr = successor->startPc() == info().osrPc();
  if (osr) {
    // This loop is the OSR target: route execution through an OSR
    // preheader before the loop header.
    MOZ_ASSERT(loopEntry->canOsr());
    MBasicBlock* preheader;
    MOZ_TRY_VAR(preheader, newOsrPreheader(current, successor->startPc(), pc));
    current->end(MGoto::New(alloc(), preheader));
    MOZ_TRY(setCurrentAndSpecializePhis(preheader));
  }

  if (loopEntry->isBrokenLoop()) {
    // A "broken loop" is a loop that does not actually loop, for example:
    //
    //   while (x) {
    //      return true;
    //   }
    //
    // A broken loop has no backedge so we don't need a loop header and loop
    // phis. Just emit a Goto to the loop entry.
    return emitGoto(loopEntry->successor(), /* popAmount = */ 0);
  }

  loopDepth_++;
  MBasicBlock* header;
  MOZ_TRY_VAR(header, newPendingLoopHeader(current, successor->startPc(), osr,
                                           loopEntry->canOsr(), stackPhiCount));
  blockWorklist[successor->id()] = header;

  current->end(MGoto::New(alloc(), header));

  // Track the header so the matching backedge (visitBackEdge) can pop it.
  if (!loopHeaderStack_.append(header)) {
    return abort(AbortReason::Alloc);
  }
#ifdef DEBUG
  if (!cfgLoopHeaderStack_.append(successor)) {
    return abort(AbortReason::Alloc);
  }
#endif

  MOZ_TRY(analyzeNewLoopTypes(cfgCurrent));

  setCurrent(header);
  pc = header->pc();

  return Ok();
}

AbortReasonOr<Ok> IonBuilder::jsop_loopentry() {
  MOZ_ASSERT(*pc == JSOP_LOOPENTRY);

  // Each loop entry gets an interrupt check plus a recompile check.
  MInterruptCheck* interruptIns = MInterruptCheck::New(alloc());
  current->add(interruptIns);
  insertRecompileCheck();

  if (!script()->trackRecordReplayProgress()) {
    return Ok();
  }

  interruptIns->setTrackRecordReplayProgress();

  // When recording/replaying, MInterruptCheck is effectful and should
  // not reexecute after bailing out.
  MOZ_TRY(resumeAfter(interruptIns));

  return Ok();
}

// Dispatch on the CFG control instruction that terminates the current block.
// |*restarted| is set when a backedge forced the loop body to be rebuilt
// (see visitBackEdge).
AbortReasonOr<Ok> IonBuilder::visitControlInstruction(
    CFGControlInstruction* ins, bool* restarted) {
  switch (ins->type()) {
    case CFGControlInstruction::Type_Test:
      return visitTest(ins->toTest());
    case CFGControlInstruction::Type_CondSwitchCase:
      return visitCondSwitchCase(ins->toCondSwitchCase());
    case CFGControlInstruction::Type_Goto:
      return visitGoto(ins->toGoto());
    case CFGControlInstruction::Type_BackEdge:
      return visitBackEdge(ins->toBackEdge(), restarted);
    case CFGControlInstruction::Type_LoopEntry:
      return visitLoopEntry(ins->toLoopEntry());
    case CFGControlInstruction::Type_Return:
    case CFGControlInstruction::Type_RetRVal:
      return visitReturn(ins);
    case CFGControlInstruction::Type_Try:
      return visitTry(ins->toTry());
    case CFGControlInstruction::Type_Throw:
      return visitThrow(ins->toThrow());
    case CFGControlInstruction::Type_TableSwitch:
      return visitTableSwitch(ins->toTableSwitch());
  }
  MOZ_CRASH("Unknown Control Instruction");
}

// Translate a single non-control-flow bytecode op at |pc| into MIR,
// mirroring the interpreter's stack effects on |current|. Control-flow
// opcodes are handled by the ControlFlowGenerator and must never reach
// this switch. Any opcode that falls out of the switch aborts compilation
// with AbortReason::Disable for this script.
AbortReasonOr<Ok> IonBuilder::inspectOpcode(JSOp op) {
  // Add not yet implemented opcodes at the bottom of the switch!
  switch (op) {
    case JSOP_NOP:
    case JSOP_NOP_DESTRUCTURING:
    case JSOP_TRY_DESTRUCTURING:
    case JSOP_LINENO:
    case JSOP_JUMPTARGET:
    case JSOP_LABEL:
      // No-ops: no stack effect, no MIR emitted.
      return Ok();

    case JSOP_UNDEFINED:
      // If this ever changes, change what JSOP_GIMPLICITTHIS does too.
      pushConstant(UndefinedValue());
      return Ok();

    case JSOP_IFNE:
    case JSOP_IFEQ:
    case JSOP_RETURN:
    case JSOP_RETRVAL:
    case JSOP_AND:
    case JSOP_OR:
    case JSOP_TRY:
    case JSOP_THROW:
    case JSOP_GOTO:
    case JSOP_CONDSWITCH:
    case JSOP_TABLESWITCH:
    case JSOP_CASE:
    case JSOP_DEFAULT:
      // Control flow opcodes should be handled in the ControlFlowGenerator.
      MOZ_CRASH("Shouldn't encounter this opcode.");

    case JSOP_BITNOT:
      return jsop_bitnot();

    case JSOP_BITAND:
    case JSOP_BITOR:
    case JSOP_BITXOR:
    case JSOP_LSH:
    case JSOP_RSH:
    case JSOP_URSH:
      return jsop_bitop(op);

    case JSOP_ADD:
    case JSOP_SUB:
    case JSOP_MUL:
    case JSOP_DIV:
    case JSOP_MOD:
      return jsop_binary_arith(op);

    case JSOP_POW:
      return jsop_pow();

    case JSOP_POS:
      return jsop_pos();

    case JSOP_NEG:
      return jsop_neg();

    case JSOP_TOSTRING:
      return jsop_tostring();

    case JSOP_DEFVAR:
      return jsop_defvar();

    case JSOP_DEFLET:
    case JSOP_DEFCONST:
      return jsop_deflexical();

    case JSOP_DEFFUN:
      return jsop_deffun();

    case JSOP_EQ:
    case JSOP_NE:
    case JSOP_STRICTEQ:
    case JSOP_STRICTNE:
    case JSOP_LT:
    case JSOP_LE:
    case JSOP_GT:
    case JSOP_GE:
      return jsop_compare(op);

    case JSOP_DOUBLE:
#ifdef ENABLE_BIGINT
    case JSOP_BIGINT:
#endif
      // The constant (double or BigInt) is stored in the script's
      // constant pool; push it directly.
      pushConstant(info().getConst(pc));
      return Ok();

    case JSOP_STRING:
      pushConstant(StringValue(info().getAtom(pc)));
      return Ok();

    case JSOP_SYMBOL: {
      // The operand is an index into the runtime's well-known symbols.
      unsigned which = GET_UINT8(pc);
      JS::Symbol* sym = realm->runtime()->wellKnownSymbols().get(which);
      pushConstant(SymbolValue(sym));
      return Ok();
    }

    case JSOP_ZERO:
      pushConstant(Int32Value(0));
      return Ok();

    case JSOP_ONE:
      pushConstant(Int32Value(1));
      return Ok();

    case JSOP_NULL:
      pushConstant(NullValue());
      return Ok();

    case JSOP_VOID:
      // Discard the operand and produce undefined.
      current->pop();
      pushConstant(UndefinedValue());
      return Ok();

    case JSOP_HOLE:
      pushConstant(MagicValue(JS_ELEMENTS_HOLE));
      return Ok();

    case JSOP_FALSE:
      pushConstant(BooleanValue(false));
      return Ok();

    case JSOP_TRUE:
      pushConstant(BooleanValue(true));
      return Ok();

    case JSOP_ARGUMENTS:
      return jsop_arguments();

    case JSOP_RUNONCE:
      return jsop_runonce();

    case JSOP_REST:
      return jsop_rest();

    case JSOP_GETARG:
      // When the arguments object aliases formals, reads must go through
      // it; otherwise the argument slot can be read directly.
      if (info().argsObjAliasesFormals()) {
        MGetArgumentsObjectArg* getArg = MGetArgumentsObjectArg::New(
            alloc(), current->argumentsObject(), GET_ARGNO(pc));
        current->add(getArg);
        current->push(getArg);
      } else {
        current->pushArg(GET_ARGNO(pc));
      }
      return Ok();

    case JSOP_SETARG:
      return jsop_setarg(GET_ARGNO(pc));

    case JSOP_GETLOCAL:
      current->pushLocal(GET_LOCALNO(pc));
      return Ok();

    case JSOP_SETLOCAL:
      current->setLocal(GET_LOCALNO(pc));
      return Ok();

    case JSOP_THROWSETCONST:
    case JSOP_THROWSETALIASEDCONST:
    case JSOP_THROWSETCALLEE:
      return jsop_throwsetconst();

    case JSOP_CHECKLEXICAL:
      return jsop_checklexical();

    case JSOP_INITLEXICAL:
      current->setLocal(GET_LOCALNO(pc));
      return Ok();

    case JSOP_INITGLEXICAL: {
      // Initializing a global lexical is compiled as a property set on
      // the global lexical environment object.
      MOZ_ASSERT(!script()->hasNonSyntacticScope());
      MDefinition* value = current->pop();
      current->push(
          constant(ObjectValue(script()->global().lexicalEnvironment())));
      current->push(value);
      return jsop_setprop(info().getAtom(pc)->asPropertyName());
    }

    case JSOP_CHECKALIASEDLEXICAL:
      return jsop_checkaliasedlexical(EnvironmentCoordinate(pc));

    case JSOP_INITALIASEDLEXICAL:
      return jsop_setaliasedvar(EnvironmentCoordinate(pc));

    case JSOP_UNINITIALIZED:
      pushConstant(MagicValue(JS_UNINITIALIZED_LEXICAL));
      return Ok();

    case JSOP_POP: {
      MDefinition* def = current->pop();

      // POP opcodes frequently appear where values are killed, e.g. after
      // SET* opcodes. Place a resume point afterwards to avoid capturing
      // the dead value in later snapshots, except in places where that
      // resume point is obviously unnecessary.
      if (pc[JSOP_POP_LENGTH] == JSOP_POP) {
        return Ok();
      }
      if (def->isConstant()) {
        return Ok();
      }
      return maybeInsertResume();
    }

    case JSOP_POPN:
      for (uint32_t i = 0, n = GET_UINT16(pc); i < n; i++) {
        current->pop();
      }
      return Ok();

    case JSOP_DUPAT:
      current->pushSlot(current->stackDepth() - 1 - GET_UINT24(pc));
      return Ok();

    case JSOP_NEWARRAY:
      return jsop_newarray(GET_UINT32(pc));

    case JSOP_NEWARRAY_COPYONWRITE:
      return jsop_newarray_copyonwrite();

    case JSOP_NEWINIT:
    case JSOP_NEWOBJECT:
      return jsop_newobject();

    case JSOP_INITELEM:
    case JSOP_INITHIDDENELEM:
      return jsop_initelem();

    case JSOP_INITELEM_INC:
      return jsop_initelem_inc();

    case JSOP_INITELEM_ARRAY:
      return jsop_initelem_array();

    case JSOP_INITPROP:
    case JSOP_INITLOCKEDPROP:
    case JSOP_INITHIDDENPROP: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_initprop(name);
    }

    case JSOP_MUTATEPROTO: {
      return jsop_mutateproto();
    }

    case JSOP_INITPROP_GETTER:
    case JSOP_INITHIDDENPROP_GETTER:
    case JSOP_INITPROP_SETTER:
    case JSOP_INITHIDDENPROP_SETTER: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_initprop_getter_setter(name);
    }

    case JSOP_INITELEM_GETTER:
    case JSOP_INITHIDDENELEM_GETTER:
    case JSOP_INITELEM_SETTER:
    case JSOP_INITHIDDENELEM_SETTER:
      return jsop_initelem_getter_setter();

    case JSOP_FUNCALL:
      return jsop_funcall(GET_ARGC(pc));

    case JSOP_FUNAPPLY:
      return jsop_funapply(GET_ARGC(pc));

    case JSOP_SPREADCALL:
      return jsop_spreadcall();

    case JSOP_CALL:
    case JSOP_CALL_IGNORES_RV:
    case JSOP_CALLITER:
    case JSOP_NEW:
      // Constructing iff the op is JSOP_NEW or JSOP_SUPERCALL. (Note:
      // JSOP_SUPERCALL is still in the not-yet-implemented list below;
      // presumably this check anticipates its support — confirm.)
      MOZ_TRY(jsop_call(GET_ARGC(pc),
                        (JSOp)*pc == JSOP_NEW || (JSOp)*pc == JSOP_SUPERCALL,
                        (JSOp)*pc == JSOP_CALL_IGNORES_RV));
      if (op == JSOP_CALLITER) {
        // Remember iterator objects on the outermost builder
        // (accumulated in iterators_).
        if (!outermostBuilder()->iterators_.append(current->peek(-1))) {
          return abort(AbortReason::Alloc);
        }
      }
      return Ok();

    case JSOP_EVAL:
    case JSOP_STRICTEVAL:
      return jsop_eval(GET_ARGC(pc));

    case JSOP_INT8:
      pushConstant(Int32Value(GET_INT8(pc)));
      return Ok();

    case JSOP_UINT16:
      pushConstant(Int32Value(GET_UINT16(pc)));
      return Ok();

    case JSOP_GETGNAME: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      if (!script()->hasNonSyntacticScope()) {
        return jsop_getgname(name);
      }
      return jsop_getname(name);
    }

    case JSOP_SETGNAME:
    case JSOP_STRICTSETGNAME: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      JSObject* obj = nullptr;
      if (!script()->hasNonSyntacticScope()) {
        obj = testGlobalLexicalBinding(name);
      }
      if (obj) {
        return setStaticName(obj, name);
      }
      return jsop_setprop(name);
    }

    case JSOP_GETNAME: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_getname(name);
    }

    case JSOP_GETINTRINSIC: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_intrinsic(name);
    }

    case JSOP_GETIMPORT: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_getimport(name);
    }

    case JSOP_BINDGNAME:
      if (!script()->hasNonSyntacticScope()) {
        if (JSObject* env = testGlobalLexicalBinding(info().getName(pc))) {
          pushConstant(ObjectValue(*env));
          return Ok();
        }
      }
      // Fall through to JSOP_BINDNAME
      MOZ_FALLTHROUGH;
    case JSOP_BINDNAME:
      return jsop_bindname(info().getName(pc));

    case JSOP_BINDVAR:
      return jsop_bindvar();

    case JSOP_DUP:
      current->pushSlot(current->stackDepth() - 1);
      return Ok();

    case JSOP_DUP2:
      return jsop_dup2();

    case JSOP_SWAP:
      current->swapAt(-1);
      return Ok();

    case JSOP_PICK:
      current->pick(-GET_INT8(pc));
      return Ok();

    case JSOP_UNPICK:
      current->unpick(-GET_INT8(pc));
      return Ok();

    case JSOP_GETALIASEDVAR:
      return jsop_getaliasedvar(EnvironmentCoordinate(pc));

    case JSOP_SETALIASEDVAR:
      return jsop_setaliasedvar(EnvironmentCoordinate(pc));

    case JSOP_UINT24:
    case JSOP_RESUMEINDEX:
      pushConstant(Int32Value(GET_UINT24(pc)));
      return Ok();

    case JSOP_INT32:
      pushConstant(Int32Value(GET_INT32(pc)));
      return Ok();

    case JSOP_LOOPHEAD:
      // JSOP_LOOPHEAD is handled when processing the loop header.
      MOZ_CRASH("JSOP_LOOPHEAD outside loop");

    case JSOP_GETELEM:
    case JSOP_CALLELEM:
      MOZ_TRY(jsop_getelem());
      if (op == JSOP_CALLELEM) {
        // The result will be used as |this| for a call; tighten its
        // known types for the upcoming call.
        MOZ_TRY(improveThisTypesForCall());
      }
      return Ok();

    case JSOP_SETELEM:
    case JSOP_STRICTSETELEM:
      return jsop_setelem();

    case JSOP_LENGTH:
      return jsop_length();

    case JSOP_NOT:
      return jsop_not();

    case JSOP_FUNCTIONTHIS:
      return jsop_functionthis();

    case JSOP_GLOBALTHIS:
      return jsop_globalthis();

    case JSOP_CALLEE: {
      MDefinition* callee = getCallee();
      current->push(callee);
      return Ok();
    }

    case JSOP_ENVCALLEE:
      return jsop_envcallee();

    case JSOP_SUPERBASE:
      return jsop_superbase();

    case JSOP_GETPROP_SUPER: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_getprop_super(name);
    }

    case JSOP_GETELEM_SUPER:
      return jsop_getelem_super();

    case JSOP_GETPROP:
    case JSOP_CALLPROP: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      MOZ_TRY(jsop_getprop(name));
      if (op == JSOP_CALLPROP) {
        // Same |this|-type refinement as JSOP_CALLELEM above.
        MOZ_TRY(improveThisTypesForCall());
      }
      return Ok();
    }

    case JSOP_SETPROP:
    case JSOP_STRICTSETPROP:
    case JSOP_SETNAME:
    case JSOP_STRICTSETNAME: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_setprop(name);
    }

    case JSOP_DELPROP:
    case JSOP_STRICTDELPROP: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_delprop(name);
    }

    case JSOP_DELELEM:
    case JSOP_STRICTDELELEM:
      return jsop_delelem();

    case JSOP_REGEXP:
      return jsop_regexp(info().getRegExp(pc));

    case JSOP_CALLSITEOBJ:
      pushConstant(ObjectValue(*info().getObject(pc)));
      return Ok();

    case JSOP_OBJECT:
      return jsop_object(info().getObject(pc));

    case JSOP_CLASSCONSTRUCTOR:
      return jsop_classconstructor();

    case JSOP_TYPEOF:
    case JSOP_TYPEOFEXPR:
      return jsop_typeof();

    case JSOP_TOASYNC:
      return jsop_toasync();

    case JSOP_TOASYNCGEN:
      return jsop_toasyncgen();

    case JSOP_TOASYNCITER:
      return jsop_toasynciter();

    case JSOP_TOID:
      return jsop_toid();

    case JSOP_ITERNEXT:
      return jsop_iternext();

    case JSOP_LAMBDA:
      return jsop_lambda(info().getFunction(pc));

    case JSOP_LAMBDA_ARROW:
      return jsop_lambda_arrow(info().getFunction(pc));

    case JSOP_SETFUNNAME:
      return jsop_setfunname(GET_UINT8(pc));

    case JSOP_PUSHLEXICALENV:
      return jsop_pushlexicalenv(GET_UINT32_INDEX(pc));

    case JSOP_POPLEXICALENV:
      // Pop one lexical environment off the environment chain.
      current->setEnvironmentChain(walkEnvironmentChain(1));
      return Ok();

    case JSOP_FRESHENLEXICALENV:
      return jsop_copylexicalenv(true);

    case JSOP_RECREATELEXICALENV:
      return jsop_copylexicalenv(false);

    case JSOP_ITER:
      return jsop_iter();

    case JSOP_MOREITER:
      return jsop_itermore();

    case JSOP_ISNOITER:
      return jsop_isnoiter();

    case JSOP_ENDITER:
      return jsop_iterend();

    case JSOP_IN:
      return jsop_in();

    case JSOP_HASOWN:
      return jsop_hasown();

    case JSOP_SETRVAL:
      MOZ_ASSERT(!script()->noScriptRval());
      current->setSlot(info().returnValueSlot(), current->pop());
      return Ok();

    case JSOP_INSTANCEOF:
      return jsop_instanceof();

    case JSOP_DEBUGLEAVELEXICALENV:
      return Ok();

    case JSOP_DEBUGGER:
      return jsop_debugger();

    case JSOP_GIMPLICITTHIS:
      if (!script()->hasNonSyntacticScope()) {
        // Keep in sync with JSOP_UNDEFINED above.
        pushConstant(UndefinedValue());
        return Ok();
      }
      // Fallthrough to IMPLICITTHIS in non-syntactic scope case
      MOZ_FALLTHROUGH;
    case JSOP_IMPLICITTHIS: {
      PropertyName* name = info().getAtom(pc)->asPropertyName();
      return jsop_implicitthis(name);
    }

    case JSOP_NEWTARGET:
      return jsop_newtarget();

    case JSOP_CHECKISOBJ:
      return jsop_checkisobj(GET_UINT8(pc));

    case JSOP_CHECKISCALLABLE:
      return jsop_checkiscallable(GET_UINT8(pc));

    case JSOP_CHECKOBJCOERCIBLE:
      return jsop_checkobjcoercible();

    case JSOP_DEBUGCHECKSELFHOSTED: {
      // Only emitted in debug builds; a no-op (value passes through) in
      // release builds.
#ifdef DEBUG
      MDebugCheckSelfHosted* check =
          MDebugCheckSelfHosted::New(alloc(), current->pop());
      current->add(check);
      current->push(check);
      MOZ_TRY(resumeAfter(check));
#endif
      return Ok();
    }

    case JSOP_IS_CONSTRUCTING:
      pushConstant(MagicValue(JS_IS_CONSTRUCTING));
      return Ok();

    case JSOP_OPTIMIZE_SPREADCALL: {
      // Assuming optimization isn't available doesn't affect correctness.
      // TODO: Investigate dynamic checks.
      MDefinition* arr = current->peek(-1);
      arr->setImplicitlyUsedUnchecked();
      pushConstant(BooleanValue(false));
      return Ok();
    }

    case JSOP_IMPORTMETA:
      return jsop_importmeta();

    case JSOP_DYNAMIC_IMPORT:
      return jsop_dynamic_import();

    case JSOP_LOOPENTRY:
      return jsop_loopentry();

    // ===== NOT Yet Implemented =====
    // Read below!

    // With
    case JSOP_ENTERWITH:
    case JSOP_LEAVEWITH:

    // Spread
    case JSOP_SPREADNEW:
    case JSOP_SPREADEVAL:
    case JSOP_STRICTSPREADEVAL:

    // Classes
    case JSOP_CHECKCLASSHERITAGE:
    case JSOP_FUNWITHPROTO:
    case JSOP_OBJWITHPROTO:
    case JSOP_BUILTINPROTO:
    case JSOP_INITHOMEOBJECT:
    case JSOP_DERIVEDCONSTRUCTOR:
    case JSOP_CHECKTHIS:
    case JSOP_CHECKRETURN:
    case JSOP_CHECKTHISREINIT:

    // Super
    case JSOP_SETPROP_SUPER:
    case JSOP_SETELEM_SUPER:
    case JSOP_STRICTSETPROP_SUPER:
    case JSOP_STRICTSETELEM_SUPER:
    case JSOP_SUPERFUN:
    // Most of the infrastructure for these exists in Ion, but needs review
    // and testing before these are enabled. Once other opcodes that are used
    // in derived classes are supported in Ion, this can be better validated
    // with testcases. Pay special attention to bailout and other areas where
    // JSOP_NEW has special handling.
    case JSOP_SPREADSUPERCALL:
    case JSOP_SUPERCALL:

    // Environments (bug 1366470)
    case JSOP_PUSHVARENV:
    case JSOP_POPVARENV:

    // Compound assignment
    case JSOP_GETBOUNDNAME:

    // Generators / Async (bug 1317690)
    case JSOP_EXCEPTION:
    case JSOP_ISGENCLOSING:
    case JSOP_INITIALYIELD:
    case JSOP_YIELD:
    case JSOP_FINALYIELDRVAL:
    case JSOP_RESUME:
    case JSOP_DEBUGAFTERYIELD:
    case JSOP_AWAIT:
    case JSOP_TRYSKIPAWAIT:
    case JSOP_GENERATOR:

    // Misc
    case JSOP_DELNAME:
    case JSOP_FINALLY:
    case JSOP_GETRVAL:
    case JSOP_GOSUB:
    case JSOP_RETSUB:
    case JSOP_SETINTRINSIC:
    case JSOP_THROWMSG:
      // === !! WARNING WARNING WARNING !! ===
      // Do you really want to sacrifice performance by not implementing this
      // operation in the optimizing compiler?
      break;

    case JSOP_FORCEINTERPRETER:
      // Intentionally not implemented.
      break;

    case JSOP_UNUSED151:
    case JSOP_LIMIT:
      break;
  }

  // Track a simpler message, since the actionable abort message is a
  // static string, and the internal opcode name isn't an actionable
  // thing anyways.
  trackActionableAbort("Unsupported bytecode");
#ifdef DEBUG
  return abort(AbortReason::Disable, "Unsupported opcode: %s", CodeName[op]);
#else
  return abort(AbortReason::Disable, "Unsupported opcode: %d", op);
#endif
}

// Restart processing of a loop whose header phis acquired new types while
// building the body. All successor blocks and the header's own
// instructions are discarded, and building resumes at the header's pc with
// the widened phi types already in place. Aborts compilation after
// MAX_LOOP_RESTARTS restarts (when script-size limiting is enabled) to
// bound compile time.
AbortReasonOr<Ok> IonBuilder::restartLoop(const CFGBlock* cfgHeader) {
  AutoTraceLog logCompile(traceLogger(), TraceLogger_IonBuilderRestartLoop);

  spew("New types at loop header, restarting loop body");

  if (JitOptions.limitScriptSize) {
    if (++numLoopRestarts_ >= MAX_LOOP_RESTARTS) {
      return abort(AbortReason::Disable,
                   "Aborted while processing control flow");
    }
  }

  MBasicBlock* header = blockWorklist[cfgHeader->id()];

  // Discard unreferenced & pre-allocated resume points.
  replaceMaybeFallbackFunctionGetter(nullptr);

  // Remove all blocks in the loop body other than the header, which has phis
  // of the appropriate type and incoming edges to preserve.
  if (!graph().removeSuccessorBlocks(header)) {
    return abort(AbortReason::Alloc);
  }
  graph().removeBlockFromList(header);

  // Remove all instructions from the header itself, and all resume points
  // except the entry resume point.
  header->discardAllInstructions();
  header->discardAllResumePoints(/* discardEntry = */ false);
  // Reset the stack depth to the depth at loop entry, since everything
  // pushed by the discarded body is gone.
  header->setStackDepth(header->getPredecessor(0)->stackDepth());

  loopDepth_ = header->loopDepth();

  // Don't specializePhis(), as the header has been visited before and the
  // phis have already had their type set.
  setCurrent(header);
  pc = header->pc();

  return Ok();
}

// Replace every occurrence of |subject| on the current stack with a
// definition whose result typeset is narrowed to |type|, as established by
// the branch guarded by |test|. An existing MFilterTypeSet for the same
// subject and test is updated in place (its typeset intersected with
// |type|) rather than stacking a second filter. When the narrowed type is
// a singleton (undefined/null/magic-arguments), the slot is replaced with
// the corresponding constant directly.
AbortReasonOr<Ok> IonBuilder::replaceTypeSet(MDefinition* subject,
                                             TemporaryTypeSet* type,
                                             MTest* test) {
  if (type->unknown()) {
    return Ok();
  }

  // Don't emit MFilterTypeSet if it doesn't improve the typeset.
  if (subject->resultTypeSet()) {
    if (subject->resultTypeSet()->equals(type)) {
      return Ok();
    }
  } else {
    TemporaryTypeSet oldTypes(alloc_->lifoAlloc(), subject->type());
    if (oldTypes.equals(type)) {
      return Ok();
    }
  }

  MInstruction* replace = nullptr;
  MDefinition* ins;

  for (uint32_t i = 0; i < current->stackDepth(); i++) {
    ins = current->getSlot(i);

    // Instead of creating a new MFilterTypeSet, try to update the old one.
    if (ins->isFilterTypeSet() && ins->getOperand(0) == subject &&
        ins->dependency() == test) {
      TemporaryTypeSet* intersect = TypeSet::intersectSets(
          ins->resultTypeSet(), type, alloc_->lifoAlloc());
      if (!intersect) {
        return abort(AbortReason::Alloc);
      }

      ins->toFilterTypeSet()->setResultType(intersect->getKnownMIRType());
      ins->toFilterTypeSet()->setResultTypeSet(intersect);

      // If the intersection pinned the type down to a singleton, fold
      // the slot to a constant.
      if (ins->type() == MIRType::Undefined) {
        current->setSlot(i, constant(UndefinedValue()));
      } else if (ins->type() == MIRType::Null) {
        current->setSlot(i, constant(NullValue()));
      } else if (ins->type() == MIRType::MagicOptimizedArguments) {
        current->setSlot(i, constant(MagicValue(JS_OPTIMIZED_ARGUMENTS)));
      } else {
        MOZ_ASSERT(!IsMagicType(ins->type()));
      }
      continue;
    }

    if (ins == subject) {
      // Create the filter lazily, on the first slot holding |subject|,
      // and reuse it for any further occurrences.
      if (!replace) {
        replace = MFilterTypeSet::New(alloc(), subject, type);
        current->add(replace);

        // Make sure we don't hoist it above the MTest, we can use the
        // 'dependency' of an MInstruction. This is normally used by
        // Alias Analysis, but won't get overwritten, since this
        // instruction doesn't have an AliasSet.
        replace->setDependency(test);

        if (replace->type() == MIRType::Undefined) {
          replace = constant(UndefinedValue());
        } else if (replace->type() == MIRType::Null) {
          replace = constant(NullValue());
        } else if (replace->type() == MIRType::MagicOptimizedArguments) {
          replace = constant(MagicValue(JS_OPTIMIZED_ARGUMENTS));
        } else {
          MOZ_ASSERT(!IsMagicType(ins->type()));
        }
      }
      current->setSlot(i, replace);
    }
  }
  return Ok();
}

bool IonBuilder::detectAndOrStructure(MPhi* ins, bool* branchIsAnd) {
  // Recognize the triangle CFG shape produced by a short-circuit
  // (&& / ||) expression:
  //
  //        condBlock
  //          /    |
  //   branchBlock |
  //          \    |
  //          phiBlock
  //
  // |ins| must be a phi in phiBlock merging the value pushed by
  // condBlock with the value pushed by branchBlock. On success,
  // *branchIsAnd reports whether the pattern behaves like AND.

  if (ins->numOperands() != 2) {
    return false;
  }

  MBasicBlock* phiBlock = ins->block();
  MOZ_ASSERT(phiBlock->numPredecessors() == 2);

  // The condition block is the predecessor ending in an MTest; the other
  // predecessor is the branch block.
  MBasicBlock* condBlock;
  MBasicBlock* branchBlock;
  if (phiBlock->getPredecessor(0)->lastIns()->isTest()) {
    condBlock = phiBlock->getPredecessor(0);
    branchBlock = phiBlock->getPredecessor(1);
  } else if (phiBlock->getPredecessor(1)->lastIns()->isTest()) {
    condBlock = phiBlock->getPredecessor(1);
    branchBlock = phiBlock->getPredecessor(0);
  } else {
    return false;
  }

  // The branch block must flow only into the phi block and come only from
  // the condition block; the condition block must have exactly the two
  // triangle edges.
  if (branchBlock->numSuccessors() != 1) {
    return false;
  }
  if (branchBlock->numPredecessors() != 1 ||
      branchBlock->getPredecessor(0) != condBlock) {
    return false;
  }
  if (condBlock->numSuccessors() != 2) {
    return false;
  }

  MDefinition* branchValue =
      ins->getOperand(phiBlock->indexForPredecessor(branchBlock));
  MDefinition* condValue =
      ins->getOperand(phiBlock->indexForPredecessor(condBlock));

  // Both incoming edges must contribute exactly one extra stack value,
  // and the phi operands must be those top-of-stack values.
  if (branchBlock->stackDepth() != condBlock->stackDepth()) {
    return false;
  }
  if (branchBlock->stackDepth() != phiBlock->stackDepth() + 1) {
    return false;
  }
  if (branchValue != branchBlock->peek(-1) ||
      condValue != condBlock->peek(-1)) {
    return false;
  }

  // The tested value must be one of the phi's operands; combine the test
  // polarity with the predecessor ordering to classify AND vs OR.
  MTest* condTest = condBlock->lastIns()->toTest();
  bool branchIsTrue = branchBlock == condTest->ifTrue();
  if (condTest->input() == ins->getOperand(0)) {
    *branchIsAnd =
        branchIsTrue != (phiBlock->getPredecessor(0) == branchBlock);
  } else if (condTest->input() == ins->getOperand(1)) {
    *branchIsAnd =
        branchIsTrue != (phiBlock->getPredecessor(1) == branchBlock);
  } else {
    return false;
  }

  return true;
}

AbortReasonOr<Ok> IonBuilder::improveTypesAtCompare(MCompare* ins,
                                                    bool trueBranch,
                                                    MTest* test) {
  // Dispatch to the specialized type-improvement routine matching the
  // shape of this comparison; anything else yields no refinement.

  // Comparisons against null or undefined.
  MCompare::CompareType compareType = ins->compareType();
  if (compareType == MCompare::Compare_Undefined ||
      compareType == MCompare::Compare_Null) {
    return improveTypesAtNullOrUndefinedCompare(ins, trueBranch, test);
  }

  // |typeof x ==/!= "..."| style comparisons: one operand is a typeof,
  // the other a constant.
  bool hasTypeOf = ins->lhs()->isTypeOf() || ins->rhs()->isTypeOf();
  bool hasConstant = ins->lhs()->isConstant() || ins->rhs()->isConstant();
  if (hasTypeOf && hasConstant) {
    return improveTypesAtTypeOfCompare(ins, trueBranch, test);
  }

  return Ok();
}

// Refine the typeset of the operand of a |typeof x ==/!= "<name>"|
// comparison in the branch selected by |trueBranch| of |test|. A filter
// typeset is built from the type-name constant; in the matching branch the
// operand's types are intersected with it, in the other branch the filter
// types are removed. Only (strict) equality comparisons against a string
// constant are handled.
AbortReasonOr<Ok> IonBuilder::improveTypesAtTypeOfCompare(MCompare* ins,
                                                          bool trueBranch,
                                                          MTest* test) {
  MTypeOf* typeOf =
      ins->lhs()->isTypeOf() ? ins->lhs()->toTypeOf() : ins->rhs()->toTypeOf();
  MConstant* constant = ins->lhs()->isConstant() ? ins->lhs()->toConstant()
                                                 : ins->rhs()->toConstant();

  if (constant->type() != MIRType::String) {
    return Ok();
  }

  bool equal = ins->jsop() == JSOP_EQ || ins->jsop() == JSOP_STRICTEQ;
  bool notEqual = ins->jsop() == JSOP_NE || ins->jsop() == JSOP_STRICTNE;

  // For != / !== the "match" branch is the false branch, so flip polarity.
  if (notEqual) {
    trueBranch = !trueBranch;
  }

  // Relational compares not supported.
  if (!equal && !notEqual) {
    return Ok();
  }

  MDefinition* subject = typeOf->input();
  TemporaryTypeSet* inputTypes = subject->resultTypeSet();

  // Create temporary typeset equal to the type if there is no resultTypeSet.
  TemporaryTypeSet tmp;
  if (!inputTypes) {
    if (subject->type() == MIRType::Value) {
      return Ok();
    }
    inputTypes = &tmp;
    tmp.addType(TypeSet::PrimitiveType(ValueTypeFromMIRType(subject->type())),
                alloc_->lifoAlloc());
  }

  if (inputTypes->unknown()) {
    return Ok();
  }

  // Note: we cannot remove the AnyObject type in the false branch,
  // since there are multiple ways to get an object. That is the reason
  // for the 'trueBranch' test.
  TemporaryTypeSet filter;
  const JSAtomState& names = runtime->names();
  if (constant->toString() == TypeName(JSTYPE_UNDEFINED, names)) {
    filter.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
    // Objects emulating undefined also typeof to "undefined".
    if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch) {
      filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
    }
  } else if (constant->toString() == TypeName(JSTYPE_BOOLEAN, names)) {
    filter.addType(TypeSet::BooleanType(), alloc_->lifoAlloc());
  } else if (constant->toString() == TypeName(JSTYPE_NUMBER, names)) {
    filter.addType(TypeSet::Int32Type(), alloc_->lifoAlloc());
    filter.addType(TypeSet::DoubleType(), alloc_->lifoAlloc());
  } else if (constant->toString() == TypeName(JSTYPE_STRING, names)) {
    filter.addType(TypeSet::StringType(), alloc_->lifoAlloc());
  } else if (constant->toString() == TypeName(JSTYPE_SYMBOL, names)) {
    filter.addType(TypeSet::SymbolType(), alloc_->lifoAlloc());
  } else if (constant->toString() == TypeName(JSTYPE_OBJECT, names)) {
    // typeof null is "object".
    filter.addType(TypeSet::NullType(), alloc_->lifoAlloc());
    if (trueBranch) {
      filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
    }
  } else if (constant->toString() == TypeName(JSTYPE_FUNCTION, names)) {
    if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch) {
      filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
    }
  } else {
    // Not a recognized type name; no refinement possible.
    return Ok();
  }

  TemporaryTypeSet* type;
  if (trueBranch) {
    type = TypeSet::intersectSets(&filter, inputTypes, alloc_->lifoAlloc());
  } else {
    type = TypeSet::removeSet(inputTypes, &filter, alloc_->lifoAlloc());
  }

  if (!type) {
    return abort(AbortReason::Alloc);
  }

  return replaceTypeSet(subject, type, test);
}

// Refine the typeset of the left-hand side of a comparison against null or
// undefined, in the branch selected by |trueBranch| of |test|. Strict
// comparisons only constrain the exact compared type; loose == / != also
// constrain the other of null/undefined (since null == undefined).
AbortReasonOr<Ok> IonBuilder::improveTypesAtNullOrUndefinedCompare(
    MCompare* ins, bool trueBranch, MTest* test) {
  MOZ_ASSERT(ins->compareType() == MCompare::Compare_Undefined ||
             ins->compareType() == MCompare::Compare_Null);

  // altersUndefined/Null represents if we can filter/set Undefined/Null.
  bool altersUndefined, altersNull;
  JSOp op = ins->jsop();

  switch (op) {
    case JSOP_STRICTNE:
    case JSOP_STRICTEQ:
      altersUndefined = ins->compareType() == MCompare::Compare_Undefined;
      altersNull = ins->compareType() == MCompare::Compare_Null;
      break;
    case JSOP_NE:
    case JSOP_EQ:
      altersUndefined = altersNull = true;
      break;
    default:
      MOZ_CRASH("Relational compares not supported");
  }

  MDefinition* subject = ins->lhs();
  TemporaryTypeSet* inputTypes = subject->resultTypeSet();

  MOZ_ASSERT(IsNullOrUndefined(ins->rhs()->type()));

  // Create temporary typeset equal to the type if there is no resultTypeSet.
  TemporaryTypeSet tmp;
  if (!inputTypes) {
    if (subject->type() == MIRType::Value) {
      return Ok();
    }
    inputTypes = &tmp;
    tmp.addType(TypeSet::PrimitiveType(ValueTypeFromMIRType(subject->type())),
                alloc_->lifoAlloc());
  }

  if (inputTypes->unknown()) {
    return Ok();
  }

  TemporaryTypeSet* type;

  // Decide if we need to filter the type or set it.
  // (equality op) XOR trueBranch selects the branch in which the subject
  // is known NOT to be null/undefined.
  if ((op == JSOP_STRICTEQ || op == JSOP_EQ) ^ trueBranch) {
    // Remove undefined/null
    TemporaryTypeSet remove;
    if (altersUndefined) {
      remove.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
    }
    if (altersNull) {
      remove.addType(TypeSet::NullType(), alloc_->lifoAlloc());
    }

    type = TypeSet::removeSet(inputTypes, &remove, alloc_->lifoAlloc());
  } else {
    // Set undefined/null.
    TemporaryTypeSet base;
    if (altersUndefined) {
      base.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
      // If TypeSet emulates undefined, then we cannot filter the objects.
      if (inputTypes->maybeEmulatesUndefined(constraints())) {
        base.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
      }
    }

    if (altersNull) {
      base.addType(TypeSet::NullType(), alloc_->lifoAlloc());
    }

    type = TypeSet::intersectSets(&base, inputTypes, alloc_->lifoAlloc());
  }

  if (!type) {
    return abort(AbortReason::Alloc);
  }

  return replaceTypeSet(subject, type, test);
}

// Refine the typeset of |ins|, the value being branched on by |test|, in
// the branch selected by |trueBranch|. Recurses through Not, IsObject,
// Compare and short-circuit (&&/|| phi) shapes; for everything else falls
// back to the generic ToBoolean-based refinement at the bottom.
AbortReasonOr<Ok> IonBuilder::improveTypesAtTest(MDefinition* ins,
                                                 bool trueBranch, MTest* test) {
  // We explore the test condition to try and deduce as much type information
  // as possible.

  // All branches of this switch that don't want to fall through to the
  // default behavior must return.  The default behavior assumes that a true
  // test means the incoming ins is not null or undefined and that a false
  // tests means it's one of null, undefined, false, 0, "", and objects
  // emulating undefined
  switch (ins->op()) {
    case MDefinition::Opcode::Not:
      // !x inverts which branch is "true" for the operand.
      return improveTypesAtTest(ins->toNot()->getOperand(0), !trueBranch, test);
    case MDefinition::Opcode::IsObject: {
      MDefinition* subject = ins->getOperand(0);
      TemporaryTypeSet* oldType = subject->resultTypeSet();

      // Create temporary typeset equal to the type if there is no
      // resultTypeSet.
      TemporaryTypeSet tmp;
      if (!oldType) {
        if (subject->type() == MIRType::Value) {
          return Ok();
        }
        oldType = &tmp;
        tmp.addType(
            TypeSet::PrimitiveType(ValueTypeFromMIRType(subject->type())),
            alloc_->lifoAlloc());
      }

      if (oldType->unknown()) {
        return Ok();
      }

      // True branch: only object types survive; false branch: only
      // non-object types survive.
      TemporaryTypeSet* type = nullptr;
      if (trueBranch) {
        type = oldType->cloneObjectsOnly(alloc_->lifoAlloc());
      } else {
        type = oldType->cloneWithoutObjects(alloc_->lifoAlloc());
      }

      if (!type) {
        return abort(AbortReason::Alloc);
      }

      return replaceTypeSet(subject, type, test);
    }
    case MDefinition::Opcode::Phi: {
      bool branchIsAnd = true;
      if (!detectAndOrStructure(ins->toPhi(), &branchIsAnd)) {
        // Just fall through to the default behavior.
        break;
      }

      // Now we have detected the triangular structure and determined if it
      // was an AND or an OR.
      if (branchIsAnd) {
        // (a && b) true implies both a and b are true.
        if (trueBranch) {
          MOZ_TRY(improveTypesAtTest(ins->toPhi()->getOperand(0), true, test));
          MOZ_TRY(improveTypesAtTest(ins->toPhi()->getOperand(1), true, test));
        }
      } else {
        /*
         * if (a || b) {
         *    ...
         * } else {
         *    ...
         * }
         *
         * If we have a statements like the one described above,
         * And we are in the else branch of it. It amounts to:
         * if (!(a || b)) and being in the true branch.
         *
         * Simplifying, we have (!a && !b)
         * In this case we can use the same logic we use for branchIsAnd
         *
         */
        if (!trueBranch) {
          MOZ_TRY(improveTypesAtTest(ins->toPhi()->getOperand(0), false, test));
          MOZ_TRY(improveTypesAtTest(ins->toPhi()->getOperand(1), false, test));
        }
      }
      return Ok();
    }

    case MDefinition::Opcode::Compare:
      return improveTypesAtCompare(ins->toCompare(), trueBranch, test);

    default:
      break;
  }

  // By default MTest tests ToBoolean(input). As a result in the true branch we
  // can filter undefined and null. In false branch we can only encounter
  // undefined, null, false, 0, "" and objects that emulate undefined.

  TemporaryTypeSet* oldType = ins->resultTypeSet();
  TemporaryTypeSet* type;

  // Create temporary typeset equal to the type if there is no resultTypeSet.
  TemporaryTypeSet tmp;
  if (!oldType) {
    if (ins->type() == MIRType::Value) {
      return Ok();
    }
    oldType = &tmp;
    tmp.addType(TypeSet::PrimitiveType(ValueTypeFromMIRType(ins->type())),
                alloc_->lifoAlloc());
  }

  // If ins does not have a typeset we return as we cannot optimize.
  if (oldType->unknown()) {
    return Ok();
  }

  // Decide either to set or remove.
  if (trueBranch) {
    TemporaryTypeSet remove;
    remove.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
    remove.addType(TypeSet::NullType(), alloc_->lifoAlloc());
    type = TypeSet::removeSet(oldType, &remove, alloc_->lifoAlloc());
  } else {
    TemporaryTypeSet base;
    base.addType(TypeSet::UndefinedType(),
                 alloc_->lifoAlloc());  // ToBoolean(undefined) == false
    base.addType(TypeSet::NullType(),
                 alloc_->lifoAlloc());  // ToBoolean(null) == false
    base.addType(TypeSet::BooleanType(),
                 alloc_->lifoAlloc());  // ToBoolean(false) == false
    base.addType(TypeSet::Int32Type(),
                 alloc_->lifoAlloc());  // ToBoolean(0) == false
    base.addType(TypeSet::DoubleType(),
                 alloc_->lifoAlloc());  // ToBoolean(0.0) == false
    base.addType(TypeSet::StringType(),
                 alloc_->lifoAlloc());  // ToBoolean("") == false

    // If the typeset does emulate undefined, then we cannot filter out
    // objects.
    if (oldType->maybeEmulatesUndefined(constraints())) {
      base.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
    }

    type = TypeSet::intersectSets(&base, oldType, alloc_->lifoAlloc());
  }

  if (!type) {
    return abort(AbortReason::Alloc);
  }

  return replaceTypeSet(ins, type, test);
}

// Emit MIR for JSOP_DUP2: duplicate the top two stack values,
// ... [a, b] => ... [a, b, a, b].
AbortReasonOr<Ok> IonBuilder::jsop_dup2() {
  // Capture the depth once, before pushing, so both slot indices refer to
  // the original stack layout.
  uint32_t depth = current->stackDepth();
  current->pushSlot(depth - 2);
  current->pushSlot(depth - 1);
  return Ok();
}

// Emit MIR for a CFG conditional test: end |current| with an MTest, create
// the true/false successor blocks, and sharpen the tested value's type
// information on each branch via improveTypesAtTest.
AbortReasonOr<Ok> IonBuilder::visitTest(CFGTest* test) {
  // Leave the condition on the stack if a later instruction still needs it.
  MDefinition* ins =
      test->mustKeepCondition() ? current->peek(-1) : current->pop();

  // Create true and false branches.
  MBasicBlock* ifTrue;
  MOZ_TRY_VAR(ifTrue, newBlock(current, test->trueBranch()->startPc()));
  MBasicBlock* ifFalse;
  MOZ_TRY_VAR(ifFalse, newBlock(current, test->falseBranch()->startPc()));

  MTest* mir = newTest(ins, ifTrue, ifFalse);
  current->end(mir);

  // Filter the types in the true branch.
  MOZ_TRY(setCurrentAndSpecializePhis(ifTrue));
  MOZ_TRY(improveTypesAtTest(mir->getOperand(0), /* trueBranch = */ true, mir));

  blockWorklist[test->trueBranch()->id()] = ifTrue;

  // Filter the types in the false branch.
  // Note: sometimes the false branch is used as merge point. As a result
  // reuse the ifFalse block as a type improvement block and create a new
  // ifFalse which we can use for the merge.
  MBasicBlock* filterBlock = ifFalse;
  ifFalse = nullptr;
  graph().addBlock(filterBlock);

  MOZ_TRY(setCurrentAndSpecializePhis(filterBlock));
  MOZ_TRY(
      improveTypesAtTest(mir->getOperand(0), /* trueBranch = */ false, mir));

  // The real false-branch target: a fresh block the filter block jumps to.
  MOZ_TRY_VAR(ifFalse, newBlock(filterBlock, test->falseBranch()->startPc()));
  filterBlock->end(MGoto::New(alloc(), ifFalse));

  // Copy the script's recorded hit count for this pc onto the filter block,
  // when script counts are available.
  if (filterBlock->pc() && script()->hasScriptCounts()) {
    filterBlock->setHitCount(script()->getHitCount(filterBlock->pc()));
  }

  blockWorklist[test->falseBranch()->id()] = ifFalse;

  // Control continues in the successor blocks; nothing falls through here.
  current = nullptr;

  return Ok();
}

// Emit MIR for one case comparison of a condition switch: end |current|
// with an MTest whose successors pop different amounts of the stack, and
// sharpen type information on both branches.
AbortReasonOr<Ok> IonBuilder::visitCondSwitchCase(
    CFGCondSwitchCase* switchCase) {
  // The comparison result stays on the stack; newBlockPopN below adjusts
  // the stack depth of each successor.
  MDefinition* cond = current->peek(-1);

  // Create true and false branches.
  MBasicBlock* ifTrue;
  MOZ_TRY_VAR(ifTrue, newBlockPopN(current, switchCase->trueBranch()->startPc(),
                                   switchCase->truePopAmount()));
  MBasicBlock* ifFalse;
  MOZ_TRY_VAR(ifFalse,
              newBlockPopN(current, switchCase->falseBranch()->startPc(),
                           switchCase->falsePopAmount()));

  blockWorklist[switchCase->trueBranch()->id()] = ifTrue;
  blockWorklist[switchCase->falseBranch()->id()] = ifFalse;

  MTest* mir = newTest(cond, ifTrue, ifFalse);
  current->end(mir);

  // Filter the types in the true branch.
  MOZ_TRY(setCurrentAndSpecializePhis(ifTrue));
  MOZ_TRY(improveTypesAtTest(mir->getOperand(0), /* trueBranch = */ true, mir));

  // Filter the types in the false branch.
  MOZ_TRY(setCurrentAndSpecializePhis(ifFalse));
  MOZ_TRY(
      improveTypesAtTest(mir->getOperand(0), /* trueBranch = */ false, mir));

  // Control resumes in the worklist blocks; nothing falls through here.
  current = nullptr;

  return Ok();
}

// Emit MIR for entering a try-catch. The try block and the code following
// the try-catch are both linked into the graph via an MGotoWithFake edge.
AbortReasonOr<Ok> IonBuilder::visitTry(CFGTry* try_) {
  // We don't support try-finally. The ControlFlowGenerator should have
  // aborted compilation in this case.

  // Try-catch within inline frames is not yet supported.
  MOZ_ASSERT(!isInlineBuilder());

  // Try-catch during the arguments usage analysis is not yet supported. Code
  // accessing the arguments within the 'catch' block is not accounted for.
  if (info().analysisMode() == Analysis_ArgumentsUsage) {
    return abort(AbortReason::Disable,
                 "Try-catch during arguments usage analysis");
  }

  graph().setHasTryBlock();

  MBasicBlock* tryBlock;
  MOZ_TRY_VAR(tryBlock, newBlock(current, try_->tryBlock()->startPc()));

  blockWorklist[try_->tryBlock()->id()] = tryBlock;

  // Connect the code after the try-catch to the graph with an MGotoWithFake
  // instruction that always jumps to the try block. This ensures the
  // successor block always has a predecessor.
  // (Successor 1 is the after-try-catch block, as the worklist entry just
  // below indicates.)
  MBasicBlock* successor;
  MOZ_TRY_VAR(successor, newBlock(current, try_->getSuccessor(1)->startPc()));

  blockWorklist[try_->afterTryCatchBlock()->id()] = successor;

  current->end(MGotoWithFake::New(alloc(), tryBlock, successor));

  // The baseline compiler should not attempt to enter the catch block
  // via OSR.
  MOZ_ASSERT(info().osrPc() < try_->catchStartPc() ||
             info().osrPc() >= try_->afterTryCatchBlock()->startPc());

  return Ok();
}

// Emit the MReturn that ends the current block, for both plain returns
// (Type_Return) and implicit returns of the rval slot (Type_RetRVal).
AbortReasonOr<Ok> IonBuilder::visitReturn(CFGControlInstruction* control) {
  MDefinition* def;
  switch (control->type()) {
    case CFGControlInstruction::Type_Return:
      // Return the last instruction.
      def = current->pop();
      break;

    case CFGControlInstruction::Type_RetRVal:
      // Return undefined eagerly if script doesn't use return value.
      if (script()->noScriptRval()) {
        MInstruction* ins = MConstant::New(alloc(), UndefinedValue());
        current->add(ins);
        def = ins;
        break;
      }

      // Otherwise return whatever is stored in the return-value slot.
      def = current->getSlot(info().returnValueSlot());
      break;

    default:
      // Unreachable; the assignment only keeps |def| initialized.
      def = nullptr;
      MOZ_CRASH("unknown return op");
  }

  MReturn* ret = MReturn::New(alloc(), def);
  current->end(ret);

  // Record this exit so the graph's return accumulator (used when this
  // script is being inlined) can patch it later.
  if (!graph().addReturn(current)) {
    return abort(AbortReason::Alloc);
  }

  // Make sure no one tries to use this block now.
  setCurrent(nullptr);
  return Ok();
}

// Emit MIR for JSOP_THROW: an MNop carrying an up-to-date resume point,
// followed by the MThrow that ends the block.
AbortReasonOr<Ok> IonBuilder::visitThrow(CFGThrow* cfgIns) {
  MDefinition* def = current->pop();

  // MThrow is not marked as effectful. This means when it throws and we
  // are inside a try block, we could use an earlier resume point and this
  // resume point may not be up-to-date, for example:
  //
  // (function() {
  //     try {
  //         var x = 1;
  //         foo(); // resume point
  //         x = 2;
  //         throw foo;
  //     } catch(e) {
  //         print(x);
  //     }
  // })();
  //
  // If we use the resume point after the call, this will print 1 instead
  // of 2. To fix this, we create a resume point right before the MThrow.
  //
  // Note that this is not a problem for instructions other than MThrow
  // because they are either marked as effectful (have their own resume
  // point) or cannot throw a catchable exception.
  //
  // We always install this resume point (instead of only when the function
  // has a try block) in order to handle the Debugger onExceptionUnwind
  // hook. When we need to handle the hook, we bail out to baseline right
  // after the throw and propagate the exception when debug mode is on. This
  // is opposed to the normal behavior of resuming directly in the
  // associated catch block.
  MNop* nop = MNop::New(alloc());
  current->add(nop);

  MOZ_TRY(resumeAfter(nop));

  MThrow* ins = MThrow::New(alloc(), def);
  current->end(ins);

  return Ok();
}

// Emit MIR for JSOP_TABLESWITCH: build an MTableSwitch whose successor 0 is
// the default case and successors 1..n are the table cases. Inside each
// case block the switch input is replaced by the case's constant value.
AbortReasonOr<Ok> IonBuilder::visitTableSwitch(CFGTableSwitch* cfgIns) {
  // Pop input.
  MDefinition* ins = current->pop();

  // Create MIR instruction
  MTableSwitch* tableswitch =
      MTableSwitch::New(alloc(), ins, cfgIns->low(), cfgIns->high());

#ifdef DEBUG
  // CFG successor 0 must be the default; successor i (i >= 1) is case i-1.
  MOZ_ASSERT(cfgIns->defaultCase() == cfgIns->getSuccessor(0));
  for (size_t i = 1; i < cfgIns->numSuccessors(); i++) {
    MOZ_ASSERT(cfgIns->getCase(i - 1) == cfgIns->getSuccessor(i));
  }
#endif

  // Create the cases
  for (size_t i = 0; i < cfgIns->numSuccessors(); i++) {
    const CFGBlock* cfgblock = cfgIns->getSuccessor(i);

    MBasicBlock* caseBlock;
    MOZ_TRY_VAR(caseBlock, newBlock(current, cfgblock->startPc()));

    blockWorklist[cfgblock->id()] = caseBlock;

    size_t index;
    if (i == 0) {
      // Successor 0: the default case.
      if (!tableswitch->addDefault(caseBlock, &index)) {
        return abort(AbortReason::Alloc);
      }

    } else {
      if (!tableswitch->addSuccessor(caseBlock, &index)) {
        return abort(AbortReason::Alloc);
      }

      if (!tableswitch->addCase(index)) {
        return abort(AbortReason::Alloc);
      }

      // If this is an actual case statement, optimize by replacing the
      // input to the switch case with the actual number of the case.
      MConstant* constant =
          MConstant::New(alloc(), Int32Value(i - 1 + tableswitch->low()));
      caseBlock->add(constant);
      // Substitute the constant into every stack slot holding the input.
      for (uint32_t j = 0; j < caseBlock->stackDepth(); j++) {
        if (ins != caseBlock->getSlot(j)) {
          continue;
        }

        constant->setDependency(ins);
        caseBlock->setSlot(j, constant);
      }
      graph().addBlock(caseBlock);

      // Copy the script's recorded hit count for this pc, when available.
      if (caseBlock->pc() && script()->hasScriptCounts()) {
        caseBlock->setHitCount(script()->getHitCount(caseBlock->pc()));
      }

      // End the case block with a goto to a fresh merge block, which becomes
      // the worklist entry for this CFG block.
      MBasicBlock* merge;
      MOZ_TRY_VAR(merge, newBlock(caseBlock, cfgblock->startPc()));
      // NOTE(review): this null-check looks redundant — MOZ_TRY_VAR above
      // presumably already propagated the failure; confirm before removing.
      if (!merge) {
        return abort(AbortReason::Alloc);
      }

      caseBlock->end(MGoto::New(alloc(), merge));
      blockWorklist[cfgblock->id()] = merge;
    }

    MOZ_ASSERT(index == i);
  }

  // Save the MIR instruction as last instruction of this block.
  current->end(tableswitch);
  return Ok();
}

// Push a constant MIR node for |v| onto the virtual stack.
void IonBuilder::pushConstant(const Value& v) {
  auto* cst = constant(v);
  current->push(cst);
}

// Try to emit an int32-specialized MBitNot for |input|. Sets *emitted on
// success; leaves it false when the operand might be an object or symbol.
AbortReasonOr<Ok> IonBuilder::bitnotTrySpecialized(bool* emitted,
                                                   MDefinition* input) {
  MOZ_ASSERT(*emitted == false);

  // Specialization is only valid when the operand cannot be an object or a
  // symbol.
  bool mightBeObjectOrSymbol = input->mightBeType(MIRType::Object) ||
                               input->mightBeType(MIRType::Symbol);
  if (mightBeObjectOrSymbol) {
    return Ok();
  }

  MBitNot* bitNot = MBitNot::New(alloc(), input);
  bitNot->setSpecialization(MIRType::Int32);

  current->add(bitNot);
  current->push(bitNot);

  *emitted = true;
  return Ok();
}

// Emit MIR for JSOP_BITNOT (~x). Tries, in order: an int32-specialized
// MBitNot, a CacheIR unary stub, and finally an effectful MBitNot that
// performs a VM call.
AbortReasonOr<Ok> IonBuilder::jsop_bitnot() {
  bool emitted = false;

  MDefinition* input = current->pop();

  if (!forceInlineCaches()) {
    MOZ_TRY(bitnotTrySpecialized(&emitted, input));
    if (emitted) {
      return Ok();
    }
  }

  // arithTryBinaryStub also handles unary ops; pass nullptr for the
  // (nonexistent) left-hand side.
  MOZ_TRY(arithTryBinaryStub(&emitted, JSOP_BITNOT, nullptr, input));
  if (emitted) {
    return Ok();
  }

  // Not possible to optimize. Do a slow vm call.
  MBitNot* ins = MBitNot::New(alloc(), input);

  current->add(ins);
  current->push(ins);
  MOZ_ASSERT(ins->isEffectful());
  return resumeAfter(ins);
}

// Emit MIR for a binary bitwise/shift op (&, |, ^, <<, >>, >>>), choosing a
// specialization from baseline feedback via infer().
AbortReasonOr<Ok> IonBuilder::jsop_bitop(JSOp op) {
  // Pop the operands; the right-hand side is on top.
  MDefinition* rhs = current->pop();
  MDefinition* lhs = current->pop();

  // Build the MIR node matching the bytecode op.
  MBinaryBitwiseInstruction* bitOp = nullptr;
  switch (op) {
    case JSOP_BITAND:
      bitOp = MBitAnd::New(alloc(), lhs, rhs);
      break;
    case JSOP_BITOR:
      bitOp = MBitOr::New(alloc(), lhs, rhs);
      break;
    case JSOP_BITXOR:
      bitOp = MBitXor::New(alloc(), lhs, rhs);
      break;
    case JSOP_LSH:
      bitOp = MLsh::New(alloc(), lhs, rhs);
      break;
    case JSOP_RSH:
      bitOp = MRsh::New(alloc(), lhs, rhs);
      break;
    case JSOP_URSH:
      bitOp = MUrsh::New(alloc(), lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected bitop");
  }

  current->add(bitOp);
  // Specialize based on what baseline observed at this pc.
  bitOp->infer(inspector, pc);

  current->push(bitOp);
  if (bitOp->isEffectful()) {
    MOZ_TRY(resumeAfter(bitOp));
  }

  return Ok();
}

// Map an arithmetic bytecode op (ADD/SUB/MUL/DIV/MOD) to the corresponding
// MDefinition opcode. Crashes on any other op.
MDefinition::Opcode JSOpToMDefinition(JSOp op) {
  switch (op) {
    case JSOP_MOD:
      return MDefinition::Opcode::Mod;
    case JSOP_DIV:
      return MDefinition::Opcode::Div;
    case JSOP_MUL:
      return MDefinition::Opcode::Mul;
    case JSOP_SUB:
      return MDefinition::Opcode::Sub;
    case JSOP_ADD:
      return MDefinition::Opcode::Add;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}

// Try to compile JSOP_ADD as a string concatenation (MConcat). Sets
// *emitted on success; leaves it false when the operands do not look like
// a concatenation.
AbortReasonOr<Ok> IonBuilder::binaryArithTryConcat(bool* emitted, JSOp op,
                                                   MDefinition* left,
                                                   MDefinition* right) {
  MOZ_ASSERT(*emitted == false);

  // Try to convert an addition into a concat operation if the inputs
  // indicate this might be a concatenation.

  // Only try to replace this with concat when we have an addition.
  if (op != JSOP_ADD) {
    return Ok();
  }

  trackOptimizationAttempt(TrackedStrategy::BinaryArith_Concat);

  // Make sure one of the inputs is a string.
  if (left->type() != MIRType::String && right->type() != MIRType::String) {
    trackOptimizationOutcome(TrackedOutcome::OperandNotString);
    return Ok();
  }

  // The non-string input (if present) should be at least easily coercible
  // to string; symbols, objects, and magic values are excluded.
  if (right->type() != MIRType::String &&
      (right->mightBeType(MIRType::Symbol) ||
       right->mightBeType(MIRType::Object) || right->mightBeMagicType())) {
    trackOptimizationOutcome(TrackedOutcome::OperandNotEasilyCoercibleToString);
    return Ok();
  }
  if (left->type() != MIRType::String &&
      (left->mightBeType(MIRType::Symbol) ||
       left->mightBeType(MIRType::Object) || left->mightBeMagicType())) {
    trackOptimizationOutcome(TrackedOutcome::OperandNotEasilyCoercibleToString);
    return Ok();
  }

  MConcat* ins = MConcat::New(alloc(), left, right);
  current->add(ins);
  current->push(ins);

  MOZ_TRY(maybeInsertResume());

  trackOptimizationSuccess();
  *emitted = true;
  return Ok();
}

// Try to emit a specialized MPow producing |outputType| (Int32 or Double).
// Sets *emitted on success; leaves it false when either operand is
// non-numeric or the requested output type is unsupported.
AbortReasonOr<Ok> IonBuilder::powTrySpecialized(bool* emitted,
                                                MDefinition* base,
                                                MDefinition* power,
                                                MIRType outputType) {
  // Only int32 and double outputs are supported, and both operands must
  // already be numeric.
  if (outputType != MIRType::Int32 && outputType != MIRType::Double) {
    return Ok();
  }
  if (!IsNumberType(base->type()) || !IsNumberType(power->type())) {
    return Ok();
  }

  // A float32 exponent is widened to double for the MPow node.
  MIRType powerType = power->type();
  if (powerType == MIRType::Float32) {
    powerType = MIRType::Double;
  }

  MPow* pow = MPow::New(alloc(), base, power, powerType);
  current->add(pow);
  MDefinition* result = pow;

  // Coerce the pow result to the requested output type when it differs.
  // (The two branches are mutually exclusive given the outputType guard.)
  if (outputType == MIRType::Int32 && result->type() != MIRType::Int32) {
    auto* toInt = MToNumberInt32::New(alloc(), result);
    current->add(toInt);
    result = toInt;
  } else if (outputType == MIRType::Double &&
             result->type() != MIRType::Double) {
    MToDouble* toDouble = MToDouble::New(alloc(), result);
    current->add(toDouble);
    result = toDouble;
  }

  current->push(result);
  *emitted = true;
  return Ok();
}

// Try to emit a number-specialized MBinaryArithInstruction based on the
// operands' MIR types. Sets *emitted on success.
AbortReasonOr<Ok> IonBuilder::binaryArithTrySpecialized(bool* emitted, JSOp op,
                                                        MDefinition* left,
                                                        MDefinition* right) {
  MOZ_ASSERT(*emitted == false);

  // Try to emit a specialized binary instruction based on the input types
  // of the operands.

  trackOptimizationAttempt(TrackedStrategy::BinaryArith_SpecializedTypes);

  // Anything complex - strings, symbols, and objects - are not specialized
  if (!SimpleArithOperand(left) || !SimpleArithOperand(right)) {
    trackOptimizationOutcome(TrackedOutcome::OperandNotSimpleArith);
    return Ok();
  }

  // One of the inputs needs to be a number.
  if (!IsNumberType(left->type()) && !IsNumberType(right->type())) {
    trackOptimizationOutcome(TrackedOutcome::OperandNotNumber);
    return Ok();
  }

  MDefinition::Opcode defOp = JSOpToMDefinition(op);
  MBinaryArithInstruction* ins =
      MBinaryArithInstruction::New(alloc(), defOp, left, right);
  ins->setNumberSpecialization(alloc(), inspector, pc);

  // Safe to mark commutative: string operands were excluded above, so ADD
  // cannot be a concatenation here.
  if (op == JSOP_ADD || op == JSOP_MUL) {
    ins->setCommutative();
  }

  current->add(ins);
  current->push(ins);

  MOZ_ASSERT(!ins->isEffectful());
  MOZ_TRY(maybeInsertResume());

  trackOptimizationSuccess();
  *emitted = true;
  return Ok();
}

// Try to emit a binary arith instruction specialized on the MIR type the
// baseline caches observed at this pc. Sets *emitted on success.
AbortReasonOr<Ok> IonBuilder::binaryArithTrySpecializedOnBaselineInspector(
    bool* emitted, JSOp op, MDefinition* left, MDefinition* right) {
  MOZ_ASSERT(*emitted == false);

  // Try to emit a specialized binary instruction speculating the
  // type using the baseline caches.

  trackOptimizationAttempt(
      TrackedStrategy::BinaryArith_SpecializedOnBaselineTypes);

  // MIRType::None means baseline recorded no usable type feedback here.
  MIRType specialization = inspector->expectedBinaryArithSpecialization(pc);
  if (specialization == MIRType::None) {
    trackOptimizationOutcome(TrackedOutcome::SpeculationOnInputTypesFailed);
    return Ok();
  }

  MDefinition::Opcode def_op = JSOpToMDefinition(op);
  MBinaryArithInstruction* ins =
      MBinaryArithInstruction::New(alloc(), def_op, left, right);
  ins->setSpecialization(specialization);

  current->add(ins);
  current->push(ins);

  MOZ_ASSERT(!ins->isEffectful());
  MOZ_TRY(maybeInsertResume());

  trackOptimizationSuccess();
  *emitted = true;
  return Ok();
}

// Try to emit a CacheIR arith stub (MUnaryCache or MBinaryCache) for the
// bytecode currently being compiled. |left| is nullptr for unary ops,
// except JSOP_NEG, which reaches here rewritten as a JSOP_MUL by -1.
// Sets *emitted on success.
AbortReasonOr<Ok> IonBuilder::arithTryBinaryStub(bool* emitted, JSOp op,
                                                 MDefinition* left,
                                                 MDefinition* right) {
  MOZ_ASSERT(*emitted == false);
  // Dispatch on the bytecode actually at pc, not the (possibly rewritten)
  // |op|: jsop_neg/jsop_pos lower to JSOP_MUL before reaching this point.
  JSOp actualOp = JSOp(*pc);

  // Try to emit a binary arith stub cache.
  if (JitOptions.disableCacheIRBinaryArith) {
    return Ok();
  }

  // The actual jsop 'jsop_pos' is not supported yet.
  // There's no IC support for JSOP_POW either.
  if (actualOp == JSOP_POS || actualOp == JSOP_POW) {
    return Ok();
  }

  MInstruction* stub = nullptr;
  switch (actualOp) {
    case JSOP_NEG:
    case JSOP_BITNOT:
      // JSOP_NEG arrives as a multiplication by constant -1 (see jsop_neg);
      // JSOP_BITNOT passes left == nullptr (see jsop_bitnot).
      MOZ_ASSERT_IF(op == JSOP_MUL,
                    left->maybeConstantValue() &&
                        left->maybeConstantValue()->toInt32() == -1);
      MOZ_ASSERT_IF(op != JSOP_MUL, !left);
      stub = MUnaryCache::New(alloc(), right);
      break;
    case JSOP_ADD:
    case JSOP_SUB:
    case JSOP_MUL:
    case JSOP_DIV:
    case JSOP_MOD:
      stub = MBinaryCache::New(alloc(), left, right, MIRType::Value);
      break;
    default:
      MOZ_CRASH("unsupported arith");
  }

  current->add(stub);
  current->push(stub);

  // Decrease type from 'any type' to 'empty type' when one of the operands
  // is 'empty typed'.
  maybeMarkEmpty(stub);

  MOZ_TRY(resumeAfter(stub));

  *emitted = true;
  return Ok();
}

// Emit MIR for a binary arithmetic op (ADD/SUB/MUL/DIV/MOD) on |left| and
// |right|. Tries, in order: string concatenation, specialization on the
// operand types, specialization on baseline feedback, a CacheIR stub, and
// finally a generic effectful instruction (slow VM call).
AbortReasonOr<Ok> IonBuilder::jsop_binary_arith(JSOp op, MDefinition* left,
                                                MDefinition* right) {
  bool emitted = false;

  startTrackingOptimizations();

  trackTypeInfo(TrackedTypeSite::Operand, left->type(), left->resultTypeSet());
  trackTypeInfo(TrackedTypeSite::Operand, right->type(),
                right->resultTypeSet());

  if (!forceInlineCaches()) {
    MOZ_TRY(binaryArithTryConcat(&emitted, op, left, right));
    if (emitted) {
      return Ok();
    }

    MOZ_TRY(binaryArithTrySpecialized(&emitted, op, left, right));
    if (emitted) {
      return Ok();
    }

    MOZ_TRY(binaryArithTrySpecializedOnBaselineInspector(&emitted, op, left,
                                                         right));
    if (emitted) {
      return Ok();
    }
  }

  MOZ_TRY(arithTryBinaryStub(&emitted, op, left, right));
  if (emitted) {
    return Ok();
  }

  // Not possible to optimize. Do a slow vm call.
  trackOptimizationAttempt(TrackedStrategy::BinaryArith_Call);
  trackOptimizationSuccess();

  MDefinition::Opcode def_op = JSOpToMDefinition(op);
  MBinaryArithInstruction* ins =
      MBinaryArithInstruction::New(alloc(), def_op, left, right);

  // Decrease type from 'any type' to 'empty type' when one of the operands
  // is 'empty typed'.
  maybeMarkEmpty(ins);

  current->add(ins);
  current->push(ins);
  MOZ_ASSERT(ins->isEffectful());
  return resumeAfter(ins);
}

// Stack-operand variant: pops rhs then lhs and forwards to the main
// jsop_binary_arith overload.
AbortReasonOr<Ok> IonBuilder::jsop_binary_arith(JSOp op) {
  MDefinition* rhs = current->pop();
  MDefinition* lhs = current->pop();
  return jsop_binary_arith(op, lhs, rhs);
}

// Emit MIR for JSOP_POW (x ** y). Tries a type-specialized MPow, then a
// CacheIR stub, and finally an effectful MPow doing a VM call.
AbortReasonOr<Ok> IonBuilder::jsop_pow() {
  MDefinition* exponent = current->pop();
  MDefinition* base = current->pop();

  bool emitted = false;

  if (!forceInlineCaches()) {
    MOZ_TRY(powTrySpecialized(&emitted, base, exponent, MIRType::Double));
    if (emitted) {
      return Ok();
    }
  }

  // Note: arithTryBinaryStub bails out early for JSOP_POW (no IC support),
  // so this currently falls through to the slow path.
  MOZ_TRY(arithTryBinaryStub(&emitted, JSOP_POW, base, exponent));
  if (emitted) {
    return Ok();
  }

  // For now, use MIRType::None as a safe cover-all. See bug 1188079.
  MPow* pow = MPow::New(alloc(), base, exponent, MIRType::None);
  current->add(pow);
  current->push(pow);
  MOZ_ASSERT(pow->isEffectful());
  return resumeAfter(pow);
}

// Emit MIR for JSOP_POS (+x). Already-numeric values are left untouched;
// everything else is compiled as x * 1 so the multiply performs the
// numeric coercion.
AbortReasonOr<Ok> IonBuilder::jsop_pos() {
  MDefinition* top = current->peek(-1);
  if (IsNumberType(top->type())) {
    // Already int32 or double. Mark the operand implicitly used so it is
    // not optimized away when it has no other uses, as we could bail out.
    top->setImplicitlyUsedUnchecked();
    return Ok();
  }

  // Compile +x as x * 1.
  MDefinition* value = current->pop();
  MConstant* one = MConstant::New(alloc(), Int32Value(1));
  current->add(one);
  return jsop_binary_arith(JSOP_MUL, value, one);
}

// Emit MIR for JSOP_NEG (-x), compiled as (-1) * x. Since JSOP_NEG does
// not use a stack slot, the -1 constant is handed to the multiply directly
// instead of being pushed.
AbortReasonOr<Ok> IonBuilder::jsop_neg() {
  MConstant* minusOne = MConstant::New(alloc(), Int32Value(-1));
  current->add(minusOne);

  MDefinition* operand = current->pop();
  return jsop_binary_arith(JSOP_MUL, minusOne, operand);
}

// Emit MIR for JSOP_TOSTRING: convert the top of stack with MToString,
// unless it is already a string.
AbortReasonOr<Ok> IonBuilder::jsop_tostring() {
  MDefinition* top = current->peek(-1);
  if (top->type() == MIRType::String) {
    return Ok();
  }

  MDefinition* value = current->pop();
  MToString* toString = MToString::New(alloc(), value);
  current->add(toString);
  current->push(toString);
  MOZ_ASSERT(!toString->isEffectful());
  return Ok();
}

// RAII helper: installs |returns| as the graph's return accumulator for the
// duration of an inlined build, restoring the previous accumulator on
// destruction.
class AutoAccumulateReturns {
  MIRGraph& graph_;
  MIRGraphReturns* prev_;  // Accumulator to restore when this goes away.

 public:
  AutoAccumulateReturns(MIRGraph& graph, MIRGraphReturns& returns)
      : graph_(graph) {
    prev_ = graph_.returnAccumulator();
    graph_.setReturnAccumulator(&returns);
  }
  ~AutoAccumulateReturns() { graph_.setReturnAccumulator(prev_); }
};

// Inline a scripted call: build |target|'s MIR directly into the current
// graph via a nested IonBuilder. On success |current| becomes the return
// block with the inlined return value pushed. May instead return
// InliningStatus_NotInlined (after restoring the backup state) when the
// callee proves uninlineable and backtracking is enabled, or propagate an
// abort.
IonBuilder::InliningResult IonBuilder::inlineScriptedCall(CallInfo& callInfo,
                                                          JSFunction* target) {
  MOZ_ASSERT(target->hasScript());
  MOZ_ASSERT(IsIonInlinablePC(pc));

  // Snapshot the current block so we can backtrack if inlining fails.
  MBasicBlock::BackupPoint backup(current);
  if (!backup.init(alloc())) {
    return abort(AbortReason::Alloc);
  }

  callInfo.setImplicitlyUsedUnchecked();

  // Create new |this| on the caller-side for inlined constructors.
  if (callInfo.constructing()) {
    MDefinition* thisDefn =
        createThis(target, callInfo.fun(), callInfo.getNewTarget());
    if (!thisDefn) {
      return abort(AbortReason::Alloc);
    }
    callInfo.setThis(thisDefn);
  }

  // Capture formals in the outer resume point.
  MOZ_TRY(callInfo.pushCallStack(this, current));

  MResumePoint* outerResumePoint =
      MResumePoint::New(alloc(), current, pc, MResumePoint::Outer);
  if (!outerResumePoint) {
    return abort(AbortReason::Alloc);
  }
  current->setOuterResumePoint(outerResumePoint);

  // Pop formals again, except leave |fun| on stack for duration of call.
  callInfo.popCallStack(current);
  current->push(callInfo.fun());

  JSScript* calleeScript = target->nonLazyScript();
  BaselineInspector inspector(calleeScript);

  // Improve type information of |this| when not set.
  if (callInfo.constructing() && !callInfo.thisArg()->resultTypeSet()) {
    StackTypeSet* types = TypeScript::ThisTypes(calleeScript);
    if (types && !types->unknown()) {
      TemporaryTypeSet* clonedTypes = types->clone(alloc_->lifoAlloc());
      if (!clonedTypes) {
        return abort(AbortReason::Alloc);
      }
      MTypeBarrier* barrier =
          MTypeBarrier::New(alloc(), callInfo.thisArg(), clonedTypes);
      current->add(barrier);
      // Fold |this| to a constant when the barrier pins a singleton type.
      if (barrier->type() == MIRType::Undefined) {
        callInfo.setThis(constant(UndefinedValue()));
      } else if (barrier->type() == MIRType::Null) {
        callInfo.setThis(constant(NullValue()));
      } else {
        callInfo.setThis(barrier);
      }
    }
  }

  // Start inlining: set up the callee's inline script tree and CompileInfo.
  LifoAlloc* lifoAlloc = alloc_->lifoAlloc();
  InlineScriptTree* inlineScriptTree =
      info().inlineScriptTree()->addCallee(alloc_, pc, calleeScript);
  if (!inlineScriptTree) {
    return abort(AbortReason::Alloc);
  }
  CompileInfo* info = lifoAlloc->new_<CompileInfo>(
      runtime, calleeScript, target, (jsbytecode*)nullptr,
      this->info().analysisMode(),
      /* needsArgsObj = */ false, inlineScriptTree);
  if (!info) {
    return abort(AbortReason::Alloc);
  }

  // Collect the callee's return exits while the nested builder runs.
  MIRGraphReturns returns(alloc());
  AutoAccumulateReturns aar(graph(), returns);

  // Build the graph.
  IonBuilder inlineBuilder(analysisContext, realm, options, &alloc(), &graph(),
                           constraints(), &inspector, info, &optimizationInfo(),
                           nullptr, inliningDepth_ + 1, loopDepth_);
  AbortReasonOr<Ok> result =
      inlineBuilder.buildInline(this, outerResumePoint, callInfo);
  if (result.isErr()) {
    if (analysisContext && analysisContext->isExceptionPending()) {
      JitSpew(JitSpew_IonAbort, "Inline builder raised exception.");
      MOZ_ASSERT(result.unwrapErr() == AbortReason::Error);
      return Err(result.unwrapErr());
    }

    // Inlining the callee failed. Mark the callee as uninlineable only if
    // the inlining was aborted for a non-exception reason.
    switch (result.unwrapErr()) {
      case AbortReason::Disable:
        calleeScript->setUninlineable();
        if (!JitOptions.disableInlineBacktracking) {
          current = backup.restore();
          if (!current) {
            return abort(AbortReason::Alloc);
          }
          return InliningStatus_NotInlined;
        }
        return abort(AbortReason::Inlining);

      case AbortReason::PreliminaryObjects: {
        // Propagate the nested builder's aborted preliminary groups to this
        // builder before re-raising the abort.
        const ObjectGroupVector& groups =
            inlineBuilder.abortedPreliminaryGroups();
        MOZ_ASSERT(!groups.empty());
        for (size_t i = 0; i < groups.length(); i++) {
          addAbortedPreliminaryGroup(groups[i]);
        }
        return Err(result.unwrapErr());
      }

      case AbortReason::Alloc:
      case AbortReason::Inlining:
      case AbortReason::Error:
        return Err(result.unwrapErr());

      case AbortReason::NoAbort:
        MOZ_CRASH("Abort with AbortReason::NoAbort");
        return abort(AbortReason::Error);
    }
  }

  if (returns.empty()) {
    // Inlining of functions that have no exit is not supported.
    calleeScript->setUninlineable();
    if (!JitOptions.disableInlineBacktracking) {
      current = backup.restore();
      if (!current) {
        return abort(AbortReason::Alloc);
      }
      return InliningStatus_NotInlined;
    }
    return abort(AbortReason::Inlining);
  }

  // Create return block.
  jsbytecode* postCall = GetNextPc(pc);
  MBasicBlock* returnBlock;
  MOZ_TRY_VAR(returnBlock, newBlock(current->stackDepth(), postCall));
  graph().addBlock(returnBlock);
  returnBlock->setCallerResumePoint(callerResumePoint_);

  // Inherit the slots from current and pop |fun|.
  returnBlock->inheritSlots(current);
  returnBlock->pop();

  // Accumulate return values.
  MDefinition* retvalDefn = patchInlinedReturns(callInfo, returns, returnBlock);
  if (!retvalDefn) {
    return abort(AbortReason::Alloc);
  }
  returnBlock->push(retvalDefn);

  // Initialize entry slots now that the stack has been fixed up.
  if (!returnBlock->initEntrySlots(alloc())) {
    return abort(AbortReason::Alloc);
  }

  MOZ_TRY(setCurrentAndSpecializePhis(returnBlock));

  return InliningStatus_Inlined;
}

// Convert a single inlined exit block's MReturn into an MGoto to |bottom|
// and return the definition representing the call's result from that exit,
// or nullptr on allocation failure.
MDefinition* IonBuilder::patchInlinedReturn(CallInfo& callInfo,
                                            MBasicBlock* exit,
                                            MBasicBlock* bottom) {
  // Replaces the MReturn in the exit block with an MGoto.
  MDefinition* rdef = exit->lastIns()->toReturn()->input();
  exit->discardLastIns();

  // Constructors must be patched by the caller to always return an object.
  if (callInfo.constructing()) {
    if (rdef->type() == MIRType::Value) {
      // Unknown return: dynamically detect objects.
      MReturnFromCtor* filter =
          MReturnFromCtor::New(alloc(), rdef, callInfo.thisArg());
      exit->add(filter);
      rdef = filter;
    } else if (rdef->type() != MIRType::Object) {
      // Known non-object return: force |this|.
      rdef = callInfo.thisArg();
    }
  } else if (callInfo.isSetter()) {
    // Setters return their argument, not whatever value is returned.
    rdef = callInfo.getArg(0);
  }

  // Narrow the return value to the observed types; skipped for setters,
  // whose result is the original argument.
  if (!callInfo.isSetter()) {
    rdef = specializeInlinedReturn(rdef, exit);
  }

  MGoto* replacement = MGoto::New(alloc(), bottom);
  exit->end(replacement);
  if (!bottom->addPredecessorWithoutPhis(exit)) {
    return nullptr;
  }

  return rdef;
}

// Narrow an inlined return value to the types observed at the call site's
// pc, inserting a non-movable type barrier in |exit| when that adds
// information. Returns |rdef| unchanged when specialization would not help.
MDefinition* IonBuilder::specializeInlinedReturn(MDefinition* rdef,
                                                 MBasicBlock* exit) {
  // Remove types from the return definition that weren't observed.
  TemporaryTypeSet* types = bytecodeTypes(pc);

  // The observed typeset doesn't contain extra information.
  if (types->empty() || types->unknown()) {
    return rdef;
  }

  // Decide if specializing is needed using the result typeset if available,
  // else use the result type.

  if (rdef->resultTypeSet()) {
    // Don't specialize if return typeset is a subset of the
    // observed typeset. The return typeset is already more specific.
    if (rdef->resultTypeSet()->isSubset(types)) {
      return rdef;
    }
  } else {
    MIRType observedType = types->getKnownMIRType();

    // Don't specialize if type is MIRType::Float32 and TI reports
    // MIRType::Double. Float is more specific than double.
    if (observedType == MIRType::Double && rdef->type() == MIRType::Float32) {
      return rdef;
    }

    // Don't specialize if types are in accordance, except for MIRType::Value
    // and MIRType::Object (when not unknown object), since the typeset
    // contains more specific information.
    if (observedType == rdef->type() && observedType != MIRType::Value &&
        (observedType != MIRType::Object || types->unknownObject())) {
      return rdef;
    }
  }

  // The barrier is added to the exit block; make it the current block.
  setCurrent(exit);

  MTypeBarrier* barrier = nullptr;
  rdef = addTypeBarrier(rdef, types, BarrierKind::TypeSet, &barrier);
  // Keep the barrier anchored at the exit so the narrowing happens there.
  if (barrier) {
    barrier->setNotMovable();
  }

  return rdef;
}

// Rewrite every MReturn in |returns| into an MGoto to |bottom| and produce
// the definition representing the call's return value, or nullptr on
// allocation failure.
MDefinition* IonBuilder::patchInlinedReturns(CallInfo& callInfo,
                                             MIRGraphReturns& returns,
                                             MBasicBlock* bottom) {
  MOZ_ASSERT(returns.length() > 0);

  // A single exit needs no phi.
  if (returns.length() == 1) {
    return patchInlinedReturn(callInfo, returns[0], bottom);
  }

  // Multiple exits: merge the per-exit return values with a phi in |bottom|.
  MPhi* phi = MPhi::New(alloc());
  if (!phi->reserveLength(returns.length())) {
    return nullptr;
  }

  for (size_t i = 0; i < returns.length(); i++) {
    MDefinition* patched = patchInlinedReturn(callInfo, returns[i], bottom);
    if (!patched) {
      return nullptr;
    }
    phi->addInput(patched);
  }

  bottom->addPhi(phi);
  return phi;
}

IonBuilder::InliningDecision IonBuilder::makeInliningDecision(
    JSObject* targetArg, CallInfo& callInfo) {
  // Decide whether |targetArg| may be inlined at the current call site.
  // Returns InliningDecision_Inline, InliningDecision_DontInline, or
  // InliningDecision_WarmUpCountTooLow. On a positive decision for a
  // scripted target, this also updates the outermost builder's inlined
  // bytecode accounting and may lower the outer BaselineScript's max
  // inlining depth (see the loop-related comment below).

  // When there is no target, inlining is impossible.
  if (targetArg == nullptr) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineNoTarget);
    return InliningDecision_DontInline;
  }

  // Don't inline (native or scripted) cross-realm calls.
  Realm* targetRealm = JS::GetObjectRealmOrNull(targetArg);
  if (!targetRealm || targetRealm != script()->realm()) {
    return InliningDecision_DontInline;
  }

  // Inlining non-function targets is handled by inlineNonFunctionCall().
  if (!targetArg->is<JSFunction>()) {
    return InliningDecision_Inline;
  }

  JSFunction* target = &targetArg->as<JSFunction>();

  // Never inline during the arguments usage analysis.
  if (info().analysisMode() == Analysis_ArgumentsUsage) {
    return InliningDecision_DontInline;
  }

  // Native functions provide their own detection in inlineNativeCall().
  if (target->isNative()) {
    return InliningDecision_Inline;
  }

  // Determine whether inlining is possible at callee site
  InliningDecision decision = canInlineTarget(target, callInfo);
  if (decision != InliningDecision_Inline) {
    return decision;
  }

  // Heuristics!
  JSScript* targetScript = target->nonLazyScript();

  // Callee must not be excessively large.
  // This heuristic also applies to the callsite as a whole.
  // The limit depends on whether off-thread compilation is available, since
  // off-thread compiles can tolerate more inlining work.
  bool offThread = options.offThreadCompilationAvailable();
  if (targetScript->length() >
      optimizationInfo().inlineMaxBytecodePerCallSite(offThread)) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineBigCallee);
    return DontInline(targetScript, "Vetoed: callee excessively large");
  }

  // Callee must have been called a few times to have somewhat stable
  // type information, except for definite properties analysis,
  // as the caller has not run yet.
  if (targetScript->getWarmUpCount() <
          optimizationInfo().inliningWarmUpThreshold() &&
      !targetScript->baselineScript()->ionCompiledOrInlined() &&
      info().analysisMode() != Analysis_DefiniteProperties) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineNotHot);
    JitSpew(JitSpew_Inlining,
            "Cannot inline %s:%u:%u: callee is insufficiently hot.",
            targetScript->filename(), targetScript->lineno(),
            targetScript->column());
    return InliningDecision_WarmUpCountTooLow;
  }

  // Don't inline if the callee is known to inline a lot of code, to avoid
  // huge MIR graphs.
  uint32_t inlinedBytecodeLength =
      targetScript->baselineScript()->inlinedBytecodeLength();
  if (inlinedBytecodeLength >
      optimizationInfo().inlineMaxCalleeInlinedBytecodeLength()) {
    trackOptimizationOutcome(
        TrackedOutcome::CantInlineBigCalleeInlinedBytecodeLength);
    return DontInline(targetScript,
                      "Vetoed: callee inlinedBytecodeLength is too big");
  }

  IonBuilder* outerBuilder = outermostBuilder();

  // Cap the total bytecode length we inline under a single script, to avoid
  // excessive inlining in pathological cases.
  size_t totalBytecodeLength =
      outerBuilder->inlinedBytecodeLength_ + targetScript->length();
  if (totalBytecodeLength > optimizationInfo().inlineMaxTotalBytecodeLength()) {
    trackOptimizationOutcome(
        TrackedOutcome::CantInlineExceededTotalBytecodeLength);
    return DontInline(targetScript,
                      "Vetoed: exceeding max total bytecode length");
  }

  // Cap the inlining depth.

  // Small functions get a deeper inlining budget; for normal-sized callees we
  // additionally require the caller itself not to be excessively large.
  uint32_t maxInlineDepth;
  if (JitOptions.isSmallFunction(targetScript)) {
    maxInlineDepth = optimizationInfo().smallFunctionMaxInlineDepth();
  } else {
    maxInlineDepth = optimizationInfo().maxInlineDepth();

    // Caller must not be excessively large.
    if (script()->length() >=
        optimizationInfo().inliningMaxCallerBytecodeLength()) {
      trackOptimizationOutcome(TrackedOutcome::CantInlineBigCaller);
      return DontInline(targetScript, "Vetoed: caller excessively large");
    }
  }

  BaselineScript* outerBaseline =
      outermostBuilder()->script()->baselineScript();
  if (inliningDepth_ >= maxInlineDepth) {
    // We hit the depth limit and won't inline this function. Give the
    // outermost script a max inlining depth of 0, so that it won't be
    // inlined in other scripts. This heuristic is currently only used
    // when we're inlining scripts with loops, see the comment below.
    outerBaseline->setMaxInliningDepth(0);

    trackOptimizationOutcome(TrackedOutcome::CantInlineExceededDepth);
    return DontInline(targetScript, "Vetoed: exceeding allowed inline depth");
  }

  // Inlining functions with loops can be complicated. For instance, if we're
  // close to the inlining depth limit and we inline the function f below, we
  // can no longer inline the call to g:
  //
  //   function f() {
  //      while (cond) {
  //          g();
  //      }
  //   }
  //
  // If the loop has many iterations, it's more efficient to call f and inline
  // g in f.
  //
  // To avoid this problem, we record a separate max inlining depth for each
  // script, indicating at which depth we won't be able to inline all functions
  // we inlined this time. This solves the issue above, because we will only
  // inline f if it means we can also inline g.
  if (targetScript->hasLoops() &&
      inliningDepth_ >= targetScript->baselineScript()->maxInliningDepth()) {
    trackOptimizationOutcome(TrackedOutcome::CantInlineExceededDepth);
    return DontInline(targetScript,
                      "Vetoed: exceeding allowed script inline depth");
  }

  // Update the max depth at which we can inline the outer script.
  MOZ_ASSERT(maxInlineDepth > inliningDepth_);
  uint32_t scriptInlineDepth = maxInlineDepth - inliningDepth_ - 1;
  if (scriptInlineDepth < outerBaseline->maxInliningDepth()) {
    outerBaseline->setMaxInliningDepth(scriptInlineDepth);
  }

  // End of heuristics, we will inline this function.

  // Charge the callee's bytecode against the per-outer-script budget checked
  // above.
  outerBuilder->inlinedBytecodeLength_ += targetScript->length();

  return InliningDecision_Inline;
}

AbortReasonOr<Ok> IonBuilder::selectInliningTargets(
    const InliningTargets& targets, CallInfo& callInfo, BoolVector& choiceSet,
    uint32_t* numInlineable) {
  // For a polymorphic call site, decide per-target whether it may be inlined.
  // On success, |choiceSet| has one bool per entry in |targets| and
  // |*numInlineable| is the number of true entries. Note: if the definite
  // properties analysis bails out early below, |choiceSet| stays empty.
  *numInlineable = 0;
  uint32_t totalSize = 0;

  // For each target, ask whether it may be inlined.
  if (!choiceSet.reserve(targets.length())) {
    return abort(AbortReason::Alloc);
  }

  // Don't inline polymorphic sites during the definite properties analysis.
  // AddClearDefiniteFunctionUsesInScript depends on this for correctness.
  if (info().analysisMode() == Analysis_DefiniteProperties &&
      targets.length() > 1) {
    return Ok();
  }

  for (size_t i = 0; i < targets.length(); i++) {
    JSObject* target = targets[i].target;

    trackOptimizationAttempt(TrackedStrategy::Call_Inline);
    trackTypeInfo(TrackedTypeSite::Call_Target, target);

    bool inlineable;
    InliningDecision decision = makeInliningDecision(target, callInfo);
    switch (decision) {
      case InliningDecision_Error:
        return abort(AbortReason::Error);
      case InliningDecision_DontInline:
      case InliningDecision_WarmUpCountTooLow:
        inlineable = false;
        break;
      case InliningDecision_Inline:
        inlineable = true;
        break;
      default:
        MOZ_CRASH("Unhandled InliningDecision value!");
    }

    if (target->is<JSFunction>()) {
      // Enforce a maximum inlined bytecode limit at the callsite.
      // |totalSize| accumulates across all interpreted targets chosen so far,
      // so later targets may be vetoed by earlier, larger ones.
      if (inlineable && target->as<JSFunction>().isInterpreted()) {
        totalSize += target->as<JSFunction>().nonLazyScript()->length();
        bool offThread = options.offThreadCompilationAvailable();
        if (totalSize >
            optimizationInfo().inlineMaxBytecodePerCallSite(offThread)) {
          inlineable = false;
        }
      }
    } else {
      // Non-function targets are not supported by polymorphic inlining.
      inlineable = false;
    }

    choiceSet.infallibleAppend(inlineable);
    if (inlineable) {
      *numInlineable += 1;
    }
  }

  // If optimization tracking is turned on and one of the inlineable targets
  // is a native, track the type info of the call. Most native inlinings
  // depend on the types of the arguments and the return value.
  // (A chosen target is always a JSFunction here: non-functions were
  // rejected above, so the as<JSFunction>() cast is safe.)
  if (isOptimizationTrackingEnabled()) {
    for (size_t i = 0; i < targets.length(); i++) {
      if (choiceSet[i] && targets[i].target->as<JSFunction>().isNative()) {
        trackTypeInfo(callInfo);
        break;
      }
    }
  }

  MOZ_ASSERT(choiceSet.length() == targets.length());
  return Ok();
}

static bool CanInlineGetPropertyCache(MGetPropertyCache* cache,
                                      MDefinition* thisDef) {
  // The cache must be keyed on an object-typed value, and that value must be
  // exactly the |this| definition of the call.
  if (cache->value()->type() != MIRType::Object || cache->value() != thisDef) {
    return false;
  }

  // Inlining requires a non-empty inline-property table to dispatch on.
  InlinePropertyTable* table = cache->propTable();
  return table != nullptr && table->numEntries() > 0;
}

// RAII helper around an (optional) MGetPropertyCache. While the wrapper still
// owns the cache at destruction time, it discards the cache's pre-allocated
// prior resume point; ownership can be transferred out via moveableCache(),
// which also disarms that cleanup.
class WrapMGetPropertyCache {
  MGetPropertyCache* cache_;

 private:
  // Discard the resume point pre-allocated for the cache's inline property
  // table, if any. Called from the destructor; each early return covers a
  // case where there is nothing to discard.
  void discardPriorResumePoint() {
    if (!cache_) {
      return;
    }

    InlinePropertyTable* propTable = cache_->propTable();
    if (!propTable) {
      return;
    }
    MResumePoint* rp = propTable->takePriorResumePoint();
    if (!rp) {
      return;
    }
    cache_->block()->discardPreAllocatedResumePoint(rp);
  }

 public:
  // |cache| may be null, in which case the wrapper is inert.
  explicit WrapMGetPropertyCache(MGetPropertyCache* cache) : cache_(cache) {}

  ~WrapMGetPropertyCache() { discardPriorResumePoint(); }

  MGetPropertyCache* get() { return cache_; }
  MGetPropertyCache* operator->() { return get(); }

  // This function returns the cache given to the constructor if the
  // GetPropertyCache can be moved into the ObjectGroup fallback path.
  // On success, the wrapper releases the cache (cache_ is nulled), so the
  // destructor will not discard its prior resume point.
  MGetPropertyCache* moveableCache(bool hasTypeBarrier, MDefinition* thisDef) {
    // If we have unhandled uses of the MGetPropertyCache, then we cannot
    // move it to the ObjectGroup fallback path.
    if (!hasTypeBarrier) {
      if (cache_->hasUses()) {
        return nullptr;
      }
    } else {
      // There is the TypeBarrier consumer, so we check that this is the
      // only consumer.
      MOZ_ASSERT(cache_->hasUses());
      if (!cache_->hasOneUse()) {
        return nullptr;
      }
    }

    // If the this-object is not identical to the object of the
    // MGetPropertyCache, then we cannot use the InlinePropertyTable, or if
    // we do not yet have enough information from the ObjectGroup.
    if (!CanInlineGetPropertyCache(cache_, thisDef)) {
      return nullptr;
    }

    MGetPropertyCache* ret = cache_;
    cache_ = nullptr;
    return ret;
  }
};

MGetPropertyCache* IonBuilder::getInlineableGetPropertyCache(
    CallInfo& callInfo) {
  // Constructing calls never use this optimization.
  if (callInfo.constructing()) {
    return nullptr;
  }

  // Both the |this| value and the callee must be known to be objects.
  MDefinition* thisDef = callInfo.thisArg();
  MDefinition* funcDef = callInfo.fun();
  if (thisDef->type() != MIRType::Object ||
      funcDef->type() != MIRType::Object) {
    return nullptr;
  }

  // Case 1: the callee is directly an MGetPropertyCache. A cache with no
  // uses may be optimized away.
  if (funcDef->isGetPropertyCache()) {
    WrapMGetPropertyCache wrapper(funcDef->toGetPropertyCache());
    return wrapper.moveableCache(/* hasTypeBarrier = */ false, thisDef);
  }

  // Case 2: optimize away the following common pattern:
  //   MTypeBarrier[MIRType::Object] <- MGetPropertyCache
  if (!funcDef->isTypeBarrier()) {
    return nullptr;
  }

  MTypeBarrier* barrier = funcDef->toTypeBarrier();
  if (barrier->hasUses() || barrier->type() != MIRType::Object ||
      !barrier->input()->isGetPropertyCache()) {
    return nullptr;
  }

  WrapMGetPropertyCache wrapper(barrier->input()->toGetPropertyCache());
  return wrapper.moveableCache(/* hasTypeBarrier = */ true, thisDef);
}

IonBuilder::InliningResult IonBuilder::inlineSingleCall(CallInfo& callInfo,
                                                        JSObject* targetArg) {
  // Inline a call with a single known target, dispatching on the kind of
  // target: non-function, native function, or scripted function.

  // Non-function targets have their own inlining path.
  if (!targetArg->is<JSFunction>()) {
    InliningStatus status;
    MOZ_TRY_VAR(status, inlineNonFunctionCall(callInfo, targetArg));
    trackInlineSuccess(status);
    return status;
  }

  JSFunction* target = &targetArg->as<JSFunction>();

  // Native functions are handled by the native-call inliner.
  if (target->isNative()) {
    InliningStatus status;
    MOZ_TRY_VAR(status, inlineNativeCall(callInfo, target));
    trackInlineSuccess(status);
    return status;
  }

  // Track success now, as inlining a scripted call makes a new return block
  // which has a different pc than the current call pc.
  trackInlineSuccess();
  return inlineScriptedCall(callInfo, target);
}

IonBuilder::InliningResult IonBuilder::inlineCallsite(
    const InliningTargets& targets, CallInfo& callInfo) {
  // Top-level driver for inlining a call site: no targets means no inlining;
  // a single non-cache target is inlined directly; otherwise a subset of the
  // targets is chosen and a polymorphic dispatch is emitted.
  if (targets.empty()) {
    trackOptimizationAttempt(TrackedStrategy::Call_Inline);
    trackOptimizationOutcome(TrackedOutcome::CantInlineNoTarget);
    return InliningStatus_NotInlined;
  }

  // Is the function provided by an MGetPropertyCache?
  // If so, the cache may be movable to a fallback path, with a dispatch
  // instruction guarding on the incoming ObjectGroup.
  WrapMGetPropertyCache propCache(getInlineableGetPropertyCache(callInfo));
  keepFallbackFunctionGetter(propCache.get());

  // Inline single targets -- unless they derive from a cache, in which case
  // avoiding the cache and guarding is still faster.
  if (!propCache.get() && targets.length() == 1) {
    JSObject* target = targets[0].target;

    trackOptimizationAttempt(TrackedStrategy::Call_Inline);
    trackTypeInfo(TrackedTypeSite::Call_Target, target);

    InliningDecision decision = makeInliningDecision(target, callInfo);
    switch (decision) {
      case InliningDecision_Error:
        return abort(AbortReason::Error);
      case InliningDecision_DontInline:
        return InliningStatus_NotInlined;
      case InliningDecision_WarmUpCountTooLow:
        return InliningStatus_WarmUpCountTooLow;
      case InliningDecision_Inline:
        break;
    }

    // Inlining will eliminate uses of the original callee, but it needs to
    // be preserved in phis if we bail out.  Mark the old callee definition as
    // implicitly used to ensure this happens.
    callInfo.fun()->setImplicitlyUsedUnchecked();

    // If the callee is not going to be a lambda (which may vary across
    // different invocations), then the callee definition can be replaced by a
    // constant.
    if (target->isSingleton()) {
      // Replace the function with an MConstant. For a construct-call where
      // new.target is the callee itself, keep newTarget in sync with the
      // replaced callee definition.
      MConstant* constFun = constant(ObjectValue(*target));
      if (callInfo.constructing() &&
          callInfo.getNewTarget() == callInfo.fun()) {
        callInfo.setNewTarget(constFun);
      }
      callInfo.setFun(constFun);
    }

    return inlineSingleCall(callInfo, target);
  }

  // Choose a subset of the targets for polymorphic inlining.
  BoolVector choiceSet(alloc());
  uint32_t numInlined;
  MOZ_TRY(selectInliningTargets(targets, callInfo, choiceSet, &numInlined));
  if (numInlined == 0) {
    return InliningStatus_NotInlined;
  }

  // Perform a polymorphic dispatch.
  MOZ_TRY(inlineCalls(callInfo, targets, choiceSet, propCache.get()));

  return InliningStatus_Inlined;
}

AbortReasonOr<Ok> IonBuilder::inlineGenericFallback(
    const Maybe<CallTargets>& targets, CallInfo& callInfo,
    MBasicBlock* dispatchBlock) {
  // Build the generic (non-inlined) call path for a polymorphic dispatch:
  // a new block off |dispatchBlock| that performs a regular MCall. On
  // success, |current| is the call's return block.

  // Generate a new block with all arguments on-stack.
  MBasicBlock* fallbackBlock;
  MOZ_TRY_VAR(fallbackBlock, newBlock(dispatchBlock, pc));
  graph().addBlock(fallbackBlock);

  // Create a new CallInfo to track modified state within this block.
  CallInfo fallbackInfo(alloc(), pc, callInfo.constructing(),
                        callInfo.ignoresReturnValue());
  if (!fallbackInfo.init(callInfo)) {
    return abort(AbortReason::Alloc);
  }
  fallbackInfo.popCallStack(fallbackBlock);

  // Generate an MCall, which uses stateful |current|.
  MOZ_TRY(setCurrentAndSpecializePhis(fallbackBlock));
  MOZ_TRY(makeCall(targets, fallbackInfo));

  // Pass return block to caller as |current|.
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::inlineObjectGroupFallback(
    const Maybe<CallTargets>& targets, CallInfo& callInfo,
    MBasicBlock* dispatchBlock, MObjectGroupDispatch* dispatch,
    MGetPropertyCache* cache, MBasicBlock** fallbackTarget) {
  // Build the fallback path for an ObjectGroup dispatch where the callee was
  // produced by an MGetPropertyCache: move the cache (and any type barrier)
  // into a dedicated block so it only executes when the dispatch misses.
  // On success, |*fallbackTarget| is the block the dispatch should jump to,
  // and |current| is the fallback call's return block.
  //
  // Getting here implies the following:
  // 1. The call function is an MGetPropertyCache, or an MGetPropertyCache
  //    followed by an MTypeBarrier.
  MOZ_ASSERT(callInfo.fun()->isGetPropertyCache() ||
             callInfo.fun()->isTypeBarrier());

  // 2. The MGetPropertyCache has inlineable cases by guarding on the
  // ObjectGroup.
  MOZ_ASSERT(dispatch->numCases() > 0);

  // 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only
  //    have at most a single use.
  MOZ_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses());
  MOZ_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse());

  // This means that no resume points yet capture the MGetPropertyCache,
  // so everything from the MGetPropertyCache up until the call is movable.
  // We now move the MGetPropertyCache and friends into a fallback path.
  MOZ_ASSERT(cache->idempotent());

  // Create a new CallInfo to track modified state within the fallback path.
  CallInfo fallbackInfo(alloc(), pc, callInfo.constructing(),
                        callInfo.ignoresReturnValue());
  if (!fallbackInfo.init(callInfo)) {
    return abort(AbortReason::Alloc);
  }

  // Capture stack prior to the call operation. This captures the function.
  MResumePoint* preCallResumePoint =
      MResumePoint::New(alloc(), dispatchBlock, pc, MResumePoint::ResumeAt);
  if (!preCallResumePoint) {
    return abort(AbortReason::Alloc);
  }

  DebugOnly<size_t> preCallFuncIndex =
      preCallResumePoint->stackDepth() - callInfo.numFormals();
  MOZ_ASSERT(preCallResumePoint->getOperand(preCallFuncIndex) ==
             fallbackInfo.fun());

  // In the dispatch block, replace the function's slot entry with Undefined.
  // The real callee is recomputed on the fallback path by the moved cache.
  MConstant* undefined = MConstant::New(alloc(), UndefinedValue());
  dispatchBlock->add(undefined);
  dispatchBlock->rewriteAtDepth(-int(callInfo.numFormals()), undefined);

  // Construct a block that does nothing but remove formals from the stack.
  // This is effectively changing the entry resume point of the later fallback
  // block.
  MBasicBlock* prepBlock;
  MOZ_TRY_VAR(prepBlock, newBlock(dispatchBlock, pc));
  graph().addBlock(prepBlock);
  fallbackInfo.popCallStack(prepBlock);

  // Construct a block into which the MGetPropertyCache can be moved.
  // This is subtle: the pc and resume point are those of the MGetPropertyCache!
  InlinePropertyTable* propTable = cache->propTable();
  MResumePoint* priorResumePoint = propTable->takePriorResumePoint();
  MOZ_ASSERT(propTable->pc() != nullptr);
  MOZ_ASSERT(priorResumePoint != nullptr);
  MBasicBlock* getPropBlock;
  MOZ_TRY_VAR(getPropBlock,
              newBlock(prepBlock, propTable->pc(), priorResumePoint));
  graph().addBlock(getPropBlock);

  prepBlock->end(MGoto::New(alloc(), getPropBlock));

  // Since the getPropBlock inherited the stack from right before the
  // MGetPropertyCache, the target of the MGetPropertyCache is still on the
  // stack.
  DebugOnly<MDefinition*> checkObject = getPropBlock->pop();
  MOZ_ASSERT(checkObject == cache->value());

  // Move the MGetPropertyCache and friends into the getPropBlock.
  if (fallbackInfo.fun()->isGetPropertyCache()) {
    MOZ_ASSERT(fallbackInfo.fun()->toGetPropertyCache() == cache);
    getPropBlock->addFromElsewhere(cache);
    getPropBlock->push(cache);
  } else {
    // The barrier consumes the cache, so both move and the barrier's result
    // becomes the callee on the stack.
    MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
    MOZ_ASSERT(barrier->type() == MIRType::Object);
    MOZ_ASSERT(barrier->input()->isGetPropertyCache());
    MOZ_ASSERT(barrier->input()->toGetPropertyCache() == cache);

    getPropBlock->addFromElsewhere(cache);
    getPropBlock->addFromElsewhere(barrier);
    getPropBlock->push(barrier);
  }

  // Construct an end block with the correct resume point.
  MBasicBlock* preCallBlock;
  MOZ_TRY_VAR(preCallBlock, newBlock(getPropBlock, pc, preCallResumePoint));
  graph().addBlock(preCallBlock);
  getPropBlock->end(MGoto::New(alloc(), preCallBlock));

  // Now inline the MCallGeneric, using preCallBlock as the dispatch point.
  MOZ_TRY(inlineGenericFallback(targets, fallbackInfo, preCallBlock));

  // inlineGenericFallback() set the return block as |current|.
  preCallBlock->end(MGoto::New(alloc(), current));
  *fallbackTarget = prepBlock;
  return Ok();
}

AbortReasonOr<Ok> IonBuilder::inlineCalls(CallInfo& callInfo,
                                          const InliningTargets& targets,
                                          BoolVector& choiceSet,
                                          MGetPropertyCache* maybeCache) {
  // Build a polymorphic inline dispatch: a dispatch instruction that routes
  // to one inlined body per chosen target (per |choiceSet|), plus an optional
  // fallback path performing a generic call, all merging into a common return
  // block whose phi collects the return values.
  //
  // Only handle polymorphic inlining.
  MOZ_ASSERT(IsIonInlinablePC(pc));
  MOZ_ASSERT(choiceSet.length() == targets.length());
  MOZ_ASSERT_IF(!maybeCache, targets.length() >= 2);
  MOZ_ASSERT_IF(maybeCache, targets.length() >= 1);
  MOZ_ASSERT_IF(maybeCache, maybeCache->value()->type() == MIRType::Object);

  MBasicBlock* dispatchBlock = current;
  callInfo.setImplicitlyUsedUnchecked();
  MOZ_TRY(callInfo.pushCallStack(this, dispatchBlock));

  // Patch any InlinePropertyTable to only contain functions that are
  // inlineable. The InlinePropertyTable will also be patched at the end to
  // exclude native functions that vetoed inlining.
  if (maybeCache) {
    InlinePropertyTable* propTable = maybeCache->propTable();
    propTable->trimToTargets(targets);
    if (propTable->numEntries() == 0) {
      maybeCache = nullptr;
    }
  }

  // Generate a dispatch based on guard kind: on the receiver's ObjectGroup
  // when the callee came from a property cache, otherwise on the callee
  // function itself.
  MDispatchInstruction* dispatch;
  if (maybeCache) {
    dispatch = MObjectGroupDispatch::New(alloc(), maybeCache->value(),
                                         maybeCache->propTable());
    callInfo.fun()->setImplicitlyUsedUnchecked();
  } else {
    dispatch = MFunctionDispatch::New(alloc(), callInfo.fun());
  }

  // Stack depth of the return block: the call's formals are consumed and
  // replaced by one return value.
  MOZ_ASSERT(dispatchBlock->stackDepth() >= callInfo.numFormals());
  uint32_t stackDepth = dispatchBlock->stackDepth() - callInfo.numFormals() + 1;

  // Generate a return block to host the rval-collecting MPhi.
  jsbytecode* postCall = GetNextPc(pc);
  MBasicBlock* returnBlock;
  MOZ_TRY_VAR(returnBlock, newBlock(stackDepth, postCall));
  graph().addBlock(returnBlock);
  returnBlock->setCallerResumePoint(callerResumePoint_);

  // Set up stack, used to manually create a post-call resume point.
  returnBlock->inheritSlots(dispatchBlock);
  callInfo.popCallStack(returnBlock);

  MPhi* retPhi = MPhi::New(alloc());
  returnBlock->addPhi(retPhi);
  returnBlock->push(retPhi);

  // Create a resume point from current stack state.
  if (!returnBlock->initEntrySlots(alloc())) {
    return abort(AbortReason::Alloc);
  }

  // Reserve the capacity for the phi.
  // Note: this is an upperbound. Unreachable targets and uninlineable natives
  // are also counted.
  uint32_t count = 1;  // Possible fallback block.
  for (uint32_t i = 0; i < targets.length(); i++) {
    if (choiceSet[i]) {
      count++;
    }
  }
  if (!retPhi->reserveLength(count)) {
    return abort(AbortReason::Alloc);
  }

  // Inline each of the inlineable targets.
  for (uint32_t i = 0; i < targets.length(); i++) {
    // Target must be inlineable.
    if (!choiceSet[i]) {
      continue;
    }

    // Even though we made one round of inline decisions already, we may
    // be amending them below.
    amendOptimizationAttempt(i);

    // Target must be reachable by the MDispatchInstruction.
    JSFunction* target = &targets[i].target->as<JSFunction>();
    if (maybeCache && !maybeCache->propTable()->hasFunction(target)) {
      choiceSet[i] = false;
      trackOptimizationOutcome(TrackedOutcome::CantInlineNotInDispatch);
      continue;
    }

    MBasicBlock* inlineBlock;
    MOZ_TRY_VAR(inlineBlock, newBlock(dispatchBlock, pc));
    graph().addBlock(inlineBlock);

    // Create a function MConstant to use in the entry ResumePoint. If we
    // can't use a constant, add a no-op MPolyInlineGuard, to prevent
    // hoisting env chain gets above the dispatch instruction.
    MInstruction* funcDef;
    if (target->isSingleton()) {
      funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints());
    } else {
      funcDef = MPolyInlineGuard::New(alloc(), callInfo.fun());
    }

    funcDef->setImplicitlyUsedUnchecked();
    dispatchBlock->add(funcDef);

    // Use the inlined callee in the inline resume point and on stack.
    int funIndex =
        inlineBlock->entryResumePoint()->stackDepth() - callInfo.numFormals();
    inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef);
    inlineBlock->rewriteSlot(funIndex, funcDef);

    // Create a new CallInfo to track modified state within the inline block.
    CallInfo inlineInfo(alloc(), pc, callInfo.constructing(),
                        callInfo.ignoresReturnValue());
    if (!inlineInfo.init(callInfo)) {
      return abort(AbortReason::Alloc);
    }
    inlineInfo.popCallStack(inlineBlock);
    inlineInfo.setFun(funcDef);

    // For a construct-call whose new.target is the callee itself, keep
    // newTarget pointing at the per-target callee definition.
    if (callInfo.constructing() && callInfo.getNewTarget() == callInfo.fun()) {
      inlineInfo.setNewTarget(funcDef);
    }

    if (maybeCache) {
      // Assign the 'this' value a TypeSet specialized to the groups that
      // can generate this inlining target.
      MOZ_ASSERT(callInfo.thisArg() == maybeCache->value());
      TemporaryTypeSet* thisTypes =
          maybeCache->propTable()->buildTypeSetForFunction(alloc(), target);
      if (!thisTypes) {
        return abort(AbortReason::Alloc);
      }

      MFilterTypeSet* filter =
          MFilterTypeSet::New(alloc(), inlineInfo.thisArg(), thisTypes);
      inlineBlock->add(filter);
      inlineInfo.setThis(filter);
    }

    // Inline the call into the inlineBlock.
    MOZ_TRY(setCurrentAndSpecializePhis(inlineBlock));
    InliningStatus status;
    MOZ_TRY_VAR(status, inlineSingleCall(inlineInfo, target));

    // Natives may veto inlining.
    if (status == InliningStatus_NotInlined) {
      MOZ_ASSERT(current == inlineBlock);
      graph().removeBlock(inlineBlock);
      choiceSet[i] = false;
      continue;
    }

    // inlineSingleCall() changed |current| to the inline return block.
    MBasicBlock* inlineReturnBlock = current;
    setCurrent(dispatchBlock);

    // Connect the inline path to the returnBlock.
    if (!dispatch->addCase(target, targets[i].group, inlineBlock)) {
      return abort(AbortReason::Alloc);
    }

    MDefinition* retVal = inlineReturnBlock->peek(-1);
    retPhi->addInput(retVal);
    inlineReturnBlock->end(MGoto::New(alloc(), returnBlock));
    if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock)) {
      return abort(AbortReason::Alloc);
    }
  }

  // Patch the InlinePropertyTable to not dispatch to vetoed paths.
  bool useFallback;
  if (maybeCache) {
    InlinePropertyTable* propTable = maybeCache->propTable();
    propTable->trimTo(targets, choiceSet);

    if (propTable->numEntries() == 0 || !propTable->hasPriorResumePoint()) {
      // Output a generic fallback path.
      MOZ_ASSERT_IF(propTable->numEntries() == 0, dispatch->numCases() == 0);
      maybeCache = nullptr;
      useFallback = true;
    } else {
      // We need a fallback path if the ObjectGroup dispatch does not
      // handle all incoming objects.
      useFallback = false;
      TemporaryTypeSet* objectTypes = maybeCache->value()->resultTypeSet();
      for (uint32_t i = 0; i < objectTypes->getObjectCount(); i++) {
        TypeSet::ObjectKey* obj = objectTypes->getObject(i);
        if (!obj) {
          continue;
        }

        if (!obj->isGroup()) {
          useFallback = true;
          break;
        }

        if (!propTable->hasObjectGroup(obj->group())) {
          useFallback = true;
          break;
        }
      }

      if (!useFallback) {
        // The object group dispatch handles all possible incoming
        // objects, so the cache and barrier will not be reached and
        // can be eliminated.
        if (callInfo.fun()->isGetPropertyCache()) {
          MOZ_ASSERT(callInfo.fun() == maybeCache);
        } else {
          MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
          MOZ_ASSERT(!barrier->hasUses());
          MOZ_ASSERT(barrier->type() == MIRType::Object);
          MOZ_ASSERT(barrier->input()->isGetPropertyCache());
          MOZ_ASSERT(barrier->input()->toGetPropertyCache() == maybeCache);
          barrier->block()->discard(barrier);
        }

        MOZ_ASSERT(!maybeCache->hasUses());
        maybeCache->block()->discard(maybeCache);
      }
    }
  } else {
    // Function dispatch: a fallback is needed whenever some target was not
    // added as a dispatch case (vetoed or not chosen).
    useFallback = dispatch->numCases() < targets.length();
  }

  // If necessary, generate a fallback path.
  if (useFallback) {
    // Annotate the fallback call with the target information.
    // |remainingTargets| becomes Nothing if any remaining target is not a
    // JSFunction, in which case the call is fully generic.
    Maybe<CallTargets> remainingTargets;
    remainingTargets.emplace(alloc());
    for (uint32_t i = 0; i < targets.length(); i++) {
      if (!maybeCache && choiceSet[i]) {
        continue;
      }

      JSObject* target = targets[i].target;
      if (!target->is<JSFunction>()) {
        remainingTargets = Nothing();
        break;
      }
      if (!remainingTargets->append(&target->as<JSFunction>())) {
        return abort(AbortReason::Alloc);
      }
    }

    // Generate fallback blocks, and set |current| to the fallback return block.
    if (maybeCache) {
      MBasicBlock* fallbackTarget;
      MOZ_TRY(inlineObjectGroupFallback(
          remainingTargets, callInfo, dispatchBlock,
          dispatch->toObjectGroupDispatch(), maybeCache, &fallbackTarget));
      dispatch->addFallback(fallbackTarget);
    } else {
      MOZ_TRY(inlineGenericFallback(remainingTargets, callInfo, dispatchBlock));
      dispatch->addFallback(current);
    }

    MBasicBlock* fallbackReturnBlock = current;

    // Connect fallback case to return infrastructure.
    MDefinition* retVal = fallbackReturnBlock->peek(-1);
    retPhi->addInput(retVal);
    fallbackReturnBlock->end(MGoto::New(alloc(), returnBlock));
    if (!returnBlock->addPredecessorWithoutPhis(fallbackReturnBlock)) {
      return abort(AbortReason::Alloc);
    }
  }

  // Finally add the dispatch instruction.
  // This must be done at the end so that add() may be called above.
  dispatchBlock->end(dispatch);

  // Check the depth change: +1 for retval
  MOZ_ASSERT(returnBlock->stackDepth() ==
             dispatchBlock->stackDepth() - callInfo.numFormals() + 1);

  graph().moveBlockToEnd(returnBlock);
  return setCurrentAndSpecializePhis(returnBlock);
}

MInstruction* IonBuilder::createNamedLambdaObject(MDefinition* callee,
                                                  MDefinition* env) {
  // Emit MIR that inline-allocates the NamedLambdaObject for a named lambda,
  // linking it to the enclosing environment |env| and the lambda |callee|.
  // Returns the allocation instruction.

  // Get a template CallObject that we'll use to generate inline object
  // creation.
  LexicalEnvironmentObject* templateObj =
      inspector->templateNamedLambdaObject();

  // One field is added to the function to handle its name.  This cannot be a
  // dynamic slot because there is still plenty of room on the NamedLambda
  // object.
  MOZ_ASSERT(!templateObj->hasDynamicSlots());

  // Allocate the actual object. It is important that no intervening
  // instructions could potentially bailout, thus leaking the dynamic slots
  // pointer.
  MInstruction* declEnvObj = MNewNamedLambdaObject::New(alloc(), templateObj);
  current->add(declEnvObj);

  // Initialize the object's reserved slots. No post barrier is needed here:
  // the object will be allocated in the nursery if possible, and if the
  // tenured heap is used instead, a minor collection will have been performed
  // that moved env/callee to the tenured heap.
  current->add(MStoreFixedSlot::New(
      alloc(), declEnvObj, NamedLambdaObject::enclosingEnvironmentSlot(), env));
  current->add(MStoreFixedSlot::New(alloc(), declEnvObj,
                                    NamedLambdaObject::lambdaSlot(), callee));

  return declEnvObj;
}

AbortReasonOr<MInstruction*> IonBuilder::createCallObject(MDefinition* callee,
                                                          MDefinition* env) {
  // Emit MIR that allocates and initializes the CallObject for the current
  // frame: links it to |env| and |callee| and copies (or lexically
  // uninitializes) the closed-over formal parameter slots.

  // Get a template CallObject that we'll use to generate inline object
  // creation.
  CallObject* templateObj = inspector->templateCallObject();
  MConstant* templateCst =
      MConstant::NewConstraintlessObject(alloc(), templateObj);
  current->add(templateCst);

  // Allocate the object. Run-once scripts need a singleton type, so always do
  // a VM call in such cases.
  MNewCallObjectBase* callObj;
  if (script()->treatAsRunOnce() || templateObj->isSingleton()) {
    callObj = MNewSingletonCallObject::New(alloc(), templateCst);
  } else {
    callObj = MNewCallObject::New(alloc(), templateCst);
  }
  current->add(callObj);

  // Initialize the object's reserved slots. No post barrier is needed here,
  // for the same reason as in createNamedLambdaObject.
  current->add(MStoreFixedSlot::New(
      alloc(), callObj, CallObject::enclosingEnvironmentSlot(), env));
  current->add(
      MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee));

  // Initialize closed-over formal parameter slots. With parameter
  // expressions, the slots start out uninitialized-lexical; without them, the
  // arguments are copied in directly.
  MSlots* slots = nullptr;
  for (PositionalFormalParameterIter fi(script()); fi; fi++) {
    if (!fi.closedOver()) {
      continue;
    }

    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

    unsigned slot = fi.location().slot();
    unsigned formal = fi.argumentSlot();
    unsigned numFixedSlots = templateObj->numFixedSlots();
    MDefinition* param;
    if (script()->functionHasParameterExprs()) {
      param = constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
    } else {
      param = current->getSlot(info().argSlotUnchecked(formal));
    }
    // Slots beyond the fixed range live in the dynamic slots array; create
    // the MSlots access lazily, only if some closed-over formal needs it.
    if (slot >= numFixedSlots) {
      if (!slots) {
        slots = MSlots::New(alloc(), callObj);
        current->add(slots);
      }
      current->add(
          MStoreSlot::New(alloc(), slots, slot - numFixedSlots, param));
    } else {
      current->add(MStoreFixedSlot::New(alloc(), callObj, slot, param));
    }
  }

  return AbortReasonOr<MInstruction*>(callObj);
}

// Create |this| for a scripted constructor whose identity is not known at
// compile time: load |newTarget.prototype| and build the object from that
// prototype at runtime via MCreateThisWithProto.
MDefinition* IonBuilder::createThisScripted(MDefinition* callee,
                                            MDefinition* newTarget) {
  // Get callee.prototype.
  //
  // This instruction MUST be idempotent: since it does not correspond to an
  // explicit operation in the bytecode, we cannot use resumeAfter().
  // Getters may not override |prototype| fetching, so this operation is
  // indeed idempotent.
  // - First try an idempotent property cache.
  // - Upon failing idempotent property cache, we can't use a non-idempotent
  //   cache, therefore we fallback to CallGetProperty
  //
  // Note: both CallGetProperty and GetPropertyCache can trigger a GC,
  //       and thus invalidation.
  MInstruction* getProto;
  if (!invalidatedIdempotentCache()) {
    MConstant* id = constant(StringValue(names().prototype));
    MGetPropertyCache* getPropCache =
        MGetPropertyCache::New(alloc(), newTarget, id,
                               /* monitored = */ false);
    getPropCache->setIdempotent();
    getProto = getPropCache;
  } else {
    // An idempotent cache was invalidated previously, so fall back to the
    // slower call-based property get.
    MCallGetProperty* callGetProp =
        MCallGetProperty::New(alloc(), newTarget, names().prototype);
    callGetProp->setIdempotent();
    getProto = callGetProp;
  }
  current->add(getProto);

  // Create this from prototype
  MCreateThisWithProto* createThis =
      MCreateThisWithProto::New(alloc(), callee, newTarget, getProto);
  current->add(createThis);

  return createThis;
}

JSObject* IonBuilder::getSingletonPrototype(JSFunction* target) {
  // Ask TI for the unique object stored in |target.prototype|; returns
  // nullptr when the properties are unknown or no singleton value exists.
  TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(target);
  if (key->unknownProperties()) {
    return nullptr;
  }

  HeapTypeSetKey protoProperty = key->property(NameToId(names().prototype));
  return protoProperty.singleton(constraints());
}

// Try to create |this| inline from a template object when |target|'s
// prototype is a TI-known singleton. Returns nullptr when any precondition
// fails, in which case the caller falls back to a more generic path.
MDefinition* IonBuilder::createThisScriptedSingleton(JSFunction* target) {
  if (!target->hasScript()) {
    return nullptr;
  }

  // Get the singleton prototype (if exists)
  JSObject* proto = getSingletonPrototype(target);
  if (!proto) {
    return nullptr;
  }

  // Baseline must have recorded a template object for this bytecode.
  JSObject* templateObject = inspector->getTemplateObject(pc);
  if (!templateObject) {
    return nullptr;
  }
  // Only plain (possibly unboxed) objects may be cloned from a template.
  if (!templateObject->is<PlainObject>() &&
      !templateObject->is<UnboxedPlainObject>()) {
    return nullptr;
  }
  // The template's prototype must match the singleton we found above.
  if (templateObject->staticPrototype() != proto) {
    return nullptr;
  }

  // Refuse if the template's group has had its new-script information
  // cleared (checked against the compiler's type constraints).
  TypeSet::ObjectKey* templateObjectKey =
      TypeSet::ObjectKey::get(templateObject->group());
  if (templateObjectKey->hasFlags(constraints(),
                                  OBJECT_FLAG_NEW_SCRIPT_CLEARED)) {
    return nullptr;
  }

  // TI must already have seen this object type as |this| in the target
  // script.
  StackTypeSet* thisTypes = TypeScript::ThisTypes(target->nonLazyScript());
  if (!thisTypes || !thisTypes->hasType(TypeSet::ObjectType(templateObject))) {
    return nullptr;
  }

  // Generate an inline path to create a new |this| object with
  // the given singleton prototype.
  MConstant* templateConst =
      MConstant::NewConstraintlessObject(alloc(), templateObject);
  MCreateThisWithTemplate* createThis = MCreateThisWithTemplate::New(
      alloc(), constraints(), templateConst,
      templateObject->group()->initialHeap(constraints()));
  current->add(templateConst);
  current->add(createThis);

  return createThis;
}

// Try to create |this| inline using the single callee Baseline observed at
// this call site, guarding at runtime that the callee and its |prototype|
// property still match. Returns nullptr when the optimization does not
// apply.
MDefinition* IonBuilder::createThisScriptedBaseline(MDefinition* callee) {
  // Try to inline |this| creation based on Baseline feedback.

  JSFunction* target = inspector->getSingleCallee(pc);
  if (!target || !target->hasScript()) {
    return nullptr;
  }

  // Bound functions and derived class constructors create |this| via other
  // mechanisms, so they are excluded here.
  if (target->isBoundFunction() || target->isDerivedClassConstructor()) {
    return nullptr;
  }

  JSObject* templateObject = inspector->getTemplateObject(pc);
  if (!templateObject) {
    return nullptr;
  }
  if (!templateObject->is<PlainObject>() &&
      !templateObject->is<UnboxedPlainObject>()) {
    return nullptr;
  }

  // |target.prototype| must be a plain data property we can read off the
  // function's slots.
  Shape* shape = target->lookupPure(realm->runtime()->names().prototype);
  if (!shape || !shape->isDataProperty()) {
    return nullptr;
  }

  Value protov = target->getSlot(shape->slot());
  if (!protov.isObject()) {
    return nullptr;
  }

  JSObject* proto = checkNurseryObject(&protov.toObject());
  if (proto != templateObject->staticPrototype()) {
    return nullptr;
  }

  TypeSet::ObjectKey* templateObjectKey =
      TypeSet::ObjectKey::get(templateObject->group());
  if (templateObjectKey->hasFlags(constraints(),
                                  OBJECT_FLAG_NEW_SCRIPT_CLEARED)) {
    return nullptr;
  }

  // TI must already have seen this object type as |this| in the target
  // script.
  StackTypeSet* thisTypes = TypeScript::ThisTypes(target->nonLazyScript());
  if (!thisTypes || !thisTypes->hasType(TypeSet::ObjectType(templateObject))) {
    return nullptr;
  }

  // Shape guard.
  callee = addShapeGuard(callee, target->lastProperty(), Bailout_ShapeGuard);

  // Guard callee.prototype == proto.
  MOZ_ASSERT(shape->numFixedSlots() == 0, "Must be a dynamic slot");
  MSlots* slots = MSlots::New(alloc(), callee);
  current->add(slots);
  MLoadSlot* prototype = MLoadSlot::New(alloc(), slots, shape->slot());
  current->add(prototype);
  MDefinition* protoConst = constant(ObjectValue(*proto));
  MGuardObjectIdentity* guard =
      MGuardObjectIdentity::New(alloc(), prototype, protoConst,
                                /* bailOnEquality = */ false);
  current->add(guard);

  // Generate an inline path to create a new |this| object with
  // the given prototype.
  MConstant* templateConst =
      MConstant::NewConstraintlessObject(alloc(), templateObject);
  MCreateThisWithTemplate* createThis = MCreateThisWithTemplate::New(
      alloc(), constraints(), templateConst,
      templateObject->group()->initialHeap(constraints()));
  current->add(templateConst);
  current->add(createThis);

  return createThis;
}

// Create the |this| value for a construct call to |target| (nullptr when the
// target is unknown). Tries increasingly generic strategies; returns nullptr
// when no strategy applies (caller aborts the optimization).
MDefinition* IonBuilder::createThis(JSFunction* target, MDefinition* callee,
                                    MDefinition* newTarget) {
  // Create |this| for unknown target or cross-realm target.
  if (!target || target->realm() != script()->realm()) {
    if (MDefinition* createThis = createThisScriptedBaseline(callee)) {
      return createThis;
    }

    // Fully generic path: compute |this| at runtime from callee/newTarget.
    MCreateThis* createThis = MCreateThis::New(alloc(), callee, newTarget);
    current->add(createThis);
    return createThis;
  }

  // Native constructors build the new Object themselves.
  if (target->isNative()) {
    if (!target->isConstructor()) {
      return nullptr;
    }

    if (target->isNativeWithJitEntry()) {
      // Do not bother inlining constructor calls to asm.js, since it is
      // not used much in practice.
      MOZ_ASSERT(target->isWasmOptimized());
      return nullptr;
    }

    // Pass the is-constructing sentinel in the |this| position; the native
    // allocates the actual object.
    MConstant* magic = MConstant::New(alloc(), MagicValue(JS_IS_CONSTRUCTING));
    current->add(magic);
    return magic;
  }

  // Bound functions and derived class constructors receive an
  // uninitialized-lexical sentinel; |this| is produced elsewhere.
  if (target->isBoundFunction()) {
    return constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
  }

  if (target->isDerivedClassConstructor()) {
    MOZ_ASSERT(target->isClassConstructor());
    return constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
  }

  // Try baking in the prototype.
  if (MDefinition* createThis = createThisScriptedSingleton(target)) {
    return createThis;
  }

  if (MDefinition* createThis = createThisScriptedBaseline(callee)) {
    return createThis;
  }

  return createThisScripted(callee, newTarget);
}

// Compile JSOP_FUNCALL: optimize |f.call(thisv, ...)| into a direct call to
// |f| when the callee is provably Function.prototype.call; otherwise emit a
// plain call.
AbortReasonOr<Ok> IonBuilder::jsop_funcall(uint32_t argc) {
  // Stack for JSOP_FUNCALL:
  // 1:      arg0
  // ...
  // argc:   argN
  // argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position.
  // argc+2: The native 'call' function.

  int calleeDepth = -((int)argc + 2);
  int funcDepth = -((int)argc + 1);

  // If |Function.prototype.call| may be overridden, don't optimize callsite.
  TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
  JSFunction* native = getSingleCallTarget(calleeTypes);
  if (!native || !native->isNative() || native->native() != &fun_call) {
    // Not definitely fun_call: emit an ordinary call of the callee.
    CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                      /* ignoresReturnValue = */ BytecodeIsPopped(pc));
    if (!callInfo.init(current, argc)) {
      return abort(AbortReason::Alloc);
    }
    return makeCall(native, callInfo);
  }
  current->peek(calleeDepth)->setImplicitlyUsedUnchecked();

  // Extract call target.
  TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
  JSFunction* target = getSingleCallTarget(funTypes);

  CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                    /* ignoresReturnValue = */ BytecodeIsPopped(pc));

  // Save prior call stack in case we need to resolve during bailout
  // recovery of inner inlined function. This includes the JSFunction and the
  // 'call' native function.
  MOZ_TRY(callInfo.savePriorCallStack(this, current, argc + 2));

  // Shimmy the slots down to remove the native 'call' function.
  current->shimmySlots(funcDepth - 1);

  bool zeroArguments = (argc == 0);

  // If no |this| argument was provided, explicitly pass Undefined.
  // Pushing is safe here, since one stack slot has been removed.
  if (zeroArguments) {
    pushConstant(UndefinedValue());
  } else {
    // |this| becomes implicit in the call.
    argc -= 1;
  }

  if (!callInfo.init(current, argc)) {
    return abort(AbortReason::Alloc);
  }

  // Try to inline the call.
  if (!zeroArguments) {
    InliningDecision decision = makeInliningDecision(target, callInfo);
    switch (decision) {
      case InliningDecision_Error:
        return abort(AbortReason::Error);
      case InliningDecision_DontInline:
      case InliningDecision_WarmUpCountTooLow:
        break;
      case InliningDecision_Inline: {
        InliningStatus status;
        MOZ_TRY_VAR(status, inlineSingleCall(callInfo, target));
        if (status == InliningStatus_Inlined) {
          return Ok();
        }
        break;
      }
    }
  }

  // Call without inlining.
  return makeCall(target, callInfo);
}

// Compile JSOP_FUNAPPLY: dispatch |f.apply(thisv, args)| to the specialized
// array- or arguments-based paths when possible, or fall back to a generic
// call.
AbortReasonOr<Ok> IonBuilder::jsop_funapply(uint32_t argc) {
  int calleeDepth = -((int)argc + 2);

  TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
  JSFunction* native = getSingleCallTarget(calleeTypes);
  // Only the exact two-argument form is specialized; the arguments-usage
  // analysis mode also takes the generic path.
  if (argc != 2 || info().analysisMode() == Analysis_ArgumentsUsage) {
    CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                      /* ignoresReturnValue = */ BytecodeIsPopped(pc));
    if (!callInfo.init(current, argc)) {
      return abort(AbortReason::Alloc);
    }
    return makeCall(native, callInfo);
  }

  // Disable compilation if the second argument to |apply| cannot be guaranteed
  // to be either definitely |arguments| or definitely not |arguments|.
  MDefinition* argument = current->peek(-1);
  if (script()->argumentsHasVarBinding() &&
      argument->mightBeType(MIRType::MagicOptimizedArguments) &&
      argument->type() != MIRType::MagicOptimizedArguments) {
    return abort(AbortReason::Disable, "fun.apply with MaybeArguments");
  }

  // Fallback to regular call if arg 2 is not definitely |arguments|.
  if (argument->type() != MIRType::MagicOptimizedArguments) {
    // Optimize fun.apply(self, array) if the length is sane and there are no
    // holes.
    TemporaryTypeSet* objTypes = argument->resultTypeSet();
    if (native && native->isNative() && native->native() == fun_apply &&
        objTypes &&
        objTypes->getKnownClass(constraints()) == &ArrayObject::class_ &&
        !objTypes->hasObjectFlags(constraints(), OBJECT_FLAG_LENGTH_OVERFLOW) &&
        ElementAccessIsPacked(constraints(), argument)) {
      return jsop_funapplyarray(argc);
    }

    CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                      /* ignoresReturnValue = */ BytecodeIsPopped(pc));
    if (!callInfo.init(current, argc)) {
      return abort(AbortReason::Alloc);
    }
    return makeCall(native, callInfo);
  }

  // The argument is definitely |arguments|, but the callee is not provably
  // fun_apply: give up (except in definite-properties analysis).
  if ((!native || !native->isNative() || native->native() != fun_apply) &&
      info().analysisMode() != Analysis_DefiniteProperties) {
    return abort(AbortReason::Disable, "fun.apply speculation failed");
  }

  // Use funapply that definitely uses |arguments|
  return jsop_funapplyarguments(argc);
}

// Compile JSOP_SPREADCALL: call a function with arguments taken from the
// dense elements of the (internally constructed) spread array.
AbortReasonOr<Ok> IonBuilder::jsop_spreadcall() {
  // The arguments array is constructed by a JSOP_NEWARRAY and not
  // leaked to user. The complications of spread call iterator behaviour are
  // handled when the user objects are expanded and copied into this hidden
  // array.

#ifdef DEBUG
  // If we know class, ensure it is what we expected
  MDefinition* argument = current->peek(-1);
  if (TemporaryTypeSet* objTypes = argument->resultTypeSet()) {
    if (const Class* clasp = objTypes->getKnownClass(constraints())) {
      MOZ_ASSERT(clasp == &ArrayObject::class_);
    }
  }
#endif

  // Pop the operands: arguments array, |this|, and the callee.
  MDefinition* argArr = current->pop();
  MDefinition* argThis = current->pop();
  MDefinition* argFunc = current->pop();

  // Extract call target.
  TemporaryTypeSet* funTypes = argFunc->resultTypeSet();
  JSFunction* target = getSingleCallTarget(funTypes);
  WrappedFunction* wrappedTarget =
      target ? new (alloc()) WrappedFunction(target) : nullptr;

  // Dense elements of argument array
  MElements* elements = MElements::New(alloc(), argArr);
  current->add(elements);

  MApplyArray* apply =
      MApplyArray::New(alloc(), wrappedTarget, argFunc, elements, argThis);
  current->add(apply);
  current->push(apply);
  MOZ_TRY(resumeAfter(apply));

  // A known same-realm target lets codegen skip the realm switch.
  if (target && target->realm() == script()->realm()) {
    apply->setNotCrossRealm();
  }

  // TypeBarrier the call result
  TemporaryTypeSet* types = bytecodeTypes(pc);
  return pushTypeBarrier(apply, types, BarrierKind::TypeSet);
}

// Compile |f.apply(thisv, array)| where the second argument is known to be a
// packed array: call |f| directly with the array's dense elements.
AbortReasonOr<Ok> IonBuilder::jsop_funapplyarray(uint32_t argc) {
  MOZ_ASSERT(argc == 2);

  int funcDepth = -((int)argc + 1);

  // Extract call target.
  TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
  JSFunction* target = getSingleCallTarget(funTypes);

  // Pop the array argument
  MDefinition* argObj = current->pop();

  MElements* elements = MElements::New(alloc(), argObj);
  current->add(elements);

  // Pop the |this| argument.
  MDefinition* argThis = current->pop();

  // Unwrap the (JSFunction *) parameter.
  MDefinition* argFunc = current->pop();

  // Pop apply function.
  MDefinition* nativeFunc = current->pop();
  nativeFunc->setImplicitlyUsedUnchecked();

  WrappedFunction* wrappedTarget =
      target ? new (alloc()) WrappedFunction(target) : nullptr;
  MApplyArray* apply =
      MApplyArray::New(alloc(), wrappedTarget, argFunc, elements, argThis);
  current->add(apply);
  current->push(apply);
  MOZ_TRY(resumeAfter(apply));

  // A known same-realm target lets codegen skip the realm switch.
  if (target && target->realm() == script()->realm()) {
    apply->setNotCrossRealm();
  }

  TemporaryTypeSet* types = bytecodeTypes(pc);
  return pushTypeBarrier(apply, types, BarrierKind::TypeSet);
}

AbortReasonOr<Ok> CallInfo::savePriorCallStack(MIRGenerator* mir,
                                               MBasicBlock* current,
                                               size_t peekDepth) {
  // Record the |peekDepth| topmost stack slots, deepest first, so bailout
  // recovery can reconstruct the pre-call stack.
  MOZ_ASSERT(priorArgs_.empty());
  if (!priorArgs_.reserve(peekDepth)) {
    return mir->abort(AbortReason::Alloc);
  }
  for (size_t depth = peekDepth; depth > 0; depth--) {
    priorArgs_.infallibleAppend(current->peek(-int32_t(depth)));
  }
  return Ok();
}

// Compile |f.apply(thisv, arguments)|. At inlining depth zero the caller's
// actual arguments are forwarded with MApplyArgs; inside an inlined frame
// (or during definite-properties analysis) the known argument definitions
// are used directly, possibly inlining |f| itself.
AbortReasonOr<Ok> IonBuilder::jsop_funapplyarguments(uint32_t argc) {
  // Stack for JSOP_FUNAPPLY:
  // 1:      Vp
  // 2:      This
  // argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position.
  // argc+2: The native 'apply' function.

  int funcDepth = -((int)argc + 1);

  // Extract call target.
  TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
  JSFunction* target = getSingleCallTarget(funTypes);

  // When this script isn't inlined, use MApplyArgs,
  // to copy the arguments from the stack and call the function
  if (inliningDepth_ == 0 &&
      info().analysisMode() != Analysis_DefiniteProperties) {
    // The array argument corresponds to the arguments object. As the JIT
    // is implicitly reading the arguments object in the next instruction,
    // we need to prevent the deletion of the arguments object from resume
    // points, so that Baseline will behave correctly after a bailout.
    MDefinition* vp = current->pop();
    vp->setImplicitlyUsedUnchecked();

    MDefinition* argThis = current->pop();

    // Unwrap the (JSFunction*) parameter.
    MDefinition* argFunc = current->pop();

    // Pop apply function.
    MDefinition* nativeFunc = current->pop();
    nativeFunc->setImplicitlyUsedUnchecked();

    // The number of actual arguments is only known at runtime.
    MArgumentsLength* numArgs = MArgumentsLength::New(alloc());
    current->add(numArgs);

    WrappedFunction* wrappedTarget =
        target ? new (alloc()) WrappedFunction(target) : nullptr;
    MApplyArgs* apply =
        MApplyArgs::New(alloc(), wrappedTarget, argFunc, numArgs, argThis);
    current->add(apply);
    current->push(apply);
    MOZ_TRY(resumeAfter(apply));

    // A known same-realm target lets codegen skip the realm switch.
    if (target && target->realm() == script()->realm()) {
      apply->setNotCrossRealm();
    }

    TemporaryTypeSet* types = bytecodeTypes(pc);
    return pushTypeBarrier(apply, types, BarrierKind::TypeSet);
  }

  // When inlining we have the arguments the function gets called with
  // and can optimize even more, by just calling the functions with the args.
  // We also try this path when doing the definite properties analysis, as we
  // can inline the apply() target and don't care about the actual arguments
  // that were passed in.

  CallInfo callInfo(alloc(), pc, /* constructing = */ false,
                    /* ignoresReturnValue = */ BytecodeIsPopped(pc));
  // Save all four stack slots (vp, this, f, apply) for bailout recovery.
  MOZ_TRY(callInfo.savePriorCallStack(this, current, 4));

  // Vp
  MDefinition* vp = current->pop();
  vp->setImplicitlyUsedUnchecked();

  // Arguments
  if (inliningDepth_) {
    // Reuse the argument definitions of the frame that inlined us.
    if (!callInfo.setArgs(inlineCallInfo_->argv())) {
      return abort(AbortReason::Alloc);
    }
  }

  // This
  MDefinition* argThis = current->pop();
  callInfo.setThis(argThis);

  // Pop function parameter.
  MDefinition* argFunc = current->pop();
  callInfo.setFun(argFunc);

  // Pop apply function.
  MDefinition* nativeFunc = current->pop();
  nativeFunc->setImplicitlyUsedUnchecked();

  // Try to inline the call.
  InliningDecision decision = makeInliningDecision(target, callInfo);
  switch (decision) {
    case InliningDecision_Error:
      return abort(AbortReason::Error);
    case InliningDecision_DontInline:
    case InliningDecision_WarmUpCountTooLow:
      break;
    case InliningDecision_Inline: {
      InliningStatus status;
      MOZ_TRY_VAR(status, inlineSingleCall(callInfo, target));
      if (status == InliningStatus_Inlined) {
        return Ok();
      }
    }
  }

  // Inlining declined or failed: emit a normal call.
  return makeCall(target, callInfo);
}

// Compile a generic JSOP_CALL/JSOP_NEW: gather candidate targets from TI,
// attempt inlining, and otherwise emit an MCall (with a recompile check if
// the single target was merely too cold to inline).
AbortReasonOr<Ok> IonBuilder::jsop_call(uint32_t argc, bool constructing,
                                        bool ignoresReturnValue) {
  startTrackingOptimizations();

  // If this call has never executed, try to seed the observed type set
  // based on how the call result is used.
  TemporaryTypeSet* observed = bytecodeTypes(pc);
  if (observed->empty()) {
    if (BytecodeFlowsToBitop(pc)) {
      observed->addType(TypeSet::Int32Type(), alloc_->lifoAlloc());
    } else if (*GetNextPc(pc) == JSOP_POS) {
      // Note: this is lame, overspecialized on the code patterns used
      // by asm.js and should be replaced by a more general mechanism.
      // See bug 870847.
      observed->addType(TypeSet::DoubleType(), alloc_->lifoAlloc());
    }
  }

  // Construct calls have an extra stack slot for |newTarget|.
  int calleeDepth = -((int)argc + 2 + constructing);

  // Acquire known call target if existent.
  InliningTargets targets(alloc());
  TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
  if (calleeTypes) {
    MOZ_TRY(getPolyCallTargets(calleeTypes, constructing, targets, 4));
  }

  CallInfo callInfo(alloc(), pc, constructing, ignoresReturnValue);
  if (!callInfo.init(current, argc)) {
    return abort(AbortReason::Alloc);
  }

  // Try inlining
  InliningStatus status;
  MOZ_TRY_VAR(status, inlineCallsite(targets, callInfo));
  if (status == InliningStatus_Inlined) {
    return Ok();
  }

  // Discard unreferenced & pre-allocated resume points.
  replaceMaybeFallbackFunctionGetter(nullptr);

  // No inline, just make the call.
  // Convert the inlining targets into plain JSFunction targets for MCall;
  // bail out to an unknown-target call if any target is not a JSFunction.
  Maybe<CallTargets> callTargets;
  if (!targets.empty()) {
    callTargets.emplace(alloc());
    for (const InliningTarget& target : targets) {
      if (!target.target->is<JSFunction>()) {
        callTargets = Nothing();
        break;
      }
      if (!callTargets->append(&target.target->as<JSFunction>())) {
        return abort(AbortReason::Alloc);
      }
    }
  }

  // A single known-but-cold target: insert a recompile check so we can
  // retry inlining once it warms up.
  if (status == InliningStatus_WarmUpCountTooLow && callTargets &&
      callTargets->length() == 1) {
    JSFunction* target = callTargets.ref()[0];
    MRecompileCheck* check =
        MRecompileCheck::New(alloc(), target->nonLazyScript(),
                             optimizationInfo().inliningRecompileThreshold(),
                             MRecompileCheck::RecompileCheck_Inlining);
    current->add(check);
  }

  return makeCall(callTargets, callInfo);
}

// Decide whether a call to |func| on objects described by |inTypes| can use
// the optimized DOM call path: the function must be a native of the right
// JIT-info op type, and every object type must match the JitInfo's
// proto/depth requirement.
AbortReasonOr<bool> IonBuilder::testShouldDOMCall(TypeSet* inTypes,
                                                  JSFunction* func,
                                                  JSJitInfo::OpType opType) {
  if (!func->isNative() || !func->hasJitInfo()) {
    return false;
  }

  // Some DOM optimizations cause execution to skip over recorded events such
  // as wrapper cache accesses, e.g. through GVN or loop hoisting of the
  // expression which performs the event. Disable DOM optimizations when
  // recording or replaying to avoid this problem.
  if (mozilla::recordreplay::IsRecordingOrReplaying()) {
    return false;
  }

  // If all the DOM objects flowing through are legal with this
  // property, we can bake in a call to the bottom half of the DOM
  // accessor
  DOMInstanceClassHasProtoAtDepth instanceChecker =
      realm->runtime()->DOMcallbacks()->instanceClassMatchesProto;

  const JSJitInfo* jinfo = func->jitInfo();
  if (jinfo->type() != opType) {
    return false;
  }

  for (unsigned i = 0; i < inTypes->getObjectCount(); i++) {
    TypeSet::ObjectKey* key = inTypes->getObject(i);
    if (!key) {
      continue;
    }

    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }

    // The class/proto must be stable (adds a constraint) and accepted by
    // the embedding's instance checker.
    if (!key->hasStableClassAndProto(constraints())) {
      return false;
    }

    if (!instanceChecker(key->clasp(), jinfo->protoID, jinfo->depth)) {
      return false;
    }
  }

  return true;
}

// Returns true when the types |def| can take are already covered by the
// callee's observed types, so the callee's prologue type check is redundant.
static bool ArgumentTypesMatch(MDefinition* def, StackTypeSet* calleeTypes) {
  // No callee type information: conservatively report a mismatch.
  if (!calleeTypes) {
    return false;
  }

  // A result type set gives the most precise answer: the caller's types
  // must be a subset of what the callee has observed.
  if (auto* defTypes = def->resultTypeSet()) {
    MOZ_ASSERT(def->type() == MIRType::Value || def->mightBeType(def->type()));
    return defTypes->isSubset(calleeTypes);
  }

  switch (def->type()) {
    case MIRType::Value:
      // An untyped Value without a type set could be anything.
      return false;
    case MIRType::Object:
      // Without a type set we only match if the callee accepts any object.
      return calleeTypes->unknownObject();
    default:
      return calleeTypes->mightBeMIRType(def->type());
  }
}

// Returns true when the callee's prologue type checks are still required for
// a call described by |callInfo| to |target|.
bool IonBuilder::testNeedsArgumentCheck(JSFunction* target,
                                        CallInfo& callInfo) {
  // If we have a known target, check if the caller arg types are a subset of
  // callee. Since typeset accumulates and can't decrease that means we don't
  // need to check the arguments anymore.

  // Natives have no TI-monitored prologue to check.
  if (target->isNative()) {
    return false;
  }

  // Without a compiled script there is no type information to compare.
  if (!target->hasScript()) {
    return true;
  }

  JSScript* targetScript = target->nonLazyScript();

  if (!ArgumentTypesMatch(callInfo.thisArg(),
                          TypeScript::ThisTypes(targetScript))) {
    return true;
  }
  // Check the explicitly passed arguments, up to the callee's formal count.
  uint32_t expected_args = Min<uint32_t>(callInfo.argc(), target->nargs());
  for (size_t i = 0; i < expected_args; i++) {
    if (!ArgumentTypesMatch(callInfo.getArg(i),
                            TypeScript::ArgTypes(targetScript, i))) {
      return true;
    }
  }
  // Missing formals are filled with |undefined|; the callee must have
  // observed undefined for each of them.
  for (size_t i = callInfo.argc(); i < target->nargs(); i++) {
    if (!TypeScript::ArgTypes(targetScript, i)
             ->mightBeMIRType(MIRType::Undefined)) {
      return true;
    }
  }

  return false;
}

AbortReasonOr<MCall*> IonBuilder::makeCallHelper(
    const Maybe<CallTargets>& targets, CallInfo& callInfo) {
  // This function may be called with mutated stack.
  // Querying TI for popped types is invalid.

  MOZ_ASSERT_IF(targets, !targets->empty());

  JSFunction* target = nullptr;
  if (targets && targets->length() == 1) {
    target = targets.ref()[0];
  }

  uint32_t targetArgs = callInfo.argc();

  // Collect number of missing arguments provided that the target is
  // scripted. Native functions are passed an explicit 'argc' parameter.
  if (target && !target->isNativeWithCppEntry()) {
    targetArgs = Max<uint32_t>(target->nargs(), callInfo.argc());
  }

  bool isDOMCall = false;
  DOMObjectKind objKind = DOMObjectKind::Unknown;
  if (target && !callInfo.constructing()) {
    // We know we have a single call target.  Check whether the "this" types
    // are DOM types and our function a DOM function, and if so flag the
    // MCall accordingly.
    TemporaryTypeSet* thisTypes = callInfo.thisArg()->resultTypeSet();
    if (thisTypes && thisTypes->getKnownMIRType() == MIRType::Object &&
        thisTypes->isDOMClass(constraints(), &objKind)) {
      MOZ_TRY_VAR(isDOMCall,
                  testShouldDOMCall(thisTypes, target, JSJitInfo::Method));
    }
  }

  MCall* call =
      MCall::New(alloc(), target, targetArgs + 1 + callInfo.constructing(),
                 callInfo.argc(), callInfo.constructing(),
                 callInfo.ignoresReturnValue(), isDOMCall, objKind);
  if (!call) {
    return abort(AbortReason::Alloc);
  }

  if (callInfo.constructing()) {
    call->addArg(targetArgs + 1, callInfo.getNewTarget());
  }

  // Explicitly pad any missing arguments with |undefined|.
  // This permits skipping the argumentsRectifier.
  MOZ_ASSERT_IF(target && targetArgs > callInfo.argc(),
                !target->isNativeWithCppEntry());
  for (int i = targetArgs; i > (int)callInfo.argc(); i--) {
    MConstant* undef = constant(UndefinedValue());
    if (!alloc().ensureBallast()) {
      return abort(AbortReason::Alloc);
    }
    call->addArg(i, undef);
  }

  // Add explicit arguments.
  // Skip addArg(0) because it is reserved for this
  for (int32_t i = callInfo.argc() - 1; i >= 0; i--) {
    call->addArg(i + 1, callInfo.getArg(i));
  }

  // Now that we've told it about all the args, compute whether it's movable
  call->computeMovable();

  // Inline the constructor on the caller-side.
  if (callInfo.constructing()) {
    MDefinition* create =
        createThis(target, callInfo.fun(), callInfo.getNewTarget());
    if (!create) {
      return abort(AbortReason::Disable,
                   "Failure inlining constructor for call.");
    }

    callInfo.thisArg()->setImplicitlyUsedUnchecked();
    callInfo.setThis(create);
  }

  // Pass |this| and function.
  MDefinition* thisArg = callInfo.thisArg();
  call->addArg(0, thisArg);

  if (targets) {
    // The callee must be one of the target JSFunctions, so we don't need a
    // Class check.
    call->disableClassCheck();

    // Determine whether we can skip the callee's prologue type checks and
    // whether we have to switch realms.
    bool needArgCheck = false;
    bool maybeCrossRealm = false;
    for (JSFunction* target : targets.ref()) {
      if (testNeedsArgumentCheck(target, callInfo)) {
        needArgCheck = true;
      }
      if (target->realm() != script()->realm()) {
        maybeCrossRealm = true;
      }
    }
    if (!needArgCheck) {
      call->disableArgCheck();
    }
    if (!maybeCrossRealm) {
      call->setNotCrossRealm();
    }