/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/IonBuilder.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/ScopeExit.h"
#include <algorithm>
#include "builtin/Eval.h"
#include "builtin/TypedObject.h"
#include "frontend/SourceNotes.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineInspector.h"
#include "jit/CacheIR.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitSpewer.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"
#include "util/CheckedArithmetic.h"
#include "vm/ArgumentsObject.h"
#include "vm/BytecodeIterator.h"
#include "vm/BytecodeLocation.h"
#include "vm/BytecodeUtil.h"
#include "vm/EnvironmentObject.h"
#include "vm/Instrumentation.h"
#include "vm/Opcodes.h"
#include "vm/RegExpStatics.h"
#include "vm/SelfHosting.h"
#include "vm/TraceLogging.h"
#include "gc/Nursery-inl.h"
#include "jit/CompileInfo-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/BytecodeIterator-inl.h"
#include "vm/BytecodeLocation-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/EnvironmentObject-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/NativeObject-inl.h"
#include "vm/ObjectGroup-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::AssertedCast;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;
using JS::TrackedOutcome;
using JS::TrackedStrategy;
using JS::TrackedTypeSite;
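// Records a type snapshot of a BaselineFrame (|this|, the formals, and the
// value slots) so that compilation can consult the frame's types without
// holding onto the frame's values; see NewBaselineFrameInspector below.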
class jit::BaselineFrameInspector {
public:
TypeSet::Type thisType;
JSObject* singletonEnvChain;
Vector<TypeSet::Type, 4, JitAllocPolicy> argTypes;
Vector<TypeSet::Type, 4, JitAllocPolicy> varTypes;
explicit BaselineFrameInspector(TempAllocator* temp)
: thisType(TypeSet::UndefinedType()),
singletonEnvChain(nullptr),
argTypes(*temp),
varTypes(*temp) {}
};
BaselineFrameInspector* jit::NewBaselineFrameInspector(TempAllocator* temp,
BaselineFrame* frame,
uint32_t frameSize) {
MOZ_ASSERT(frame);
BaselineFrameInspector* inspector =
temp->lifoAlloc()->new_<BaselineFrameInspector>(temp);
if (!inspector) {
return nullptr;
}
// Note: copying the actual values into a temporary structure for use
// during compilation could capture nursery pointers, so the values' types
// are recorded instead.
if (frame->isFunctionFrame()) {
inspector->thisType =
TypeSet::GetMaybeUntrackedValueType(frame->thisArgument());
}
if (frame->environmentChain()->isSingleton()) {
inspector->singletonEnvChain = frame->environmentChain();
}
JSScript* script = frame->script();
if (script->function()) {
if (!inspector->argTypes.reserve(frame->numFormalArgs())) {
return nullptr;
}
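  // Record a type for each formal: aliased formals (which live in the call
  // object) are recorded as Undefined, unaliased formals are read off the
  // frame, and formals mapped by an arguments object are read through it
  // when it exists.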
for (size_t i = 0; i < frame->numFormalArgs(); i++) {
if (script->formalIsAliased(i)) {
inspector->argTypes.infallibleAppend(TypeSet::UndefinedType());
} else if (!script->argsObjAliasesFormals()) {
TypeSet::Type type =
TypeSet::GetMaybeUntrackedValueType(frame->unaliasedFormal(i));
inspector->argTypes.infallibleAppend(type);
} else if (frame->hasArgsObj()) {
TypeSet::Type type =
TypeSet::GetMaybeUntrackedValueType(frame->argsObj().arg(i));
inspector->argTypes.infallibleAppend(type);
} else {
inspector->argTypes.infallibleAppend(TypeSet::UndefinedType());
}
}
}
uint32_t numValueSlots = frame->numValueSlots(frameSize);
if (!inspector->varTypes.reserve(numValueSlots)) {
return nullptr;
}
for (size_t i = 0; i < numValueSlots; i++) {
TypeSet::Type type =
TypeSet::GetMaybeUntrackedValueType(*frame->valueSlot(i));
inspector->varTypes.infallibleAppend(type);
}
return inspector;
}
IonBuilder::IonBuilder(JSContext* analysisContext, CompileRealm* realm,
const JitCompileOptions& options, TempAllocator* temp,
MIRGraph* graph, CompilerConstraintList* constraints,
BaselineInspector* inspector, CompileInfo* info,
const OptimizationInfo* optimizationInfo,
BaselineFrameInspector* baselineFrame,
size_t inliningDepth, uint32_t loopDepth)
: MIRGenerator(realm, options, temp, graph, info, optimizationInfo),
backgroundCodegen_(nullptr),
actionableAbortScript_(nullptr),
actionableAbortPc_(nullptr),
actionableAbortMessage_(nullptr),
rootList_(nullptr),
analysisContext(analysisContext),
baselineFrame_(baselineFrame),
constraints_(constraints),
tiOracle_(this, constraints),
thisTypes(nullptr),
argTypes(nullptr),
typeArray(nullptr),
typeArrayHint(0),
bytecodeTypeMap(nullptr),
loopDepth_(loopDepth),
loopStack_(*temp),
trackedOptimizationSites_(*temp),
lexicalCheck_(nullptr),
callerResumePoint_(nullptr),
callerBuilder_(nullptr),
iterators_(*temp),
loopHeaders_(*temp),
inspector(inspector),
inliningDepth_(inliningDepth),
inlinedBytecodeLength_(0),
numLoopRestarts_(0),
failedBoundsCheck_(info->script()->failedBoundsCheck()),
failedShapeGuard_(info->script()->failedShapeGuard()),
failedLexicalCheck_(info->script()->failedLexicalCheck()),
#ifdef DEBUG
hasLazyArguments_(false),
#endif
inlineCallInfo_(nullptr),
maybeFallbackFunctionGetter_(nullptr) {
script_ = info->script();
scriptHasIonScript_ = script_->hasIonScript();
pc = info->startPC();
// The script must have a JitScript. Compilation requires a BaselineScript
// too.
MOZ_ASSERT(script_->hasJitScript());
MOZ_ASSERT_IF(!info->isAnalysis(), script_->hasBaselineScript());
MOZ_ASSERT(!!analysisContext ==
(info->analysisMode() == Analysis_DefiniteProperties));
MOZ_ASSERT(script_->numBytecodeTypeSets() < JSScript::MaxBytecodeTypeSets);
if (!info->isAnalysis()) {
script()->jitScript()->setIonCompiledOrInlined();
}
}
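// Drop state the back end must not use; the BaselineFrameInspector is only
// meaningful while building the MIR graph.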
void IonBuilder::clearForBackEnd() {
MOZ_ASSERT(!analysisContext);
baselineFrame_ = nullptr;
}
mozilla::GenericErrorResult<AbortReason> IonBuilder::abort(AbortReason r) {
auto res = this->MIRGenerator::abort(r);
#ifdef DEBUG
JitSpew(JitSpew_IonAbort, "aborted @ %s:%d", script()->filename(),
PCToLineNumber(script(), pc));
#else
JitSpew(JitSpew_IonAbort, "aborted @ %s", script()->filename());
#endif
return res;
}
mozilla::GenericErrorResult<AbortReason> IonBuilder::abort(AbortReason r,
const char* message,
...) {
// Don't call PCToLineNumber in release builds.
va_list ap;
va_start(ap, message);
auto res = this->MIRGenerator::abortFmt(r, message, ap);
va_end(ap);
#ifdef DEBUG
JitSpew(JitSpew_IonAbort, "aborted @ %s:%d", script()->filename(),
PCToLineNumber(script(), pc));
#else
JitSpew(JitSpew_IonAbort, "aborted @ %s", script()->filename());
#endif
trackActionableAbort(message);
return res;
}
IonBuilder* IonBuilder::outermostBuilder() {
IonBuilder* builder = this;
while (builder->callerBuilder_) {
builder = builder->callerBuilder_;
}
return builder;
}
void IonBuilder::trackActionableAbort(const char* message) {
if (!isOptimizationTrackingEnabled()) {
return;
}
IonBuilder* topBuilder = outermostBuilder();
if (topBuilder->hadActionableAbort()) {
return;
}
topBuilder->actionableAbortScript_ = script();
topBuilder->actionableAbortPc_ = pc;
topBuilder->actionableAbortMessage_ = message;
}
void IonBuilder::spew(const char* message) {
// Don't call PCToLineNumber in release builds.
#ifdef DEBUG
JitSpew(JitSpew_IonMIR, "%s @ %s:%d", message, script()->filename(),
PCToLineNumber(script(), pc));
#endif
}
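// Return the unique JSFunction callee implied by |calleeTypes|: either a
// singleton function or the interpreted function associated with a single
// object group. Returns nullptr when no unique target is known.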
JSFunction* IonBuilder::getSingleCallTarget(TemporaryTypeSet* calleeTypes) {
if (!calleeTypes) {
return nullptr;
}
TemporaryTypeSet::ObjectKey* key = calleeTypes->maybeSingleObject();
if (!key || key->clasp() != &JSFunction::class_) {
return nullptr;
}
if (key->isSingleton()) {
return &key->singleton()->as<JSFunction>();
}
if (JSFunction* fun = key->group()->maybeInterpretedFunction()) {
return fun;
}
return nullptr;
}
AbortReasonOr<Ok> IonBuilder::getPolyCallTargets(TemporaryTypeSet* calleeTypes,
bool constructing,
InliningTargets& targets,
uint32_t maxTargets) {
MOZ_ASSERT(targets.empty());
if (!calleeTypes) {
return Ok();
}
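  // A nonzero baseFlags() means the callee type set includes primitive or
  // unknown values, so a precise set of function targets cannot be
  // enumerated.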
if (calleeTypes->baseFlags() != 0) {
return Ok();
}
unsigned objCount = calleeTypes->getObjectCount();
if (objCount == 0 || objCount > maxTargets) {
return Ok();
}
if (!targets.reserve(objCount)) {
return abort(AbortReason::Alloc);
}
for (unsigned i = 0; i < objCount; i++) {
JSObject* obj = calleeTypes->getSingleton(i);
ObjectGroup* group = nullptr;
if (obj) {
MOZ_ASSERT(obj->isSingleton());
} else {
group = calleeTypes->getGroup(i);
if (!group) {
continue;
}
obj = group->maybeInterpretedFunction();
if (!obj) {
targets.clear();
return Ok();
}
MOZ_ASSERT(!obj->isSingleton());
}
    // Don't optimize if the callee is not callable or constructible in the
    // manner it is being invoked, so that CallKnown does not have to handle
    // these cases (they will always throw).
if (constructing ? !obj->isConstructor() : !obj->isCallable()) {
targets.clear();
return Ok();
}
targets.infallibleAppend(InliningTarget(obj, group));
}
return Ok();
}
IonBuilder::InliningDecision IonBuilder::DontInline(JSScript* targetScript,
const char* reason) {
if (targetScript) {
JitSpew(JitSpew_Inlining, "Cannot inline %s:%u:%u %s",
targetScript->filename(), targetScript->lineno(),
targetScript->column(), reason);
} else {
JitSpew(JitSpew_Inlining, "Cannot inline: %s", reason);
}
return InliningDecision_DontInline;
}
/*
* |hasCommonInliningPath| determines whether the current inlining path has been
* seen before based on the sequence of scripts in the chain of |IonBuilder|s.
*
* An inlining path for a function |f| is the sequence of functions whose
* inlinings precede |f| up to any previous occurrences of |f|.
* So, if we have the chain of inlinings
*
 *   f1 -> f2 -> f -> f3 -> f4 -> f5 -> f
 *   --------         --------------
*
* the inlining paths for |f| are [f2, f1] and [f5, f4, f3].
* When attempting to inline |f|, we find all existing inlining paths for |f|
* and check whether they share a common prefix with the path created were |f|
* inlined.
*
* For example, given mutually recursive functions |f| and |g|, a possible
* inlining is
*
 *                    +---- Inlining stopped here...
 *                    |
 *                    v
 *   a -> f -> g -> f \ -> g -> f -> g -> ...
*
* where the vertical bar denotes the termination of inlining.
* Inlining is terminated because we have already observed the inlining path
* [f] when inlining function |g|. Note that this will inline recursive
* functions such as |fib| only one level, as |fib| has a zero length inlining
* path which trivially prefixes all inlining paths.
*
*/
bool IonBuilder::hasCommonInliningPath(const JSScript* scriptToInline) {
// Find all previous inlinings of the |scriptToInline| and check for common
// inlining paths with the top of the inlining stack.
for (IonBuilder* it = this->callerBuilder_; it; it = it->callerBuilder_) {
if (it->script() != scriptToInline) {
continue;
}
// This only needs to check the top of each stack for a match,
// as a match of length one ensures a common prefix.
IonBuilder* path = it->callerBuilder_;
if (!path || this->script() == path->script()) {
return true;
}
}
return false;
}
IonBuilder::InliningDecision IonBuilder::canInlineTarget(JSFunction* target,
CallInfo& callInfo) {
if (!optimizationInfo().inlineInterpreted()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
return InliningDecision_DontInline;
}
if (TraceLogTextIdEnabled(TraceLogger_InlinedScripts)) {
return DontInline(nullptr,
"Tracelogging of inlined scripts is enabled"
"but Tracelogger cannot do that yet.");
}
if (!target->isInterpreted()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNotInterpreted);
return DontInline(nullptr, "Non-interpreted target");
}
// Never inline scripted cross-realm calls.
if (target->realm() != script()->realm()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineCrossRealm);
return DontInline(nullptr, "Cross-realm call");
}
if (info().analysisMode() != Analysis_DefiniteProperties) {
// If |this| or an argument has an empty resultTypeSet, don't bother
// inlining, as the call is currently unreachable due to incomplete type
// information. This does not apply to the definite properties analysis,
// in that case we want to inline anyway.
if (callInfo.thisArg()->emptyResultTypeSet()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineUnreachable);
return DontInline(nullptr, "Empty TypeSet for |this|");
}
for (size_t i = 0; i < callInfo.argc(); i++) {
if (callInfo.getArg(i)->emptyResultTypeSet()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineUnreachable);
return DontInline(nullptr, "Empty TypeSet for argument");
}
}
}
// Allow constructing lazy scripts when performing the definite properties
// analysis, as baseline has not been used to warm the caller up yet.
if (target->isInterpreted() &&
info().analysisMode() == Analysis_DefiniteProperties) {
RootedFunction fun(analysisContext, target);
RootedScript script(analysisContext,
JSFunction::getOrCreateScript(analysisContext, fun));
if (!script) {
return InliningDecision_Error;
}
if (CanBaselineInterpretScript(script)) {
AutoKeepJitScripts keepJitScript(analysisContext);
if (!script->ensureHasJitScript(analysisContext, keepJitScript)) {
return InliningDecision_Error;
}
}
}
if (!target->hasScript()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineLazy);
return DontInline(nullptr, "Lazy script");
}
JSScript* inlineScript = target->nonLazyScript();
if (callInfo.constructing() && !target->isConstructor()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNotConstructor);
return DontInline(inlineScript, "Callee is not a constructor");
}
if (!callInfo.constructing() && target->isClassConstructor()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineClassConstructor);
return DontInline(inlineScript, "Not constructing class constructor");
}
if (!CanIonInlineScript(inlineScript)) {
trackOptimizationOutcome(TrackedOutcome::CantInlineDisabledIon);
return DontInline(inlineScript, "Disabled Ion compilation");
}
if (info().isAnalysis()) {
// Analysis requires only a JitScript.
if (!inlineScript->hasJitScript()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNoJitScript);
return DontInline(inlineScript, "No JitScript");
}
} else {
// Compilation requires a BaselineScript.
if (!inlineScript->hasBaselineScript()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNoBaseline);
return DontInline(inlineScript, "No baseline jitcode");
}
}
// Don't inline functions with a higher optimization level.
if (!isHighestOptimizationLevel()) {
OptimizationLevel level = optimizationLevel();
if (inlineScript->hasIonScript() &&
(inlineScript->ionScript()->isRecompiling() ||
inlineScript->ionScript()->optimizationLevel() > level)) {
return DontInline(inlineScript, "More optimized");
}
if (IonOptimizations.levelForScript(inlineScript, nullptr) > level) {
return DontInline(inlineScript, "Should be more optimized");
}
}
if (TooManyFormalArguments(target->nargs())) {
trackOptimizationOutcome(TrackedOutcome::CantInlineTooManyArgs);
return DontInline(inlineScript, "Too many args");
}
// We check the number of actual arguments against the maximum number of
// formal arguments as we do not want to encode all actual arguments in the
// callerResumePoint.
if (TooManyFormalArguments(callInfo.argc())) {
trackOptimizationOutcome(TrackedOutcome::CantInlineTooManyArgs);
return DontInline(inlineScript, "Too many actual args");
}
if (hasCommonInliningPath(inlineScript)) {
trackOptimizationOutcome(TrackedOutcome::HasCommonInliningPath);
return DontInline(inlineScript, "Common inlining path");
}
if (inlineScript->uninlineable()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineGeneric);
return DontInline(inlineScript, "Uninlineable script");
}
if (inlineScript->needsArgsObj()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNeedsArgsObj);
return DontInline(inlineScript, "Script that needs an arguments object");
}
if (inlineScript->isDebuggee()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineDebuggee);
return DontInline(inlineScript, "Script is debuggee");
}
return InliningDecision_Inline;
}
AbortReasonOr<Ok> IonBuilder::analyzeNewLoopTypes(MBasicBlock* entry) {
MOZ_ASSERT(!entry->isDead());
MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPHEAD);
// The phi inputs at the loop head only reflect types for variables that
// were present at the start of the loop. If the variable changes to a new
// type within the loop body, and that type is carried around to the loop
// head, then we need to know about the new type up front.
//
// Since SSA information hasn't been constructed for the loop body yet, we
// need a separate analysis to pick out the types that might flow around
// the loop header. This is a best-effort analysis that may either over-
// or under-approximate the set of such types.
//
// Over-approximating the types may lead to inefficient generated code, and
// under-approximating the types will cause the loop body to be analyzed
// multiple times as the correct types are deduced (see finishLoop).
  // If we restarted processing of an outer loop, get the loop header types
  // directly from the last time we processed this loop. This both avoids
  // repeating the bytecode traversal below and picks up types discovered
  // while previously building the loop body.
bool foundEntry = false;
for (size_t i = 0; i < loopHeaders_.length(); i++) {
if (loopHeaders_[i].pc == pc) {
MBasicBlock* oldEntry = loopHeaders_[i].header;
// If this block has been discarded, its resume points will have
// already discarded their operands.
if (oldEntry->isDead()) {
loopHeaders_[i].header = entry;
foundEntry = true;
break;
}
MResumePoint* oldEntryRp = oldEntry->entryResumePoint();
size_t stackDepth = oldEntryRp->stackDepth();
for (size_t slot = 0; slot < stackDepth; slot++) {
MDefinition* oldDef = oldEntryRp->getOperand(slot);
if (!oldDef->isPhi()) {
MOZ_ASSERT(oldDef->block()->id() < oldEntry->id());
MOZ_ASSERT(oldDef == entry->getSlot(slot));
continue;
}
MPhi* oldPhi = oldDef->toPhi();
MPhi* newPhi = entry->getSlot(slot)->toPhi();
if (!newPhi->addBackedgeType(alloc(), oldPhi->type(),
oldPhi->resultTypeSet())) {
return abort(AbortReason::Alloc);
}
}
// Update the most recent header for this loop encountered, in case
// new types flow to the phis and the loop is processed at least
// three times.
loopHeaders_[i].header = entry;
return Ok();
}
}
if (!foundEntry) {
if (!loopHeaders_.append(LoopHeader(pc, entry))) {
return abort(AbortReason::Alloc);
}
}
// Get the start and end bytecode locations.
BytecodeLocation start(script_, pc);
BytecodeLocation end(script_, script_->codeEnd());
  // Iterate the bytecode quickly to seed possible types in the loop header.
Maybe<BytecodeLocation> last;
Maybe<BytecodeLocation> earlier;
for (auto it : BytecodeLocationRange(start, end)) {
if (IsBackedgeForLoopHead(it.toRawBytecode(), pc)) {
break;
}
MOZ_TRY(analyzeNewLoopTypesForLocation(entry, it, last, earlier));
earlier = last;
last = mozilla::Some(it);
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::analyzeNewLoopTypesForLocation(
MBasicBlock* entry, const BytecodeLocation loc,
const Maybe<BytecodeLocation>& last_,
const Maybe<BytecodeLocation>& earlier) {
  // Unfortunately, Maybe<> cannot be passed as a by-value argument, so make
  // a copy here.
Maybe<BytecodeLocation> last = last_;
// We're only interested in JSOP_SETLOCAL and JSOP_SETARG.
uint32_t slot;
if (loc.is(JSOP_SETLOCAL)) {
slot = info().localSlot(loc.local());
} else if (loc.is(JSOP_SETARG)) {
slot = info().argSlotUnchecked(loc.arg());
} else {
return Ok();
}
if (slot >= info().firstStackSlot()) {
return Ok();
}
// Ensure there is a |last| instruction.
if (!last) {
return Ok();
}
MOZ_ASSERT(last->isValid(script_));
  // Analyze the |last| bytecode instruction to try to determine the type of
  // this local/argument.
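  // For example, given
  //
  //   var x = 0;
  //   while (cond) {
  //     x = a + b;  // JSOP_ADD followed by JSOP_SETLOCAL for |x|
  //   }
  //
  // the |last| op for the JSOP_SETLOCAL is the JSOP_ADD, and the switch
  // below feeds the BaselineInspector's observed result type for that add
  // into the backedge type of |x|'s loop phi.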
MPhi* phi = entry->getSlot(slot)->toPhi();
auto addPhiBackedgeType =
[&](MIRType type, TemporaryTypeSet* typeSet) -> AbortReasonOr<Ok> {
if (!phi->addBackedgeType(alloc(), type, typeSet)) {
return abort(AbortReason::Alloc);
}
return Ok();
};
// If it's a JSOP_POS or JSOP_TONUMERIC, use its operand instead.
if (last->is(JSOP_POS) || last->is(JSOP_TONUMERIC)) {
MOZ_ASSERT(earlier);
last = earlier;
}
// If the |last| op had a TypeSet, use it.
if (last->opHasTypeSet()) {
TemporaryTypeSet* typeSet = bytecodeTypes(last->toRawBytecode());
if (typeSet->empty()) {
return Ok();
}
return addPhiBackedgeType(typeSet->getKnownMIRType(), typeSet);
}
// If the |last| op was a JSOP_GETLOCAL or JSOP_GETARG, use that slot's type.
if (last->is(JSOP_GETLOCAL) || last->is(JSOP_GETARG)) {
uint32_t slot = (last->is(JSOP_GETLOCAL))
? info().localSlot(last->local())
: info().argSlotUnchecked(last->arg());
if (slot >= info().firstStackSlot()) {
return Ok();
}
MPhi* otherPhi = entry->getSlot(slot)->toPhi();
if (!otherPhi->hasBackedgeType()) {
return Ok();
}
return addPhiBackedgeType(otherPhi->type(), otherPhi->resultTypeSet());
}
// If the |last| op has a known type (determined statically or from
// BaselineInspector), use that.
MIRType type = MIRType::None;
switch (last->getOp()) {
case JSOP_VOID:
case JSOP_UNDEFINED:
type = MIRType::Undefined;
break;
case JSOP_GIMPLICITTHIS:
if (!script()->hasNonSyntacticScope()) {
type = MIRType::Undefined;
}
break;
case JSOP_NULL:
type = MIRType::Null;
break;
case JSOP_ZERO:
case JSOP_ONE:
case JSOP_INT8:
case JSOP_INT32:
case JSOP_UINT16:
case JSOP_UINT24:
case JSOP_RESUMEINDEX:
type = MIRType::Int32;
break;
case JSOP_BITAND:
case JSOP_BITOR:
case JSOP_BITXOR:
case JSOP_BITNOT:
case JSOP_RSH:
case JSOP_LSH:
type = inspector->expectedResultType(last->toRawBytecode());
break;
case JSOP_URSH:
// Unsigned right shift is not applicable to BigInts, so we don't need
// to query the baseline inspector for the possible result types.
type = MIRType::Int32;
break;
case JSOP_FALSE:
case JSOP_TRUE:
case JSOP_EQ:
case JSOP_NE:
case JSOP_LT:
case JSOP_LE:
case JSOP_GT:
case JSOP_GE:
case JSOP_NOT:
case JSOP_STRICTEQ:
case JSOP_STRICTNE:
case JSOP_IN:
case JSOP_INSTANCEOF:
case JSOP_HASOWN:
type = MIRType::Boolean;
break;
case JSOP_DOUBLE:
type = MIRType::Double;
break;
case JSOP_ITERNEXT:
case JSOP_STRING:
case JSOP_TOSTRING:
case JSOP_TYPEOF:
case JSOP_TYPEOFEXPR:
type = MIRType::String;
break;
case JSOP_SYMBOL:
type = MIRType::Symbol;
break;
case JSOP_ADD:
case JSOP_SUB:
case JSOP_MUL:
case JSOP_DIV:
case JSOP_MOD:
case JSOP_NEG:
case JSOP_INC:
case JSOP_DEC:
type = inspector->expectedResultType(last->toRawBytecode());
break;
case JSOP_BIGINT:
type = MIRType::BigInt;
break;
default:
break;
}
if (type == MIRType::None) {
return Ok();
}
return addPhiBackedgeType(type, /* typeSet = */ nullptr);
}
AbortReasonOr<Ok> IonBuilder::init() {
{
LifoAlloc::AutoFallibleScope fallibleAllocator(alloc().lifoAlloc());
if (!JitScript::FreezeTypeSets(constraints(), script(), &thisTypes,
&argTypes, &typeArray)) {
return abort(AbortReason::Alloc);
}
}
if (!alloc().ensureBallast()) {
return abort(AbortReason::Alloc);
}
{
JSContext* cx = TlsContext.get();
RootedScript rootedScript(cx, script());
if (!rootedScript->jitScript()->ensureHasCachedIonData(cx, rootedScript)) {
return abort(AbortReason::Error);
}
}
if (inlineCallInfo_) {
// If we're inlining, the actual this/argument types are not necessarily
// a subset of the script's observed types. |argTypes| is never accessed
// for inlined scripts, so we just null it.
thisTypes = inlineCallInfo_->thisArg()->resultTypeSet();
argTypes = nullptr;
}
bytecodeTypeMap = script()->jitScript()->bytecodeTypeMap();
return Ok();
}
AbortReasonOr<Ok> IonBuilder::build() {
// Spew IC info for the script being compiled, but only when actually
// compiling, not when analyzing it.
#ifdef JS_STRUCTURED_SPEW
if (!info().isAnalysis()) {
JitSpewBaselineICStats(script(), "To-Be-Compiled");
}
#endif
MOZ_TRY(init());
// The JitScript-based inlining heuristics only affect the highest
// optimization level. Other levels do almost no inlining and we don't want to
// overwrite data from the highest optimization tier.
if (isHighestOptimizationLevel()) {
script()->jitScript()->resetMaxInliningDepth();
}
MBasicBlock* entry;
MOZ_TRY_VAR(entry, newBlock(info().firstStackSlot(), pc));
MOZ_TRY(setCurrentAndSpecializePhis(entry));
#ifdef JS_JITSPEW
if (info().isAnalysis()) {
JitSpew(JitSpew_IonScripts, "Analyzing script %s:%u:%u (%p) %s",
script()->filename(), script()->lineno(), script()->column(),
(void*)script(), AnalysisModeString(info().analysisMode()));
} else {
JitSpew(JitSpew_IonScripts,
"%sompiling script %s:%u:%u (%p) (warmup-counter=%" PRIu32
", level=%s)",
(script()->hasIonScript() ? "Rec" : "C"), script()->filename(),
script()->lineno(), script()->column(), (void*)script(),
script()->getWarmUpCount(),
OptimizationLevelString(optimizationLevel()));
}
#endif
MOZ_TRY(initParameters());
initLocals();
// Initialize something for the env chain. We can bail out before the
// start instruction, but the snapshot is encoded *at* the start
// instruction, which means generating any code that could load into
// registers is illegal.
MInstruction* env = MConstant::New(alloc(), UndefinedValue());
current->add(env);
current->initSlot(info().environmentChainSlot(), env);
// Initialize the return value.
MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
current->add(returnValue);
current->initSlot(info().returnValueSlot(), returnValue);
// Initialize the arguments object slot to undefined if necessary.
if (info().hasArguments()) {
MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
current->add(argsObj);
current->initSlot(info().argsObjSlot(), argsObj);
}
// Emit the start instruction, so we can begin real instructions.
current->add(MStart::New(alloc()));
// Guard against over-recursion. Do this before we start unboxing, since
// this will create an OSI point that will read the incoming argument
// values, which is nice to do before their last real use, to minimize
// register/stack pressure.
MCheckOverRecursed* check = MCheckOverRecursed::New(alloc());
current->add(check);
MResumePoint* entryRpCopy =
MResumePoint::Copy(alloc(), current->entryResumePoint());
if (!entryRpCopy) {
return abort(AbortReason::Alloc);
}
check->setResumePoint(entryRpCopy);
  // Parameters have been checked to correspond to the typeset; now we unbox
  // what we can in an infallible manner.
MOZ_TRY(rewriteParameters());
// Check for redeclaration errors for global scripts.
if (!info().funMaybeLazy() && !info().module() &&
script()->bodyScope()->is<GlobalScope>() &&
script()->bodyScope()->as<GlobalScope>().hasBindings()) {
MGlobalNameConflictsCheck* redeclCheck =
MGlobalNameConflictsCheck::New(alloc());
current->add(redeclCheck);
MResumePoint* entryRpCopy =
MResumePoint::Copy(alloc(), current->entryResumePoint());
if (!entryRpCopy) {
return abort(AbortReason::Alloc);
}
redeclCheck->setResumePoint(entryRpCopy);
}
// It's safe to start emitting actual IR, so now build the env chain.
MOZ_TRY(initEnvironmentChain());
if (info().needsArgsObj()) {
initArgumentsObject();
}
// The type analysis phase attempts to insert unbox operations near
// definitions of values. It also attempts to replace uses in resume points
// with the narrower, unboxed variants. However, we must prevent this
// replacement from happening on values in the entry snapshot. Otherwise we
// could get this:
//
// v0 = MParameter(0)
// v1 = MParameter(1)
// -- ResumePoint(v2, v3)
// v2 = Unbox(v0, INT32)
// v3 = Unbox(v1, INT32)
//
// So we attach the initial resume point to each parameter, which the type
// analysis explicitly checks (this is the same mechanism used for
// effectful operations).
for (uint32_t i = 0; i < info().endArgSlot(); i++) {
MInstruction* ins = current->getEntrySlot(i)->toInstruction();
if (ins->type() != MIRType::Value) {
continue;
}
MResumePoint* entryRpCopy =
MResumePoint::Copy(alloc(), current->entryResumePoint());
if (!entryRpCopy) {
return abort(AbortReason::Alloc);
}
ins->setResumePoint(entryRpCopy);
}
#ifdef DEBUG
// lazyArguments should never be accessed in |argsObjAliasesFormals| scripts.
if (info().hasArguments() && !info().argsObjAliasesFormals()) {
hasLazyArguments_ = true;
}
#endif
insertRecompileCheck(pc);
auto clearLastPriorResumePoint = mozilla::MakeScopeExit([&] {
// Discard unreferenced & pre-allocated resume points.
replaceMaybeFallbackFunctionGetter(nullptr);
});
MOZ_TRY(traverseBytecode());
if (isHighestOptimizationLevel() &&
inlinedBytecodeLength_ > script_->jitScript()->inlinedBytecodeLength()) {
script_->jitScript()->setInlinedBytecodeLength(inlinedBytecodeLength_);
}
MOZ_TRY(maybeAddOsrTypeBarriers());
MOZ_TRY(processIterators());
if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
return abort(AbortReason::PreliminaryObjects);
}
MOZ_ASSERT(loopDepth_ == 0);
MOZ_ASSERT(loopStack_.empty());
return Ok();
}
AbortReasonOr<Ok> IonBuilder::processIterators() {
// Find and mark phis that must transitively hold an iterator live.
Vector<MDefinition*, 8, SystemAllocPolicy> worklist;
for (size_t i = 0; i < iterators_.length(); i++) {
MDefinition* iter = iterators_[i];
if (!iter->isInWorklist()) {
if (!worklist.append(iter)) {
return abort(AbortReason::Alloc);
}
iter->setInWorklist();
}
}
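  // Propagate through the use chains: any phi reachable from an iterator
  // definition is marked as holding the iterator live.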
while (!worklist.empty()) {
MDefinition* def = worklist.popCopy();
def->setNotInWorklist();
if (def->isPhi()) {
MPhi* phi = def->toPhi();
phi->setIterator();
phi->setImplicitlyUsedUnchecked();
}
for (MUseDefIterator iter(def); iter; iter++) {
MDefinition* use = iter.def();
if (!use->isInWorklist() &&
(!use->isPhi() || !use->toPhi()->isIterator())) {
if (!worklist.append(use)) {
return abort(AbortReason::Alloc);
}
use->setInWorklist();
}
}
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::buildInline(IonBuilder* callerBuilder,
MResumePoint* callerResumePoint,
CallInfo& callInfo) {
inlineCallInfo_ = &callInfo;
// Spew IC info for inlined script, but only when actually compiling,
// not when analyzing it.
#ifdef JS_STRUCTURED_SPEW
if (!info().isAnalysis()) {
JitSpewBaselineICStats(script(), "To-Be-Inlined");
}
#endif
MOZ_TRY(init());
JitSpew(JitSpew_IonScripts, "Inlining script %s:%u:%u (%p)",
script()->filename(), script()->lineno(), script()->column(),
(void*)script());
callerBuilder_ = callerBuilder;
callerResumePoint_ = callerResumePoint;
if (callerBuilder->failedBoundsCheck_) {
failedBoundsCheck_ = true;
}
if (callerBuilder->failedShapeGuard_) {
failedShapeGuard_ = true;
}
if (callerBuilder->failedLexicalCheck_) {
failedLexicalCheck_ = true;
}
safeForMinorGC_ = callerBuilder->safeForMinorGC_;
// Generate single entrance block.
MBasicBlock* entry;
MOZ_TRY_VAR(entry, newBlock(info().firstStackSlot(), pc));
MOZ_TRY(setCurrentAndSpecializePhis(entry));
current->setCallerResumePoint(callerResumePoint);
// Connect the entrance block to the last block in the caller's graph.
MBasicBlock* predecessor = callerBuilder->current;
MOZ_ASSERT(predecessor == callerResumePoint->block());
predecessor->end(MGoto::New(alloc(), current));
if (!current->addPredecessorWithoutPhis(predecessor)) {
return abort(AbortReason::Alloc);
}
// Initialize env chain slot to Undefined. It's set later by
// |initEnvironmentChain|.
MInstruction* env = MConstant::New(alloc(), UndefinedValue());
current->add(env);
current->initSlot(info().environmentChainSlot(), env);
// Initialize |return value| slot.
MInstruction* returnValue = MConstant::New(alloc(), UndefinedValue());
current->add(returnValue);
current->initSlot(info().returnValueSlot(), returnValue);
// Initialize |arguments| slot.
if (info().hasArguments()) {
MInstruction* argsObj = MConstant::New(alloc(), UndefinedValue());
current->add(argsObj);
current->initSlot(info().argsObjSlot(), argsObj);
}
// Initialize |this| slot.
current->initSlot(info().thisSlot(), callInfo.thisArg());
JitSpew(JitSpew_Inlining, "Initializing %u arg slots", info().nargs());
  // NB: Ion does not inline functions which |needsArgsObj|, so using
  // argSlot() instead of argSlotUnchecked() below is OK.
MOZ_ASSERT(!info().needsArgsObj());
// Initialize actually set arguments.
uint32_t existing_args = std::min<uint32_t>(callInfo.argc(), info().nargs());
for (size_t i = 0; i < existing_args; ++i) {
MDefinition* arg = callInfo.getArg(i);
current->initSlot(info().argSlot(i), arg);
}
// Pass Undefined for missing arguments
for (size_t i = callInfo.argc(); i < info().nargs(); ++i) {
MConstant* arg = MConstant::New(alloc(), UndefinedValue());
current->add(arg);
current->initSlot(info().argSlot(i), arg);
}
JitSpew(JitSpew_Inlining, "Initializing %u locals", info().nlocals());
initLocals();
JitSpew(JitSpew_Inlining,
"Inline entry block MResumePoint %p, %u stack slots",
(void*)current->entryResumePoint(),
current->entryResumePoint()->stackDepth());
// +2 for the env chain and |this|, maybe another +1 for arguments object
// slot.
MOZ_ASSERT(current->entryResumePoint()->stackDepth() == info().totalSlots());
#ifdef DEBUG
if (script_->argumentsHasVarBinding()) {
hasLazyArguments_ = true;
}
#endif
insertRecompileCheck(pc);
// Insert an interrupt check when recording or replaying, which will bump
// the record/replay system's progress counter.
if (script()->trackRecordReplayProgress()) {
MInterruptCheck* check = MInterruptCheck::New(alloc());
check->setTrackRecordReplayProgress();
current->add(check);
}
  // Initialize the env chain now that all resume point operands are
  // initialized.
MOZ_TRY(initEnvironmentChain(callInfo.fun()));
auto clearLastPriorResumePoint = mozilla::MakeScopeExit([&] {
// Discard unreferenced & pre-allocated resume points.
replaceMaybeFallbackFunctionGetter(nullptr);
});
MOZ_TRY(traverseBytecode());
MOZ_ASSERT(iterators_.empty(), "Iterators should be added to outer builder");
if (!info().isAnalysis() && !abortedPreliminaryGroups().empty()) {
return abort(AbortReason::PreliminaryObjects);
}
return Ok();
}
void IonBuilder::runTask() {
  // This is the entry point when Ion compiles are run off-thread.
TraceLoggerThread* logger = TraceLoggerForCurrentThread();
TraceLoggerEvent event(TraceLogger_AnnotateScripts, script());
AutoTraceLog logScript(logger, event);
AutoTraceLog logCompile(logger, TraceLogger_IonCompilation);
jit::JitContext jctx(realm->runtime(), realm, &alloc());
setBackgroundCodegen(jit::CompileBackEnd(this));
}
void IonBuilder::rewriteParameter(uint32_t slotIdx, MDefinition* param) {
MOZ_ASSERT(param->isParameter() || param->isGetArgumentsObjectArg());
TemporaryTypeSet* types = param->resultTypeSet();
MDefinition* actual = ensureDefiniteType(param, types->getKnownMIRType());
if (actual == param) {
return;
}
  // Careful! We leave the original MParameter in the entry resume point. The
  // arguments still need to be checked unless proven otherwise at the call
  // site, and these checks can bail out. We can end up with:
// v0 = Parameter(0)
// v1 = Unbox(v0, INT32)
// -- ResumePoint(v0)
//
// As usual, it would be invalid for v1 to be captured in the initial
// resume point, rather than v0.
current->rewriteSlot(slotIdx, actual);
}
// Apply Type Inference information to parameters early on, unboxing them if
// they have a definitive type. The actual guards will be emitted by the code
// generator, explicitly, as part of the function prologue.
AbortReasonOr<Ok> IonBuilder::rewriteParameters() {
MOZ_ASSERT(info().environmentChainSlot() == 0);
// If this JSScript is not the code of a function, then skip the
// initialization of function parameters.
if (!info().funMaybeLazy()) {
return Ok();
}
for (uint32_t i = info().startArgSlot(); i < info().endArgSlot(); i++) {
if (!alloc().ensureBallast()) {
return abort(AbortReason::Alloc);
}
MDefinition* param = current->getSlot(i);
rewriteParameter(i, param);
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::initParameters() {
// If this JSScript is not the code of a function, then skip the
// initialization of function parameters.
if (!info().funMaybeLazy()) {
return Ok();
}
// If we are doing OSR on a frame which initially executed in the
// interpreter and didn't accumulate type information, try to use that OSR
// frame to determine possible initial types for 'this' and parameters.
if (thisTypes->empty() && baselineFrame_) {
TypeSet::Type type = baselineFrame_->thisType;
if (type.isSingletonUnchecked()) {
checkNurseryObject(type.singleton());
}
thisTypes->addType(type, alloc_->lifoAlloc());
}
MParameter* param =
MParameter::New(alloc(), MParameter::THIS_SLOT, thisTypes);
current->add(param);
current->initSlot(info().thisSlot(), param);
for (uint32_t i = 0; i < info().nargs(); i++) {
TemporaryTypeSet* types = &argTypes[i];
if (types->empty() && baselineFrame_ &&
!script_->jitScript()->modifiesArguments()) {
TypeSet::Type type = baselineFrame_->argTypes[i];
if (type.isSingletonUnchecked()) {
checkNurseryObject(type.singleton());
}
types->addType(type, alloc_->lifoAlloc());
}
param = MParameter::New(alloc().fallible(), i, types);
if (!param) {
return abort(AbortReason::Alloc);
}
current->add(param);
current->initSlot(info().argSlotUnchecked(i), param);
}
return Ok();
}
void IonBuilder::initLocals() {
// Initialize all frame slots to undefined. Lexical bindings are temporal
// dead zoned in bytecode.
if (info().nlocals() == 0) {
return;
}
MConstant* undef = MConstant::New(alloc(), UndefinedValue());
current->add(undef);
for (uint32_t i = 0; i < info().nlocals(); i++) {
current->initSlot(info().localSlot(i), undef);
}
}
bool IonBuilder::usesEnvironmentChain() {
return script()->jitScript()->usesEnvironmentChain();
}
AbortReasonOr<Ok> IonBuilder::initEnvironmentChain(MDefinition* callee) {
MInstruction* env = nullptr;
  // If the script doesn't use the env chain, then it's already initialized
  // from earlier. However, always make an env chain when |needsArgsObj| is
  // true for the script, since arguments object construction requires the
  // env chain to be passed in.
if (!info().needsArgsObj() && !usesEnvironmentChain()) {
return Ok();
}
// The env chain is only tracked in scripts that have NAME opcodes which
// will try to access the env. For other scripts, the env instructions
// will be held live by resume points and code will still be generated for
// them, so just use a constant undefined value.
if (JSFunction* fun = info().funMaybeLazy()) {
if (!callee) {
MCallee* calleeIns = MCallee::New(alloc());
current->add(calleeIns);
callee = calleeIns;
}
env = MFunctionEnvironment::New(alloc(), callee);
current->add(env);
    // This reproduces what is done in CallObject::createForFunction. Skip
    // this for the arguments analysis, as the script might not have a
    // baseline script with template objects yet.
if (fun->needsSomeEnvironmentObject() &&
info().analysisMode() != Analysis_ArgumentsUsage) {
if (fun->needsNamedLambdaEnvironment()) {
env = createNamedLambdaObject(callee, env);
}
// TODO: Parameter expression-induced extra var environment not
// yet handled.
if (fun->needsExtraBodyVarEnvironment()) {
return abort(AbortReason::Disable, "Extra var environment unsupported");
}
if (fun->needsCallObject()) {
MOZ_TRY_VAR(env, createCallObject(callee, env));
}
}
} else if (ModuleObject* module = info().module()) {
// Modules use a pre-created env object.
env = constant(ObjectValue(module->initialEnvironment()));
} else {
// For global scripts without a non-syntactic global scope, the env
// chain is the global lexical env.
MOZ_ASSERT(!script()->isForEval());
MOZ_ASSERT(!script()->hasNonSyntacticScope());
env = constant(ObjectValue(script()->global().lexicalEnvironment()));
}
// Update the environment slot from UndefinedValue only after initial
// environment is created so that bailout doesn't see a partial env.
// See: |InitFromBailout|
current->setEnvironmentChain(env);
return Ok();
}
void IonBuilder::initArgumentsObject() {
JitSpew(JitSpew_IonMIR,
"%s:%u:%u - Emitting code to initialize arguments object! block=%p",
script()->filename(), script()->lineno(), script()->column(),
current);
MOZ_ASSERT(info().needsArgsObj());
bool mapped = script()->hasMappedArgsObj();
ArgumentsObject* templateObj =
script()->realm()->maybeArgumentsTemplateObject(mapped);
MCreateArgumentsObject* argsObj = MCreateArgumentsObject::New(
alloc(), current->environmentChain(), templateObj);
current->add(argsObj);
current->setArgumentsObject(argsObj);
}
AbortReasonOr<Ok> IonBuilder::addOsrValueTypeBarrier(
uint32_t slot, MInstruction** def_, MIRType type,
TemporaryTypeSet* typeSet) {
MInstruction*& def = *def_;
MBasicBlock* osrBlock = def->block();
// Clear bogus type information added in newOsrPreheader().
def->setResultType(MIRType::Value);
def->setResultTypeSet(nullptr);
if (typeSet && !typeSet->unknown()) {
MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
osrBlock->insertBefore(osrBlock->lastIns(), barrier);
osrBlock->rewriteSlot(slot, barrier);
def = barrier;
// If the TypeSet is more precise than |type|, adjust |type| for the
// code below.
if (type == MIRType::Value) {
type = barrier->type();
}
} else if (type == MIRType::Null || type == MIRType::Undefined ||
type == MIRType::MagicOptimizedArguments) {
// No unbox instruction will be added below, so check the type by
// adding a type barrier for a singleton type set.
TypeSet::Type ntype = TypeSet::PrimitiveType(type);
LifoAlloc* lifoAlloc = alloc().lifoAlloc();
typeSet = lifoAlloc->new_<TemporaryTypeSet>(lifoAlloc, ntype);
if (!typeSet) {
return abort(AbortReason::Alloc);
}
MInstruction* barrier = MTypeBarrier::New(alloc(), def, typeSet);
osrBlock->insertBefore(osrBlock->lastIns(), barrier);
osrBlock->rewriteSlot(slot, barrier);
def = barrier;
}
  // The following guards aren't directly linked into the use-def chain;
  // however, in the OSR block we need to ensure they're not optimized out,
  // so we mark them as implicitly used.
switch (type) {
case MIRType::Null:
case MIRType::Undefined:
case MIRType::MagicOptimizedArguments:
def->setImplicitlyUsed();
break;
default:
break;
}
// Unbox the OSR value to the type expected by the loop header.
//
// The only specialized types that can show up here are MIRTypes with a
// corresponding TypeSet::Type because NewBaselineFrameInspector and
// newPendingLoopHeader use TypeSet::Type for Values from the BaselineFrame.
// This means magic values other than MagicOptimizedArguments are represented
// as UnknownType() and MIRType::Value. See also TypeSet::IsUntrackedValue.
switch (type) {
case MIRType::Boolean:
case MIRType::Int32:
case MIRType::Double:
case MIRType::String:
case MIRType::Symbol:
case MIRType::BigInt:
case MIRType::Object:
if (type != def->type()) {
MUnbox* unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
osrBlock->insertBefore(osrBlock->lastIns(), unbox);
osrBlock->rewriteSlot(slot, unbox);
def = unbox;
}
break;
case MIRType::Value:
// Nothing to do.
break;
case MIRType::Null: {
MConstant* c = MConstant::New(alloc(), NullValue());
osrBlock->insertBefore(osrBlock->lastIns(), c);
osrBlock->rewriteSlot(slot, c);
def = c;
break;
}
case MIRType::Undefined: {
MConstant* c = MConstant::New(alloc(), UndefinedValue());
osrBlock->insertBefore(osrBlock->lastIns(), c);
osrBlock->rewriteSlot(slot, c);
def = c;
break;
}
case MIRType::MagicOptimizedArguments: {
MOZ_ASSERT(hasLazyArguments_);
MConstant* lazyArg =
MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
osrBlock->insertBefore(osrBlock->lastIns(), lazyArg);
osrBlock->rewriteSlot(slot, lazyArg);
def = lazyArg;
break;
}
default:
MOZ_CRASH("Unexpected type");
}
MOZ_ASSERT(def == osrBlock->getSlot(slot));
return Ok();
}
AbortReasonOr<Ok> IonBuilder::maybeAddOsrTypeBarriers() {
if (!info().osrPc()) {
return Ok();
}
// The loop has successfully been processed, and the loop header phis
// have their final type. Add unboxes and type barriers in the OSR
// block to check that the values have the appropriate type, and update
// the types in the preheader.
MBasicBlock* osrBlock = graph().osrBlock();
if (!osrBlock) {
// Because IonBuilder does not compile catch blocks, it's possible to
// end up without an OSR block if the OSR pc is only reachable via a
// break-statement inside the catch block. For instance:
//
// for (;;) {
// try {
// throw 3;
// } catch(e) {
// break;
// }
// }
// while (..) { } // <= OSR here, only reachable via catch block.
//
// For now we just abort in this case.
MOZ_ASSERT(graph().hasTryBlock());
return abort(AbortReason::Disable,
"OSR block only reachable through catch block");
}
MBasicBlock* preheader = osrBlock->getSuccessor(0);
MBasicBlock* header = preheader->getSuccessor(0);
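  // The OSR block is the second predecessor of the loop preheader; the first
  // predecessor is the normal entry path.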
static const size_t OSR_PHI_POSITION = 1;
MOZ_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);
MResumePoint* headerRp = header->entryResumePoint();
size_t stackDepth = headerRp->stackDepth();
MOZ_ASSERT(stackDepth == osrBlock->stackDepth());
for (uint32_t slot = info().startArgSlot(); slot < stackDepth; slot++) {
    // Aliased slots are never accessed, since they need to go through
    // the call object. The type barriers are added there and can be
    // discarded here.
if (info().isSlotAliased(slot)) {
continue;
}
if (!alloc().ensureBallast()) {
return abort(AbortReason::Alloc);
}
MInstruction* def = osrBlock->getSlot(slot)->toInstruction();
MPhi* preheaderPhi = preheader->getSlot(slot)->toPhi();
MPhi* headerPhi = headerRp->getOperand(slot)->toPhi();
MIRType type = headerPhi->type();
TemporaryTypeSet* typeSet = headerPhi->resultTypeSet();
MOZ_TRY(addOsrValueTypeBarrier(slot, &def, type, typeSet));
preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
preheaderPhi->setResultType(type);
preheaderPhi->setResultTypeSet(typeSet);
}
return Ok();
}
#ifdef DEBUG
// In debug builds, after compiling a bytecode op, this class is used to check
// that all values popped by this opcode either:
//
// (1) Have the ImplicitlyUsed flag set on them.
// (2) Have more uses than before compiling this op (the value is
// used as operand of a new MIR instruction).
//
// This is used to catch problems where IonBuilder pops a value without
// adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
class MOZ_RAII PoppedValueUseChecker {
Vector<MDefinition*, 4, SystemAllocPolicy> popped_;
Vector<size_t, 4, SystemAllocPolicy> poppedUses_;
MBasicBlock* current_;
jsbytecode* pc_;
public:
PoppedValueUseChecker(MBasicBlock* current, jsbytecode* pc)
: current_(current), pc_(pc) {}
MOZ_MUST_USE bool init() {
unsigned nuses = GetUseCount(pc_);
for (unsigned i = 0; i < nuses; i++) {
MDefinition* def = current_->peek(-int32_t(i + 1));
if (!popped_.append(def) || !poppedUses_.append(def->defUseCount())) {
return false;
}
}
return true;
}
void checkAfterOp() {
JSOp op = JSOp(*pc_);
// Don't require SSA uses for values popped by these ops.
switch (op) {
case JSOP_POP:
case JSOP_POPN:
case JSOP_DUPAT:
case JSOP_DUP:
case JSOP_DUP2:
case JSOP_PICK:
case JSOP_UNPICK:
case JSOP_SWAP:
case JSOP_SETARG:
case JSOP_SETLOCAL:
case JSOP_INITLEXICAL:
case JSOP_SETRVAL:
case JSOP_VOID:
// Basic stack/local/argument management opcodes.
return;
case JSOP_CASE:
case JSOP_DEFAULT:
// These ops have to pop the switch value when branching but don't
// actually use it.
return;
default:
break;
}
for (size_t i = 0; i < popped_.length(); i++) {
switch (op) {
case JSOP_POS:
case JSOP_TONUMERIC:
case JSOP_TOID:
case JSOP_TOSTRING:
// These ops may leave their input on the stack without setting
// the ImplicitlyUsed flag. If this value will be popped immediately,
// we may replace it with |undefined|, but the difference is
// not observable.
MOZ_ASSERT(i == 0);
if (current_->peek(-1) == popped_[0]) {
break;
}
MOZ_FALLTHROUGH;
default:
MOZ_ASSERT(popped_[i]->isImplicitlyUsed() ||
// First value popped by JSOP_ENDITER is not used at all,
// it's similar to JSOP_POP above.
(op == JSOP_ENDITER && i == 0) ||
// MNewDerivedTypedObject instances are
// often dead unless they escape from the
// fn. See IonBuilder::loadTypedObjectData()
// for more details.
popped_[i]->isNewDerivedTypedObject() ||
popped_[i]->defUseCount() > poppedUses_[i]);
break;
}
}
}
};
#endif
AbortReasonOr<Ok> IonBuilder::traverseBytecode() {
// See the "Control Flow handling in IonBuilder" comment in IonBuilder.h for
// more information.
// IonBuilder's destructor is not called, so make sure pendingEdges_ and
// GSNCache are not holding onto malloc memory when we return.
pendingEdges_.emplace();
auto freeMemory = mozilla::MakeScopeExit([&] {
pendingEdges_.reset();
gsn.purge();
});
MOZ_TRY(startTraversingBlock(current));
const jsbytecode* const codeEnd = script()->codeEnd();
while (true) {
if (!alloc().ensureBallast()) {
return abort(AbortReason::Alloc);
}
// Skip unreachable ops (for example code after a 'return' or 'throw') until
// we get to the next jump target.
if (hasTerminatedBlock()) {
while (!BytecodeIsJumpTarget(JSOp(*pc))) {
// Finish any "broken" loops with an unreachable backedge. For example:
//
// do {
// ...
// return;
// ...
// } while (x);
//
// This loop never actually loops.
if (!loopStack_.empty() &&
IsBackedgeForLoopHead(pc, loopStack_.back().header()->pc())) {
MOZ_ASSERT(loopDepth_ > 0);
loopDepth_--;
loopStack_.popBack();
}
pc = GetNextPc(pc);
if (pc == codeEnd) {
return Ok();
}
}
}
#ifdef DEBUG
PoppedValueUseChecker useChecker(current, pc);
if (!useChecker.init()) {
return abort(AbortReason::Alloc);
}
#endif
MOZ_ASSERT(script()->containsPC(pc));
nextpc = GetNextPc(pc);
// Nothing in inspectOpcode() is allowed to advance the pc.
JSOp op = JSOp(*pc);
bool restarted = false;
MOZ_TRY(inspectOpcode(op, &restarted));
#ifdef DEBUG
if (!restarted) {
useChecker.checkAfterOp();
}
#endif
if (nextpc == codeEnd) {
return Ok();
}
pc = nextpc;
MOZ_ASSERT(script()->containsPC(pc));
if (!hasTerminatedBlock()) {
current->updateTrackedSite(bytecodeSite(pc));
}
}
  // The infinite loop above never breaks, so this point is unreachable.
  // Don't add code here, or you'll trigger compile errors about unreachable
  // code with some compilers!
}
AbortReasonOr<Ok> IonBuilder::startTraversingBlock(MBasicBlock* block) {
block->setLoopDepth(loopDepth_);
if (block->pc() && script()->hasScriptCounts()) {
block->setHitCount(script()->getHitCount(block->pc()));
}
// Optimization to move a predecessor that only has this block as successor
// just before this block. Skip this optimization if the previous block is
// not part of the same function, as we might have to backtrack on inlining
// failures.
if (block->numPredecessors() == 1 &&
block->getPredecessor(0)->numSuccessors() == 1 &&
!block->getPredecessor(0)->outerResumePoint()) {
graph().removeBlockFromList(block->getPredecessor(0));
graph().addBlock(block->getPredecessor(0));
}
MOZ_TRY(setCurrentAndSpecializePhis(block));
graph().addBlock(block);
return Ok();
}
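// A GOTO is either the backedge closing the innermost loop or a forward
// jump; forward jumps are recorded as pending edges and hooked up once the
// jump target's bytecode is reached.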
AbortReasonOr<Ok> IonBuilder::jsop_goto(bool* restarted) {
MOZ_ASSERT(*pc == JSOP_GOTO);
if (IsBackedgePC(pc)) {
return visitBackEdge(restarted);
}
jsbytecode* target = pc + GET_JUMP_OFFSET(pc);
return visitGoto(target);
}
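// Remember that |edge| jumps to |target|. Pending edges are accumulated per
// target pc and turned into real CFG edges when that jump target is visited.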
AbortReasonOr<Ok> IonBuilder::addPendingEdge(const PendingEdge& edge,
jsbytecode* target) {
PendingEdgesMap::AddPtr p = pendingEdges_->lookupForAdd(target);
if (p) {
if (!p->value().append(edge)) {
return abort(AbortReason::Alloc);
}
return Ok();
}
PendingEdges edges;
static_assert(PendingEdges::InlineLength >= 1,
"Appending one element should be infallible");
MOZ_ALWAYS_TRUE(edges.append(edge));
if (!pendingEdges_->add(p, target, std::move(edges))) {
return abort(AbortReason::Alloc);
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::visitGoto(jsbytecode* target) {
current->end(MGoto::New(alloc(), nullptr));
MOZ_TRY(addPendingEdge(PendingEdge::NewGoto(current), target));
setTerminatedBlock();
return Ok();
}
AbortReasonOr<Ok> IonBuilder::jsop_loophead() {
// All loops have the following bytecode structure:
//
// LOOPHEAD
// ...
// IFNE/IFEQ/GOTO to LOOPHEAD
MOZ_ASSERT(*pc == JSOP_LOOPHEAD);
if (hasTerminatedBlock()) {
// The whole loop is unreachable.
return Ok();
}
jssrcnote* sn = GetSrcNote(gsn, script(), pc);
MOZ_ASSERT(sn);
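  // For-of loops keep extra values (the iterator state) on the expression
  // stack across the backedge, so the loop header needs stack phis for them;
  // the other loop kinds keep nothing on the stack at the loop head.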
uint32_t stackPhiCount;
switch (SN_TYPE(sn)) {
case SRC_FOR_OF:
stackPhiCount = 3;
break;
case SRC_FOR_IN:
case SRC_FOR:
case SRC_WHILE:
case SRC_DO_WHILE:
stackPhiCount = 0;
break;
default:
MOZ_CRASH("Unexpected source note");
}
bool canOsr = LoopHeadCanIonOsr(pc);
bool osr = pc == info().osrPc();
if (osr) {
MOZ_ASSERT(canOsr);
MBasicBlock* preheader;
MOZ_TRY_VAR(preheader, newOsrPreheader(current, pc));
current->end(MGoto::New(alloc(), preheader));
MOZ_TRY(setCurrentAndSpecializePhis(preheader));
}
loopDepth_++;
MBasicBlock* header;
MOZ_TRY_VAR(header,
newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount));
current->end(MGoto::New(alloc(), header));
if (!loopStack_.emplaceBack(header)) {
return abort(AbortReason::Alloc);
}
MOZ_TRY(analyzeNewLoopTypes(header));
MOZ_TRY(startTraversingBlock(header));
return emitLoopHeadInstructions(pc);
}
AbortReasonOr<Ok> IonBuilder::visitBackEdge(bool* restarted) {
MOZ_ASSERT(loopDepth_ > 0);
loopDepth_--;
MBasicBlock* header = loopStack_.back().header();
current->end(MGoto::New(alloc(), header));
// Compute phis in the loop header and propagate them throughout the loop,
// including the successor.
AbortReason r = header->setBackedge(alloc(), current);
switch (r) {
case AbortReason::NoAbort:
loopStack_.popBack();
setTerminatedBlock();
return Ok();
case AbortReason::Disable:
// If there are types for variables on the backedge that were not
// present at the original loop header, then uses of the variables'
// phis may have generated incorrect nodes. The new types have been
// incorporated into the header phis, so remove all blocks for the
// loop body and restart with the new types.
*restarted = true;
MOZ_TRY(restartLoop(header));
return Ok();
default:
return abort(r);
}
}
AbortReasonOr<Ok> IonBuilder::emitLoopHeadInstructions(jsbytecode* pc) {
MOZ_ASSERT(JSOp(*pc) == JSOP_LOOPHEAD);
MInterruptCheck* check = MInterruptCheck::New(alloc());
current->add(check);
insertRecompileCheck(pc);
if (script()->trackRecordReplayProgress()) {
check->setTrackRecordReplayProgress();
// When recording/replaying, MInterruptCheck is effectful and should
// not reexecute after bailing out.
MOZ_TRY(resumeAfter(check));
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::inspectOpcode(JSOp op, bool* restarted) {
// Add not yet implemented opcodes at the bottom of the switch!
switch (op) {
case JSOP_NOP_DESTRUCTURING:
case JSOP_TRY_DESTRUCTURING:
case JSOP_LINENO:
case JSOP_NOP:
return Ok();
case JSOP_LOOPHEAD:
return jsop_loophead();
case JSOP_UNDEFINED:
// If this ever changes, change what JSOP_GIMPLICITTHIS does too.
pushConstant(UndefinedValue());
return Ok();
case JSOP_TRY:
return visitTry();
case JSOP_DEFAULT:
current->pop();
return visitGoto(pc + GET_JUMP_OFFSET(pc));
case JSOP_GOTO:
return jsop_goto(restarted);
case JSOP_IFNE:
case JSOP_IFEQ:
case JSOP_AND:
case JSOP_OR:
case JSOP_CASE:
return visitTest(op, restarted);
case JSOP_COALESCE:
return jsop_coalesce();
case JSOP_RETURN:
case JSOP_RETRVAL:
return visitReturn(op);
case JSOP_THROW:
return visitThrow();
case JSOP_JUMPTARGET:
return visitJumpTarget(op);
case JSOP_TABLESWITCH:
return visitTableSwitch();
case JSOP_BITNOT:
return jsop_bitnot();
case JSOP_BITAND:
case JSOP_BITOR:
case JSOP_BITXOR:
case JSOP_LSH:
case JSOP_RSH:
case JSOP_URSH:
return jsop_bitop(op);
case JSOP_ADD:
case JSOP_SUB:
case JSOP_MUL:
case JSOP_DIV:
case JSOP_MOD:
return jsop_binary_arith(op);
case JSOP_POW:
return jsop_pow();
case JSOP_POS:
return jsop_pos();
case JSOP_TONUMERIC:
return jsop_tonumeric();
case JSOP_NEG:
return jsop_neg();
case JSOP_INC:
case JSOP_DEC:
return jsop_inc_or_dec(op);
case JSOP_TOSTRING:
return jsop_tostring();
case JSOP_DEFVAR:
return jsop_defvar();
case JSOP_DEFLET:
case JSOP_DEFCONST:
return jsop_deflexical();
case JSOP_DEFFUN:
return jsop_deffun();
case JSOP_EQ:
case JSOP_NE:
case JSOP_STRICTEQ:
case JSOP_STRICTNE:
case JSOP_LT:
case JSOP_LE:
case JSOP_GT:
case JSOP_GE:
return jsop_compare(op);
case JSOP_DOUBLE:
pushConstant(GET_INLINE_VALUE(pc));
return Ok();
case JSOP_BIGINT:
pushConstant(BigIntValue(info().getBigInt(pc)));
return Ok();
case JSOP_STRING:
pushConstant(StringValue(info().getAtom(pc)));
return Ok();
case JSOP_SYMBOL: {
unsigned which = GET_UINT8(pc);
JS::Symbol* sym = realm->runtime()->wellKnownSymbols().get(which);
pushConstant(SymbolValue(sym));
return Ok();
}
case JSOP_ZERO:
pushConstant(Int32Value(0));
return Ok();
case JSOP_ONE:
pushConstant(Int32Value(1));
return Ok();
case JSOP_NULL:
pushConstant(NullValue());
return Ok();
case JSOP_VOID:
current->pop();
pushConstant(UndefinedValue());
return Ok();
case JSOP_HOLE:
pushConstant(MagicValue(JS_ELEMENTS_HOLE));
return Ok();
case JSOP_FALSE:
pushConstant(BooleanValue(false));
return Ok();
case JSOP_TRUE:
pushConstant(BooleanValue(true));
return Ok();
case JSOP_ARGUMENTS:
return jsop_arguments();
case JSOP_REST:
return jsop_rest();
case JSOP_GETARG:
if (info().argsObjAliasesFormals()) {
MGetArgumentsObjectArg* getArg = MGetArgumentsObjectArg::New(
alloc(), current->argumentsObject(), GET_ARGNO(pc));
current->add(getArg);
current->push(getArg);
} else {
current->pushArg(GET_ARGNO(pc));
}
return Ok();
case JSOP_SETARG:
return jsop_setarg(GET_ARGNO(pc));
case JSOP_GETLOCAL:
current->pushLocal(GET_LOCALNO(pc));
return Ok();
case JSOP_SETLOCAL:
current->setLocal(GET_LOCALNO(pc));
return Ok();
case JSOP_THROWSETCONST:
case JSOP_THROWSETALIASEDCONST:
case JSOP_THROWSETCALLEE:
return jsop_throwsetconst();
case JSOP_CHECKLEXICAL:
return jsop_checklexical();
case JSOP_INITLEXICAL:
current->setLocal(GET_LOCALNO(pc));
return Ok();
case JSOP_INITGLEXICAL: {
MOZ_ASSERT(!script()->hasNonSyntacticScope());
MDefinition* value = current->pop();
current->push(
constant(ObjectValue(script()->global().lexicalEnvironment())));
current->push(value);
return jsop_setprop(info().getAtom(pc)->asPropertyName());
}
case JSOP_CHECKALIASEDLEXICAL:
return jsop_checkaliasedlexical(EnvironmentCoordinate(pc));
case JSOP_INITALIASEDLEXICAL:
return jsop_setaliasedvar(EnvironmentCoordinate(pc));
case JSOP_UNINITIALIZED:
pushConstant(MagicValue(JS_UNINITIALIZED_LEXICAL));
return Ok();
case JSOP_POP: {
MDefinition* def = current->pop();
// POP opcodes frequently appear where values are killed, e.g. after
// SET* opcodes. Place a resume point afterwards to avoid capturing
// the dead value in later snapshots, except in places where that
// resume point is obviously unnecessary.
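// For example (illustrative), in |obj.prop = f();| the SETPROP leaves the
// assigned value on the stack and the following POP kills it; resuming
// after the POP keeps that dead value out of later snapshots.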
if (pc[JSOP_POP_LENGTH] == JSOP_POP) {
return Ok();
}
if (def->isConstant()) {
return Ok();
}
return maybeInsertResume();
}
case JSOP_POPN:
for (uint32_t i = 0, n = GET_UINT16(pc); i < n; i++) {
current->pop();
}
return Ok();
case JSOP_DUPAT:
current->pushSlot(current->stackDepth() - 1 - GET_UINT24(pc));
return Ok();
case JSOP_NEWARRAY:
return jsop_newarray(GET_UINT32(pc));
case JSOP_NEWARRAY_COPYONWRITE:
return jsop_newarray_copyonwrite();
case JSOP_NEWINIT:
case JSOP_NEWOBJECT:
case JSOP_NEWOBJECT_WITHGROUP:
return jsop_newobject();
case JSOP_INITELEM:
case JSOP_INITHIDDENELEM:
return jsop_initelem();
case JSOP_INITELEM_INC:
return jsop_initelem_inc();
case JSOP_INITELEM_ARRAY:
return jsop_initelem_array();
case JSOP_INITPROP:
case JSOP_INITLOCKEDPROP:
case JSOP_INITHIDDENPROP: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_initprop(name);
}
case JSOP_MUTATEPROTO: {
return jsop_mutateproto();
}
case JSOP_INITPROP_GETTER:
case JSOP_INITHIDDENPROP_GETTER:
case JSOP_INITPROP_SETTER:
case JSOP_INITHIDDENPROP_SETTER: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_initprop_getter_setter(name);
}
case JSOP_INITELEM_GETTER:
case JSOP_INITHIDDENELEM_GETTER:
case JSOP_INITELEM_SETTER:
case JSOP_INITHIDDENELEM_SETTER:
return jsop_initelem_getter_setter();
case JSOP_FUNCALL:
return jsop_funcall(GET_ARGC(pc));
case JSOP_FUNAPPLY:
return jsop_funapply(GET_ARGC(pc));
case JSOP_SPREADCALL:
return jsop_spreadcall();
case JSOP_CALL:
case JSOP_CALL_IGNORES_RV:
case JSOP_CALLITER:
case JSOP_NEW:
MOZ_TRY(jsop_call(GET_ARGC(pc),
(JSOp)*pc == JSOP_NEW || (JSOp)*pc == JSOP_SUPERCALL,
(JSOp)*pc == JSOP_CALL_IGNORES_RV));
if (op == JSOP_CALLITER) {
if (!outermostBuilder()->iterators_.append(current->peek(-1))) {
return abort(AbortReason::Alloc);
}
}
return Ok();
case JSOP_EVAL:
case JSOP_STRICTEVAL:
return jsop_eval(GET_ARGC(pc));
case JSOP_INT8:
pushConstant(Int32Value(GET_INT8(pc)));
return Ok();
case JSOP_UINT16:
pushConstant(Int32Value(GET_UINT16(pc)));
return Ok();
case JSOP_GETGNAME: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
if (!script()->hasNonSyntacticScope()) {
return jsop_getgname(name);
}
return jsop_getname(name);
}
case JSOP_SETGNAME:
case JSOP_STRICTSETGNAME: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
JSObject* obj = nullptr;
if (!script()->hasNonSyntacticScope()) {
obj = testGlobalLexicalBinding(name);
}
if (obj) {
return setStaticName(obj, name);
}
return jsop_setprop(name);
}
case JSOP_GETNAME: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_getname(name);
}
case JSOP_GETINTRINSIC: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_intrinsic(name);
}
case JSOP_GETIMPORT: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_getimport(name);
}
case JSOP_BINDGNAME:
if (!script()->hasNonSyntacticScope()) {
if (JSObject* env = testGlobalLexicalBinding(info().getName(pc))) {
pushConstant(ObjectValue(*env));
return Ok();
}
}
// Fall through to JSOP_BINDNAME
MOZ_FALLTHROUGH;
case JSOP_BINDNAME:
return jsop_bindname(info().getName(pc));
case JSOP_BINDVAR:
return jsop_bindvar();
case JSOP_DUP:
current->pushSlot(current->stackDepth() - 1);
return Ok();
case JSOP_DUP2:
return jsop_dup2();
case JSOP_SWAP:
current->swapAt(-1);
return Ok();
case JSOP_PICK:
current->pick(-GET_INT8(pc));
return Ok();
case JSOP_UNPICK:
current->unpick(-GET_INT8(pc));
return Ok();
case JSOP_GETALIASEDVAR:
return jsop_getaliasedvar(EnvironmentCoordinate(pc));
case JSOP_SETALIASEDVAR:
return jsop_setaliasedvar(EnvironmentCoordinate(pc));
case JSOP_UINT24:
case JSOP_RESUMEINDEX:
pushConstant(Int32Value(GET_UINT24(pc)));
return Ok();
case JSOP_INT32:
pushConstant(Int32Value(GET_INT32(pc)));
return Ok();
case JSOP_GETELEM:
case JSOP_CALLELEM:
MOZ_TRY(jsop_getelem());
if (op == JSOP_CALLELEM) {
MOZ_TRY(improveThisTypesForCall());
}
return Ok();
case JSOP_SETELEM:
case JSOP_STRICTSETELEM:
return jsop_setelem();
case JSOP_LENGTH:
return jsop_length();
case JSOP_NOT:
return jsop_not();
case JSOP_FUNCTIONTHIS:
return jsop_functionthis();
case JSOP_GLOBALTHIS:
return jsop_globalthis();
case JSOP_CALLEE: {
MDefinition* callee = getCallee();
current->push(callee);
return Ok();
}
case JSOP_ENVCALLEE:
return jsop_envcallee();
case JSOP_SUPERBASE:
return jsop_superbase();
case JSOP_GETPROP_SUPER: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_getprop_super(name);
}
case JSOP_GETELEM_SUPER:
return jsop_getelem_super();
case JSOP_GETPROP:
case JSOP_CALLPROP: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
MOZ_TRY(jsop_getprop(name));
if (op == JSOP_CALLPROP) {
MOZ_TRY(improveThisTypesForCall());
}
return Ok();
}
case JSOP_SETPROP:
case JSOP_STRICTSETPROP:
case JSOP_SETNAME:
case JSOP_STRICTSETNAME: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_setprop(name);
}
case JSOP_DELPROP:
case JSOP_STRICTDELPROP: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_delprop(name);
}
case JSOP_DELELEM:
case JSOP_STRICTDELELEM:
return jsop_delelem();
case JSOP_REGEXP:
return jsop_regexp(info().getRegExp(pc));
case JSOP_CALLSITEOBJ:
pushConstant(ObjectValue(*info().getObject(pc)));
return Ok();
case JSOP_OBJECT:
return jsop_object(info().getObject(pc));
case JSOP_CLASSCONSTRUCTOR:
return jsop_classconstructor();
case JSOP_TYPEOF:
case JSOP_TYPEOFEXPR:
return jsop_typeof();
case JSOP_TOASYNCITER:
return jsop_toasynciter();
case JSOP_TOID:
return jsop_toid();
case JSOP_ITERNEXT:
return jsop_iternext();
case JSOP_LAMBDA:
return jsop_lambda(info().getFunction(pc));
case JSOP_LAMBDA_ARROW:
return jsop_lambda_arrow(info().getFunction(pc));
case JSOP_SETFUNNAME:
return jsop_setfunname(GET_UINT8(pc));
case JSOP_PUSHLEXICALENV:
return jsop_pushlexicalenv(GET_UINT32_INDEX(pc));
case JSOP_POPLEXICALENV:
current->setEnvironmentChain(walkEnvironmentChain(1));
return Ok();
case JSOP_FRESHENLEXICALENV:
return jsop_copylexicalenv(true);
case JSOP_RECREATELEXICALENV:
return jsop_copylexicalenv(false);
case JSOP_ITER:
return jsop_iter();
case JSOP_MOREITER:
return jsop_itermore();
case JSOP_ISNOITER:
return jsop_isnoiter();
case JSOP_ENDITER:
return jsop_iterend();
case JSOP_IN:
return jsop_in();
case JSOP_HASOWN:
return jsop_hasown();
case JSOP_SETRVAL:
MOZ_ASSERT(!script()->noScriptRval());
current->setSlot(info().returnValueSlot(), current->pop());
return Ok();
case JSOP_INSTANCEOF:
return jsop_instanceof();
case JSOP_DEBUGLEAVELEXICALENV:
return Ok();
case JSOP_DEBUGGER:
return jsop_debugger();
case JSOP_GIMPLICITTHIS:
if (!script()->hasNonSyntacticScope()) {
pushConstant(UndefinedValue());
return Ok();
}
// Fall through to JSOP_IMPLICITTHIS in the non-syntactic scope case.
MOZ_FALLTHROUGH;
case JSOP_IMPLICITTHIS: {
PropertyName* name = info().getAtom(pc)->asPropertyName();
return jsop_implicitthis(name);
}
case JSOP_NEWTARGET:
return jsop_newtarget();
case JSOP_CHECKISOBJ:
return jsop_checkisobj(GET_UINT8(pc));
case JSOP_CHECKISCALLABLE:
return jsop_checkiscallable(GET_UINT8(pc));
case JSOP_CHECKOBJCOERCIBLE:
return jsop_checkobjcoercible();
case JSOP_DEBUGCHECKSELFHOSTED: {
#ifdef DEBUG
MDebugCheckSelfHosted* check =
MDebugCheckSelfHosted::New(alloc(), current->pop());
current->add(check);
current->push(check);
MOZ_TRY(resumeAfter(check));
#endif
return Ok();
}
case JSOP_IS_CONSTRUCTING:
pushConstant(MagicValue(JS_IS_CONSTRUCTING));
return Ok();
case JSOP_OPTIMIZE_SPREADCALL:
return jsop_optimize_spreadcall();
case JSOP_IMPORTMETA:
return jsop_importmeta();
case JSOP_DYNAMIC_IMPORT:
return jsop_dynamic_import();
case JSOP_INSTRUMENTATION_ACTIVE:
return jsop_instrumentation_active();
case JSOP_INSTRUMENTATION_CALLBACK:
return jsop_instrumentation_callback();
case JSOP_INSTRUMENTATION_SCRIPT_ID:
return jsop_instrumentation_scriptid();
// ===== NOT Yet Implemented =====
// Read below!
// With
case JSOP_ENTERWITH:
case JSOP_LEAVEWITH:
// Spread
case JSOP_SPREADNEW:
case JSOP_SPREADEVAL:
case JSOP_STRICTSPREADEVAL:
// Classes
case JSOP_CHECKCLASSHERITAGE:
case JSOP_FUNWITHPROTO:
case JSOP_OBJWITHPROTO:
case JSOP_BUILTINPROTO:
case JSOP_INITHOMEOBJECT:
case JSOP_DERIVEDCONSTRUCTOR:
case JSOP_CHECKTHIS:
case JSOP_CHECKRETURN:
case JSOP_CHECKTHISREINIT:
// Super
case JSOP_SETPROP_SUPER:
case JSOP_SETELEM_SUPER:
case JSOP_STRICTSETPROP_SUPER:
case JSOP_STRICTSETELEM_SUPER:
case JSOP_SUPERFUN:
// Most of the infrastructure for these exists in Ion, but needs review
// and testing before these are enabled. Once other opcodes that are used
// in derived classes are supported in Ion, this can be better validated
// with testcases. Pay special attention to bailout and other areas where
// JSOP_NEW has special handling.
case JSOP_SPREADSUPERCALL:
case JSOP_SUPERCALL:
// Environments (bug 1366470)
case JSOP_PUSHVARENV:
case JSOP_POPVARENV:
// Compound assignment
case JSOP_GETBOUNDNAME:
// Generators / Async (bug 1317690)
case JSOP_EXCEPTION:
case JSOP_ISGENCLOSING:
case JSOP_INITIALYIELD:
case JSOP_YIELD:
case JSOP_FINALYIELDRVAL:
case JSOP_RESUME:
case JSOP_AFTERYIELD:
case JSOP_AWAIT:
case JSOP_TRYSKIPAWAIT:
case JSOP_GENERATOR:
case JSOP_ASYNCAWAIT:
case JSOP_ASYNCRESOLVE:
// Misc
case JSOP_DELNAME:
case JSOP_FINALLY:
case JSOP_GETRVAL:
case JSOP_GOSUB:
case JSOP_RETSUB:
case JSOP_SETINTRINSIC:
case JSOP_THROWMSG:
// === !! WARNING WARNING WARNING !! ===
// Do you really want to sacrifice performance by not implementing this
// operation in the optimizing compiler?
break;
case JSOP_FORCEINTERPRETER:
// Intentionally not implemented.
break;
case JSOP_UNUSED71:
case JSOP_UNUSED106:
case JSOP_UNUSED120:
case JSOP_UNUSED149:
case JSOP_UNUSED227:
case JSOP_LIMIT:
break;
}
// Track a simpler message, since the actionable abort message is a
// static string, and the internal opcode name isn't an actionable
// thing anyway.
trackActionableAbort("Unsupported bytecode");
#ifdef DEBUG
return abort(AbortReason::Disable, "Unsupported opcode: %s", CodeName[op]);
#else
return abort(AbortReason::Disable, "Unsupported opcode: %d", op);
#endif
}
AbortReasonOr<Ok> IonBuilder::restartLoop(MBasicBlock* header) {
AutoTraceLog logCompile(TraceLoggerForCurrentThread(),
TraceLogger_IonBuilderRestartLoop);
spew("New types at loop header, restarting loop body");
if (JitOptions.limitScriptSize) {
if (++numLoopRestarts_ >= MAX_LOOP_RESTARTS) {
return abort(AbortReason::Disable,
"Aborted while processing control flow");
}
}
// Restore slots to entry state.
size_t stackDepth = header->entryResumePoint()->stackDepth();
for (size_t slot = 0; slot < stackDepth; slot++) {
MDefinition* loopDef = header->entryResumePoint()->getOperand(slot);
header->setSlot(slot, loopDef);
}
// Remove phi operands.
for (MPhiIterator phi = header->phisBegin(); phi != header->phisEnd();
phi++) {
phi->removeOperand(phi->numOperands() - 1);
}
// Discard unreferenced & pre-allocated resume points.
replaceMaybeFallbackFunctionGetter(nullptr);
// Remove all blocks in the loop body other than the header, which has phis
// of the appropriate type and incoming edges to preserve.
if (!graph().removeSuccessorBlocks(header)) {
return abort(AbortReason::Alloc);
}
graph().removeBlockFromList(header);
// Remove all instructions from the header itself, and all resume points
// except the entry resume point.
header->discardAllInstructions();
header->discardAllResumePoints(/* discardEntry = */ false);
header->setStackDepth(header->getPredecessor(0)->stackDepth());
loopDepth_ = header->loopDepth();
// Don't specializePhis(), as the header has been visited before and the
// phis have already had their type set.
setCurrent(header);
graph().addBlock(current);
jsbytecode* loopHead = header->pc();
MOZ_ASSERT(JSOp(*loopHead) == JSOP_LOOPHEAD);
// Since we discarded the header's instructions above, emit them again. This
// includes the interrupt check.
MOZ_TRY(emitLoopHeadInstructions(loopHead));
nextpc = GetNextPc(loopHead);
// Remove loop header and dead blocks from pendingBlocks.
for (PendingEdgesMap::Range r = pendingEdges_->all(); !r.empty();
r.popFront()) {
PendingEdges& blocks = r.front().value();
for (size_t i = blocks.length(); i > 0; i--) {
PendingEdge& block = blocks[i - 1];
if (block.block() == header || block.block()->isDead()) {
blocks.erase(&block);
}
}
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::replaceTypeSet(MDefinition* subject,
TemporaryTypeSet* type,
MTest* test) {
if (type->unknown()) {
return Ok();
}
// Don't emit MFilterTypeSet if it doesn't improve the typeset.
if (subject->resultTypeSet()) {
if (subject->resultTypeSet()->equals(type)) {
return Ok();
}
} else {
TemporaryTypeSet oldTypes(alloc_->lifoAlloc(), subject->type());
if (oldTypes.equals(type)) {
return Ok();
}
}
MInstruction* replace = nullptr;
MDefinition* ins;
for (uint32_t i = 0; i < current->stackDepth(); i++) {
ins = current->getSlot(i);
// Instead of creating a new MFilterTypeSet, try to update the old one.
if (ins->isFilterTypeSet() && ins->getOperand(0) == subject &&
ins->dependency() == test) {
TemporaryTypeSet* intersect = TypeSet::intersectSets(
ins->resultTypeSet(), type, alloc_->lifoAlloc());
if (!intersect) {
return abort(AbortReason::Alloc);
}
ins->toFilterTypeSet()->setResultType(intersect->getKnownMIRType());
ins->toFilterTypeSet()->setResultTypeSet(intersect);
if (ins->type() == MIRType::Undefined) {
current->setSlot(i, constant(UndefinedValue()));
} else if (ins->type() == MIRType::Null) {
current->setSlot(i, constant(NullValue()));
} else if (ins->type() == MIRType::MagicOptimizedArguments) {
current->setSlot(i, constant(MagicValue(JS_OPTIMIZED_ARGUMENTS)));
} else {
MOZ_ASSERT(!IsMagicType(ins->type()));
}
continue;
}
if (ins == subject) {
if (!replace) {
replace = MFilterTypeSet::New(alloc(), subject, type);
current->add(replace);
// To make sure we don't hoist it above the MTest, we use the
// 'dependency' field of an MInstruction. This is normally used by
// Alias Analysis, but won't get overwritten here, since this
// instruction doesn't have an AliasSet.
replace->setDependency(test);
if (replace->type() == MIRType::Undefined) {
replace = constant(UndefinedValue());
} else if (replace->type() == MIRType::Null) {
replace = constant(NullValue());
} else if (replace->type() == MIRType::MagicOptimizedArguments) {
replace = constant(MagicValue(JS_OPTIMIZED_ARGUMENTS));
} else {
MOZ_ASSERT(!IsMagicType(ins->type()));
}
}
current->setSlot(i, replace);
}
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::improveTypesAtCompare(MCompare* ins,
bool trueBranch,
MTest* test) {
if (ins->compareType() == MCompare::Compare_Undefined ||
ins->compareType() == MCompare::Compare_Null) {
return improveTypesAtNullOrUndefinedCompare(ins, trueBranch, test);
}
if ((ins->lhs()->isTypeOf() || ins->rhs()->isTypeOf()) &&
(ins->lhs()->isConstant() || ins->rhs()->isConstant())) {
return improveTypesAtTypeOfCompare(ins, trueBranch, test);
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::improveTypesAtTypeOfCompare(MCompare* ins,
bool trueBranch,
MTest* test) {
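// For example (illustrative), given |if (typeof x == "string") { ... }|,
// the true branch lets us narrow x's typeset to String.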
MTypeOf* typeOf =
ins->lhs()->isTypeOf() ? ins->lhs()->toTypeOf() : ins->rhs()->toTypeOf();
MConstant* constant = ins->lhs()->isConstant() ? ins->lhs()->toConstant()
: ins->rhs()->toConstant();
if (constant->type() != MIRType::String) {
return Ok();
}
bool equal = ins->jsop() == JSOP_EQ || ins->jsop() == JSOP_STRICTEQ;
bool notEqual = ins->jsop() == JSOP_NE || ins->jsop() == JSOP_STRICTNE;
if (notEqual) {
trueBranch = !trueBranch;
}
// Relational compares not supported.
if (!equal && !notEqual) {
return Ok();
}
MDefinition* subject = typeOf->input();
TemporaryTypeSet* inputTypes = subject->resultTypeSet();
// Create temporary typeset equal to the type if there is no resultTypeSet.
TemporaryTypeSet tmp;
if (!inputTypes) {
if (subject->type() == MIRType::Value) {
return Ok();
}
inputTypes = &tmp;
tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()),
alloc_->lifoAlloc());
}
if (inputTypes->unknown()) {
return Ok();
}
// Note: we cannot remove the AnyObject type in the false branch,
// since there are multiple ways to get an object. That is the reason
// for the 'trueBranch' test.
TemporaryTypeSet filter;
const JSAtomState& names = runtime->names();
if (constant->toString() == TypeName(JSTYPE_UNDEFINED, names)) {
filter.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch) {
filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
}
} else if (constant->toString() == TypeName(JSTYPE_BOOLEAN, names)) {
filter.addType(TypeSet::BooleanType(), alloc_->lifoAlloc());
} else if (constant->toString() == TypeName(JSTYPE_NUMBER, names)) {
filter.addType(TypeSet::Int32Type(), alloc_->lifoAlloc());
filter.addType(TypeSet::DoubleType(), alloc_->lifoAlloc());
} else if (constant->toString() == TypeName(JSTYPE_STRING, names)) {
filter.addType(TypeSet::StringType(), alloc_->lifoAlloc());
} else if (constant->toString() == TypeName(JSTYPE_SYMBOL, names)) {
filter.addType(TypeSet::SymbolType(), alloc_->lifoAlloc());
} else if (constant->toString() == TypeName(JSTYPE_BIGINT, names)) {
filter.addType(TypeSet::BigIntType(), alloc_->lifoAlloc());
} else if (constant->toString() == TypeName(JSTYPE_OBJECT, names)) {
filter.addType(TypeSet::NullType(), alloc_->lifoAlloc());
if (trueBranch) {
filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
}
} else if (constant->toString() == TypeName(JSTYPE_FUNCTION, names)) {
if (typeOf->inputMaybeCallableOrEmulatesUndefined() && trueBranch) {
filter.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
}
} else {
return Ok();
}
TemporaryTypeSet* type;
if (trueBranch) {
type = TypeSet::intersectSets(&filter, inputTypes, alloc_->lifoAlloc());
} else {
type = TypeSet::removeSet(inputTypes, &filter, alloc_->lifoAlloc());
}
if (!type) {
return abort(AbortReason::Alloc);
}
return replaceTypeSet(subject, type, test);
}
AbortReasonOr<Ok> IonBuilder::improveTypesAtNullOrUndefinedCompare(
MCompare* ins, bool trueBranch, MTest* test) {
MOZ_ASSERT(ins->compareType() == MCompare::Compare_Undefined ||
ins->compareType() == MCompare::Compare_Null);
// altersUndefined/altersNull indicate whether we can filter/set
// Undefined/Null.
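// For example (illustrative): |x !== undefined| only tells us about
// Undefined, while |x != undefined| also alters Null, since loose
// equality conflates null and undefined.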
bool altersUndefined, altersNull;
JSOp op = ins->jsop();
switch (op) {
case JSOP_STRICTNE:
case JSOP_STRICTEQ:
altersUndefined = ins->compareType() == MCompare::Compare_Undefined;
altersNull = ins->compareType() == MCompare::Compare_Null;
break;
case JSOP_NE:
case JSOP_EQ:
altersUndefined = altersNull = true;
break;
default:
MOZ_CRASH("Relational compares not supported");
}
MDefinition* subject = ins->lhs();
TemporaryTypeSet* inputTypes = subject->resultTypeSet();
MOZ_ASSERT(IsNullOrUndefined(ins->rhs()->type()));
// Create temporary typeset equal to the type if there is no resultTypeSet.
TemporaryTypeSet tmp;
if (!inputTypes) {
if (subject->type() == MIRType::Value) {
return Ok();
}
inputTypes = &tmp;
tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()),
alloc_->lifoAlloc());
}
if (inputTypes->unknown()) {
return Ok();
}
TemporaryTypeSet* type;
// Decide if we need to filter the type or set it.
if ((op == JSOP_STRICTEQ || op == JSOP_EQ) ^ trueBranch) {
// Remove undefined/null
TemporaryTypeSet remove;
if (altersUndefined) {
remove.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
}
if (altersNull) {
remove.addType(TypeSet::NullType(), alloc_->lifoAlloc());
}
type = TypeSet::removeSet(inputTypes, &remove, alloc_->lifoAlloc());
} else {
// Set undefined/null.
TemporaryTypeSet base;
if (altersUndefined) {
base.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
// If the typeset may emulate undefined, then we cannot filter the objects.
if (inputTypes->maybeEmulatesUndefined(constraints())) {
base.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
}
}
if (altersNull) {
base.addType(TypeSet::NullType(), alloc_->lifoAlloc());
}
type = TypeSet::intersectSets(&base, inputTypes, alloc_->lifoAlloc());
}
if (!type) {
return abort(AbortReason::Alloc);
}
return replaceTypeSet(subject, type, test);
}
AbortReasonOr<Ok> IonBuilder::improveTypesAtTestSuccessor(
MTest* test, MBasicBlock* successor) {
MOZ_ASSERT(successor->numPredecessors() == 1);
MOZ_ASSERT(test->block() == successor->getPredecessor(0));
MOZ_ASSERT(test->ifTrue() == successor || test->ifFalse() == successor);
bool trueBranch = test->ifTrue() == successor;
return improveTypesAtTest(test->getOperand(0), trueBranch, test);
}
AbortReasonOr<Ok> IonBuilder::improveTypesAtTest(MDefinition* ins,
bool trueBranch, MTest* test) {
// We explore the test condition to try and deduce as much type information
// as possible.
// All branches of this switch that don't want to fall through to the
// default behavior must return. The default behavior assumes that a true
// test means the incoming ins is not null or undefined and that a false
// test means it's one of null, undefined, false, 0, "", and objects
// emulating undefined.
switch (ins->op()) {
case MDefinition::Opcode::Not:
return improveTypesAtTest(ins->toNot()->getOperand(0), !trueBranch, test);
case MDefinition::Opcode::IsObject: {
MDefinition* subject = ins->getOperand(0);
TemporaryTypeSet* oldType = subject->resultTypeSet();
// Create temporary typeset equal to the type if there is no
// resultTypeSet.
TemporaryTypeSet tmp;
if (!oldType) {
if (subject->type() == MIRType::Value) {
return Ok();
}
oldType = &tmp;
tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()),
alloc_->lifoAlloc());
}
if (oldType->unknown()) {
return Ok();
}
TemporaryTypeSet* type = nullptr;
if (trueBranch) {
type = oldType->cloneObjectsOnly(alloc_->lifoAlloc());
} else {
type = oldType->cloneWithoutObjects(alloc_->lifoAlloc());
}
if (!type) {
return abort(AbortReason::Alloc);
}
return replaceTypeSet(subject, type, test);
}
case MDefinition::Opcode::IsNullOrUndefined: {
MDefinition* subject = ins->getOperand(0);
TemporaryTypeSet* oldType = subject->resultTypeSet();
// Create temporary typeset equal to the type if there is no
// resultTypeSet.
TemporaryTypeSet tmp;
if (!oldType) {
if (subject->type() == MIRType::Value) {
return Ok();
}
oldType = &tmp;
tmp.addType(TypeSet::PrimitiveOrAnyObjectType(subject->type()),
alloc_->lifoAlloc());
}
// If the typeset is unknown we cannot optimize, so we return.
if (oldType->unknown()) {
return Ok();
}
// Decide either to set or remove.
TemporaryTypeSet filter;
filter.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
filter.addType(TypeSet::NullType(), alloc_->lifoAlloc());
TemporaryTypeSet* type;
if (trueBranch) {
type = TypeSet::intersectSets(&filter, oldType, alloc_->lifoAlloc());
} else {
type = TypeSet::removeSet(oldType, &filter, alloc_->lifoAlloc());
}
if (!type) {
return abort(AbortReason::Alloc);
}
return replaceTypeSet(subject, type, test);
}
case MDefinition::Opcode::Compare:
return improveTypesAtCompare(ins->toCompare(), trueBranch, test);
default:
break;
}
// By default MTest tests ToBoolean(input). As a result, in the true branch
// we can filter out undefined and null. In the false branch we can only
// encounter undefined, null, false, 0, "" and objects that emulate
// undefined.
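// For example (illustrative):
//
// if (x) {
//   ... x cannot be undefined or null here ...
// } else {
//   ... x is undefined, null, false, 0, 0.0, "", 0n, or an object
//   emulating undefined ...
// }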
TemporaryTypeSet* oldType = ins->resultTypeSet();
TemporaryTypeSet* type;
// Create temporary typeset equal to the type if there is no resultTypeSet.
TemporaryTypeSet tmp;
if (!oldType) {
if (ins->type() == MIRType::Value) {
return Ok();
}
oldType = &tmp;
tmp.addType(TypeSet::PrimitiveOrAnyObjectType(ins->type()),
alloc_->lifoAlloc());
}
// If the typeset is unknown we cannot optimize, so we return.
if (oldType->unknown()) {
return Ok();
}
// Decide either to set or remove.
if (trueBranch) {
TemporaryTypeSet remove;
remove.addType(TypeSet::UndefinedType(), alloc_->lifoAlloc());
remove.addType(TypeSet::NullType(), alloc_->lifoAlloc());
type = TypeSet::removeSet(oldType, &remove, alloc_->lifoAlloc());
} else {
TemporaryTypeSet base;
base.addType(TypeSet::UndefinedType(),
alloc_->lifoAlloc()); // ToBoolean(undefined) == false
base.addType(TypeSet::NullType(),
alloc_->lifoAlloc()); // ToBoolean(null) == false
base.addType(TypeSet::BooleanType(),
alloc_->lifoAlloc()); // ToBoolean(false) == false
base.addType(TypeSet::Int32Type(),
alloc_->lifoAlloc()); // ToBoolean(0) == false
base.addType(TypeSet::DoubleType(),
alloc_->lifoAlloc()); // ToBoolean(0.0) == false
base.addType(TypeSet::StringType(),
alloc_->lifoAlloc()); // ToBoolean("") == false
base.addType(TypeSet::BigIntType(),
alloc_->lifoAlloc()); // ToBoolean(0n) == false
// If the typeset may emulate undefined, then we cannot filter out
// objects.
if (oldType->maybeEmulatesUndefined(constraints())) {
base.addType(TypeSet::AnyObjectType(), alloc_->lifoAlloc());
}
type = TypeSet::intersectSets(&base, oldType, alloc_->lifoAlloc());
}
if (!type) {
return abort(AbortReason::Alloc);
}
return replaceTypeSet(ins, type, test);
}
AbortReasonOr<Ok> IonBuilder::jsop_dup2() {
uint32_t lhsSlot = current->stackDepth() - 2;
uint32_t rhsSlot = current->stackDepth() - 1;
current->pushSlot(lhsSlot);
current->pushSlot(rhsSlot);
return Ok();
}
AbortReasonOr<Ok> IonBuilder::visitTestBackedge(JSOp op, bool* restarted) {
MOZ_ASSERT(op == JSOP_IFNE);
MOZ_ASSERT(loopDepth_ > 0);
MDefinition* ins = current->pop();
jsbytecode* loopHead = pc + GET_JUMP_OFFSET(pc);
MOZ_ASSERT(JSOp(*loopHead) == JSOP_LOOPHEAD);
jsbytecode* successorPC = GetNextPc(pc);
// We can finish the loop now. Use the loophead pc instead of the current pc
// because the stack depth at the start of that op matches the current stack
// depth (after popping our operand).
MBasicBlock* backedge;
MOZ_TRY_VAR(backedge, newBlock(current, loopHead));
current->end(newTest(ins, backedge, nullptr));
MOZ_TRY(addPendingEdge(PendingEdge::NewTestFalse(current, op), successorPC));
MOZ_TRY(startTraversingBlock(backedge));
return visitBackEdge(restarted);
}
// Returns true iff the MTest added for |op| has a true-target corresponding
// to the join point in the bytecode.
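// For example (illustrative), in |a || b| the jump to the join point is
// taken when |a| is truthy, so JSOP_OR's true-target is the join point;
// in |a && b| the jump is taken when |a| is falsy, so for JSOP_AND it is
// the false-target.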
static bool TestTrueTargetIsJoinPoint(JSOp op) {
switch (op) {
case JSOP_IFNE:
case JSOP_OR:
case JSOP_CASE:
return true;
case JSOP_IFEQ:
case JSOP_AND:
case JSOP_COALESCE:
return false;
default:
MOZ_CRASH("Unexpected op");
}
}
AbortReasonOr<Ok> IonBuilder::visitTest(JSOp op, bool* restarted) {
MOZ_ASSERT(op == JSOP_IFEQ || op == JSOP_IFNE || op == JSOP_AND ||
op == JSOP_OR || op == JSOP_CASE);
if (IsBackedgePC(pc)) {
return visitTestBackedge(op, restarted);
}
jsbytecode* target1 = GetNextPc(pc);
jsbytecode* target2 = pc + GET_JUMP_OFFSET(pc);
// JSOP_AND and JSOP_OR inspect the top stack value but don't pop it.
// Also note that JSOP_CASE must pop a second value on the true-branch (the
// input to the switch-statement). This conditional pop happens in
// visitJumpTarget.
bool mustKeepCondition = (op == JSOP_AND || op == JSOP_OR);
MDefinition* ins = mustKeepCondition ? current->peek(-1) : current->pop();
// If this op always branches to the same pc we treat this as a JSOP_GOTO.
if (target1 == target2) {
ins->setImplicitlyUsedUnchecked();
return visitGoto(target1);
}
MTest* mir = newTest(ins, nullptr, nullptr);
current->end(mir);
if (TestTrueTargetIsJoinPoint(op)) {
mozilla::Swap(target1, target2);
}
MOZ_TRY(addPendingEdge(PendingEdge::NewTestTrue(current, op), target1));
MOZ_TRY(addPendingEdge(PendingEdge::NewTestFalse(current, op), target2));
setTerminatedBlock();
return Ok();
}
AbortReasonOr<Ok> IonBuilder::jsop_coalesce() {
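// JSOP_COALESCE implements the short-circuiting test of |a ?? b|
// (illustrative): if the value on top of the stack is null or undefined,
// we fall through and evaluate |b|; otherwise we jump past it.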
jsbytecode* target1 = GetNextPc(pc);
jsbytecode* target2 = pc + GET_JUMP_OFFSET(pc);
MOZ_ASSERT(target2 > target1);
MDefinition* ins = current->peek(-1);
MIsNullOrUndefined* isNullOrUndefined = MIsNullOrUndefined::New(alloc(), ins);
current->add(isNullOrUndefined);
MTest* mir = newTest(isNullOrUndefined, nullptr, nullptr);
current->end(mir);
MOZ_TRY(addPendingEdge(PendingEdge::NewTestTrue(current, JSOP_COALESCE),
target1));
MOZ_TRY(addPendingEdge(PendingEdge::NewTestFalse(current, JSOP_COALESCE),
target2));
setTerminatedBlock();
return Ok();
}
AbortReasonOr<Ok> IonBuilder::visitTry() {
// We don't support try-finally.
if (script()->jitScript()->hasTryFinally()) {
return abort(AbortReason::Disable, "Try-finally not supported");
}
// Try-catch within inline frames is not yet supported.
if (isInlineBuilder()) {
return abort(AbortReason::Disable, "Try-catch during inlining");
}
// Try-catch during analyses is not yet supported. Code within the 'catch'
// block is not accounted for.
if (info().isAnalysis()) {
return abort(AbortReason::Disable, "Try-catch during analysis");
}
jssrcnote* sn = GetSrcNote(gsn, script(), pc);
MOZ_ASSERT(SN_TYPE(sn) == SRC_TRY);
// Get the pc of the last instruction in the try block. It's a JSOP_GOTO to
// jump over the catch block.
jsbytecode* endpc =
pc + GetSrcNoteOffset(sn, SrcNote::Try::EndOfTryJumpOffset);
MOZ_ASSERT(JSOp(*endpc) == JSOP_GOTO);
MOZ_ASSERT(GET_JUMP_OFFSET(endpc) > 0);
jsbytecode* afterTry = endpc + GET_JUMP_OFFSET(endpc);
// The baseline compiler should not attempt to enter the catch block
// via OSR.
MOZ_ASSERT(info().osrPc() < endpc || info().osrPc() >= afterTry);
// If controlflow in the try body is terminated (by a return or throw
// statement), the code after the try-statement may still be reachable
// via the catch block (which we don't compile) and OSR can enter it.
// For example:
//
// try {
// throw 3;
// } catch(e) { }
//
// for (var i=0; i<1000; i++) {}
//
// To handle this, we create two blocks: one for the try block and one
// for the code following the try-catch statement.
graph().setHasTryBlock();
MBasicBlock* tryBlock;
MOZ_TRY_VAR(tryBlock, newBlock(current, GetNextPc(pc)));
current->end(MGotoWithFake::New(alloc(), tryBlock, nullptr));
MOZ_TRY(addPendingEdge(PendingEdge::NewGotoWithFake(current), afterTry));
return startTraversingBlock(tryBlock);
}
AbortReasonOr<Ok> IonBuilder::visitJumpTarget(JSOp op) {
PendingEdgesMap::Ptr p = pendingEdges_->lookup(pc);
if (!p) {
// No (reachable) jumps so this is just a no-op.
return Ok();
}
PendingEdges edges(std::move(p->value()));
pendingEdges_->remove(p);
// Loop-restarts may clear the list rather than remove the map entry entirely.
// This is to reduce allocator churn since it is likely the list will be
// filled in again in the general case.
if (edges.empty()) {
return Ok();
}
MBasicBlock* joinBlock = nullptr;
// Create join block if there's fall-through from the previous bytecode op.
if (!hasTerminatedBlock()) {
MOZ_TRY_VAR(joinBlock, newBlock(current, pc));
current->end(MGoto::New(alloc(), joinBlock));
setTerminatedBlock();
}
auto addEdge = [&](MBasicBlock* pred, size_t numToPop) -> AbortReasonOr<Ok> {
if (joinBlock) {
MOZ_ASSERT(pred->stackDepth() - numToPop == joinBlock->stackDepth());
if (!joinBlock->addPredecessorPopN(alloc(), pred, numToPop)) {
return abort(AbortReason::Alloc);
}
return Ok();
}
MOZ_TRY_VAR(joinBlock, newBlockPopN(pred, pc, numToPop));
return Ok();
};
// When a block is terminated with an MTest instruction we can end up with the
// following triangle structure:
//
// testBlock
// / |
// block |
// \ |
// joinBlock
//
// Although this is fine for correctness, it has the following issues:
//
// 1) The FoldTests pass is unable to optimize this pattern. This matters for
// short-circuit operations (JSOP_AND, JSOP_COALESCE, etc).
//
// 2) We can't easily use improveTypesAtTest to improve type information in
// this case:
//
// var obj = ...;
// if (obj === null) {
// obj = {};
// }
// ... obj must be non-null ...
//
// To fix these issues, we create an empty block to get a diamond structure:
//
// testBlock
// / |
// block emptyBlock
// \ |
// joinBlock
auto createEmptyBlockForTest =
[&](MBasicBlock* pred, size_t successor,
size_t numToPop) -> AbortReasonOr<MBasicBlock*> {
MOZ_ASSERT(joinBlock);
MBasicBlock* emptyBlock;
MOZ_TRY_VAR(emptyBlock, newBlockPopN(pred, pc, numToPop));
MOZ_ASSERT(emptyBlock->stackDepth() == joinBlock->stackDepth());
MTest* test = pred->lastIns()->toTest();
test->initSuccessor(successor, emptyBlock);
MOZ_TRY(startTraversingBlock(emptyBlock));
MOZ_TRY(improveTypesAtTestSuccessor(test, emptyBlock));
emptyBlock->end(MGoto::New(alloc(), joinBlock));
setTerminatedBlock();
return emptyBlock;
};
for (const PendingEdge& edge : edges) {
MBasicBlock* source = edge.block();
MControlInstruction* lastIns = source->lastIns();
switch (edge.kind()) {
case PendingEdge::Kind::TestTrue: {
// JSOP_CASE must pop the value when branching to the true-target.
// If we create an empty block, we have to pop the value there instead
// of as part of the emptyBlock -> joinBlock edge so stack depths match
// the current depth.
const size_t numToPop = (edge.testOp() == JSOP_CASE) ? 1 : 0;
const size_t successor = 0; // true-branch
if (joinBlock && TestTrueTargetIsJoinPoint(edge.testOp())) {
MBasicBlock* pred;
MOZ_TRY_VAR(pred,
createEmptyBlockForTest(source, successor, numToPop));
MOZ_TRY(addEdge(pred, 0));
} else {
MOZ_TRY(addEdge(source, numToPop));
lastIns->toTest()->initSuccessor(successor, joinBlock);
}
continue;
}
case PendingEdge::Kind::TestFalse: {
const size_t numToPop = 0;
const size_t successor = 1; // false-branch
if (joinBlock && !TestTrueTargetIsJoinPoint(edge.testOp())) {
MBasicBlock* pred;
MOZ_TRY_VAR(pred,
createEmptyBlockForTest(source, successor, numToPop));
MOZ_TRY(addEdge(pred, 0));
} else {
MOZ_TRY(addEdge(source, numToPop));
lastIns->toTest()->initSuccessor(successor, joinBlock);
}
continue;
}
case PendingEdge::Kind::Goto:
MOZ_TRY(addEdge(source, 0));
lastIns->toGoto()->initSuccessor(0, joinBlock);
continue;
case PendingEdge::Kind::GotoWithFake:
MOZ_TRY(addEdge(source, 0));
lastIns->toGotoWithFake()->initSuccessor(1, joinBlock);
continue;
}
MOZ_CRASH("Invalid kind");
}
MOZ_ASSERT(joinBlock);
MOZ_TRY(startTraversingBlock(joinBlock));
// If the join block has just one predecessor with an MTest, try to improve
// type information.
if (joinBlock->numPredecessors() == 1) {
MBasicBlock* pred = joinBlock->getPredecessor(0);
if (pred->lastIns()->isTest()) {
MTest* test = pred->lastIns()->toTest();
MOZ_TRY(improveTypesAtTestSuccessor(test, joinBlock));
}
}
return Ok();
}
AbortReasonOr<Ok> IonBuilder::visitReturn(JSOp op) {
MDefinition* def;
switch (op) {
case JSOP_RETURN:
// Return the value on top of the stack.
def = current->pop();
break;
case JSOP_RETRVAL:
// Return undefined eagerly if script doesn't use return value.
if (script()->noScriptRval()) {
MInstruction* ins = MConstant::New(alloc(), UndefinedValue());
current->add(ins);
def = ins;
break;
}
def = current->getSlot(info().returnValueSlot());
break;
default:
MOZ_CRASH("unknown return op");
}
MReturn* ret = MReturn::New(alloc(), def);
current->end(ret);
if (!graph().addReturn(current)) {
return abort(AbortReason::Alloc);
}
setTerminatedBlock();
return Ok();
}
AbortReasonOr<Ok> IonBuilder::visitThrow() {
MDefinition* def = current->pop();
// MThrow is not marked as effectful. This means when it throws and we
// are inside a try block, we could use an earlier resume point and this
// resume point may not be up-to-date, for example:
//
// (function() {
// try {
// var x = 1;
// foo(); // resume point
// x = 2;
// throw foo;
// } catch(e) {
// print(x);
// }
// })();
//
// If we use the resume point after the call, this will print 1 instead
// of 2. To fix this, we create a resume point right before the MThrow.
//
// Note that this is not a problem for instructions other than MThrow
// because they are either marked as effectful (have their own resume
// point) or cannot throw a catchable exception.
//
// We always install this resume point (instead of only when the function
// has a try block) in order to handle the Debugger onExceptionUnwind
// hook. When we need to handle the hook, we bail out to baseline right
// after the throw and propagate the exception when debug mode is on. This
// is opposed to the normal behavior of resuming directly in the
// associated catch block.
MNop* nop = MNop::New(alloc());
current->add(nop);
MOZ_TRY(resumeAfter(nop));
MThrow* ins = MThrow::New(alloc(), def);
current->end(ins);
setTerminatedBlock();
return Ok();
}
AbortReasonOr<Ok> IonBuilder::visitTableSwitch() {
jsbytecode* defaultpc = pc + GET_JUMP_OFFSET(pc);
int32_t low = GET_JUMP_OFFSET(pc + JUMP_OFFSET_LEN * 1);
int32_t high = GET_JUMP_OFFSET(pc + JUMP_OFFSET_LEN * 2);
size_t numCases = high - low + 1;
// Create MIR instruction.
MDefinition* ins = current->pop();
MTableSwitch* tableswitch = MTableSwitch::New(alloc(), ins, low, high);
current->end(tableswitch);
MBasicBlock* switchBlock = current;
// Create |default| block.
{
MBasicBlock* defaultBlock;
MOZ_TRY_VAR(defaultBlock, newBlock(switchBlock, defaultpc));
size_t index;
if (!tableswitch->addDefault(defaultBlock, &index)) {
return abort(AbortReason::Alloc);
}
MOZ_ASSERT(index == 0);
MOZ_TRY(startTraversingBlock(defaultBlock));
defaultBlock->end(MGoto::New(alloc(), nullptr));
MOZ_TRY(addPendingEdge(PendingEdge::NewGoto(defaultBlock), defaultpc));
}
// Create blocks for all cases.
for (size_t i = 0; i < numCases; i++) {
jsbytecode* casepc = script()->tableSwitchCasePC(pc, i);
MBasicBlock* caseBlock;
MOZ_TRY_VAR(caseBlock, newBlock(switchBlock, casepc));
size_t index;
if (!tableswitch->addSuccessor(caseBlock, &index)) {
return abort(AbortReason::Alloc);
}
if (!tableswitch->addCase(index)) {
return abort(AbortReason::Alloc);
}
MOZ_TRY(startTraversingBlock(caseBlock));
// If this is an actual case statement, optimize by replacing the
// input to the switch case with the actual number of the case.
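// For example (illustrative), inside the block for |case 3:| every use
// of the switch input can be replaced by the constant 3.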
MConstant* constant = MConstant::New(alloc(), Int32Value(low + int32_t(i)));
caseBlock->add(constant);
for (uint32_t j = 0; j < caseBlock->stackDepth(); j++) {
if (ins != caseBlock->getSlot(j)) {
continue;
}
constant->setDependency(ins);
caseBlock->setSlot(j, constant);
}
caseBlock->end(MGoto::New(alloc(), nullptr));
MOZ_TRY(addPendingEdge(PendingEdge::NewGoto(caseBlock), casepc));
}
setTerminatedBlock();
return Ok();
}
void IonBuilder::pushConstant(const Value& v) { current->push(constant(v)); }
AbortReasonOr<Ok> IonBuilder::bitnotTrySpecialized(bool* emitted,
MDefinition* input) {
MOZ_ASSERT(*emitted == false);
// Try to emit a specialized bitnot instruction based on the input type
// of the operand.
if (input->mightBeType(MIRType::Object) ||
input->mightBeType(MIRType::Symbol) ||
input->mightBeType(MIRType::BigInt)) {
return Ok();
}
MBitNot* ins = MBitNot::New(alloc(), input);
ins->setSpecialization(MIRType::Int32);
current->add(ins);
current->push(ins);
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::jsop_bitnot() {
bool emitted = false;
MDefinition* input = current->pop();
if (!forceInlineCaches()) {
MOZ_TRY(bitnotTrySpecialized(&emitted, input));
if (emitted) {
return Ok();
}
}
MOZ_TRY(arithTryBinaryStub(&emitted, JSOP_BITNOT, nullptr, input));
if (emitted) {
return Ok();
}
// Not possible to optimize. Do a slow vm call.
MBitNot* ins = MBitNot::New(alloc(), input);
current->add(ins);
current->push(ins);
MOZ_ASSERT(ins->isEffectful());
return resumeAfter(ins);
}
AbortReasonOr<MBinaryBitwiseInstruction*> IonBuilder::binaryBitOpEmit(
JSOp op, MIRType specialization, MDefinition* left, MDefinition* right) {
MOZ_ASSERT(specialization == MIRType::Int32 ||
specialization == MIRType::None);
MBinaryBitwiseInstruction* ins;
switch (op) {
case JSOP_BITAND:
ins = MBitAnd::New(alloc(), left, right);
break;
case JSOP_BITOR:
ins = MBitOr::New(alloc(), left, right);
break;
case JSOP_BITXOR:
ins = MBitXor::New(alloc(), left, right);
break;
case JSOP_LSH:
ins = MLsh::New(alloc(), left, right);
break;
case JSOP_RSH:
ins = MRsh::New(alloc(), left, right);
break;
case JSOP_URSH:
ins = MUrsh::New(alloc(), left, right);
break;
default:
MOZ_CRASH("unexpected bitop");
}
current->add(ins);
ins->infer(inspector, pc);
// The expected specialization should match the inferred specialization.
MOZ_ASSERT_IF(specialization == MIRType::None,
ins->specialization() == MIRType::None);
MOZ_ASSERT_IF(
specialization == MIRType::Int32,
ins->specialization() == MIRType::Int32 ||
(op == JSOP_URSH && ins->specialization() == MIRType::Double));
current->push(ins);
if (ins->isEffectful()) {
MOZ_TRY(resumeAfter(ins));
}
return ins;
}
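// An operand is "simple" for a bit op if it cannot be an object, symbol, or
// BigInt, so specializing the operation to int32 is safe.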
static inline bool SimpleBitOpOperand(MDefinition* op) {
return !op->mightBeType(MIRType::Object) &&
!op->mightBeType(MIRType::Symbol) && !op->mightBeType(MIRType::BigInt);
}
AbortReasonOr<Ok> IonBuilder::binaryBitOpTrySpecialized(bool* emitted, JSOp op,
MDefinition* left,
MDefinition* right) {
MOZ_ASSERT(*emitted == false);
// Try to emit a specialized binary instruction based on the input types
// of the operands.
// Anything complex - objects, symbols, and BigInts - is not specialized.
if (!SimpleBitOpOperand(left) || !SimpleBitOpOperand(right)) {
return Ok();
}
MIRType specialization = MIRType::Int32;
MOZ_TRY(binaryBitOpEmit(op, specialization, left, right));
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::jsop_bitop(JSOp op) {
// Pop inputs.
MDefinition* right = current->pop();
MDefinition* left = current->pop();
bool emitted = false;
if (!forceInlineCaches()) {
MOZ_TRY(binaryBitOpTrySpecialized(&emitted, op, left, right));
if (emitted) {
return Ok();
}
}
MOZ_TRY(arithTryBinaryStub(&emitted, op, left, right));
if (emitted) {
return Ok();
}
// Not possible to optimize. Do a slow vm call.
MOZ_TRY(binaryBitOpEmit(op, MIRType::None, left, right));
return Ok();
}
MDefinition::Opcode BinaryJSOpToMDefinition(JSOp op) {
switch (op) {
case JSOP_ADD:
return MDefinition::Opcode::Add;
case JSOP_SUB:
return MDefinition::Opcode::Sub;
case JSOP_MUL:
return MDefinition::Opcode::Mul;
case JSOP_DIV:
return MDefinition::Opcode::Div;
case JSOP_MOD:
return MDefinition::Opcode::Mod;
default:
MOZ_CRASH("unexpected binary opcode");
}
}
AbortReasonOr<Ok> IonBuilder::binaryArithTryConcat(bool* emitted, JSOp op,
MDefinition* left,
MDefinition* right) {
MOZ_ASSERT(*emitted == false);
// Try to convert an addition into a concat operation if the inputs
// indicate this might be a concatenation.
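// For example (illustrative), |"id-" + n| with a numeric |n| can be
// compiled as a string concatenation.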
// Only try to replace this with concat when we have an addition.
if (op != JSOP_ADD) {
return Ok();
}
trackOptimizationAttempt(TrackedStrategy::BinaryArith_Concat);
// Make sure one of the inputs is a string.
if (left->type() != MIRType::String && right->type() != MIRType::String) {
trackOptimizationOutcome(TrackedOutcome::OperandNotString);
return Ok();
}
// The non-string input (if present) should be at least easily coercible to
// a string.
if (right->type() != MIRType::String &&
(right->mightBeType(MIRType::Symbol) ||
right->mightBeType(MIRType::Object) || right->mightBeMagicType())) {
trackOptimizationOutcome(TrackedOutcome::OperandNotEasilyCoercibleToString);
return Ok();
}
if (left->type() != MIRType::String &&
(left->mightBeType(MIRType::Symbol) ||
left->mightBeType(MIRType::Object) || left->mightBeMagicType())) {
trackOptimizationOutcome(TrackedOutcome::OperandNotEasilyCoercibleToString);
return Ok();
}
MConcat* ins = MConcat::New(alloc(), left, right);
current->add(ins);
current->push(ins);
MOZ_TRY(maybeInsertResume());
trackOptimizationSuccess();
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::powTrySpecialized(bool* emitted,
MDefinition* base,
MDefinition* power,
MIRType outputType) {
// Typechecking.
MDefinition* output = nullptr;
MIRType baseType = base->type();
MIRType powerType = power->type();
if (outputType != MIRType::Int32 && outputType != MIRType::Double) {
return Ok();
}
if (!IsNumberType(baseType)) {
return Ok();
}
if (!IsNumberType(powerType)) {
return Ok();
}
if (powerType == MIRType::Float32) {
powerType = MIRType::Double;
}
MPow* pow = MPow::New(alloc(), base, power, powerType);
current->add(pow);
output = pow;
// Cast to the right type
if (outputType == MIRType::Int32 && output->type() != MIRType::Int32) {
auto* toInt = MToNumberInt32::New(alloc(), output);
current->add(toInt);
output = toInt;
}
if (outputType == MIRType::Double && output->type() != MIRType::Double) {
MToDouble* toDouble = MToDouble::New(alloc(), output);
current->add(toDouble);
output = toDouble;
}
current->push(output);
*emitted = true;
return Ok();
}
MIRType IonBuilder::binaryArithNumberSpecialization(MDefinition* left,
MDefinition* right) {
// Try to specialize as int32.
if (left->type() == MIRType::Int32 && right->type() == MIRType::Int32 &&
!inspector->hasSeenDoubleResult(pc)) {
return MIRType::Int32;
}
return MIRType::Double;
}
AbortReasonOr<MBinaryArithInstruction*> IonBuilder::binaryArithEmitSpecialized(
MDefinition::Opcode op, MIRType specialization, MDefinition* left,
MDefinition* right) {
MBinaryArithInstruction* ins =
MBinaryArithInstruction::New(alloc(), op, left, right);
ins->setSpecialization(specialization);
if (op == MDefinition::Opcode::Add || op == MDefinition::Opcode::Mul) {
ins->setCommutative();
}
current->add(ins);
current->push(ins);
MOZ_ASSERT(!ins->isEffectful());
MOZ_TRY(maybeInsertResume());
return ins;
}
AbortReasonOr<Ok> IonBuilder::binaryArithTrySpecialized(bool* emitted, JSOp op,
MDefinition* left,
MDefinition* right) {
MOZ_ASSERT(*emitted == false);
// Try to emit a specialized binary instruction based on the input types
// of the operands.
trackOptimizationAttempt(TrackedStrategy::BinaryArith_SpecializedTypes);
// Anything complex - strings, symbols, and objects - is not specialized.
if (!SimpleArithOperand(left) || !SimpleArithOperand(right)) {
trackOptimizationOutcome(TrackedOutcome::OperandNotSimpleArith);
return Ok();
}
// One of the inputs needs to be a number.
if (!IsNumberType(left->type()) && !IsNumberType(right->type())) {
trackOptimizationOutcome(TrackedOutcome::OperandNotNumber);
return Ok();
}
MDefinition::Opcode defOp = BinaryJSOpToMDefinition(op);
MIRType specialization = binaryArithNumberSpecialization(left, right);
MBinaryArithInstruction* ins;
MOZ_TRY_VAR(ins,
binaryArithEmitSpecialized(defOp, specialization, left, right));
// Relax int32 to double if, despite the fact that we have int32 operands and
// we've never seen a double result, we know the result may overflow or be a
// double.
if (specialization == MIRType::Int32 && ins->constantDoubleResult(alloc())) {
ins->setSpecialization(MIRType::Double);
}
trackOptimizationSuccess();
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::binaryArithTrySpecializedOnBaselineInspector(
bool* emitted, JSOp op, MDefinition* left, MDefinition* right) {
MOZ_ASSERT(*emitted == false);
// Try to emit a specialized binary instruction speculating the
// type using the baseline caches.
trackOptimizationAttempt(
TrackedStrategy::BinaryArith_SpecializedOnBaselineTypes);
MIRType specialization = inspector->expectedBinaryArithSpecialization(pc);
if (specialization == MIRType::None) {
trackOptimizationOutcome(TrackedOutcome::SpeculationOnInputTypesFailed);
return Ok();
}
MDefinition::Opcode defOp = BinaryJSOpToMDefinition(op);
MOZ_TRY(binaryArithEmitSpecialized(defOp, specialization, left, right));
trackOptimizationSuccess();
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::arithTryBinaryStub(bool* emitted, JSOp op,
MDefinition* left,
MDefinition* right) {
MOZ_ASSERT(*emitted == false);
JSOp actualOp = JSOp(*pc);
// The actual jsop, JSOP_POS, is not supported by the IC stub yet.
// There's no IC support for JSOP_POW either.
if (actualOp == JSOP_POS || actualOp == JSOP_POW) {
return Ok();
}
MInstruction* stub = nullptr;
switch (actualOp) {
case JSOP_NEG:
case JSOP_BITNOT:
MOZ_ASSERT_IF(op == JSOP_MUL,
left->maybeConstantValue() &&
left->maybeConstantValue()->toInt32() == -1);
MOZ_ASSERT_IF(op != JSOP_MUL, !left);
stub = MUnaryCache::New(alloc(), right);
break;
case JSOP_ADD:
case JSOP_SUB:
case JSOP_MUL:
case JSOP_DIV:
case JSOP_MOD:
case JSOP_BITAND:
case JSOP_BITOR:
case JSOP_BITXOR:
case JSOP_LSH:
case JSOP_RSH:
case JSOP_URSH:
stub = MBinaryCache::New(alloc(), left, right, MIRType::Value);
break;
default:
MOZ_CRASH("unsupported arith");
}
current->add(stub);
current->push(stub);
// Decrease type from 'any type' to 'empty type' when one of the operands
// is 'empty typed'.
maybeMarkEmpty(stub);
MOZ_TRY(resumeAfter(stub));
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::jsop_binary_arith(JSOp op, MDefinition* left,
MDefinition* right) {
bool emitted = false;
startTrackingOptimizations();
trackTypeInfo(TrackedTypeSite::Operand, left->type(), left->resultTypeSet());
trackTypeInfo(TrackedTypeSite::Operand, right->type(),
right->resultTypeSet());
if (!forceInlineCaches()) {
MOZ_TRY(binaryArithTryConcat(&emitted, op, left, right));
if (emitted) {
return Ok();
}
MOZ_TRY(binaryArithTrySpecialized(&emitted, op, left, right));
if (emitted) {
return Ok();
}
MOZ_TRY(binaryArithTrySpecializedOnBaselineInspector(&emitted, op, left,
right));
if (emitted) {
return Ok();
}
}
MOZ_TRY(arithTryBinaryStub(&emitted, op, left, right));
if (emitted) {
return Ok();
}
// Not possible to optimize. Do a slow vm call.
trackOptimizationAttempt(TrackedStrategy::BinaryArith_Call);
trackOptimizationSuccess();
MDefinition::Opcode defOp = BinaryJSOpToMDefinition(op);
MBinaryArithInstruction* ins =
MBinaryArithInstruction::New(alloc(), defOp, left, right);
// Decrease type from 'any type' to 'empty type' when one of the operands
// is 'empty typed'.
maybeMarkEmpty(ins);
current->add(ins);
current->push(ins);
MOZ_ASSERT(ins->isEffectful());
return resumeAfter(ins);
}
AbortReasonOr<Ok> IonBuilder::jsop_binary_arith(JSOp op) {
MDefinition* right = current->pop();
MDefinition* left = current->pop();
return jsop_binary_arith(op, left, right);
}
AbortReasonOr<Ok> IonBuilder::jsop_pow() {
MDefinition* exponent = current->pop();
MDefinition* base = current->pop();
bool emitted = false;
if (!forceInlineCaches()) {
MOZ_TRY(powTrySpecialized(&emitted, base, exponent, MIRType::Double));
if (emitted) {
return Ok();
}
}
MOZ_TRY(arithTryBinaryStub(&emitted, JSOP_POW, base, exponent));
if (emitted) {
return Ok();
}
// For now, use MIRType::None as a safe cover-all. See bug 1188079.
MPow* pow = MPow::New(alloc(), base, exponent, MIRType::None);
current->add(pow);
current->push(pow);
MOZ_ASSERT(pow->isEffectful());
return resumeAfter(pow);
}
AbortReasonOr<Ok> IonBuilder::jsop_pos() {
if (IsNumberType(current->peek(-1)->type())) {
// Already int32 or double. Set the operand as implicitly used so it
// doesn't get optimized out if it has no other uses, as we could bail
// out.
current->peek(-1)->setImplicitlyUsedUnchecked();
return Ok();
}
// Compile +x as x * 1.
MDefinition* value = current->pop();
MConstant* one = MConstant::New(alloc(), Int32Value(1));
current->add(one);
return jsop_binary_arith(JSOP_MUL, value, one);
}
AbortReasonOr<Ok> IonBuilder::jsop_neg() {
// Since JSOP_NEG does not use a slot, we cannot push the MConstant.
// The MConstant is therefore passed to JSOP_MUL without slot traffic.
MConstant* negator = MConstant::New(alloc(), Int32Value(-1));
current->add(negator);
MDefinition* right = current->pop();
return jsop_binary_arith(JSOP_MUL, negator, right);
}
AbortReasonOr<Ok> IonBuilder::jsop_tonumeric() {
MDefinition* peeked = current->peek(-1);
if (IsNumericType(peeked->type())) {
// Elide the ToNumeric as we already unboxed the value.
peeked->setImplicitlyUsedUnchecked();
return Ok();
}
LifoAlloc* lifoAlloc = alloc().lifoAlloc();
TemporaryTypeSet* types = lifoAlloc->new_<TemporaryTypeSet>();
if (!types) {
return abort(AbortReason::Alloc);
}
types->addType(TypeSet::Int32Type(), lifoAlloc);
types->addType(TypeSet::DoubleType(), lifoAlloc);
types->addType(TypeSet::BigIntType(), lifoAlloc);
if (peeked->type() == MIRType::Value && peeked->resultTypeSet() &&
peeked->resultTypeSet()->isSubset(types)) {
// Elide the ToNumeric because the arg is already a boxed numeric.
peeked->setImplicitlyUsedUnchecked();
return Ok();
}
// Otherwise, pop the value and add an MToNumeric.
MDefinition* popped = current->pop();
MToNumeric* ins = MToNumeric::New(alloc(), popped, types);
current->add(ins);
current->push(ins);
// toValue() is effectful, so add a resume point.
return resumeAfter(ins);
}
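// Convert JSOP_INC/JSOP_DEC into the equivalent binary opcode plus a
// constant 1 right-hand side; e.g. (illustrative) |x++| is compiled like
// |x + 1| and |x--| like |x - 1|.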
MDefinition* IonBuilder::unaryArithConvertToBinary(JSOp op,
MDefinition::Opcode* defOp) {
switch (op) {
case JSOP_INC: {
*defOp = MDefinition::Opcode::Add;
MConstant* right = MConstant::New(alloc(), Int32Value(1));
current->add(right);
return right;
}
case JSOP_DEC: {
*defOp = MDefinition::Opcode::Sub;
MConstant* right = MConstant::New(alloc(), Int32Value(1));
current->add(right);
return right;
}
default:
MOZ_CRASH("unexpected unary opcode");
}
}
AbortReasonOr<Ok> IonBuilder::unaryArithTrySpecialized(bool* emitted, JSOp op,
MDefinition* value) {
MOZ_ASSERT(*emitted == false);
// Try to convert Inc(x) or Dec(x) to Add(x,1) or Sub(x,1) if the operand is a
// number.
trackOptimizationAttempt(TrackedStrategy::UnaryArith_SpecializedTypes);
if (!IsNumberType(value->type())) {
trackOptimizationOutcome(TrackedOutcome::OperandNotNumber);
return Ok();
}
MDefinition::Opcode defOp;
MDefinition* rhs = unaryArithConvertToBinary(op, &defOp);
MIRType specialization = binaryArithNumberSpecialization(value, rhs);
MOZ_TRY(binaryArithEmitSpecialized(defOp, specialization, value, rhs));
trackOptimizationSuccess();
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::unaryArithTrySpecializedOnBaselineInspector(
bool* emitted, JSOp op, MDefinition* value) {
MOZ_ASSERT(*emitted == false);
// Try to emit a specialized binary instruction speculating the
// type using the baseline caches.
trackOptimizationAttempt(
TrackedStrategy::UnaryArith_SpecializedOnBaselineTypes);
MIRType specialization = inspector->expectedBinaryArithSpecialization(pc);
if (specialization == MIRType::None) {
trackOptimizationOutcome(TrackedOutcome::SpeculationOnInputTypesFailed);
return Ok();
}
MDefinition::Opcode defOp;
MDefinition* rhs = unaryArithConvertToBinary(op, &defOp);
MOZ_TRY(binaryArithEmitSpecialized(defOp, specialization, value, rhs));
trackOptimizationSuccess();
*emitted = true;
return Ok();
}
AbortReasonOr<Ok> IonBuilder::jsop_inc_or_dec(JSOp op) {
bool emitted = false;
MDefinition* value = current->pop();
startTrackingOptimizations();
trackTypeInfo(TrackedTypeSite::Operand, value->type(),
value->resultTypeSet());
MOZ_TRY(unaryArithTrySpecialized(&emitted, op, value));
if (emitted) {
return Ok();
}
MOZ_TRY(unaryArithTrySpecializedOnBaselineInspector(&emitted, op, value));
if (emitted) {
return Ok();
}
trackOptimizationAttempt(TrackedStrategy::UnaryArith_InlineCache);
trackOptimizationSuccess();
MInstruction* stub = MUnaryCache::New(alloc(), value);
current->add(stub);
current->push(stub);
// Decrease type from 'any type' to 'empty type' when one of the operands
// is 'empty typed'.
maybeMarkEmpty(stub);
return resumeAfter(stub);
}
AbortReasonOr<Ok> IonBuilder::jsop_tostring() {
if (current->peek(-1)->type() == MIRType::String) {
return Ok();
}
MDefinition* value = current->pop();
MToString* ins =
MToString::New(alloc(), value, MToString::SideEffectHandling::Supported);
current->add(ins);
current->push(ins);
if (ins->isEffectful()) {
MOZ_TRY(resumeAfter(ins));
}
return Ok();
}
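// RAII helper that redirects the graph's return accumulator to a local list
// for the duration of an inlined build, restoring the previous accumulator
// on destruction.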
class AutoAccumulateReturns {
MIRGraph& graph_;
MIRGraphReturns* prev_;
public:
AutoAccumulateReturns(MIRGraph& graph, MIRGraphReturns& returns)
: graph_(graph) {
prev_ = graph_.returnAccumulator();
graph_.setReturnAccumulator(&returns);
}
~AutoAccumulateReturns() { graph_.setReturnAccumulator(prev_); }
};
IonBuilder::InliningResult IonBuilder::inlineScriptedCall(CallInfo& callInfo,
JSFunction* target) {
MOZ_ASSERT(target->hasScript());
MOZ_ASSERT(IsIonInlinableOp(JSOp(*pc)));
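// Remember the state of |current| so the MIR emitted below can be rolled
// back (via backup.restore()) if we end up not inlining this call.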
MBasicBlock::BackupPoint backup(current);
if (!backup.init(alloc())) {
return abort(AbortReason::Alloc);
}
callInfo.setImplicitlyUsedUnchecked();
// Create new |this| on the caller-side for inlined constructors.
if (callInfo.constructing()) {
MDefinition* thisDefn =
createThis(target, callInfo.fun(), callInfo.getNewTarget());
if (!thisDefn) {
return abort(AbortReason::Alloc);
}
callInfo.setThis(thisDefn);
}
// Capture formals in the outer resume point.
MOZ_TRY(callInfo.pushCallStack(this, current));
MResumePoint* outerResumePoint =
MResumePoint::New(alloc(), current, pc, MResumePoint::Outer);
if (!outerResumePoint) {
return abort(AbortReason::Alloc);
}
current->setOuterResumePoint(outerResumePoint);
// Pop formals again, except leave |fun| on stack for duration of call.
callInfo.popCallStack(current);
current->push(callInfo.fun());
JSScript* calleeScript = target->nonLazyScript();
BaselineInspector inspector(calleeScript);
// Improve type information of |this| when not set.
if (callInfo.constructing() && !callInfo.thisArg()->resultTypeSet()) {
AutoSweepJitScript sweep(calleeScript);
StackTypeSet* types =
calleeScript->jitScript()->thisTypes(sweep, calleeScript);
if (!types->unknown()) {
TemporaryTypeSet* clonedTypes = types->clone(alloc_->lifoAlloc());
if (!clonedTypes) {
return abort(AbortReason::Alloc);
}
MTypeBarrier* barrier =
MTypeBarrier::New(alloc(), callInfo.thisArg(), clonedTypes);
current->add(barrier);
if (barrier->type() == MIRType::Undefined) {
callInfo.setThis(constant(UndefinedValue()));
} else if (barrier->type() == MIRType::Null) {
callInfo.setThis(constant(NullValue()));
} else {
callInfo.setThis(barrier);
}
}
}
// Start inlining.
LifoAlloc* lifoAlloc = alloc_->lifoAlloc();
InlineScriptTree* inlineScriptTree =
info().inlineScriptTree()->addCallee(alloc_, pc, calleeScript);
if (!inlineScriptTree) {
return abort(AbortReason::Alloc);
}
CompileInfo* info = lifoAlloc->new_<CompileInfo>(
runtime, calleeScript, target, (jsbytecode*)nullptr,
this->info().analysisMode(),
/* needsArgsObj = */ false, inlineScriptTree);
if (!info) {
return abort(AbortReason::Alloc);
}
MIRGraphReturns returns(alloc());
AutoAccumulateReturns aar(graph(), returns);
// Build the graph.
IonBuilder inlineBuilder(analysisContext, realm, options, &alloc(), &graph(),
constraints(), &inspector, info, &optimizationInfo(),
nullptr, inliningDepth_ + 1, loopDepth_);
AbortReasonOr<Ok> result =
inlineBuilder.buildInline(this, outerResumePoint, callInfo);
if (result.isErr()) {
if (analysisContext && analysisContext->isExceptionPending()) {
JitSpew(JitSpew_IonAbort, "Inline builder raised exception.");
MOZ_ASSERT(result.unwrapErr() == AbortReason::Error);
return Err(result.unwrapErr());
}
// Inlining the callee failed. Mark the callee as uninlineable only if
// the inlining was aborted for a non-exception reason.
switch (result.unwrapErr()) {
case AbortReason::Disable:
calleeScript->setUninlineable();
if (!JitOptions.disableInlineBacktracking) {
MBasicBlock* block = backup.restore();
if (!block) {
return abort(AbortReason::Alloc);
}
setCurrent(block);
return InliningStatus_NotInlined;
}
return abort(AbortReason::Inlining);
case AbortReason::PreliminaryObjects: {
const ObjectGroupVector& groups =
inlineBuilder.abortedPreliminaryGroups();
MOZ_ASSERT(!groups.empty());
for (size_t i = 0; i < groups.length(); i++) {
addAbortedPreliminaryGroup(groups[i]);
}
return Err(result.unwrapErr());
}
case AbortReason::Alloc:
case AbortReason::Inlining:
case AbortReason::Error:
return Err(result.unwrapErr());
case AbortReason::NoAbort:
MOZ_CRASH("Abort with AbortReason::NoAbort");
return abort(AbortReason::Error);
}
}
if (returns.empty()) {
// Inlining of functions that have no exit is not supported.
calleeScript->setUninlineable();
if (!JitOptions.disableInlineBacktracking) {
MBasicBlock* block = backup.restore();
if (!block) {
return abort(AbortReason::Alloc);
}
setCurrent(block);
return InliningStatus_NotInlined;
}
return abort(AbortReason::Inlining);
}
// Create return block.
jsbytecode* postCall = GetNextPc(pc);
MBasicBlock* returnBlock;
MOZ_TRY_VAR(returnBlock, newBlock(current->stackDepth(), postCall));
graph().addBlock(returnBlock);
returnBlock->setCallerResumePoint(callerResumePoint_);
// Inherit the slots from current and pop |fun|.
returnBlock->inheritSlots(current);
returnBlock->pop();
// Accumulate return values.
MDefinition* retvalDefn = patchInlinedReturns(callInfo, returns, returnBlock);
if (!retvalDefn) {
return abort(AbortReason::Alloc);
}
returnBlock->push(retvalDefn);
// Initialize entry slots now that the stack has been fixed up.
if (!returnBlock->initEntrySlots(alloc())) {
return abort(AbortReason::Alloc);
}
MOZ_TRY(setCurrentAndSpecializePhis(returnBlock));
return InliningStatus_Inlined;
}
MDefinition* IonBuilder::patchInlinedReturn(CallInfo& callInfo,
MBasicBlock* exit,
MBasicBlock* bottom) {
// Replaces the MReturn in the exit block with an MGoto.
MDefinition* rdef = exit->lastIns()->toReturn()->input();
exit->discardLastIns();
// Constructors must be patched by the caller to always return an object.
if (callInfo.constructing()) {
if (rdef->type() == MIRType::Value) {
// Unknown return: dynamically detect objects.
MReturnFromCtor* filter =
MReturnFromCtor::New(alloc(), rdef, callInfo.thisArg());
exit->add(filter);
rdef = filter;
} else if (rdef->type() != MIRType::Object) {
// Known non-object return: force |this|.
rdef = callInfo.thisArg();
}
} else if (callInfo.isSetter()) {
// Setters return their argument, not whatever value is returned.
rdef = callInfo.getArg(0);
}
if (!callInfo.isSetter()) {
rdef = specializeInlinedReturn(rdef, exit);
}
MGoto* replacement = MGoto::New(alloc(), bottom);
exit->end(replacement);
if (!bottom->addPredecessorWithoutPhis(exit)) {
return nullptr;
}
return rdef;
}
MDefinition* IonBuilder::specializeInlinedReturn(MDefinition* rdef,
MBasicBlock* exit) {
// Remove types from the return definition that weren't observed.
TemporaryTypeSet* types = bytecodeTypes(pc);
// An empty or unknown observed typeset carries no extra information.
if (types->empty() || types->unknown()) {
return rdef;
}
// Decide if specializing is needed using the result typeset if available,
// else use the result type.
if (rdef->resultTypeSet()) {
// Don't specialize if return typeset is a subset of the
// observed typeset. The return typeset is already more specific.
if (rdef->resultTypeSet()->isSubset(types)) {
return rdef;
}
} else {
MIRType observedType = types->getKnownMIRType();
// Don't specialize if type is MIRType::Float32 and TI reports
// MIRType::Double. Float is more specific than double.
if (observedType == MIRType::Double && rdef->type() == MIRType::Float32) {
return rdef;
}
// Don't specialize if the types are in accordance, except for MIRType::Value
// and MIRType::Object (when not the unknown object), since the typeset
// contains more specific information.
if (observedType == rdef->type() && observedType != MIRType::Value &&
(observedType != MIRType::Object || types->unknownObject())) {
return rdef;
}
}
setCurrent(exit);
MTypeBarrier* barrier = nullptr;
rdef = addTypeBarrier(rdef, types, BarrierKind::TypeSet, &barrier);
if (barrier) {
barrier->setNotMovable();
}
return rdef;
}
MDefinition* IonBuilder::patchInlinedReturns(CallInfo& callInfo,
MIRGraphReturns& returns,
MBasicBlock* bottom) {
// Replaces MReturns with MGotos, returning the MDefinition
// representing the return value, or nullptr.
MOZ_ASSERT(returns.length() > 0);
if (returns.length() == 1) {
return patchInlinedReturn(callInfo, returns[0], bottom);
}
// Accumulate multiple returns with a phi.
MPhi* phi = MPhi::New(alloc());
if (!phi->reserveLength(returns.length())) {
return nullptr;
}
for (size_t i = 0; i < returns.length(); i++) {
MDefinition* rdef = patchInlinedReturn(callInfo, returns[i], bottom);
if (!rdef) {
return nullptr;
}
phi->addInput(rdef);
}
bottom->addPhi(phi);
return phi;
}
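// Decide whether |targetArg| may be inlined at the current call site. The
// correctness checks (canInlineTarget) run first; everything after that is
// heuristics balancing compile time and graph size against expected wins.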
IonBuilder::InliningDecision IonBuilder::makeInliningDecision(
JSObject* targetArg, CallInfo& callInfo) {
// When there is no target, inlining is impossible.
if (targetArg == nullptr) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNoTarget);
return InliningDecision_DontInline;
}
// Inlining non-function targets is handled by inlineNonFunctionCall().
if (!targetArg->is<JSFunction>()) {
return InliningDecision_Inline;
}
JSFunction* target = &targetArg->as<JSFunction>();
// Never inline during the arguments usage analysis.
if (info().analysisMode() == Analysis_ArgumentsUsage) {
return InliningDecision_DontInline;
}
// Native functions provide their own detection in inlineNativeCall().
if (target->isNative()) {
return InliningDecision_Inline;
}
// Determine whether inlining is possible at the callee site.
InliningDecision decision = canInlineTarget(target, callInfo);
if (decision != InliningDecision_Inline) {
return decision;
}
// Heuristics!
JSScript* targetScript = target->nonLazyScript();
// Callee must not be excessively large.
// This heuristic also applies to the callsite as a whole.
bool offThread = options.offThreadCompilationAvailable();
if (targetScript->length() >
optimizationInfo().inlineMaxBytecodePerCallSite(offThread)) {
trackOptimizationOutcome(TrackedOutcome::CantInlineBigCallee);
return DontInline(targetScript, "Vetoed: callee excessively large");
}
// Callee must have been called a few times to have somewhat stable
// type information, except for definite properties analysis,
// as the caller has not run yet.
if (targetScript->getWarmUpCount() <
optimizationInfo().inliningWarmUpThreshold() &&
!targetScript->jitScript()->ionCompiledOrInlined() &&
info().analysisMode() != Analysis_DefiniteProperties) {
trackOptimizationOutcome(TrackedOutcome::CantInlineNotHot);
JitSpew(JitSpew_Inlining,
"Cannot inline %s:%u:%u: callee is insufficiently hot.",
targetScript->filename(), targetScript->lineno(),
targetScript->column());
return InliningDecision_WarmUpCountTooLow;
}
// Don't inline if the callee is known to inline a lot of code, to avoid
// huge MIR graphs.
uint32_t inlinedBytecodeLength =
targetScript->jitScript()->inlinedBytecodeLength();
if (inlinedBytecodeLength >
optimizationInfo().inlineMaxCalleeInlinedBytecodeLength()) {
trackOptimizationOutcome(
TrackedOutcome::CantInlineBigCalleeInlinedBytecodeLength);
return DontInline(targetScript,
"Vetoed: callee inlinedBytecodeLength is too big");
}
IonBuilder* outerBuilder = outermostBuilder();
// Cap the total bytecode length we inline under a single script, to avoid
// excessive inlining in pathological cases.
size_t totalBytecodeLength =
outerBuilder->inlinedBytecodeLength_ + targetScript->length();
if (totalBytecodeLength > optimizationInfo().inlineMaxTotalBytecodeLength()) {
trackOptimizationOutcome(
TrackedOutcome::CantInlineExceededTotalBytecodeLength);
return DontInline(targetScript,
"Vetoed: exceeding max total bytecode length");
}
// Cap the inlining depth.
uint32_t maxInlineDepth;
if (JitOptions.isSmallFunction(targetScript)) {
maxInlineDepth = optimizationInfo().smallFunctionMaxInlineDepth();
} else {
maxInlineDepth = optimizationInfo().maxInlineDepth();
// Caller must not be excessively large.
if (script()->length() >=
optimizationInfo().inliningMaxCallerBytecodeLength()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineBigCaller);
return DontInline(targetScript, "Vetoed: caller excessively large");
}
}
JitScript* outerJitScript = outermostBuilder()->script()->jitScript();
if (inliningDepth_ >= maxInlineDepth) {
// We hit the depth limit and won't inline this function. Give the
// outermost script a max inlining depth of 0, so that it won't be
// inlined in other scripts. This heuristic is currently only used
// when we're inlining scripts with loops, see the comment below.
// These heuristics only apply to the highest optimization level.
if (isHighestOptimizationLevel()) {
outerJitScript->setMaxInliningDepth(0);
}
trackOptimizationOutcome(TrackedOutcome::CantInlineExceededDepth);
return DontInline(targetScript, "Vetoed: exceeding allowed inline depth");
}
// Inlining functions with loops can be complicated. For instance, if we're
// close to the inlining depth limit and we inline the function f below, we
// can no longer inline the call to g:
//
// function f() {
// while (cond) {
// g();
// }
// }
//
// If the loop has many iterations, it's more efficient to call f and inline
// g in f.
//
// To avoid this problem, we record a separate max inlining depth for each
// script, indicating at which depth we won't be able to inline all functions
// we inlined this time. This solves the issue above, because we will only
// inline f if it means we can also inline g.
//
// These heuristics only apply to the highest optimization level: other tiers
// do very little inlining and performance is not as much of a concern there.
if (isHighestOptimizationLevel() && targetScript->hasLoops() &&
inliningDepth_ >= targetScript->jitScript()->maxInliningDepth()) {
trackOptimizationOutcome(TrackedOutcome::CantInlineExceededDepth);
return DontInline(targetScript,
"Vetoed: exceeding allowed script inline depth");
}
// Update the max depth at which we can inline the outer script.
MOZ_ASSERT(maxInlineDepth > inliningDepth_);
uint32_t scriptInlineDepth = maxInlineDepth - inliningDepth_ - 1;
if (scriptInlineDepth < outerJitScript->maxInliningDepth() &&
isHighestOptimizationLevel()) {
outerJitScript->setMaxInliningDepth(scriptInlineDepth);
}
// End of heuristics, we will inline this function.
outerBuilder->inlinedBytecodeLength_ += targetScript->length();
return InliningDecision_Inline;
}
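// For a polymorphic call site, decide per target whether it is inlineable,
// record the decisions in |choiceSet|, and count the winners in
// |*numInlineable|.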
AbortReasonOr<Ok> IonBuilder::selectInliningTargets(
const InliningTargets& targets, CallInfo& callInfo, BoolVector& choiceSet,
uint32_t* numInlineable) {
*numInlineable = 0;
uint32_t totalSize = 0;
// For each target, ask whether it may be inlined.
if (!choiceSet.reserve(targets.length())) {
return abort(AbortReason::Alloc);
}
// Don't inline polymorphic sites during the definite properties analysis.
// AddClearDefiniteFunctionUsesInScript depends on this for correctness.
if (info().analysisMode() == Analysis_DefiniteProperties &&
targets.length() > 1) {
return Ok();
}
for (size_t i = 0; i < targets.length(); i++) {
JSObject* target = targets[i].target;
trackOptimizationAttempt(TrackedStrategy::Call_Inline);
trackTypeInfo(TrackedTypeSite::Call_Target, target);
bool inlineable;
InliningDecision decision = makeInliningDecision(target, callInfo);
switch (decision) {
case InliningDecision_Error:
return abort(AbortReason::Error);
case InliningDecision_DontInline:
case InliningDecision_WarmUpCountTooLow:
inlineable = false;
break;
case InliningDecision_Inline:
inlineable = true;
break;
default:
MOZ_CRASH("Unhandled InliningDecision value!");
}
if (target->is<JSFunction>()) {
// Enforce a maximum inlined bytecode limit at the callsite.
if (inlineable && target->as<JSFunction>().isInterpreted()) {
totalSize += target->as<JSFunction>().nonLazyScript()->length();
bool offThread = options.offThreadCompilationAvailable();
if (totalSize >
optimizationInfo().inlineMaxBytecodePerCallSite(offThread)) {
inlineable = false;
}
}
} else {
// Non-function targets are not supported by polymorphic inlining.
inlineable = false;
}
// Only use a group guard and inline the target if we will recompile when
// the target function gets a new group.
if (inlineable && targets[i].group) {
ObjectGroup* group = targets[i].group;
TypeSet::ObjectKey* key = TypeSet::ObjectKey::get(group);
if (!key->hasStableClassAndProto(constraints())) {
inlineable = false;
}
}
choiceSet.infallibleAppend(inlineable);
if (inlineable) {
*numInlineable += 1;
}
}
// If optimization tracking is turned on and one of the inlineable targets
// is a native, track the type info of the call. Most native inlinings
// depend on the types of the arguments and the return value.
if (isOptimizationTrackingEnabled()) {
for (size_t i = 0; i < targets.length(); i++) {
if (choiceSet[i] && targets[i].target->as<JSFunction>().isNative()) {
trackTypeInfo(callInfo);
break;
}
}
}
MOZ_ASSERT(choiceSet.length() == targets.length());
return Ok();
}
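// A GetPropertyCache is only usable for polymorphic inlining if it operates
// on the call's |this| object and its inline property table has at least one
// entry.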
static bool CanInlineGetPropertyCache(MGetPropertyCache* cache,
MDefinition* thisDef) {
if (cache->value()->type() != MIRType::Object) {
return false;
}
if (cache->value() != thisDef) {
return false;
}
InlinePropertyTable* table = cache->propTable();
if (!table) {
return false;
}
if (table->numEntries() == 0) {
return false;
}
return true;
}
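// Wrapper owning an MGetPropertyCache candidate during inlining decisions.
// If the cache is not taken via moveableCache(), the destructor discards the
// pre-allocated prior resume point so it does not linger in the graph.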
class WrapMGetPropertyCache {
MGetPropertyCache* cache_;
private:
void discardPriorResumePoint() {
if (!cache_) {
return;
}
InlinePropertyTable* propTable = cache_->propTable();
if (!propTable) {
return;
}
MResumePoint* rp = propTable->takePriorResumePoint();
if (!rp) {
return;
}
cache_->block()->discardPreAllocatedResumePoint(rp);
}
public:
explicit WrapMGetPropertyCache(MGetPropertyCache* cache) : cache_(cache) {}
~WrapMGetPropertyCache() { discardPriorResumePoint(); }
MGetPropertyCache* get() { return cache_; }
MGetPropertyCache* operator->() { return get(); }
// This function returns the cache given to the constructor if the
// GetPropertyCache can be moved into the ObjectGroup fallback path.
MGetPropertyCache* moveableCache(bool hasTypeBarrier, MDefinition* thisDef) {
// If we have unhandled uses of the MGetPropertyCache, then we cannot
// move it to the ObjectGroup fallback path.
if (!hasTypeBarrier) {
if (cache_->hasUses()) {
return nullptr;
}
} else {
// There is the TypeBarrier consumer, so we check that this is the
// only consumer.
MOZ_ASSERT(cache_->hasUses());
if (!cache_->hasOneUse()) {
return nullptr;
}
}
// If the this-object is not identical to the object of the
// MGetPropertyCache, or if we do not yet have enough information from the
// ObjectGroup, then we cannot use the InlinePropertyTable.
if (!CanInlineGetPropertyCache(cache_, thisDef)) {
return nullptr;
}
MGetPropertyCache* ret = cache_;
cache_ = nullptr;
return ret;
}
};
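// If the callee of this (non-constructing) call was produced by an
// MGetPropertyCache, possibly through a type barrier, return the cache when
// it can be moved into the dispatch fallback path; otherwise return nullptr.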
MGetPropertyCache* IonBuilder::getInlineableGetPropertyCache(
CallInfo& callInfo) {
if (callInfo.constructing()) {
return nullptr;
}
MDefinition* thisDef = callInfo.thisArg();
if (thisDef->type() != MIRType::Object) {
return nullptr;
}
MDefinition* funcDef = callInfo.fun();
if (funcDef->type() != MIRType::Object) {
return nullptr;
}
// MGetPropertyCache with no uses may be optimized away.
if (funcDef->isGetPropertyCache()) {
WrapMGetPropertyCache cache(funcDef->toGetPropertyCache());
return cache.moveableCache(/* hasTypeBarrier = */ false, thisDef);
}
// Optimize away the following common pattern:
// MTypeBarrier[MIRType::Object] <- MGetPropertyCache
if (funcDef->isTypeBarrier()) {
MTypeBarrier* barrier = funcDef->toTypeBarrier();
if (barrier->hasUses()) {
return nullptr;
}
if (barrier->type() != MIRType::Object) {
return nullptr;
}
if (!barrier->input()->isGetPropertyCache()) {
return nullptr;
}
WrapMGetPropertyCache cache(barrier->input()->toGetPropertyCache());
return cache.moveableCache(/* hasTypeBarrier = */ true, thisDef);
}
return nullptr;
}
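// Inline a call with exactly one known target, dispatching to the
// non-function, native, or scripted inlining path as appropriate.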
IonBuilder::InliningResult IonBuilder::inlineSingleCall(CallInfo& callInfo,
JSObject* targetArg) {
InliningStatus status;
if (!targetArg->is<JSFunction>()) {
MOZ_TRY_VAR(status, inlineNonFunctionCall(callInfo, targetArg));
trackInlineSuccess(status);
return status;
}
JSFunction* target = &targetArg->as<JSFunction>();
if (target->isNative()) {
MOZ_TRY_VAR(status, inlineNativeCall(callInfo, target));
trackInlineSuccess(status);
return status;
}
// Track success now, as inlining a scripted call makes a new return block
// which has a different pc than the current call pc.
trackInlineSuccess();
return inlineScriptedCall(callInfo, target);
}
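// Entry point for inlining a call site: handles the single-target case
// directly and defers polymorphic sites to selectInliningTargets() and
// inlineCalls().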
IonBuilder::InliningResult IonBuilder::inlineCallsite(
const InliningTargets& targets, CallInfo& callInfo) {
if (targets.empty()) {
trackOptimizationAttempt(TrackedStrategy::Call_Inline);
trackOptimizationOutcome(TrackedOutcome::CantInlineNoTarget);
return InliningStatus_NotInlined;
}
// Is the function provided by an MGetPropertyCache?
// If so, the cache may be movable to a fallback path, with a dispatch
// instruction guarding on the incoming ObjectGroup.
WrapMGetPropertyCache propCache(getInlineableGetPropertyCache(callInfo));
keepFallbackFunctionGetter(propCache.get());
// Inline single targets -- unless they derive from a cache, in which case
// avoiding the cache and guarding is still faster.
if (!propCache.get() && targets.length() == 1) {
JSObject* target = targets[0].target;
trackOptimizationAttempt(TrackedStrategy::Call_Inline);
trackTypeInfo(TrackedTypeSite::Call_Target, target);
InliningDecision decision = makeInliningDecision(target, callInfo);
switch (decision) {
case InliningDecision_Error:
return abort(AbortReason::Error);
case InliningDecision_DontInline:
return InliningStatus_NotInlined;
case InliningDecision_WarmUpCountTooLow:
return InliningStatus_WarmUpCountTooLow;
case InliningDecision_Inline:
break;
}
// Inlining will eliminate uses of the original callee, but it needs to
// be preserved in phis if we bail out. Mark the old callee definition as
// implicitly used to ensure this happens.
callInfo.fun()->setImplicitlyUsedUnchecked();
// If the callee is not going to be a lambda (which may vary across
// different invocations), then the callee definition can be replaced by a
// constant.
if (target->isSingleton()) {
// Replace the function with an MConstant.
MConstant* constFun = constant(ObjectValue(*target));
if (callInfo.constructing() &&
callInfo.getNewTarget() == callInfo.fun()) {
callInfo.setNewTarget(constFun);
}
callInfo.setFun(constFun);
}
return inlineSingleCall(callInfo, target);
}
// Choose a subset of the targets for polymorphic inlining.
BoolVector choiceSet(alloc());
uint32_t numInlined;
MOZ_TRY(selectInliningTargets(targets, callInfo, choiceSet, &numInlined));
if (numInlined == 0) {
return InliningStatus_NotInlined;
}
// Perform a polymorphic dispatch.
MOZ_TRY(inlineCalls(callInfo, targets, choiceSet, propCache.get()));
return InliningStatus_Inlined;
}
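// Emit the generic (uninlined) call path for targets that were not inlined:
// a new block that pops the call arguments and performs a regular MCall,
// leaving the call's return block as |current|.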
AbortReasonOr<Ok> IonBuilder::inlineGenericFallback(
const Maybe<CallTargets>& targets, CallInfo& callInfo,
MBasicBlock* dispatchBlock) {
// Generate a new block with all arguments on-stack.
MBasicBlock* fallbackBlock;
MOZ_TRY_VAR(fallbackBlock, newBlock(dispatchBlock, pc));
graph().addBlock(fallbackBlock);
// Create a new CallInfo to track modified state within this block.
CallInfo fallbackInfo(alloc(), pc, callInfo.constructing(),
callInfo.ignoresReturnValue());
if (!fallbackInfo.init(callInfo)) {
return abort(AbortReason::Alloc);
}
fallbackInfo.popCallStack(fallbackBlock);
// Generate an MCall, which uses stateful |current|.
MOZ_TRY(setCurrentAndSpecializePhis(fallbackBlock));
MOZ_TRY(makeCall(targets, fallbackInfo));
// Pass return block to caller as |current|.
return Ok();
}
AbortReasonOr<Ok> IonBuilder::inlineObjectGroupFallback(
const Maybe<CallTargets>& targets, CallInfo& callInfo,
MBasicBlock* dispatchBlock, MObjectGroupDispatch* dispatch,
MGetPropertyCache* cache, MBasicBlock** fallbackTarget) {
// Getting here implies the following:
// 1. The call function is an MGetPropertyCache, or an MGetPropertyCache
// followed by an MTypeBarrier.
MOZ_ASSERT(callInfo.fun()->isGetPropertyCache() ||
callInfo.fun()->isTypeBarrier());
// 2. The MGetPropertyCache has inlineable cases by guarding on the
// ObjectGroup.
MOZ_ASSERT(dispatch->numCases() > 0);
// 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only
// have at most a single use.
MOZ_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses());
MOZ_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse());
// This means that no resume points yet capture the MGetPropertyCache,
// so everything from the MGetPropertyCache up until the call is movable.
// We now move the MGetPropertyCache and friends into a fallback path.
MOZ_ASSERT(cache->idempotent());
// Create a new CallInfo to track modified state within the fallback path.
CallInfo fallbackInfo(alloc(), pc, callInfo.constructing(),
callInfo.ignoresReturnValue());
if (!fallbackInfo.init(callInfo)) {
return abort(AbortReason::Alloc);
}
// Capture stack prior to the call operation. This captures the function.
MResumePoint* preCallResumePoint =
MResumePoint::New(alloc(), dispatchBlock, pc, MResumePoint::ResumeAt);
if (!preCallResumePoint) {
return abort(AbortReason::Alloc);
}
DebugOnly<size_t> preCallFuncIndex =
preCallResumePoint->stackDepth() - callInfo.numFormals();
MOZ_ASSERT(preCallResumePoint->getOperand(preCallFuncIndex) ==
fallbackInfo.fun());
// In the dispatch block, replace the function's slot entry with Undefined.
MConstant* undefined = MConstant::New(alloc(), UndefinedValue());
dispatchBlock->add(undefined);
dispatchBlock->rewriteAtDepth(-int(callInfo.numFormals()), undefined);
// Construct a block that does nothing but remove formals from the stack.
// This is effectively changing the entry resume point of the later fallback
// block.
MBasicBlock* prepBlock;
MOZ_TRY_VAR(prepBlock, newBlock(dispatchBlock, pc));
graph().addBlock(prepBlock);
fallbackInfo.popCallStack(prepBlock);
// Construct a block into which the MGetPropertyCache can be moved.
// This is subtle: the pc and resume point are those of the MGetPropertyCache!
InlinePropertyTable* propTable = cache->propTable();
MResumePoint* priorResumePoint = propTable->takePriorResumePoint();
MOZ_ASSERT(propTable->pc() != nullptr);
MOZ_ASSERT(priorResumePoint != nullptr);
MBasicBlock* getPropBlock;
MOZ_TRY_VAR(getPropBlock,
newBlock(prepBlock, propTable->pc(), priorResumePoint));
graph().addBlock(getPropBlock);
prepBlock->end(MGoto::New(alloc(), getPropBlock));
// Since the getPropBlock inherited the stack from right before the
// MGetPropertyCache, the target of the MGetPropertyCache is still on the
// stack.
DebugOnly<MDefinition*> checkObject = getPropBlock->pop();
MOZ_ASSERT(checkObject == cache->value());
// Move the MGetPropertyCache and friends into the getPropBlock.
if (fallbackInfo.fun()->isGetPropertyCache()) {
MOZ_ASSERT(fallbackInfo.fun()->toGetPropertyCache() == cache);
getPropBlock->addFromElsewhere(cache);
getPropBlock->push(cache);
} else {
MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
MOZ_ASSERT(barrier->type() == MIRType::Object);
MOZ_ASSERT(barrier->input()->isGetPropertyCache());
MOZ_ASSERT(barrier->input()->toGetPropertyCache() == cache);
getPropBlock->addFromElsewhere(cache);
getPropBlock->addFromElsewhere(barrier);
getPropBlock->push(barrier);
}
// Construct an end block with the correct resume point.
MBasicBlock* preCallBlock;
MOZ_TRY_VAR(preCallBlock, newBlock(getPropBlock, pc, preCallResumePoint));
graph().addBlock(preCallBlock);
getPropBlock->end(MGoto::New(alloc(), preCallBlock));
// Now inline the MCallGeneric, using preCallBlock as the dispatch point.
MOZ_TRY(inlineGenericFallback(targets, fallbackInfo, preCallBlock));
// inlineGenericFallback() set the return block as |current|.
preCallBlock->end(MGoto::New(alloc(), current));
*fallbackTarget = prepBlock;
return Ok();
}
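// Polymorphic inlining: emit a dispatch instruction that guards on the callee
// (by function identity, or by ObjectGroup when the callee comes from an
// MGetPropertyCache), with one inlined path per chosen target and an optional
// fallback path for the remaining cases. All paths rejoin in a return block
// holding the rval-collecting phi.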
AbortReasonOr<Ok> IonBuilder::inlineCalls(CallInfo& callInfo,
const InliningTargets& targets,
BoolVector& choiceSet,
MGetPropertyCache* maybeCache) {
// Only handle polymorphic inlining.
MOZ_ASSERT(IsIonInlinableOp(JSOp(*pc)));
MOZ_ASSERT(choiceSet.length() == targets.length());
MOZ_ASSERT_IF(!maybeCache, targets.length() >= 2);
MOZ_ASSERT_IF(maybeCache, targets.length() >= 1);
MOZ_ASSERT_IF(maybeCache, maybeCache->value()->type() == MIRType::Object);
MBasicBlock* dispatchBlock = current;
callInfo.setImplicitlyUsedUnchecked();
MOZ_TRY(callInfo.pushCallStack(this, dispatchBlock));
// Patch any InlinePropertyTable to only contain functions that are
// inlineable. The InlinePropertyTable will also be patched at the end to
// exclude native functions that vetoed inlining.
if (maybeCache) {
InlinePropertyTable* propTable = maybeCache->propTable();
propTable->trimToTargets(targets);
if (propTable->numEntries() == 0) {
maybeCache = nullptr;
}
}
// Generate a dispatch based on guard kind.
MDispatchInstruction* dispatch;
if (maybeCache) {
dispatch = MObjectGroupDispatch::New(alloc(), maybeCache->value(),
maybeCache->propTable());
callInfo.fun()->setImplicitlyUsedUnchecked();
} else {
dispatch = MFunctionDispatch::New(alloc(), callInfo.fun());
}
MOZ_ASSERT(dispatchBlock->stackDepth() >= callInfo.numFormals());
uint32_t stackDepth = dispatchBlock->stackDepth() - callInfo.numFormals() + 1;
// Generate a return block to host the rval-collecting MPhi.
jsbytecode* postCall = GetNextPc(pc);
MBasicBlock* returnBlock;
MOZ_TRY_VAR(returnBlock, newBlock(stackDepth, postCall));
graph().addBlock(returnBlock);
returnBlock->setCallerResumePoint(callerResumePoint_);
// Set up stack, used to manually create a post-call resume point.
returnBlock->inheritSlots(dispatchBlock);
callInfo.popCallStack(returnBlock);
MPhi* retPhi = MPhi::New(alloc());
returnBlock->addPhi(retPhi);
returnBlock->push(retPhi);
// Create a resume point from current stack state.
if (!returnBlock->initEntrySlots(alloc())) {
return abort(AbortReason::Alloc);
}
// Reserve the capacity for the phi.
// Note: this is an upper bound. Unreachable targets and uninlineable
// natives are also counted.
uint32_t count = 1; // Possible fallback block.
for (uint32_t i = 0; i < targets.length(); i++) {
if (choiceSet[i]) {
count++;
}
}
if (!retPhi->reserveLength(count)) {
return abort(AbortReason::Alloc);
}
// Inline each of the inlineable targets.
for (uint32_t i = 0; i < targets.length(); i++) {
// Target must be inlineable.
if (!choiceSet[i]) {
continue;
}
// Even though we made one round of inline decisions already, we may
// be amending them below.
amendOptimizationAttempt(i);
// Target must be reachable by the MDispatchInstruction.
JSFunction* target = &targets[i].target->as<JSFunction>();
if (maybeCache && !maybeCache->propTable()->hasFunction(target)) {
choiceSet[i] = false;
trackOptimizationOutcome(TrackedOutcome::CantInlineNotInDispatch);
continue;
}
MBasicBlock* inlineBlock;
MOZ_TRY_VAR(inlineBlock, newBlock(dispatchBlock, pc));
graph().addBlock(inlineBlock);
// Create a function MConstant to use in the entry ResumePoint. If we
// can't use a constant, add a no-op MPolyInlineGuard, to prevent
// hoisting env chain gets above the dispatch instruction.
MInstruction* funcDef;
if (target->isSingleton()) {
funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints());
} else {
funcDef = MPolyInlineGuard::New(alloc(), callInfo.fun());
}
funcDef->setImplicitlyUsedUnchecked();
dispatchBlock->add(funcDef);
// Use the inlined callee in the inline resume point and on stack.
int funIndex =
inlineBlock->entryResumePoint()->stackDepth() - callInfo.numFormals();
inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef);
inlineBlock->rewriteSlot(funIndex, funcDef);
// Create a new CallInfo to track modified state within the inline block.
CallInfo inlineInfo(alloc(), pc, callInfo.constructing(),
callInfo.ignoresReturnValue());
if (!inlineInfo.init(callInfo)) {
return abort(AbortReason::Alloc);
}
inlineInfo.popCallStack(inlineBlock);
inlineInfo.setFun(funcDef);
if (callInfo.constructing() && callInfo.getNewTarget() == callInfo.fun()) {
inlineInfo.setNewTarget(funcDef);
}
if (maybeCache) {
// Assign the 'this' value a TypeSet specialized to the groups that
// can generate this inlining target.
MOZ_ASSERT(callInfo.thisArg() == maybeCache->value());
TemporaryTypeSet* thisTypes =
maybeCache->propTable()->buildTypeSetForFunction(alloc(), target);
if (!thisTypes) {
return abort(AbortReason::Alloc);
}
MFilterTypeSet* filter =
MFilterTypeSet::New(alloc(), inlineInfo.thisArg(), thisTypes);
inlineBlock->add(filter);
inlineInfo.setThis(filter);
}
// Inline the call into the inlineBlock.
MOZ_TRY(setCurrentAndSpecializePhis(inlineBlock));
InliningStatus status;
MOZ_TRY_VAR(status, inlineSingleCall(inlineInfo, target));
// Natives may veto inlining.
if (status == InliningStatus_NotInlined) {
MOZ_ASSERT(current == inlineBlock);
graph().removeBlock(inlineBlock);
choiceSet[i] = false;
continue;
}
// inlineSingleCall() changed |current| to the inline return block.
MBasicBlock* inlineReturnBlock = current;
setCurrent(dispatchBlock);
// Connect the inline path to the returnBlock.
if (!dispatch->addCase(target, targets[i].group, inlineBlock)) {
return abort(AbortReason::Alloc);
}
MDefinition* retVal = inlineReturnBlock->peek(-1);
retPhi->addInput(retVal);
inlineReturnBlock->end(MGoto::New(alloc(), returnBlock));
if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock)) {
return abort(AbortReason::Alloc);
}
}
// Patch the InlinePropertyTable to not dispatch to vetoed paths.
bool useFallback;
if (maybeCache) {
InlinePropertyTable* propTable = maybeCache->propTable();
propTable->trimTo(targets, choiceSet);
if (propTable->numEntries() == 0 || !propTable->hasPriorResumePoint()) {
// Output a generic fallback path.
MOZ_ASSERT_IF(propTable->numEntries() == 0, dispatch->numCases() == 0);
maybeCache = nullptr;
useFallback = true;
} else {
// We need a fallback path if the ObjectGroup dispatch does not
// handle all incoming objects.
useFallback = false;
TemporaryTypeSet* objectTypes = maybeCache->value()->resultTypeSet();
for (uint32_t i = 0; i < objectTypes->getObjectCount(); i++) {
TypeSet::ObjectKey* obj = objectTypes->getObject(i);
if (!obj) {
continue;
}
if (!obj->isGroup()) {
useFallback = true;
break;
}
if (!propTable->hasObjectGroup(obj->group())) {
useFallback = true;
break;
}
}
if (!useFallback) {
// The object group dispatch handles all possible incoming
// objects, so the cache and barrier will not be reached and
// can be eliminated.
if (callInfo.fun()->isGetPropertyCache()) {
MOZ_ASSERT(callInfo.fun() == maybeCache);
} else {
MTypeBarrier* barrier = callInfo.fun()->toTypeBarrier();
MOZ_ASSERT(!barrier->hasUses());
MOZ_ASSERT(barrier->type() == MIRType::Object);
MOZ_ASSERT(barrier->input()->isGetPropertyCache());
MOZ_ASSERT(barrier->input()->toGetPropertyCache() == maybeCache);
barrier->block()->discard(barrier);
}
MOZ_ASSERT(!maybeCache->hasUses());
maybeCache->block()->discard(maybeCache);
}
}
} else {
useFallback = dispatch->numCases() < targets.length();
}
// If necessary, generate a fallback path.
if (useFallback) {
// Annotate the fallback call with the target information.
Maybe<CallTargets> remainingTargets;
remainingTargets.emplace(alloc());
for (uint32_t i = 0; i < targets.length(); i++) {
if (!maybeCache && choiceSet[i]) {
continue;
}
JSObject* target = targets[i].target;
if (!target->is<JSFunction>()) {
remainingTargets = Nothing();
break;
}
if (!remainingTargets->append(&target->as<JSFunction>())) {
return abort(AbortReason::Alloc);
}
}
// Generate fallback blocks, and set |current| to the fallback return block.
if (maybeCache) {
MBasicBlock* fallbackTarget;
MOZ_TRY(inlineObjectGroupFallback(
remainingTargets, callInfo, dispatchBlock,
dispatch->toObjectGroupDispatch(), maybeCache, &fallbackTarget));
dispatch->addFallback(fallbackTarget);
} else {
MOZ_TRY(inlineGenericFallback(remainingTargets, callInfo, dispatchBlock));
dispatch->addFallback(current);
}
MBasicBlock* fallbackReturnBlock = current;
// Connect fallback case to return infrastructure.
MDefinition* retVal = fallbackReturnBlock->peek(-1);
retPhi->addInput(retVal);
fallbackReturnBlock->end(MGoto::New(alloc(), returnBlock));
if (!returnBlock->addPredecessorWithoutPhis(fallbackReturnBlock)) {
return abort(AbortReason::Alloc);
}
}
// Finally add the dispatch instruction.
// This must be done at the end so that add() may be called above.
dispatchBlock->end(dispatch);
// Check the depth change: +1 for retval
MOZ_ASSERT(returnBlock->stackDepth() ==
dispatchBlock->stackDepth() - callInfo.numFormals() + 1);
graph().moveBlockToEnd(returnBlock);
return setCurrentAndSpecializePhis(returnBlock);
}
MInstruction* IonBuilder::createNamedLambdaObject(MDefinition* callee,
MDefinition* env) {
// Get a template CallObject that we'll use to generate inline object
// creation.
LexicalEnvironmentObject* templateObj =
inspector->templateNamedLambdaObject();
// One slot on the object holds the function itself, implementing the named
// lambda binding. This cannot be a dynamic slot because there is still
// plenty of room on the NamedLambda object.
MOZ_ASSERT(!templateObj->hasDynamicSlots());
// Allocate the actual object. It is important that no intervening
// instructions could potentially bail out, which would leak the dynamic
// slots pointer.
MInstruction* declEnvObj = MNewNamedLambdaObject::New(alloc(), templateObj);
current->add(declEnvObj);
// Initialize the object's reserved slots. No post barrier is needed here:
// the object will be allocated in the nursery if possible, and if the
// tenured heap is used instead, a minor collection will have been performed
// that moved env/callee to the tenured heap.
current->add(MStoreFixedSlot::New(
alloc(), declEnvObj, NamedLambdaObject::enclosingEnvironmentSlot(), env));
current->add(MStoreFixedSlot::New(alloc(), declEnvObj,
NamedLambdaObject::lambdaSlot(), callee));
return declEnvObj;
}
AbortReasonOr<MInstruction*> IonBuilder::createCallObject(MDefinition* callee,
MDefinition* env) {
// Get a template CallObject that we'll use to generate inline object
// creation.
CallObject* templateObj = inspector->templateCallObject();
MConstant* templateCst =
MConstant::NewConstraintlessObject(alloc(), templateObj);
current->add(templateCst);
// Allocate the object.
MNewCallObject* callObj = MNewCallObject::New(alloc(), templateCst);
current->add(callObj);
// Initialize the object's reserved slots. No post barrier is needed here,
// for the same reason as in createNamedLambdaObject.
current->add(MStoreFixedSlot::New(
alloc(), callObj, CallObject::enclosingEnvironmentSlot(), env));
current->add(
MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee));
// Copy the closed-over argument slots. When the function has parameter
// expressions, the slots are initialized with the uninitialized-lexical
// magic value instead of the formals' values.
MSlots* slots = nullptr;
for (PositionalFormalParameterIter fi(script()); fi; fi++) {
if (!fi.closedOver()) {
continue;
}
if (!alloc().ensureBallast()) {
return abort(AbortReason::Alloc);
}
unsigned slot = fi.location().slot();
unsigned formal = fi.argumentSlot();
unsigned numFixedSlots = templateObj->numFixedSlots();
MDefinition* param;
if (script()->functionHasParameterExprs()) {
param = constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
} else {
param = current->getSlot(info().argSlotUnchecked(formal));
}
if (slot >= numFixedSlots) {
if (!slots) {
slots = MSlots::New(alloc(), callObj);
current->add(slots);
}
current->add(
MStoreSlot::New(alloc(), slots, slot - numFixedSlots, param));
} else {
current->add(MStoreFixedSlot::New(alloc(), callObj, slot, param));
}
}
return AbortReasonOr<MInstruction*>(callObj);
}
MDefinition* IonBuilder::createThisScripted(MDefinition* callee,
MDefinition* newTarget) {
// Get callee.prototype.
//
// This instruction MUST be idempotent: since it does not correspond to an
// explicit operation in the bytecode, we cannot use resumeAfter().
// Getters may not override |prototype| fetching, so this operation is
// indeed idempotent.
// - First try an idempotent property cache.
// - If the idempotent property cache has been invalidated, we can't use a
//   non-idempotent cache, so we fall back to MCallGetProperty.
//
// Note: both CallGetProperty and GetPropertyCache can trigger a GC,
// and thus invalidation.
MInstruction* getProto;
if (!invalidatedIdempotentCache()) {
MConstant* id = constant(StringValue(names().prototype));
MGetPropertyCache* getPropCache =
MGetPropertyCache::New(alloc(), newTarget, id,
/* monitored = */ false);
getPropCache->setIdempotent();
getProto = getPropCache;
} else {
MCallGetProperty* callGetProp =
MCallGetProperty::New(alloc(), newTarget, names().prototype);
callGetProp->setIdempotent();
getProto = callGetProp;
}
current->add(getProto);
// Create |this| from the prototype.
MCreateThisWithProto* createThis =
MCreateThisWithProto::New(alloc(), callee, newTarget, getProto);
current->add(createThis);
return createThis;
}
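// Query type information for the target's 'prototype' property; return the
// prototype if it is known to be a constant singleton object, else nullptr
// (also nullptr when the target's properties are unknown).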
JSObject* IonBuilder::getSingletonPrototype(JSFunction* target) {
TypeSet::ObjectKey* targetKey = TypeSet::ObjectKey::get(target);
if (targetKey->unknownProperties()) {
return nullptr;
}
jsid protoid = NameToId(names().prototype);
HeapTypeSetKey protoProperty = targetKey->property(protoid);
return protoProperty.singleton(constraints());
}
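// Try to create |this| inline from a template object when the target's
// 'prototype' is a known singleton and Baseline allocated a matching
// template; returns nullptr whenever any required invariant cannot be
// verified.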
MDefinition* IonBuilder::createThisScriptedSingleton(JSFunction* target) {
if (!target->hasScript()) {
return nullptr;
}
// Get the singleton prototype (if it exists).
JSObject* proto = getSingletonPrototype(target);
if (!proto) {
return nullptr;
}
JSObject* templateObject = inspector->getTemplateObject(pc);
if (!templateObject) {
return nullptr;
}
if (!templateObject->is<PlainObject>()) {
return nullptr;
}
if (templateObject->staticPrototype() != proto) {
return nullptr;
}
if (templateObject->nonCCWRealm() != target->realm()) {
return nullptr;
}
TypeSet::ObjectKey* templateObjectKey =
TypeSet::ObjectKey::get(templateObject->group());
if (templateObjectKey->hasFlags(constraints(),
OBJECT_FLAG_NEW_SCRIPT_CLEARED)) {
return nullptr;
}
JSScript* targetScript = target->nonLazyScript();
JitScript* jitScript = targetScript->maybeJitScript();
if (!jitScript) {
return nullptr;
}
AutoSweepJitScript sweep(targetScript);
StackTypeSet* thisTypes = jitScript->thisTypes(sweep, targetScript);
if (!thisTypes->hasType(TypeSet::ObjectType(templateObject))) {
return nullptr;
}
// Generate an inline path to create a new |this| object with
// the given singleton prototype.
MConstant* templateConst =
MConstant::NewConstraintlessObject(alloc(), templateObject);
MCreateThisWithTemplate* createThis = MCreateThisWithTemplate::New(
alloc(), constraints(), templateConst,
templateObject->group()->initialHeap(constraints()));
current->add(templateConst);
current->add(createThis);
return createThis;
}
MDefinition* IonBuilder::createThisScriptedBaseline(MDefinition* callee) {
// Try to inline |this| creation based on Baseline feedback.
JSFunction* target = inspector->getSingleCallee(pc);
if (!target || !target->hasScript()) {
return nullptr;
}
if (target->isBoundFunction() || target->isDerivedClassConstructor()) {
return nullptr;
}
JSObject* templateObject = inspector->getTemplateObject(pc);
if (!templateObject) {
return nullptr;
}
if (!templateObject->is<PlainObject>()) {
return nullptr;
}
if (templateObject->nonCCWRealm() != target->realm()) {
return nullptr;
}
Shape* shape = target->lookupPure(realm->runtime()->names().prototype);
if (!shape || !shape->isDataProperty()) {
return nullptr;
}
Value protov = target->getSlot(shape->slot());
if (!protov.isObject()) {
return nullptr;
}
JSObject* proto = checkNurseryObject(&protov.toObject());
if (proto != templateObject->staticPrototype()) {
return nullptr;
}
TypeSet::ObjectKey* templateObjectKey =
TypeSet::ObjectKey::get(templateObject->group());
if (templateObjectKey->hasFlags(constraints(),
OBJECT_FLAG_NEW_SCRIPT_CLEARED)) {
return nullptr;
}
JSScript* targetScript = target->nonLazyScript();
JitScript* jitScript = targetScript->maybeJitScript();
if (!jitScript) {
return nullptr;
}
AutoSweepJitScript sweep(targetScript);
StackTypeSet* thisTypes = jitScript->thisTypes(sweep, targetScript);
if (!thisTypes->hasType(TypeSet::ObjectType(templateObject))) {
return nullptr;
}
// Shape guard.
callee = addShapeGuard(callee, target->lastProperty(), Bailout_ShapeGuard);
// Guard callee.prototype == proto.
MOZ_ASSERT(shape->numFixedSlots() == 0, "Must be a dynamic slot");
MSlots* slots = MSlots::New(alloc(), callee);
current->add(slots);
MLoadSlot* prototype = MLoadSlot::New(alloc(), slots, shape->slot());
current->add(prototype);
MDefinition* protoConst = constant(ObjectValue(*proto));
MGuardObjectIdentity* guard =
MGuardObjectIdentity::New(alloc(), prototype, protoConst,
/* bailOnEquality = */ false);
current->add(guard);
// Generate an inline path to create a new |this| object with
// the given prototype.
MConstant* templateConst =
MConstant::NewConstraintlessObject(alloc(), templateObject);
MCreateThisWithTemplate* createThis = MCreateThisWithTemplate::New(
alloc(), constraints(), templateConst,
templateObject->group()->initialHeap(constraints()));
current->add(templateConst);
current->add(createThis);
return createThis;
}
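// Create the |this| value for a constructing call to |target| (which may be
// null when the target is unknown), picking the cheapest construction path
// that type information permits; returns nullptr when no suitable |this| can
// be created.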
MDefinition* IonBuilder::createThis(JSFunction* target, MDefinition* callee,
MDefinition* newTarget) {
// Create |this| for unknown target.
if (!target) {
if (MDefinition* createThis = createThisScriptedBaseline(callee)) {
return createThis;
}
MCreateThis* createThis = MCreateThis::New(alloc(), callee, newTarget);
current->add(createThis);
return createThis;
}
// Native constructors build the new Object themselves.
if (target->isNative()) {
if (!target->isConstructor()) {
return nullptr;
}
// The only natives with jit entries are asm.js/wasm functions, and those
// cannot be constructors, so a native constructor never has a jit entry.
MOZ_ASSERT(!target->isNativeWithJitEntry());
MConstant* magic = MConstant::New(alloc(), MagicValue(JS_IS_CONSTRUCTING));
current->add(magic);
return magic;
}
if (target->isBoundFunction()) {
return constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
}
if (target->isDerivedClassConstructor()) {
MOZ_ASSERT(target->isClassConstructor());
return constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
}
// Try baking in the prototype.
if (MDefinition* createThis = createThisScriptedSingleton(target)) {
return createThis;
}
if (MDefinition* createThis = createThisScriptedBaseline(callee)) {
return createThis;
}
return createThisScripted(callee, newTarget);
}
AbortReasonOr<Ok> IonBuilder::jsop_funcall(uint32_t argc) {
// Stack for JSOP_FUNCALL:
// 1: arg0
// ...
// argc: argN
// argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position.
// argc+2: The native 'call' function.
int calleeDepth = -((int)argc + 2);
int funcDepth = -((int)argc + 1);
// If |Function.prototype.call| may be overridden, don't optimize callsite.
TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
JSFunction* native = getSingleCallTarget(calleeTypes);
if (!native || !native->isNative() || native->native() != &fun_call) {
CallInfo callInfo(alloc(), pc, /* constructing = */ false,
/* ignoresReturnValue = */ BytecodeIsPopped(pc));
if (!callInfo.init(current, argc)) {
return abort(AbortReason::Alloc);
}
return makeCall(native, callInfo);
}
current->peek(calleeDepth)->setImplicitlyUsedUnchecked();
// Extract call target.
TemporaryTypeSet* funTypes = current->peek(funcDepth)->resultTypeSet();
JSFunction* target = getSingleCallTarget(funTypes);
CallInfo callInfo(alloc(), pc, /* constructing = */ false,
/* ignoresReturnValue = */ BytecodeIsPopped(pc));
// Save prior call stack in case we need to resolve during bailout
// recovery of inner inlined function. This includes the JSFunction and the
// 'call' native function.
MOZ_TRY(callInfo.savePriorCallStack(this, current, argc + 2));
// Shimmy the slots down to remove the native 'call' function.
current->shimmySlots(funcDepth - 1);
bool zeroArguments = (argc == 0);
// If no |this| argument was provided, explicitly pass Undefined.
// Pushing is safe here, since one stack slot has been removed.
if (zeroArguments) {
pushConstant(UndefinedValue());
} else {
// |this| becomes implicit in the call.
argc -= 1;
}
if (!callInfo.init(current, argc)) {
return abort(AbortReason::Alloc);
}
// Try to inline the call.
if (!zeroArguments) {
InliningDecision decision = makeInliningDecision(target, callInfo);
switch (decision) {
case InliningDecision_Error:
return abort(AbortReason::Error);
case InliningDecision_DontInline:
case InliningDecision_WarmUpCountTooLow:
break;
case InliningDecision_Inline: {
InliningStatus status;
MOZ_TRY_VAR(status, inlineSingleCall(callInfo, target));
if (status == InliningStatus_Inlined) {
return Ok();
}
break;
}
}
}
// Call without inlining.
return makeCall(target, callInfo);
}
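// Transpile f.apply(thisArg, args). When |args| is definitely a packed array
// with a sane length, use the inline funapplyarray path; when it is
// definitely the frame's |arguments|, use the funapplyarguments path;
// otherwise emit a regular call.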
AbortReasonOr<Ok> IonBuilder::jsop_funapply(uint32_t argc) {
int calleeDepth = -((int)argc + 2);
TemporaryTypeSet* calleeTypes = current->peek(calleeDepth)->resultTypeSet();
JSFunction* native = getSingleCallTarget(calleeTypes);
if (argc != 2 || info().analysisMode() == Analysis_ArgumentsUsage) {
CallInfo callInfo(alloc(), pc, /* constructing = */ false,
/* ignoresReturnValue = */ BytecodeIsPopped(pc));
if (!callInfo.init(current, argc)) {
return abort(AbortReason::Alloc);
}
return makeCall(native, callInfo);
}
// Disable compilation if the second argument to |apply| cannot be guaranteed
// to be either definitely |arguments| or definitely not |arguments|.
MDefinition* argument = current->peek(-1);
if (script()->argumentsHasVarBinding() &&
argument->mightBeType(MIRType::MagicOptimizedArguments) &&
argument->type() != MIRType::MagicOptimizedArguments) {
return abort(AbortReason::Disable, "fun.apply with MaybeArguments");
}
// Fallback to regular call if arg 2 is not definitely |arguments|.
if (argument->type() != MIRType::MagicOptimizedArguments) {
// Optimize fun.apply(self, array) if the length is sane and there are no
// holes.
TemporaryTypeSet* objTypes = argument->resultTypeSet();
if (native && native->isNative() && native->native() == fun_apply &&
objTypes &&
objTypes->getKnownClass(constraints()) == &ArrayObject::class_ &&
!objTypes->hasObjectFlags(constraints(), OBJECT_FLAG_LENGTH_OVERFLOW) &&
ElementAccessIsPacked(constraints(), argument)) {
return jsop_funapplyarray(argc);
}
CallInfo callInfo(alloc(), pc, /* constructing = */ false,
/* ignoresReturnValue = */ BytecodeIsPopped(pc));
if (!callInfo.init(current, argc)) {
return abort(AbortReason::Alloc);
}
return makeCall(native, callInfo);
}
if ((!native || !native->isNative() || native->native() != fun_apply) &&
info().analysisMode() != Analysis_DefiniteProperties) {
return abort(AbortReason::Disable, "fun.apply speculation failed");
}
// Use the fun.apply path that definitely uses |arguments|.
return jsop_funapplyarguments(argc);
}
AbortReasonOr<Ok> IonBuilder::jsop_spreadcall() {
// The arguments array is constructed by a JSOP_NEWARRAY and is not leaked
// to the user. The complications of spread-call iterator behaviour are
// handled when the user's objects are expanded and copied into this hidden
// array.
#ifdef DEBUG
// If the class is known, ensure it is what we expected.
MDefinition* argument = current->peek(-1);
if (TemporaryTypeSet* objTypes = argument->resultTypeSet()) {
if (const JSClass* clasp = objTypes->getKnownClass(constraints())) {
MOZ_ASSERT(clasp == &ArrayObject::class_);
}
}
#endif
MDefinition* argArr = current->pop();
MDefinition* argThis = current->pop();
MDefinition* argFunc = current->pop();
// Extract call target.
TemporaryTypeSet* funTypes = argFunc->resultTypeSet();
JSFunction* target = getSingleCallTarget(funTypes);
WrappedFunction* wrappedTarget =
target ? new (alloc()) WrappedFunction(target) : nullptr;
// Load the dense elements of the argument array.
MElements* elements = MElements::New(alloc(), argArr);
current->add(elements);
MApplyArray* apply =
MApplyArray::New(alloc(), wrappedTarget, argFunc, elements, argThis);
current->add(apply);
current->push(apply);
MOZ_TRY(resumeAfter(apply));
if (target && target->realm() == script()->realm()) {
apply->setNotCrossRealm();
}
if (BytecodeIsPopped(pc)) {
apply->setIgnoresReturnValue();
}
// TypeBarrier the call result
TemporaryTypeSet* types = bytecodeTypes(pc);
return pushTypeBarrier(apply, types, BarrierKind::TypeSet);
}
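// Check whether property |id| on the singleton object |nobj| is known, under
// the current compilation constraints, to be a constant JSFunction that
// satisfies |test|.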
bool IonBuilder::propertyIsConstantFunction(NativeObject* nobj, jsid id,
bool (*test)(IonBuilder* builder,
JSFunction* fun)) {
if (!nobj->isSingleton()) {
return false;
}
TypeSet::ObjectKey* objKey = TypeSet::ObjectKey::get(nobj);
if (analysisContext) {
objKey->ensureTrackedProperty(analysisContext, id);
}
if (objKey->unknownProperties()) {
return false;
}
HeapTypeSetKey property = objKey->property(id);
Value value = UndefinedValue();
if (!property.constant(constraints(), &value)) {
return false;
}
return value.isObject() && value.toObject().is<JSFunction>() &&
test(this, &value.toObject().as<JSFunction>());
}