/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/IonBuilder.h"
#include "mozilla/DebugOnly.h"
#include "builtin/Eval.h"
#include "builtin/TypedObject.h"
#include "frontend/SourceNotes.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineInspector.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonSpewer.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"
#include "vm/ArgumentsObject.h"
#include "vm/Opcodes.h"
#include "vm/RegExpStatics.h"
#include "jsinferinlines.h"
#include "jsobjinlines.h"
#include "jsopcodeinlines.h"
#include "jsscriptinlines.h"
#include "jit/CompileInfo-inl.h"
#include "jit/ExecutionMode-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::DebugOnly;
using mozilla::Maybe;
class jit::BaselineFrameInspector
{
  public:
    // Type observed for |this| in the baseline frame.
    types::Type thisType;

    // The frame's scope chain object, recorded only when it has a singleton
    // type (see NewBaselineFrameInspector); otherwise null.
    JSObject *singletonScopeChain;

    // Types observed for the unaliased formal arguments and fixed locals,
    // indexed by argument/local number. Types are recorded instead of the
    // values themselves to avoid capturing nursery pointers.
    Vector<types::Type, 4, IonAllocPolicy> argTypes;
    Vector<types::Type, 4, IonAllocPolicy> varTypes;

    BaselineFrameInspector(TempAllocator *temp)
      : thisType(types::Type::UndefinedType()),
        singletonScopeChain(nullptr),
        argTypes(*temp),
        varTypes(*temp)
    {}
};
// Capture the type information of |frame| into a freshly allocated
// BaselineFrameInspector, for use by a compilation running off-thread.
// Returns null on OOM.
BaselineFrameInspector *
jit::NewBaselineFrameInspector(TempAllocator *temp, BaselineFrame *frame, CompileInfo *info)
{
    JS_ASSERT(frame);

    BaselineFrameInspector *si = temp->lifoAlloc()->new_<BaselineFrameInspector>(temp);
    if (!si)
        return nullptr;

    // Rather than copying the frame's actual values (which could capture
    // nursery pointers and go stale during compilation), record only the
    // values' types.
    si->thisType = types::GetMaybeOptimizedOutValueType(frame->thisValue());

    if (frame->scopeChain()->hasSingletonType())
        si->singletonScopeChain = frame->scopeChain();

    JSScript *script = frame->script();

    if (script->functionNonDelazifying()) {
        size_t nformals = frame->numFormalArgs();
        if (!si->argTypes.reserve(nformals))
            return nullptr;
        for (size_t i = 0; i < nformals; i++) {
            types::Type argType = types::Type::UndefinedType();
            if (script->formalIsAliased(i)) {
                // Aliased formals live in the call object; record Undefined.
            } else if (!script->argsObjAliasesFormals()) {
                argType = types::GetMaybeOptimizedOutValueType(frame->unaliasedFormal(i));
            } else if (frame->hasArgsObj()) {
                argType = types::GetMaybeOptimizedOutValueType(frame->argsObj().arg(i));
            }
            si->argTypes.infallibleAppend(argType);
        }
    }

    size_t nfixed = script->nfixed();
    if (!si->varTypes.reserve(nfixed))
        return nullptr;
    for (size_t i = 0; i < nfixed; i++) {
        if (info->isSlotAliasedAtOsr(i + info->firstLocalSlot())) {
            si->varTypes.infallibleAppend(types::Type::UndefinedType());
        } else {
            si->varTypes.infallibleAppend(
                types::GetMaybeOptimizedOutValueType(frame->unaliasedLocal(i)));
        }
    }

    return si;
}
// Construct a builder for compiling |info->script()|. |analysisContext| is
// non-null only for the definite-properties analysis (asserted below);
// |baselineFrame| carries OSR type hints and may be null.
IonBuilder::IonBuilder(JSContext *analysisContext, CompileCompartment *comp,
                       const JitCompileOptions &options, TempAllocator *temp,
                       MIRGraph *graph, types::CompilerConstraintList *constraints,
                       BaselineInspector *inspector, CompileInfo *info,
                       const OptimizationInfo *optimizationInfo,
                       BaselineFrameInspector *baselineFrame, size_t inliningDepth,
                       uint32_t loopDepth)
  : MIRGenerator(comp, options, temp, graph, info, optimizationInfo),
    backgroundCodegen_(nullptr),
    analysisContext(analysisContext),
    baselineFrame_(baselineFrame),
    // Pessimistic default; build() resets this to AbortReason_NoAbort on
    // success.
    abortReason_(AbortReason_Disable),
    descrSetHash_(nullptr),
    constraints_(constraints),
    analysis_(*temp, info->script()),
    thisTypes(nullptr),
    argTypes(nullptr),
    typeArray(nullptr),
    typeArrayHint(0),
    bytecodeTypeMap(nullptr),
    loopDepth_(loopDepth),
    callerResumePoint_(nullptr),
    callerBuilder_(nullptr),
    cfgStack_(*temp),
    loops_(*temp),
    switches_(*temp),
    labels_(*temp),
    iterators_(*temp),
    loopHeaders_(*temp),
    inspector(inspector),
    inliningDepth_(inliningDepth),
    numLoopRestarts_(0),
    // Carry over failure flags recorded on previous compilations of this
    // script so the same bailouts are not re-attempted.
    failedBoundsCheck_(info->script()->failedBoundsCheck()),
    failedShapeGuard_(info->script()->failedShapeGuard()),
    nonStringIteration_(false),
    lazyArguments_(nullptr),
    inlineCallInfo_(nullptr)
{
    script_ = info->script();
    pc = info->startPC();

    // A baseline script is required except for the arguments-usage analysis;
    // an analysis context is required exactly for the definite-properties
    // analysis.
    JS_ASSERT(script()->hasBaselineScript() == (info->executionMode() != ArgumentsUsageAnalysis));
    JS_ASSERT(!!analysisContext == (info->executionMode() == DefinitePropertiesAnalysis));
}
// Release front-end-only state before handing the graph to the back end.
void
IonBuilder::clearForBackEnd()
{
    JS_ASSERT(!analysisContext);
    baselineFrame_ = nullptr;

    // These caches hold malloc'ed data. The top-level IonBuilder is never
    // explicitly destroyed, so free that memory now, before later phases of
    // compilation, to avoid leaks. (Builders for inner scripts live on the
    // stack and release this in their destructors.)
    gsn.purge();
    scopeCoordinateNameCache.purge();
}
// Abort the current compilation, logging a printf-style reason in debug
// builds. Always returns false so callers can write |return abort(...)|.
bool
IonBuilder::abort(const char *message, ...)
{
    // Don't call PCToLineNumber in release builds.
#ifdef DEBUG
    va_list ap;
    va_start(ap, message);
    abortFmt(message, ap);
    va_end(ap);
    IonSpew(IonSpew_Abort, "aborted @ %s:%d", script()->filename(), PCToLineNumber(script(), pc));
#endif
    return false;
}
// Debug-only spew of |message| tagged with the current script location.
void
IonBuilder::spew(const char *message)
{
    // Don't call PCToLineNumber in release builds.
#ifdef DEBUG
    IonSpew(IonSpew_MIR, "%s @ %s:%d", message, script()->filename(), PCToLineNumber(script(), pc));
#endif
}
// Fetch the jump-offset operand of |pc|, asserting the opcode is a jump.
static inline int32_t
GetJumpOffset(jsbytecode *pc)
{
    JS_ASSERT(js_CodeSpec[JSOp(*pc)].type() == JOF_JUMP);
    return GET_JUMP_OFFSET(pc);
}
// Build CFG state for an if-without-else: process the true branch until
// |join|, where the false path merges back in.
IonBuilder::CFGState
IonBuilder::CFGState::If(jsbytecode *join, MTest *test)
{
    CFGState state;
    state.state = IF_TRUE;
    state.branch.test = test;
    state.branch.ifFalse = test->ifFalse();
    state.stopAt = join;
    return state;
}
// Build CFG state for an if/else: process the true branch until |trueEnd|,
// then the false branch until |falseEnd|.
IonBuilder::CFGState
IonBuilder::CFGState::IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MTest *test)
{
    MBasicBlock *elseBlock = test->ifFalse();

    CFGState state;

    // If the end of the false path is the same as the start of the
    // false path, then the "else" block is empty and we can devolve
    // this to the IF_TRUE case. We handle this here because there is
    // still an extra GOTO on the true path and we want stopAt to point
    // there, whereas the IF_TRUE case does not have the GOTO.
    if (falseEnd == elseBlock->pc())
        state.state = IF_TRUE_EMPTY_ELSE;
    else
        state.state = IF_ELSE_TRUE;

    state.stopAt = trueEnd;
    state.branch.falseEnd = falseEnd;
    state.branch.ifFalse = elseBlock;
    state.branch.test = test;
    return state;
}
// Build CFG state for a short-circuiting && / || expression ending at |join|.
IonBuilder::CFGState
IonBuilder::CFGState::AndOr(jsbytecode *join, MBasicBlock *joinStart)
{
    CFGState state;
    state.state = AND_OR;
    state.branch.test = nullptr;
    state.branch.ifFalse = joinStart;
    state.stopAt = join;
    return state;
}
// Build CFG state for a tableswitch whose cases are processed until |exitpc|.
IonBuilder::CFGState
IonBuilder::CFGState::TableSwitch(jsbytecode *exitpc, MTableSwitch *ins)
{
    CFGState state;
    state.state = TABLE_SWITCH;
    state.tableswitch.ins = ins;
    state.tableswitch.exitpc = exitpc;
    state.tableswitch.currentBlock = 0;
    state.tableswitch.breaks = nullptr;
    state.stopAt = exitpc;
    return state;
}
// Return the unique known call target for a call site, or null if the callee
// type set does not pin down exactly one singleton function.
JSFunction *
IonBuilder::getSingleCallTarget(types::TemporaryTypeSet *calleeTypes)
{
    if (!calleeTypes)
        return nullptr;

    JSObject *singleton = calleeTypes->getSingleton();
    if (!singleton)
        return nullptr;
    if (!singleton->is<JSFunction>())
        return nullptr;

    return &singleton->as<JSFunction>();
}
// Collect the set of possible call targets (up to |maxTargets|) from the
// callee type set into |targets|. |*gotLambda| is set if any target came from
// a type object's interpreted function rather than a singleton. On any
// disqualifying target, |targets| is cleared and true is returned: the call
// simply is not optimized. Returns false only on OOM.
bool
IonBuilder::getPolyCallTargets(types::TemporaryTypeSet *calleeTypes, bool constructing,
                               ObjectVector &targets, uint32_t maxTargets, bool *gotLambda)
{
    JS_ASSERT(targets.empty());
    JS_ASSERT(gotLambda);
    *gotLambda = false;

    if (!calleeTypes)
        return true;

    // Any base type flags mean the callee may be a non-object value.
    if (calleeTypes->baseFlags() != 0)
        return true;

    unsigned count = calleeTypes->getObjectCount();
    if (count == 0 || count > maxTargets)
        return true;

    if (!targets.reserve(count))
        return false;

    for (unsigned i = 0; i < count; i++) {
        JSFunction *target;
        if (JSObject *obj = calleeTypes->getSingleObject(i)) {
            if (!obj->is<JSFunction>()) {
                targets.clear();
                return true;
            }
            target = &obj->as<JSFunction>();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            JS_ASSERT(typeObj);
            if (!typeObj->interpretedFunction) {
                targets.clear();
                return true;
            }
            target = typeObj->interpretedFunction;
            *gotLambda = true;
        }

        // Don't optimize if we're constructing and the callee is not a
        // constructor, so that CallKnown does not have to handle this case
        // (it should always throw).
        if (constructing && !target->isInterpretedConstructor() && !target->isNativeConstructor()) {
            targets.clear();
            return true;
        }

        // Space was reserved above, so this append cannot fail.
        DebugOnly<bool> appendOk = targets.append(target);
        JS_ASSERT(appendOk);
    }

    // For now, only inline "singleton" lambda calls
    if (*gotLambda && targets.length() > 1)
        targets.clear();

    return true;
}
// Spew why inlining was rejected and return the DontInline decision.
// |targetScript| may be null when there is no script to point at.
IonBuilder::InliningDecision
IonBuilder::DontInline(JSScript *targetScript, const char *reason)
{
    if (targetScript) {
        IonSpew(IonSpew_Inlining, "Cannot inline %s:%u: %s",
                targetScript->filename(), targetScript->lineno(), reason);
    } else {
        IonSpew(IonSpew_Inlining, "Cannot inline: %s", reason);
    }
    return InliningDecision_DontInline;
}
// Decide whether |target| may be inlined at the current call site. Returns
// InliningDecision_Inline, InliningDecision_DontInline (with a spewed
// reason), or InliningDecision_Error on OOM / failed compilation.
IonBuilder::InliningDecision
IonBuilder::canInlineTarget(JSFunction *target, CallInfo &callInfo)
{
    if (!optimizationInfo().inlineInterpreted())
        return InliningDecision_DontInline;

    if (!target->isInterpreted())
        return DontInline(nullptr, "Non-interpreted target");

    // Allow constructing lazy scripts when performing the definite properties
    // analysis, as baseline has not been used to warm the caller up yet.
    // (No need to re-test target->isInterpreted() here: the early return
    // above already guarantees it.)
    if (info().executionMode() == DefinitePropertiesAnalysis) {
        RootedScript script(analysisContext, target->getOrCreateScript(analysisContext));
        if (!script)
            return InliningDecision_Error;

        if (!script->hasBaselineScript() && script->canBaselineCompile()) {
            MethodStatus status = BaselineCompile(analysisContext, script);
            if (status == Method_Error)
                return InliningDecision_Error;
            if (status != Method_Compiled)
                return InliningDecision_DontInline;
        }
    }

    if (!target->hasScript())
        return DontInline(nullptr, "Lazy script");

    JSScript *inlineScript = target->nonLazyScript();
    if (callInfo.constructing() && !target->isInterpretedConstructor())
        return DontInline(inlineScript, "Callee is not a constructor");

    ExecutionMode executionMode = info().executionMode();
    if (!CanIonCompile(inlineScript, executionMode))
        return DontInline(inlineScript, "Disabled Ion compilation");

    // Don't inline functions which don't have baseline scripts.
    if (!inlineScript->hasBaselineScript())
        return DontInline(inlineScript, "No baseline jitcode");

    if (TooManyArguments(target->nargs()))
        return DontInline(inlineScript, "Too many args");

    if (TooManyArguments(callInfo.argc()))
        return DontInline(inlineScript, "Too many args");

    // Allow inlining of recursive calls, but only one level deep.
    IonBuilder *builder = callerBuilder_;
    while (builder) {
        if (builder->script() == inlineScript)
            return DontInline(inlineScript, "Recursive call");
        builder = builder->callerBuilder_;
    }

    if (target->isHeavyweight())
        return DontInline(inlineScript, "Heavyweight function");

    if (inlineScript->uninlineable())
        return DontInline(inlineScript, "Uninlineable script");

    if (inlineScript->needsArgsObj())
        return DontInline(inlineScript, "Script that needs an arguments object");

    if (!inlineScript->compileAndGo())
        return DontInline(inlineScript, "Non-compileAndGo script");

    types::TypeObjectKey *targetType = types::TypeObjectKey::get(target);
    if (targetType->unknownProperties())
        return DontInline(inlineScript, "Target type has unknown properties");

    return InliningDecision_Inline;
}
void
IonBuilder::popCfgStack()
{
if (cfgStack_.back().isLoop())
loops_.popBack();
if (cfgStack_.back().state == CFGState::LABEL)
labels_.popBack();
cfgStack_.popBack();
}
bool
IonBuilder::analyzeNewLoopTypes(MBasicBlock *entry, jsbytecode *start, jsbytecode *end)
{
    // The phi inputs at the loop head only reflect types for variables that
    // were present at the start of the loop. If the variable changes to a new
    // type within the loop body, and that type is carried around to the loop
    // head, then we need to know about the new type up front.
    //
    // Since SSA information hasn't been constructed for the loop body yet, we
    // need a separate analysis to pick out the types that might flow around
    // the loop header. This is a best-effort analysis that may either over-
    // or under-approximate the set of such types.
    //
    // Over-approximating the types may lead to inefficient generated code, and
    // under-approximating the types will cause the loop body to be analyzed
    // multiple times as the correct types are deduced (see finishLoop).

    // If we restarted processing of an outer loop then get loop header types
    // directly from the last time we have previously processed this loop. This
    // both avoids repeated work from the bytecode traverse below, and will
    // also pick up types discovered while previously building the loop body.
    for (size_t i = 0; i < loopHeaders_.length(); i++) {
        if (loopHeaders_[i].pc == start) {
            MBasicBlock *oldEntry = loopHeaders_[i].header;
            for (MPhiIterator oldPhi = oldEntry->phisBegin();
                 oldPhi != oldEntry->phisEnd();
                 oldPhi++)
            {
                MPhi *newPhi = entry->getSlot(oldPhi->slot())->toPhi();
                if (!newPhi->addBackedgeType(oldPhi->type(), oldPhi->resultTypeSet()))
                    return false;
            }
            // Update the most recent header for this loop encountered, in case
            // new types flow to the phis and the loop is processed at least
            // three times.
            loopHeaders_[i].header = entry;
            return true;
        }
    }

    // First encounter with this loop: remember its header. The append is
    // fallible (OOM), so propagate failure instead of silently dropping the
    // entry.
    if (!loopHeaders_.append(LoopHeader(start, entry)))
        return false;

    // Scan the loop body for writes to arguments and locals, and guess the
    // type of each written value from the opcode that produced it.
    jsbytecode *last = nullptr, *earlier = nullptr;
    for (jsbytecode *pc = start; pc != end; earlier = last, last = pc, pc += GetBytecodeLength(pc)) {
        uint32_t slot;
        if (*pc == JSOP_SETLOCAL)
            slot = info().localSlot(GET_LOCALNO(pc));
        else if (*pc == JSOP_SETARG)
            slot = info().argSlotUnchecked(GET_ARGNO(pc));
        else
            continue;
        if (slot >= info().firstStackSlot())
            continue;
        if (!analysis().maybeInfo(pc))
            continue;

        MPhi *phi = entry->getSlot(slot)->toPhi();

        // A JSOP_POS (unary +) wraps the instruction that produced the value;
        // look at the instruction before it instead.
        if (*last == JSOP_POS)
            last = earlier;

        if (js_CodeSpec[*last].format & JOF_TYPESET) {
            // The producing op has an observed type set; use it directly.
            types::TemporaryTypeSet *typeSet = bytecodeTypes(last);
            if (!typeSet->empty()) {
                MIRType type = typeSet->getKnownMIRType();
                if (!phi->addBackedgeType(type, typeSet))
                    return false;
            }
        } else if (*last == JSOP_GETLOCAL || *last == JSOP_GETARG) {
            // Copying another variable: reuse that variable's backedge type,
            // if it has one.
            uint32_t lastSlot = (*last == JSOP_GETLOCAL)
                                ? info().localSlot(GET_LOCALNO(last))
                                : info().argSlotUnchecked(GET_ARGNO(last));
            if (lastSlot < info().firstStackSlot()) {
                MPhi *otherPhi = entry->getSlot(lastSlot)->toPhi();
                if (otherPhi->hasBackedgeType()) {
                    if (!phi->addBackedgeType(otherPhi->type(), otherPhi->resultTypeSet()))
                        return false;
                }
            }
        } else {
            // Otherwise, infer the result type from the opcode itself.
            MIRType type = MIRType_None;
            switch (*last) {
              case JSOP_VOID:
              case JSOP_UNDEFINED:
                type = MIRType_Undefined;
                break;
              case JSOP_NULL:
                type = MIRType_Null;
                break;
              case JSOP_ZERO:
              case JSOP_ONE:
              case JSOP_INT8:
              case JSOP_INT32:
              case JSOP_UINT16:
              case JSOP_UINT24:
              case JSOP_BITAND:
              case JSOP_BITOR:
              case JSOP_BITXOR:
              case JSOP_BITNOT:
              case JSOP_RSH:
              case JSOP_LSH:
              case JSOP_URSH:
                type = MIRType_Int32;
                break;
              case JSOP_FALSE:
              case JSOP_TRUE:
              case JSOP_EQ:
              case JSOP_NE:
              case JSOP_LT:
              case JSOP_LE:
              case JSOP_GT:
              case JSOP_GE:
              case JSOP_NOT:
              case JSOP_STRICTEQ:
              case JSOP_STRICTNE:
              case JSOP_IN:
              case JSOP_INSTANCEOF:
                type = MIRType_Boolean;
                break;
              case JSOP_DOUBLE:
                type = MIRType_Double;
                break;
              case JSOP_STRING:
              case JSOP_TYPEOF:
              case JSOP_TYPEOFEXPR:
              case JSOP_ITERNEXT:
                type = MIRType_String;
                break;
              case JSOP_ADD:
              case JSOP_SUB:
              case JSOP_MUL:
              case JSOP_DIV:
              case JSOP_MOD:
              case JSOP_NEG:
                type = inspector->expectedResultType(last);
                // Previously fell through to |default|; make the break
                // explicit so adding a new case below cannot change behavior.
                break;
              default:
                break;
            }
            if (type != MIRType_None) {
                if (!phi->addBackedgeType(type, nullptr))
                    return false;
            }
        }
    }
    return true;
}
// Push tracking state for a newly entered loop onto both the loop stack
// (used to resolve break/continue) and the CFG stack. Returns false on OOM.
bool
IonBuilder::pushLoop(CFGState::State initial, jsbytecode *stopAt, MBasicBlock *entry, bool osr,
                     jsbytecode *loopHead, jsbytecode *initialPc,
                     jsbytecode *bodyStart, jsbytecode *bodyEnd, jsbytecode *exitpc,
                     jsbytecode *continuepc)
{
    // By default |continue| targets the loop entry.
    if (!continuepc)
        continuepc = entry->pc();

    ControlFlowInfo loop(cfgStack_.length(), continuepc);
    if (!loops_.append(loop))
        return false;

    CFGState state;
    state.state = initial;
    state.stopAt = stopAt;
    state.loop.bodyStart = bodyStart;
    state.loop.bodyEnd = bodyEnd;
    state.loop.exitpc = exitpc;
    state.loop.continuepc = continuepc;
    state.loop.entry = entry;
    state.loop.osr = osr;
    state.loop.successor = nullptr;
    state.loop.breaks = nullptr;
    state.loop.continues = nullptr;
    // Record the initial state/pc so the loop can be re-processed if the
    // header phi types change (see the finishLoop discussion in
    // analyzeNewLoopTypes).
    state.loop.initialState = initial;
    state.loop.initialPc = initialPc;
    state.loop.initialStopAt = stopAt;
    state.loop.loopHead = loopHead;
    return cfgStack_.append(state);
}
// One-time setup before building MIR: freeze the script's observed type
// sets, run the bytecode analysis, and locate (or compute) the bytecode
// type map. Returns false on failure/OOM.
bool
IonBuilder::init()
{
    if (!types::TypeScript::FreezeTypeSets(constraints(), script(),
                                           &thisTypes, &argTypes, &typeArray))
    {
        return false;
    }

    if (!analysis().init(alloc(), gsn))
        return false;

    // The baseline script normally has the bytecode type map, but compute
    // it ourselves if we do not have a baseline script.
    if (script()->hasBaselineScript()) {
        bytecodeTypeMap = script()->baselineScript()->bytecodeTypeMap();
    } else {
        bytecodeTypeMap = alloc_->lifoAlloc()->newArrayUninitialized<uint32_t>(script()->nTypeSets());
        if (!bytecodeTypeMap)
            return false;
        types::FillBytecodeTypeMap(script(), bytecodeTypeMap);
    }

    return true;
}
// Entry point for a top-level (non-inlined) compilation: create the entry
// block, initialize parameters/locals/scope/arguments slots, then translate
// the bytecode into MIR. Returns false on abort or OOM.
bool
IonBuilder::build()
{
    if (!init())
        return false;

    if (!setCurrentAndSpecializePhis(newBlock(pc)))
        return false;
    if (!current)
        return false;

#ifdef DEBUG
    if (info().executionMode() == SequentialExecution && script()->hasIonScript()) {
        IonSpew(IonSpew_Scripts, "Recompiling script %s:%d (%p) (usecount=%d, level=%s)",
                script()->filename(), script()->lineno(), (void *)script(),
                (int)script()->getUseCount(), OptimizationLevelString(optimizationInfo().level()));
    } else {
        IonSpew(IonSpew_Scripts, "Analyzing script %s:%d (%p) (usecount=%d, level=%s)",
                script()->filename(), script()->lineno(), (void *)script(),
                (int)script()->getUseCount(), OptimizationLevelString(optimizationInfo().level()));
    }
#endif

    initParameters();

    // Initialize local variables.
    for (uint32_t i = 0; i < info().nlocals(); i++) {
        MConstant *undef = MConstant::New(alloc(), UndefinedValue());
        current->add(undef);
        current->initSlot(info().localSlot(i), undef);
    }

    // Initialize something for the scope chain. We can bail out before the
    // start instruction, but the snapshot is encoded *at* the start
    // instruction, which means generating any code that could load into
    // registers is illegal.
    MInstruction *scope = MConstant::New(alloc(), UndefinedValue());
    current->add(scope);
    current->initSlot(info().scopeChainSlot(), scope);

    // Initialize the return value.
    MInstruction *returnValue = MConstant::New(alloc(), UndefinedValue());
    current->add(returnValue);
    current->initSlot(info().returnValueSlot(), returnValue);

    // Initialize the arguments object slot to undefined if necessary.
    if (info().hasArguments()) {
        MInstruction *argsObj = MConstant::New(alloc(), UndefinedValue());
        current->add(argsObj);
        current->initSlot(info().argsObjSlot(), argsObj);
    }

    // Emit the start instruction, so we can begin real instructions.
    current->makeStart(MStart::New(alloc(), MStart::StartType_Default));
    if (instrumentedProfiling())
        current->add(MProfilerStackOp::New(alloc(), script(), MProfilerStackOp::Enter));

    // Guard against over-recursion. Do this before we start unboxing, since
    // this will create an OSI point that will read the incoming argument
    // values, which is nice to do before their last real use, to minimize
    // register/stack pressure.
    MCheckOverRecursed *check = MCheckOverRecursed::New(alloc());
    current->add(check);
    check->setResumePoint(current->entryResumePoint());

    // Parameters have been checked to correspond to the typeset, now we unbox
    // what we can in an infallible manner.
    rewriteParameters();

    // It's safe to start emitting actual IR, so now build the scope chain.
    if (!initScopeChain())
        return false;

    if (info().needsArgsObj() && !initArgumentsObject())
        return false;

    // Prevent |this| from being DCE'd: necessary for constructors.
    if (info().funMaybeLazy())
        current->getSlot(info().thisSlot())->setGuard();

    // The type analysis phase attempts to insert unbox operations near
    // definitions of values. It also attempts to replace uses in resume points
    // with the narrower, unboxed variants. However, we must prevent this
    // replacement from happening on values in the entry snapshot. Otherwise we
    // could get this:
    //
    //     v0 = MParameter(0)
    //     v1 = MParameter(1)
    //     --   ResumePoint(v2, v3)
    //     v2 = Unbox(v0, INT32)
    //     v3 = Unbox(v1, INT32)
    //
    // So we attach the initial resume point to each parameter, which the type
    // analysis explicitly checks (this is the same mechanism used for
    // effectful operations).
    for (uint32_t i = 0; i < info().endArgSlot(); i++) {
        MInstruction *ins = current->getEntrySlot(i)->toInstruction();
        if (ins->type() == MIRType_Value)
            ins->setResumePoint(current->entryResumePoint());
    }

    // lazyArguments should never be accessed in |argsObjAliasesFormals| scripts.
    if (info().hasArguments() && !info().argsObjAliasesFormals()) {
        lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
        current->add(lazyArguments_);
    }

    insertRecompileCheck();

    if (!traverseBytecode())
        return false;

    if (!maybeAddOsrTypeBarriers())
        return false;

    if (!processIterators())
        return false;

    JS_ASSERT(loopDepth_ == 0);

    // Compilation succeeded: clear the pessimistic default abort reason set
    // by the constructor.
    abortReason_ = AbortReason_NoAbort;
    return true;
}
bool
IonBuilder::processIterators()
{
// Find phis that must directly hold an iterator live.
Vector<MPhi *, 0, SystemAllocPolicy> worklist;
for (size_t i = 0; i < iterators_.length(); i++) {
MInstruction *ins = iterators_[i];
for (MUseDefIterator iter(ins); iter; iter++) {
if (iter.def()->isPhi()) {
if (!worklist.append(iter.def()->toPhi()))
return false;
}
}
}
// Propagate the iterator and live status of phis to all other connected
// phis.
while (!worklist.empty()) {
MPhi *phi = worklist.popCopy();
phi->setIterator();
phi->setImplicitlyUsedUnchecked();
for (MUseDefIterator iter(phi); iter; iter++) {
if (iter.def()->isPhi()) {
MPhi *other = iter.def()->toPhi();
if (!other->isIterator() && !worklist.append(other))
return false;
}
}
}
return true;
}
// Build MIR for this script as an inlined callee of |callerBuilder|, wiring
// the new entry block to the caller's current block and seeding slots from
// |callInfo|'s arguments. Returns false on abort or OOM.
bool
IonBuilder::buildInline(IonBuilder *callerBuilder, MResumePoint *callerResumePoint,
                        CallInfo &callInfo)
{
    if (!init())
        return false;

    inlineCallInfo_ = &callInfo;

    IonSpew(IonSpew_Scripts, "Inlining script %s:%d (%p)",
            script()->filename(), script()->lineno(), (void *)script());

    callerBuilder_ = callerBuilder;
    callerResumePoint_ = callerResumePoint;

    // Propagate failure flags from the caller so the same bailouts are not
    // re-attempted inside the inlined body.
    if (callerBuilder->failedBoundsCheck_)
        failedBoundsCheck_ = true;

    if (callerBuilder->failedShapeGuard_)
        failedShapeGuard_ = true;

    // Generate single entrance block.
    if (!setCurrentAndSpecializePhis(newBlock(pc)))
        return false;
    if (!current)
        return false;

    current->setCallerResumePoint(callerResumePoint);

    // Connect the entrance block to the last block in the caller's graph.
    MBasicBlock *predecessor = callerBuilder->current;
    JS_ASSERT(predecessor == callerResumePoint->block());

    // All further instructions generated in from this scope should be
    // considered as part of the function that we're inlining. We also need to
    // keep track of the inlining depth because all scripts inlined on the same
    // level contiguously have only one InlineExit node.
    if (instrumentedProfiling()) {
        predecessor->add(MProfilerStackOp::New(alloc(), script(),
                                               MProfilerStackOp::InlineEnter,
                                               inliningDepth_));
    }

    predecessor->end(MGoto::New(alloc(), current));
    if (!current->addPredecessorWithoutPhis(predecessor))
        return false;

    // Initialize scope chain slot to Undefined. It's set later by |initScopeChain|.
    MInstruction *scope = MConstant::New(alloc(), UndefinedValue());
    current->add(scope);
    current->initSlot(info().scopeChainSlot(), scope);

    // Initialize |return value| slot.
    MInstruction *returnValue = MConstant::New(alloc(), UndefinedValue());
    current->add(returnValue);
    current->initSlot(info().returnValueSlot(), returnValue);

    // Initialize |arguments| slot.
    if (info().hasArguments()) {
        MInstruction *argsObj = MConstant::New(alloc(), UndefinedValue());
        current->add(argsObj);
        current->initSlot(info().argsObjSlot(), argsObj);
    }

    // Initialize |this| slot.
    current->initSlot(info().thisSlot(), callInfo.thisArg());

    IonSpew(IonSpew_Inlining, "Initializing %u arg slots", info().nargs());

    // NB: Ion does not inline functions which |needsArgsObj|. So using argSlot()
    // instead of argSlotUnchecked() below is OK
    JS_ASSERT(!info().needsArgsObj());

    // Initialize actually set arguments.
    uint32_t existing_args = Min<uint32_t>(callInfo.argc(), info().nargs());
    for (size_t i = 0; i < existing_args; ++i) {
        MDefinition *arg = callInfo.getArg(i);
        current->initSlot(info().argSlot(i), arg);
    }

    // Pass Undefined for missing arguments
    for (size_t i = callInfo.argc(); i < info().nargs(); ++i) {
        MConstant *arg = MConstant::New(alloc(), UndefinedValue());
        current->add(arg);
        current->initSlot(info().argSlot(i), arg);
    }

    // Initialize the scope chain now that args are initialized.
    if (!initScopeChain(callInfo.fun()))
        return false;

    IonSpew(IonSpew_Inlining, "Initializing %u local slots", info().nlocals());

    // Initialize local variables.
    for (uint32_t i = 0; i < info().nlocals(); i++) {
        MConstant *undef = MConstant::New(alloc(), UndefinedValue());
        current->add(undef);
        current->initSlot(info().localSlot(i), undef);
    }

    IonSpew(IonSpew_Inlining, "Inline entry block MResumePoint %p, %u operands",
            (void *) current->entryResumePoint(), current->entryResumePoint()->numOperands());

    // +2 for the scope chain and |this|, maybe another +1 for arguments object slot.
    JS_ASSERT(current->entryResumePoint()->numOperands() == info().totalSlots());

    if (script_->argumentsHasVarBinding()) {
        lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
        current->add(lazyArguments_);
    }

    insertRecompileCheck();

    if (!traverseBytecode())
        return false;

    return true;
}
// Replace the parameter in slot |slotIdx| with an unboxed version when its
// observed type set pins down a definite MIR type. |argIndex| is currently
// unused.
void
IonBuilder::rewriteParameter(uint32_t slotIdx, MDefinition *param, int32_t argIndex)
{
    JS_ASSERT(param->isParameter() || param->isGetArgumentsObjectArg());

    types::TemporaryTypeSet *types = param->resultTypeSet();
    MDefinition *actual = ensureDefiniteType(param, types->getKnownMIRType());
    // ensureDefiniteType returns the input unchanged when no unbox is needed.
    if (actual == param)
        return;

    // Careful! We leave the original MParameter in the entry resume point. The
    // arguments still need to be checked unless proven otherwise at the call
    // site, and these checks can bailout. We can end up:
    //     v0 = Parameter(0)
    //     v1 = Unbox(v0, INT32)
    //     --   ResumePoint(v0)
    //
    // As usual, it would be invalid for v1 to be captured in the initial
    // resume point, rather than v0.
    current->rewriteSlot(slotIdx, actual);
}
// Apply Type Inference information to parameters early on, unboxing them if
// they have a definitive type. The actual guards will be emitted by the code
// generator, explicitly, as part of the function prologue.
void
IonBuilder::rewriteParameters()
{
    JS_ASSERT(info().scopeChainSlot() == 0);

    // Scripts without a function have no parameter slots to rewrite.
    if (!info().funMaybeLazy())
        return;

    for (uint32_t i = info().startArgSlot(); i < info().endArgSlot(); i++) {
        MDefinition *param = current->getSlot(i);
        rewriteParameter(i, param, param->toParameter()->index());
    }
}
// Create MParameter instructions for |this| and each formal argument and
// install them in the entry block's slots. No-op for non-function scripts.
void
IonBuilder::initParameters()
{
    if (!info().funMaybeLazy())
        return;

    // If we are doing OSR on a frame which initially executed in the
    // interpreter and didn't accumulate type information, try to use that OSR
    // frame to determine possible initial types for 'this' and parameters.
    if (thisTypes->empty() && baselineFrame_)
        thisTypes->addType(baselineFrame_->thisType, alloc_->lifoAlloc());

    MParameter *param = MParameter::New(alloc(), MParameter::THIS_SLOT, thisTypes);
    current->add(param);
    current->initSlot(info().thisSlot(), param);

    for (uint32_t i = 0; i < info().nargs(); i++) {
        types::TemporaryTypeSet *types = &argTypes[i];
        // Only trust the frame's argument types when the baseline script
        // cannot have overwritten its arguments since entry.
        if (types->empty() && baselineFrame_ &&
            !script_->baselineScript()->modifiesArguments())
        {
            types->addType(baselineFrame_->argTypes[i], alloc_->lifoAlloc());
        }

        param = MParameter::New(alloc(), i, types);
        current->add(param);
        current->initSlot(info().argSlotUnchecked(i), param);
    }
}
// Build and install the scope chain for the current block. |callee| is the
// callee definition supplied by the inliner (see buildInline); when null, an
// MCallee instruction is emitted to fetch it.
bool
IonBuilder::initScopeChain(MDefinition *callee)
{
    MInstruction *scope = nullptr;

    // If the script doesn't use the scopechain, then it's already initialized
    // from earlier. However, always make a scope chain when |needsArgsObj| is true
    // for the script, since arguments object construction requires the scope chain
    // to be passed in.
    if (!info().needsArgsObj() && !analysis().usesScopeChain())
        return true;

    // The scope chain is only tracked in scripts that have NAME opcodes which
    // will try to access the scope. For other scripts, the scope instructions
    // will be held live by resume points and code will still be generated for
    // them, so just use a constant undefined value.
    if (!script()->compileAndGo())
        return abort("non-CNG global scripts are not supported");

    if (JSFunction *fun = info().funMaybeLazy()) {
        if (!callee) {
            MCallee *calleeIns = MCallee::New(alloc());
            current->add(calleeIns);
            callee = calleeIns;
        }
        scope = MFunctionEnvironment::New(alloc(), callee);
        current->add(scope);

        // This reproduce what is done in CallObject::createForFunction. Skip
        // this for analyses, as the script might not have a baseline script
        // with template objects yet.
        if (fun->isHeavyweight() && !info().executionModeIsAnalysis()) {
            if (fun->isNamedLambda()) {
                scope = createDeclEnvObject(callee, scope);
                if (!scope)
                    return false;
            }

            scope = createCallObject(callee, scope);
            if (!scope)
                return false;
        }
    } else {
        // Non-function scripts use the global object as their scope.
        scope = constant(ObjectValue(script()->global()));
    }

    current->setScopeChain(scope);
    return true;
}
// Emit the instruction that materializes the arguments object for scripts
// that need one, and register it on the current block.
bool
IonBuilder::initArgumentsObject()
{
    IonSpew(IonSpew_MIR, "%s:%d - Emitting code to initialize arguments object! block=%p",
            script()->filename(), script()->lineno(), current);
    JS_ASSERT(info().needsArgsObj());

    MCreateArgumentsObject *argsObj = MCreateArgumentsObject::New(alloc(), current->scopeChain());
    current->add(argsObj);
    current->setArgumentsObject(argsObj);
    return true;
}
// Insert a type barrier and/or unbox for the OSR value in |slot| so that it
// matches the loop header's deduced |type|/|typeSet|. |*def_| is updated to
// the rewritten definition. Returns false on OOM.
bool
IonBuilder::addOsrValueTypeBarrier(uint32_t slot, MInstruction **def_,
                                   MIRType type, types::TemporaryTypeSet *typeSet)
{
    MInstruction *&def = *def_;
    MBasicBlock *osrBlock = def->block();

    // Clear bogus type information added in newOsrPreheader().
    def->setResultType(MIRType_Value);
    def->setResultTypeSet(nullptr);

    if (typeSet && !typeSet->unknown()) {
        MInstruction *barrier = MTypeBarrier::New(alloc(), def, typeSet);
        osrBlock->insertBefore(osrBlock->lastIns(), barrier);
        osrBlock->rewriteSlot(slot, barrier);
        def = barrier;
    } else if (type == MIRType_Null ||
               type == MIRType_Undefined ||
               type == MIRType_MagicOptimizedArguments)
    {
        // No unbox instruction will be added below, so check the type by
        // adding a type barrier for a singleton type set.
        types::Type ntype = types::Type::PrimitiveType(ValueTypeFromMIRType(type));
        typeSet = alloc_->lifoAlloc()->new_<types::TemporaryTypeSet>(ntype);
        if (!typeSet)
            return false;
        MInstruction *barrier = MTypeBarrier::New(alloc(), def, typeSet);
        osrBlock->insertBefore(osrBlock->lastIns(), barrier);
        osrBlock->rewriteSlot(slot, barrier);
        def = barrier;
    }

    // Replace the boxed OSR value with a definition of the deduced type:
    // an unbox for the standard types, a constant for null/undefined, or
    // the shared lazyArguments_ sentinel.
    switch (type) {
      case MIRType_Boolean:
      case MIRType_Int32:
      case MIRType_Double:
      case MIRType_String:
      case MIRType_Object:
        if (type != def->type()) {
            MUnbox *unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
            osrBlock->insertBefore(osrBlock->lastIns(), unbox);
            osrBlock->rewriteSlot(slot, unbox);
            def = unbox;
        }
        break;

      case MIRType_Null:
      {
        MConstant *c = MConstant::New(alloc(), NullValue());
        osrBlock->insertBefore(osrBlock->lastIns(), c);
        osrBlock->rewriteSlot(slot, c);
        def = c;
        break;
      }

      case MIRType_Undefined:
      {
        MConstant *c = MConstant::New(alloc(), UndefinedValue());
        osrBlock->insertBefore(osrBlock->lastIns(), c);
        osrBlock->rewriteSlot(slot, c);
        def = c;
        break;
      }

      case MIRType_MagicOptimizedArguments:
        JS_ASSERT(lazyArguments_);
        osrBlock->rewriteSlot(slot, lazyArguments_);
        def = lazyArguments_;
        break;

      default:
        break;
    }

    JS_ASSERT(def == osrBlock->getSlot(slot));
    return true;
}
// After the loop containing the OSR entry has been processed, add unboxes
// and type barriers in the OSR block so incoming values match the header
// phis' final types, and propagate those types to the preheader phis.
// No-op when not compiling for OSR.
bool
IonBuilder::maybeAddOsrTypeBarriers()
{
    if (!info().osrPc())
        return true;

    // The loop has successfully been processed, and the loop header phis
    // have their final type. Add unboxes and type barriers in the OSR
    // block to check that the values have the appropriate type, and update
    // the types in the preheader.
    MBasicBlock *osrBlock = graph().osrBlock();
    if (!osrBlock) {
        // Because IonBuilder does not compile catch blocks, it's possible to
        // end up without an OSR block if the OSR pc is only reachable via a
        // break-statement inside the catch block. For instance:
        //
        //   for (;;) {
        //       try {
        //           throw 3;
        //       } catch(e) {
        //           break;
        //       }
        //   }
        //   while (..) { } // <= OSR here, only reachable via catch block.
        //
        // For now we just abort in this case.
        JS_ASSERT(graph().hasTryBlock());
        return abort("OSR block only reachable through catch block");
    }

    MBasicBlock *preheader = osrBlock->getSuccessor(0);
    MBasicBlock *header = preheader->getSuccessor(0);
    // Position of the OSR block among the preheader's predecessors.
    static const size_t OSR_PHI_POSITION = 1;
    JS_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);

    // Skip header phis below the first argument slot.
    MPhiIterator headerPhi = header->phisBegin();
    while (headerPhi != header->phisEnd() && headerPhi->slot() < info().startArgSlot())
        headerPhi++;

    for (uint32_t i = info().startArgSlot(); i < osrBlock->stackDepth(); i++, headerPhi++) {
        // Aliased slots are never accessed, since they need to go through
        // the callobject. The typebarriers are added there and can be
        // discarded here.
        if (info().isSlotAliasedAtOsr(i))
            continue;

        MInstruction *def = osrBlock->getSlot(i)->toInstruction();

        JS_ASSERT(headerPhi->slot() == i);
        MPhi *preheaderPhi = preheader->getSlot(i)->toPhi();

        MIRType type = headerPhi->type();
        types::TemporaryTypeSet *typeSet = headerPhi->resultTypeSet();

        if (!addOsrValueTypeBarrier(i, &def, type, typeSet))
            return false;

        preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
        preheaderPhi->setResultType(type);
        preheaderPhi->setResultTypeSet(typeSet);
    }

    return true;
}
// We try to build a control-flow graph in the order that it would be built as
// if traversing the AST. This leads to a nice ordering and lets us build SSA
// in one pass, since the bytecode is structured.
//
// We traverse the bytecode iteratively, maintaining a current basic block.
// Each basic block has a mapping of local slots to instructions, as well as a
// stack depth. As we encounter instructions we mutate this mapping in the
// current block.
//
// Things get interesting when we encounter a control structure. This can be
// either an IFEQ, downward GOTO, or a decompiler hint stashed away in source
// notes. Once we encounter such an opcode, we recover the structure of the
// control flow (its branches and bounds), and push it on a stack.
//
// As we continue traversing the bytecode, we look for points that would
// terminate the topmost control flow path pushed on the stack. These are:
// (1) The bounds of the current structure (end of a loop or join/edge of a
// branch).
// (2) A "return", "break", or "continue" statement.
//
// For (1), we expect that there is a current block in the progress of being
// built, and we complete the necessary edges in the CFG. For (2), we expect
// that there is no active block.
//
// For normal diamond join points, we construct Phi nodes as we add
// predecessors. For loops, care must be taken to propagate Phi nodes back
// through uses in the loop body.
// Main build driver: iterate over the script's bytecode, translating each
// opcode into MIR in the current basic block while maintaining the CFG stack
// of open control structures. Returns false on error; control-flow-related
// failures go through abort(). Returns true either at the natural end of
// control flow (|current| becomes null) or after the final opcode.
bool
IonBuilder::traverseBytecode()
{
    for (;;) {
        JS_ASSERT(pc < info().limitPC());
        for (;;) {
            if (!alloc().ensureBallast())
                return false;
            // Check if we've hit an expected join point or edge in the bytecode.
            // Leaving one control structure could place us at the edge of another,
            // thus |while| instead of |if| so we don't skip any opcodes.
            if (!cfgStack_.empty() && cfgStack_.back().stopAt == pc) {
                ControlStatus status = processCfgStack();
                if (status == ControlStatus_Error)
                    return false;
                if (status == ControlStatus_Abort)
                    return abort("Aborted while processing control flow");
                if (!current)
                    return true;
                continue;
            }
            // Some opcodes need to be handled early because they affect control
            // flow, terminating the current basic block and/or instructing the
            // traversal algorithm to continue from a new pc.
            //
            //   (1) If the opcode does not affect control flow, then the opcode
            //       is inspected and transformed to IR. This is the process_opcode
            //       label.
            //   (2) A loop could be detected via a forward GOTO. In this case,
            //       we don't want to process the GOTO, but the following
            //       instruction.
            //   (3) A RETURN, STOP, BREAK, or CONTINUE may require processing the
            //       CFG stack to terminate open branches.
            //
            // Similar to above, snooping control flow could land us at another
            // control flow point, so we iterate until it's time to inspect a real
            // opcode.
            ControlStatus status;
            if ((status = snoopControlFlow(JSOp(*pc))) == ControlStatus_None)
                break;
            if (status == ControlStatus_Error)
                return false;
            if (status == ControlStatus_Abort)
                return abort("Aborted while processing control flow");
            if (!current)
                return true;
        }
#ifdef DEBUG
        // In debug builds, after compiling this op, check that all values
        // popped by this opcode either:
        //
        //   (1) Have the ImplicitlyUsed flag set on them.
        //   (2) Have more uses than before compiling this op (the value is
        //       used as operand of a new MIR instruction).
        //
        // This is used to catch problems where IonBuilder pops a value without
        // adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
        Vector<MDefinition *, 4, IonAllocPolicy> popped(alloc());
        Vector<size_t, 4, IonAllocPolicy> poppedUses(alloc());
        unsigned nuses = GetUseCount(script_, script_->pcToOffset(pc));
        for (unsigned i = 0; i < nuses; i++) {
            MDefinition *def = current->peek(-int32_t(i + 1));
            if (!popped.append(def) || !poppedUses.append(def->defUseCount()))
                return false;
        }
#endif
        // Nothing in inspectOpcode() is allowed to advance the pc.
        JSOp op = JSOp(*pc);
        if (!inspectOpcode(op))
            return false;
#ifdef DEBUG
        for (size_t i = 0; i < popped.length(); i++) {
            switch (op) {
              case JSOP_POP:
              case JSOP_POPN:
              case JSOP_DUPAT:
              case JSOP_DUP:
              case JSOP_DUP2:
              case JSOP_PICK:
              case JSOP_SWAP:
              case JSOP_SETARG:
              case JSOP_SETLOCAL:
              case JSOP_SETRVAL:
              case JSOP_VOID:
                // Don't require SSA uses for values popped by these ops.
                break;
              case JSOP_POS:
              case JSOP_TOID:
                // These ops may leave their input on the stack without setting
                // the ImplicitlyUsed flag. If this value will be popped immediately,
                // we may replace it with |undefined|, but the difference is
                // not observable.
                JS_ASSERT(i == 0);
                if (current->peek(-1) == popped[0])
                    break;
                // FALL THROUGH
              default:
                JS_ASSERT(popped[i]->isImplicitlyUsed() ||
                          // MNewDerivedTypedObject instances are
                          // often dead unless they escape from the
                          // fn. See IonBuilder::loadTypedObjectData()
                          // for more details.
                          popped[i]->isNewDerivedTypedObject() ||
                          popped[i]->defUseCount() > poppedUses[i]);
                break;
            }
        }
#endif
        // Advance to the next opcode.
        pc += js_CodeSpec[op].length;
        current->updateTrackedPc(pc);
    }
    return true;
}
// Inspect the opcode at |pc| for control-flow effects before ordinary
// translation. Returns ControlStatus_None when the opcode has no special
// control-flow role and should be handled by inspectOpcode(); otherwise
// returns the status produced by the matching control-flow processor.
IonBuilder::ControlStatus
IonBuilder::snoopControlFlow(JSOp op)
{
    switch (op) {
      case JSOP_NOP:
        // A NOP can carry a source note marking the start of a loop.
        return maybeLoop(op, info().getNote(gsn, pc));
      case JSOP_POP:
        // Likewise, a POP can be annotated as a loop head (e.g. for(init;;)).
        return maybeLoop(op, info().getNote(gsn, pc));
      case JSOP_RETURN:
      case JSOP_RETRVAL:
        return processReturn(op);
      case JSOP_THROW:
        return processThrow();
      case JSOP_GOTO:
      {
        // The source note distinguishes which construct this GOTO implements.
        jssrcnote *sn = info().getNote(gsn, pc);
        switch (sn ? SN_TYPE(sn) : SRC_NULL) {
          case SRC_BREAK:
          case SRC_BREAK2LABEL:
            return processBreak(op, sn);
          case SRC_CONTINUE:
            return processContinue(op);
          case SRC_SWITCHBREAK:
            return processSwitchBreak(op);
          case SRC_WHILE:
          case SRC_FOR_IN:
          case SRC_FOR_OF:
            // while (cond) { }
            return whileOrForInLoop(sn);
          default:
            // Hard assert for now - make an error later.
            MOZ_ASSUME_UNREACHABLE("unknown goto case");
        }
        break;
      }
      case JSOP_TABLESWITCH:
        return tableSwitch(op, info().getNote(gsn, pc));
      case JSOP_IFNE:
        // We should never reach an IFNE, it's a stopAt point, which will
        // trigger closing the loop.
        MOZ_ASSUME_UNREACHABLE("we should never reach an ifne!");
      default:
        break;
    }
    return ControlStatus_None;
}
// Translate a single non-control-flow opcode into MIR, pushing/popping the
// current block's virtual stack as the interpreter would. Must not advance
// |pc| (traverseBytecode() does that). Returns false on error; unsupported
// opcodes abort the compilation.
bool
IonBuilder::inspectOpcode(JSOp op)
{
    switch (op) {
      case JSOP_NOP:
      case JSOP_LINENO:
      case JSOP_LOOPENTRY:
        // No-ops as far as MIR generation is concerned.
        return true;
      case JSOP_LABEL:
        return jsop_label();
      case JSOP_UNDEFINED:
        return pushConstant(UndefinedValue());
      case JSOP_IFEQ:
        return jsop_ifeq(JSOP_IFEQ);
      case JSOP_TRY:
        return jsop_try();
      case JSOP_CONDSWITCH:
        return jsop_condswitch();
      case JSOP_BITNOT:
        return jsop_bitnot();
      case JSOP_BITAND:
      case JSOP_BITOR:
      case JSOP_BITXOR:
      case JSOP_LSH:
      case JSOP_RSH:
      case JSOP_URSH:
        return jsop_bitop(op);
      case JSOP_ADD:
      case JSOP_SUB:
      case JSOP_MUL:
      case JSOP_DIV:
      case JSOP_MOD:
        return jsop_binary(op);
      case JSOP_POS:
        return jsop_pos();
      case JSOP_NEG:
        return jsop_neg();
      case JSOP_AND:
      case JSOP_OR:
        return jsop_andor(op);
      case JSOP_DEFVAR:
      case JSOP_DEFCONST:
        return jsop_defvar(GET_UINT32_INDEX(pc));
      case JSOP_DEFFUN:
        return jsop_deffun(GET_UINT32_INDEX(pc));
      case JSOP_EQ:
      case JSOP_NE:
      case JSOP_STRICTEQ:
      case JSOP_STRICTNE:
      case JSOP_LT:
      case JSOP_LE:
      case JSOP_GT:
      case JSOP_GE:
        return jsop_compare(op);
      // Constant-pushing opcodes.
      case JSOP_DOUBLE:
        return pushConstant(info().getConst(pc));
      case JSOP_STRING:
        return pushConstant(StringValue(info().getAtom(pc)));
      case JSOP_ZERO:
        return pushConstant(Int32Value(0));
      case JSOP_ONE:
        return pushConstant(Int32Value(1));
      case JSOP_NULL:
        return pushConstant(NullValue());
      case JSOP_VOID:
        // void expr: discard the value and push |undefined|.
        current->pop();
        return pushConstant(UndefinedValue());
      case JSOP_HOLE:
        return pushConstant(MagicValue(JS_ELEMENTS_HOLE));
      case JSOP_FALSE:
        return pushConstant(BooleanValue(false));
      case JSOP_TRUE:
        return pushConstant(BooleanValue(true));
      case JSOP_ARGUMENTS:
        return jsop_arguments();
      case JSOP_RUNONCE:
        return jsop_runonce();
      case JSOP_REST:
        return jsop_rest();
      case JSOP_GETARG:
        if (info().argsObjAliasesFormals()) {
            // Formals are stored in the arguments object; read through it.
            MGetArgumentsObjectArg *getArg = MGetArgumentsObjectArg::New(alloc(),
                                                                         current->argumentsObject(),
                                                                         GET_ARGNO(pc));
            current->add(getArg);
            current->push(getArg);
        } else {
            current->pushArg(GET_ARGNO(pc));
        }
        return true;
      case JSOP_SETARG:
        return jsop_setarg(GET_ARGNO(pc));
      case JSOP_GETLOCAL:
        current->pushLocal(GET_LOCALNO(pc));
        return true;
      case JSOP_SETLOCAL:
        current->setLocal(GET_LOCALNO(pc));
        return true;
      case JSOP_POP:
        current->pop();
        // POP opcodes frequently appear where values are killed, e.g. after
        // SET* opcodes. Place a resume point afterwards to avoid capturing
        // the dead value in later snapshots, except in places where that
        // resume point is obviously unnecessary.
        if (pc[JSOP_POP_LENGTH] == JSOP_POP)
            return true;
        return maybeInsertResume();
      case JSOP_POPN:
        for (uint32_t i = 0, n = GET_UINT16(pc); i < n; i++)
            current->pop();
        return true;
      case JSOP_DUPAT:
        current->pushSlot(current->stackDepth() - 1 - GET_UINT24(pc));
        return true;
      case JSOP_NEWINIT:
        if (GET_UINT8(pc) == JSProto_Array)
            return jsop_newarray(0);
        return jsop_newobject();
      case JSOP_NEWARRAY:
        return jsop_newarray(GET_UINT24(pc));
      case JSOP_NEWOBJECT:
        return jsop_newobject();
      case JSOP_INITELEM:
        return jsop_initelem();
      case JSOP_INITELEM_ARRAY:
        return jsop_initelem_array();
      case JSOP_INITPROP:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_initprop(name);
      }
      case JSOP_MUTATEPROTO:
      {
        return jsop_mutateproto();
      }
      case JSOP_INITPROP_GETTER:
      case JSOP_INITPROP_SETTER: {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_initprop_getter_setter(name);
      }
      case JSOP_INITELEM_GETTER:
      case JSOP_INITELEM_SETTER:
        return jsop_initelem_getter_setter();
      case JSOP_ENDINIT:
        return true;
      case JSOP_FUNCALL:
        return jsop_funcall(GET_ARGC(pc));
      case JSOP_FUNAPPLY:
        return jsop_funapply(GET_ARGC(pc));
      case JSOP_CALL:
      case JSOP_NEW:
        return jsop_call(GET_ARGC(pc), (JSOp)*pc == JSOP_NEW);
      case JSOP_EVAL:
        return jsop_eval(GET_ARGC(pc));
      case JSOP_INT8:
        return pushConstant(Int32Value(GET_INT8(pc)));
      case JSOP_UINT16:
        return pushConstant(Int32Value(GET_UINT16(pc)));
      case JSOP_GETGNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getgname(name);
      }
      case JSOP_BINDGNAME:
        return pushConstant(ObjectValue(script()->global()));
      case JSOP_SETGNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        JSObject *obj = &script()->global();
        return setStaticName(obj, name);
      }
      case JSOP_NAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getname(name);
      }
      case JSOP_GETINTRINSIC:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_intrinsic(name);
      }
      case JSOP_BINDNAME:
        return jsop_bindname(info().getName(pc));
      case JSOP_DUP:
        current->pushSlot(current->stackDepth() - 1);
        return true;
      case JSOP_DUP2:
        return jsop_dup2();
      case JSOP_SWAP:
        current->swapAt(-1);
        return true;
      case JSOP_PICK:
        current->pick(-GET_INT8(pc));
        return true;
      case JSOP_GETALIASEDVAR:
        return jsop_getaliasedvar(ScopeCoordinate(pc));
      case JSOP_SETALIASEDVAR:
        return jsop_setaliasedvar(ScopeCoordinate(pc));
      case JSOP_UINT24:
        return pushConstant(Int32Value(GET_UINT24(pc)));
      case JSOP_INT32:
        return pushConstant(Int32Value(GET_INT32(pc)));
      case JSOP_LOOPHEAD:
        // JSOP_LOOPHEAD is handled when processing the loop header.
        MOZ_ASSUME_UNREACHABLE("JSOP_LOOPHEAD outside loop");
      case JSOP_GETELEM:
      case JSOP_CALLELEM:
        return jsop_getelem();
      case JSOP_SETELEM:
        return jsop_setelem();
      case JSOP_LENGTH:
        return jsop_length();
      case JSOP_NOT:
        return jsop_not();
      case JSOP_THIS:
        return jsop_this();
      case JSOP_CALLEE: {
        MDefinition *callee = getCallee();
        current->push(callee);
        return true;
      }
      case JSOP_GETPROP:
      case JSOP_CALLPROP:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getprop(name);
      }
      case JSOP_SETPROP:
      case JSOP_SETNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_setprop(name);
      }
      case JSOP_DELPROP:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_delprop(name);
      }
      case JSOP_DELELEM:
        return jsop_delelem();
      case JSOP_REGEXP:
        return jsop_regexp(info().getRegExp(pc));
      case JSOP_OBJECT:
        return jsop_object(info().getObject(pc));
      case JSOP_TYPEOF:
      case JSOP_TYPEOFEXPR:
        return jsop_typeof();
      case JSOP_TOID:
        return jsop_toid();
      case JSOP_LAMBDA:
        return jsop_lambda(info().getFunction(pc));
      case JSOP_LAMBDA_ARROW:
        return jsop_lambda_arrow(info().getFunction(pc));
      case JSOP_ITER:
        return jsop_iter(GET_INT8(pc));
      case JSOP_ITERNEXT:
        return jsop_iternext();
      case JSOP_MOREITER:
        return jsop_itermore();
      case JSOP_ENDITER:
        return jsop_iterend();
      case JSOP_IN:
        return jsop_in();
      case JSOP_SETRVAL:
        JS_ASSERT(!script()->noScriptRval());
        current->setSlot(info().returnValueSlot(), current->pop());
        return true;
      case JSOP_INSTANCEOF:
        return jsop_instanceof();
      case JSOP_DEBUGLEAVEBLOCK:
        return true;
      default:
#ifdef DEBUG
        return abort("Unsupported opcode: %s (line %d)", js_CodeName[op], info().lineno(pc));
#else
        return abort("Unsupported opcode: %d (line %d)", op, info().lineno(pc));
#endif
    }
}
// Given that the current control flow structure has ended forcefully,
// via a return, break, or continue (rather than joining), propagate the
// termination up. For example, a return nested 5 loops deep may terminate
// every outer loop at once, if there are no intervening conditionals:
//
// for (...) {
// for (...) {
// return x;
// }
// }
//
// If |current| is nullptr when this function returns, then there is no more
// control flow to be processed.
// Called when control flow has been forcefully terminated (return, break,
// continue) with no current block. Propagates the termination up through any
// open control structures on the CFG stack.
IonBuilder::ControlStatus
IonBuilder::processControlEnd()
{
    JS_ASSERT(!current);
    // With pending control structures, defer to the CFG stack to decide what
    // happens next.
    if (!cfgStack_.empty())
        return processCfgStack();
    // No more control flow to process: this was the last return in the
    // function.
    return ControlStatus_Ended;
}
// Processes the top of the CFG stack. This is used from two places:
// (1) processControlEnd(), whereby a break, continue, or return may interrupt
// an in-progress CFG structure before reaching its actual termination
// point in the bytecode.
// (2) traverseBytecode(), whereby we reach the last instruction in a CFG
// structure.
// Process the innermost CFG structure on the stack. For as long as doing so
// fully terminates a structure (ControlStatus_Ended), pop it and keep
// propagating upward, mirroring processControlEnd().
IonBuilder::ControlStatus
IonBuilder::processCfgStack()
{
    ControlStatus status;
    for (status = processCfgEntry(cfgStack_.back());
         status == ControlStatus_Ended;
         status = processCfgEntry(cfgStack_.back()))
    {
        popCfgStack();
        if (cfgStack_.empty())
            return ControlStatus_Ended;
    }
    // A join means the innermost structure is finished; discard it.
    if (status == ControlStatus_Joined)
        popCfgStack();
    return status;
}
// Dispatch on the state of a single CFG structure to the handler that knows
// how to finish (or continue) it. Each handler returns a ControlStatus that
// processCfgStack() uses to decide whether to keep unwinding.
IonBuilder::ControlStatus
IonBuilder::processCfgEntry(CFGState &state)
{
    switch (state.state) {
      case CFGState::IF_TRUE:
      case CFGState::IF_TRUE_EMPTY_ELSE:
        return processIfEnd(state);
      case CFGState::IF_ELSE_TRUE:
        return processIfElseTrueEnd(state);
      case CFGState::IF_ELSE_FALSE:
        return processIfElseFalseEnd(state);
      case CFGState::DO_WHILE_LOOP_BODY:
        return processDoWhileBodyEnd(state);
      case CFGState::DO_WHILE_LOOP_COND:
        return processDoWhileCondEnd(state);
      case CFGState::WHILE_LOOP_COND:
        return processWhileCondEnd(state);
      case CFGState::WHILE_LOOP_BODY:
        return processWhileBodyEnd(state);
      case CFGState::FOR_LOOP_COND:
        return processForCondEnd(state);
      case CFGState::FOR_LOOP_BODY:
        return processForBodyEnd(state);
      case CFGState::FOR_LOOP_UPDATE:
        return processForUpdateEnd(state);
      case CFGState::TABLE_SWITCH:
        return processNextTableSwitchCase(state);
      case CFGState::COND_SWITCH_CASE:
        return processCondSwitchCase(state);
      case CFGState::COND_SWITCH_BODY:
        return processCondSwitchBody(state);
      case CFGState::AND_OR:
        return processAndOrEnd(state);
      case CFGState::LABEL:
        return processLabelEnd(state);
      case CFGState::TRY:
        return processTryEnd(state);
      default:
        MOZ_ASSUME_UNREACHABLE("unknown cfgstate");
    }
}
// Finish an if-without-else (or if with an empty else): join the true branch
// into the false block and resume building there.
IonBuilder::ControlStatus
IonBuilder::processIfEnd(CFGState &state)
{
    if (current) {
        // Here, the false block is the join point. Create an edge from the
        // current block to the false block. Note that a RETURN opcode
        // could have already ended the block.
        current->end(MGoto::New(alloc(), state.branch.ifFalse));
        if (!state.branch.ifFalse->addPredecessor(alloc(), current))
            return ControlStatus_Error;
    }
    if (!setCurrentAndSpecializePhis(state.branch.ifFalse))
        return ControlStatus_Error;
    // Keep RPO by moving the join block to the end of the graph, and resume
    // parsing from its pc.
    graph().moveBlockToEnd(current);
    pc = current->pc();
    return ControlStatus_Joined;
}
// End of the true branch of an if-else: remember where the true branch left
// off and switch to building the false branch.
IonBuilder::ControlStatus
IonBuilder::processIfElseTrueEnd(CFGState &state)
{
    // We've reached the end of the true branch of an if-else. Don't
    // create an edge yet, just transition to parsing the false branch.
    state.state = CFGState::IF_ELSE_FALSE;
    state.branch.ifTrue = current;
    state.stopAt = state.branch.falseEnd;
    pc = state.branch.ifFalse->pc();
    if (!setCurrentAndSpecializePhis(state.branch.ifFalse))
        return ControlStatus_Error;
    graph().moveBlockToEnd(current);
    // Narrow value types on the false path based on the branch test, if any.
    if (state.branch.test)
        filterTypesAtTest(state.branch.test);
    return ControlStatus_Jumped;
}
// End of the false branch of an if-else: join whichever of the two branches
// are still live into a new block and resume building there. If both
// branches terminated, control flow has ended.
IonBuilder::ControlStatus
IonBuilder::processIfElseFalseEnd(CFGState &state)
{
    // Update the state to have the latest block from the false path.
    state.branch.ifFalse = current;
    // To create the join node, we need an incoming edge that has not been
    // terminated yet.
    MBasicBlock *pred = state.branch.ifTrue
                        ? state.branch.ifTrue
                        : state.branch.ifFalse;
    MBasicBlock *other = (pred == state.branch.ifTrue) ? state.branch.ifFalse : state.branch.ifTrue;
    if (!pred)
        return ControlStatus_Ended;
    // Create a new block to represent the join.
    MBasicBlock *join = newBlock(pred, state.branch.falseEnd);
    if (!join)
        return ControlStatus_Error;
    // Create edges from the true and false blocks as needed.
    pred->end(MGoto::New(alloc(), join));
    if (other) {
        other->end(MGoto::New(alloc(), join));
        if (!join->addPredecessor(alloc(), other))
            return ControlStatus_Error;
    }
    // Ignore unreachable remainder of false block if existent.
    if (!setCurrentAndSpecializePhis(join))
        return ControlStatus_Error;
    pc = current->pc();
    return ControlStatus_Joined;
}
// Finish a loop whose body can never reach the backedge (e.g. it always
// returns or breaks). Such a "loop" is really straight-line code: undo the
// loop depth bookkeeping and join any break edges into the successor.
IonBuilder::ControlStatus
IonBuilder::processBrokenLoop(CFGState &state)
{
    JS_ASSERT(!current);
    JS_ASSERT(loopDepth_);
    loopDepth_--;
    // A broken loop is not a real loop (it has no header or backedge), so
    // reset the loop depth.
    for (MBasicBlockIterator i(graph().begin(state.loop.entry)); i != graph().end(); i++) {
        if (i->loopDepth() > loopDepth_)
            i->setLoopDepth(i->loopDepth() - 1);
    }
    // If the loop started with a condition (while/for) then even if the
    // structure never actually loops, the condition itself can still fail and
    // thus we must resume at the successor, if one exists.
    if (!setCurrentAndSpecializePhis(state.loop.successor))
        return ControlStatus_Error;
    if (current) {
        JS_ASSERT(current->loopDepth() == loopDepth_);
        graph().moveBlockToEnd(current);
    }
    // Join the breaks together and continue parsing.
    if (state.loop.breaks) {
        MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc);
        if (!block)
            return ControlStatus_Error;
        if (current) {
            current->end(MGoto::New(alloc(), block));
            if (!block->addPredecessor(alloc(), current))
                return ControlStatus_Error;
        }
        if (!setCurrentAndSpecializePhis(block))
            return ControlStatus_Error;
    }
    // If the loop is not gated on a condition, and has only returns, we'll
    // reach this case. For example:
    // do { ... return; } while ();
    if (!current)
        return ControlStatus_Ended;
    // Otherwise, the loop is gated on a condition and/or has breaks so keep
    // parsing at the successor.
    pc = current->pc();
    return ControlStatus_Joined;
}
// Close a real loop: wire the backedge into the header, propagate header
// phis into the successor and any break exits, and resume building after the
// loop. May instead restart the whole loop body if the backedge introduced
// new types (AbortReason_Disable).
IonBuilder::ControlStatus
IonBuilder::finishLoop(CFGState &state, MBasicBlock *successor)
{
    JS_ASSERT(current);
    JS_ASSERT(loopDepth_);
    loopDepth_--;
    JS_ASSERT_IF(successor, successor->loopDepth() == loopDepth_);
    // Compute phis in the loop header and propagate them throughout the loop,
    // including the successor.
    AbortReason r = state.loop.entry->setBackedge(current);
    if (r == AbortReason_Alloc)
        return ControlStatus_Error;
    if (r == AbortReason_Disable) {
        // If there are types for variables on the backedge that were not
        // present at the original loop header, then uses of the variables'
        // phis may have generated incorrect nodes. The new types have been
        // incorporated into the header phis, so remove all blocks for the
        // loop body and restart with the new types.
        return restartLoop(state);
    }
    if (successor) {
        graph().moveBlockToEnd(successor);
        successor->inheritPhis(state.loop.entry);
    }
    if (state.loop.breaks) {
        // Propagate phis placed in the header to individual break exit points.
        DeferredEdge *edge = state.loop.breaks;
        while (edge) {
            edge->block->inheritPhis(state.loop.entry);
            edge = edge->next;
        }
        // Create a catch block to join all break exits.
        MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc);
        if (!block)
            return ControlStatus_Error;
        if (successor) {
            // Finally, create an unconditional edge from the successor to the
            // catch block.
            successor->end(MGoto::New(alloc(), block));
            if (!block->addPredecessor(alloc(), successor))
                return ControlStatus_Error;
        }
        successor = block;
    }
    if (!setCurrentAndSpecializePhis(successor))
        return ControlStatus_Error;
    // An infinite loop (for (;;) { }) will not have a successor.
    if (!current)
        return ControlStatus_Ended;
    pc = current->pc();
    return ControlStatus_Joined;
}
// Throw away the MIR generated for a loop body and rebuild it from the
// header, which now carries updated phi types. Note: |state| is deliberately
// taken by value — popCfgStack()/pushLoop() below mutate cfgStack_, which
// could invalidate a reference into the stack.
IonBuilder::ControlStatus
IonBuilder::restartLoop(CFGState state)
{
    spew("New types at loop header, restarting loop body");
    if (js_JitOptions.limitScriptSize) {
        // Give up rather than restarting the same loop indefinitely.
        if (++numLoopRestarts_ >= MAX_LOOP_RESTARTS)
            return ControlStatus_Abort;
    }
    MBasicBlock *header = state.loop.entry;
    // Remove all blocks in the loop body other than the header, which has phis
    // of the appropriate type and incoming edges to preserve.
    graph().removeBlocksAfter(header);
    // Remove all instructions from the header itself, and all resume points
    // except the entry resume point.
    header->discardAllInstructions();
    header->discardAllResumePoints(/* discardEntry = */ false);
    header->setStackDepth(header->getPredecessor(0)->stackDepth());
    // Replace the old CFG entry with a fresh one describing the same loop.
    popCfgStack();
    loopDepth_++;
    if (!pushLoop(state.loop.initialState, state.loop.initialStopAt, header, state.loop.osr,
                  state.loop.loopHead, state.loop.initialPc,
                  state.loop.bodyStart, state.loop.bodyEnd,
                  state.loop.exitpc, state.loop.continuepc))
    {
        return ControlStatus_Error;
    }
    CFGState &nstate = cfgStack_.back();
    nstate.loop.condpc = state.loop.condpc;
    nstate.loop.updatepc = state.loop.updatepc;
    nstate.loop.updateEnd = state.loop.updateEnd;
    // Don't specializePhis(), as the header has been visited before and the
    // phis have already had their type set.
    setCurrent(header);
    if (!jsop_loophead(nstate.loop.loopHead))
        return ControlStatus_Error;
    pc = nstate.loop.initialPc;
    return ControlStatus_Jumped;
}
// End of a do-while body: fall through (with any deferred continues) into a
// new block for the loop condition and switch to building it.
IonBuilder::ControlStatus
IonBuilder::processDoWhileBodyEnd(CFGState &state)
{
    if (!processDeferredContinues(state))
        return ControlStatus_Error;
    // No current means control flow cannot reach the condition, so this will
    // never loop.
    if (!current)
        return processBrokenLoop(state);
    MBasicBlock *header = newBlock(current, state.loop.updatepc);
    if (!header)
        return ControlStatus_Error;
    current->end(MGoto::New(alloc(), header));
    // Parse the condition next, stopping at its end.
    state.state = CFGState::DO_WHILE_LOOP_COND;
    state.stopAt = state.loop.updateEnd;
    pc = state.loop.updatepc;
    if (!setCurrentAndSpecializePhis(header))
        return ControlStatus_Error;
    return ControlStatus_Jumped;
}
// End of a do-while condition (at the IFNE): pop the condition value, build
// the successor block, and either close the loop or — for a constant-false
// condition — treat it as straight-line code.
IonBuilder::ControlStatus
IonBuilder::processDoWhileCondEnd(CFGState &state)
{
    JS_ASSERT(JSOp(*pc) == JSOP_IFNE);
    // We're guaranteed a |current|, it's impossible to break or return from
    // inside the conditional expression.
    JS_ASSERT(current);
    // Pop the last value, and create the successor block.
    MDefinition *vins = current->pop();
    MBasicBlock *successor = newBlock(current, GetNextPc(pc), loopDepth_ - 1);
    if (!successor)
        return ControlStatus_Error;
    // Test for do {} while(false) and don't create a loop in that case.
    if (vins->isConstant()) {
        MConstant *cte = vins->toConstant();
        if (cte->value().isBoolean() && !cte->value().toBoolean()) {
            current->end(MGoto::New(alloc(), successor));
            current = nullptr;
            state.loop.successor = successor;
            return processBrokenLoop(state);
        }
    }
    // Create the test instruction and end the current block.
    MTest *test = MTest::New(alloc(), vins, state.loop.entry, successor);
    current->end(test);
    return finishLoop(state, successor);
}
// End of a while-loop condition: branch on the condition value into the body
// and the loop successor, then switch to building the body.
IonBuilder::ControlStatus
IonBuilder::processWhileCondEnd(CFGState &state)
{
    JS_ASSERT(JSOp(*pc) == JSOP_IFNE || JSOp(*pc) == JSOP_IFEQ);
    // Balance the stack past the IFNE.
    MDefinition *ins = current->pop();
    // Create the body and successor blocks.
    MBasicBlock *body = newBlock(current, state.loop.bodyStart);
    state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1);
    if (!body || !state.loop.successor)
        return ControlStatus_Error;
    // IFNE jumps into the loop on a true condition; IFEQ exits on true, so
    // the branch targets are swapped.
    MTest *test;
    if (JSOp(*pc) == JSOP_IFNE)
        test = MTest::New(alloc(), ins, body, state.loop.successor);
    else
        test = MTest::New(alloc(), ins, state.loop.successor, body);
    current->end(test);
    state.state = CFGState::WHILE_LOOP_BODY;
    state.stopAt = state.loop.bodyEnd;
    pc = state.loop.bodyStart;
    if (!setCurrentAndSpecializePhis(body))
        return ControlStatus_Error;
    return ControlStatus_Jumped;
}
// End of a while-loop body: merge deferred continues, then take the backedge
// to the loop entry (or treat the loop as broken if the body never falls
// through).
IonBuilder::ControlStatus
IonBuilder::processWhileBodyEnd(CFGState &state)
{
    if (!processDeferredContinues(state))
        return ControlStatus_Error;
    if (!current)
        return processBrokenLoop(state);
    current->end(MGoto::New(alloc(), state.loop.entry));
    return finishLoop(state, state.loop.successor);
}
// End of a for-loop condition: branch into the body and loop successor, then
// switch to building the body.
IonBuilder::ControlStatus
IonBuilder::processForCondEnd(CFGState &state)
{
    JS_ASSERT(JSOp(*pc) == JSOP_IFNE);
    // Balance the stack past the IFNE.
    MDefinition *ins = current->pop();
    // Create the body and successor blocks.
    MBasicBlock *body = newBlock(current, state.loop.bodyStart);
    state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1);
    if (!body || !state.loop.successor)
        return ControlStatus_Error;
    MTest *test = MTest::New(alloc(), ins, body, state.loop.successor);
    current->end(test);
    state.state = CFGState::FOR_LOOP_BODY;
    state.stopAt = state.loop.bodyEnd;
    pc = state.loop.bodyStart;
    if (!setCurrentAndSpecializePhis(body))
        return ControlStatus_Error;
    return ControlStatus_Jumped;
}
// End of a for-loop body: merge deferred continues, then switch to building
// the update clause if there is one and it is reachable.
IonBuilder::ControlStatus
IonBuilder::processForBodyEnd(CFGState &state)
{
    if (!processDeferredContinues(state))
        return ControlStatus_Error;
    // If there is no updatepc, just go right to processing what would be the
    // end of the update clause. Otherwise, |current| might be nullptr; if this is
    // the case, the update is unreachable anyway.
    if (!state.loop.updatepc || !current)
        return processForUpdateEnd(state);
    pc = state.loop.updatepc;
    state.state = CFGState::FOR_LOOP_UPDATE;
    state.stopAt = state.loop.updateEnd;
    return ControlStatus_Jumped;
}
// End of a for-loop update clause (or of the body when there is no update):
// take the backedge to the loop entry, or treat the loop as broken if no
// path reaches here.
IonBuilder::ControlStatus
IonBuilder::processForUpdateEnd(CFGState &state)
{
    // If there is no current, we couldn't reach the loop edge and there was no
    // update clause.
    if (!current)
        return processBrokenLoop(state);
    current->end(MGoto::New(alloc(), state.loop.entry));
    return finishLoop(state, state.loop.successor);
}
// Drop from the deferred-edge list every entry whose source block has been
// deleted (marked dead), returning the filtered list's head.
IonBuilder::DeferredEdge *
IonBuilder::filterDeadDeferredEdges(DeferredEdge *edge)
{
    // Walk the singly linked list through a pointer-to-link so that unlinking
    // an entry is a single store, whether it is the head or interior.
    DeferredEdge **link = &edge;
    while (*link) {
        if ((*link)->block->isDead())
            *link = (*link)->next;
        else
            link = &(*link)->next;
    }
    // There must be at least one deferred edge from a block that was not
    // deleted; blocks are deleted when restarting processing of a loop, and
    // the final version of the loop body will have edges from live blocks.
    JS_ASSERT(edge);
    return edge;
}
// If this loop has deferred |continue| edges, build the update block they
// all jump to and make it the current block. Returns false on error.
bool
IonBuilder::processDeferredContinues(CFGState &state)
{
    // If there are any continues for this loop, and there is an update block,
    // then we need to create a new basic block to house the update.
    if (state.loop.continues) {
        DeferredEdge *edge = filterDeadDeferredEdges(state.loop.continues);
        MBasicBlock *update = newBlock(edge->block, loops_.back().continuepc);
        if (!update)
            return false;
        // Fall-through from the current block, if the body reaches this point.
        if (current) {
            current->end(MGoto::New(alloc(), update));
            if (!update->addPredecessor(alloc(), current))
                return false;
        }
        // No need to use addPredecessor for first edge,
        // because it is already predecessor.
        edge->block->end(MGoto::New(alloc(), update));
        edge = edge->next;
        // Remaining edges
        while (edge) {
            edge->block->end(MGoto::New(alloc(), update));
            if (!update->addPredecessor(alloc(), edge->block))
                return false;
            edge = edge->next;
        }
        state.loop.continues = nullptr;
        // Bug fix: this function returns bool, not ControlStatus. The old
        // code returned ControlStatus_Error here, which only mapped to false
        // by accident of the enum's numeric value.
        if (!setCurrentAndSpecializePhis(update))
            return false;
    }
    return true;
}
// Build a block at |pc| that catches every deferred break edge in |edge|,
// returning it (or nullptr on allocation failure).
MBasicBlock *
IonBuilder::createBreakCatchBlock(DeferredEdge *edge, jsbytecode *pc)
{
    edge = filterDeadDeferredEdges(edge);
    // The first break's block doubles as the new block's creation
    // predecessor, so no addPredecessor() call is needed for it.
    MBasicBlock *successor = newBlock(edge->block, pc);
    if (!successor)
        return nullptr;
    edge->block->end(MGoto::New(alloc(), successor));
    // Hook up every remaining break as an additional predecessor.
    for (DeferredEdge *e = edge->next; e; e = e->next) {
        e->block->end(MGoto::New(alloc(), successor));
        if (!successor->addPredecessor(alloc(), e->block))
            return nullptr;
    }
    return successor;
}
// Advance a tableswitch to its next case/default block, linking fall-through
// from the previous case if it did not break, and set the stop point for the
// new case.
IonBuilder::ControlStatus
IonBuilder::processNextTableSwitchCase(CFGState &state)
{
    JS_ASSERT(state.state == CFGState::TABLE_SWITCH);
    state.tableswitch.currentBlock++;
    // Test if there are still unprocessed successors (cases/default)
    if (state.tableswitch.currentBlock >= state.tableswitch.ins->numBlocks())
        return processSwitchEnd(state.tableswitch.breaks, state.tableswitch.exitpc);
    // Get the next successor
    MBasicBlock *successor = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock);
    // Add current block as predecessor if available.
    // This means the previous case didn't have a break statement.
    // So flow will continue in this block.
    if (current) {
        current->end(MGoto::New(alloc(), successor));
        if (!successor->addPredecessor(alloc(), current))
            return ControlStatus_Error;
    }
    // Insert successor after the current block, to maintain RPO.
    graph().moveBlockToEnd(successor);
    // If this is the last successor the block should stop at the end of the tableswitch
    // Else it should stop at the start of the next successor
    if (state.tableswitch.currentBlock+1 < state.tableswitch.ins->numBlocks())
        state.stopAt = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock+1)->pc();
    else
        state.stopAt = state.tableswitch.exitpc;
    if (!setCurrentAndSpecializePhis(successor))
        return ControlStatus_Error;
    pc = current->pc();
    return ControlStatus_Jumped;
}
// End of the right-hand side of a short-circuit && / ||: jump to the join
// point and resume building there.
IonBuilder::ControlStatus
IonBuilder::processAndOrEnd(CFGState &state)
{
    // We just processed the RHS of an && or || expression.
    // Now jump to the join point (the false block).
    current->end(MGoto::New(alloc(), state.branch.ifFalse));
    if (!state.branch.ifFalse->addPredecessor(alloc(), current))
        return ControlStatus_Error;
    if (!setCurrentAndSpecializePhis(state.branch.ifFalse))
        return ControlStatus_Error;
    // Keep RPO and resume parsing at the join block's pc.
    graph().moveBlockToEnd(current);
    pc = current->pc();
    return ControlStatus_Joined;
}
// Close a labeled statement: collect any deferred break edges targeting this
// label into a catch block, and merge fall-through control flow into it.
IonBuilder::ControlStatus
IonBuilder::processLabelEnd(CFGState &state)
{
    JS_ASSERT(state.state == CFGState::LABEL);

    // If there are no breaks and no current, controlflow is terminated.
    if (!state.label.breaks && !current)
        return ControlStatus_Ended;

    // If there are no breaks to this label, there's nothing to do.
    if (!state.label.breaks)
        return ControlStatus_Joined;

    // Build the join block from the deferred break edges; it becomes the
    // successor for all code after the label.
    MBasicBlock *successor = createBreakCatchBlock(state.label.breaks, state.stopAt);
    if (!successor)
        return ControlStatus_Error;

    // If control also falls off the end of the labeled body, wire it in too.
    if (current) {
        current->end(MGoto::New(alloc(), successor));
        if (!successor->addPredecessor(alloc(), current))
            return ControlStatus_Error;
    }

    pc = state.stopAt;
    if (!setCurrentAndSpecializePhis(successor))
        return ControlStatus_Error;
    return ControlStatus_Joined;
}
// Close a try block: merge fall-through flow into the pre-allocated successor
// block that starts after the try-catch statement.
IonBuilder::ControlStatus
IonBuilder::processTryEnd(CFGState &state)
{
    JS_ASSERT(state.state == CFGState::TRY);

    // No successor means the try body always terminates (return/throw), so
    // there is nothing reachable after it from here.
    if (!state.try_.successor) {
        JS_ASSERT(!current);
        return ControlStatus_Ended;
    }

    if (current) {
        current->end(MGoto::New(alloc(), state.try_.successor));
        if (!state.try_.successor->addPredecessor(alloc(), current))
            return ControlStatus_Error;
    }

    // Start parsing the code after this try-catch statement.
    if (!setCurrentAndSpecializePhis(state.try_.successor))
        return ControlStatus_Error;
    // |current| is now the successor block; keep RPO by moving it to the end.
    graph().moveBlockToEnd(current);
    pc = current->pc();
    return ControlStatus_Joined;
}
// Handle a break statement (a GOTO with a break source note). The current
// block is recorded as a deferred edge on the enclosing label or loop that
// the break targets; the edge is resolved when that construct ends.
IonBuilder::ControlStatus
IonBuilder::processBreak(JSOp op, jssrcnote *sn)
{
    JS_ASSERT(op == JSOP_GOTO);

    JS_ASSERT(SN_TYPE(sn) == SRC_BREAK ||
              SN_TYPE(sn) == SRC_BREAK2LABEL);

    // Find the break target.
    jsbytecode *target = pc + GetJumpOffset(pc);
    DebugOnly<bool> found = false;

    if (SN_TYPE(sn) == SRC_BREAK2LABEL) {
        // break <label>: search enclosing labels, innermost first.
        // The loop counts down; the unsigned index wraps past zero and the
        // |i < length| condition then terminates it.
        for (size_t i = labels_.length() - 1; i < labels_.length(); i--) {
            CFGState &cfg = cfgStack_[labels_[i].cfgEntry];
            JS_ASSERT(cfg.state == CFGState::LABEL);
            if (cfg.stopAt == target) {
                cfg.label.breaks = new(alloc()) DeferredEdge(current, cfg.label.breaks);
                found = true;
                break;
            }
        }
    } else {
        // Plain break: search enclosing loops, innermost first.
        for (size_t i = loops_.length() - 1; i < loops_.length(); i--) {
            CFGState &cfg = cfgStack_[loops_[i].cfgEntry];
            JS_ASSERT(cfg.isLoop());
            if (cfg.loop.exitpc == target) {
                cfg.loop.breaks = new(alloc()) DeferredEdge(current, cfg.loop.breaks);
                found = true;
                break;
            }
        }
    }

    JS_ASSERT(found);

    // Control flow is cut off here; resume after the GOTO.
    setCurrent(nullptr);
    pc += js_CodeSpec[op].length;
    return processControlEnd();
}
// Resolve a continue target: if the pc is itself a GOTO, follow it to its
// destination; otherwise the pc is already the effective target.
static inline jsbytecode *
EffectiveContinue(jsbytecode *pc)
{
    return (JSOp(*pc) == JSOP_GOTO) ? pc + GetJumpOffset(pc) : pc;
}
// Handle a continue statement (a GOTO back to an enclosing loop's continue
// point). Records the current block as a deferred continue edge on that loop.
IonBuilder::ControlStatus
IonBuilder::processContinue(JSOp op)
{
    JS_ASSERT(op == JSOP_GOTO);

    // Find the target loop, innermost first. The loop counts down; the
    // unsigned index wraps past zero and |i < length| terminates it.
    CFGState *found = nullptr;
    jsbytecode *target = pc + GetJumpOffset(pc);
    for (size_t i = loops_.length() - 1; i < loops_.length(); i--) {
        // Match either the raw continue pc or its GOTO-resolved target.
        if (loops_[i].continuepc == target ||
            EffectiveContinue(loops_[i].continuepc) == target)
        {
            found = &cfgStack_[loops_[i].cfgEntry];
            break;
        }
    }

    // There must always be a valid target loop structure. If not, there's
    // probably an off-by-something error in which pc we track.
    JS_ASSERT(found);
    CFGState &state = *found;

    state.loop.continues = new(alloc()) DeferredEdge(current, state.loop.continues);

    // Control flow is cut off here; resume after the GOTO.
    setCurrent(nullptr);
    pc += js_CodeSpec[op].length;
    return processControlEnd();
}
// Handle a break out of a switch statement. Records the current block as a
// deferred break edge on the enclosing table- or cond-switch state.
IonBuilder::ControlStatus
IonBuilder::processSwitchBreak(JSOp op)
{
    JS_ASSERT(op == JSOP_GOTO);

    // Find the target switch, innermost first (count-down loop with unsigned
    // wrap-around termination, as elsewhere in this file).
    CFGState *found = nullptr;
    jsbytecode *target = pc + GetJumpOffset(pc);
    for (size_t i = switches_.length() - 1; i < switches_.length(); i--) {
        if (switches_[i].continuepc == target) {
            found = &cfgStack_[switches_[i].cfgEntry];
            break;
        }
    }

    // There must always be a valid target loop structure. If not, there's
    // probably an off-by-something error in which pc we track.
    JS_ASSERT(found);
    CFGState &state = *found;

    // Pick the break list matching the kind of switch being built.
    DeferredEdge **breaks = nullptr;
    switch (state.state) {
      case CFGState::TABLE_SWITCH:
        breaks = &state.tableswitch.breaks;
        break;
      case CFGState::COND_SWITCH_BODY:
        breaks = &state.condswitch.breaks;
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected switch state.");
    }

    *breaks = new(alloc()) DeferredEdge(current, *breaks);

    // Control flow is cut off here; resume after the GOTO.
    setCurrent(nullptr);
    pc += js_CodeSpec[op].length;
    return processControlEnd();
}
// Close a switch statement: build the join block after the switch from the
// deferred break edges and/or fall-through from the last case body.
IonBuilder::ControlStatus
IonBuilder::processSwitchEnd(DeferredEdge *breaks, jsbytecode *exitpc)
{
    // No break statements, no current.
    // This means that control flow is cut-off from this point
    // (e.g. all cases have return statements).
    if (!breaks && !current)
        return ControlStatus_Ended;

    // Create successor block.
    // If there are breaks, create block with breaks as predecessor
    // Else create a block with current as predecessor
    MBasicBlock *successor = nullptr;
    if (breaks)
        successor = createBreakCatchBlock(breaks, exitpc);
    else
        successor = newBlock(current, exitpc);

    // NOTE(review): this returns ControlStatus_Ended on allocation failure
    // rather than ControlStatus_Error — confirm against callers whether that
    // distinction is intentional.
    if (!successor)
        return ControlStatus_Ended;

    // If there is current, the current block flows into this one.
    // So current is also a predecessor to this block
    if (current) {
        current->end(MGoto::New(alloc(), successor));

        // The |!breaks| case already registered |current| as the predecessor
        // via newBlock above; only add it explicitly for the breaks path.
        if (breaks) {
            if (!successor->addPredecessor(alloc(), current))
                return ControlStatus_Error;
        }
    }

    pc = exitpc;
    if (!setCurrentAndSpecializePhis(successor))
        return ControlStatus_Error;
    return ControlStatus_Joined;
}
// Inspect a POP/NOP opcode plus its (optional) source note to detect the
// start of a for or do-while loop, dispatching to the appropriate builder.
// Returns ControlStatus_None when the opcode does not begin a loop.
IonBuilder::ControlStatus
IonBuilder::maybeLoop(JSOp op, jssrcnote *sn)
{
    // This function looks at the opcode and source note and tries to
    // determine the structure of the loop. For some opcodes, like
    // POP/NOP which are not explicitly control flow, this source note is
    // optional. For opcodes with control flow, like GOTO, an unrecognized
    // or not-present source note is a compilation failure.
    switch (op) {
      case JSOP_POP:
        // for (init; ; update?) ...
        if (sn && SN_TYPE(sn) == SRC_FOR) {
            // Discard the init expression's value before entering the loop.
            current->pop();
            return forLoop(op, sn);
        }
        break;

      case JSOP_NOP:
        if (sn) {
            // do { } while (cond)
            if (SN_TYPE(sn) == SRC_WHILE)
                return doWhileLoop(op, sn);

            // for (; ; update?)
            if (SN_TYPE(sn) == SRC_FOR)
                return forLoop(op, sn);
        }
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
    }

    return ControlStatus_None;
}
// Debug-only sanity check that a JSOP_LOOPHEAD sits where the CFG state on
// top of the stack expects it, and that its source note (if any) points at
// the same backedge IFNE as the loop's own bookkeeping.
void
IonBuilder::assertValidLoopHeadOp(jsbytecode *pc)
{
#ifdef DEBUG
    JS_ASSERT(JSOp(*pc) == JSOP_LOOPHEAD);

    // Make sure this is the next opcode after the loop header,
    // unless the for loop is unconditional.
    CFGState &state = cfgStack_.back();
    JS_ASSERT_IF((JSOp)*(state.loop.entry->pc()) == JSOP_GOTO,
        GetNextPc(state.loop.entry->pc()) == pc);

    // do-while loops have a source note.
    jssrcnote *sn = info().getNote(gsn, pc);
    if (sn) {
        jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 0);

        jsbytecode *expected_ifne;
        switch (state.state) {
          case CFGState::DO_WHILE_LOOP_BODY:
            expected_ifne = state.loop.updateEnd;
            break;

          default:
            MOZ_ASSUME_UNREACHABLE("JSOP_LOOPHEAD unexpected source note");
        }

        // Make sure this loop goes to the same ifne as the loop header's
        // source notes or GOTO.
        JS_ASSERT(ifne == expected_ifne);
    } else {
        JS_ASSERT(state.state != CFGState::DO_WHILE_LOOP_BODY);
    }
#endif
}
// Build the MIR skeleton for a do-while loop: locate the condition and
// backedge from source notes, set up the (possibly OSR) loop header, and
// push a DO_WHILE_LOOP_BODY state so the body is processed next.
IonBuilder::ControlStatus
IonBuilder::doWhileLoop(JSOp op, jssrcnote *sn)
{
    // do { } while() loops have the following structure:
    //    NOP         ; SRC_WHILE (offset to COND)
    //    LOOPHEAD    ; SRC_WHILE (offset to IFNE)
    //    LOOPENTRY
    //    ...         ; body
    //    ...
    //    COND        ; start of condition
    //    ...
    //    IFNE ->     ; goes to LOOPHEAD
    int condition_offset = js_GetSrcNoteOffset(sn, 0);
    jsbytecode *conditionpc = pc + condition_offset;

    // The LOOPHEAD's own note (at pc+1) gives the offset to the IFNE.
    jssrcnote *sn2 = info().getNote(gsn, pc+1);
    int offset = js_GetSrcNoteOffset(sn2, 0);
    jsbytecode *ifne = pc + offset + 1;
    JS_ASSERT(ifne > pc);

    // Verify that the IFNE goes back to a loophead op.
    jsbytecode *loopHead = GetNextPc(pc);
    JS_ASSERT(JSOp(*loopHead) == JSOP_LOOPHEAD);
    JS_ASSERT(loopHead == ifne + GetJumpOffset(ifne));

    jsbytecode *loopEntry = GetNextPc(loopHead);
    bool canOsr = LoopEntryCanIonOsr(loopEntry);
    bool osr = info().hasOsrAt(loopEntry);

    // If this loop is the OSR target, insert a preheader so OSR entry can
    // merge into the loop header.
    if (osr) {
        MBasicBlock *preheader = newOsrPreheader(current, loopEntry);
        if (!preheader)
            return ControlStatus_Error;
        current->end(MGoto::New(alloc(), preheader));
        if (!setCurrentAndSpecializePhis(preheader))
            return ControlStatus_Error;
    }

    // do-while loops carry no extra values on the stack across the backedge.
    unsigned stackPhiCount = 0;
    MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount);
    if (!header)
        return ControlStatus_Error;
    current->end(MGoto::New(alloc(), header));

    jsbytecode *loophead = GetNextPc(pc);
    jsbytecode *bodyStart = GetNextPc(loophead);
    jsbytecode *bodyEnd = conditionpc;
    jsbytecode *exitpc = GetNextPc(ifne);
    if (!analyzeNewLoopTypes(header, bodyStart, exitpc))
        return ControlStatus_Error;
    if (!pushLoop(CFGState::DO_WHILE_LOOP_BODY, conditionpc, header, osr,
                  loopHead, bodyStart, bodyStart, bodyEnd, exitpc, conditionpc))
    {
        return ControlStatus_Error;
    }

    // Remember where the condition starts/ends for later processing states.
    CFGState &state = cfgStack_.back();
    state.loop.updatepc = conditionpc;
    state.loop.updateEnd = ifne;

    if (!setCurrentAndSpecializePhis(header))
        return ControlStatus_Error;
    if (!jsop_loophead(loophead))
        return ControlStatus_Error;

    pc = bodyStart;
    return ControlStatus_Jumped;
}
// Build the MIR skeleton for a while, for-in, or for-of loop. These forms
// evaluate the condition first, so a WHILE_LOOP_COND state is pushed and
// parsing jumps to the condition.
IonBuilder::ControlStatus
IonBuilder::whileOrForInLoop(jssrcnote *sn)
{
    // while (cond) { } loops have the following structure:
    //    GOTO cond   ; SRC_WHILE (offset to IFNE)
    //    LOOPHEAD
    //    ...
    //  cond:
    //    LOOPENTRY
    //    ...
    //    IFNE        ; goes to LOOPHEAD
    // for (x in y) { } loops are similar; the cond will be a MOREITER.
    JS_ASSERT(SN_TYPE(sn) == SRC_FOR_OF || SN_TYPE(sn) == SRC_FOR_IN || SN_TYPE(sn) == SRC_WHILE);
    int ifneOffset = js_GetSrcNoteOffset(sn, 0);
    jsbytecode *ifne = pc + ifneOffset;
    JS_ASSERT(ifne > pc);

    // Verify that the IFNE goes back to a loophead op.
    JS_ASSERT(JSOp(*GetNextPc(pc)) == JSOP_LOOPHEAD);
    JS_ASSERT(GetNextPc(pc) == ifne + GetJumpOffset(ifne));

    jsbytecode *loopEntry = pc + GetJumpOffset(pc);
    bool canOsr = LoopEntryCanIonOsr(loopEntry);
    bool osr = info().hasOsrAt(loopEntry);

    // If this loop is the OSR target, insert a preheader for OSR entry.
    if (osr) {
        MBasicBlock *preheader = newOsrPreheader(current, loopEntry);
        if (!preheader)
            return ControlStatus_Error;
        current->end(MGoto::New(alloc(), preheader));
        if (!setCurrentAndSpecializePhis(preheader))
            return ControlStatus_Error;
    }

    // for-of keeps two values (iterator and result) on the stack across the
    // backedge; for-in keeps one (the iterator); while keeps none.
    unsigned stackPhiCount;
    if (SN_TYPE(sn) == SRC_FOR_OF)
        stackPhiCount = 2;
    else if (SN_TYPE(sn) == SRC_FOR_IN)
        stackPhiCount = 1;
    else
        stackPhiCount = 0;

    MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount);
    if (!header)
        return ControlStatus_Error;
    current->end(MGoto::New(alloc(), header));

    // Skip past the JSOP_LOOPHEAD for the body start.
    jsbytecode *loopHead = GetNextPc(pc);
    jsbytecode *bodyStart = GetNextPc(loopHead);
    jsbytecode *bodyEnd = pc + GetJumpOffset(pc);
    jsbytecode *exitpc = GetNextPc(ifne);
    if (!analyzeNewLoopTypes(header, bodyStart, exitpc))
        return ControlStatus_Error;
    if (!pushLoop(CFGState::WHILE_LOOP_COND, ifne, header, osr,
                  loopHead, bodyEnd, bodyStart, bodyEnd, exitpc))
    {
        return ControlStatus_Error;
    }

    // Parse the condition first.
    if (!setCurrentAndSpecializePhis(header))
        return ControlStatus_Error;
    if (!jsop_loophead(loopHead))
        return ControlStatus_Error;

    pc = bodyEnd;
    return ControlStatus_Jumped;
}
// Build the MIR skeleton for a C-style for loop. Depending on whether a
// condition exists (condpc != ifne) it behaves like a while loop (parse the
// condition first) or like a do-while loop (parse the body first).
IonBuilder::ControlStatus
IonBuilder::forLoop(JSOp op, jssrcnote *sn)
{
    // Skip the NOP or POP.
    JS_ASSERT(op == JSOP_POP || op == JSOP_NOP);
    pc = GetNextPc(pc);

    jsbytecode *condpc = pc + js_GetSrcNoteOffset(sn, 0);
    jsbytecode *updatepc = pc + js_GetSrcNoteOffset(sn, 1);
    jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 2);
    jsbytecode *exitpc = GetNextPc(ifne);

    // for loops have the following structures:
    //
    //   NOP or POP
    //   [GOTO cond | NOP]
    //   LOOPHEAD
    // body:
    //    ; [body]
    // [increment:]
    //    ; [increment]
    // [cond:]
    //   LOOPENTRY
    //   GOTO body
    //
    // If there is a condition (condpc != ifne), this acts similar to a while
    // loop otherwise, it acts like a do-while loop.
    jsbytecode *bodyStart = pc;
    jsbytecode *bodyEnd = updatepc;
    jsbytecode *loopEntry = condpc;
    if (condpc != ifne) {
        // There is a condition: a GOTO jumps forward to it; the body follows.
        JS_ASSERT(JSOp(*bodyStart) == JSOP_GOTO);
        JS_ASSERT(bodyStart + GetJumpOffset(bodyStart) == condpc);
        bodyStart = GetNextPc(bodyStart);
    } else {
        // No loop condition, such as for(j = 0; ; j++)
        if (op != JSOP_NOP) {
            // If the loop starts with POP, we have to skip a NOP.
            JS_ASSERT(JSOp(*bodyStart) == JSOP_NOP);
            bodyStart = GetNextPc(bodyStart);
        }
        loopEntry = GetNextPc(bodyStart);
    }
    jsbytecode *loopHead = bodyStart;
    JS_ASSERT(JSOp(*bodyStart) == JSOP_LOOPHEAD);
    JS_ASSERT(ifne + GetJumpOffset(ifne) == bodyStart);
    bodyStart = GetNextPc(bodyStart);

    bool osr = info().hasOsrAt(loopEntry);
    bool canOsr = LoopEntryCanIonOsr(loopEntry);

    // If this loop is the OSR target, insert a preheader for OSR entry.
    if (osr) {
        MBasicBlock *preheader = newOsrPreheader(current, loopEntry);
        if (!preheader)
            return ControlStatus_Error;
        current->end(MGoto::New(alloc(), preheader));
        if (!setCurrentAndSpecializePhis(preheader))
            return ControlStatus_Error;
    }

    // for loops carry no extra values on the stack across the backedge.
    unsigned stackPhiCount = 0;
    MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount);
    if (!header)
        return ControlStatus_Error;
    current->end(MGoto::New(alloc(), header));

    // If there is no condition, we immediately parse the body. Otherwise, we
    // parse the condition.
    jsbytecode *stopAt;
    CFGState::State initial;
    if (condpc != ifne) {
        pc = condpc;
        stopAt = ifne;
        initial = CFGState::FOR_LOOP_COND;
    } else {
        pc = bodyStart;
        stopAt = bodyEnd;
        initial = CFGState::FOR_LOOP_BODY;
    }

    if (!analyzeNewLoopTypes(header, bodyStart, exitpc))
        return ControlStatus_Error;
    if (!pushLoop(initial, stopAt, header, osr,
                  loopHead, pc, bodyStart, bodyEnd, exitpc, updatepc))
    {
        return ControlStatus_Error;
    }

    // Record the optional condition/update ranges; nullptr marks "absent".
    CFGState &state = cfgStack_.back();
    state.loop.condpc = (condpc != ifne) ? condpc : nullptr;
    state.loop.updatepc = (updatepc != condpc) ? updatepc : nullptr;
    if (state.loop.updatepc)
        state.loop.updateEnd = condpc;

    if (!setCurrentAndSpecializePhis(header))
        return ControlStatus_Error;
    if (!jsop_loophead(loopHead))
        return ControlStatus_Error;

    return ControlStatus_Jumped;
}
int
IonBuilder::CmpSuccessors(const void *a, const void *b)
{
const MBasicBlock *a0 = * (MBasicBlock * const *)a;
const MBasicBlock *b0 = * (MBasicBlock * const *)b;
if (a0->pc() == b0->pc())
return 0;
return (a0->pc() > b0->pc()) ? 1 : -1;
}
// Lower a JSOP_TABLESWITCH: build the MTableSwitch instruction, allocate a
// block per case plus the default, and push a TABLE_SWITCH state so the case
// bodies are processed in pc order.
IonBuilder::ControlStatus
IonBuilder::tableSwitch(JSOp op, jssrcnote *sn)
{
    // TableSwitch op contains the following data
    // (length between data is JUMP_OFFSET_LEN)
    //
    // 0: Offset of default case
    // 1: Lowest number in tableswitch
    // 2: Highest number in tableswitch
    // 3: Offset of case low
    // 4: Offset of case low+1
    // .: ...
    // .: Offset of case high
    JS_ASSERT(op == JSOP_TABLESWITCH);
    JS_ASSERT(SN_TYPE(sn) == SRC_TABLESWITCH);

    // Pop input.
    MDefinition *ins = current->pop();

    // Get the default and exit pc
    jsbytecode *exitpc = pc + js_GetSrcNoteOffset(sn, 0);
    jsbytecode *defaultpc = pc + GET_JUMP_OFFSET(pc);
    JS_ASSERT(defaultpc > pc && defaultpc <= exitpc);

    // Get the low and high from the tableswitch
    jsbytecode *pc2 = pc;
    pc2 += JUMP_OFFSET_LEN;
    int low = GET_JUMP_OFFSET(pc2);
    pc2 += JUMP_OFFSET_LEN;
    int high = GET_JUMP_OFFSET(pc2);
    pc2 += JUMP_OFFSET_LEN;

    // Create MIR instruction
    MTableSwitch *tableswitch = MTableSwitch::New(alloc(), ins, low, high);

    // Create default case
    MBasicBlock *defaultcase = newBlock(current, defaultpc);
    if (!defaultcase)
        return ControlStatus_Error;
    tableswitch->addDefault(defaultcase);
    tableswitch->addBlock(defaultcase);

    // Create cases
    jsbytecode *casepc = nullptr;
    for (int i = 0; i < high-low+1; i++) {
        casepc = pc + GET_JUMP_OFFSET(pc2);

        JS_ASSERT(casepc >= pc && casepc <= exitpc);

        MBasicBlock *caseblock = newBlock(current, casepc);
        if (!caseblock)
            return ControlStatus_Error;

        // If the casepc equals the current pc, it is not a written case,
        // but a filled gap. That way we can use a tableswitch instead of
        // condswitch, even if not all numbers are consecutive.
        // In that case this block goes to the default case
        if (casepc == pc) {
            caseblock->end(MGoto::New(alloc(), defaultcase));
            if (!defaultcase->addPredecessor(alloc(), caseblock))
                return ControlStatus_Error;
        }

        tableswitch->addCase(tableswitch->addSuccessor(caseblock));

        // If this is an actual case (not filled gap),
        // add this block to the list that still needs to get processed
        if (casepc != pc)
            tableswitch->addBlock(caseblock);

        pc2 += JUMP_OFFSET_LEN;
    }

    // Move defaultcase to the end, to maintain RPO.
    graph().moveBlockToEnd(defaultcase);

    JS_ASSERT(tableswitch->numCases() == (uint32_t)(high - low + 1));
    JS_ASSERT(tableswitch->numSuccessors() > 0);

    // Sort the list of blocks that still needs to get processed by pc
    qsort(tableswitch->blocks(), tableswitch->numBlocks(),
          sizeof(MBasicBlock*), CmpSuccessors);

    // Create info
    ControlFlowInfo switchinfo(cfgStack_.length(), exitpc);
    if (!switches_.append(switchinfo))
        return ControlStatus_Error;

    // Use a state to retrieve some information
    CFGState state = CFGState::TableSwitch(exitpc, tableswitch);

    // Save the MIR instruction as last instruction of this block.
    current->end(tableswitch);

    // If there is only one successor the block should stop at the end of the switch
    // Else it should stop at the start of the next successor
    if (tableswitch->numBlocks() > 1)
        state.stopAt = tableswitch->getBlock(1)->pc();
    if (!setCurrentAndSpecializePhis(tableswitch->getBlock(0)))
        return ControlStatus_Error;

    if (!cfgStack_.append(state))
        return ControlStatus_Error;

    // Resume parsing at the first case body.
    pc = current->pc();
    return ControlStatus_Jumped;
}
// After branching on an MTest, narrow the TypeSet of the tested value on
// the branch we are entering: if the test rules out undefined/null, replace
// every stack slot holding the value with a filtered copy. Returns false
// only on allocation failure.
bool
IonBuilder::filterTypesAtTest(MTest *test)
{
    JS_ASSERT(test->ifTrue() == current || test->ifFalse() == current);

    bool trueBranch = test->ifTrue() == current;

    MDefinition *subject = nullptr;
    bool removeUndefined;
    bool removeNull;
    test->filtersUndefinedOrNull(trueBranch, &subject, &removeUndefined, &removeNull);

    // The test filters no undefined or null.
    if (!subject)
        return true;

    // There is no TypeSet that can get filtered.
    if (!subject->resultTypeSet() || subject->resultTypeSet()->unknown())
        return true;

    // Only do this optimization if the typeset does contains null or undefined.
    if ((!(removeUndefined && subject->resultTypeSet()->hasType(types::Type::UndefinedType())) &&
        !(removeNull && subject->resultTypeSet()->hasType(types::Type::NullType()))))
    {
        return true;
    }

    // Find all values on the stack that correspond to the subject
    // and replace it with a MIR with filtered TypeSet information.
    // Create the replacement MIR lazily upon first occurence.
    MDefinition *replace = nullptr;
    for (uint32_t i = 0; i < current->stackDepth(); i++) {
        if (current->getSlot(i) != subject)
            continue;

        // Create replacement MIR with filtered TypesSet.
        if (!replace) {
            types::TemporaryTypeSet *type =
                subject->resultTypeSet()->filter(alloc_->lifoAlloc(), removeUndefined,
                                                 removeNull);
            if (!type)
                return false;

            replace = ensureDefiniteTypeSet(subject, type);
            // Make sure we don't hoist it above the MTest, we can use the
            // 'dependency' of an MInstruction. This is normally used by
            // Alias Analysis, but won't get overwritten, since this
            // instruction doesn't have an AliasSet.
            replace->setDependency(test);
        }
        current->setSlot(i, replace);
    }
    return true;
}
bool
IonBuilder::jsop_label()
{
JS_ASSERT(JSOp(*pc) == JSOP_LABEL);
jsbytecode *endpc = pc + GET_JUMP_OFFSET(pc);
JS_ASSERT(endpc > pc);
ControlFlowInfo label(cfgStack_.length(), endpc);
if (!labels_.append(label))
return false;
return cfgStack_.append(CFGState::Label(endpc));
}
// Begin lowering a JSOP_CONDSWITCH: scan the case chain to find the default
// target and estimate the number of distinct bodies, then push a
// COND_SWITCH_CASE state; the cases themselves are generated later by
// processCondSwitchCase/processCondSwitchBody. Returns false on failure.
bool
IonBuilder::jsop_condswitch()
{
    // CondSwitch op looks as follows:
    //   condswitch [length +exit_pc; first case offset +next-case ]
    //   {
    //     {
    //       ... any code ...
    //       case (+jump) [pcdelta offset +next-case]
    //     }+
    //     default (+jump)
    //     ... jump targets ...
    //   }
    //
    // The default case is always emitted even if there is no default case in
    // the source.  The last case statement pcdelta source note might have a 0
    // offset on the last case (not all the time).
    //
    // A conditional evaluate the condition of each case and compare it to the
    // switch value with a strict equality.  Cases conditions are iterated
    // linearly until one is matching. If one case succeeds, the flow jumps into
    // the corresponding body block.  The body block might alias others and
    // might continue in the next body block if the body is not terminated with
    // a break.
    //
    // Algorithm:
    //  1/ Loop over the case chain to reach the default target
    //   & Estimate the number of uniq bodies.
    //  2/ Generate code for all cases (see processCondSwitchCase).
    //  3/ Generate code for all bodies (see processCondSwitchBody).

    JS_ASSERT(JSOp(*pc) == JSOP_CONDSWITCH);
    jssrcnote *sn = info().getNote(gsn, pc);
    JS_ASSERT(SN_TYPE(sn) == SRC_CONDSWITCH);

    // Get the exit pc
    jsbytecode *exitpc = pc + js_GetSrcNoteOffset(sn, 0);
    jsbytecode *firstCase = pc + js_GetSrcNoteOffset(sn, 1);

    // Iterate all cases in the conditional switch.
    // - Stop at the default case. (always emitted after the last case)
    // - Estimate the number of uniq bodies. This estimation might be off by 1
    //   if the default body alias a case body.
    jsbytecode *curCase = firstCase;
    jsbytecode *lastTarget = GetJumpOffset(curCase) + curCase;
    size_t nbBodies = 2; // default target and the first body.

    JS_ASSERT(pc < curCase && curCase <= exitpc);
    while (JSOp(*curCase) == JSOP_CASE) {
        // Fetch the next case.
        jssrcnote *caseSn = info().getNote(gsn, curCase);
        JS_ASSERT(caseSn && SN_TYPE(caseSn) == SRC_NEXTCASE);
        ptrdiff_t off = js_GetSrcNoteOffset(caseSn, 0);
        curCase = off ? curCase + off : GetNextPc(curCase);
        JS_ASSERT(pc < curCase && curCase <= exitpc);

        // Count non-aliased cases.
        jsbytecode *curTarget = GetJumpOffset(curCase) + curCase;
        if (lastTarget < curTarget)
            nbBodies++;
        lastTarget = curTarget;
    }

    // The current case now be the default case which jump to the body of the
    // default case, which might be behind the last target.
    JS_ASSERT(JSOp(*curCase) == JSOP_DEFAULT);
    jsbytecode *defaultTarget = GetJumpOffset(curCase) + curCase;
    JS_ASSERT(curCase < defaultTarget && defaultTarget <= exitpc);

    // Allocate the current graph state.
    CFGState state = CFGState::CondSwitch(this, exitpc, defaultTarget);
    // Fix: this function returns bool, not ControlStatus; the old code
    // returned ControlStatus_Error here, which only worked because that
    // enumerator happens to convert to false.
    if (!state.condswitch.bodies || !state.condswitch.bodies->init(alloc(), nbBodies))
        return false;

    // We loop on case conditions with processCondSwitchCase.
    JS_ASSERT(JSOp(*firstCase) == JSOP_CASE);
    state.stopAt = firstCase;
    state.state = CFGState::COND_SWITCH_CASE;

    return cfgStack_.append(state);
}
// Construct the CFG state for a conditional switch. The bodies list is
// raw-allocated here and initialized later by the caller via init().
IonBuilder::CFGState
IonBuilder::CFGState::CondSwitch(IonBuilder *builder, jsbytecode *exitpc, jsbytecode *defaultTarget)
{
    CFGState state;
    state.state = COND_SWITCH_CASE;
    state.stopAt = nullptr;
    // NOTE(review): the FixedList storage is allocated without running a
    // constructor (no placement new); presumably FixedList is designed to be
    // set up entirely by init() — confirm against FixedList's definition.
    // allocate() may return nullptr; the caller checks for that.
    state.condswitch.bodies = (FixedList<MBasicBlock *> *)builder->alloc_->allocate(
        sizeof(FixedList<MBasicBlock *>));
    state.condswitch.currentIdx = 0;
    state.condswitch.defaultTarget = defaultTarget;
    // uint32_t(-1) marks "default body index not yet known".
    state.condswitch.defaultIdx = uint32_t(-1);
    state.condswitch.exitpc = exitpc;
    state.condswitch.breaks = nullptr;
    return state;
}
// Construct the CFG state for a labeled statement: parsing stops at
// |exitpc|; the deferred break-edge list starts out empty.
IonBuilder::CFGState
IonBuilder::CFGState::Label(jsbytecode *exitpc)
{
    CFGState state;
    state.label.breaks = nullptr;
    state.stopAt = exitpc;
    state.state = LABEL;
    return state;
}
// Construct the CFG state for a try block: parsing stops at |exitpc| and
// control joins into |successor| (which may be null if the body never
// falls through).
IonBuilder::CFGState
IonBuilder::CFGState::Try(jsbytecode *exitpc, MBasicBlock *successor)
{
    CFGState state;
    state.try_.successor = successor;
    state.stopAt = exitpc;
    state.state = TRY;
    return state;
}
// Generate the comparison chain for one JSOP_CASE of a cond-switch: allocate
// (or reuse, when targets alias) the matching body block and the
// non-matching continuation block, and emit the strict-equality MTest.
// When the default case is reached, switch to processing the bodies.
IonBuilder::ControlStatus
IonBuilder::processCondSwitchCase(CFGState &state)
{
    JS_ASSERT(state.state == CFGState::COND_SWITCH_CASE);
    JS_ASSERT(!state.condswitch.breaks);
    JS_ASSERT(current);
    JS_ASSERT(JSOp(*pc) == JSOP_CASE);
    FixedList<MBasicBlock *> &bodies = *state.condswitch.bodies;
    jsbytecode *defaultTarget = state.condswitch.defaultTarget;
    // Fix: this declaration was corrupted by an HTML-entity mangling
    // ("&curren" -> '¤'); it must be a reference so index updates below are
    // written back into the switch state.
    uint32_t &currentIdx = state.condswitch.currentIdx;
    jsbytecode *lastTarget = currentIdx ? bodies[currentIdx - 1]->pc() : nullptr;

    // Fetch the following case in which we will continue.
    jssrcnote *sn = info().getNote(gsn, pc);
    ptrdiff_t off = js_GetSrcNoteOffset(sn, 0);
    jsbytecode *casePc = off ? pc + off : GetNextPc(pc);
    bool caseIsDefault = JSOp(*casePc) == JSOP_DEFAULT;
    JS_ASSERT(JSOp(*casePc) == JSOP_CASE || caseIsDefault);

    // Allocate the block of the matching case.
    bool bodyIsNew = false;
    MBasicBlock *bodyBlock = nullptr;
    jsbytecode *bodyTarget = pc + GetJumpOffset(pc);
    if (lastTarget < bodyTarget) {
        // If the default body is in the middle or aliasing the current target.
        if (lastTarget < defaultTarget && defaultTarget <= bodyTarget) {
            JS_ASSERT(state.condswitch.defaultIdx == uint32_t(-1));
            state.condswitch.defaultIdx = currentIdx;
            bodies[currentIdx] = nullptr;
            // If the default body does not alias any and it would be allocated
            // later and stored in the defaultIdx location.
            if (defaultTarget < bodyTarget)
                currentIdx++;
        }

        bodyIsNew = true;
        // Pop switch and case operands.
        bodyBlock = newBlockPopN(current, bodyTarget, 2);
        bodies[currentIdx++] = bodyBlock;
    } else {
        // This body alias the previous one.
        JS_ASSERT(lastTarget == bodyTarget);
        JS_ASSERT(currentIdx > 0);
        bodyBlock = bodies[currentIdx - 1];
    }

    if (!bodyBlock)
        return ControlStatus_Error;

    lastTarget = bodyTarget;

    // Allocate the block of the non-matching case.  This can either be a normal
    // case or the default case.
    bool caseIsNew = false;
    MBasicBlock *caseBlock = nullptr;
    if (!caseIsDefault) {
        caseIsNew = true;
        // Pop the case operand.
        caseBlock = newBlockPopN(current, GetNextPc(pc), 1);
    } else {
        // The non-matching case is the default case, which jump directly to its
        // body. Skip the creation of a default case block and directly create
        // the default body if it does not alias any previous body.

        if (state.condswitch.defaultIdx == uint32_t(-1)) {
            // The default target is the last target.
            JS_ASSERT(lastTarget < defaultTarget);
            state.condswitch.defaultIdx = currentIdx++;
            caseIsNew = true;
        } else if (bodies[state.condswitch.defaultIdx] == nullptr) {
            // The default target is in the middle and it does not alias any
            // case target.
            JS_ASSERT(defaultTarget < lastTarget);
            caseIsNew = true;
        } else {
            // The default target is in the middle and it alias a case target.
            JS_ASSERT(defaultTarget <= lastTarget);
            caseBlock = bodies[state.condswitch.defaultIdx];
        }

        // Allocate and register the default body.
        if (caseIsNew) {
            // Pop the case & switch operands.
            caseBlock = newBlockPopN(current, defaultTarget, 2);
            bodies[state.condswitch.defaultIdx] = caseBlock;
        }
    }

    if (!caseBlock)
        return ControlStatus_Error;

    // Terminate the last case condition block by emitting the code
    // corresponding to JSOP_CASE bytecode.
    if (bodyBlock != caseBlock) {
        MDefinition *caseOperand = current->pop();
        MDefinition *switchOperand = current->peek(-1);
        MCompare *cmpResult = MCompare::New(alloc(), switchOperand, caseOperand, JSOP_STRICTEQ);
        cmpResult->infer(inspector, pc);
        JS_ASSERT(!cmpResult->isEffectful());
        current->add(cmpResult);
        current->end(MTest::New(alloc(), cmpResult, bodyBlock, caseBlock));

        // Add last case as predecessor of the body if the body is aliasing
        // the previous case body.
        if (!bodyIsNew && !bodyBlock->addPredecessorPopN(alloc(), current, 1))
            return ControlStatus_Error;

        // Add last case as predecessor of the non-matching case if the
        // non-matching case is an aliased default case. We need to pop the
        // switch operand as we skip the default case block and use the default
        // body block directly.
        JS_ASSERT_IF(!caseIsNew, caseIsDefault);
        if (!caseIsNew && !caseBlock->addPredecessorPopN(alloc(), current, 1))
            return ControlStatus_Error;
    } else {
        // The default case alias the last case body.
        JS_ASSERT(caseIsDefault);
        current->pop(); // Case operand
        current->pop(); // Switch operand
        current->end(MGoto::New(alloc(), bodyBlock));
        if (!bodyIsNew && !bodyBlock->addPredecessor(alloc(), current))
            return ControlStatus_Error;
    }

    if (caseIsDefault) {
        // The last case condition is finished.  Loop in processCondSwitchBody,
        // with potential stops in processSwitchBreak.  Check that the bodies
        // fixed list is over-estimate by at most 1, and shrink the size such as
        // length can be used as an upper bound while iterating bodies.
        JS_ASSERT(currentIdx == bodies.length() || currentIdx + 1 == bodies.length());
        bodies.shrink(bodies.length() - currentIdx);

        // Handle break statements in processSwitchBreak while processing
        // bodies.
        ControlFlowInfo breakInfo(cfgStack_.length() - 1, state.condswitch.exitpc);
        if (!switches_.append(breakInfo))
            return ControlStatus_Error;

        // Jump into the first body.
        currentIdx = 0;
        setCurrent(nullptr);
        state.state = CFGState::COND_SWITCH_BODY;
        return processCondSwitchBody(state);
    }

    // Continue until the case condition.
    if (!setCurrentAndSpecializePhis(caseBlock))
        return ControlStatus_Error;
    pc = current->pc();
    state.stopAt = casePc;
    return ControlStatus_Jumped;
}
// Process the next body block of a cond-switch, chaining fall-through from
// the previous body. When all bodies are done, finish via processSwitchEnd.
IonBuilder::ControlStatus
IonBuilder::processCondSwitchBody(CFGState &state)
{
    JS_ASSERT(state.state == CFGState::COND_SWITCH_BODY);
    JS_ASSERT(pc <= state.condswitch.exitpc);
    FixedList<MBasicBlock *> &bodies = *state.condswitch.bodies;
    // Fix: this declaration was corrupted by an HTML-entity mangling
    // ("&curren" -> '¤'); it must be a reference so the increment below is
    // written back into the switch state.
    uint32_t &currentIdx = state.condswitch.currentIdx;

    JS_ASSERT(currentIdx <= bodies.length());
    if (currentIdx == bodies.length()) {
        JS_ASSERT_IF(current, pc == state.condswitch.exitpc);
        return processSwitchEnd(state.condswitch.breaks, state.condswitch.exitpc);
    }

    // Get the next body
    MBasicBlock *nextBody = bodies[currentIdx++];
    JS_ASSERT_IF(current, pc == nextBody->pc());

    // Fix the reverse post-order iteration.
    graph().moveBlockToEnd(nextBody);

    // The last body continue into the new one.
    if (current) {
        current->end(MGoto::New(alloc(), nextBody));
        if (!nextBody->addPredecessor(alloc(), current))
            return ControlStatus_Error;
    }

    // Continue in the next body.
    if (!setCurrentAndSpecializePhis(nextBody))
        return ControlStatus_Error;
    pc = current->pc();

    // Stop either at the start of the following body, or at the switch exit.
    if (currentIdx < bodies.length())
        state.stopAt = bodies[currentIdx]->pc();
    else
        state.stopAt = state.condswitch.exitpc;
    return ControlStatus_Jumped;
}
// Lower JSOP_AND / JSOP_OR short-circuit evaluation: branch on the LHS
// (which stays on the stack as the potential result) either into the RHS
// block or straight to the join point.
bool
IonBuilder::jsop_andor(JSOp op)
{
    JS_ASSERT(op == JSOP_AND || op == JSOP_OR);

    jsbytecode *rhsStart = pc + js_CodeSpec[op].length;
    jsbytecode *joinStart = pc + GetJumpOffset(pc);
    JS_ASSERT(joinStart > pc);

    // We have to leave the LHS on the stack.
    MDefinition *lhs = current->peek(-1);

    MBasicBlock *evalRhs = newBlock(current, rhsStart);
    MBasicBlock *join = newBlock(current, joinStart);
    if (!evalRhs || !join)
        return false;

    // AND falls into the RHS when the LHS is truthy; OR when it is falsy.
    MTest *test = (op == JSOP_AND)
                  ? MTest::New(alloc(), lhs, evalRhs, join)
                  : MTest::New(alloc(), lhs, join, evalRhs);
    test->infer();
    current->end(test);

    // Push an AND_OR state so processAndOrEnd joins the RHS back in later.
    if (!cfgStack_.append(CFGState::AndOr(joinStart, join)))
        return false;

    return setCurrentAndSpecializePhis(evalRhs);
}
bool
IonBuilder::jsop_dup2()
{
uint32_t lhsSlot = current->stackDepth() - 2;
uint32_t rhsSlot = current->stackDepth() - 1;
current->pushSlot(lhsSlot);
current->pushSlot(rhsSlot);
return true;
}
// Lower JSOP_LOOPHEAD: verify placement (debug only), then emit the
// per-iteration interrupt check and a recompile check for the loop.
bool
IonBuilder::jsop_loophead(jsbytecode *pc)
{
    assertValidLoopHeadOp(pc);

    current->add(MInterruptCheck::New(alloc()));
    insertRecompileCheck();

    return true;
}
// Lower JSOP_IFEQ: pop the condition, create the true/false branch blocks,
// and push an IF or IF_ELSE CFG state depending on the source note. Also
// narrows the condition's TypeSet on the true branch.
bool
IonBuilder::jsop_ifeq(JSOp op)
{
    // IFEQ always has a forward offset.
    jsbytecode *trueStart = pc + js_CodeSpec[op].length;
    jsbytecode *falseStart = pc + GetJumpOffset(pc);
    JS_ASSERT(falseStart > pc);

    // We only handle cases that emit source notes.
    jssrcnote *sn = info().getNote(gsn, pc);
    if (!sn)
        return abort("expected sourcenote");

    MDefinition *ins = current->pop();

    // Create true and false branches.
    MBasicBlock *ifTrue = newBlock(current, trueStart);
    MBasicBlock *ifFalse = newBlock(current, falseStart);
    if (!ifTrue || !ifFalse)
        return false;

    MTest *test = MTest::New(alloc(), ins, ifTrue, ifFalse);
    current->end(test);

    // The bytecode for if/ternary gets emitted either like this:
    //
    //    IFEQ X  ; src note (IF_ELSE, COND) points to the GOTO
    //    ...
    //    GOTO Z
    // X: ...     ; else/else if
    //    ...
    // Z:         ; join
    //
    // Or like this:
    //
    //    IFEQ X  ; src note (IF) has no offset
    //    ...
    // Z: ...     ; join
    //
    // We want to parse the bytecode as if we were parsing the AST, so for the
    // IF_ELSE/COND cases, we use the source note and follow the GOTO. For the
    // IF case, the IFEQ offset is the join point.
    switch (SN_TYPE(sn)) {
      case SRC_IF:
        if (!cfgStack_.append(CFGState::If(falseStart, test)))
            return false;
        break;

      case SRC_IF_ELSE:
      case SRC_COND:
      {
        // Infer the join point from the JSOP_GOTO[X] sitting here, then
        // assert as we much we can that this is the right GOTO.
        jsbytecode *trueEnd = pc + js_GetSrcNoteOffset(sn, 0);
        JS_ASSERT(trueEnd > pc);
        JS_ASSERT(trueEnd < falseStart);
        JS_ASSERT(JSOp(*trueEnd) == JSOP_GOTO);
        JS_ASSERT(!info().getNote(gsn, trueEnd));

        jsbytecode *falseEnd = trueEnd + GetJumpOffset(trueEnd);
        JS_ASSERT(falseEnd > trueEnd);
        JS_ASSERT(falseEnd >= falseStart);

        if (!cfgStack_.append(CFGState::IfElse(trueEnd, falseEnd, test)))
            return false;
        break;
      }

      default:
        MOZ_ASSUME_UNREACHABLE("unexpected source note type");
    }

    // Switch to parsing the true branch. Note that no PC update is needed,
    // it's the next instruction.
    if (!setCurrentAndSpecializePhis(ifTrue))
        return false;

    // Filter the types in the true branch.
    filterTypesAtTest(test);

    return true;
}
// Begin compiling a try block. The catch block itself is never compiled;
// exceptions bail out to baseline. Aborts compilation for unsupported
// configurations (try-finally, inlined frames, arguments analysis).
bool
IonBuilder::jsop_try()
{
    JS_ASSERT(JSOp(*pc) == JSOP_TRY);

    if (!js_JitOptions.compileTryCatch)
        return abort("Try-catch support disabled");

    // Try-finally is not yet supported.
    if (analysis().hasTryFinally())
        return abort("Has try-finally");

    // Try-catch within inline frames is not yet supported.
    JS_ASSERT(!isInlineBuilder());

    // Try-catch during the arguments usage analysis is not yet supported. Code
    // accessing the arguments within the 'catch' block is not accounted for.
    if (info().executionMode() == ArgumentsUsageAnalysis)
        return abort("Try-catch during arguments usage analysis");

    graph().setHasTryBlock();

    jssrcnote *sn = info().getNote(gsn, pc);
    JS_ASSERT(SN_TYPE(sn) == SRC_TRY);

    // Get the pc of the last instruction in the try block. It's a JSOP_GOTO to
    // jump over the catch block.
    jsbytecode *endpc = pc + js_GetSrcNoteOffset(sn, 0);
    JS_ASSERT(JSOp(*endpc) == JSOP_GOTO);
    JS_ASSERT(GetJumpOffset(endpc) > 0);

    jsbytecode *afterTry = endpc + GetJumpOffset(endpc);

    // If controlflow in the try body is terminated (by a return or throw
    // statement), the code after the try-statement may still be reachable
    // via the catch block (which we don't compile) and OSR can enter it.
    // For example:
    //
    //     try {
    //         throw 3;
    //     } catch(e) { }
    //
    //     for (var i=0; i<1000; i++) {}
    //
    // To handle this, we create two blocks: one for the try block and one
    // for the code following the try-catch statement. Both blocks are
    // connected to the graph with an MTest instruction that always jumps to
    // the try block. This ensures the successor block always has a predecessor
    // and later passes will optimize this MTest to a no-op.
    //
    // If the code after the try block is unreachable (control flow in both the
    // try and catch blocks is terminated), only create the try block, to avoid
    // parsing unreachable code.
    MBasicBlock *tryBlock = newBlock(current, GetNextPc(pc));
    if (!tryBlock)
        return false;

    MBasicBlock *successor;
    if (analysis().maybeInfo(afterTry)) {
        // Code after the try-catch is reachable: build both blocks and the
        // always-true MTest described above.
        successor = newBlock(current, afterTry);
        if (!successor)
            return false;

        // Add MTest(true, tryBlock, successorBlock).
        MConstant *true_ = MConstant::New(alloc(), BooleanValue(true));
        current->add(true_);
        current->end(MTest::New(alloc(), true_, tryBlock, successor));
    } else {
        successor = nullptr;
        current->end(MGoto::New(alloc(), tryBlock));
    }

    if (!cfgStack_.append(CFGState::Try(endpc, successor)))
        return false;

    // The baseline compiler should not attempt to enter the catch block
    // via OSR.
    JS_ASSERT(info().osrPc() < endpc || info().osrPc() >= afterTry);

    // Start parsing the try block.
    return setCurrentAndSpecializePhis(tryBlock);
}
IonBuilder::ControlStatus
IonBuilder::processReturn(JSOp op)
{
    // Determine the value being returned, depending on the return opcode.
    MDefinition *def;
    if (op == JSOP_RETURN) {
        // An explicit return: the value is on top of the stack.
        def = current->pop();
    } else if (op == JSOP_RETRVAL) {
        if (script()->noScriptRval()) {
            // The script's return value is unused; return undefined eagerly.
            MInstruction *undef = MConstant::New(alloc(), UndefinedValue());
            current->add(undef);
            def = undef;
        } else {
            // Return the implicit rval slot.
            def = current->getSlot(info().returnValueSlot());
        }
    } else {
        def = nullptr;
        MOZ_ASSUME_UNREACHABLE("unknown return op");
    }

    // Record the frame exit for the profiler before leaving.
    if (instrumentedProfiling()) {
        current->add(MProfilerStackOp::New(alloc(), script(), MProfilerStackOp::Exit,
                                           inliningDepth_));
    }

    MReturn *ret = MReturn::New(alloc(), def);
    current->end(ret);

    if (!graph().addReturn(current))
        return ControlStatus_Error;

    // This block is finished; clear |current| so nothing appends to it.
    setCurrent(nullptr);
    return processControlEnd();
}
IonBuilder::ControlStatus
IonBuilder::processThrow()
{
    MDefinition *exceptionValue = current->pop();

    // MThrow is not marked effectful, so by itself it would resume at the
    // most recent resume point, which can be stale. For example:
    //
    //   (function() {
    //     try {
    //       var x = 1;
    //       foo(); // resume point
    //       x = 2;
    //       throw foo;
    //     } catch(e) {
    //       print(x);
    //     }
    //   ])();
    //
    // Resuming after the call would print 1 rather than 2. To avoid that,
    // anchor a fresh resume point immediately before the MThrow via an MNop.
    //
    // Instructions other than MThrow don't have this problem: they are
    // either effectful (and carry their own resume point) or cannot throw a
    // catchable exception.
    //
    // The resume point is installed unconditionally (not only when the
    // function has a try block) to support the Debugger onExceptionUnwind
    // hook: with debug mode on, we bail out to baseline right after the
    // throw and propagate the exception there, instead of resuming in the
    // associated catch block.
    MNop *anchor = MNop::New(alloc());
    current->add(anchor);
    if (!resumeAfter(anchor))
        return ControlStatus_Error;

    MThrow *throwIns = MThrow::New(alloc(), exceptionValue);
    current->end(throwIns);

    // This block is finished; clear |current| so nothing appends to it.
    setCurrent(nullptr);
    return processControlEnd();
}
bool
IonBuilder::pushConstant(const Value &v)
{
    // Materialize |v| as an MConstant (via the constant() helper) and push
    // it on the virtual stack. Always succeeds.
    current->push(constant(v));
    return true;
}
bool
IonBuilder::jsop_bitnot()
{
MDefinition *input = current->pop();
MBitNot *ins = MBitNot::New(alloc(), input);
current->add(ins);
ins->infer();
current->push(ins);
if (ins->isEffectful() && !resumeAfter(ins))
return false;
return true;
}
bool
IonBuilder::jsop_bitop(JSOp op)
{
    // Operands: rhs is on top of lhs on the virtual stack.
    MDefinition *rhs = current->pop();
    MDefinition *lhs = current->pop();

    // Build the MIR node matching the bitwise/shift opcode.
    MBinaryBitwiseInstruction *ins;
    switch (op) {
      case JSOP_BITAND:
        ins = MBitAnd::New(alloc(), lhs, rhs);
        break;

      case JSOP_BITOR:
        ins = MBitOr::New(alloc(), lhs, rhs);
        break;

      case JSOP_BITXOR:
        ins = MBitXor::New(alloc(), lhs, rhs);
        break;

      case JSOP_LSH:
        ins = MLsh::New(alloc(), lhs, rhs);
        break;

      case JSOP_RSH:
        ins = MRsh::New(alloc(), lhs, rhs);
        break;

      case JSOP_URSH:
        ins = MUrsh::New(alloc(), lhs, rhs);
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("unexpected bitop");
    }

    current->add(ins);
    ins->infer(inspector, pc);
    current->push(ins);

    // Effectful instructions need a resume point after them.
    if (ins->isEffectful())
        return resumeAfter(ins);
    return true;
}
bool
IonBuilder::jsop_binary(JSOp op, MDefinition *left, MDefinition *right)
{
    // JSOP_ADD is a string concatenation when one side is a string and the
    // other is a string, int32, or double.
    bool leftIsString = left->type() == MIRType_String;
    bool rightIsString = right->type() == MIRType_String;
    bool leftIsNumeric = left->type() == MIRType_Int32 || left->type() == MIRType_Double;
    bool rightIsNumeric = right->type() == MIRType_Int32 || right->type() == MIRType_Double;

    if (op == JSOP_ADD &&
        ((leftIsString && (rightIsString || rightIsNumeric)) ||
         (leftIsNumeric && rightIsString)))
    {
        MConcat *concat = MConcat::New(alloc(), left, right);
        current->add(concat);
        current->push(concat);
        return maybeInsertResume();
    }

    // Otherwise emit the arithmetic instruction for this opcode.
    MBinaryArithInstruction *ins;
    switch (op) {
      case JSOP_ADD:
        ins = MAdd::New(alloc(), left, right);
        break;

      case JSOP_SUB:
        ins = MSub::New(alloc(), left, right);
        break;

      case JSOP_MUL:
        ins = MMul::New(alloc(), left, right);
        break;

      case JSOP_DIV:
        ins = MDiv::New(alloc(), left, right);
        break;

      case JSOP_MOD:
        ins = MMod::New(alloc(), left, right);
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
    }

    current->add(ins);
    ins->infer(alloc(), inspector, pc);
    current->push(ins);

    // Effectful instructions carry their own resume point; otherwise one may
    // be inserted opportunistically.
    if (ins->isEffectful())
        return resumeAfter(ins);
    return maybeInsertResume();
}
bool
IonBuilder::jsop_binary(JSOp op)
{
    // Stack convention: the right operand sits on top of the left one.
    MDefinition *rhs = current->pop();
    MDefinition *lhs = current->pop();
    return jsop_binary(op, lhs, rhs);
}
bool
IonBuilder::jsop_pos()
{
if (IsNumberType(current->peek(-1)->type())) {
// Already int32 or double.
return true;
}
// Compile +x as x * 1.
MDefinition *value = current->pop();
MConstant *one = MConstant::New(alloc(), Int32Value(1));
current->add(one);
return jsop_binary(JSOP_MUL, value, one);
}
bool
IonBuilder::jsop_neg()
{
// Since JSOP_NEG does not use a slot, we cannot push the MConstant.
// The MConstant is therefore passed to JSOP_MUL without slot traffic.
MConstant *negator = MConstant::New(alloc(), Int32Value(-1));
current->add(negator);
MDefinition *right = current->pop();
if (!jsop_binary(JSOP_MUL, negator, right))
return false;
return true;
}
// RAII helper: installs |returns| as the graph's return accumulator for the
// current scope and restores the previous accumulator on destruction.
class AutoAccumulateReturns
{
    MIRGraph &graph_;
    MIRGraphReturns *prev_;

  public:
    AutoAccumulateReturns(MIRGraph &graph, MIRGraphReturns &returns)
      : graph_(graph),
        prev_(graph.returnAccumulator())
    {
        graph_.setReturnAccumulator(&returns);
    }
    ~AutoAccumulateReturns() {
        graph_.setReturnAccumulator(prev_);
    }
};
// Inline a scripted (non-native) call target: build the callee's MIR inline
// into the current graph via a nested IonBuilder, then patch all of its
// returns into a single return block that becomes |current|.
bool
IonBuilder::inlineScriptedCall(CallInfo &callInfo, JSFunction *target)
{
    JS_ASSERT(target->hasScript());
    JS_ASSERT(IsIonInlinablePC(pc));

    callInfo.setImplicitlyUsedUnchecked();

    // Ensure sufficient space in the slots: needed for inlining from FUNAPPLY.
    uint32_t depth = current->stackDepth() + callInfo.numFormals();
    if (depth > current->nslots()) {
        if (!current->increaseSlots(depth - current->nslots()))
            return false;
    }

    // Create new |this| on the caller-side for inlined constructors.
    if (callInfo.constructing()) {
        MDefinition *thisDefn = createThis(target, callInfo.fun());
        if (!thisDefn)
            return false;
        callInfo.setThis(thisDefn);
    }

    // Capture formals in the outer resume point.
    callInfo.pushFormals(current);

    MResumePoint *outerResumePoint =
        MResumePoint::New(alloc(), current, pc, callerResumePoint_, MResumePoint::Outer);
    if (!outerResumePoint)
        return false;

    // Pop formals again, except leave |fun| on stack for duration of call.
    callInfo.popFormals(current);
    current->push(callInfo.fun());

    JSScript *calleeScript = target->nonLazyScript();
    BaselineInspector inspector(calleeScript);

    // Improve type information of |this| when not set.
    if (callInfo.constructing() &&
        !callInfo.thisArg()->resultTypeSet() &&
        calleeScript->types)
    {
        types::StackTypeSet *types = types::TypeScript::ThisTypes(calleeScript);
        if (!types->unknown()) {
            // Clone into the compiler's lifo arena so the type set outlives
            // this call site's construction.
            types::TemporaryTypeSet *clonedTypes = types->clone(alloc_->lifoAlloc());
            if (!clonedTypes)
                return oom();
            MTypeBarrier *barrier = MTypeBarrier::New(alloc(), callInfo.thisArg(), clonedTypes);
            current->add(barrier);
            callInfo.setThis(barrier);
        }
    }

    // Start inlining: build a CompileInfo describing the callee frame.
    LifoAlloc *lifoAlloc = alloc_->lifoAlloc();
    CompileInfo *info = lifoAlloc->new_<CompileInfo>(calleeScript, target,
                                                     (jsbytecode *)nullptr, callInfo.constructing(),
                                                     this->info().executionMode(),
                                                     /* needsArgsObj = */ false);
    if (!info)
        return false;

    // Collect every return in the callee so they can be merged below.
    MIRGraphReturns returns(alloc());
    AutoAccumulateReturns aar(graph(), returns);

    // Build the graph.
    IonBuilder inlineBuilder(analysisContext, compartment, options, &alloc(), &graph(), constraints(),
                             &inspector, info, &optimizationInfo(), nullptr, inliningDepth_ + 1,
                             loopDepth_);
    if (!inlineBuilder.buildInline(this, outerResumePoint, callInfo)) {
        if (analysisContext && analysisContext->isExceptionPending()) {
            IonSpew(IonSpew_Abort, "Inline builder raised exception.");
            abortReason_ = AbortReason_Error;
            return false;
        }

        // Inlining the callee failed. Mark the callee as uninlineable only if
        // the inlining was aborted for a non-exception reason.
        if (inlineBuilder.abortReason_ == AbortReason_Disable) {
            calleeScript->setUninlineable();
            abortReason_ = AbortReason_Inlining;
        } else if (inlineBuilder.abortReason_ == AbortReason_Inlining) {
            abortReason_ = AbortReason_Inlining;
        }

        return false;
    }

    // Create return block.
    jsbytecode *postCall = GetNextPc(pc);
    MBasicBlock *returnBlock = newBlock(nullptr, postCall);
    if (!returnBlock)
        return false;
    returnBlock->setCallerResumePoint(callerResumePoint_);

    // When profiling add InlineExit instruction to indicate end of inlined function.
    if (instrumentedProfiling())
        returnBlock->add(MProfilerStackOp::New(alloc(), nullptr, MProfilerStackOp::InlineExit));

    // Inherit the slots from current and pop |fun|.
    returnBlock->inheritSlots(current);
    returnBlock->pop();

    // Accumulate return values.
    if (returns.empty()) {
        // Inlining of functions that have no exit is not supported.
        calleeScript->setUninlineable();
        abortReason_ = AbortReason_Inlining;
        return false;
    }
    MDefinition *retvalDefn = patchInlinedReturns(callInfo, returns, returnBlock);
    if (!retvalDefn)
        return false;
    returnBlock->push(retvalDefn);

    // Initialize entry slots now that the stack has been fixed up.
    if (!returnBlock->initEntrySlots(alloc()))
        return false;

    return setCurrentAndSpecializePhis(returnBlock);
}
MDefinition *
IonBuilder::patchInlinedReturn(CallInfo &callInfo, MBasicBlock *exit, MBasicBlock *bottom)
{
    // Strip the trailing MReturn off the exit block; its operand is the
    // value the inlined function returned.
    MDefinition *retDef = exit->lastIns()->toReturn()->input();
    exit->discardLastIns();

    if (callInfo.constructing()) {
        // A constructor call must yield an object. If the return type is
        // statically unknown, detect objects dynamically; if it is a known
        // non-object, the result is |this|.
        if (retDef->type() == MIRType_Value) {
            MReturnFromCtor *filter = MReturnFromCtor::New(alloc(), retDef, callInfo.thisArg());
            exit->add(filter);
            retDef = filter;
        } else if (retDef->type() != MIRType_Object) {
            retDef = callInfo.thisArg();
        }
    } else if (callInfo.isSetter()) {
        // Setters produce their argument, not whatever value they returned.
        retDef = callInfo.getArg(0);
    }

    // Route the exit block into |bottom| in place of the discarded return.
    exit->end(MGoto::New(alloc(), bottom));
    if (!bottom->addPredecessorWithoutPhis(exit))
        return nullptr;

    return retDef;
}
MDefinition *
IonBuilder::patchInlinedReturns(CallInfo &callInfo, MIRGraphReturns &returns, MBasicBlock *bottom)
{
    // Rewrite every MReturn into an MGoto to |bottom|, producing the single
    // MDefinition that carries the inlined call's result, or nullptr on
    // failure.
    JS_ASSERT(returns.length() > 0);

    // A single exit needs no merging.
    if (returns.length() == 1)
        return patchInlinedReturn(callInfo, returns[0], bottom);

    // Multiple exits: merge their values through a phi hosted in |bottom|.
    MPhi *merged = MPhi::New(alloc(), bottom->stackDepth());
    if (!merged->reserveLength(returns.length()))
        return nullptr;

    for (size_t i = 0; i < returns.length(); i++) {
        MDefinition *retDef = patchInlinedReturn(callInfo, returns[i], bottom);
        if (!retDef)
            return nullptr;
        merged->addInput(retDef);
    }

    bottom->addPhi(merged);
    return merged;
}
// Decide whether |target| should be inlined at the current call site.
// Natives are always deferred to inlineNativeCall(); scripted targets go
// through canInlineTarget() plus size/hotness heuristics. On a positive
// decision, TI watch-state and the has-been-inlined flag are updated.
IonBuilder::InliningDecision
IonBuilder::makeInliningDecision(JSFunction *target, CallInfo &callInfo)
{
    // When there is no target, inlining is impossible.
    if (target == nullptr)
        return InliningDecision_DontInline;

    // Never inline during the arguments usage analysis.
    if (info().executionMode() == ArgumentsUsageAnalysis)
        return InliningDecision_DontInline;

    // Native functions provide their own detection in inlineNativeCall().
    if (target->isNative())
        return InliningDecision_Inline;

    // Determine whether inlining is possible at callee site
    InliningDecision decision = canInlineTarget(target, callInfo);
    if (decision != InliningDecision_Inline)
        return decision;

    // Heuristics!
    JSScript *targetScript = target->nonLazyScript();

    // Skip heuristics if we have an explicit hint to inline.
    if (!targetScript->shouldInline()) {
        // Cap the inlining depth. Small functions get a deeper budget than
        // large ones.
        if (js_JitOptions.isSmallFunction(targetScript)) {
            if (inliningDepth_ >= optimizationInfo().smallFunctionMaxInlineDepth())
                return DontInline(targetScript, "Vetoed: exceeding allowed inline depth");
        } else {
            if (inliningDepth_ >= optimizationInfo().maxInlineDepth())
                return DontInline(targetScript, "Vetoed: exceeding allowed inline depth");

            if (targetScript->hasLoops())
                return DontInline(targetScript, "Vetoed: big function that contains a loop");

            // Caller must not be excessively large.
            if (script()->length() >= optimizationInfo().inliningMaxCallerBytecodeLength())
                return DontInline(targetScript, "Vetoed: caller excessively large");
        }

        // Callee must not be excessively large.
        // This heuristic also applies to the callsite as a whole.
        if (targetScript->length() > optimizationInfo().inlineMaxTotalBytecodeLength())
            return DontInline(targetScript, "Vetoed: callee excessively large");

        // Callee must have been called a few times to have somewhat stable
        // type information, except for definite properties analysis,
        // as the caller has not run yet.
        if (targetScript->getUseCount() < optimizationInfo().usesBeforeInlining() &&
            info().executionMode() != DefinitePropertiesAnalysis)
        {
            return DontInline(targetScript, "Vetoed: callee is insufficiently hot.");
        }
    }

    // TI calls ObjectStateChange to trigger invalidation of the caller.
    types::TypeObjectKey *targetType = types::TypeObjectKey::get(target);
    targetType->watchStateChangeForInlinedCall(constraints());

    // We mustn't relazify functions that have been inlined, because there's
    // no way to tell if it safe to do so.
    script()->setHasBeenInlined();

    return InliningDecision_Inline;
}
bool
IonBuilder::selectInliningTargets(ObjectVector &targets, CallInfo &callInfo, BoolVector &choiceSet,
                                  uint32_t *numInlineable)
{
    *numInlineable = 0;
    uint32_t inlinedBytes = 0;

    // Query each candidate individually; record the verdicts in |choiceSet|.
    if (!choiceSet.reserve(targets.length()))
        return false;

    for (size_t i = 0; i < targets.length(); i++) {
        JSFunction *target = &targets[i]->as<JSFunction>();

        bool canInline;
        InliningDecision decision = makeInliningDecision(target, callInfo);
        switch (decision) {
          case InliningDecision_Error:
            return false;
          case InliningDecision_DontInline:
            canInline = false;
            break;
          case InliningDecision_Inline:
            canInline = true;
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unhandled InliningDecision value!");
        }

        // Keep the cumulative inlined bytecode at this callsite under the
        // per-callsite cap.
        if (canInline && target->isInterpreted()) {
            inlinedBytes += target->nonLazyScript()->length();
            if (inlinedBytes > optimizationInfo().inlineMaxTotalBytecodeLength())
                canInline = false;
        }

        choiceSet.append(canInline);
        if (canInline)
            *numInlineable += 1;
    }

    JS_ASSERT(choiceSet.length() == targets.length());
    return true;
}
// A property cache is usable for dispatch-based inlining only when it keys
// on |thisDef| and carries a non-empty inline property table.
static bool
CanInlineGetPropertyCache(MGetPropertyCache *cache, MDefinition *thisDef)
{
    JS_ASSERT(cache->object()->type() == MIRType_Object);

    if (cache->object() != thisDef)
        return false;

    InlinePropertyTable *table = cache->propTable();
    return table && table->numEntries() > 0;
}
MGetPropertyCache *
IonBuilder::getInlineableGetPropertyCache(CallInfo &callInfo)
{
    // Constructing calls never qualify.
    if (callInfo.constructing())
        return nullptr;

    // Both |this| and the callee must be known objects.
    MDefinition *thisDef = callInfo.thisArg();
    MDefinition *funcDef = callInfo.fun();
    if (thisDef->type() != MIRType_Object || funcDef->type() != MIRType_Object)
        return nullptr;

    // Case 1: the callee is directly an MGetPropertyCache with no uses,
    // which may therefore be optimized away.
    if (funcDef->isGetPropertyCache()) {
        MGetPropertyCache *cache = funcDef->toGetPropertyCache();
        if (!cache->hasUses() && CanInlineGetPropertyCache(cache, thisDef))
            return cache;
        return nullptr;
    }

    // Case 2: the common pattern
    //     MTypeBarrier[MIRType_Object] <- MGetPropertyCache
    // where the barrier is unused and the cache feeds (at most) the barrier.
    if (funcDef->isTypeBarrier()) {
        MTypeBarrier *barrier = funcDef->toTypeBarrier();
        if (barrier->hasUses() || barrier->type() != MIRType_Object)
            return nullptr;
        if (!barrier->input()->isGetPropertyCache())
            return nullptr;

        MGetPropertyCache *cache = barrier->input()->toGetPropertyCache();
        if (cache->hasUses() && !cache->hasOneUse())
            return nullptr;
        if (!CanInlineGetPropertyCache(cache, thisDef))
            return nullptr;
        return cache;
    }

    return nullptr;
}
IonBuilder::InliningStatus
IonBuilder::inlineSingleCall(CallInfo &callInfo, JSFunction *target)
{
    // Expects formals to be popped and wrapped. Natives take their own
    // inlining path; scripted targets go through inlineScriptedCall().
    if (target->isNative())
        return inlineNativeCall(callInfo, target->native());

    return inlineScriptedCall(callInfo, target) ? InliningStatus_Inlined
                                                : InliningStatus_Error;
}
// Entry point for inlining a callsite: inline a single target directly, or
// dispatch polymorphically over a subset of |targets| chosen by heuristics.
// |originals| holds the pre-callsite-clone functions used for guarding.
IonBuilder::InliningStatus
IonBuilder::inlineCallsite(ObjectVector &targets, ObjectVector &originals,
                           bool lambda, CallInfo &callInfo)
{
    if (targets.empty())
        return InliningStatus_NotInlined;

    // Is the function provided by an MGetPropertyCache?
    // If so, the cache may be movable to a fallback path, with a dispatch
    // instruction guarding on the incoming TypeObject.
    MGetPropertyCache *propCache = getInlineableGetPropertyCache(callInfo);

    // Inline single targets -- unless they derive from a cache, in which case
    // avoiding the cache and guarding is still faster.
    if (!propCache && targets.length() == 1) {
        JSFunction *target = &targets[0]->as<JSFunction>();
        InliningDecision decision = makeInliningDecision(target, callInfo);
        switch (decision) {
          case InliningDecision_Error:
            return InliningStatus_Error;
          case InliningDecision_DontInline:
            return InliningStatus_NotInlined;
          case InliningDecision_Inline:
            break;
          default:
            // Keep in sync with the identical switch in
            // selectInliningTargets(), which also rejects unknown values.
            MOZ_ASSUME_UNREACHABLE("Unhandled InliningDecision value!");
        }

        // Inlining will eliminate uses of the original callee, but it needs to
        // be preserved in phis if we bail out. Mark the old callee definition as
        // implicitly used to ensure this happens.
        callInfo.fun()->setImplicitlyUsedUnchecked();

        // If the callee is not going to be a lambda (which may vary across
        // different invocations), then the callee definition can be replaced by a
        // constant.
        if (!lambda) {
            // Replace the function with an MConstant.
            MConstant *constFun = constant(ObjectValue(*target));
            callInfo.setFun(constFun);
        }

        return inlineSingleCall(callInfo, target);
    }

    // Choose a subset of the targets for polymorphic inlining.
    BoolVector choiceSet(alloc());
    uint32_t numInlined;
    if (!selectInliningTargets(targets, callInfo, choiceSet, &numInlined))
        return InliningStatus_Error;
    if (numInlined == 0)
        return InliningStatus_NotInlined;

    // Perform a polymorphic dispatch.
    if (!inlineCalls(callInfo, targets, originals, choiceSet, propCache))
        return InliningStatus_Error;

    return InliningStatus_Inlined;
}
bool
IonBuilder::inlineGenericFallback(JSFunction *target, CallInfo &callInfo, MBasicBlock *dispatchBlock,
                                  bool clonedAtCallsite)
{
    // Make a block that starts with all call arguments still on-stack.
    MBasicBlock *fallbackBlock = newBlock(dispatchBlock, pc);
    if (!fallbackBlock)
        return false;

    // Clone the call state so changes in this block don't leak into the
    // caller's CallInfo.
    CallInfo fallbackInfo(alloc(), callInfo.constructing());
    if (!fallbackInfo.init(callInfo))
        return false;
    fallbackInfo.popFormals(fallbackBlock);

    // makeCall() operates on the stateful |current|, so switch to the
    // fallback block first.
    if (!setCurrentAndSpecializePhis(fallbackBlock))
        return false;

    // Emit the generic MCall; on success the call's return block is left in
    // |current| for the caller.
    return makeCall(target, fallbackInfo, clonedAtCallsite);
}
// Build the fallback path for a TypeObject-guarded dispatch: move the
// MGetPropertyCache (and optional MTypeBarrier) out of the dispatch block
// into a dedicated chain of blocks (prep -> getProp -> preCall), then emit a
// generic call there. On success, *fallbackTarget is the head of that chain.
bool
IonBuilder::inlineTypeObjectFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock,
                                     MTypeObjectDispatch *dispatch, MGetPropertyCache *cache,
                                     MBasicBlock **fallbackTarget)
{
    // Getting here implies the following:
    // 1. The call function is an MGetPropertyCache, or an MGetPropertyCache
    //    followed by an MTypeBarrier.
    JS_ASSERT(callInfo.fun()->isGetPropertyCache() || callInfo.fun()->isTypeBarrier());

    // 2. The MGetPropertyCache has inlineable cases by guarding on the TypeObject.
    JS_ASSERT(dispatch->numCases() > 0);

    // 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only
    //    have at most a single use.
    JS_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses());
    JS_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse());

    // This means that no resume points yet capture the MGetPropertyCache,
    // so everything from the MGetPropertyCache up until the call is movable.
    // We now move the MGetPropertyCache and friends into a fallback path.

    // Create a new CallInfo to track modified state within the fallback path.
    CallInfo fallbackInfo(alloc(), callInfo.constructing());
    if (!fallbackInfo.init(callInfo))
        return false;

    // Capture stack prior to the call operation. This captures the function.
    MResumePoint *preCallResumePoint =
        MResumePoint::New(alloc(), dispatchBlock, pc, callerResumePoint_, MResumePoint::ResumeAt);
    if (!preCallResumePoint)
        return false;

    // The function sits |numFormals| operands from the end of the resume
    // point's captured stack.
    DebugOnly<size_t> preCallFuncIndex = preCallResumePoint->numOperands() - callInfo.numFormals();
    JS_ASSERT(preCallResumePoint->getOperand(preCallFuncIndex) == fallbackInfo.fun());

    // In the dispatch block, replace the function's slot entry with Undefined.
    MConstant *undefined = MConstant::New(alloc(), UndefinedValue());
    dispatchBlock->add(undefined);
    dispatchBlock->rewriteAtDepth(-int(callInfo.numFormals()), undefined);

    // Construct a block that does nothing but remove formals from the stack.
    // This is effectively changing the entry resume point of the later fallback block.
    MBasicBlock *prepBlock = newBlock(dispatchBlock, pc);
    if (!prepBlock)
        return false;
    fallbackInfo.popFormals(prepBlock);

    // Construct a block into which the MGetPropertyCache can be moved.
    // This is subtle: the pc and resume point are those of the MGetPropertyCache!
    InlinePropertyTable *propTable = cache->propTable();
    JS_ASSERT(propTable->pc() != nullptr);
    JS_ASSERT(propTable->priorResumePoint() != nullptr);
    MBasicBlock *getPropBlock = newBlock(prepBlock, propTable->pc(), propTable->priorResumePoint());
    if (!getPropBlock)
        return false;

    prepBlock->end(MGoto::New(alloc(), getPropBlock));

    // Since the getPropBlock inherited the stack from right before the MGetPropertyCache,
    // the target of the MGetPropertyCache is still on the stack.
    DebugOnly<MDefinition *> checkObject = getPropBlock->pop();
    JS_ASSERT(checkObject == cache->object());

    // Move the MGetPropertyCache and friends into the getPropBlock.
    if (fallbackInfo.fun()->isGetPropertyCache()) {
        JS_ASSERT(fallbackInfo.fun()->toGetPropertyCache() == cache);
        getPropBlock->addFromElsewhere(cache);
        getPropBlock->push(cache);
    } else {
        // The barrier case: move both the cache and its barrier, preserving
        // their order, and push the barrier as the callee value.
        MTypeBarrier *barrier = callInfo.fun()->toTypeBarrier();
        JS_ASSERT(barrier->type() == MIRType_Object);
        JS_ASSERT(barrier->input()->isGetPropertyCache());
        JS_ASSERT(barrier->input()->toGetPropertyCache() == cache);
        getPropBlock->addFromElsewhere(cache);
        getPropBlock->addFromElsewhere(barrier);
        getPropBlock->push(barrier);
    }

    // Construct an end block with the correct resume point.
    MBasicBlock *preCallBlock = newBlock(getPropBlock, pc, preCallResumePoint);
    if (!preCallBlock)
        return false;
    getPropBlock->end(MGoto::New(alloc(), preCallBlock));

    // Now inline the MCallGeneric, using preCallBlock as the dispatch point.
    if (!inlineGenericFallback(nullptr, fallbackInfo, preCallBlock, false))
        return false;

    // inlineGenericFallback() set the return block as |current|.
    preCallBlock->end(MGoto::New(alloc(), current));
    *fallbackTarget = prepBlock;
    return true;
}
// Polymorphic inlining: emit a dispatch instruction in the current block,
// inline each chosen target into its own guarded block, and merge the return
// values through a phi in a shared return block (which becomes |current|).
bool
IonBuilder::inlineCalls(CallInfo &callInfo, ObjectVector &targets,
                        ObjectVector &originals, BoolVector &choiceSet,
                        MGetPropertyCache *maybeCache)
{
    // Only handle polymorphic inlining.
    JS_ASSERT(IsIonInlinablePC(pc));
    JS_ASSERT(choiceSet.length() == targets.length());
    JS_ASSERT_IF(!maybeCache, targets.length() >= 2);
    JS_ASSERT_IF(maybeCache, targets.length() >= 1);

    MBasicBlock *dispatchBlock = current;
    callInfo.setImplicitlyUsedUnchecked();
    callInfo.pushFormals(dispatchBlock);

    // Patch any InlinePropertyTable to only contain functions that are inlineable.
    //
    // Note that we trim using originals, as callsite clones are not user
    // visible. We don't patch the entries inside the table with the cloned
    // targets, as the entries should only be used for comparison.
    //
    // The InlinePropertyTable will also be patched at the end to exclude native functions
    // that vetoed inlining.
    if (maybeCache) {
        InlinePropertyTable *propTable = maybeCache->propTable();
        propTable->trimToTargets(originals);
        if (propTable->numEntries() == 0)
            maybeCache = nullptr;
    }

    // Generate a dispatch based on guard kind.
    MDispatchInstruction *dispatch;
    if (maybeCache) {
        dispatch = MTypeObjectDispatch::New(alloc(), maybeCache->object(), maybeCache->propTable());
        callInfo.fun()->setImplicitlyUsedUnchecked();
    } else {
        dispatch = MFunctionDispatch::New(alloc(), callInfo.fun());
    }

    // Generate a return block to host the rval-collecting MPhi.
    jsbytecode *postCall = GetNextPc(pc);
    MBasicBlock *returnBlock = newBlock(nullptr, postCall);
    if (!returnBlock)
        return false;
    returnBlock->setCallerResumePoint(callerResumePoint_);

    // Set up stack, used to manually create a post-call resume point.
    returnBlock->inheritSlots(dispatchBlock);
    callInfo.popFormals(returnBlock);

    MPhi *retPhi = MPhi::New(alloc(), returnBlock->stackDepth());
    returnBlock->addPhi(retPhi);
    returnBlock->push(retPhi);

    // Create a resume point from current stack state. initEntrySlots is
    // fallible (OOM): propagate failure instead of ignoring it.
    if (!returnBlock->initEntrySlots(alloc()))
        return false;

    // Reserve the capacity for the phi.
    // Note: this is an upperbound. Unreachable targets and uninlineable natives are also counted.
    uint32_t count = 1; // Possible fallback block.
    for (uint32_t i = 0; i < targets.length(); i++) {
        if (choiceSet[i])
            count++;
    }
    // reserveLength is fallible (OOM): propagate failure instead of
    // ignoring it (matching the checked call in patchInlinedReturns).
    if (!retPhi->reserveLength(count))
        return false;

    // During inlining the 'this' value is assigned a type set which is
    // specialized to the type objects which can generate that inlining target.
    // After inlining the original type set is restored.
    types::TemporaryTypeSet *cacheObjectTypeSet =
        maybeCache ? maybeCache->object()->resultTypeSet() : nullptr;

    // Inline each of the inlineable targets.
    JS_ASSERT(targets.length() == originals.length());
    for (uint32_t i = 0; i < targets.length(); i++) {
        // When original != target, the target is a callsite clone. The
        // original should be used for guards, and the target should be the
        // actual function inlined.
        JSFunction *original = &originals[i]->as<JSFunction>();
        JSFunction *target = &targets[i]->as<JSFunction>();

        // Target must be inlineable.
        if (!choiceSet[i])
            continue;

        // Target must be reachable by the MDispatchInstruction.
        if (maybeCache && !maybeCache->propTable()->hasFunction(original)) {
            choiceSet[i] = false;
            continue;
        }

        MBasicBlock *inlineBlock = newBlock(dispatchBlock, pc);
        if (!inlineBlock)
            return false;

        // Create a function MConstant to use in the entry ResumePoint.
        MConstant *funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints());
        funcDef->setImplicitlyUsedUnchecked();
        dispatchBlock->add(funcDef);

        // Use the MConstant in the inline resume point and on stack.
        int funIndex = inlineBlock->entryResumePoint()->numOperands() - callInfo.numFormals();
        inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef);
        inlineBlock->rewriteSlot(funIndex, funcDef);

        // Create a new CallInfo to track modified state within the inline block.
        CallInfo inlineInfo(alloc(), callInfo.constructing());
        if (!inlineInfo.init(callInfo))
            return false;
        inlineInfo.popFormals(inlineBlock);
        inlineInfo.setFun(funcDef);

        if (maybeCache) {
            // Specialize the type set of |this| to this target's types for
            // the duration of the inlined build (restored below).
            JS_ASSERT(callInfo.thisArg() == maybeCache->object());
            types::TemporaryTypeSet *targetThisTypes =
                maybeCache->propTable()->buildTypeSetForFunction(original);
            if (!targetThisTypes)
                return false;
            maybeCache->object()->setResultTypeSet(targetThisTypes);
        }

        // Inline the call into the inlineBlock.
        if (!setCurrentAndSpecializePhis(inlineBlock))
            return false;
        InliningStatus status = inlineSingleCall(inlineInfo, target);
        if (status == InliningStatus_Error)
            return false;

        // Natives may veto inlining.
        if (status == InliningStatus_NotInlined) {
            JS_ASSERT(target->isNative());
            JS_ASSERT(current == inlineBlock);
            inlineBlock->discardAllResumePoints();
            graph().removeBlock(inlineBlock);
            choiceSet[i] = false;
            continue;
        }

        // inlineSingleCall() changed |current| to the inline return block.
        MBasicBlock *inlineReturnBlock = current;
        setCurrent(dispatchBlock);

        // Connect the inline path to the returnBlock.
        //
        // Note that guarding is on the original function pointer even
        // if there is a clone, since cloning occurs at the callsite.
        dispatch->addCase(original, inlineBlock);

        MDefinition *retVal = inlineReturnBlock->peek(-1);
        retPhi->addInput(retVal);
        inlineReturnBlock->end(MGoto::New(alloc(), returnBlock));
        if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock))
            return false;
    }

    // Patch the InlinePropertyTable to not dispatch to vetoed paths.
    //
    // Note that like above, we trim using originals instead of targets.
    if (maybeCache) {
        maybeCache->object()->setResultTypeSet(cacheObjectTypeSet);

        InlinePropertyTable *propTable = maybeCache->propTable();
        propTable->trimTo(originals, choiceSet);

        // If all paths were vetoed, output only a generic fallback path.
        if (propTable->numEntries() == 0) {
            JS_ASSERT(dispatch->numCases() == 0);
            maybeCache = nullptr;
        }
    }

    // If necessary, generate a fallback path.
    // MTypeObjectDispatch always uses a fallback path.
    if (maybeCache || dispatch->numCases() < targets.length()) {
        // Generate fallback blocks, and set |current| to the fallback return block.
        if (maybeCache) {
            MBasicBlock *fallbackTarget;
            if (!inlineTypeObjectFallback(callInfo, dispatchBlock, (MTypeObjectDispatch *)dispatch,
                                          maybeCache, &fallbackTarget))
            {
                return false;
            }
            dispatch->addFallback(fallbackTarget);
        } else {
            JSFunction *remaining = nullptr;
            bool clonedAtCallsite = false;

            // If there is only 1 remaining case, we can annotate the fallback call
            // with the target information.
            if (dispatch->numCases() + 1 == originals.length()) {
                for (uint32_t i = 0; i < originals.length(); i++) {
                    if (choiceSet[i])
                        continue;

                    remaining = &targets[i]->as<JSFunction>();
                    clonedAtCallsite = targets[i] != originals[i];
                    break;
                }
            }

            if (!inlineGenericFallback(remaining, callInfo, dispatchBlock, clonedAtCallsite))
                return false;
            dispatch->addFallback(current);
        }

        MBasicBlock *fallbackReturnBlock = current;

        // Connect fallback case to return infrastructure.
        MDefinition *retVal = fallbackReturnBlock->peek(-1);
        retPhi->addInput(retVal);
        fallbackReturnBlock->end(MGoto::New(alloc(), returnBlock));
        if (!returnBlock->addPredecessorWithoutPhis(fallbackReturnBlock))
            return false;
    }

    // Finally add the dispatch instruction.
    // This must be done at the end so that add() may be called above.
    dispatchBlock->end(dispatch);

    // Check the depth change: +1 for retval
    JS_ASSERT(returnBlock->stackDepth() == dispatchBlock->stackDepth() - callInfo.numFormals() + 1);

    graph().moveBlockToEnd(returnBlock);
    return setCurrentAndSpecializePhis(returnBlock);
}
MInstruction *
IonBuilder::createDeclEnvObject(MDefinition *callee, MDefinition *scope)
{
    // Fetch the template object that guides inline allocation of the
    // DeclEnv object.
    DeclEnvObject *templateObj = inspector->templateDeclEnvObject();

    // The lambda's name lives in a fixed slot; the DeclEnv object always
    // has room for it, so dynamic slots never occur here.
    JS_ASSERT(!templateObj->hasDynamicSlots());

    // Allocate the object itself. Nothing between this allocation and the
    // slot stores below may bail out, or the dynamic slots pointer could
    // leak.
    MInstruction *env = MNewDeclEnvObject::New(alloc(), templateObj);
    current->add(env);

    // Fill in the reserved slots. No post barrier is required: the object
    // is nursery-allocated when possible, and a tenured-heap allocation
    // implies a minor GC already moved scope/callee to the tenured heap.
    current->add(MStoreFixedSlot::New(alloc(), env, DeclEnvObject::enclosingScopeSlot(), scope));
    current->add(MStoreFixedSlot::New(alloc(), env, DeclEnvObject::lambdaSlot(), callee));
    return env;
}
MInstruction *
IonBuilder::createCallObject(MDefinition *callee, MDefinition *scope)
{
    // Template object guiding the inline CallObject allocation.
    CallObject *templateObj = inspector->templateCallObject();

    // Materialize dynamic slots first when the template requires them;
    // otherwise the slots pointer is simply null.
    MInstruction *slots;
    if (templateObj->hasDynamicSlots()) {
        size_t nslots = JSObject::dynamicSlotsCount(templateObj->numFixedSlots(),
                                                    templateObj->lastProperty()->slotSpan(templateObj->getClass()),
                                                    templateObj->getClass());
        slots = MNewSlots::New(alloc(), nslots);
    } else {
        slots = MConstant::New(alloc(), NullValue());
    }
    current->add(slots);

    // Allocate the CallObject itself. No instruction between the
    // allocation and the slot initialization below may bail out, or the
    // dynamic slots pointer would leak. Run-once scripts need a singleton
    // type and therefore always take the VM-call variant.
    MUnaryInstruction *callObj;
    if (script()->treatAsRunOnce())
        callObj = MNewRunOnceCallObject::New(alloc(), templateObj, slots);
    else
        callObj = MNewCallObject::New(alloc(), templateObj, slots);
    current->add(callObj);

    // Initialize the reserved slots; post barriers are unnecessary for the
    // same reason as in createDeclEnvObject.
    current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::enclosingScopeSlot(), scope));
    current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee));

    // Copy each aliased formal argument into its scope slot, choosing the
    // fixed- or dynamic-slot store as appropriate.
    for (AliasedFormalIter fi(script()); fi; fi++) {
        unsigned slot = fi.scopeSlot();
        MDefinition *param = current->getSlot(info().argSlotUnchecked(fi.frameIndex()));
        if (slot < templateObj->numFixedSlots())
            current->add(MStoreFixedSlot::New(alloc(), callObj, slot, param));
        else
            current->add(MStoreSlot::New(alloc(), slots, slot - templateObj->numFixedSlots(), param));
    }
    return callObj;
}
MDefinition *
IonBuilder::createThisScripted(MDefinition *callee)
{
    // Fetch callee.prototype.
    //
    // This fetch MUST be idempotent: it has no corresponding bytecode
    // operation, so resumeAfter() cannot be used. Getters may not override
    // |prototype| fetching, so idempotence does hold. Prefer an idempotent
    // property cache; once such a cache has been invalidated we cannot use
    // a non-idempotent one, so fall back to MCallGetProperty instead.
    //
    // Both variants can trigger a GC, and thus invalidation.
    MInstruction *getProto;
    if (invalidatedIdempotentCache()) {
        MCallGetProperty *fetch = MCallGetProperty::New(alloc(), callee, names().prototype,
                                                        /* callprop = */ false);
        fetch->setIdempotent();
        getProto = fetch;
    } else {
        MGetPropertyCache *cache = MGetPropertyCache::New(alloc(), callee, names().prototype,
                                                          /* monitored = */ false);
        cache->setIdempotent();
        getProto = cache;
    }
    current->add(getProto);

    // Build |this| from the fetched prototype.
    MCreateThisWithProto *createThis = MCreateThisWithProto::New(alloc(), callee, getProto);
    current->add(createThis);
    return createThis;
}
JSObject *
IonBuilder::getSingletonPrototype(JSFunction *target)
{
    // Only a singleton function with fully-known properties can yield a
    // singleton |prototype| value; anything else returns null.
    if (!target)
        return nullptr;
    if (!target->hasSingletonType())
        return nullptr;

    types::TypeObjectKey *targetType = types::TypeObjectKey::get(target);
    if (targetType->unknownProperties())
        return nullptr;

    // Ask TI for the unique object stored in target.prototype, adding a
    // constraint so we recompile if that changes.
    types::HeapTypeSetKey protoProperty = targetType->property(NameToId(names().prototype));
    return protoProperty.singleton(constraints());
}
MDefinition *
IonBuilder::createThisScriptedSingleton(JSFunction *target, MDefinition *callee)
{
// Inline-allocate |this| for a constructing call when the target's
// |prototype| is a known singleton and baseline recorded a matching
// template object. Returns nullptr when any precondition fails, letting
// the caller fall back to createThisScripted().
// Get the singleton prototype (if it exists).
JSObject *proto = getSingletonPrototype(target);
if (!proto)
return nullptr;
JSObject *templateObject = inspector->getTemplateObject(pc);
if (!templateObject || !templateObject->is<JSObject>())
return nullptr;
// The template is only usable if its proto is tenured and identical to
// the singleton prototype found above.
if (!templateObject->hasTenuredProto() || templateObject->getProto() != proto)
return nullptr;
// The callee must have type information, and must already have observed
// |this| being an object of the template's type.
if (!target->nonLazyScript()->types)
return nullptr;
if (!types::TypeScript::ThisTypes(target->nonLazyScript())->hasType(types::Type::ObjectType(templateObject)))
return nullptr;
// For template objects with NewScript info, the appropriate allocation
// kind to use may change due to dynamic property adds. In these cases
// calling Ion code will be invalidated, but any baseline template object
// may be stale. Update to the correct template object in this case.
types::TypeObject *templateType = templateObject->type();
if (templateType->hasNewScript()) {
templateObject = templateType->newScript()->templateObject;
JS_ASSERT(templateObject->type() == templateType);
// Trigger recompilation if the templateObject changes.
types::TypeObjectKey::get(templateType)->watchStateChangeForNewScriptTemplate(constraints());
}
// Generate an inline path to create a new |this| object with
// the given singleton prototype.
MCreateThisWithTemplate *createThis =
MCreateThisWithTemplate::New(alloc(), constraints(), templateObject,
templateObject->type()->initialHeap(constraints()));
current->add(createThis);
return createThis;
}
MDefinition *
IonBuilder::createThis(JSFunction *target, MDefinition *callee)
{
    // Unknown target: emit a fully generic MCreateThis.
    if (!target) {
        MCreateThis *generic = MCreateThis::New(alloc(), callee);
        current->add(generic);
        return generic;
    }

    // Native constructors build |this| themselves; hand them the
    // is-constructing sentinel instead. A native that is not a constructor
    // cannot be handled here at all.
    if (target->isNative()) {
        if (!target->isNativeConstructor())
            return nullptr;
        MConstant *magic = MConstant::New(alloc(), MagicValue(JS_IS_CONSTRUCTING));
        current->add(magic);
        return magic;
    }

    // Prefer baking in a singleton prototype; otherwise fetch the
    // prototype dynamically.
    if (MDefinition *baked = createThisScriptedSingleton(target, callee))
        return baked;
    return createThisScripted(callee);
}
bool
IonBuilder::jsop_funcall(uint32_t argc)
{
// Compile JSOP_FUNCALL. When the callee is known to be the native
// Function.prototype.call, transform f.call(thisv, ...args) into a direct
// (possibly inlined) call of f; otherwise emit an ordinary call.
//
// Stack for JSOP_FUNCALL:
// 1: arg0
// ...
// argc: argN
// argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position.
// argc+2: The native 'call' function.
int calleeDepth = -((int)argc + 2);
int funcDepth = -((int)argc + 1);
// If |Function.prototype.call| may be overridden, don't optimize callsite.
types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet();
JSFunction *native = getSingleCallTarget(calleeTypes);
if (!native || !native->isNative() || native->native() != &js_fun_call) {
CallInfo callInfo(alloc(), false);
if (!callInfo.init(current, argc))
return false;
return makeCall(native, callInfo, false);
}
// The 'call' native is no longer passed but must survive in resume points.
current->peek(calleeDepth)->setImplicitlyUsedUnchecked();
// Extract call target.
types::TemporaryTypeSet *funTypes = current->peek(funcDepth)->resultTypeSet();
JSFunction *target = getSingleCallTarget(funTypes);
// Shimmy the slots down to remove the native 'call' function.
current->shimmySlots(funcDepth - 1);
bool zeroArguments = (argc == 0);
// If no |this| argument was provided, explicitly pass Undefined.
// Pushing is safe here, since one stack slot has been removed.
if (zeroArguments) {
pushConstant(UndefinedValue());
} else {
// |this| becomes implicit in the call.
argc -= 1;
}
CallInfo callInfo(alloc(), false);
if (!callInfo.init(current, argc))
return false;
// Try to inline the call. The zero-argument case is not inlined: |this|
// was synthesized above rather than observed by TI.
if (!zeroArguments) {
InliningDecision decision = makeInliningDecision(target, callInfo);
switch (decision) {
case InliningDecision_Error:
return false;
case InliningDecision_DontInline:
break;
case InliningDecision_Inline:
if (target->isInterpreted())
return inlineScriptedCall(callInfo, target);
break;
}
}
// Call without inlining.
return makeCall(target, callInfo, false);
}
bool
IonBuilder::jsop_funapply(uint32_t argc)
{
// Compile JSOP_FUNAPPLY. Only the f.apply(thisv, arguments) shape with
// exactly two arguments and a known js_fun_apply callee is optimized;
// everything else becomes a regular call (or aborts compilation when the
// arguments-ness of the second argument cannot be decided).
int calleeDepth = -((int)argc + 2);
types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet();
JSFunction *native = getSingleCallTarget(calleeTypes);
if (argc != 2) {
CallInfo callInfo(alloc(), false);
if (!callInfo.init(current, argc))
return false;
return makeCall(native, callInfo, false);
}
// Disable compilation if the second argument to |apply| cannot be guaranteed
// to be either definitely |arguments| or definitely not |arguments|.
MDefinition *argument = current->peek(-1);
if (script()->argumentsHasVarBinding() &&
argument->mightBeType(MIRType_MagicOptimizedArguments) &&
argument->type() != MIRType_MagicOptimizedArguments)
{
return abort("fun.apply with MaybeArguments");
}
// Fallback to regular call if arg 2 is not definitely |arguments|.
if (argument->type() != MIRType_MagicOptimizedArguments) {
CallInfo callInfo(alloc(), false);
if (!callInfo.init(current, argc))
return false;
return makeCall(native, callInfo, false);
}
if (!native ||
!native->isNative() ||
native->native() != js_fun_apply)
{
return abort("fun.apply speculation failed");
}
// The 'apply' native itself is not called but must stay in resume points.
current->peek(calleeDepth)->setImplicitlyUsedUnchecked();
// Use funapply that definitely uses |arguments|.
return jsop_funapplyarguments(argc);
}
bool
IonBuilder::jsop_funapplyarguments(uint32_t argc)
{
// Compile f.apply(thisv, arguments) once the second argument is known to
// be the optimized |arguments| value. Outside of inlining, MApplyArgs
// forwards the frame's actual arguments; when inlining (or during the
// definite-properties analysis) the caller's argument definitions are
// used directly.
//
// Stack for JSOP_FUNAPPLY:
// 1: Vp
// 2: This
// argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position.
// argc+2: The native 'apply' function.
int funcDepth = -((int)argc + 1);
// Extract call target.
types::TemporaryTypeSet *funTypes = current->peek(funcDepth)->resultTypeSet();
JSFunction *target = getSingleCallTarget(funTypes);
// When this script isn't inlined, use MApplyArgs,
// to copy the arguments from the stack and call the function
if (inliningDepth_ == 0 && info().executionMode() != DefinitePropertiesAnalysis) {
// The array argument corresponds to the arguments object. As the JIT
// is implicitly reading the arguments object in the next instruction,
// we need to prevent the deletion of the arguments object from resume
// points, so that Baseline will behave correctly after a bailout.
MDefinition *vp = current->pop();
vp->setImplicitlyUsedUnchecked();
MDefinition *argThis = current->pop();
// Unwrap the (JSFunction *) parameter.
MDefinition *argFunc = current->pop();
// Pop apply function.
current->pop();
MArgumentsLength *numArgs = MArgumentsLength::New(alloc());
current->add(numArgs);
MApplyArgs *apply = MApplyArgs::New(alloc(), target, argFunc, numArgs, argThis);
current->add(apply);
current->push(apply);
if (!resumeAfter(apply))
return false;
types::TemporaryTypeSet *types = bytecodeTypes(pc);
return pushTypeBarrier(apply, types, true);
}
// When inlining we have the arguments the function gets called with
// and can optimize even more, by just calling the functions with the args.
// We also try this path when doing the definite properties analysis, as we
// can inline the apply() target and don't care about the actual arguments
// that were passed in.
CallInfo callInfo(alloc(), false);
// Vp
MDefinition *vp = current->pop();
vp->setImplicitlyUsedUnchecked();
// Arguments: forwarded from the enclosing (inlined) call, if any.
MDefinitionVector args(alloc());
if (inliningDepth_) {
if (!args.appendAll(inlineCallInfo_->argv()))
return false;
}
callInfo.setArgs(&args);
// This
MDefinition *argThis = current->pop();
callInfo.setThis(argThis);
// Pop function parameter.
MDefinition *argFunc = current->pop();
callInfo.setFun(argFunc);
// Pop apply function.
current->pop();
// Try to inline the call.
InliningDecision decision = makeInliningDecision(target, callInfo);
switch (decision) {
case InliningDecision_Error:
return false;
case InliningDecision_DontInline:
break;
case InliningDecision_Inline:
if (target->isInterpreted())
return inlineScriptedCall(callInfo, target);
}
return makeCall(target, callInfo, false);
}
bool
IonBuilder::jsop_call(uint32_t argc, bool constructing)
{
// Compile a generic call (or |new|) opcode: gather up to four known call
// targets from TI, resolve callsite clones, attempt inlining, and fall
// back to an ordinary MCall.
//
// If this call has never executed, try to seed the observed type set
// based on how the call result is used.
types::TemporaryTypeSet *observed = bytecodeTypes(pc);
if (observed->empty()) {
if (BytecodeFlowsToBitop(pc)) {
observed->addType(types::Type::Int32Type(), alloc_->lifoAlloc());
} else if (*GetNextPc(pc) == JSOP_POS) {
// Note: this is lame, overspecialized on the code patterns used
// by asm.js and should be replaced by a more general mechanism.
// See bug 870847.
observed->addType(types::Type::DoubleType(), alloc_->lifoAlloc());
}
}
int calleeDepth = -((int)argc + 2);
// Acquire known call target if existent.
ObjectVector originals(alloc());
bool gotLambda = false;
types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet();
if (calleeTypes) {
if (!getPolyCallTargets(calleeTypes, constructing, originals, 4, &gotLambda))
return false;
}
JS_ASSERT_IF(gotLambda, originals.length() <= 1);
// If any call targets need to be cloned, look for existing clones to use.
// Keep track of the originals as we need to case on them for poly inline.
bool hasClones = false;
ObjectVector targets(alloc());
for (uint32_t i = 0; i < originals.length(); i++) {
JSFunction *fun = &originals[i]->as<JSFunction>();
if (fun->hasScript() && fun->nonLazyScript()->shouldCloneAtCallsite()) {
if (JSFunction *clone = ExistingCloneFunctionAtCallsite(compartment->callsiteClones(), fun, script(), pc)) {
fun = clone;
hasClones = true;
}
}
if (!targets.append(fun))
return false;
}
CallInfo callInfo(alloc(), constructing);
if (!callInfo.init(current, argc))
return false;
// Try inlining.
InliningStatus status = inlineCallsite(targets, originals, gotLambda, callInfo);
if (status == InliningStatus_Inlined)
return true;
if (status == InliningStatus_Error)
return false;
// No inline, just make the call. A single target can still be baked into
// the MCall.
JSFunction *target = nullptr;
if (targets.length() == 1)
target = &targets[0]->as<JSFunction>();
return makeCall(target, callInfo, hasClones);
}
MDefinition *
IonBuilder::makeCallsiteClone(JSFunction *target, MDefinition *fun)
{
    // With a known target, TI has already told us the target *is* the
    // callsite clone, so bake it in as a constant. The old function
    // definition must still be kept alive in resume points.
    if (target) {
        fun->setImplicitlyUsedUnchecked();
        return constant(ObjectValue(*target));
    }

    // Multiple targets: emit a callsite-clone IC. Callers have already
    // checked that at least some targets are marked
    // should-clone-at-callsite.
    MCallsiteCloneCache *cache = MCallsiteCloneCache::New(alloc(), fun, pc);
    current->add(cache);
    return cache;
}
bool
IonBuilder::testShouldDOMCall(types::TypeSet *inTypes,
                              JSFunction *func, JSJitInfo::OpType opType)
{
    // Only natives carrying jitinfo of the requested kind qualify.
    if (!func->isNative())
        return false;
    const JSJitInfo *jinfo = func->jitInfo();
    if (!jinfo)
        return false;
    if (jinfo->type() != opType)
        return false;

    // The DOM fast path is legal only if every object flowing through has
    // a proto compatible with the jitinfo's expected instance class, so we
    // can bake in a call to the bottom half of the DOM accessor.
    DOMInstanceClassMatchesProto instanceChecker =
        compartment->runtime()->DOMcallbacks()->instanceClassMatchesProto;
    for (unsigned i = 0; i < inTypes->getObjectCount(); i++) {
        types::TypeObjectKey *key = inTypes->getObject(i);
        if (!key)
            continue;
        if (!key->hasTenuredProto())
            return false;
        if (!instanceChecker(key->proto().toObjectOrNull(), jinfo->protoID, jinfo->depth))
            return false;
    }
    return true;
}
static bool
ArgumentTypesMatch(MDefinition *def, types::StackTypeSet *calleeTypes)
{
    // When the definition carries a result type set, subset inclusion in
    // the callee's observed set decides the question.
    if (def->resultTypeSet()) {
        JS_ASSERT(def->type() == MIRType_Value || def->mightBeType(def->type()));
        return def->resultTypeSet()->isSubset(calleeTypes);
    }

    // Without a set, decide from the MIR type alone: a bare Value never
    // matches; objects require the callee to accept unknown objects; any
    // other type just has to be possible in the callee's set.
    switch (def->type()) {
      case MIRType_Value:
        return false;
      case MIRType_Object:
        return calleeTypes->unknownObject();
      default:
        return calleeTypes->mightBeMIRType(def->type());
    }
}
bool
IonBuilder::testNeedsArgumentCheck(JSFunction *target, CallInfo &callInfo)
{
    // With a known target, an argument check can be skipped when the
    // caller's argument types are a subset of the callee's observed types:
    // type sets only ever grow, so the subset relation cannot be broken
    // later. Returns true when the check is still required.
    if (!target->hasScript())
        return true;
    JSScript *calleeScript = target->nonLazyScript();
    if (!calleeScript->types)
        return true;

    if (!ArgumentTypesMatch(callInfo.thisArg(), types::TypeScript::ThisTypes(calleeScript)))
        return true;

    // Check every argument the callee will actually see.
    uint32_t numChecked = Min<uint32_t>(callInfo.argc(), target->nargs());
    for (size_t i = 0; i < numChecked; i++) {
        if (!ArgumentTypesMatch(callInfo.getArg(i), types::TypeScript::ArgTypes(calleeScript, i)))
            return true;
    }

    // Missing formals are filled with |undefined|; the callee must have
    // observed that possibility too.
    for (size_t i = callInfo.argc(); i < target->nargs(); i++) {
        if (!types::TypeScript::ArgTypes(calleeScript, i)->mightBeMIRType(MIRType_Undefined))
            return true;
    }
    return false;
}
MCall *
IonBuilder::makeCallHelper(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite)
{
// Build and add the MCall instruction for this callsite, padding missing
// formals with |undefined|, inlining |this| creation for constructors,
// and flagging eligible DOM calls. Returns nullptr on OOM or when
// constructor inlining fails (compilation is aborted in that case).
//
// This function may be called with mutated stack.
// Querying TI for popped types is invalid.
uint32_t targetArgs = callInfo.argc();
// Collect number of missing arguments provided that the target is
// scripted. Native functions are passed an explicit 'argc' parameter.
if (target && !target->isNative())
targetArgs = Max<uint32_t>(target->nargs(), callInfo.argc());
bool isDOMCall = false;
if (target && !callInfo.constructing()) {
// We know we have a single call target. Check whether the "this" types
// are DOM types and our function a DOM function, and if so flag the
// MCall accordingly.
types::TemporaryTypeSet *thisTypes = callInfo.thisArg()->resultTypeSet();
if (thisTypes &&
thisTypes->isDOMClass() &&
testShouldDOMCall(thisTypes, target, JSJitInfo::Method))
{
isDOMCall = true;
}
}
// targetArgs + 1 reserves an extra operand slot for |this| (arg 0 below).
MCall *call = MCall::New(alloc(), target, targetArgs + 1, callInfo.argc(),
callInfo.constructing(), isDOMCall);
if (!call)
return nullptr;
// Explicitly pad any missing arguments with |undefined|.
// This permits skipping the argumentsRectifier.
for (int i = targetArgs; i > (int)callInfo.argc(); i--) {
JS_ASSERT_IF(target, !target->isNative());
MConstant *undef = constant(UndefinedValue());
call->addArg(i, undef);
}
// Add explicit arguments.
// Skip addArg(0) because it is reserved for this.
for (int32_t i = callInfo.argc() - 1; i >= 0; i--)
call->addArg(i + 1, callInfo.getArg(i));
// Now that we've told it about all the args, compute whether it's movable.
call->computeMovable();
// Inline the constructor on the caller-side.
if (callInfo.constructing()) {
MDefinition *create = createThis(target, callInfo.fun());
if (!create) {
abort("Failure inlining constructor for call.");
return nullptr;
}
// The caller-pushed |this| is replaced by the created object but must
// remain alive in resume points.
callInfo.thisArg()->setImplicitlyUsedUnchecked();
callInfo.setThis(create);
}
// Pass |this| and function.
MDefinition *thisArg = callInfo.thisArg();
call->addArg(0, thisArg);
// Add a callsite clone IC for multiple targets which all should be
// callsite cloned, or bake in the clone for a single target.
if (cloneAtCallsite) {
MDefinition *fun = makeCallsiteClone(target, callInfo.fun());
callInfo.setFun(fun);
}
if (target && !testNeedsArgumentCheck(target, callInfo))
call->disableArgCheck();
call->initFunction(callInfo.fun());
current->add(call);
return call;
}
static bool
DOMCallNeedsBarrier(const JSJitInfo* jitinfo, types::TemporaryTypeSet *types)
{
    // A type barrier can only be omitted when the DOM native's declared
    // return type is specific and already present in |types|.
    //
    // An unknown return type always needs a barrier.
    if (jitinfo->returnType() == JSVAL_TYPE_UNKNOWN)
        return true;

    // JSVAL_TYPE_OBJECT still leaves the concrete object type
    // unconstrained, so a barrier on the actual type remains necessary.
    if (jitinfo->returnType() == JSVAL_TYPE_OBJECT)
        return true;

    // Otherwise, skip the barrier only when the produced MIR type is
    // exactly what is already expected.
    return MIRTypeFromValueType(jitinfo->returnType()) != types->getKnownMIRType();
}
bool
IonBuilder::makeCall(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite)
{
    // A constructor call to a non-constructor must throw, so CallKnown
    // must not be used for it.
    JS_ASSERT_IF(callInfo.constructing() && target,
                 target->isInterpretedConstructor() || target->isNativeConstructor());

    // Emit the call, push its result, and attach a resume point for
    // effectful calls.
    MCall *call = makeCallHelper(target, callInfo, cloneAtCallsite);
    if (!call)
        return false;
    current->push(call);
    if (call->isEffectful()) {
        if (!resumeAfter(call))
            return false;
    }

    // Monitor the result, using the DOM-specific barrier for DOM natives.
    types::TemporaryTypeSet *types = bytecodeTypes(pc);
    if (call->isCallDOMNative())
        return pushDOMTypeBarrier(call, types, call->getSingleTarget());
    return pushTypeBarrier(call, types, true);
}
bool
IonBuilder::jsop_eval(uint32_t argc)
{
// Compile a call that may be a direct eval. Non-eval callees and
// never-executed evals become ordinary calls; a confirmed direct eval is
// emitted as MCallDirectEval, with a fast path for the 'eval(v + "()")'
// pattern.
int calleeDepth = -((int)argc + 2);
types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet();
// Emit a normal call if the eval has never executed. This keeps us from
// disabling compilation for the script when testing with --ion-eager.
if (calleeTypes && calleeTypes->empty())
return jsop_call(argc, /* constructing = */ false);
JSFunction *singleton = getSingleCallTarget(calleeTypes);
if (!singleton)
return abort("No singleton callee for eval()");
if (script()->global().valueIsEval(ObjectValue(*singleton))) {
if (argc != 1)
return abort("Direct eval with more than one argument");
if (!info().funMaybeLazy())
return abort("Direct eval in global code");
// The 'this' value for the outer and eval scripts must be the
// same. This is not guaranteed if a primitive string/number/etc.
// is passed through to the eval invoke as the primitive may be
// boxed into different objects if accessed via 'this'.
MIRType type = thisTypes->getKnownMIRType();
if (type != MIRType_Object && type != MIRType_Null && type != MIRType_Undefined)
return abort("Direct eval from script with maybe-primitive 'this'");
CallInfo callInfo(alloc(), /* constructing = */ false);
if (!callInfo.init(current, argc))
return false;
// The eval callee and arguments are consumed here, not by an MCall,
// but must remain visible to resume points.
callInfo.setImplicitlyUsedUnchecked();
callInfo.fun()->setImplicitlyUsedUnchecked();
MDefinition *scopeChain = current->scopeChain();
MDefinition *string = callInfo.getArg(0);
// Direct eval acts as identity on non-string types according to
// ES5 15.1.2.1 step 1.
if (!string->mightBeType(MIRType_String)) {
current->push(string);
types::TemporaryTypeSet *types = bytecodeTypes(pc);
return pushTypeBarrier(string, types, true);
}
current->pushSlot(info().thisSlot());
MDefinition *thisValue = current->pop();
// Try to pattern match 'eval(v + "()")'. In this case v is likely a
// name on the scope chain and the eval is performing a call on that
// value. Use a dynamic scope chain lookup rather than a full eval.
if (string->isConcat() &&
string->getOperand(1)->isConstant() &&
string->getOperand(1)->toConstant()->value().isString())
{
JSAtom *atom = &string->getOperand(1)->toConstant()->value().toString()->asAtom();
if (StringEqualsAscii(atom, "()")) {
MDefinition *name = string->getOperand(0);
MInstruction *dynamicName = MGetDynamicName::New(alloc(), scopeChain, name);
current->add(dynamicName);
current->push(dynamicName);
current->push(thisValue);
CallInfo evalCallInfo(alloc(), /* constructing = */ false);
if (!evalCallInfo.init(current, /* argc = */ 0))
return false;
return makeCall(nullptr, evalCallInfo, false);
}
}
// Full direct eval: filter the string, evaluate, and monitor the result.
MInstruction *filterArguments = MFilterArgumentsOrEval::New(alloc(), string);
current->add(filterArguments);
MInstruction *ins = MCallDirectEval::New(alloc(), scopeChain, string, thisValue, pc);
current->add(ins);
current->push(ins);
types::TemporaryTypeSet *types = bytecodeTypes(pc);
return resumeAfter(ins) && pushTypeBarrier(ins, types, true);
}
return jsop_call(argc, /* constructing = */ false);
}
bool
IonBuilder::jsop_compare(JSOp op)
{
    // Pop both operands (rhs was pushed last) and emit the comparison.
    MDefinition *rhs = current->pop();
    MDefinition *lhs = current->pop();

    MCompare *compare = MCompare::New(alloc(), lhs, rhs, op);
    current->add(compare);
    current->push(compare);

    // Specialize using baseline feedback; an effectful comparison needs a
    // resume point.
    compare->infer(inspector, pc);
    if (!compare->isEffectful())
        return true;
    return resumeAfter(compare);
}
bool
IonBuilder::jsop_newarray(uint32_t count)
{
    JS_ASSERT(script()->compileAndGo());

    // Inline allocation requires the template array recorded by baseline.
    JSObject *templateObject = inspector->getTemplateObject(pc);
    if (!templateObject)
        return abort("No template object for NEWARRAY");
    JS_ASSERT(templateObject->is<ArrayObject>());

    // jsop_initelem_array must be able to find the type object being
    // initialized.
    if (templateObject->type()->unknownProperties())
        return abort("New array has unknown properties");

    MNewArray *newArray = MNewArray::New(alloc(), constraints(), count, templateObject,
                                         templateObject->type()->initialHeap(constraints()),
                                         MNewArray::NewArray_Allocating);
    current->add(newArray);
    current->push(newArray);

    // Keep the template's convert-to-doubles flag in sync with what TI
    // currently says about the element types.
    types::TemporaryTypeSet::DoubleConversion conversion =
        newArray->resultTypeSet()->convertDoubleElements(constraints());
    if (conversion == types::TemporaryTypeSet::AlwaysConvertToDoubles)
        templateObject->setShouldConvertDoubleElements();
    else
        templateObject->clearShouldConvertDoubleElements();
    return true;
}
bool
IonBuilder::jsop_newobject()
{
// Don't bake in the TypeObject for non-CNG scripts.
JS_ASSERT(script()->compileAndGo());
JSObject *templateObject = inspector->getTemplateObject(pc);
if (!templateObject)
return abort("No template object for NEWOBJECT");
JS_ASSERT(templateObject->is<JSObject>());
MNewObject *ins = MNewObject::New(alloc(), constraints(), templateObject,
templateObject->hasSingletonType()
? gc::TenuredHeap
: templateObject->type()->initialHeap(constraints()),
/* templateObjectIsClassPrototype = */ false);
current->add(ins);
current->push(ins);
return resumeAfter(ins);
}
bool
IonBuilder::jsop_initelem()
{
MDefinition *value = current->pop();
MDefinition *id = current->pop();
MDefinition *obj = current->peek(-1);
MInitElem *initElem = MInitElem::New(alloc(), obj, id, value);
current->add(initElem);
return resumeAfter(initElem);
}
bool
IonBuilder::jsop_initelem_array()
{
// Compile JSOP_INITELEM_ARRAY: store |value| at constant index
// GET_UINT24(pc) of the array literal being initialized (the MNewArray
// result on top of the stack), then bump its initialized length. Falls
// back to a VM stub when TI would otherwise need updating.
MDefinition *value = current->pop();
MDefinition *obj = current->peek(-1);
// Make sure that arrays have the type being written to them by the
// intializer, and that arrays are marked as non-packed when writing holes
// to them during initialization.
bool needStub = false;
types::TypeObjectKey *initializer = obj->resultTypeSet()->getObject(0);
if (value->type() == MIRType_MagicHole) {
// Writing a hole: the array's type must already be flagged non-packed.
if (!initializer->hasFlags(constraints(), types::OBJECT_FLAG_NON_PACKED))
needStub = true;
} else if (!initializer->unknownProperties()) {
// Otherwise the element type set must already include |value|'s type;
// freeze the set so a later change triggers recompilation.
types::HeapTypeSetKey elemTypes = initializer->property(JSID_VOID);
if (!TypeSetIncludes(elemTypes.maybeTypes(), value->type(), value->resultTypeSet())) {
elemTypes.freeze(constraints());
needStub = true;
}
}
if (NeedsPostBarrier(info(), value))
current->add(MPostWriteBarrier::New(alloc(), obj, value));
if (needStub) {
MCallInitElementArray *store = MCallInitElementArray::New(alloc(), obj, GET_UINT24(pc), value);
current->add(store);
return resumeAfter(store);
}
MConstant *id = MConstant::New(alloc(), Int32Value(GET_UINT24(pc)));
current->add(id);
// Get the elements vector.
MElements *elements = MElements::New(alloc(), obj);
current->add(elements);
// Convert to double eagerly when the template array is flagged for
// double elements.
JSObject *templateObject = obj->toNewArray()->templateObject();
if (templateObject->shouldConvertDoubleElements()) {
MInstruction *valueDouble = MToDouble::New(alloc(), value);
current->add(valueDouble);
value = valueDouble;
}
// Store the value.
MStoreElement *store = MStoreElement::New(alloc(), elements, id, value, /* needsHoleCheck = */ false);
current->add(store);
// Update the initialized length. (The template object for this array has
// the array's ultimate length, so the length field is already correct: no
// updating needed.)
MSetInitializedLength *initLength = MSetInitializedLength::New(alloc(), elements, id);
current->add(initLength);
if (!resumeAfter(initLength))
return false;
return true;
}
bool
IonBuilder::jsop_mutateproto()
{
MDefinition *value = current->pop();
MDefinition *obj = current->peek(-1);
MMutateProto *mutate = MMutateProto::New(alloc(), obj, value);
current->add(mutate);
return resumeAfter(mutate);
}
bool
IonBuilder::jsop_initprop(PropertyName *name)
{
// Compile JSOP_INITPROP: define |name| = |value| on the object literal
// being initialized (the MNewObject result on top of the stack). Uses a
// direct slot store when the template object already has the property and
// no type barrier is required; otherwise emits the generic MInitProp.
MDefinition *value = current->pop();
MDefinition *obj = current->peek(-1);
JSObject *templateObject = obj->toNewObject()->templateObject();
Shape *shape = templateObject->lastProperty()->searchLinear(NameToId(name));
if (!shape) {
// JSOP_NEWINIT becomes an MNewObject without preconfigured properties.
MInitProp *init = MInitProp::New(alloc(), obj, name, value);
current->add(init);
return resumeAfter(init);
}
if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current,
&obj, name, &value, /* canModify = */ true))
{
// JSOP_NEWINIT becomes an MNewObject without preconfigured properties.
MInitProp *init = MInitProp::New(alloc(), obj, name, value);
current->add(init);
return resumeAfter(init);
}
if (NeedsPostBarrier(info(), value))
current->add(MPostWriteBarrier::New(alloc(), obj, value));
// Decide whether the store needs a pre-barrier based on TI's knowledge of
// the property.
bool needsBarrier = true;
if (obj->resultTypeSet() &&
!obj->resultTypeSet()->propertyNeedsBarrier(constraints(), NameToId(name)))
{
needsBarrier = false;
}
// In parallel execution, we never require write barriers. See
// forkjoin.cpp for more information.
if (info().executionMode() == ParallelExecution)
needsBarrier = false;
// Store into a fixed or dynamic slot, as the template's layout dictates.
if (templateObject->isFixedSlot(shape->slot())) {
MStoreFixedSlot *store = MStoreFixedSlot::New(alloc(), obj, shape->slot(), value);
if (needsBarrier)
store->setNeedsBarrier();
current->add(store);
return resumeAfter(store);
}
MSlots *slots = MSlots::New(alloc(), obj);
current->add(slots);
uint32_t slot = templateObject->dynamicSlotIndex(shape->slot());
MStoreSlot *store = MStoreSlot::New(alloc(), slots, slot, value);
if (needsBarrier)
store->setNeedsBarrier();
current->add(store);
return resumeAfter(store);
}
bool
IonBuilder::jsop_initprop_getter_setter(PropertyName *name)
{
    // Attach an accessor (getter or setter) named |name| to the object
    // literal beneath the accessor function on the stack.
    MDefinition *accessor = current->pop();
    MDefinition *object = current->peek(-1);

    MInitPropGetterSetter *ins =
        MInitPropGetterSetter::New(alloc(), object, name, accessor);
    current->add(ins);
    return resumeAfter(ins);
}
bool
IonBuilder::jsop_initelem_getter_setter()
{
MDefinition *value = current->pop();
MDefinition *id = current->pop();
MDefinition *obj = current->peek(-1);
MInitElemGetterSetter *init = MInitElemGetterSetter::New(alloc(), obj, id, value);
current->add(init);
return resumeAfter(init);
}
MBasicBlock *
IonBuilder::addBlock(MBasicBlock *block, uint32_t loopDepth)
{
    // Register |block| with the graph and tag it with its loop depth.
    // A nullptr argument is passed straight through, letting callers
    // propagate allocation failure with a single return statement.
    if (block) {
        graph().addBlock(block);
        block->setLoopDepth(loopDepth);
    }
    return block;
}
MBasicBlock *
IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc)
{
    // Create a NORMAL block at the builder's current loop depth and append
    // it to the graph.
    MBasicBlock *result = MBasicBlock::New(graph(), &analysis(), info(),
                                           predecessor, pc, MBasicBlock::NORMAL);
    return addBlock(result, loopDepth_);
}
MBasicBlock *
IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, MResumePoint *priorResumePoint)
{
    // Variant of newBlock that seeds the new block's entry state from
    // |priorResumePoint| rather than deriving it from the predecessor.
    MBasicBlock *result =
        MBasicBlock::NewWithResumePoint(graph(), info(), predecessor, pc,
                                        priorResumePoint);
    return addBlock(result, loopDepth_);
}
MBasicBlock *
IonBuilder::newBlockPopN(MBasicBlock *predecessor, jsbytecode *pc, uint32_t popped)
{
    // Create a NORMAL block whose entry stack discards the top |popped|
    // slots inherited from the predecessor.
    MBasicBlock *result =
        MBasicBlock::NewPopN(graph(), info(), predecessor, pc, MBasicBlock::NORMAL, popped);
    return addBlock(result, loopDepth_);
}
MBasicBlock *
IonBuilder::newBlockAfter(MBasicBlock *at, MBasicBlock *predecessor, jsbytecode *pc)
{
    // Create a NORMAL block and splice it into the graph immediately after
    // |at| instead of appending it at the end. Note that, unlike addBlock(),
    // this path does not assign a loop depth to the new block.
    MBasicBlock *result = MBasicBlock::New(graph(), &analysis(), info(),
                                           predecessor, pc, MBasicBlock::NORMAL);
    if (!result)
        return nullptr;

    graph().insertBlockAfter(at, result);
    return result;
}
MBasicBlock *
IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, uint32_t loopDepth)
{
    // Overload taking an explicit loop depth instead of using the builder's
    // current loopDepth_.
    MBasicBlock *result = MBasicBlock::New(graph(), &analysis(), info(),
                                           predecessor, pc, MBasicBlock::NORMAL);
    return addBlock(result, loopDepth);
}
MBasicBlock *
IonBuilder::newOsrPreheader(MBasicBlock *predecessor, jsbytecode *loopEntry)
{
JS_ASSERT(LoopEntryCanIonOsr(loopEntry));
JS_ASSERT(loopEntry == info().osrPc());
// Create two blocks: one for the OSR entry with no predecessors, one for
// the preheader, which has the OSR entry block as a predecessor. The
// OSR block is always the second block (with id 1).
MBasicBlock *osrBlock = newBlockAfter(*graph().begin(), loopEntry);
MBasicBlock *preheader = newBlock(predecessor, loopEntry);
if (!osrBlock || !preheader)
return nullptr;
MOsrEntry *entry = MOsrEntry::New(alloc());
osrBlock->add(entry);
// Initialize |scopeChain|.
{
uint32_t slot = info().scopeChainSlot();
MInstruction *scopev;
if (analysis().usesScopeChain()) {
scopev = MOsrScopeChain::New(alloc(), entry);
} else {
// Use an undefined value if the script does not need its scope
// chain, to match the type that is already being tracked for the
// slot.
scopev = MConstant::New(alloc(), UndefinedValue());
}
osrBlock->add(scopev);
osrBlock->initSlot(slot, scopev);
}
// Initialize |return value|
{
MInstruction *returnValue;
if (!script()->noScriptRval())
returnValue = MOsrReturnValue::New(alloc(), entry);
else
returnValue = MConstant::New(alloc(), UndefinedValue());
osrBlock->add(returnValue);
osrBlock->initSlot(info().returnValueSlot(), returnValue);
}
// Initialize arguments object.
bool needsArgsObj = info().needsArgsObj();
MInstruction *argsObj = nullptr;
if (info().hasArguments()) {
if (needsArgsObj)
argsObj = MOsrArgumentsObject::New(alloc(), entry);
else
argsObj = MConstant::New(alloc(), UndefinedValue());
osrBlock->add(argsObj);
osrBlock->initSlot(info().argsObjSlot(), argsObj);
}
if (info().funMaybeLazy()) {
// Initialize |this| parameter.
MParameter *thisv = MParameter::New(alloc(), MParameter::THIS_SLOT, nullptr);
osrBlock->add(thisv);
osrBlock->initSlot(info().thisSlot(), thisv);
// Initialize arguments.
for (uint32_t i = 0; i < info().nargs(); i++) {
uint32_t slot = needsArgsObj ? info().argSlotUnchecked(i) : info().argSlot(i);
// Only grab arguments from the arguments object if the arguments object
// aliases formals. If the argsobj does not alias formals, then the
// formals may have been assigned to during interpretation, and that change
// will not be reflected in the argsobj.
if (needsArgsObj && info().argsObjAliasesFormals()) {
JS_ASSERT(argsObj && argsObj->isOsrArgumentsObject());
// If this is an aliased formal, then the arguments object
// contains a hole at this index. Any references to this
// variable in the jitcode will come from JSOP_*ALIASEDVAR
// opcodes, so the slot itself can be set to undefined. If
// it's not aliased, it must be retrieved from the arguments
// object.
MInstruction *osrv;